Compare commits

..

2 Commits

Author SHA1 Message Date
David Wertenteil
6b3bdc08da testing release 2024-02-29 17:19:18 +02:00
Matthias Bertschy
9b665335b0 fix output dir for actions following download-artifact
Signed-off-by: Matthias Bertschy <matthias.bertschy@gmail.com>
2024-02-29 15:55:45 +01:00
113 changed files with 1837 additions and 3821 deletions

View File

@@ -23,6 +23,7 @@ jobs:
permissions:
actions: read
checks: read
contents: read
deployments: read
id-token: write
issues: read
@@ -33,22 +34,18 @@ jobs:
repository-projects: read
security-events: read
statuses: read
attestations: read
contents: write
uses: ./.github/workflows/a-pr-scanner.yaml
with:
RELEASE: ""
CLIENT: test
CGO_ENABLED: 0
GO111MODULE: ""
secrets: inherit
binary-build:
if: ${{ github.actor == 'kubescape' }}
if: ${{ github.repository_owner == 'kubescape' }}
permissions:
actions: read
checks: read
contents: write
contents: read
deployments: read
discussions: read
id-token: write
@@ -59,7 +56,6 @@ jobs:
repository-projects: read
security-events: read
statuses: read
attestations: read
uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
with:
COMPONENT_NAME: kubescape

View File

@@ -10,7 +10,7 @@ jobs:
NEW_TAG: ${{ steps.tag-calculator.outputs.NEW_TAG }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
- id: tag-calculator
uses: ./.github/actions/tag-action
with:
@@ -19,6 +19,7 @@ jobs:
permissions:
actions: read
checks: read
contents: read
deployments: read
discussions: read
id-token: write
@@ -29,8 +30,6 @@ jobs:
repository-projects: read
security-events: read
statuses: read
contents: write
attestations: write
needs: [retag]
uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
with:
@@ -56,7 +55,6 @@ jobs:
repository-projects: read
statuses: read
security-events: read
attestations: read
needs: [retag, binary-build]
uses: ./.github/workflows/c-create-release.yaml
with:
@@ -64,29 +62,3 @@ jobs:
TAG: ${{ needs.retag.outputs.NEW_TAG }}
DRAFT: false
secrets: inherit
publish-image:
permissions:
actions: read
checks: read
deployments: read
discussions: read
id-token: write
issues: read
packages: write
pages: read
pull-requests: read
repository-projects: read
security-events: read
statuses: read
attestations: read
contents: write
uses: ./.github/workflows/d-publish-image.yaml
needs: [create-release, retag]
with:
client: "image-release"
image_name: "quay.io/${{ github.repository_owner }}/kubescape-cli"
image_tag: ${{ needs.retag.outputs.NEW_TAG }}
support_platforms: true
cosign: true
secrets: inherit

View File

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
if: github.repository_owner == 'kubescape'
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
with:
submodules: recursive
- name: Update new version in krew-index

View File

@@ -15,70 +15,7 @@ on:
required: false
type: string
default: "./..."
GO111MODULE:
required: true
type: string
CGO_ENABLED:
type: number
default: 1
jobs:
unit-tests:
if: ${{ github.actor != 'kubescape' }}
name: Create cross-platform build
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
submodules: recursive
- uses: actions/setup-go@v4
name: Installing go
with:
go-version: ${{ inputs.GO_VERSION }}
cache: true
- name: Test core pkg
run: ${{ env.DOCKER_CMD }} go test -v ./...
if: startsWith(github.ref, 'refs/tags')
- name: Test httphandler pkg
run: ${{ env.DOCKER_CMD }} sh -c 'cd httphandler && go test -v ./...'
if: startsWith(github.ref, 'refs/tags')
- uses: anchore/sbom-action/download-syft@v0.15.2
name: Setup Syft
- uses: goreleaser/goreleaser-action@v5
name: Build
with:
distribution: goreleaser
version: latest
args: release --clean --snapshot
env:
RELEASE: ${{ inputs.RELEASE }}
CLIENT: ${{ inputs.CLIENT }}
CGO_ENABLED: ${{ inputs.CGO_ENABLED }}
- name: Smoke Testing
env:
RELEASE: ${{ inputs.RELEASE }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: ${{ env.DOCKER_CMD }} python3 smoke_testing/init.py ${PWD}/dist/kubescape-ubuntu-latest
- name: golangci-lint
continue-on-error: false
uses: golangci/golangci-lint-action@v3
with:
version: latest
args: --timeout 10m
only-new-issues: true
skip-pkg-cache: true
skip-build-cache: true
scanners:
env:
GITGUARDIAN_API_KEY: ${{ secrets.GITGUARDIAN_API_KEY }}

View File

@@ -18,7 +18,7 @@ on:
GO_VERSION:
required: false
type: string
default: "1.22"
default: "1.21"
GO111MODULE:
required: false
type: string
@@ -30,32 +30,7 @@ on:
BINARY_TESTS:
type: string
required: false
default: '[
"ks_microservice_create_2_cronjob_mitre_and_nsa_proxy",
"ks_microservice_triggering_with_cron_job",
"ks_microservice_update_cronjob_schedule",
"ks_microservice_delete_cronjob",
"ks_microservice_create_2_cronjob_mitre_and_nsa",
"ks_microservice_ns_creation",
"ks_microservice_on_demand",
"ks_microservice_mitre_framework_on_demand",
"ks_microservice_nsa_and_mitre_framework_demand",
"scan_nsa",
"scan_mitre",
"scan_with_exceptions",
"scan_repository",
"scan_local_file",
"scan_local_glob_files",
"scan_local_list_of_files",
"scan_with_exception_to_backend",
"scan_nsa_and_submit_to_backend",
"scan_mitre_and_submit_to_backend",
"scan_local_repository_and_submit_to_backend",
"scan_repository_from_url_and_submit_to_backend",
"scan_with_custom_framework",
"scan_customer_configuration",
"scan_compliance_score"
]'
default: '[ "scan_nsa", "scan_mitre", "scan_with_exceptions", "scan_repository", "scan_local_file", "scan_local_glob_files", "scan_local_list_of_files", "scan_nsa_and_submit_to_backend", "scan_mitre_and_submit_to_backend", "scan_local_repository_and_submit_to_backend", "scan_repository_from_url_and_submit_to_backend", "scan_with_exception_to_backend", "scan_with_custom_framework", "scan_customer_configuration", "host_scanner", "scan_compliance_score", "control_cluster_from_CLI_config_scan_exclude_namespaces", "control_cluster_from_CLI_config_scan_include_namespaces", "control_cluster_from_CLI_config_scan_host_scanner_enabled", "control_cluster_from_CLI_config_scan_MITRE_framework", "control_cluster_from_CLI_vulnerabilities_scan_default", "control_cluster_from_CLI_vulnerabilities_scan_include_namespaces" ]'
workflow_call:
inputs:
@@ -70,7 +45,7 @@ on:
type: string
GO_VERSION:
type: string
default: "1.22"
default: "1.21"
GO111MODULE:
required: true
type: string
@@ -79,115 +54,30 @@ on:
default: 1
BINARY_TESTS:
type: string
default: '[
"scan_nsa",
"scan_mitre",
"scan_with_exceptions",
"scan_repository",
"scan_local_file",
"scan_local_glob_files",
"scan_local_list_of_files",
"scan_nsa_and_submit_to_backend",
"scan_mitre_and_submit_to_backend",
"scan_local_repository_and_submit_to_backend",
"scan_repository_from_url_and_submit_to_backend",
"scan_with_custom_framework",
"scan_customer_configuration",
"scan_compliance_score",
"scan_custom_framework_scanning_file_scope_testing",
"scan_custom_framework_scanning_cluster_scope_testing",
"scan_custom_framework_scanning_cluster_and_file_scope_testing"
]'
default: '[ "scan_nsa", "scan_mitre", "scan_with_exceptions", "scan_repository", "scan_local_file", "scan_local_glob_files", "scan_local_list_of_files", "scan_nsa_and_submit_to_backend", "scan_mitre_and_submit_to_backend", "scan_local_repository_and_submit_to_backend", "scan_repository_from_url_and_submit_to_backend", "scan_with_exception_to_backend", "scan_with_custom_framework", "scan_customer_configuration", "host_scanner", "scan_compliance_score", "scan_custom_framework_scanning_file_scope_testing", "scan_custom_framework_scanning_cluster_scope_testing", "scan_custom_framework_scanning_cluster_and_file_scope_testing" ]'
jobs:
wf-preparation:
name: secret-validator
runs-on: ubuntu-latest
outputs:
TEST_NAMES: ${{ steps.export_tests_to_env.outputs.TEST_NAMES }}
is-secret-set: ${{ steps.check-secret-set.outputs.is-secret-set }}
steps:
- name: check if the necessary secrets are set in github secrets
id: check-secret-set
env:
CUSTOMER: ${{ secrets.CUSTOMER }}
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.PASSWORD }}
CLIENT_ID: ${{ secrets.CLIENT_ID_PROD }}
SECRET_KEY: ${{ secrets.SECRET_KEY_PROD }}
REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
run: "echo \"is-secret-set=${{ env.CUSTOMER != '' && env.USERNAME != '' && env.PASSWORD != '' && env.CLIENT_ID != '' && env.SECRET_KEY != '' && env.REGISTRY_USERNAME != '' && env.REGISTRY_PASSWORD != '' }}\" >> $GITHUB_OUTPUT\n"
- id: export_tests_to_env
name: set test name
run: |
echo "TEST_NAMES=$input" >> $GITHUB_OUTPUT
env:
input: ${{ inputs.BINARY_TESTS }}
check-secret:
name: check if QUAYIO_REGISTRY_USERNAME & QUAYIO_REGISTRY_PASSWORD is set in github secrets
runs-on: ubuntu-latest
outputs:
is-secret-set: ${{ steps.check-secret-set.outputs.is-secret-set }}
steps:
- name: check if QUAYIO_REGISTRY_USERNAME & QUAYIO_REGISTRY_PASSWORD is set in github secrets
id: check-secret-set
env:
QUAYIO_REGISTRY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
QUAYIO_REGISTRY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
run: |
echo "is-secret-set=${{ env.QUAYIO_REGISTRY_USERNAME != '' && env.QUAYIO_REGISTRY_PASSWORD != '' }}" >> $GITHUB_OUTPUT
binary-build:
name: Create cross-platform build
needs: wf-preparation
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
runs-on: ubuntu-22.04-large
runs-on: ubuntu-latest
steps:
- name: (debug) Step 1 - Check disk space before checkout
run: df -h
- uses: actions/checkout@v4
- uses: actions/checkout@v3
with:
fetch-depth: 0
submodules: recursive
- name: (debug) Step 2 - Check disk space before installing Go
run: df -h
- uses: actions/setup-go@v4
name: Installing go
with:
go-version: ${{ inputs.GO_VERSION }}
cache: true
- name: (debug) Step 3 - Check disk space before build
run: df -h
- name: Test core pkg
run: ${{ env.DOCKER_CMD }} go test -v ./...
if: startsWith(github.ref, 'refs/tags')
- name: (debug) Step 4 - Check disk space before testing httphandler pkg
run: df -h
- name: Test httphandler pkg
run: ${{ env.DOCKER_CMD }} sh -c 'cd httphandler && go test -v ./...'
if: startsWith(github.ref, 'refs/tags')
- name: (debug) Step 5 - Check disk space before setting up Syft
run: df -h
- uses: anchore/sbom-action/download-syft@v0.15.2
name: Setup Syft
- name: (debug) Step 6 - Check disk space before goreleaser
run: df -h
- uses: goreleaser/goreleaser-action@v5
name: Build
with:
@@ -199,162 +89,10 @@ jobs:
CLIENT: ${{ inputs.CLIENT }}
CGO_ENABLED: ${{ inputs.CGO_ENABLED }}
- name: (debug) Step 7 - Check disk space before smoke testing
run: df -h
- name: Smoke Testing
env:
RELEASE: ${{ inputs.RELEASE }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: ${{ env.DOCKER_CMD }} python3 smoke_testing/init.py ${PWD}/dist/kubescape-ubuntu-latest
- name: (debug) Step 8 - Check disk space before golangci-lint
run: df -h
- name: golangci-lint
continue-on-error: true
uses: golangci/golangci-lint-action@v3
with:
version: latest
args: --timeout 10m
only-new-issues: true
skip-pkg-cache: true
skip-build-cache: true
- name: (debug) Step 9 - Check disk space before uploading artifacts
run: df -h
- uses: actions/upload-artifact@83fd05a356d7e2593de66fc9913b3002723633cb # ratchet:actions/upload-artifact@v3.1.1
name: Upload artifacts
with:
name: kubescape
path: dist/kubescape*
if-no-files-found: error
- name: (debug) Step 10 - Check disk space after uploading artifacts
run: df -h
build-http-image:
permissions:
contents: write
id-token: write
packages: write
pull-requests: read
needs: [check-secret]
uses: kubescape/workflows/.github/workflows/incluster-comp-pr-merged.yaml@main
with:
IMAGE_NAME: quay.io/${{ github.repository_owner }}/kubescape
IMAGE_TAG: ${{ inputs.RELEASE }}
COMPONENT_NAME: kubescape
CGO_ENABLED: 0
GO111MODULE: "on"
BUILD_PLATFORM: linux/amd64,linux/arm64
GO_VERSION: "1.22"
REQUIRED_TESTS: '[
"ks_microservice_create_2_cronjob_mitre_and_nsa_proxy",
"ks_microservice_triggering_with_cron_job",
"ks_microservice_update_cronjob_schedule",
"ks_microservice_delete_cronjob",
"ks_microservice_create_2_cronjob_mitre_and_nsa",
"ks_microservice_ns_creation",
"ks_microservice_on_demand",
"ks_microservice_mitre_framework_on_demand",
"ks_microservice_nsa_and_mitre_framework_demand",
"scan_nsa",
"scan_mitre",
"scan_with_exceptions",
"scan_repository",
"scan_local_file",
"scan_local_glob_files",
"scan_local_list_of_files",
"scan_with_exception_to_backend",
"scan_nsa_and_submit_to_backend",
"scan_mitre_and_submit_to_backend",
"scan_local_repository_and_submit_to_backend",
"scan_repository_from_url_and_submit_to_backend",
"scan_with_custom_framework",
"scan_customer_configuration",
"scan_compliance_score"
]'
COSIGN: true
HELM_E2E_TEST: true
FORCE: true
secrets: inherit
run-tests:
strategy:
fail-fast: false
matrix:
TEST: ${{ fromJson(needs.wf-preparation.outputs.TEST_NAMES) }}
needs: [wf-preparation, binary-build]
if: ${{ (needs.wf-preparation.outputs.is-secret-set == 'true') && (always() && (contains(needs.*.result, 'success') || contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }}
runs-on: ubuntu-latest # This cannot change
steps:
- uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # ratchet:actions/download-artifact@v3.0.2
id: download-artifact
with:
name: kubescape
path: "~"
- run: ls -laR
- name: chmod +x
run: chmod +x -R ${{steps.download-artifact.outputs.download-path}}/kubescape-ubuntu-latest
- name: Checkout systests repo
uses: actions/checkout@v4
with:
repository: armosec/system-tests
path: .
- uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # ratchet:actions/setup-python@v4
with:
python-version: '3.8.13'
cache: 'pip'
- name: create env
run: ./create_env.sh
- name: Generate uuid
id: uuid
run: |
echo "RANDOM_UUID=$(uuidgen)" >> $GITHUB_OUTPUT
- name: Create k8s Kind Cluster
id: kind-cluster-install
uses: helm/kind-action@d08cf6ff1575077dee99962540d77ce91c62387d # ratchet:helm/kind-action@v1.3.0
with:
cluster_name: ${{ steps.uuid.outputs.RANDOM_UUID }}
- name: run-tests-on-local-built-kubescape
env:
CUSTOMER: ${{ secrets.CUSTOMER }}
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.PASSWORD }}
CLIENT_ID: ${{ secrets.CLIENT_ID_PROD }}
SECRET_KEY: ${{ secrets.SECRET_KEY_PROD }}
REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
run: |
echo "Test history:"
echo " ${{ matrix.TEST }} " >/tmp/testhistory
cat /tmp/testhistory
source systests_python_env/bin/activate
python3 systest-cli.py \
-t ${{ matrix.TEST }} \
-b production \
-c CyberArmorTests \
--duration 3 \
--logger DEBUG \
--kwargs kubescape=${{steps.download-artifact.outputs.download-path}}/kubescape-ubuntu-latest
deactivate
- name: Test Report
uses: mikepenz/action-junit-report@6e9933f4a97f4d2b99acef4d7b97924466037882 # ratchet:mikepenz/action-junit-report@v3.6.1
if: always() # always run even if the previous step fails
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
report_paths: '**/results_xml_format/**.xml'
commit: ${{github.event.workflow_run.head_sha}}

View File

@@ -23,7 +23,7 @@ jobs:
permissions:
id-token: write
packages: write
contents: write
contents: read
pull-requests: read
uses: kubescape/workflows/.github/workflows/incluster-comp-pr-merged.yaml@main
with:

View File

@@ -24,8 +24,8 @@ jobs:
MAC_OS: macos-latest
UBUNTU_OS: ubuntu-latest
WINDOWS_OS: windows-latest
permissions:
contents: write
# permissions:
# contents: write
steps:
- uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # ratchet:actions/download-artifact@v3.0.2
id: download-artifact
@@ -33,59 +33,31 @@ jobs:
path: .
# TODO: kubescape-windows-latest is deprecated and should be removed
- name: Get kubescape.exe from kubescape-windows-latest.exe
run: cp ${{steps.download-artifact.outputs.download-path}}/kubescape/kubescape-${{ env.WINDOWS_OS }}.exe ${{steps.download-artifact.outputs.download-path}}/kubescape/kubescape.exe
- name: Get kubescape.exe from kubescape-windows-latest
run: cp ${{steps.download-artifact.outputs.download-path}}/kubescape-${{ env.WINDOWS_OS }} ${{steps.download-artifact.outputs.download-path}}/kubescape.exe
- name: Set release token
id: set-token
run: |
if [ "${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}" != "" ]; then
echo "token=${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}" >> $GITHUB_OUTPUT;
echo "TOKEN=${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}" >> $GITHUB_ENV;
else
echo "token=${{ secrets.GITHUB_TOKEN }}" >> $GITHUB_OUTPUT;
echo "TOKEN=${{ secrets.GITHUB_TOKEN }}" >> $GITHUB_ENV;
fi
- name: List artifacts
- name: Generate file list
id: generate-file-list
run: |
find . -type f -print
FILES=$(find ${{steps.download-artifact.outputs.download-path}} -type f | sed 's/^/.\//')
echo "FILES=$FILES" >> $GITHUB_ENV
- name: Release
uses: softprops/action-gh-release@975c1b265e11dd76618af1c374e7981f9a6ff44a
uses: softprops/action-gh-release@de2c0eb89ae2a093876385947365aca7b0e5f844 # ratchet:softprops/action-gh-release@v1
with:
token: ${{ steps.set-token.outputs.token }}
token: ${{ env.TOKEN }}
name: ${{ inputs.RELEASE_NAME }}
tag_name: ${{ inputs.TAG }}
body: ${{ github.event.pull_request.body }}
draft: ${{ inputs.DRAFT }}
prerelease: false
fail_on_unmatched_files: true
files: |
./kubescape/kubescape-${{ env.MAC_OS }}
./kubescape/kubescape-${{ env.MAC_OS }}.sbom
./kubescape/kubescape-${{ env.MAC_OS }}.sha256
./kubescape/kubescape-${{ env.MAC_OS }}.tar.gz
./kubescape/kubescape-${{ env.UBUNTU_OS }}
./kubescape/kubescape-${{ env.UBUNTU_OS }}.sbom
./kubescape/kubescape-${{ env.UBUNTU_OS }}.sha256
./kubescape/kubescape-${{ env.UBUNTU_OS }}.tar.gz
./kubescape/kubescape-${{ env.WINDOWS_OS }}.exe
./kubescape/kubescape-${{ env.WINDOWS_OS }}.exe.sbom
./kubescape/kubescape-${{ env.WINDOWS_OS }}.exe.sha256
./kubescape/kubescape-${{ env.WINDOWS_OS }}.tar.gz
./kubescape/kubescape-arm64-${{ env.MAC_OS }}
./kubescape/kubescape-arm64-${{ env.MAC_OS }}.sbom
./kubescape/kubescape-arm64-${{ env.MAC_OS }}.sha256
./kubescape/kubescape-arm64-${{ env.MAC_OS }}.tar.gz
./kubescape/kubescape-arm64-${{ env.UBUNTU_OS }}
./kubescape/kubescape-arm64-${{ env.UBUNTU_OS }}.sbom
./kubescape/kubescape-arm64-${{ env.UBUNTU_OS }}.sha256
./kubescape/kubescape-arm64-${{ env.UBUNTU_OS }}.tar.gz
./kubescape/kubescape-arm64-${{ env.WINDOWS_OS }}.exe
./kubescape/kubescape-arm64-${{ env.WINDOWS_OS }}.exe.sbom
./kubescape/kubescape-arm64-${{ env.WINDOWS_OS }}.exe.sha256
./kubescape/kubescape-arm64-${{ env.WINDOWS_OS }}.tar.gz
./kubescape/kubescape-riscv64-${{ env.UBUNTU_OS }}
./kubescape/kubescape-riscv64-${{ env.UBUNTU_OS }}.sbom
./kubescape/kubescape-riscv64-${{ env.UBUNTU_OS }}.sha256
./kubescape/kubescape-riscv64-${{ env.UBUNTU_OS }}.tar.gz
./kubescape/kubescape.exe
prerelease: false
files: ${{ env.FILES }}

View File

@@ -1,18 +1,5 @@
name: d-publish-image
permissions:
actions: read
checks: read
contents: write
deployments: read
discussions: read
id-token: write
issues: read
packages: read
pages: read
pull-requests: read
repository-projects: read
statuses: read
security-events: read
permissions: read-all
on:
workflow_call:
inputs:
@@ -59,7 +46,7 @@ jobs:
name: Build image and upload to registry
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # ratchet:actions/checkout@v3
with:
submodules: recursive
- name: Set up QEMU
@@ -76,9 +63,9 @@ jobs:
with:
path: .
- name: mv kubescape amd64 binary
run: mv ${{steps.download-artifact.outputs.download-path}}/kubescape/kubescape-ubuntu-latest kubescape-amd64-ubuntu-latest
run: mv ${{steps.download-artifact.outputs.download-path}}/kubescape-ubuntu-latest kubescape-amd64-ubuntu-latest
- name: mv kubescape arm64 binary
run: mv ${{steps.download-artifact.outputs.download-path}}/kubescape/kubescape-arm64-ubuntu-latest kubescape-arm64-ubuntu-latest
run: mv ${{steps.download-artifact.outputs.download-path}}/kubescape-arm64-ubuntu-latest kubescape-arm64-ubuntu-latest
- name: chmod +x
run: chmod +x -v kubescape-a*
- name: Build and push images

View File

@@ -37,7 +37,7 @@ jobs:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1
uses: ossf/scorecard-action@e38b1902ae4f44df626f11ba0734b14fb91f8f86 # v2.1.2
with:
results_file: results.sarif
results_format: sarif

View File

@@ -1,6 +1,6 @@
linters-settings:
govet:
shadow: true
check-shadowing: true
dupl:
threshold: 200
goconst:
@@ -24,6 +24,7 @@ linters:
- gosimple
disable:
# temporarily disabled
- varcheck
- errcheck
- dupl
- gocritic
@@ -35,6 +36,8 @@ linters:
- unparam
#- forbidigo # <- see later
# should remain disabled
- deadcode # deprecated linter
- maligned
- lll
- gochecknoinits
- gochecknoglobals

View File

@@ -12,18 +12,14 @@ before:
- go mod tidy
builds:
- goos:
- id: "kubescape-cli"
goos:
- linux
- windows
- darwin
goarch:
- amd64
- arm64
- riscv64
ldflags:
- -s -w
- -X "github.com/kubescape/kubescape/v3/core/cautils.BuildNumber={{.Env.RELEASE}}"
- -X "github.com/kubescape/kubescape/v3/core/cautils.Client={{.Env.CLIENT}}"
binary: >-
{{ .ProjectName }}-
{{- if eq .Arch "amd64" }}
@@ -34,11 +30,8 @@ builds:
no_unique_dist_dir: true
archives:
- format: binary
id: binaries
name_template: >-
{{ .Binary }}
- format: tar.gz
# this name template makes the OS and Arch compatible with the results of `uname`.
name_template: >-
{{ .Binary }}
@@ -49,12 +42,5 @@ changelog:
- "^docs:"
- "^test:"
checksum:
ids:
- binaries
split: true
sboms:
- artifacts: binary
documents:
- "{{ .Binary }}.sbom"
- artifacts: archive

View File

@@ -1,16 +1,14 @@
# Adopters
# Well-known companies
List of well-known companies who are publicly acknowledge using and/or contributing to Kubescape are (in alphabetical order):
* AWS uses Kubescape in the security training material [link](https://catalog.workshops.aws/containersecurity/en-US/module2)
* Energi Danmark: Publicly talking about how they use Kubescape in their CI/CD pipeline [link](https://www.armosec.io/energi-danmark-business-support/)
* Gitpod: Used Kubescape in their SOC2 compliance process [link](https://www.armosec.io/gitpod/)
* Intel: using Kubescape for security prioritization [video](https://youtu.be/1iCW1KboypY?si=OjmnshWbpFNVPGJT)
* Orange Business: talking about Kubescape/ARMO service they are doing [video](https://www.youtube.com/watch?v=cbJYCUM8578)
* Rabobank: talked at KCD Amsterdam about having Kubescape in their technology stack [video](https://youtu.be/oa_YJmjwepI?si=vSrFW6seMKHj2Lze) [image](/docs/img/kcd-amsterdam-rabo.jpg)
* VMWare/Bitnami: listing Kubescape in their public image/helm repository [link](https://github.com/bitnami/containers/tree/main/bitnami/kubescape)
Well-known companies who are using and/or contributing to Kubescape are (in alphabetical order):
* Accenture
* Amazon.com
* IBM
* Intel
* Meetup
* RedHat
* Scaleway
# Users

View File

@@ -59,7 +59,7 @@ _Did you know you can use Kubescape in all these places?_
## Kubescape-operator Helm-Chart
Besides the CLI, the Kubescape operator can also be installed via a Helm chart. Installing the Helm chart is an excellent way to begin using Kubescape, as it provides extensive features such as continuous scanning, image vulnerability scanning, runtime analysis, network policy generation, and more. You can find the Helm chart in the [Kubescape-operator documentation](https://kubescape.io/docs/install-operator/).
Besides the CLI, the Kubescape operator can also be installed via a Helm chart. Installing the Helm chart is an excellent way to begin using Kubescape, as it provides extensive features such as continuous scanning, image vulnerability scanning, runtime analysis, network policy generation, and more. You can find the Helm chart in the [Kubescape-operator documentation](https://kubescape.io/docs/install-helm-chart/).
## Kubescape GitHub Action
@@ -81,7 +81,7 @@ It retrieves Kubernetes objects from the API server and runs a set of [Rego snip
Kubescape is an open source project, we welcome your feedback and ideas for improvement. We are part of the Kubernetes community and are building more tests and controls as the ecosystem develops.
We hold [community meetings](https://zoom.us/j/95174063585) on Zoom, every second week on Tuesdays, at 15:00 CET. ([See that in your local time zone](https://time.is/compare/1500_in_CET)).
We hold [community meetings](https://zoom.us/j/95174063585) on Zoom, on the first Tuesday of every month, at 14:00 GMT. ([See that in your local time zone](https://time.is/compare/1400_in_GMT)).
The Kubescape project follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

View File

@@ -1,4 +1,4 @@
FROM --platform=$BUILDPLATFORM golang:1.22-bullseye as builder
FROM --platform=$BUILDPLATFORM golang:1.21-bullseye as builder
ENV GO111MODULE=on CGO_ENABLED=0
WORKDIR /work

View File

@@ -47,7 +47,6 @@ The patch command can be run in 2 ways:
| -a, --addr | Address of the buildkitd service | No | unix:///run/buildkit/buildkitd.sock |
| -t, --tag | Tag of the resultant patched image | No | image_name-patched |
| --timeout | Timeout for the patching process | No | 5m |
| --ignore-errors| Ignore errors during patching | No | false |
| -u, --username | Username for the image registry login | No | |
| -p, --password | Password for the image registry login | No | |
| -f, --format | Output file format. | No | |

View File

@@ -7,6 +7,7 @@ import (
"strings"
"time"
ref "github.com/distribution/distribution/reference"
"github.com/docker/distribution/reference"
"github.com/kubescape/go-logger"
@@ -69,7 +70,6 @@ func GetPatchCmd(ks meta.IKubescape) *cobra.Command {
patchCmd.PersistentFlags().StringVarP(&patchInfo.PatchedImageTag, "tag", "t", "", "Tag for the patched image. Defaults to '<image-tag>-patched' ")
patchCmd.PersistentFlags().StringVarP(&patchInfo.BuildkitAddress, "address", "a", "unix:///run/buildkit/buildkitd.sock", "Address of buildkitd service, defaults to local buildkitd.sock")
patchCmd.PersistentFlags().DurationVar(&patchInfo.Timeout, "timeout", 5*time.Minute, "Timeout for the operation, defaults to '5m'")
patchCmd.PersistentFlags().BoolVar(&patchInfo.IgnoreError, "ignore-errors", false, "Ignore errors and continue patching other images. Default to false")
patchCmd.PersistentFlags().StringVarP(&patchInfo.Username, "username", "u", "", "Username for registry login")
patchCmd.PersistentFlags().StringVarP(&patchInfo.Password, "password", "p", "", "Password for registry login")
@@ -97,22 +97,22 @@ func validateImagePatchInfo(patchInfo *metav1.PatchInfo) error {
}
// Parse the image full name to get image name and tag
named, err := reference.ParseNamed(patchInfoImage)
named, err := ref.ParseNamed(patchInfoImage)
if err != nil {
return err
}
// If no tag or digest is provided, default to 'latest'
if reference.IsNameOnly(named) {
if ref.IsNameOnly(named) {
logger.L().Warning("Image name has no tag or digest, using latest as tag")
named = reference.TagNameOnly(named)
named = ref.TagNameOnly(named)
}
patchInfo.Image = named.String()
// If no patched image tag is provided, default to '<image-tag>-patched'
if patchInfo.PatchedImageTag == "" {
taggedName, ok := named.(reference.Tagged)
taggedName, ok := named.(ref.Tagged)
if !ok {
return errors.New("unexpected error while parsing image tag")
}

View File

@@ -4,7 +4,7 @@ import (
"fmt"
"strings"
"github.com/kubescape/go-logger"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/kubescape/v3/cmd/completion"
@@ -16,7 +16,6 @@ import (
"github.com/kubescape/kubescape/v3/cmd/patch"
"github.com/kubescape/kubescape/v3/cmd/scan"
"github.com/kubescape/kubescape/v3/cmd/update"
"github.com/kubescape/kubescape/v3/cmd/vap"
"github.com/kubescape/kubescape/v3/cmd/version"
"github.com/kubescape/kubescape/v3/core/cautils"
"github.com/kubescape/kubescape/v3/core/cautils/getter"
@@ -98,7 +97,6 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
rootCmd.AddCommand(update.GetUpdateCmd())
rootCmd.AddCommand(fix.GetFixCmd(ks))
rootCmd.AddCommand(patch.GetPatchCmd(ks))
rootCmd.AddCommand(vap.GetVapHelperCmd())
rootCmd.AddCommand(operator.GetOperatorCmd(ks))
// deprecated commands

View File

@@ -77,7 +77,7 @@ func initEnvironment() {
)
if err != nil {
logger.L().Fatal("failed to get services from server", helpers.Error(err), helpers.String("server", rootInfo.DiscoveryServerURL))
logger.L().Fatal("failed to to get services from server", helpers.Error(err), helpers.String("server", rootInfo.DiscoveryServerURL))
return
}

View File

@@ -94,7 +94,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
}
if len(args) > 1 {
if args[1] != "-" {
if len(args[1:]) == 0 || args[1] != "-" {
scanInfo.InputPatterns = args[1:]
logger.L().Debug("List of input files", helpers.Interface("patterns", scanInfo.InputPatterns))
} else { // store stdin to file - do NOT move to separate function !!
@@ -112,6 +112,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
}
}
scanInfo.SetScanType(cautils.ScanTypeFramework)
scanInfo.FrameworkScan = true
scanInfo.SetPolicyIdentifiers(frameworks, apisv1.KindFramework)

View File

@@ -9,7 +9,6 @@ import (
"fmt"
"strings"
"github.com/kubescape/backend/pkg/versioncheck"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v3/core/cautils"
@@ -33,22 +32,22 @@ func GetUpdateCmd() *cobra.Command {
Example: updateCmdExamples,
RunE: func(_ *cobra.Command, args []string) error {
ctx := context.TODO()
v := versioncheck.NewVersionCheckHandler()
versionCheckRequest := versioncheck.NewVersionCheckRequest("", versioncheck.BuildNumber, "", "", "update", nil)
v := cautils.NewVersionCheckHandler()
versionCheckRequest := cautils.NewVersionCheckRequest(cautils.BuildNumber, "", "", "update")
v.CheckLatestVersion(ctx, versionCheckRequest)
//Checking the user's version of kubescape to the latest release
if versioncheck.BuildNumber == "" || strings.Contains(versioncheck.BuildNumber, "rc") {
if cautils.BuildNumber == "" || strings.Contains(cautils.BuildNumber, "rc") {
//your version is unknown
fmt.Printf("Nothing to update: you are running the development version\n")
} else if versioncheck.LatestReleaseVersion == "" {
} else if cautils.LatestReleaseVersion == "" {
//Failed to check for updates
logger.L().Info("Failed to check for updates")
} else if versioncheck.BuildNumber == versioncheck.LatestReleaseVersion {
logger.L().Info(("Failed to check for updates"))
} else if cautils.BuildNumber == cautils.LatestReleaseVersion {
//your version == latest version
logger.L().Info("Nothing to update: you are running the latest version", helpers.String("Version", versioncheck.BuildNumber))
logger.L().Info(("Nothing to update: you are running the latest version"), helpers.String("Version", cautils.BuildNumber))
} else {
fmt.Printf("Version %s is available. Please refer to our installation documentation: %s\n", versioncheck.LatestReleaseVersion, installationLink)
fmt.Printf("Version %s is available. Please refer to our installation documentation: %s\n", cautils.LatestReleaseVersion, installationLink)
}
return nil
},

View File

@@ -1,236 +0,0 @@
package vap
import (
"errors"
"fmt"
"io"
"net/http"
"regexp"
"github.com/kubescape/go-logger"
"github.com/kubescape/kubescape/v3/core/cautils"
"sigs.k8s.io/yaml"
"github.com/spf13/cobra"
admissionv1 "k8s.io/api/admissionregistration/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var vapHelperCmdExamples = fmt.Sprintf(`
vap command can be used for managing Validating Admission Policies in a Kubernetes cluster.
This is an experimental feature and it might change.
Examples:
# Install Kubescape CEL admission policy library
%[1]s vap deploy-library | kubectl apply -f -
# Create a policy binding
%[1]s vap create-policy-binding --name my-policy-binding --policy c-0016 --namespace=my-namespace | kubectl apply -f -
`, cautils.ExecName())
// GetVapHelperCmd returns the root "vap" command with its subcommands
// (deploy-library, create-policy-binding) attached.
func GetVapHelperCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:     "vap",
		Short:   "Helper commands for managing Validating Admission Policies in a Kubernetes cluster",
		Long:    ``,
		Example: vapHelperCmdExamples,
	}
	// Attach the subcommands.
	for _, sub := range []*cobra.Command{getDeployLibraryCmd(), getCreatePolicyBindingCmd()} {
		cmd.AddCommand(sub)
	}
	return cmd
}
// getDeployLibraryCmd builds the "deploy-library" subcommand, which prints the
// Kubescape CEL admission policy library manifests to STDOUT.
func getDeployLibraryCmd() *cobra.Command {
	deployCmd := &cobra.Command{
		Use:   "deploy-library",
		Short: "Install Kubescape CEL admission policy library",
		Long:  ``,
	}
	deployCmd.RunE = func(cmd *cobra.Command, args []string) error {
		return deployLibrary()
	}
	return deployCmd
}
// getCreatePolicyBindingCmd builds the "create-policy-binding" subcommand.
// It validates every flag value and then prints a ValidatingAdmissionPolicyBinding
// manifest to STDOUT (via createPolicyBinding) for the user to apply.
func getCreatePolicyBindingCmd() *cobra.Command {
	var policyBindingName string
	var policyName string
	var namespaceArr []string
	var labelArr []string
	var action string
	var parameterReference string
	// labelRe validates "key=value" pairs using Kubernetes label syntax:
	// an optional DNS-subdomain prefix on the key ("example.com/name"),
	// names of alphanumerics with '-', '_' and '.' allowed in the middle,
	// and a possibly empty value. Compiled once, not per loop iteration.
	// (The previous pattern `^[a-zA-Z0-9]+=[a-zA-Z0-9]+$` rejected valid
	// labels such as "app.kubernetes.io/name=kubescape".)
	labelRe := regexp.MustCompile(`^([a-z0-9]([-a-z0-9.]*[a-z0-9])?/)?[a-zA-Z0-9]([-a-zA-Z0-9_.]*[a-zA-Z0-9])?=([a-zA-Z0-9]([-a-zA-Z0-9_.]*[a-zA-Z0-9])?)?$`)
	createPolicyBindingCmd := &cobra.Command{
		Use:   "create-policy-binding",
		Short: "Create a policy binding",
		Long:  ``,
		RunE: func(cmd *cobra.Command, args []string) error {
			// Validate the inputs before emitting any YAML.
			if err := isValidK8sObjectName(policyBindingName); err != nil {
				return fmt.Errorf("invalid policy binding name %s: %w", policyBindingName, err)
			}
			if err := isValidK8sObjectName(policyName); err != nil {
				return fmt.Errorf("invalid policy name %s: %w", policyName, err)
			}
			for _, namespace := range namespaceArr {
				if err := isValidK8sObjectName(namespace); err != nil {
					return fmt.Errorf("invalid namespace %s: %w", namespace, err)
				}
			}
			for _, label := range labelArr {
				// Label selector must be in the format key=value
				if !labelRe.MatchString(label) {
					return fmt.Errorf("invalid label selector: %s", label)
				}
			}
			if action != "Deny" && action != "Audit" && action != "Warn" {
				return fmt.Errorf("invalid action: %s", action)
			}
			if parameterReference != "" {
				if err := isValidK8sObjectName(parameterReference); err != nil {
					return fmt.Errorf("invalid parameter reference %s: %w", parameterReference, err)
				}
			}
			return createPolicyBinding(policyBindingName, policyName, action, parameterReference, namespaceArr, labelArr)
		},
	}
	// Must specify the name of the policy binding and the policy to bind.
	createPolicyBindingCmd.Flags().StringVarP(&policyBindingName, "name", "n", "", "Name of the policy binding")
	createPolicyBindingCmd.MarkFlagRequired("name") //nolint:errcheck // flag is declared above, cannot fail
	createPolicyBindingCmd.Flags().StringVarP(&policyName, "policy", "p", "", "Name of the policy to bind the resources to")
	createPolicyBindingCmd.MarkFlagRequired("policy") //nolint:errcheck // flag is declared above, cannot fail
	createPolicyBindingCmd.Flags().StringSliceVar(&namespaceArr, "namespace", []string{}, "Resource namespace selector")
	createPolicyBindingCmd.Flags().StringSliceVar(&labelArr, "label", []string{}, "Resource label selector")
	createPolicyBindingCmd.Flags().StringVarP(&action, "action", "a", "Deny", "Action to take when policy fails")
	createPolicyBindingCmd.Flags().StringVarP(&parameterReference, "parameter-reference", "r", "", "Parameter reference object name")
	return createPolicyBindingCmd
}
// Implementation of the VAP helper commands
// deploy-library
// deployLibrary downloads the three manifests that make up the Kubescape CEL
// admission policy library from the latest GitHub release and prints them to
// STDOUT as one "---"-separated YAML stream, so the user can pipe the output
// to `kubectl apply -f -`. All downloads complete before anything is printed,
// so a failure never leaves a partial stream on STDOUT.
func deployLibrary() error {
	logger.L().Info("Downloading the Kubescape CEL admission policy library")
	const baseURL = "https://github.com/kubescape/cel-admission-library/releases/latest/download/"
	// Order matters for the consumer: definitions first, then the basic
	// configuration, then the policies themselves.
	manifestNames := []string{
		"policy-configuration-definition.yaml",
		"basic-control-configuration.yaml",
		"kubescape-validating-admission-policies.yaml",
	}
	manifests := make([]string, 0, len(manifestNames))
	for _, name := range manifestNames {
		content, err := downloadFileToString(baseURL + name)
		if err != nil {
			return err
		}
		manifests = append(manifests, content)
	}
	logger.L().Info("Successfully downloaded admission policy library")
	// Print the downloaded files to STDOUT joined with "---" separators.
	for i, manifest := range manifests {
		if i > 0 {
			fmt.Println("---")
		}
		fmt.Println(manifest)
	}
	return nil
}
func downloadFileToString(url string) (string, error) {
// Send an HTTP GET request to the URL
response, err := http.Get(url) //nolint:gosec
if err != nil {
return "", err // Return an empty string and the error if the request fails
}
defer response.Body.Close()
// Check for a successful response (HTTP 200 OK)
if response.StatusCode != http.StatusOK {
return "", fmt.Errorf("failed to download file: %s", response.Status)
}
// Read the response body
bodyBytes, err := io.ReadAll(response.Body)
if err != nil {
return "", err // Return an empty string and the error if reading fails
}
// Convert the byte slice to a string
bodyString := string(bodyBytes)
return bodyString, nil
}
func isValidK8sObjectName(name string) error {
// Kubernetes object names must consist of lower case alphanumeric characters, '-' or '.',
// and must start and end with an alphanumeric character (e.g., 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')
// Max length of 63 characters.
if len(name) > 63 {
return errors.New("name should be less than 63 characters")
}
regex := regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`)
if !regex.MatchString(name) {
return errors.New("name should consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character")
}
return nil
}
// Create a policy binding
func createPolicyBinding(bindingName string, policyName string, action string, paramRefName string, namespaceArr []string, labelMatch []string) error {
// Create a policy binding struct
policyBinding := &admissionv1.ValidatingAdmissionPolicyBinding{}
// Print the policy binding after marshalling it to YAML to the STDOUT
// The user can apply the output to the cluster
policyBinding.APIVersion = "admissionregistration.k8s.io/v1"
policyBinding.Name = bindingName
policyBinding.Kind = "ValidatingAdmissionPolicyBinding"
policyBinding.Spec.PolicyName = policyName
policyBinding.Spec.MatchResources = &admissionv1.MatchResources{}
if len(namespaceArr) > 0 {
policyBinding.Spec.MatchResources.NamespaceSelector = &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "name",
Operator: metav1.LabelSelectorOpIn,
Values: namespaceArr,
},
},
}
}
if len(labelMatch) > 0 {
policyBinding.Spec.MatchResources.ObjectSelector = &metav1.LabelSelector{}
policyBinding.Spec.MatchResources.ObjectSelector.MatchLabels = make(map[string]string)
for _, label := range labelMatch {
labelParts := regexp.MustCompile(`=`).Split(label, 2)
policyBinding.Spec.MatchResources.ObjectSelector.MatchLabels[labelParts[0]] = labelParts[1]
}
}
policyBinding.Spec.ValidationActions = []admissionv1.ValidationAction{admissionv1.ValidationAction(action)}
if paramRefName != "" {
policyBinding.Spec.ParamRef = &admissionv1.ParamRef{
Name: paramRefName,
}
}
// Marshal the policy binding to YAML
out, err := yaml.Marshal(policyBinding)
if err != nil {
return err
}
fmt.Println(string(out))
return nil
}

View File

@@ -1,10 +0,0 @@
package vap
import (
"testing"
)
// TestGetVapHelperCmd verifies that building the vap command tree does not
// panic. (The previous comment referred to GetFixCmd — a copy-paste leftover.)
func TestGetVapHelperCmd(t *testing.T) {
	cmd := GetVapHelperCmd()
	_ = cmd
}

View File

@@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/kubescape/backend/pkg/versioncheck"
"github.com/kubescape/kubescape/v3/core/cautils"
"github.com/spf13/cobra"
)
@@ -15,8 +15,8 @@ func GetVersionCmd() *cobra.Command {
Long: ``,
RunE: func(cmd *cobra.Command, args []string) error {
ctx := context.TODO()
v := versioncheck.NewIVersionCheckHandler(ctx)
versionCheckRequest := versioncheck.NewVersionCheckRequest("", versioncheck.BuildNumber, "", "", "version", nil)
v := cautils.NewIVersionCheckHandler(ctx)
versionCheckRequest := cautils.NewVersionCheckRequest(cautils.BuildNumber, "", "", "version")
v.CheckLatestVersion(ctx, versionCheckRequest)
fmt.Fprintf(cmd.OutOrStdout(),
"Your current version is: %s\n",

View File

@@ -5,7 +5,7 @@ import (
"io"
"testing"
"github.com/kubescape/backend/pkg/versioncheck"
"github.com/kubescape/kubescape/v3/core/cautils"
"github.com/stretchr/testify/assert"
)
@@ -28,7 +28,7 @@ func TestGetVersionCmd(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
versioncheck.BuildNumber = tt.buildNumber
cautils.BuildNumber = tt.buildNumber
if cmd := GetVersionCmd(); cmd != nil {
buf := bytes.NewBufferString("")

View File

@@ -1,21 +0,0 @@
package cautils
import (
"os"
"github.com/kubescape/backend/pkg/versioncheck"
)
var BuildNumber string
var Client string
func init() {
if BuildNumber != "" {
versioncheck.BuildNumber = BuildNumber
} else {
versioncheck.BuildNumber = os.Getenv("RELEASE")
}
if Client != "" {
versioncheck.Client = Client
}
}

View File

@@ -58,7 +58,6 @@ type OPASessionObj struct {
OmitRawResources bool // omit raw resources from output
SingleResourceScan workloadinterface.IWorkload // single resource scan
TopWorkloadsByScore []reporthandling.IResource
TemplateMapping map[string]MappingNodes // Map chart obj to template (only for rendering from path)
}
func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework, k8sResources K8SResources, scanInfo *ScanInfo) *OPASessionObj {
@@ -75,7 +74,6 @@ func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework
SessionID: scanInfo.ScanID,
Metadata: scanInfoToScanMetadata(ctx, scanInfo),
OmitRawResources: scanInfo.OmitRawResources,
TemplateMapping: make(map[string]MappingNodes),
}
}

View File

@@ -3,7 +3,6 @@ package cautils
import (
"golang.org/x/mod/semver"
"github.com/kubescape/backend/pkg/versioncheck"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/apis"
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
@@ -64,7 +63,7 @@ func (policies *Policies) Set(frameworks []reporthandling.Framework, excludedRul
// 1. Rule is compatible with the current kubescape version
// 2. Rule fits the current scanning scope
func ShouldSkipRule(control reporthandling.Control, rule reporthandling.PolicyRule, scanningScope reporthandling.ScanningScopeType) bool {
if !isRuleKubescapeVersionCompatible(rule.Attributes, versioncheck.BuildNumber) {
if !isRuleKubescapeVersionCompatible(rule.Attributes, BuildNumber) {
return true
}
if !isControlFitToScanScope(control, scanningScope) {

View File

@@ -239,59 +239,3 @@ func TestIsFrameworkFitToScanScope(t *testing.T) {
})
}
}
// Fixture rules for TestIsRuleKubescapeVersionCompatible below. Each rule
// declares a [useFromKubescapeVersion, useUntilKubescapeVersion) range; the
// two "invalid" fixtures carry a non-string version value to exercise the
// type-safety path.
var rule_v1_0_131 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
	Attributes: map[string]interface{}{"useUntilKubescapeVersion": "v1.0.132"}}}
var rule_v1_0_132 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
	Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.132", "useUntilKubescapeVersion": "v1.0.133"}}}
var rule_v1_0_133 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
	Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.133", "useUntilKubescapeVersion": "v1.0.134"}}}
var rule_v1_0_134 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
	Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.134"}}}
var rule_invalid_from = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
	Attributes: map[string]interface{}{"useFromKubescapeVersion": 1.0135, "useUntilKubescapeVersion": "v1.0.135"}}}
var rule_invalid_until = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
	Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.135", "useUntilKubescapeVersion": 1.0135}}}
// TestIsRuleKubescapeVersionCompatible checks isRuleKubescapeVersionCompatible
// against the fixture rules above for build numbers below, inside and above
// each rule's version range, plus the empty (development) build number and
// non-string attribute values.
func TestIsRuleKubescapeVersionCompatible(t *testing.T) {
	// local build- no build number
	// should not crash when the value of useUntilKubescapeVersion is not a string
	buildNumberMock := "v1.0.135"
	assert.False(t, isRuleKubescapeVersionCompatible(rule_invalid_from.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_invalid_until.Attributes, buildNumberMock))
	// should use only rules that don't have "until"
	buildNumberMock = ""
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
	// should only use rules that version is in range of use
	buildNumberMock = "v1.0.130"
	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
	// should only use rules that version is in range of use
	buildNumberMock = "v1.0.132"
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
	// should only use rules that version is in range of use
	buildNumberMock = "v1.0.133"
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
	// should only use rules that version is in range of use
	buildNumberMock = "v1.0.135"
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
}

View File

@@ -38,7 +38,7 @@ type Chart struct {
}
// LoadResourcesFromHelmCharts scans a given path (recursively) for helm charts, renders the templates and returns a map of workloads and a map of chart names
func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[string][]workloadinterface.IMetadata, map[string]Chart, map[string]MappingNodes) {
func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[string][]workloadinterface.IMetadata, map[string]Chart) {
directories, _ := listDirs(basePath)
helmDirectories := make([]string, 0)
for _, dir := range directories {
@@ -49,18 +49,14 @@ func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[stri
sourceToWorkloads := map[string][]workloadinterface.IMetadata{}
sourceToChart := make(map[string]Chart, 0)
sourceToNodes := map[string]MappingNodes{}
for _, helmDir := range helmDirectories {
chart, err := NewHelmChart(helmDir)
if err == nil {
wls, templateToNodes, errs := chart.GetWorkloadsWithDefaultValues()
wls, errs := chart.GetWorkloadsWithDefaultValues()
if len(errs) > 0 {
logger.L().Ctx(ctx).Warning(fmt.Sprintf("Rendering of Helm chart template '%s', failed: %v", chart.GetName(), errs))
continue
}
for k, v := range templateToNodes {
sourceToNodes[k] = v
}
chartName := chart.GetName()
for k, v := range wls {
@@ -72,7 +68,7 @@ func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[stri
}
}
}
return sourceToWorkloads, sourceToChart, sourceToNodes
return sourceToWorkloads, sourceToChart
}
// If the contents at given path is a Kustomize Directory, LoadResourcesFromKustomizeDirectory will

View File

@@ -45,11 +45,10 @@ func TestLoadResourcesFromFiles(t *testing.T) {
}
func TestLoadResourcesFromHelmCharts(t *testing.T) {
sourceToWorkloads, sourceToChartName, _ := LoadResourcesFromHelmCharts(context.TODO(), helmChartPath())
sourceToWorkloads, sourceToChartName := LoadResourcesFromHelmCharts(context.TODO(), helmChartPath())
assert.Equal(t, 6, len(sourceToWorkloads))
for file, workloads := range sourceToWorkloads {
assert.Equalf(t, 1, len(workloads), "expected 1 workload in file %s", file)
w := workloads[0]

View File

@@ -9,7 +9,7 @@ import (
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
"github.com/kubescape/regolibrary/v2/gitregostore"
"github.com/kubescape/regolibrary/gitregostore"
)
// =======================================================================================================================
@@ -29,7 +29,7 @@ type DownloadReleasedPolicy struct {
func NewDownloadReleasedPolicy() *DownloadReleasedPolicy {
return &DownloadReleasedPolicy{
gs: gitregostore.NewGitRegoStoreV2(-1),
gs: gitregostore.NewDefaultGitRegoStore(-1),
}
}

View File

@@ -2,7 +2,6 @@ package cautils
import (
"path/filepath"
"strconv"
"strings"
logger "github.com/kubescape/go-logger"
@@ -46,32 +45,22 @@ func (hc *HelmChart) GetDefaultValues() map[string]interface{} {
}
// GetWorkloads renders chart template using the default values and returns a map of source file to its workloads
func (hc *HelmChart) GetWorkloadsWithDefaultValues() (map[string][]workloadinterface.IMetadata, map[string]MappingNodes, []error) {
func (hc *HelmChart) GetWorkloadsWithDefaultValues() (map[string][]workloadinterface.IMetadata, []error) {
return hc.GetWorkloads(hc.GetDefaultValues())
}
// GetWorkloads renders chart template using the provided values and returns a map of source (absolute) file path to its workloads
func (hc *HelmChart) GetWorkloads(values map[string]interface{}) (map[string][]workloadinterface.IMetadata, map[string]MappingNodes, []error) {
func (hc *HelmChart) GetWorkloads(values map[string]interface{}) (map[string][]workloadinterface.IMetadata, []error) {
vals, err := helmchartutil.ToRenderValues(hc.chart, values, helmchartutil.ReleaseOptions{}, nil)
if err != nil {
return nil, nil, []error{err}
return nil, []error{err}
}
// change the chart to template with comment, only is template(.yaml added otherwise no)
hc.AddCommentToTemplate()
sourceToFile, err := helmengine.Render(hc.chart, vals)
if err != nil {
return nil, nil, []error{err}
return nil, []error{err}
}
// get the resouse and analysis and store it to the struct
fileMapping := make(map[string]MappingNodes)
GetTemplateMapping(sourceToFile, fileMapping)
// delete the comment from chart and from sourceToFile
RemoveComment(sourceToFile)
workloads := make(map[string][]workloadinterface.IMetadata, 0)
errs := []error{}
@@ -87,13 +76,10 @@ func (hc *HelmChart) GetWorkloads(values map[string]interface{}) (map[string][]w
if len(wls) == 0 {
continue
}
// separate base path and file name. We do not use the os.Separator because the paths returned from the helm engine are not OS specific (e.g. mychart/templates/myfile.yaml)
if firstPathSeparatorIndex := strings.Index(path, string("/")); firstPathSeparatorIndex != -1 {
absPath := filepath.Join(hc.path, path[firstPathSeparatorIndex:])
if nodes, ok := fileMapping[path]; ok {
fileMapping[absPath] = nodes
delete(fileMapping, path)
}
workloads[absPath] = []workloadinterface.IMetadata{}
for i := range wls {
lw := localworkload.NewLocalWorkload(wls[i].GetObject())
@@ -102,46 +88,5 @@ func (hc *HelmChart) GetWorkloads(values map[string]interface{}) (map[string][]w
}
}
}
return workloads, fileMapping, errs
}
func (hc *HelmChart) AddCommentToTemplate() {
for index, t := range hc.chart.Templates {
if IsYaml(strings.ToLower(t.Name)) {
var newLines []string
originalTemplate := string(t.Data)
lines := strings.Split(originalTemplate, "\n")
for index, line := range lines {
comment := " #This is the " + strconv.Itoa(index+1) + " line"
newLines = append(newLines, line+comment)
}
templateWithComment := strings.Join(newLines, "\n")
hc.chart.Templates[index].Data = []byte(templateWithComment)
}
}
}
func RemoveComment(sourceToFile map[string]string) {
// commentRe := regexp.MustCompile(CommentFormat)
for fileName, file := range sourceToFile {
if !IsYaml(strings.ToLower((fileName))) {
continue
}
sourceToFile[fileName] = commentRe.ReplaceAllLiteralString(file, "")
}
}
func GetTemplateMapping(sourceToFile map[string]string, fileMapping map[string]MappingNodes) {
for fileName, fileContent := range sourceToFile {
mappingNodes, err := GetMapping(fileName, fileContent)
if err != nil {
// if one file cannot get mapping nodes, generate error, then ignore it
logger.L().Warning("Failed to get File Mapping nodes", helpers.String("file name", fileName), helpers.Error(err))
continue
}
if len(mappingNodes.Nodes) != 0 {
fileMapping[fileName] = *mappingNodes
}
}
return workloads, errs
}

View File

@@ -83,7 +83,7 @@ func (s *HelmChartTestSuite) TestGetWorkloadsWithOverride() {
// Override default value
values["image"].(map[string]interface{})["pullPolicy"] = "Never"
fileToWorkloads, _, errs := chart.GetWorkloads(values)
fileToWorkloads, errs := chart.GetWorkloads(values)
s.Len(errs, 0)
s.Lenf(fileToWorkloads, len(s.expectedFiles), "Expected %d files", len(s.expectedFiles))
@@ -111,7 +111,7 @@ func (s *HelmChartTestSuite) TestGetWorkloadsMissingValue() {
values := chart.GetDefaultValues()
delete(values, "image")
fileToWorkloads, _, errs := chart.GetWorkloads(values)
fileToWorkloads, errs := chart.GetWorkloads(values)
s.Nil(fileToWorkloads)
s.Len(errs, 1, "Expected an error due to missing value")

View File

@@ -1,34 +0,0 @@
package cautils
// ObjectID identifies the Kubernetes object a mapping node belongs to by its
// apiVersion and kind (populated by the mapping extractor in this package).
type ObjectID struct {
	apiVersion string
	kind       string
}

// MappingNode links one value in a rendered chart file back to the template
// it was rendered from.
type MappingNode struct {
	ObjectID           *ObjectID // object the value belongs to
	Field              string    // dotted field path inside the object
	Value              string    // scalar value ("" for map-typed nodes)
	TemplateFileName   string    // template file the value originates from
	TemplateLineNumber int       // line number inside that template
}

// MappingNodes collects the mapping nodes of one template file. Each entry in
// Nodes covers one YAML sub-document ("---"-separated) and maps a field path
// to its MappingNode. (The previous comment claimed map[int]; the key is the
// field-path string, not a line number.)
type MappingNodes struct {
	Nodes            []map[string]MappingNode
	TemplateFileName string
}

// writeInfoToNode fills the node in place with the given object identity,
// field path, template line number, value and template file name.
func (node *MappingNode) writeInfoToNode(objectID *ObjectID, path string, lineNumber int, value string, fileName string) {
	node.Field = path
	node.TemplateLineNumber = lineNumber
	node.ObjectID = objectID
	node.Value = value
	node.TemplateFileName = fileName
}

// NewMappingNodes returns an empty MappingNodes; callers set TemplateFileName
// and append sub-document maps to Nodes.
func NewMappingNodes() *MappingNodes {
	mappingNodes := new(MappingNodes)
	mappingNodes.TemplateFileName = ""
	return mappingNodes
}

View File

@@ -1,7 +1,7 @@
package cautils
import (
"github.com/distribution/reference"
"github.com/docker/distribution/reference"
)
func NormalizeImageName(img string) (string, error) {

View File

@@ -1,266 +0,0 @@
package cautils
import (
"fmt"
"regexp"
"strconv"
"strings"
logger "github.com/kubescape/go-logger"
"github.com/mikefarah/yq/v4/pkg/yqlib"
"gopkg.in/op/go-logging.v1"
)
const (
CommentFormat = `#This is the (?P<line>\d*) line`
)
var apiVersionRe = regexp.MustCompile(`apiVersion: (?P<apiVersion>\S*)`)
var kindRe = regexp.MustCompile(`kind: (?P<kind>\S*)`)
var pathRe = regexp.MustCompile(`path: (?P<path>\S*)`)
var typeRe = regexp.MustCompile(`type: '(?P<type>\S*)'`)
var valueRe = regexp.MustCompile(`value: (?P<value>\[.+\]|\S*)`)
var commentRe = regexp.MustCompile(CommentFormat)
var seqRe = regexp.MustCompile(`.(?P<number>\d+)(?P<point>\.?)`)
var newSeqRe = "[${number}]${point}"
var newFileSeperator = "---"
// change to use go func
// GetMapping walks a rendered template file (with the "#This is the N line"
// comments injected by AddCommentToTemplate) line by line and builds the
// field-path -> MappingNode map for each YAML sub-document in it. For every
// content line it runs a yq expression selecting that line and parses yq's
// output for path/type/value. Returns an error when a document exposes fields
// before both apiVersion and kind are known, or when yq/parsing fails.
func GetMapping(fileName string, fileContent string) (*MappingNodes, error) {
	node := new(MappingNode)
	objectID := new(ObjectID)
	subFileNodes := make(map[string]MappingNode)
	mappingNodes := NewMappingNodes()
	mappingNodes.TemplateFileName = fileName
	lines := strings.Split(fileContent, "\n")
	lastNumber := -1
	reducedNumber := -1 // uses to make sure line and line in yq is the same
	isApiVersionEmpty := true
	isKindEmpty := true
	var err error
	// yq expression template: select the node at a given 0-based line and
	// emit its path, type and value.
	var lineExpression = `..| select(line == %d)| {"destpath": path | join("."),"type": type,"value": .}`
	for i, line := range lines {
		index := i
		if apiVersionRe.MatchString(line) {
			isApiVersionEmpty, err = extractApiVersion(line, objectID)
			if err != nil {
				return nil, fmt.Errorf("extractApiVersion error: err, %s", err.Error())
			}
			// Record the offset between file line numbers and yq line
			// numbers once, at the first apiVersion line.
			// NOTE(review): this assigns index + (-1), i.e. index-1 —
			// presumably intentional to align with yq's numbering; confirm.
			if reducedNumber == -1 {
				reducedNumber = index + reducedNumber
			}
			continue
		} else if kindRe.MatchString(line) {
			isKindEmpty, err = extractKind(line, objectID)
			if err != nil {
				return nil, fmt.Errorf("extractKind error: err, %s", err.Error())
			}
			continue
		} else if strings.Contains(line, newFileSeperator) { //At least two files in one yaml
			// Close the current sub-document and start a fresh one.
			mappingNodes.Nodes = append(mappingNodes.Nodes, subFileNodes)
			// Restart a subfileNode
			// NOTE(review): the flags are reset to false (meaning "already
			// seen"), not true — the previous document's apiVersion/kind
			// remain considered valid for the next one; confirm intended.
			isApiVersionEmpty = false
			isKindEmpty = false
			subFileNodes = make(map[string]MappingNode)
			continue
		}
		if !isApiVersionEmpty || !isKindEmpty {
			// not sure if it can go to the end
			index = index - reducedNumber
			expression := fmt.Sprintf(lineExpression, index)
			output, err := getYamlLineInfo(expression, fileContent)
			if err != nil {
				return nil, err
			}
			path := extractParameter(pathRe, output, "$path")
			//if path is empty, continue
			if path != "" && path != "\"\"" {
				if isApiVersionEmpty || isKindEmpty {
					return nil, fmt.Errorf("there is no enough objectID info")
				}
				// yq may report several matched nodes; split its output into
				// one chunk per "destpath" section.
				splits := strings.Split(output, "dest")
				if len(splits) < 2 {
					return nil, fmt.Errorf("something wrong with the length of the splits, which is %d", len(splits))
				} else {
					// cut the redundant one
					splits = splits[1:]
					lastNumber, err = writeNodes(splits, lastNumber, fileName, node, objectID, subFileNodes)
					if err != nil {
						return nil, fmt.Errorf("writeNodes err: %s", err.Error())
					}
				}
			}
		}
		// Flush the last sub-document when the final line is reached.
		if i == len(lines)-1 {
			mappingNodes.Nodes = append(mappingNodes.Nodes, subFileNodes)
		}
	}
	return mappingNodes, nil
}
// writeNodes records one MappingNode per "dest..." section of the yq output
// into subFileNodes. splits holds the yq output chunks (one per matched
// path); lastNumber is the last template line number seen, reused when a
// chunk carries no line comment of its own. Returns the updated lastNumber.
// Fix: the two branches of the previous if/else were identical except for the
// mapMatched flag and the error-message prefix — collapsed into one call.
func writeNodes(splits []string, lastNumber int, fileName string, node *MappingNode, objectID *ObjectID, subFileNodes map[string]MappingNode) (int, error) {
	for _, split := range splits {
		path := extractPath(split)
		mapMatched, err := extractMapType(split)
		if err != nil {
			return -1, fmt.Errorf("extractMapType err: %s", err.Error())
		}
		// mapMatched tells the writer whether the node is map-typed (no
		// scalar value to extract).
		lastNumber, err = writeNoteToMapping(split, lastNumber, path, fileName, node, objectID, mapMatched, subFileNodes)
		if err != nil {
			prefix := "not map type"
			if mapMatched {
				prefix = "map type"
			}
			return -1, fmt.Errorf("%s: writeNoteToMapping, err: %s", prefix, err.Error())
		}
	}
	return lastNumber, nil
}
// writeNoteToMapping extracts the node information from one yq output chunk
// (via writeNodeInfo) and stores the resulting node in subFileNodes under its
// field path. Returns the line number to carry forward as lastNumber.
func writeNoteToMapping(split string, lastNumber int, path string, fileName string, node *MappingNode, objectID *ObjectID, isMapType bool, subFileNodes map[string]MappingNode) (int, error) {
	newlastNumber, err := writeNodeInfo(split, lastNumber, path, fileName, node, objectID, isMapType)
	if err != nil {
		return 0, fmt.Errorf("isMapType: %v, writeNodeInfo wrong err: %s", isMapType, err.Error())
	}
	// First write wins: a duplicate path within one sub-document is silently
	// ignored rather than treated as an error (see the disabled branch below).
	if _, ok := subFileNodes[path]; !ok { // Assume the path is unique in one subfile
		subFileNodes[path] = *node
	}
	// else {
	// return 0, fmt.Errorf("isMapType: %v, %s in mapping.Nodes exists", isMapType, path)
	// }
	return newlastNumber, nil
}
// writeNodeInfo extracts the scalar value and line information from one yq
// output chunk and writes it onto node in place. Returns the template line
// number to use as lastNumber for subsequent chunks.
func writeNodeInfo(split string, lastNumber int, path string, fileName string, node *MappingNode, objectID *ObjectID, isMapType bool) (int, error) {
	value, lineNumber, newLastNumber, err := getInfoFromOne(split, lastNumber, isMapType)
	if err != nil {
		return -1, fmt.Errorf("getInfoFromOne wrong err: %s", err.Error())
	}
	// lastNumber = newLastNumber
	node.writeInfoToNode(objectID, path, lineNumber, value, fileName)
	return newLastNumber, nil
}
// getInfoFromOne extracts the scalar value and the template line number from
// one yq output chunk. Map-typed nodes carry no scalar value. The line number
// is read from the injected "#This is the N line" comment; when the chunk has
// no such comment, the previous number (lastNumber) is reused. Returns
// (value, lineNumber, number-to-carry-forward, error).
func getInfoFromOne(output string, lastNumber int, isMapType bool) (value string, lineNumber int, newLastNumber int, err error) {
	if isMapType {
		value = ""
	} else {
		value = extractParameter(valueRe, output, "$value")
	}
	number := extractParameter(commentRe, output, "$line")
	if number != "" {
		lineNumber, err = strconv.Atoi(number)
		if err != nil {
			return "", -1, -1, fmt.Errorf("strconv.Atoi err: %s", err.Error())
		}
		if isMapType {
			// NOTE(review): map nodes apparently carry the comment of the
			// following line, hence the step back by one — confirm against
			// the comment-injection logic in AddCommentToTemplate.
			lineNumber = lineNumber - 1
		}
		lastNumber = lineNumber
		// save to structure
	} else {
		lineNumber = lastNumber
		// use the last one number
	}
	newLastNumber = lineNumber
	return value, lineNumber, newLastNumber, nil
}
// getYamlLineInfo evaluates the given yq expression against the YAML content
// and returns yq's raw textual output, wrapping any evaluation failure.
func getYamlLineInfo(expression string, yamlFile string) (string, error) {
	out, err := exectuateYq(expression, yamlFile)
	if err == nil {
		return out, nil
	}
	return "", fmt.Errorf("exectuate yqlib err: %s", err.Error())
}
// exectuateYq evaluates a yq expression against the given YAML content and
// returns yq's textual output.
//
// NOTE(review): the name has a typo ("exectuate" -> "execute"); renaming
// would touch its in-file callers, so it is only flagged here.
func exectuateYq(expression string, yamlContent string) (string, error) {
	// Route yqlib's internal logging through our logger at ERROR level so yq
	// internals do not spam the output.
	backendLoggerLeveled := logging.AddModuleLevel(logging.NewLogBackend(logger.L().GetWriter(), "", 0))
	backendLoggerLeveled.SetLevel(logging.ERROR, "")
	yqlib.GetLogger().SetBackend(backendLoggerLeveled)
	encoder := configureEncoder()
	decoder := configureDecoder(false)
	stringEvaluator := yqlib.NewStringEvaluator()
	out, err := stringEvaluator.Evaluate(expression, yamlContent, encoder, decoder)
	if err != nil {
		// The underlying error is deliberately discarded; callers match on
		// this fixed message.
		return "", fmt.Errorf("no matches found")
	}
	return out, err
}
// extractApiVersion pulls the apiVersion value out of line and stores it on
// objectID. The boolean mirrors "still empty": true (with an error) when
// nothing could be extracted, false on success.
func extractApiVersion(line string, objectID *ObjectID) (bool, error) {
	if apiVersion := extractParameter(apiVersionRe, line, "$apiVersion"); apiVersion != "" {
		objectID.apiVersion = apiVersion
		return false, nil
	}
	return true, fmt.Errorf("something wrong when extracting the apiVersion, the line is %s", line)
}
// extractKind pulls the kind value out of line and stores it on objectID.
// The boolean mirrors "still empty": true (with an error) when nothing could
// be extracted, false on success.
func extractKind(line string, objectID *ObjectID) (bool, error) {
	if kind := extractParameter(kindRe, line, "$kind"); kind != "" {
		objectID.kind = kind
		return false, nil
	}
	return true, fmt.Errorf("something wrong when extracting the kind, the line is %s", line)
}
// extractPath returns the "path" field from a yq output chunk, rewriting
// numeric path segments from dotted form (".0.") into index form ("[0].")
// via seqRe/newSeqRe.
func extractPath(split string) string {
	rawPath := extractParameter(pathRe, split, "$path")
	return seqRe.ReplaceAllString(rawPath, newSeqRe)
}
// extractMapType reports whether the yq "type" field in the chunk denotes a
// YAML map node (tag "!!map"). The error return is kept for interface
// compatibility with callers and is always nil.
// Fix: the tag is a fixed literal, so a substring check replaces the previous
// per-call regexp.MatchString compilation, whose error path was unreachable.
func extractMapType(split string) (bool, error) {
	pathType := extractParameter(typeRe, split, "$type")
	return strings.Contains(pathType, "!!map"), nil
}
func extractParameter(re *regexp.Regexp, line string, keyword string) string {
submatch := re.FindStringSubmatchIndex(line)
result := []byte{}
result = re.ExpandString(result, keyword, line, submatch)
parameter := string(result)
return parameter
}
//yqlib configuration
func configureEncoder() yqlib.Encoder {
indent := 2
colorsEnabled := false
yqlibEncoder := yqlib.NewYamlEncoder(indent, colorsEnabled, yqlib.ConfiguredYamlPreferences)
return yqlibEncoder
}
// configureDecoder builds the yqlib YAML decoder used for expression input;
// evaluateTogether sets yqlib's EvaluateTogether preference.
//
// NOTE(review): ConfiguredYamlPreferences comes from the yqlib package and is
// mutated here before use — if it is shared (pointer-like) state, the
// EvaluateTogether setting would leak to other yqlib users; confirm against
// the yqlib version in go.mod.
func configureDecoder(evaluateTogether bool) yqlib.Decoder {
	prefs := yqlib.ConfiguredYamlPreferences
	prefs.EvaluateTogether = evaluateTogether
	yqlibDecoder := yqlib.NewYamlDecoder(prefs)
	return yqlibDecoder
}

View File

@@ -1,79 +0,0 @@
package cautils
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/suite"
helmchartutil "helm.sh/helm/v3/pkg/chartutil"
helmengine "helm.sh/helm/v3/pkg/engine"
)
// HelmChartGetMappingSuite exercises GetMapping against the example chart
// under examples/helm_chart_mapping_node.
type HelmChartGetMappingSuite struct {
	suite.Suite
	helmChartPath string            // path to the example chart
	expectedFiles []string          // template files expected from the chart
	fileContent   map[string]string // rendered source path -> rendered content
}

// TestHelmChartGetMappingSuite runs the suite under the standard test runner.
func TestHelmChartGetMappingSuite(t *testing.T) {
	suite.Run(t, new(HelmChartGetMappingSuite))
}
// SetupSuite renders the example chart once with its default values and
// caches the rendered file contents for the tests. Errors from chart loading
// and rendering are deliberately ignored here; the tests fail on the missing
// content instead.
func (s *HelmChartGetMappingSuite) SetupSuite() {
	o, _ := os.Getwd()
	s.helmChartPath = filepath.Join(filepath.Dir(o), "..", "examples", "helm_chart_mapping_node")
	s.expectedFiles = []string{
		filepath.Join(s.helmChartPath, "templates", "clusterrolebinding.yaml"),
		filepath.Join(s.helmChartPath, "templates", "clusterrole.yaml"),
		filepath.Join(s.helmChartPath, "templates", "serviceaccount.yaml"),
		filepath.Join(s.helmChartPath, "templates", "rolebinding.yaml"),
		filepath.Join(s.helmChartPath, "templates", "role.yaml"),
		filepath.Join(s.helmChartPath, "templates", "cronjob.yaml"),
	}
	s.fileContent = make(map[string]string)
	hc, _ := NewHelmChart(s.helmChartPath)
	values := hc.GetDefaultValues()
	vals, _ := helmchartutil.ToRenderValues(hc.chart, values, helmchartutil.ReleaseOptions{}, nil)
	sourceToFile, _ := helmengine.Render(hc.chart, vals)
	s.fileContent = sourceToFile
}
// TestGetMapping verifies GetMapping on a rendered template holding one node
// group: the template file name is echoed back and the single group contains
// 13 nodes.
func (s *HelmChartGetMappingSuite) TestGetMapping() {
	fileNodes, err := GetMapping("rolebinding.yaml", s.fileContent["kubescape/templates/rolebinding.yaml"])
	s.NoError(err, "Get Mapping nodes correctly")
	s.Equal(fileNodes.TemplateFileName, "rolebinding.yaml")
	s.Len(fileNodes.Nodes, 1)
	s.Len(fileNodes.Nodes[0], 13)
}
// TestGetMappingFromFileContainsMultipleSubFiles verifies GetMapping on a
// rendered template that contains multiple sub-documents: two node groups are
// produced, with 8 and 2 nodes respectively.
func (s *HelmChartGetMappingSuite) TestGetMappingFromFileContainsMultipleSubFiles() {
	fileNodes, err := GetMapping("serviceaccount.yaml", s.fileContent["kubescape/templates/serviceaccount.yaml"])
	s.NoError(err, "Get Mapping nodes correctly")
	s.Equal(fileNodes.TemplateFileName, "serviceaccount.yaml")
	s.Len(fileNodes.Nodes, 2)
	s.Len(fileNodes.Nodes[0], 8)
	s.Len(fileNodes.Nodes[1], 2)
}
// TestGetMappingFromFileCWithoutKindOrApiVersion verifies that GetMapping
// rejects a rendered file lacking the object identification fields and
// returns no mapping.
//
// Fix: use ErrorContains instead of Contains(err.Error(), ...) so a nil
// error fails the assertion instead of panicking on err.Error().
func (s *HelmChartGetMappingSuite) TestGetMappingFromFileCWithoutKindOrApiVersion() {
	fileNodes, err := GetMapping("clusterrole.yaml", s.fileContent["kubescape/templates/clusterrole.yaml"])
	s.ErrorContains(err, "there is no enough objectID info")
	s.Nil(fileNodes)
}
// TestGetMappingFromFileCWithoutApiVersion verifies that GetMapping rejects a
// rendered file that lacks the apiVersion field and returns no mapping.
//
// Fix: use ErrorContains instead of Contains(err.Error(), ...) so a nil
// error fails the assertion instead of panicking on err.Error().
func (s *HelmChartGetMappingSuite) TestGetMappingFromFileCWithoutApiVersion() {
	fileNodes, err := GetMapping("clusterrolebinding.yaml", s.fileContent["kubescape/templates/clusterrolebinding.yaml"])
	s.ErrorContains(err, "there is no enough objectID info")
	s.Nil(fileNodes)
}

View File

@@ -85,7 +85,7 @@ func (rbacObjects *RBACObjects) rbacObjectsToResources(resources *rbacutils.Rbac
if err != nil {
return nil, err
}
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the correct apiVersion?
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is this the correct apiVersion?
crIMeta := workloadinterface.NewWorkloadObj(crmap)
crIMeta.SetKind("ClusterRole")
allresources[crIMeta.GetID()] = crIMeta
@@ -95,7 +95,7 @@ func (rbacObjects *RBACObjects) rbacObjectsToResources(resources *rbacutils.Rbac
if err != nil {
return nil, err
}
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the correct apiVersion?
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is this the correct apiVersion?
crIMeta := workloadinterface.NewWorkloadObj(crmap)
crIMeta.SetKind("Role")
allresources[crIMeta.GetID()] = crIMeta
@@ -105,7 +105,7 @@ func (rbacObjects *RBACObjects) rbacObjectsToResources(resources *rbacutils.Rbac
if err != nil {
return nil, err
}
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the correct apiVersion?
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is this the correct apiVersion?
crIMeta := workloadinterface.NewWorkloadObj(crmap)
crIMeta.SetKind("ClusterRoleBinding")
allresources[crIMeta.GetID()] = crIMeta
@@ -115,7 +115,7 @@ func (rbacObjects *RBACObjects) rbacObjectsToResources(resources *rbacutils.Rbac
if err != nil {
return nil, err
}
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the correct apiVersion?
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is this the correct apiVersion?
crIMeta := workloadinterface.NewWorkloadObj(crmap)
crIMeta.SetKind("RoleBinding")
allresources[crIMeta.GetID()] = crIMeta

View File

@@ -8,7 +8,6 @@ import (
"path/filepath"
"strings"
"github.com/kubescape/backend/pkg/versioncheck"
giturl "github.com/kubescape/go-git-url"
"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
@@ -25,17 +24,20 @@ import (
type ScanningContext string
const (
ContextCluster ScanningContext = "cluster"
ContextFile ScanningContext = "single-file"
ContextDir ScanningContext = "local-dir"
ContextGitLocal ScanningContext = "git-local"
ContextGitRemote ScanningContext = "git-remote"
ContextCluster ScanningContext = "cluster"
ContextFile ScanningContext = "single-file"
ContextDir ScanningContext = "local-dir"
ContextGitURL ScanningContext = "git-url"
ContextGitLocal ScanningContext = "git-local"
)
const ( // deprecated
ScopeCluster = "cluster"
ScopeYAML = "yaml"
)
const (
// ScanCluster string = "cluster"
// ScanLocalFiles string = "yaml"
localControlInputsFilename string = "controls-inputs.json"
LocalExceptionsFilename string = "exceptions.json"
LocalAttackTracksFilename string = "attack-tracks.json"
@@ -108,8 +110,8 @@ type ScanInfo struct {
UseFrom []string // Load framework from local file (instead of download). Use when running offline
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
VerboseMode bool // Display all the input resources and not only failed resources
View string //
VerboseMode bool // Display all of the input resources and not only failed resources
View string // Display all of the input resources and not only failed resources
Format string // Format results (table, json, junit ...)
Output string // Store results in an output file, Output file name
FormatVersion string // Output object can be different between versions, this is for testing and backward compatibility
@@ -138,8 +140,6 @@ type ScanInfo struct {
ScanImages bool
ChartPath string
FilePath string
scanningContext *ScanningContext
cleanups []func()
}
type Getters struct {
@@ -155,12 +155,7 @@ func (scanInfo *ScanInfo) Init(ctx context.Context) {
if scanInfo.ScanID == "" {
scanInfo.ScanID = uuid.NewString()
}
}
func (scanInfo *ScanInfo) Cleanup() {
for _, cleanup := range scanInfo.cleanups {
cleanup()
}
}
func (scanInfo *ScanInfo) setUseArtifactsFrom(ctx context.Context) {
@@ -264,7 +259,7 @@ func scanInfoToScanMetadata(ctx context.Context, scanInfo *ScanInfo) *reporthand
metadata.ScanMetadata.TargetNames = append(metadata.ScanMetadata.TargetNames, policy.Identifier)
}
metadata.ScanMetadata.KubescapeVersion = versioncheck.BuildNumber
metadata.ScanMetadata.KubescapeVersion = BuildNumber
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
metadata.ScanMetadata.FailThreshold = scanInfo.FailThreshold
metadata.ScanMetadata.ComplianceThreshold = scanInfo.ComplianceThreshold
@@ -272,63 +267,51 @@ func scanInfoToScanMetadata(ctx context.Context, scanInfo *ScanInfo) *reporthand
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
metadata.ScanMetadata.ControlsInputs = scanInfo.ControlsInputs
switch scanInfo.GetScanningContext() {
inputFiles := ""
if len(scanInfo.InputPatterns) > 0 {
inputFiles = scanInfo.InputPatterns[0]
}
switch GetScanningContext(inputFiles) {
case ContextCluster:
// cluster
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Cluster
case ContextFile:
// local file
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.File
case ContextGitURL:
// url
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Repo
case ContextGitLocal:
// local-git
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.GitLocal
case ContextGitRemote:
// remote
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Repo
case ContextDir:
// directory
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Directory
}
scanInfo.setContextMetadata(ctx, &metadata.ContextMetadata)
setContextMetadata(ctx, &metadata.ContextMetadata, inputFiles)
return metadata
}
func (scanInfo *ScanInfo) GetInputFiles() string {
if len(scanInfo.InputPatterns) > 0 {
return scanInfo.InputPatterns[0]
}
return ""
}
func (scanInfo *ScanInfo) GetScanningContext() ScanningContext {
if scanInfo.scanningContext == nil {
scanningContext := scanInfo.getScanningContext(scanInfo.GetInputFiles())
scanInfo.scanningContext = &scanningContext
if len(scanInfo.InputPatterns) > 0 {
return GetScanningContext(scanInfo.InputPatterns[0])
}
return *scanInfo.scanningContext
return GetScanningContext("")
}
// getScanningContext get scanning context from the input param
// this function should be called only once. Call GetScanningContext() to get the scanning context
func (scanInfo *ScanInfo) getScanningContext(input string) ScanningContext {
// GetScanningContext get scanning context from the input param
func GetScanningContext(input string) ScanningContext {
// cluster
if input == "" {
return ContextCluster
}
// git url
// url
if _, err := giturl.NewGitURL(input); err == nil {
if repo, err := CloneGitRepo(&input); err == nil {
if _, err := NewLocalGitRepository(repo); err == nil {
scanInfo.cleanups = append(scanInfo.cleanups, func() {
_ = os.RemoveAll(repo)
})
return ContextGitRemote
}
}
return ContextGitURL
}
if !filepath.IsAbs(input) { // parse path
@@ -350,14 +333,19 @@ func (scanInfo *ScanInfo) getScanningContext(input string) ScanningContext {
// dir/glob
return ContextDir
}
func (scanInfo *ScanInfo) setContextMetadata(ctx context.Context, contextMetadata *reporthandlingv2.ContextMetadata) {
input := scanInfo.GetInputFiles()
switch scanInfo.GetScanningContext() {
func setContextMetadata(ctx context.Context, contextMetadata *reporthandlingv2.ContextMetadata, input string) {
switch GetScanningContext(input) {
case ContextCluster:
contextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{
ContextName: k8sinterface.GetContextName(),
}
case ContextGitURL:
// url
context, err := metadataGitURL(input)
if err != nil {
logger.L().Ctx(ctx).Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
}
contextMetadata.RepoContextMetadata = context
case ContextDir:
contextMetadata.DirectoryContextMetadata = &reporthandlingv2.DirectoryContextMetadata{
BasePath: getAbsPath(input),
@@ -389,21 +377,43 @@ func (scanInfo *ScanInfo) setContextMetadata(ctx context.Context, contextMetadat
}
case ContextGitLocal:
// local
repoContext, err := metadataGitLocal(input)
context, err := metadataGitLocal(input)
if err != nil {
logger.L().Ctx(ctx).Warning("in setContextMetadata", helpers.Interface("case", ContextGitLocal), helpers.Error(err))
logger.L().Ctx(ctx).Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
}
contextMetadata.RepoContextMetadata = repoContext
case ContextGitRemote:
// remote
repoContext, err := metadataGitLocal(GetClonedPath(input))
if err != nil {
logger.L().Ctx(ctx).Warning("in setContextMetadata", helpers.Interface("case", ContextGitRemote), helpers.Error(err))
}
contextMetadata.RepoContextMetadata = repoContext
contextMetadata.RepoContextMetadata = context
}
}
// metadataGitURL builds repository context metadata (provider, repo, owner,
// branch, remote URL and last commit) for a remote git URL using the giturl
// provider API parser. On error the partially-populated context is returned
// together with the wrapped error.
func metadataGitURL(input string) (*reporthandlingv2.RepoContextMetadata, error) {
	context := &reporthandlingv2.RepoContextMetadata{}
	gitParser, err := giturl.NewGitAPI(input)
	if err != nil {
		return context, fmt.Errorf("%w", err)
	}
	// Fall back to the provider's default branch when the URL does not pin one.
	if gitParser.GetBranchName() == "" {
		gitParser.SetDefaultBranchName()
	}
	context.Provider = gitParser.GetProvider()
	context.Repo = gitParser.GetRepoName()
	context.Owner = gitParser.GetOwnerName()
	context.Branch = gitParser.GetBranchName()
	context.RemoteURL = gitParser.GetURL().String()
	// NOTE(review): GetLatestCommit presumably queries the git provider's API
	// over the network — confirm before relying on this in offline scans.
	commit, err := gitParser.GetLatestCommit()
	if err != nil {
		return context, fmt.Errorf("%w", err)
	}
	context.LastCommit = reporthandling.LastCommit{
		Hash:          commit.SHA,
		Date:          commit.Committer.Date,
		CommitterName: commit.Committer.Name,
	}
	return context, nil
}
func metadataGitLocal(input string) (*reporthandlingv2.RepoContextMetadata, error) {
gitParser, err := NewLocalGitRepository(input)
if err != nil {
@@ -413,31 +423,31 @@ func metadataGitLocal(input string) (*reporthandlingv2.RepoContextMetadata, erro
if err != nil {
return nil, fmt.Errorf("%w", err)
}
repoContext := &reporthandlingv2.RepoContextMetadata{}
context := &reporthandlingv2.RepoContextMetadata{}
gitParserURL, err := giturl.NewGitURL(remoteURL)
if err != nil {
return repoContext, fmt.Errorf("%w", err)
return context, fmt.Errorf("%w", err)
}
gitParserURL.SetBranchName(gitParser.GetBranchName())
repoContext.Provider = gitParserURL.GetProvider()
repoContext.Repo = gitParserURL.GetRepoName()
repoContext.Owner = gitParserURL.GetOwnerName()
repoContext.Branch = gitParserURL.GetBranchName()
repoContext.RemoteURL = gitParserURL.GetURL().String()
context.Provider = gitParserURL.GetProvider()
context.Repo = gitParserURL.GetRepoName()
context.Owner = gitParserURL.GetOwnerName()
context.Branch = gitParserURL.GetBranchName()
context.RemoteURL = gitParserURL.GetURL().String()
commit, err := gitParser.GetLastCommit()
if err != nil {
return repoContext, fmt.Errorf("%w", err)
return context, fmt.Errorf("%w", err)
}
repoContext.LastCommit = reporthandling.LastCommit{
context.LastCommit = reporthandling.LastCommit{
Hash: commit.SHA,
Date: commit.Committer.Date,
CommitterName: commit.Committer.Name,
}
repoContext.LocalRootPath, _ = gitParser.GetRootDir()
context.LocalRootPath, _ = gitParser.GetRootDir()
return repoContext, nil
return context, nil
}
func getHostname() string {
if h, e := os.Hostname(); e == nil {
@@ -454,3 +464,11 @@ func getAbsPath(p string) string {
}
return p
}
// ScanningContextToScanningScope maps a ScanningContext onto the deprecated
// scope strings: a cluster context maps to ScopeCluster, every other context
// (file, directory, git) maps to ScopeYAML.
func ScanningContextToScanningScope(scanningContext ScanningContext) string {
	switch scanningContext {
	case ContextCluster:
		return ScopeCluster
	default:
		return ScopeYAML
	}
}

View File

@@ -3,19 +3,17 @@ package cautils
import (
"context"
"os"
"path/filepath"
"testing"
"github.com/go-git/go-git/v5"
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSetContextMetadata(t *testing.T) {
{
ctx := reporthandlingv2.ContextMetadata{}
scanInfo := &ScanInfo{}
scanInfo.setContextMetadata(context.TODO(), &ctx)
setContextMetadata(context.TODO(), &ctx, "")
assert.NotNil(t, ctx.ClusterContextMetadata)
assert.Nil(t, ctx.DirectoryContextMetadata)
@@ -44,57 +42,13 @@ func TestGetHostname(t *testing.T) {
}
func TestGetScanningContext(t *testing.T) {
repoRoot, err := os.MkdirTemp("", "repo")
require.NoError(t, err)
defer func(name string) {
_ = os.Remove(name)
}(repoRoot)
_, err = git.PlainClone(repoRoot, false, &git.CloneOptions{
URL: "https://github.com/kubescape/http-request",
})
require.NoError(t, err)
tmpFile, err := os.CreateTemp("", "single.*.txt")
require.NoError(t, err)
defer func(name string) {
_ = os.Remove(name)
}(tmpFile.Name())
tests := []struct {
name string
input string
want ScanningContext
}{
{
name: "empty input",
input: "",
want: ContextCluster,
},
{
name: "git URL input",
input: "https://github.com/kubescape/http-request",
want: ContextGitRemote,
},
{
name: "local git input",
input: repoRoot,
want: ContextGitLocal,
},
{
name: "single file input",
input: tmpFile.Name(),
want: ContextFile,
},
{
name: "directory input",
input: os.TempDir(),
want: ContextDir,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
scanInfo := &ScanInfo{}
assert.Equalf(t, tt.want, scanInfo.getScanningContext(tt.input), "GetScanningContext(%v)", tt.input)
})
}
// Test with empty input
assert.Equal(t, ContextCluster, GetScanningContext(""))
// Test with Git URL input
assert.Equal(t, ContextGitURL, GetScanningContext("https://github.com/kubescape/kubescape"))
// TODO: Add more tests with other input types
}
func TestScanInfoFormats(t *testing.T) {
@@ -123,3 +77,30 @@ func TestScanInfoFormats(t *testing.T) {
})
}
}
// TestGetScanningContextWithFile verifies that a path to a regular file is
// classified as ContextFile.
//
// Fixes: t.TempDir replaces the manual MkdirTemp + defer RemoveAll dance
// (automatic cleanup, no error handling), and os.WriteFile replaces
// os.Create, whose returned *os.File was previously left open for the
// lifetime of the test.
func TestGetScanningContextWithFile(t *testing.T) {
	filePath := filepath.Join(t.TempDir(), "file.txt")
	if err := os.WriteFile(filePath, nil, 0o644); err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, ContextFile, GetScanningContext(filePath))
}
func TestGetScanningContextWithDir(t *testing.T) {
// Test with a directory
dir, err := os.MkdirTemp("", "example")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
assert.Equal(t, ContextDir, GetScanningContext(dir))
}

View File

@@ -0,0 +1,186 @@
package cautils
import (
"context"
"encoding/json"
"fmt"
"net/http"
"os"
"strings"
"github.com/armosec/utils-go/boolutils"
utils "github.com/kubescape/backend/pkg/utils"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v3/core/cautils/getter"
"github.com/mattn/go-isatty"
"go.opentelemetry.io/otel"
"golang.org/x/mod/semver"
)
// Environment variables controlling the online version check.
const SKIP_VERSION_CHECK_DEPRECATED_ENV = "KUBESCAPE_SKIP_UPDATE_CHECK" // deprecated alias of KS_SKIP_UPDATE_CHECK
const SKIP_VERSION_CHECK_ENV = "KS_SKIP_UPDATE_CHECK"                   // any truthy value disables the version check
const CLIENT_ENV = "KS_CLIENT"                                          // overrides the reported client build name

// BuildNumber is the kubescape version string; empty when not provided
// (NewIVersionCheckHandler warns in that case).
var BuildNumber string

// Client is the client build environment reported to the version service;
// defaults to "local-build" in NewVersionCheckRequest when unset.
var Client string

// LatestReleaseVersion caches the latest release reported by the version service.
var LatestReleaseVersion string

// UnknownBuildNumber is reported when no build number was provided.
const UnknownBuildNumber = "unknown"
// IVersionCheckHandler checks whether a newer kubescape release is available
// for the given request payload.
type IVersionCheckHandler interface {
	CheckLatestVersion(context.Context, *VersionCheckRequest) error
}
// NewIVersionCheckHandler returns the concrete version checker, or a no-op
// mock when KS_SKIP_UPDATE_CHECK (or its deprecated alias) holds a truthy
// value. It also applies the KS_CLIENT override and warns when no build
// number is set.
func NewIVersionCheckHandler(ctx context.Context) IVersionCheckHandler {
	if BuildNumber == "" {
		logger.L().Ctx(ctx).Warning("Unknown build number: this might affect your scan results. Please ensure that you are running the latest version.")
	}
	if override, found := os.LookupEnv(CLIENT_ENV); found && override != "" {
		Client = override
	}
	// Check the current variable first, then its deprecated alias.
	for _, envVar := range []string{SKIP_VERSION_CHECK_ENV, SKIP_VERSION_CHECK_DEPRECATED_ENV} {
		if raw, found := os.LookupEnv(envVar); found && boolutils.StringToBool(raw) {
			return NewVersionCheckHandlerMock()
		}
	}
	return NewVersionCheckHandler()
}
// VersionCheckHandlerMock is a no-op IVersionCheckHandler used when the
// version check is skipped via environment variables.
type VersionCheckHandlerMock struct {
}

// NewVersionCheckHandlerMock returns a no-op version check handler.
func NewVersionCheckHandlerMock() *VersionCheckHandlerMock {
	return &VersionCheckHandlerMock{}
}
// VersionCheckHandler queries a remote endpoint for the latest kubescape and
// framework versions.
type VersionCheckHandler struct {
	// versionURL is the endpoint POSTed to by getLatestVersion.
	versionURL string
}

// VersionCheckRequest is the payload POSTed to the version service.
type VersionCheckRequest struct {
	Client           string `json:"client"`           // kubescape
	ClientBuild      string `json:"clientBuild"`      // client build environment
	ClientVersion    string `json:"clientVersion"`    // kubescape version
	Framework        string `json:"framework"`        // framework name
	FrameworkVersion string `json:"frameworkVersion"` // framework version
	ScanningTarget   string `json:"target"`           // Deprecated
	ScanningContext  string `json:"context"`          // scanning context- cluster/file/gitURL/localGit/dir
	TriggeredBy      string `json:"triggeredBy"`      // triggered by - cli/ ci / microservice
}

// VersionCheckResponse is the version service's reply.
type VersionCheckResponse struct {
	Client          string `json:"client"`          // kubescape
	ClientUpdate    string `json:"clientUpdate"`    // kubescape latest version
	Framework       string `json:"framework"`       // framework name
	FrameworkUpdate string `json:"frameworkUpdate"` // framework latest version
	Message         string `json:"message"`         // alert message
}
// NewVersionCheckHandler returns a handler pointing at the production
// version-check endpoint.
func NewVersionCheckHandler() *VersionCheckHandler {
	return &VersionCheckHandler{
		versionURL: "https://us-central1-elated-pottery-310110.cloudfunctions.net/ksgf1v1",
	}
}
// getTriggerSource reports how kubescape was invoked: "microservice" when
// running as the ksserver binary, "pipeline" for non-interactive shells or
// GitHub Actions, and "cli" otherwise.
func getTriggerSource() string {
	if strings.Contains(os.Args[0], "ksserver") {
		return "microservice"
	}
	stdinFd := os.Stdin.Fd()
	interactive := isatty.IsTerminal(stdinFd) || isatty.IsCygwinTerminal(stdinFd)
	if !interactive {
		// non-interactive shell
		return "pipeline"
	}
	if os.Getenv("GITHUB_ACTIONS") == "true" {
		return "pipeline"
	}
	return "cli"
}
// NewVersionCheckRequest assembles a version-check payload, substituting
// "unknown" for a missing build number or scanning target and "local-build"
// for an unset global Client. The trigger source is detected at call time.
func NewVersionCheckRequest(buildNumber, frameworkName, frameworkVersion, scanningTarget string) *VersionCheckRequest {
	if buildNumber == "" {
		buildNumber = UnknownBuildNumber
	}
	if scanningTarget == "" {
		scanningTarget = "unknown"
	}
	if Client == "" {
		Client = "local-build"
	}
	request := VersionCheckRequest{
		Client:           "kubescape",
		ClientBuild:      Client,
		ClientVersion:    buildNumber,
		Framework:        frameworkName,
		FrameworkVersion: frameworkVersion,
		ScanningTarget:   scanningTarget,
		TriggeredBy:      getTriggerSource(),
	}
	return &request
}
// CheckLatestVersion is a no-op: it only logs that the check is skipped and
// always succeeds.
func (v *VersionCheckHandlerMock) CheckLatestVersion(_ context.Context, _ *VersionCheckRequest) error {
	logger.L().Info("Skipping version check")
	return nil
}
// CheckLatestVersion queries the version service, caches the reported latest
// release in the LatestReleaseVersion global, and warns when the running
// BuildNumber is strictly older. Any panic during the check is recovered and
// logged so a version-check failure cannot crash a scan.
func (v *VersionCheckHandler) CheckLatestVersion(ctx context.Context, versionData *VersionCheckRequest) error {
	ctx, span := otel.Tracer("").Start(ctx, "versionCheckHandler.CheckLatestVersion")
	defer span.End()
	defer func() {
		if err := recover(); err != nil {
			logger.L().Ctx(ctx).Warning("failed to get latest version", helpers.Interface("error", err))
		}
	}()
	latestVersion, err := v.getLatestVersion(versionData)
	if err != nil || latestVersion == nil {
		// NOTE(review): the underlying error is deliberately not wrapped;
		// TestCheckLatestVersion compares against this exact message.
		return fmt.Errorf("failed to get latest version")
	}
	LatestReleaseVersion = latestVersion.ClientUpdate
	if latestVersion.ClientUpdate != "" {
		// Warn only when our own version is known and semver-older than the release.
		if BuildNumber != "" && semver.Compare(BuildNumber, LatestReleaseVersion) == -1 {
			logger.L().Ctx(ctx).Warning(warningMessage(LatestReleaseVersion))
		}
	}
	// TODO - Enable after supporting framework version
	// if latestVersion.FrameworkUpdate != "" {
	// fmt.Println(warningMessage(latestVersion.Framework, latestVersion.FrameworkUpdate))
	// }
	if latestVersion.Message != "" {
		logger.L().Info(latestVersion.Message)
	}
	return nil
}
// getLatestVersion POSTs the request payload as JSON to the version service
// and decodes the JSON response.
//
// Fix: the error returned by getter.HTTPPost was previously ignored (it was
// immediately shadowed by the Decode call, so a failed request would be
// decoded unconditionally); it is now checked before decoding.
func (v *VersionCheckHandler) getLatestVersion(versionData *VersionCheckRequest) (*VersionCheckResponse, error) {
	reqBody, err := json.Marshal(*versionData)
	if err != nil {
		return nil, fmt.Errorf("in 'CheckLatestVersion' failed to json.Marshal, reason: %s", err.Error())
	}
	rdr, _, err := getter.HTTPPost(http.DefaultClient, v.versionURL, reqBody, map[string]string{"Content-Type": "application/json"})
	if err != nil {
		return nil, err
	}
	vResp, err := utils.Decode[*VersionCheckResponse](rdr)
	if err != nil {
		return nil, err
	}
	return vResp, nil
}
// warningMessage formats the "outdated version" warning, comparing the
// running BuildNumber with the given latest release tag.
func warningMessage(release string) string {
	return fmt.Sprintf("current version '%s' is not updated to the latest release: '%s'", BuildNumber, release)
}

View File

@@ -0,0 +1,193 @@
package cautils
import (
"context"
"fmt"
"os"
"reflect"
"testing"
"github.com/armosec/armoapi-go/armotypes"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/stretchr/testify/assert"
"golang.org/x/mod/semver"
)
// TestGetKubernetesObjects is an empty placeholder.
// TODO(review): implement or remove — an empty test always passes and gives a
// false sense of coverage.
func TestGetKubernetesObjects(t *testing.T) {
}
// Fixture rules for the version-compatibility tests below. Each rule gates
// itself to a kubescape version range via the useFromKubescapeVersion
// (inclusive) and useUntilKubescapeVersion (exclusive) attributes, as
// exercised by TestIsRuleKubescapeVersionCompatible.
var rule_v1_0_131 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
	Attributes: map[string]interface{}{"useUntilKubescapeVersion": "v1.0.132"}}}
var rule_v1_0_132 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
	Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.132", "useUntilKubescapeVersion": "v1.0.133"}}}
var rule_v1_0_133 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
	Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.133", "useUntilKubescapeVersion": "v1.0.134"}}}
var rule_v1_0_134 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
	Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.134"}}}

// Malformed fixtures: the version attribute is a float, not a string.
var rule_invalid_from = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
	Attributes: map[string]interface{}{"useFromKubescapeVersion": 1.0135, "useUntilKubescapeVersion": "v1.0.135"}}}
var rule_invalid_until = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
	Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.135", "useUntilKubescapeVersion": 1.0135}}}
// TestIsRuleKubescapeVersionCompatible checks the rule/version gating logic:
//   - malformed (non-string) version attributes make a rule incompatible;
//   - an empty build number matches only rules without an upper bound;
//   - otherwise a rule matches when the build number is >= its
//     useFromKubescapeVersion and < its useUntilKubescapeVersion.
func TestIsRuleKubescapeVersionCompatible(t *testing.T) {
	// local build- no build number
	// should not crash when the value of useUntilKubescapeVersion is not a string
	buildNumberMock := "v1.0.135"
	assert.False(t, isRuleKubescapeVersionCompatible(rule_invalid_from.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_invalid_until.Attributes, buildNumberMock))

	// should use only rules that don't have "until"
	buildNumberMock = ""
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))

	// should only use rules that version is in range of use
	buildNumberMock = "v1.0.130"
	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))

	// should only use rules that version is in range of use
	buildNumberMock = "v1.0.132"
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))

	// should only use rules that version is in range of use
	buildNumberMock = "v1.0.133"
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))

	// should only use rules that version is in range of use
	buildNumberMock = "v1.0.135"
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
}
// TestCheckLatestVersion_Semver_Compare documents the semver.Compare contract
// this package relies on: -1 when a < b, 0 when equal, +1 when a > b.
func TestCheckLatestVersion_Semver_Compare(t *testing.T) {
	cases := []struct {
		a, b string
		want int
	}{
		{"v2.0.150", "v2.0.151", -1},
		{"v2.0.150", "v2.0.150", 0},
		{"v2.0.150", "v2.0.149", 1},
		{"v2.0.150", "v3.0.150", -1},
	}
	for _, c := range cases {
		assert.Equal(t, c.want, semver.Compare(c.a, c.b))
	}
}
// TestCheckLatestVersion exercises CheckLatestVersion against the real
// version-service endpoint and against a host that does not serve the
// expected payload.
// NOTE(review): this test performs real HTTP requests to external hosts and
// will fail offline — consider stubbing with httptest.Server.
func TestCheckLatestVersion(t *testing.T) {
	type args struct {
		ctx         context.Context
		versionData *VersionCheckRequest
		versionURL  string
	}
	tests := []struct {
		name string
		args args
		err  error
	}{
		{
			name: "Get latest version",
			args: args{
				ctx:         context.Background(),
				versionData: &VersionCheckRequest{},
				versionURL:  "https://us-central1-elated-pottery-310110.cloudfunctions.net/ksgf1v1",
			},
			err: nil,
		},
		{
			name: "Failed to get latest version",
			args: args{
				ctx:         context.Background(),
				versionData: &VersionCheckRequest{},
				versionURL:  "https://example.com",
			},
			// Must match the exact message produced by CheckLatestVersion.
			err: fmt.Errorf("failed to get latest version"),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			v := &VersionCheckHandler{
				versionURL: tt.args.versionURL,
			}
			err := v.CheckLatestVersion(tt.args.ctx, tt.args.versionData)
			assert.Equal(t, tt.err, err)
		})
	}
}
// TestVersionCheckHandler_getLatestVersion exercises getLatestVersion against
// the real endpoint and an endpoint with an unexpected payload.
// NOTE(review): requires network access, and the expected ClientUpdate
// "v3.0.0" is pinned to the live service's current answer — this will break
// on the next release; consider stubbing the endpoint.
func TestVersionCheckHandler_getLatestVersion(t *testing.T) {
	type fields struct {
		versionURL string
	}
	type args struct {
		versionData *VersionCheckRequest
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		want    *VersionCheckResponse
		wantErr bool
	}{
		{
			name: "Get latest version",
			fields: fields{
				versionURL: "https://us-central1-elated-pottery-310110.cloudfunctions.net/ksgf1v1",
			},
			args: args{
				versionData: &VersionCheckRequest{
					Client: "kubescape",
				},
			},
			want: &VersionCheckResponse{
				Client:       "kubescape",
				ClientUpdate: "v3.0.0",
			},
			wantErr: false,
		},
		{
			name: "Failed to get latest version",
			fields: fields{
				versionURL: "https://example.com",
			},
			args: args{
				versionData: &VersionCheckRequest{},
			},
			want:    nil,
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			v := &VersionCheckHandler{
				versionURL: tt.fields.versionURL,
			}
			got, err := v.getLatestVersion(tt.args.versionData)
			if (err != nil) != tt.wantErr {
				t.Errorf("VersionCheckHandler.getLatestVersion() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("VersionCheckHandler.getLatestVersion() = %v, want %v", got, tt.want)
			}
		})
	}
}
// TestGetTriggerSource verifies trigger-source detection for GitHub Actions
// ("pipeline") and for the ksserver binary ("microservice").
//
// Fixes: use t.Setenv so GITHUB_ACTIONS is restored after the test instead of
// leaking into later tests, and restore os.Args[0] after mutating it.
func TestGetTriggerSource(t *testing.T) {
	// Running in github actions pipeline
	t.Setenv("GITHUB_ACTIONS", "true")
	source := getTriggerSource()
	assert.Equal(t, "pipeline", source)

	origArg0 := os.Args[0]
	defer func() { os.Args[0] = origArg0 }()
	os.Args[0] = "ksserver"
	source = getTriggerSource()
	assert.Equal(t, "microservice", source)
}

View File

@@ -83,10 +83,7 @@ func (a *OperatorAdapter) httpPostOperatorScanRequest(body apis.Commands) (strin
return "", err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return "", fmt.Errorf("http-error: %d", resp.StatusCode)
}
return "success", nil
return httputils.HttpRespToString(resp)
}
func (a *OperatorAdapter) OperatorScan() (string, error) {

View File

@@ -5,7 +5,6 @@ import (
"fmt"
"os"
"strings"
"time"
"github.com/anchore/grype/grype/presenter"
"github.com/anchore/grype/grype/presenter/models"
@@ -18,13 +17,8 @@ import (
"github.com/kubescape/kubescape/v3/pkg/imagescan"
"github.com/kubescape/kubescape/v3/core/pkg/resultshandling"
copa "github.com/project-copacetic/copacetic/pkg/patch"
log "github.com/sirupsen/logrus"
copaGrype "github.com/anubhav06/copa-grype/grype"
"github.com/project-copacetic/copacetic/pkg/buildkit"
"github.com/project-copacetic/copacetic/pkg/pkgmgr"
"github.com/project-copacetic/copacetic/pkg/types/unversioned"
"github.com/project-copacetic/copacetic/pkg/utils"
)
func (ks *Kubescape) Patch(ctx context.Context, patchInfo *ksmetav1.PatchInfo, scanInfo *cautils.ScanInfo) (*models.PresenterConfig, error) {
@@ -44,11 +38,6 @@ func (ks *Kubescape) Patch(ctx context.Context, patchInfo *ksmetav1.PatchInfo, s
if err != nil {
return nil, err
}
// If the scan results ID is empty, set it to "grype"
if scanResults.ID.Name == "" {
scanResults.ID.Name = "grype"
}
// Save the scan results to a file in json format
pres := presenter.GetPresenter("json", "", false, *scanResults)
@@ -57,27 +46,27 @@ func (ks *Kubescape) Patch(ctx context.Context, patchInfo *ksmetav1.PatchInfo, s
writer := printer.GetWriter(ctx, fileName)
if err = pres.Present(writer); err != nil {
if err := pres.Present(writer); err != nil {
return nil, err
}
logger.L().StopSuccess(fmt.Sprintf("Successfully scanned image: %s", patchInfo.Image))
// ===================== Patch the image using copacetic =====================
logger.L().Start("Patching image...")
patchedImageName := fmt.Sprintf("%s:%s", patchInfo.ImageName, patchInfo.PatchedImageTag)
sout, serr := os.Stdout, os.Stderr
if logger.L().GetLevel() != "debug" {
disableCopaLogger()
}
if err = copaPatch(ctx, patchInfo.Timeout, patchInfo.BuildkitAddress, patchInfo.Image, fileName, patchedImageName, "", patchInfo.IgnoreError, patchInfo.BuildKitOpts); err != nil {
if err := copa.Patch(ctx, patchInfo.Timeout, patchInfo.BuildkitAddress, patchInfo.Image, fileName, patchInfo.PatchedImageTag, ""); err != nil {
return nil, err
}
// Restore the output streams
os.Stdout, os.Stderr = sout, serr
patchedImageName := fmt.Sprintf("%s:%s", patchInfo.ImageName, patchInfo.PatchedImageTag)
logger.L().StopSuccess(fmt.Sprintf("Patched image successfully. Loaded image: %s", patchedImageName))
// ===================== Re-scan the image =====================
@@ -108,6 +97,7 @@ func (ks *Kubescape) Patch(ctx context.Context, patchInfo *ksmetav1.PatchInfo, s
Image: patchedImageName,
},
}
resultsHandler.HandleResults(ctx)
return scanResultsPatched, resultsHandler.HandleResults(ctx)
}
@@ -117,110 +107,3 @@ func disableCopaLogger() {
null, _ := os.Open(os.DevNull)
log.SetOutput(null)
}
// copaPatch is a slightly modified copy of the Patch function from the original "project-copacetic/copacetic" repo
// https://github.com/project-copacetic/copacetic/blob/main/pkg/patch/patch.go
//
// It runs patchWithContext under the given timeout.
// Fix: the result channel is now buffered (size 1) so that, when the timeout
// fires first, the worker goroutine can complete its send and exit instead of
// blocking forever on an unbuffered channel (goroutine leak).
func copaPatch(ctx context.Context, timeout time.Duration, buildkitAddr, image, reportFile, patchedImageName, workingFolder string, ignoreError bool, bkOpts buildkit.Opts) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	ch := make(chan error, 1)
	go func() {
		ch <- patchWithContext(timeoutCtx, buildkitAddr, image, reportFile, patchedImageName, workingFolder, ignoreError, bkOpts)
	}()

	select {
	case err := <-ch:
		return err
	case <-timeoutCtx.Done():
		// add a grace period for long running deferred cleanup functions to complete
		<-time.After(1 * time.Second)
		err := fmt.Errorf("patch exceeded timeout %v", timeout)
		log.Error(err)
		return err
	}
}
// patchWithContext performs one copa patch run: it parses the scan report for
// updatable packages, connects to buildkit, installs the updates on top of
// image and exports the result to Docker under patchedImageName.
// When workingFolder is empty a temporary folder is created (and removed on
// return); a caller-supplied folder is only removed if it did not already
// exist.
// NOTE(review): buildkitAddr is accepted but never used in this body —
// buildkit.NewClient connects via bkOpts; confirm whether the address should
// be passed through.
func patchWithContext(ctx context.Context, buildkitAddr, image, reportFile, patchedImageName, workingFolder string, ignoreError bool, bkOpts buildkit.Opts) error {
	// Ensure working folder exists for call to InstallUpdates
	if workingFolder == "" {
		var err error
		workingFolder, err = os.MkdirTemp("", "copa-*")
		if err != nil {
			return err
		}
		defer os.RemoveAll(workingFolder)
		if err := os.Chmod(workingFolder, 0o744); err != nil {
			return err
		}
	} else {
		if isNew, err := utils.EnsurePath(workingFolder, 0o744); err != nil {
			log.Errorf("failed to create workingFolder %s", workingFolder)
			return err
		} else if isNew {
			// Only clean up folders this call created itself.
			defer os.RemoveAll(workingFolder)
		}
	}

	// Parse report for update packages
	updates, err := tryParseScanReport(reportFile)
	if err != nil {
		return err
	}

	client, err := buildkit.NewClient(ctx, bkOpts)
	if err != nil {
		return err
	}
	defer client.Close()

	// Configure buildctl/client for use by package manager
	config, err := buildkit.InitializeBuildkitConfig(ctx, client, image, updates)
	if err != nil {
		return err
	}

	// Create package manager helper
	pkgmgr, err := pkgmgr.GetPackageManager(updates.Metadata.OS.Type, config, workingFolder)
	if err != nil {
		return err
	}

	// Export the patched image state to Docker
	patchedImageState, _, err := pkgmgr.InstallUpdates(ctx, updates, ignoreError)
	if err != nil {
		return err
	}

	if err = buildkit.SolveToDocker(ctx, config.Client, patchedImageState, config.ConfigData, patchedImageName); err != nil {
		return err
	}

	return nil
}
// tryParseScanReport adds support to copa for patching Kubescape-produced
// results: it parses the Grype-format report at `file` and converts the
// versioned manifest into copa's unversioned.UpdateManifest.
func tryParseScanReport(file string) (*unversioned.UpdateManifest, error) {
	var parser copaGrype.GrypeParser
	manifest, err := parser.Parse(file)
	if err != nil {
		return nil, err
	}
	// Copy the OS/arch metadata and each update entry across, field by field.
	um := &unversioned.UpdateManifest{}
	um.Metadata.OS.Type = manifest.Metadata.OS.Type
	um.Metadata.OS.Version = manifest.Metadata.OS.Version
	um.Metadata.Config.Arch = manifest.Metadata.Config.Arch
	um.Updates = make([]unversioned.UpdatePackage, 0, len(manifest.Updates))
	for _, u := range manifest.Updates {
		um.Updates = append(um.Updates, unversioned.UpdatePackage{
			Name:             u.Name,
			InstalledVersion: u.InstalledVersion,
			FixedVersion:     u.FixedVersion,
			VulnerabilityID:  u.VulnerabilityID,
		})
	}
	return um, nil
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"github.com/kubescape/backend/pkg/versioncheck"
"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
@@ -23,7 +22,6 @@ import (
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
"go.opentelemetry.io/otel"
"golang.org/x/exp/slices"
"k8s.io/client-go/kubernetes"
"github.com/kubescape/opa-utils/resources"
)
@@ -43,13 +41,10 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
// ================== setup k8s interface object ======================================
var k8s *k8sinterface.KubernetesApi
var k8sClient kubernetes.Interface
if scanInfo.GetScanningContext() == cautils.ContextCluster {
k8s = getKubernetesApi()
if k8s == nil {
logger.L().Ctx(ctx).Fatal("failed connecting to Kubernetes cluster")
} else {
k8sClient = k8s.KubernetesClient
}
}
@@ -68,8 +63,8 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
// ================== version testing ======================================
v := versioncheck.NewIVersionCheckHandler(ctx)
_ = v.CheckLatestVersion(ctx, versioncheck.NewVersionCheckRequest(scanInfo.AccountID, versioncheck.BuildNumber, policyIdentifierIdentities(scanInfo.PolicyIdentifier), "", string(scanInfo.GetScanningContext()), k8sClient))
v := cautils.NewIVersionCheckHandler(ctx)
v.CheckLatestVersion(ctx, cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierIdentities(scanInfo.PolicyIdentifier), "", cautils.ScanningContextToScanningScope(scanInfo.GetScanningContext())))
// ================== setup host scanner object ======================================
ctxHostScanner, spanHostScanner := otel.Tracer("").Start(ctx, "setup host scanner")
@@ -128,7 +123,6 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
// ===================== Initialization =====================
scanInfo.Init(ctxInit) // initialize scan info
defer scanInfo.Cleanup()
interfaces := getInterfaces(ctxInit, scanInfo)
interfaces.report.SetTenantConfig(interfaces.tenantConfig)
@@ -196,7 +190,7 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
} else if err := priotizationHandler.PrioritizeResources(scanData); err != nil {
return resultsHandling, fmt.Errorf("%w", err)
}
if isPrioritizationScanType(scanInfo.ScanType) {
if err == nil && isPrioritizationScanType(scanInfo.ScanType) {
scanData.SetTopWorkloads()
}
spanPrioritization.End()
@@ -216,7 +210,7 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
}
func scanImages(scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, ctx context.Context, resultsHandling *resultshandling.ResultsHandler) {
var imagesToScan []string
imagesToScan := []string{}
if scanType == cautils.ScanTypeWorkload {
containers, err := workloadinterface.NewWorkloadObj(scanData.SingleResourceScan.GetObject()).GetContainers()
@@ -252,7 +246,7 @@ func scanImages(scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, ctx
if err := scanSingleImage(ctx, img, svc, resultsHandling); err != nil {
logger.L().StopError("failed to scan", helpers.String("image", img), helpers.Error(err))
}
logger.L().StopSuccess("Done scanning", helpers.String("image", img))
logger.L().StopSuccess("Scan successful: ", helpers.String("image", img))
}
}

View File

@@ -1,18 +1,12 @@
package v1
import (
"time"
"github.com/project-copacetic/copacetic/pkg/buildkit"
)
import "time"
type PatchInfo struct {
Image string // image to be patched
PatchedImageTag string // can be empty, if empty then the image tag will be patched with the latest tag
BuildkitAddress string // buildkit address
Timeout time.Duration // timeout for patching an image
IgnoreError bool // ignore errors and continue patching
BuildKitOpts buildkit.Opts //build kit options
// Image registry credentials
Username string // username for registry login

View File

@@ -25,7 +25,7 @@ func GenerateContainerScanReportMock() ScanResultReport {
return ds
}
// GenerateContainerScanReportNoVulMock - generate a scan result
// GenerateContainerScanReportMock - generate a scan result
func GenerateContainerScanReportNoVulMock() ScanResultReport {
ds := ScanResultReport{
WLID: "wlid://cluster-k8s-geriatrix-k8s-demo3/namespace-whisky-app/deployment-whisky4all-shipping",

View File

@@ -14,6 +14,6 @@ metadata:
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: true
image: nginx

View File

@@ -14,6 +14,6 @@ metadata:
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: true
image: nginx

View File

@@ -16,6 +16,6 @@ spec:
containers:
# These are the first containers comments
- name: nginx_container
image: nginx
securityContext:
runAsRoot: true
image: nginx

View File

@@ -15,7 +15,7 @@ spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: true
image: nginx

View File

@@ -15,7 +15,7 @@ spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: false

View File

@@ -16,7 +16,7 @@ spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: true
image: nginx

View File

@@ -16,7 +16,7 @@ spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: false

View File

@@ -9,6 +9,6 @@ metadata:
spec:
containers:
- name: nginx_container
securityContext:
runAsRoot: false
image: nginx
securityContext:
runAsRoot: false

View File

@@ -9,4 +9,4 @@ metadata:
spec:
containers:
- name: nginx_container
image: nginx
image: nginx

View File

@@ -12,5 +12,4 @@ spec:
image: nginx
- name: container_with_security_issues
image: image_with_security_issues
restartPolicy: Always
image: image_with_security_issues

View File

@@ -10,5 +10,3 @@ spec:
containers:
- name: nginx_container
image: nginx
restartPolicy: Always

View File

@@ -8,7 +8,7 @@ metadata:
spec:
containers:
- name: nginx1
image: nginx
securityContext:
capabilities:
drop: ["NET_RAW", "SYS_ADM"]
image: nginx
drop: ["NET_RAW", "SYS_ADM"]

View File

@@ -8,7 +8,7 @@ metadata:
spec:
containers:
- name: nginx1
image: nginx
securityContext:
capabilities:
drop: ["NET_RAW"]
image: nginx
drop: ["NET_RAW"]

View File

@@ -9,9 +9,9 @@ metadata:
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: false
image: nginx
---
@@ -29,5 +29,4 @@ spec:
image: nginx
- name: container_with_security_issues
image: image_with_security_issues
restartPolicy: Always
image: image_with_security_issues

View File

@@ -25,5 +25,3 @@ spec:
containers:
- name: nginx_container
image: nginx
restartPolicy: Always

View File

@@ -9,6 +9,6 @@ metadata:
spec:
containers:
- name: nginx_container
securityContext:
runAsRoot: true
image: nginx
securityContext:
runAsRoot: true

View File

@@ -9,6 +9,6 @@ metadata:
spec:
containers:
- name: nginx_container
securityContext:
runAsRoot: false
image: nginx
securityContext:
runAsRoot: false

View File

@@ -10,9 +10,9 @@ metadata:
spec:
containers:
- name: nginx1
image: nginx
securityContext:
capabilities:
drop:
- "NET_RAW"
add: ["SYS_ADM"]
image: nginx
add: ["SYS_ADM"]

View File

@@ -10,9 +10,9 @@ metadata:
spec:
containers:
- name: nginx1
image: nginx
securityContext:
capabilities:
drop:
- "SYS_ADM"
add: ["NET_RAW"]
image: nginx
add: ["NET_RAW"]

View File

@@ -182,7 +182,7 @@ func addLinesToRemove(ctx context.Context, fixInfoMetadata *fixInfoMetadata) (in
newOriginalListTracker := updateTracker(fixInfoMetadata.originalList, fixInfoMetadata.originalListTracker)
*fixInfoMetadata.linesToRemove = append(*fixInfoMetadata.linesToRemove, linesToRemove{
startLine: currentDFSNode.node.Line,
endLine: getNodeLine(fixInfoMetadata.originalList, newOriginalListTracker-1), // newOriginalListTracker is the next node
endLine: getNodeLine(fixInfoMetadata.originalList, newOriginalListTracker),
})
return newOriginalListTracker, fixInfoMetadata.fixedListTracker

View File

@@ -307,7 +307,7 @@ func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventTy
}
}
// tearDownHostScanner manage the host-scanner deletion.
// tearDownNamespace manage the host-scanner deletion.
func (hsh *HostSensorHandler) tearDownHostScanner(namespace string) error {
client := hsh.k8sObj.KubernetesClient

View File

@@ -155,7 +155,7 @@ func (hsh *HostSensorHandler) getKubeProxyInfo(ctx context.Context) ([]hostsenso
return hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeProxyInfo", KubeProxyInfo)
}
// getControlPlaneInfo returns the list of controlPlaneInfo metadata
// getControlPlanInfo returns the list of controlPlaneInfo metadata
func (hsh *HostSensorHandler) getControlPlaneInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest(ctx, "/controlPlaneInfo", ControlPlaneInfo)

View File

@@ -39,7 +39,7 @@ func (lm *LogsMap) isDuplicated(logContent string) bool {
return ok
}
// getOccurrence retrieve the number of occurrences logContent has been used.
// GgtOccurrence retrieve the number of occurrences logContent has been used.
func (lm *LogsMap) getOccurrence(logContent string) int {
lm.Lock()
occurrence, ok := lm.usedLogs[logContent]

View File

@@ -3,8 +3,6 @@ package opaprocessor
import (
"context"
corev1 "k8s.io/api/core/v1"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/k8s-interface/workloadinterface"
@@ -99,7 +97,7 @@ func isEmptyResources(counters reportsummary.ICounters) bool {
func getAllSupportedObjects(k8sResources cautils.K8SResources, externalResources cautils.ExternalResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) map[string][]workloadinterface.IMetadata {
k8sObjects := getKubernetesObjects(k8sResources, allResources, rule.Match)
externalObjs := getKubernetesObjectsFromExternalResources(externalResources, allResources, rule.DynamicMatch)
externalObjs := getKubenetesObjectsFromExternalResources(externalResources, allResources, rule.DynamicMatch)
if len(externalObjs) > 0 {
l, ok := k8sObjects[clusterScope]
if !ok {
@@ -111,7 +109,7 @@ func getAllSupportedObjects(k8sResources cautils.K8SResources, externalResources
return k8sObjects
}
func getKubernetesObjectsFromExternalResources(externalResources cautils.ExternalResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
func getKubenetesObjectsFromExternalResources(externalResources cautils.ExternalResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
k8sObjects := []workloadinterface.IMetadata{}
for m := range match {
@@ -217,39 +215,16 @@ func removePodData(workload workloadinterface.IWorkload) {
workloadinterface.RemoveFromMap(workload.GetObject(), "metadata", "managedFields")
workloadinterface.RemoveFromMap(workload.GetObject(), "status")
// containers
if containers, err := workload.GetContainers(); err == nil && len(containers) > 0 {
removeContainersData(containers)
workloadinterface.SetInMap(workload.GetObject(), workloadinterface.PodSpec(workload.GetKind()), "containers", containers)
containers, err := workload.GetContainers()
if err != nil || len(containers) == 0 {
return
}
// init containers
if initContainers, err := workload.GetInitContainers(); err == nil && len(initContainers) > 0 {
removeContainersData(initContainers)
workloadinterface.SetInMap(workload.GetObject(), workloadinterface.PodSpec(workload.GetKind()), "initContainers", initContainers)
}
// ephemeral containers
if ephemeralContainers, err := workload.GetEphemeralContainers(); err == nil && len(ephemeralContainers) > 0 {
removeEphemeralContainersData(ephemeralContainers)
workloadinterface.SetInMap(workload.GetObject(), workloadinterface.PodSpec(workload.GetKind()), "ephemeralContainers", ephemeralContainers)
}
}
// removeContainersData masks the value of every environment variable in the
// given containers with the placeholder "XXXXXX". The slice is mutated in
// place, so callers see the redacted values.
func removeContainersData(containers []corev1.Container) {
	for ci := range containers {
		env := containers[ci].Env
		for ei := range env {
			env[ei].Value = "XXXXXX"
		}
	}
}
func removeEphemeralContainersData(containers []corev1.EphemeralContainer) {
for i := range containers {
for j := range containers[i].Env {
containers[i].Env[j].Value = "XXXXXX"
}
}
workloadinterface.SetInMap(workload.GetObject(), workloadinterface.PodSpec(workload.GetKind()), "containers", containers)
}
func ruleData(rule *reporthandling.PolicyRule) string {

View File

@@ -3,152 +3,21 @@ package opaprocessor
import (
"testing"
corev1 "k8s.io/api/core/v1"
"github.com/stretchr/testify/assert"
"github.com/kubescape/k8s-interface/workloadinterface"
)
func TestRemoveData(t *testing.T) {
type args struct {
w string
}
tests := []struct {
name string
args args
}{
{
name: "remove data",
args: args{
w: `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"demoservice-server", "annotations": {"name": "kubectl.kubernetes.io/last-applied-configuration", "value": "blabla"}},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"demoservice-server"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}}}`,
},
},
{
name: "remove data with init containers and ephemeral containers",
args: args{
w: `{"apiVersion": "v1", "kind": "Pod", "metadata": {"name": "example-pod", "namespace": "default"}, "spec": {"containers": [{"name": "container1", "image": "nginx", "ports": [{"containerPort": 80}], "env": [{"name": "CONTAINER_ENV", "value": "container_value"}]}], "initContainers": [{"name": "init-container1", "image": "busybox", "command": ["sh", "-c", "echo 'Init Container'"], "env": [{"name": "INIT_CONTAINER_ENV", "value": "init_container_value"}]}], "ephemeralContainers": [{"name": "debug-container", "image": "busybox", "command": ["sh", "-c", "echo 'Ephemeral Container'"], "targetContainerName": "container1", "env": [{"name": "EPHEMERAL_CONTAINER_ENV", "value": "ephemeral_container_value"}]}]}}`,
},
},
{
name: "remove secret data",
args: args{
w: `{"apiVersion": "v1", "kind": "Secret", "metadata": {"name": "example-secret", "namespace": "default", "annotations": {"kubectl.kubernetes.io/last-applied-configuration": "{}"}}, "type": "Opaque", "data": {"username": "dXNlcm5hbWU=", "password": "cGFzc3dvcmQ="}}`,
},
},
{
name: "remove configMap data",
args: args{
w: `{"apiVersion": "v1", "kind": "ConfigMap", "metadata": {"name": "example-configmap", "namespace": "default", "annotations": {"kubectl.kubernetes.io/last-applied-configuration": "{}"}}, "data": {"exampleKey": "exampleValue"}}`,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
obj, _ := workloadinterface.NewWorkload([]byte(tt.args.w))
removeData(obj)
workload := workloadinterface.NewWorkloadObj(obj.GetObject())
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"demoservice-server"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"demoservice-server"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}}}`
obj, _ := workloadinterface.NewWorkload([]byte(w))
removeData(obj)
_, found := workload.GetAnnotation("kubectl.kubernetes.io/last-applied-configuration")
assert.False(t, found)
_, found = workloadinterface.InspectMap(workload.GetObject(), "metadata", "managedFields")
assert.False(t, found)
_, found = workloadinterface.InspectMap(workload.GetObject(), "status")
assert.False(t, found)
if d, ok := workloadinterface.InspectMap(workload.GetObject(), "data"); ok {
data, ok := d.(map[string]interface{})
assert.True(t, ok)
for key := range data {
assert.Equal(t, "XXXXXX", data[key])
}
}
if c, _ := workload.GetContainers(); c != nil {
for i := range c {
for _, e := range c[i].Env {
assert.Equal(t, "XXXXXX", e.Value, e.Name)
}
}
}
if ic, _ := workload.GetInitContainers(); ic != nil {
for i := range ic {
for _, e := range ic[i].Env {
assert.Equal(t, "XXXXXX", e.Value, e.Name)
}
}
}
if ec, _ := workload.GetEphemeralContainers(); ec != nil {
for i := range ec {
for _, e := range ec[i].Env {
assert.Equal(t, "XXXXXX", e.Value, e.Name)
}
}
}
})
}
}
func TestRemoveContainersData(t *testing.T) {
containers := []corev1.Container{
{
Env: []corev1.EnvVar{
{
Name: "TEST_ENV",
Value: "test_value",
},
{
Name: "ENV_2",
Value: "bla",
},
{
Name: "EMPTY_ENV",
Value: "",
},
},
},
}
removeContainersData(containers)
for _, c := range containers {
for _, e := range c.Env {
assert.Equal(t, "XXXXXX", e.Value)
}
}
}
func TestRemoveEphemeralContainersData(t *testing.T) {
containers := []corev1.EphemeralContainer{
{
EphemeralContainerCommon: corev1.EphemeralContainerCommon{
Env: []corev1.EnvVar{
{
Name: "TEST_ENV",
Value: "test_value",
},
{
Name: "ENV_2",
Value: "bla",
},
{
Name: "EMPTY_ENV",
Value: "",
},
},
},
},
}
removeEphemeralContainersData(containers)
for _, c := range containers {
for _, e := range c.Env {
workload := workloadinterface.NewWorkloadObj(obj.GetObject())
c, _ := workload.GetContainers()
for i := range c {
for _, e := range c[i].Env {
assert.Equal(t, "XXXXXX", e.Value)
}
}

View File

@@ -3,6 +3,7 @@ package resourcehandler
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
@@ -10,7 +11,7 @@ import (
"github.com/kubescape/opa-utils/reporthandling"
"k8s.io/apimachinery/pkg/version"
"github.com/kubescape/go-logger"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/kubescape/v3/core/cautils"
@@ -25,7 +26,7 @@ func NewFileResourceHandler() *FileResourceHandler {
return &FileResourceHandler{}
}
func (fileHandler *FileResourceHandler) GetResources(ctx context.Context, sessionObj *cautils.OPASessionObj, _ opaprocessor.IJobProgressNotificationClient, scanInfo *cautils.ScanInfo) (cautils.K8SResources, map[string]workloadinterface.IMetadata, cautils.ExternalResources, map[string]bool, error) {
func (fileHandler *FileResourceHandler) GetResources(ctx context.Context, sessionObj *cautils.OPASessionObj, progressListener opaprocessor.IJobProgressNotificationClient, scanInfo *cautils.ScanInfo) (cautils.K8SResources, map[string]workloadinterface.IMetadata, cautils.ExternalResources, map[string]bool, error) {
allResources := map[string]workloadinterface.IMetadata{}
externalResources := cautils.ExternalResources{}
@@ -40,16 +41,15 @@ func (fileHandler *FileResourceHandler) GetResources(ctx context.Context, sessio
for path := range scanInfo.InputPatterns {
var workloadIDToSource map[string]reporthandling.Source
var workloads []workloadinterface.IMetadata
var workloadIDToMappingNodes map[string]cautils.MappingNodes
var err error
if scanInfo.ChartPath != "" && scanInfo.FilePath != "" {
workloadIDToSource, workloads, workloadIDToMappingNodes, err = getWorkloadFromHelmChart(ctx, scanInfo.InputPatterns[path], scanInfo.ChartPath, scanInfo.FilePath)
workloadIDToSource, workloads, err = getWorkloadFromHelmChart(ctx, scanInfo.ChartPath, scanInfo.FilePath)
if err != nil {
// We should probably ignore the error so we can continue scanning other charts
}
} else {
workloadIDToSource, workloads, workloadIDToMappingNodes, err = getResourcesFromPath(ctx, scanInfo.InputPatterns[path])
workloadIDToSource, workloads, err = getResourcesFromPath(ctx, scanInfo.InputPatterns[path])
if err != nil {
return nil, allResources, nil, nil, err
}
@@ -60,7 +60,6 @@ func (fileHandler *FileResourceHandler) GetResources(ctx context.Context, sessio
for k, v := range workloadIDToSource {
sessionObj.ResourceSource[k] = v
sessionObj.TemplateMapping[k] = workloadIDToMappingNodes[k]
}
// map all resources: map["/apiVersion/version/kind"][]<k8s workloads>
@@ -87,7 +86,7 @@ func (fileHandler *FileResourceHandler) GetResources(ctx context.Context, sessio
// save only relevant resources
for i := range mappedResources {
if _, ok := k8sResources[i]; ok {
var ids []string
ids := []string{}
for j := range mappedResources[i] {
ids = append(ids, mappedResources[i][j].GetID())
allResources[mappedResources[i][j].GetID()] = mappedResources[i][j]
@@ -106,52 +105,47 @@ func (fileHandler *FileResourceHandler) GetResources(ctx context.Context, sessio
func (fileHandler *FileResourceHandler) GetCloudProvider() string {
return ""
}
func getWorkloadFromHelmChart(ctx context.Context, path, helmPath, workloadPath string) (map[string]reporthandling.Source, []workloadinterface.IMetadata, map[string]cautils.MappingNodes, error) {
clonedRepo := cautils.GetClonedPath(path)
func getWorkloadFromHelmChart(ctx context.Context, helmPath, workloadPath string) (map[string]reporthandling.Source, []workloadinterface.IMetadata, error) {
clonedRepo, err := cloneGitRepo(&helmPath)
if err != nil {
return nil, nil, err
}
if clonedRepo != "" {
// if the repo was cloned, add the workload path to the cloned repo
workloadPath = filepath.Join(clonedRepo, workloadPath)
} else {
// if the repo was not cloned
clonedRepo = path
defer os.RemoveAll(clonedRepo)
}
// Get repo root
repoRoot, gitRepo := extractGitRepo(clonedRepo)
repoRoot, gitRepo := extractGitRepo(helmPath)
helmSourceToWorkloads, helmSourceToChart, helmSourceToNodes := cautils.LoadResourcesFromHelmCharts(ctx, helmPath)
helmSourceToWorkloads, helmSourceToChart := cautils.LoadResourcesFromHelmCharts(ctx, helmPath)
if clonedRepo != "" {
workloadPath = clonedRepo + workloadPath
}
wlSource, ok := helmSourceToWorkloads[workloadPath]
if !ok {
return nil, nil, nil, fmt.Errorf("workload %s not found in chart %s", workloadPath, helmPath)
return nil, nil, fmt.Errorf("workload %s not found in chart %s", workloadPath, helmPath)
}
if len(wlSource) != 1 {
return nil, nil, nil, fmt.Errorf("workload %s found multiple times in chart %s", workloadPath, helmPath)
return nil, nil, fmt.Errorf("workload %s found multiple times in chart %s", workloadPath, helmPath)
}
helmChart, ok := helmSourceToChart[workloadPath]
if !ok {
return nil, nil, nil, fmt.Errorf("helmChart not found for workload %s", workloadPath)
}
templatesNodes, ok := helmSourceToNodes[workloadPath]
if !ok {
return nil, nil, nil, fmt.Errorf("templatesNodes not found for workload %s", workloadPath)
return nil, nil, fmt.Errorf("helmChart not found for workload %s", workloadPath)
}
workloadSource := getWorkloadSourceHelmChart(repoRoot, helmPath, gitRepo, helmChart)
workloadIDToSource := make(map[string]reporthandling.Source, 1)
workloadIDToNodes := make(map[string]cautils.MappingNodes, 1)
workloadIDToSource[wlSource[0].GetID()] = workloadSource
workloadIDToNodes[wlSource[0].GetID()] = templatesNodes
var workloads []workloadinterface.IMetadata
workloads := []workloadinterface.IMetadata{}
workloads = append(workloads, wlSource...)
return workloadIDToSource, workloads, workloadIDToNodes, nil
return workloadIDToSource, workloads, nil
}
@@ -185,15 +179,16 @@ func getWorkloadSourceHelmChart(repoRoot string, source string, gitRepo *cautils
}
}
func getResourcesFromPath(ctx context.Context, path string) (map[string]reporthandling.Source, []workloadinterface.IMetadata, map[string]cautils.MappingNodes, error) {
workloadIDToSource := make(map[string]reporthandling.Source)
workloadIDToNodes := make(map[string]cautils.MappingNodes)
var workloads []workloadinterface.IMetadata
func getResourcesFromPath(ctx context.Context, path string) (map[string]reporthandling.Source, []workloadinterface.IMetadata, error) {
workloadIDToSource := make(map[string]reporthandling.Source, 0)
workloads := []workloadinterface.IMetadata{}
clonedRepo := cautils.GetClonedPath(path)
clonedRepo, err := cloneGitRepo(&path)
if err != nil {
return nil, nil, err
}
if clonedRepo != "" {
// if the repo was cloned, add the workload path to the cloned repo
path = clonedRepo
defer os.RemoveAll(clonedRepo)
}
// Get repo root
@@ -274,16 +269,12 @@ func getResourcesFromPath(ctx context.Context, path string) (map[string]reportha
}
// load resources from helm charts
helmSourceToWorkloads, helmSourceToChart, helmSourceToNodes := cautils.LoadResourcesFromHelmCharts(ctx, path)
helmSourceToWorkloads, helmSourceToChart := cautils.LoadResourcesFromHelmCharts(ctx, path)
for source, ws := range helmSourceToWorkloads {
workloads = append(workloads, ws...)
helmChart := helmSourceToChart[source]
var templatesNodes cautils.MappingNodes
if nodes, ok := helmSourceToNodes[source]; ok {
templatesNodes = nodes
}
if clonedRepo != "" && gitRepo != nil {
if clonedRepo != "" {
url, err := gitRepo.GetRemoteUrl()
if err != nil {
logger.L().Warning("failed to get remote url", helpers.Error(err))
@@ -292,24 +283,21 @@ func getResourcesFromPath(ctx context.Context, path string) (map[string]reportha
helmChart.Path = strings.TrimSuffix(url, ".git")
repoRoot = ""
source = strings.TrimPrefix(source, fmt.Sprintf("%s/", clonedRepo))
templatesNodes.TemplateFileName = source
}
workloadSource := getWorkloadSourceHelmChart(repoRoot, source, gitRepo, helmChart)
for i := range ws {
workloadIDToSource[ws[i].GetID()] = workloadSource
workloadIDToNodes[ws[i].GetID()] = templatesNodes
}
}
if len(helmSourceToWorkloads) > 0 { // && len(helmSourceToNodes) > 0
if len(helmSourceToWorkloads) > 0 {
logger.L().Debug("helm templates found in local storage", helpers.Int("helmTemplates", len(helmSourceToWorkloads)), helpers.Int("workloads", len(workloads)))
}
//patch, get value from env
// Load resources from Kustomize directory
kustomizeSourceToWorkloads, kustomizeDirectoryName := cautils.LoadResourcesFromKustomizeDirectory(ctx, path) //?
kustomizeSourceToWorkloads, kustomizeDirectoryName := cautils.LoadResourcesFromKustomizeDirectory(ctx, path)
// update workloads and workloadIDToSource with workloads from Kustomize Directory
for source, ws := range kustomizeSourceToWorkloads {
@@ -346,7 +334,7 @@ func getResourcesFromPath(ctx context.Context, path string) (map[string]reportha
}
}
return workloadIDToSource, workloads, workloadIDToNodes, nil
return workloadIDToSource, workloads, nil
}
func extractGitRepo(path string) (string, *cautils.LocalGitRepository) {

View File

@@ -2,14 +2,40 @@ package resourcehandler
import (
"fmt"
"path/filepath"
"github.com/kubescape/go-logger"
giturl "github.com/kubescape/go-git-url"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/opa-utils/objectsenvelopes"
)
// cloneGitRepo clones the git repository referenced by *path into a local
// temporary directory and rewrites *path to point inside the clone. It
// returns the clone's root directory, or "" when *path is not a recognizable
// git URL (in which case *path is left untouched and no error is reported —
// callers treat it as a local filesystem path).
func cloneGitRepo(path *string) (string, error) {
	var clonedDir string
	gitURL, err := giturl.NewGitAPI(*path)
	if err != nil {
		// Not a git URL: deliberately swallow the error so local paths fall
		// through to the normal file-based flow.
		return "", nil
	}
	// Clone git repository if needed
	logger.L().Start("cloning", helpers.String("repository url", gitURL.GetURL().String()))
	clonedDir, err = cloneRepo(gitURL)
	if err != nil {
		logger.L().StopError("failed to clone git repo", helpers.String("url", gitURL.GetURL().String()), helpers.Error(err))
		return "", fmt.Errorf("failed to clone git repo '%s', %w", gitURL.GetURL().String(), err)
	}
	// Point the caller's path at the in-repo subpath within the fresh clone.
	*path = filepath.Join(clonedDir, gitURL.GetPath())
	logger.L().StopSuccess("Done accessing local objects")
	return clonedDir, nil
}
func addWorkloadsToResourcesMap(allResources map[string][]workloadinterface.IMetadata, workloads []workloadinterface.IMetadata) {
for i := range workloads {
groupVersionResource, err := k8sinterface.GetGroupVersionResource(workloads[i].GetKind())
@@ -66,7 +92,7 @@ func findScanObjectResource(mappedResources map[string][]workloadinterface.IMeta
logger.L().Debug("Single resource scan", helpers.String("resource", resource.GetID()))
var wls []workloadinterface.IWorkload
wls := []workloadinterface.IWorkload{}
for _, resources := range mappedResources {
for _, r := range resources {
if r.GetKind() == resource.GetKind() && r.GetName() == resource.GetName() {

View File

@@ -322,7 +322,7 @@ func (k8sHandler *K8sResourceHandler) pullResources(queryableResources Queryable
result, err := k8sHandler.pullSingleResource(&gvr, nil, queryableResources[i].FieldSelectors, globalFieldSelectors)
if err != nil {
if !strings.Contains(err.Error(), "the server could not find the requested resource") {
logger.L().Warning("failed to pull resource", helpers.String("resource", queryableResources[i].GroupVersionResourceTriplet), helpers.Error(err))
logger.L().Error("failed to pull resource", helpers.String("resource", queryableResources[i].GroupVersionResourceTriplet), helpers.Error(err))
// handle error
if errs == nil {
errs = err

View File

@@ -1,7 +1,6 @@
package cautils
package resourcehandler
import (
"crypto/sha256"
"errors"
"fmt"
nethttp "net/http"
@@ -13,44 +12,8 @@ import (
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/http"
giturl "github.com/kubescape/go-git-url"
"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
)
var tmpDirPaths map[string]string
func hashRepoURL(repoURL string) string {
h := sha256.New()
h.Write([]byte(repoURL))
return string(h.Sum(nil))
}
// getDirPath returns the cached temporary directory previously created for
// repoURL, or "" when no directory has been recorded for it (including when
// the cache map has never been initialized).
func getDirPath(repoURL string) string {
	if tmpDirPaths != nil {
		return tmpDirPaths[hashRepoURL(repoURL)]
	}
	return ""
}
// createTempDir returns a temporary directory for repoURL, creating it on
// first use and caching it in the package-level tmpDirPaths map so repeated
// calls for the same URL reuse the same directory.
func createTempDir(repoURL string) (string, error) {
	// Reuse the directory from a previous call for the same URL, if any.
	if existing := getDirPath(repoURL); existing != "" {
		return existing, nil
	}
	newDir, err := os.MkdirTemp("", "")
	if err != nil {
		return "", fmt.Errorf("failed to create temporary directory: %w", err)
	}
	// Lazily initialize the cache before recording the new directory.
	if tmpDirPaths == nil {
		tmpDirPaths = make(map[string]string)
	}
	tmpDirPaths[hashRepoURL(repoURL)] = newDir
	return newDir, nil
}
// To Check if the given repository is Public(No Authentication needed), send a HTTP GET request to the URL
// If response code is 200, the repository is Public.
func isGitRepoPublic(u string) bool {
@@ -92,38 +55,34 @@ func getProviderError(gitURL giturl.IGitAPI) error {
// cloneRepo clones a repository to a local temporary directory and returns the directory
func cloneRepo(gitURL giturl.IGitAPI) (string, error) {
cloneURL := gitURL.GetHttpCloneURL()
// Check if directory exists
if p := getDirPath(cloneURL); p != "" {
// directory exists, meaning this repo was cloned
return p, nil
}
// Get the URL to clone
// Create temp directory
tmpDir, err := createTempDir(cloneURL)
tmpDir, err := os.MkdirTemp("", "")
if err != nil {
return "", err
return "", fmt.Errorf("failed to create temporary directory: %w", err)
}
isGitTokenPresent := isGitTokenPresent(gitURL)
// Get the URL to clone
cloneURL := gitURL.GetHttpCloneURL()
isGitRepoPublic := isGitRepoPublic(cloneURL)
// Declare the authentication variable required for cloneOptions
var auth transport.AuthMethod
if isGitTokenPresent {
if isGitRepoPublic {
// No authentication needed if repository is public
auth = nil
} else {
// Return Error if the AUTH_TOKEN is not present
if isGitTokenPresent := isGitTokenPresent(gitURL); !isGitTokenPresent {
return "", getProviderError(gitURL)
}
auth = &http.BasicAuth{
Username: "x-token-auth",
Password: gitURL.GetToken(),
}
} else {
// If the repository is public, no authentication is needed
if isGitRepoPublic(cloneURL) {
auth = nil
} else {
return "", getProviderError(gitURL)
}
}
// For Azure repo cloning
@@ -143,42 +102,6 @@ func cloneRepo(gitURL giturl.IGitAPI) (string, error) {
if err != nil {
return "", fmt.Errorf("failed to clone %s. %w", gitURL.GetRepoName(), err)
}
// tmpDir = filepath.Join(tmpDir, gitURL.GetRepoName())
tmpDirPaths[hashRepoURL(cloneURL)] = tmpDir
return tmpDir, nil
}
// CloneGitRepo clones the git repository referenced by *path into a local
// directory, overwrites *path with that directory, and returns it.
//
// NOTE(review): when *path cannot be parsed as a git URL this returns
// ("", nil) — the parse error is swallowed, presumably so that non-git
// paths pass through untouched; confirm callers depend on that before
// changing it.
func CloneGitRepo(path *string) (string, error) {
	var clonedDir string
	gitURL, err := giturl.NewGitAPI(*path)
	if err != nil {
		return "", nil
	}
	// Clone git repository if needed
	logger.L().Start("cloning", helpers.String("repository url", gitURL.GetURL().String()))
	clonedDir, err = cloneRepo(gitURL)
	if err != nil {
		logger.L().StopError("failed to clone git repo", helpers.String("url", gitURL.GetURL().String()), helpers.Error(err))
		return "", fmt.Errorf("failed to clone git repo '%s', %w", gitURL.GetURL().String(), err)
	}
	// Redirect the caller's path to the local clone.
	*path = clonedDir
	logger.L().StopSuccess("Done accessing remote repo")
	return clonedDir, nil
}
// GetClonedPath returns the local directory into which the repository at
// path was previously cloned, or "" when path is not a parseable git URL
// or no clone has been registered for it.
func GetClonedPath(path string) string {
	if gitURL, err := giturl.NewGitAPI(path); err == nil {
		return getDirPath(gitURL.GetHttpCloneURL())
	}
	return ""
}

View File

@@ -1,4 +1,4 @@
package cautils
package resourcehandler
import (
"errors"
@@ -93,63 +93,3 @@ func TestCloneRepo(t *testing.T) {
})
}
}
// TestGetClonedPath verifies that a URL registered in tmpDirPaths resolves
// to its recorded clone directory and that an unparseable URL yields "".
func TestGetClonedPath(t *testing.T) {
	testCases := []struct {
		name     string
		path     string
		expected string
	}{
		{
			name:     "Valid Git URL",
			path:     "https://github.com/kubescape/kubescape.git",
			expected: "/path/to/cloned/repo", // replace with the expected path
		},
		{
			name:     "Invalid Git URL",
			path:     "invalid",
			expected: "",
		},
	}
	// Seed the package-level registry so the valid URL has a known entry.
	tmpDirPaths = make(map[string]string)
	tmpDirPaths[hashRepoURL("https://github.com/kubescape/kubescape.git")] = "/path/to/cloned/repo" // replace with the actual path
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := GetClonedPath(tc.path)
			if result != tc.expected {
				t.Errorf("Expected %q, got %q", tc.expected, result)
			}
		})
	}
}
// TestGetDirPath checks the tmpDirPaths lookup: a registered repo URL
// resolves to its recorded directory, an unknown URL yields "".
func TestGetDirPath(t *testing.T) {
	// Seed the package-level registry with one known entry.
	tmpDirPaths = map[string]string{
		hashRepoURL("https://github.com/user/repo.git"): "/path/to/cloned/repo", // replace with the actual path
	}
	cases := []struct {
		name    string
		repoURL string
		want    string // replace with the expected path
	}{
		{name: "Existing Repo URL", repoURL: "https://github.com/user/repo.git", want: "/path/to/cloned/repo"},
		{name: "Non-Existing Repo URL", repoURL: "https://github.com/user/nonexistentrepo.git", want: ""},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			if got := getDirPath(c.repoURL); got != c.want {
				t.Errorf("Expected %q, got %q", c.want, got)
			}
		})
	}
}

View File

@@ -114,7 +114,7 @@ func filterRuleMatchesForResource(resourceKind string, matchObjects []reporthand
return resourceMap
}
// updateQueryableResourcesMapFromRuleMatchObject updates the queryableResources map with the relevant resources from the match object.
// updateQueryableResourcesMapFromMatch updates the queryableResources map with the relevant resources from the match object.
// if namespace is not empty, the namespace filter is added to the queryable resources (which are namespaced)
// if resourcesFilterMap is not nil, only the resources with value 'true' will be added to the queryable resources
func updateQueryableResourcesMapFromRuleMatchObject(match *reporthandling.RuleMatchObjects, resourcesFilterMap map[string]bool, queryableResources QueryableResources, namespace string) {

View File

@@ -14,13 +14,11 @@ import (
const (
columnSeverity = iota
columnRef = iota
columnName = iota
columnCounterFailed = iota
columnCounterAll = iota
columnComplianceScore = iota
_rowLen = iota
controlNameMaxLength = 70
)
func generateRow(controlSummary reportsummary.IControlSummary, infoToPrintInfo []infoStars, verbose bool) []string {
@@ -32,8 +30,8 @@ func generateRow(controlSummary reportsummary.IControlSummary, infoToPrintInfo [
}
row[columnSeverity] = getSeverityColumn(controlSummary)
if len(controlSummary.GetName()) > controlNameMaxLength {
row[columnName] = controlSummary.GetName()[:controlNameMaxLength] + "..."
if len(controlSummary.GetName()) > 50 {
row[columnName] = controlSummary.GetName()[:50] + "..."
} else {
row[columnName] = controlSummary.GetName()
}
@@ -64,9 +62,8 @@ func generateRowPdf(controlSummary reportsummary.IControlSummary, infoToPrintInf
}
row[columnSeverity] = apis.ControlSeverityToString(controlSummary.GetScoreFactor())
row[columnRef] = controlSummary.GetID()
if len(controlSummary.GetName()) > controlNameMaxLength {
row[columnName] = controlSummary.GetName()[:controlNameMaxLength] + "..."
if len(controlSummary.GetName()) > 50 {
row[columnName] = controlSummary.GetName()[:50] + "..."
} else {
row[columnName] = controlSummary.GetName()
}
@@ -147,7 +144,6 @@ func getControlTableHeaders(short bool) []string {
headers[0] = "Controls"
} else {
headers = make([]string, _rowLen)
headers[columnRef] = "Control reference"
headers[columnName] = "Control name"
headers[columnCounterFailed] = "Failed resources"
headers[columnCounterAll] = "All resources"

View File

@@ -2,7 +2,6 @@ package printer
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
@@ -41,28 +40,23 @@ func Test_generateRowPdf(t *testing.T) {
t.Errorf("got %s, want either of these: %s", c[0], "Low, Medium, High, Critical")
}
// Validating length of control ID
if len(c[1]) > 6 {
t.Errorf("got %s, want %s", c[1], "less than 7 characters")
}
// Validating length of control name
if len(c[2]) > controlNameMaxLength {
t.Errorf("got %s, want %s", c[1], fmt.Sprintf("less than %d characters", controlNameMaxLength))
if len(c[1]) > 53 {
t.Errorf("got %s, want %s", c[1], "less than 54 characters")
}
// Validating numeric fields
_, err := strconv.Atoi(c[3])
_, err := strconv.Atoi(c[2])
if err != nil {
t.Errorf("got %s, want an integer %s", c[2], err)
}
_, err = strconv.Atoi(c[4])
_, err = strconv.Atoi(c[3])
if err != nil {
t.Errorf("got %s, want an integer %s", c[3], err)
}
assert.NotEmpty(t, c[5], "expected a non-empty string")
assert.NotEmpty(t, c[4], "expected a non-empty string")
}

View File

@@ -8,16 +8,12 @@ import (
"path/filepath"
"strings"
"github.com/anchore/clio"
"github.com/anchore/grype/grype/presenter"
"github.com/anchore/grype/grype/presenter/models"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v3/core/cautils"
"github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer"
"github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/imageprinter"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
"k8s.io/utils/strings/slices"
)
const (
@@ -58,41 +54,11 @@ func (jp *JsonPrinter) Score(score float32) {
fmt.Fprintf(os.Stderr, "\nOverall compliance-score (100- Excellent, 0- All failed): %d\n", cautils.Float32ToInt(score))
}
// convertToImageScanSummary aggregates per-image grype scan data into one
// ImageScanSummary: the deduplicated list of scanned images, all extracted
// CVEs, per-package scores, and a severity → summary map.
// Images whose presenter data cannot be turned into a document are logged
// and skipped; the returned error is currently always nil.
func (jp *JsonPrinter) convertToImageScanSummary(imageScanData []cautils.ImageScanData) (*imageprinter.ImageScanSummary, error) {
	imageScanSummary := imageprinter.ImageScanSummary{
		CVEs:                  []imageprinter.CVE{},
		PackageScores:         map[string]*imageprinter.PackageScore{},
		MapsSeverityToSummary: map[string]*imageprinter.SeveritySummary{},
	}
	for i := range imageScanData {
		// Record each image once, even if it appears in several scan entries.
		if !slices.Contains(imageScanSummary.Images, imageScanData[i].Image) {
			imageScanSummary.Images = append(imageScanSummary.Images, imageScanData[i].Image)
		}
		presenterConfig := imageScanData[i].PresenterConfig
		doc, err := models.NewDocument(clio.Identification{}, presenterConfig.Packages, presenterConfig.Context, presenterConfig.Matches, presenterConfig.IgnoredMatches, presenterConfig.MetadataProvider, nil, presenterConfig.DBStatus)
		if err != nil {
			// Best-effort: skip this image but keep aggregating the rest.
			logger.L().Error(fmt.Sprintf("failed to create document for image: %v", imageScanData[i].Image), helpers.Error(err))
			continue
		}
		CVEs := extractCVEs(doc.Matches)
		imageScanSummary.CVEs = append(imageScanSummary.CVEs, CVEs...)
		// These helpers mutate the maps in place.
		setPkgNameToScoreMap(doc.Matches, imageScanSummary.PackageScores)
		setSeverityToSummaryMap(CVEs, imageScanSummary.MapsSeverityToSummary)
	}
	return &imageScanSummary, nil
}
func (jp *JsonPrinter) ActionPrint(ctx context.Context, opaSessionObj *cautils.OPASessionObj, imageScanData []cautils.ImageScanData) {
var err error
if opaSessionObj != nil {
err = printConfigurationsScanning(opaSessionObj, ctx, imageScanData, jp)
err = printConfigurationsScanning(opaSessionObj, ctx, jp)
} else if imageScanData != nil {
err = jp.PrintImageScan(ctx, imageScanData[0].PresenterConfig)
} else {
@@ -107,67 +73,16 @@ func (jp *JsonPrinter) ActionPrint(ctx context.Context, opaSessionObj *cautils.O
printer.LogOutputFile(jp.writer.Name())
}
func printConfigurationsScanning(opaSessionObj *cautils.OPASessionObj, ctx context.Context, imageScanData []cautils.ImageScanData, jp *JsonPrinter) error {
if imageScanData != nil {
imageScanSummary, err := jp.convertToImageScanSummary(imageScanData)
if err != nil {
logger.L().Error("failed to convert to image scan summary", helpers.Error(err))
return err
}
opaSessionObj.Report.SummaryDetails.Vulnerabilities.MapsSeverityToSummary = convertToReportSummary(imageScanSummary.MapsSeverityToSummary)
opaSessionObj.Report.SummaryDetails.Vulnerabilities.CVESummary = convertToCVESummary(imageScanSummary.CVEs)
opaSessionObj.Report.SummaryDetails.Vulnerabilities.PackageScores = convertToPackageScores(imageScanSummary.PackageScores)
opaSessionObj.Report.SummaryDetails.Vulnerabilities.Images = imageScanSummary.Images
}
func printConfigurationsScanning(opaSessionObj *cautils.OPASessionObj, ctx context.Context, jp *JsonPrinter) error {
r, err := json.Marshal(FinalizeResults(opaSessionObj))
if err != nil {
return err
}
_, err = jp.writer.Write(r)
return err
}
// convertToPackageScores maps imageprinter package scores onto their
// reportsummary representation, copying every field unchanged.
func convertToPackageScores(packageScores map[string]*imageprinter.PackageScore) map[string]*reportsummary.PackageSummary {
	converted := make(map[string]*reportsummary.PackageSummary, len(packageScores))
	for pkgName, pkgScore := range packageScores {
		summary := &reportsummary.PackageSummary{
			Name:                    pkgScore.Name,
			Version:                 pkgScore.Version,
			Score:                   pkgScore.Score,
			MapSeverityToCVEsNumber: pkgScore.MapSeverityToCVEsNumber,
		}
		converted[pkgName] = summary
	}
	return converted
}
// convertToCVESummary maps imageprinter CVEs onto reportsummary.CVESummary
// values, field-for-field and in input order.
//
// The original kept a manual counter i alongside the range loop — redundant
// bookkeeping that invites drift. The range index is used instead; the
// slice is preallocated to len(cves), so the indices always line up.
func convertToCVESummary(cves []imageprinter.CVE) []reportsummary.CVESummary {
	cveSummary := make([]reportsummary.CVESummary, len(cves))
	for i, cve := range cves {
		cveSummary[i] = reportsummary.CVESummary{
			Severity:    cve.Severity,
			ID:          cve.ID,
			Package:     cve.Package,
			Version:     cve.Version,
			FixVersions: cve.FixVersions,
			FixedState:  cve.FixedState,
		}
	}
	return cveSummary
}
// convertToReportSummary copies the severity → summary map into its
// reportsummary equivalent, preserving the CVE counts.
func convertToReportSummary(input map[string]*imageprinter.SeveritySummary) map[string]*reportsummary.SeveritySummary {
	output := make(map[string]*reportsummary.SeveritySummary, len(input))
	for severity, summary := range input {
		output[severity] = &reportsummary.SeveritySummary{
			NumberOfCVEs:        summary.NumberOfCVEs,
			NumberOfFixableCVEs: summary.NumberOfFixableCVEs,
		}
	}
	return output
}
func (jp *JsonPrinter) PrintImageScan(ctx context.Context, scanResults *models.PresenterConfig) error {
if scanResults == nil {
return fmt.Errorf("no image vulnerability data provided")

View File

@@ -5,8 +5,6 @@ import (
"os"
"testing"
"github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/imageprinter"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/stretchr/testify/assert"
)
@@ -85,110 +83,3 @@ func TestScore_Json(t *testing.T) {
})
}
}
// TestConvertToCVESummary verifies that each imageprinter.CVE is copied
// field-for-field into a reportsummary.CVESummary, preserving input order.
func TestConvertToCVESummary(t *testing.T) {
	// Input: two CVEs with distinct severities and fix metadata.
	cves := []imageprinter.CVE{
		{
			Severity:    "High",
			ID:          "CVE-2021-1234",
			Package:     "example-package",
			Version:     "1.0.0",
			FixVersions: []string{"1.0.1", "1.0.2"},
			FixedState:  "true",
		},
		{
			Severity:    "Medium",
			ID:          "CVE-2021-5678",
			Package:     "another-package",
			Version:     "2.0.0",
			FixVersions: []string{"2.0.1"},
			FixedState:  "false",
		},
	}
	// Expected: identical values in the reportsummary representation.
	want := []reportsummary.CVESummary{
		{
			Severity:    "High",
			ID:          "CVE-2021-1234",
			Package:     "example-package",
			Version:     "1.0.0",
			FixVersions: []string{"1.0.1", "1.0.2"},
			FixedState:  "true",
		},
		{
			Severity:    "Medium",
			ID:          "CVE-2021-5678",
			Package:     "another-package",
			Version:     "2.0.0",
			FixVersions: []string{"2.0.1"},
			FixedState:  "false",
		},
	}
	got := convertToCVESummary(cves)
	assert.Equal(t, want, got)
}
// TestConvertToPackageScores verifies that package scores are converted
// one-to-one into reportsummary.PackageSummary values, keyed by package name.
func TestConvertToPackageScores(t *testing.T) {
	// Input: two packages with different scores and severity breakdowns.
	packageScores := map[string]*imageprinter.PackageScore{
		"example-package": {
			Name:                    "example-package",
			Version:                 "1.0.0",
			Score:                   80.0,
			MapSeverityToCVEsNumber: map[string]int{"High": 2, "Medium": 1},
		},
		"another-package": {
			Name:                    "another-package",
			Version:                 "2.0.0",
			Score:                   60.0,
			MapSeverityToCVEsNumber: map[string]int{"High": 1, "Medium": 0},
		},
	}
	// Expected: identical values in the reportsummary representation.
	want := map[string]*reportsummary.PackageSummary{
		"example-package": {
			Name:                    "example-package",
			Version:                 "1.0.0",
			Score:                   80.0,
			MapSeverityToCVEsNumber: map[string]int{"High": 2, "Medium": 1},
		},
		"another-package": {
			Name:                    "another-package",
			Version:                 "2.0.0",
			Score:                   60.0,
			MapSeverityToCVEsNumber: map[string]int{"High": 1, "Medium": 0},
		},
	}
	got := convertToPackageScores(packageScores)
	assert.Equal(t, want, got)
}
// TestConvertToReportSummary verifies that severity summaries are copied
// one-to-one into the reportsummary representation.
func TestConvertToReportSummary(t *testing.T) {
	input := map[string]*imageprinter.SeveritySummary{
		"High":   {NumberOfCVEs: 10, NumberOfFixableCVEs: 5},
		"Medium": {NumberOfCVEs: 5, NumberOfFixableCVEs: 2},
	}
	want := map[string]*reportsummary.SeveritySummary{
		"High":   {NumberOfCVEs: 10, NumberOfFixableCVEs: 5},
		"Medium": {NumberOfCVEs: 5, NumberOfFixableCVEs: 2},
	}
	got := convertToReportSummary(input)
	assert.Equal(t, want, got)
}

View File

@@ -192,21 +192,18 @@ func (pp *PdfPrinter) printTable(m pdf.Maroto, summaryDetails *reportsummary.Sum
}
}
size := 6.0
gridSize := []uint{1, 1, 6, 1, 1, 2}
m.TableList(headers, controls, props.TableList{
HeaderProp: props.TableListContent{
Family: consts.Arial,
Style: consts.Bold,
Size: size,
GridSizes: gridSize,
Size: 6.0,
GridSizes: []uint{1, 5, 2, 2, 2},
},
ContentProp: props.TableListContent{
Family: consts.Courier,
Style: consts.Normal,
Size: size,
GridSizes: gridSize,
Size: 6.0,
GridSizes: []uint{1, 5, 2, 2, 2},
CellTextColorChangerColumnIndex: 0,
CellTextColorChangerFunc: func(cellValue string) color.Color {
if cellValue == "Critical" {

View File

@@ -3,7 +3,6 @@ package prettyprinter
import (
"fmt"
"os"
"path/filepath"
"github.com/kubescape/kubescape/v3/core/cautils"
"github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/configurationprinter"
@@ -78,8 +77,9 @@ func (rp *RepoPrinter) getWorkloadScanCommand(ns, kind, name string, source repo
}
if source.FileType == reporthandling.SourceTypeHelmChart {
return fmt.Sprintf("%s --chart-path=%s --file-path=%s", cmd, source.HelmPath, filepath.Join(source.Path, source.RelativePath))
return fmt.Sprintf("%s --chart-path=%s --file-path=%s", cmd, source.HelmPath, fmt.Sprintf("%s/%s", source.Path, source.RelativePath))
} else {
return fmt.Sprintf("%s --file-path=%s", cmd, filepath.Join(source.Path, source.RelativePath))
return fmt.Sprintf("%s --file-path=%s", cmd, fmt.Sprintf("%s/%s", source.Path, source.RelativePath))
}
}

View File

@@ -1,7 +1,6 @@
package prettyprinter
import (
"path/filepath"
"testing"
"github.com/kubescape/opa-utils/reporthandling"
@@ -51,18 +50,7 @@ func TestRepoScan_getWorkloadScanCommand(t *testing.T) {
Path: "path",
RelativePath: "relativePath",
},
want: "$ kubescape scan workload kind/name --namespace ns --file-path=" + filepath.Join("path", "relativePath"),
},
{
testName: "relative file path",
ns: "ns",
kind: "kind",
name: "name",
source: reporthandling.Source{
Path: "",
RelativePath: "relativePath",
},
want: "$ kubescape scan workload kind/name --namespace ns --file-path=relativePath",
want: "$ kubescape scan workload kind/name --namespace ns --file-path=path/relativePath",
},
{
testName: "helm path",
@@ -75,7 +63,7 @@ func TestRepoScan_getWorkloadScanCommand(t *testing.T) {
HelmPath: "helmPath",
FileType: "Helm Chart",
},
want: "$ kubescape scan workload kind/name --namespace ns --chart-path=helmPath --file-path=" + filepath.Join("path", "relativePath"),
want: "$ kubescape scan workload kind/name --namespace ns --chart-path=helmPath --file-path=path/relativePath",
},
{
testName: "file path - no namespace",
@@ -85,7 +73,7 @@ func TestRepoScan_getWorkloadScanCommand(t *testing.T) {
Path: "path",
RelativePath: "relativePath",
},
want: "$ kubescape scan workload kind/name --file-path=" + filepath.Join("path", "relativePath"),
want: "$ kubescape scan workload kind/name --file-path=path/relativePath",
},
{
testName: "helm path - no namespace",
@@ -97,7 +85,7 @@ func TestRepoScan_getWorkloadScanCommand(t *testing.T) {
HelmPath: "helmPath",
FileType: "Helm Chart",
},
want: "$ kubescape scan workload kind/name --chart-path=helmPath --file-path=" + filepath.Join("path", "relativePath"),
want: "$ kubescape scan workload kind/name --chart-path=helmPath --file-path=path/relativePath",
},
}

View File

@@ -160,18 +160,14 @@ func failedPathsToString(control *resourcesresults.ResourceAssociatedControl) []
return paths
}
func fixPathsToString(control *resourcesresults.ResourceAssociatedControl, onlyPath bool) []string {
func fixPathsToString(control *resourcesresults.ResourceAssociatedControl) []string {
var paths []string
for j := range control.ResourceAssociatedRules {
for k := range control.ResourceAssociatedRules[j].Paths {
if p := control.ResourceAssociatedRules[j].Paths[k].FixPath.Path; p != "" {
if onlyPath {
paths = append(paths, p)
} else {
v := control.ResourceAssociatedRules[j].Paths[k].FixPath.Value
paths = append(paths, fmt.Sprintf("%s=%s", p, v))
}
v := control.ResourceAssociatedRules[j].Paths[k].FixPath.Value
paths = append(paths, fmt.Sprintf("%s=%s", p, v))
}
}
}
@@ -205,7 +201,7 @@ func reviewPathsToString(control *resourcesresults.ResourceAssociatedControl) []
}
func AssistedRemediationPathsToString(control *resourcesresults.ResourceAssociatedControl) []string {
paths := append(fixPathsToString(control, false), append(deletePathsToString(control), reviewPathsToString(control)...)...)
paths := append(fixPathsToString(control), append(deletePathsToString(control), reviewPathsToString(control)...)...)
// TODO - deprecate failedPaths once all controls support review/delete paths
paths = appendFailedPathsIfNotInPaths(paths, failedPathsToString(control))
return paths

View File

@@ -254,16 +254,16 @@ func TestFixPathsToString(t *testing.T) {
}
// Test case 1: Empty ResourceAssociatedRules
actualPaths := fixPathsToString(emptyControl, false)
actualPaths := fixPathsToString(emptyControl)
assert.Nil(t, actualPaths)
// Test case 2: Single ResourceAssociatedRule and one ReviewPath
actualPaths = fixPathsToString(singleRuleControl, false)
actualPaths = fixPathsToString(singleRuleControl)
expectedPath := []string{"fix-path1=fix-path-value1"}
assert.Equal(t, expectedPath, actualPaths)
// Test case 3: Multiple ResourceAssociatedRules and multiple ReviewPaths
actualPaths = fixPathsToString(multipleRulesControl, false)
actualPaths = fixPathsToString(multipleRulesControl)
expectedPath = []string{"fix-path2=fix-path-value2", "fix-path3=fix-path-value3"}
assert.Equal(t, expectedPath, actualPaths)
}

View File

@@ -2,7 +2,6 @@ package printer
import (
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"net/url"
@@ -188,10 +187,8 @@ func (sp *SARIFPrinter) printConfigurationScan(ctx context.Context, opaSessionOb
run := sarif.NewRunWithInformationURI(toolName, toolInfoURI)
basePath := getBasePathFromMetadata(*opaSessionObj)
for resourceID, result := range opaSessionObj.ResourcesResult { //
for resourceID, result := range opaSessionObj.ResourcesResult {
if result.GetStatus(nil).IsFailed() {
helmChartFileType := false
var mappingnodes []map[string]cautils.MappingNode
resourceSource := opaSessionObj.ResourceSource[resourceID]
filepath := resourceSource.RelativePath
@@ -200,15 +197,9 @@ func (sp *SARIFPrinter) printConfigurationScan(ctx context.Context, opaSessionOb
continue
}
// If the fileType is helm chart
if templateNodes, ok := opaSessionObj.TemplateMapping[resourceID]; ok {
mappingnodes = templateNodes.Nodes
helmChartFileType = true
}
rsrcAbsPath := path.Join(basePath, filepath)
locationResolver, err := locationresolver.NewFixPathLocationResolver(rsrcAbsPath) //
if err != nil && !helmChartFileType {
locationResolver, err := locationresolver.NewFixPathLocationResolver(rsrcAbsPath)
if err != nil {
logger.L().Debug("failed to create location resolver", helpers.Error(err))
continue
}
@@ -217,24 +208,12 @@ func (sp *SARIFPrinter) printConfigurationScan(ctx context.Context, opaSessionOb
ac := toPin
if ac.GetStatus(nil).IsFailed() {
var location locationresolver.Location
ctl := opaSessionObj.Report.SummaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, ac.GetID())
if helmChartFileType {
for _, subfileNodes := range mappingnodes {
// first get the failed path, then if cannot find it, use the Fix path, cui it to find the closest error.
location, split := resolveFixLocation(subfileNodes, &ac)
sp.addRule(run, ctl)
r := sp.addResult(run, ctl, filepath, location)
collectFixesFromMappingNodes(r, ac, opaSessionObj, resourceID, filepath, rsrcAbsPath, location, subfileNodes, split)
}
} else {
location = sp.resolveFixLocation(opaSessionObj, locationResolver, &ac, resourceID)
sp.addRule(run, ctl)
r := sp.addResult(run, ctl, filepath, location)
collectFixes(ctx, r, ac, opaSessionObj, resourceID, filepath, rsrcAbsPath)
}
location := sp.resolveFixLocation(opaSessionObj, locationResolver, &ac, resourceID)
sp.addRule(run, ctl)
result := sp.addResult(run, ctl, filepath, location)
collectFixes(ctx, result, ac, opaSessionObj, resourceID, filepath)
}
}
}
@@ -278,56 +257,6 @@ func (sp *SARIFPrinter) resolveFixLocation(opaSessionObj *cautils.OPASessionObj,
return location
}
// getFixPath returns the first remediation path for the control: failed
// paths take priority; fix paths are used as a fallback. onlyPath is passed
// through to fixPathsToString to control whether values are appended.
// Returns "" when the control has no paths at all.
func getFixPath(ac *resourcesresults.ResourceAssociatedControl, onlyPath bool) string {
	paths := failedPathsToString(ac)
	if len(paths) == 0 {
		paths = fixPathsToString(ac, onlyPath)
	}
	if len(paths) == 0 {
		return ""
	}
	return paths[0]
}
// resolveFixLocation resolves the control's first fix path to a location in
// the template mapping, together with the split index reported by
// getLocationFromMappingNodes. With no usable path it points at the top of
// the document (line 1, column 1) and returns split -1.
func resolveFixLocation(mappingnodes map[string]cautils.MappingNode, ac *resourcesresults.ResourceAssociatedControl) (locationresolver.Location, int) {
	fixPath := getFixPath(ac, true)
	if fixPath == "" {
		return locationresolver.Location{Line: 1, Column: 1}, -1
	}
	return getLocationFromMappingNodes(mappingnodes, fixPath)
}
// getLocationFromNode derives an editor location for a fix path: the line is
// the node's recorded template line number; the column encodes the path's
// nesting depth — two spaces per dot-separated level, 1-based.
//
// strings.Count(path, ".") is equivalent to len(strings.Split(path, "."))-1
// and avoids allocating the intermediate slice.
func getLocationFromNode(node cautils.MappingNode, path string) locationresolver.Location {
	line := node.TemplateLineNumber
	column := strings.Count(path, ".")*2 + 1 // column begins with 1 instead of 0
	return locationresolver.Location{Line: line, Column: column}
}
// getLocationFromMappingNodes resolves fixPath to a location inside the
// template mapping. It returns the matched location plus a "split" index:
// -1 when fixPath matched exactly (or nothing matched at all), otherwise
// the number of leading path fields shared with the closest existing
// ancestor node.
func getLocationFromMappingNodes(mappingnodes map[string]cautils.MappingNode, fixPath string) (locationresolver.Location, int) {
	var location locationresolver.Location
	// If cannot match any node, return default location
	location = locationresolver.Location{Line: 1, Column: 1}
	split := -1
	if node, ok := mappingnodes[fixPath]; ok {
		// Exact match: point straight at the node.
		location = getLocationFromNode(node, fixPath)
	} else {
		// Walk up the dotted path, longest prefix first, until an ancestor
		// node exists; record how many fields survived in split.
		fields := strings.Split(fixPath, ".")
		for i := len(fields) - 1; i >= 0; i-- {
			field := fields[:i]
			closestPath := strings.Join(field, ".")
			if node, ok := mappingnodes[closestPath]; ok {
				location = getLocationFromNode(node, closestPath)
				split = i
				break
			}
		}
	}
	return location, split
}
func addFix(result *sarif.Result, filepath string, startLine, startColumn, endLine, endColumn int, text string) {
// Create a new replacement with the specified start and end lines and columns, and the inserted text.
replacement := sarif.NewReplacement(
@@ -343,15 +272,6 @@ func addFix(result *sarif.Result, filepath string, startLine, startColumn, endLi
sarif.NewSimpleArtifactLocation(filepath),
).WithReplacement(replacement)
// check if the fix is already added
for _, fix := range result.Fixes {
for _, ac := range fix.ArtifactChanges {
if hashArtifactChange(ac) == hashArtifactChange(artifactChange) {
return
}
}
}
// Add the artifact change to the result's fixes.
result.AddFix(sarif.NewFix().WithArtifactChanges([]*sarif.ArtifactChange{artifactChange}))
}
@@ -417,34 +337,33 @@ func collectDiffs(dmp *diffmatchpatch.DiffMatchPatch, diffs []diffmatchpatch.Dif
}
}
func collectFixes(ctx context.Context, result *sarif.Result, ac resourcesresults.ResourceAssociatedControl, opaSessionObj *cautils.OPASessionObj, resourceID string, filepath string, rsrcAbsPath string) {
func collectFixes(ctx context.Context, result *sarif.Result, ac resourcesresults.ResourceAssociatedControl, opaSessionObj *cautils.OPASessionObj, resourceID string, filepath string) {
for _, rule := range ac.ResourceAssociatedRules {
if !rule.GetStatus(nil).IsFailed() {
continue
}
for _, rulePaths := range rule.Paths {
fixPath := rulePaths.FixPath.Path
if fixPath == "" {
if rulePaths.FixPath.Path == "" {
continue
}
fileAsString, err := fixhandler.GetFileString(rsrcAbsPath)
if err != nil {
logger.L().Debug("failed to access "+filepath, helpers.Error(err))
continue
}
var fixedYamlString string
// if strings.HasPrefix(rulePaths.FixPath.Value, fixhandler.UserValuePrefix) {
// continue
// }
documentIndex, ok := getDocIndex(opaSessionObj, resourceID)
if !ok {
continue
}
yamlExpression := fixhandler.FixPathToValidYamlExpression(fixPath, rulePaths.FixPath.Value, documentIndex)
yamlExpression := fixhandler.FixPathToValidYamlExpression(rulePaths.FixPath.Path, rulePaths.FixPath.Value, documentIndex)
fileAsString, err := fixhandler.GetFileString(filepath)
if err != nil {
logger.L().Debug("failed to access "+filepath, helpers.Error(err))
continue
}
fixedYamlString, err = fixhandler.ApplyFixToContent(ctx, fileAsString, yamlExpression)
fixedYamlString, err := fixhandler.ApplyFixToContent(ctx, fileAsString, yamlExpression)
if err != nil {
logger.L().Debug("failed to fix "+filepath+" with "+yamlExpression, helpers.Error(err))
continue
@@ -457,98 +376,6 @@ func collectFixes(ctx context.Context, result *sarif.Result, ac resourcesresults
}
}
// collectFixesFromMappingNodes attaches suggested-fix diffs to a SARIF
// result for a Helm-templated resource. For each failed rule path it builds
// a fixed version of the template file — replacing an existing node when
// split == -1, or inserting the missing fields otherwise — then diffs it
// against the original and records the changes on result.
// File-access failures are logged at debug level and skipped (best-effort).
func collectFixesFromMappingNodes(result *sarif.Result, ac resourcesresults.ResourceAssociatedControl, opaSessionObj *cautils.OPASessionObj, resourceID string, filepath string, rsrcAbsPath string, location locationresolver.Location, subFileNodes map[string]cautils.MappingNode, split int) {
	for _, rule := range ac.ResourceAssociatedRules {
		if !rule.GetStatus(nil).IsFailed() {
			continue
		}
		for _, rulePaths := range rule.Paths {
			fixPath := rulePaths.FixPath.Path
			if fixPath == "" {
				// Nothing actionable for this rule path.
				continue
			}
			// Read the template from its absolute path; filepath is only
			// used for display in logs and SARIF locations.
			fileAsString, err := fixhandler.GetFileString(rsrcAbsPath)
			if err != nil {
				logger.L().Debug("failed to access "+filepath, helpers.Error(err))
				continue
			}
			var fixedYamlString string
			fixValue := rulePaths.FixPath.Value
			if split == -1 { //replaceNode
				node := subFileNodes[fixPath]
				fixedYamlString = formReplaceFixedYamlString(node, fileAsString, location, fixValue, fixPath)
			} else { //insertNode
				// Insert the new fields after the last line of the matched ancestor.
				maxLineNumber := getTheLocationOfAddPart(split, fixPath, subFileNodes)
				fixedYamlString = applyFixToContent(split, fixPath, fileAsString, maxLineNumber, fixValue)
			}
			dmp := diffmatchpatch.New()
			diffs := dmp.DiffMain(fileAsString, fixedYamlString, false)
			collectDiffs(dmp, diffs, result, filepath, fileAsString)
		}
	}
}
// applyFixToContent builds the YAML lines for the portion of fixPath that
// does not yet exist in the template (fields from index split onward) and
// inserts them, preceded by a marker comment, after line addLine of
// fileAsString. Indentation is two spaces per nesting level; only the leaf
// field carries the value.
// NOTE(review): the indent depth uses the absolute field index i, assuming
// each dotted level maps to one YAML nesting level — confirm for templates
// where that does not hold.
func applyFixToContent(split int, fixPath string, fileAsString string, addLine int, value string) string {
	addLines := make([]string, 0)
	fields := strings.Split(fixPath, ".")
	for i := split; i < len(fields); i++ {
		field := fields[i]
		var addedLine string
		if i != len(fields)-1 {
			// Intermediate mapping key: "key:" at depth i.
			addedLine = strings.Repeat(" ", (i*2)) + field + ":"
		} else {
			// Leaf key carries the suggested value.
			addedLine = strings.Repeat(" ", (i*2)) + field + ": " + value
		}
		addLines = append(addLines, addedLine)
	}
	fixedYamlString := formAddFixedYamlString(fileAsString, addLine, addLines)
	return fixedYamlString
}
// formReplaceFixedYamlString rewrites the template line at location.Line to
// carry the suggested fix. When the node has no current value, an inline
// comment describing the suggested value is appended to the line; otherwise
// every occurrence of the current value on that line is replaced by
// fixValue, with a marker comment prepended.
// NOTE(review): lines is indexed with location.Line directly — confirm the
// caller's line numbers are 0-based for this slice.
func formReplaceFixedYamlString(node cautils.MappingNode, fileAsString string, location locationresolver.Location, fixValue string, fixPath string) string {
	lines := strings.Split(fileAsString, "\n")
	currentValue := node.Value
	if currentValue == "" {
		lines[location.Line] += " # This is the suggested modification, the value for " + fixPath + " is " + fixValue + "\n"
	} else {
		annotated := "# This is the suggested modification\n" + lines[location.Line]
		lines[location.Line] = strings.Replace(annotated, currentValue, fixValue, -1)
	}
	return strings.Join(lines, "\n")
}
func formAddFixedYamlString(fileAsString string, addLine int, addLines []string) string {
yamlLines := strings.Split(fileAsString, "\n")
newYamlLines := append(yamlLines[:addLine], "# This is the suggested modification")
newYamlLines = append(newYamlLines, addLines...)
yamlLines = strings.Split(fileAsString, "\n")
newYamlLines = append(newYamlLines, yamlLines[addLine:]...)
fixedYamlString := strings.Join(newYamlLines, "\n")
return fixedYamlString
}
// getTheLocationOfAddPart returns the largest template line number among
// mapping nodes whose key starts with the ancestor path formed by the first
// split fields of fixPath — i.e. the line after which the new fields should
// be inserted. Returns -1 when no node matches.
//
// strings.HasPrefix replaces the original strings.Index(k, closestPath) == 0,
// which is the idiomatic form and avoids scanning the whole key on a
// mismatch; the nested line-number comparison is folded into one condition.
func getTheLocationOfAddPart(split int, fixPath string, mappingnodes map[string]cautils.MappingNode) int {
	fields := strings.Split(fixPath, ".")
	closestPath := strings.Join(fields[:split], ".")
	maxLineNumber := -1
	for key, node := range mappingnodes {
		if strings.HasPrefix(key, closestPath) && node.TemplateLineNumber > maxLineNumber {
			maxLineNumber = node.TemplateLineNumber
		}
	}
	return maxLineNumber
}
func getDocIndex(opaSessionObj *cautils.OPASessionObj, resourceID string) (int, bool) {
resource := opaSessionObj.AllResources[resourceID]
localworkload, ok := resource.(*localworkload.LocalWorkload)
@@ -583,7 +410,3 @@ func getBasePathFromMetadata(opaSessionObj cautils.OPASessionObj) string {
// generateRemediationMessage renders the control's remediation text with
// the "Remediation: " prefix used in SARIF output.
func (sp *SARIFPrinter) generateRemediationMessage(control reportsummary.IControlSummary) string {
	return "Remediation: " + control.GetRemediation()
}
// hashArtifactChange returns the SHA-256 digest of the JSON encoding of
// artifactChange, used to deduplicate identical fixes before attaching them
// to a SARIF result. The json.Marshal error is ignored: on failure the
// digest of the empty payload is used, so all unmarshalable inputs collapse
// to one key — acceptable for best-effort deduplication.
func hashArtifactChange(artifactChange *sarif.ArtifactChange) [32]byte {
	acJson, _ := json.Marshal(artifactChange)
	return sha256.Sum256(acJson)
}

View File

@@ -16,7 +16,7 @@ import (
const indicator = "†"
// FinalizeResults finalize the results objects by copying data from map to lists
// finalizeV2Report finalize the results objects by copying data from map to lists
func FinalizeResults(data *cautils.OPASessionObj) *reporthandlingv2.PostureReport {
report := reporthandlingv2.PostureReport{
SummaryDetails: data.Report.SummaryDetails,

View File

@@ -76,7 +76,7 @@ func (report *ReportEventReceiver) Submit(ctx context.Context, opaSessionObj *ca
}
if err := report.prepareReport(opaSessionObj); err != nil {
return fmt.Errorf("failed to submit scan results. reason: %s", err.Error())
return fmt.Errorf("failed to submit scan results. url: '%s', reason: %s", report.getReportUrl(), err.Error())
}
logger.L().Debug("", helpers.String("account ID", report.GetAccountID()))
@@ -249,7 +249,7 @@ func (report *ReportEventReceiver) sendReport(postureReport *reporthandlingv2.Po
report.tenantConfig.DeleteCredentials()
}
return fmt.Errorf("%w:%s", err, strResponse)
return fmt.Errorf("%s, %v:%s", report.getReportUrl(), err, strResponse)
}
// message is taken only from last report

View File

@@ -146,7 +146,7 @@ func ValidatePrinter(scanType cautils.ScanTypes, scanContext cautils.ScanningCon
if printFormat == printer.SARIFFormat {
// supported types for SARIF
switch scanContext {
case cautils.ContextDir, cautils.ContextFile, cautils.ContextGitLocal, cautils.ContextGitRemote:
case cautils.ContextDir, cautils.ContextFile, cautils.ContextGitLocal:
return nil
default:
return fmt.Errorf("format \"%s\" is only supported when scanning local files", printFormat)

View File

@@ -14,30 +14,30 @@ import (
type DummyReporter struct{}
func (dr *DummyReporter) Submit(_ context.Context, _ *cautils.OPASessionObj) error {
func (dr *DummyReporter) Submit(_ context.Context, opaSessionObj *cautils.OPASessionObj) error {
return nil
}
func (dr *DummyReporter) SetTenantConfig(_ cautils.ITenantConfig) {}
func (dr *DummyReporter) DisplayMessage() {}
func (dr *DummyReporter) GetURL() string { return "" }
func (dr *DummyReporter) SetTenantConfig(tenantConfig cautils.ITenantConfig) {}
func (dr *DummyReporter) DisplayMessage() {}
func (dr *DummyReporter) GetURL() string { return "" }
type SpyPrinter struct {
ActionPrintCalls int
ScoreCalls int
}
func (sp *SpyPrinter) SetWriter(_ context.Context, _ string) {}
func (sp *SpyPrinter) PrintNextSteps() {}
func (sp *SpyPrinter) ActionPrint(_ context.Context, _ *cautils.OPASessionObj, _ []cautils.ImageScanData) {
func (sp *SpyPrinter) SetWriter(_ context.Context, outputFile string) {}
func (sp *SpyPrinter) PrintNextSteps() {}
func (sp *SpyPrinter) ActionPrint(_ context.Context, opaSessionObj *cautils.OPASessionObj, _ []cautils.ImageScanData) {
sp.ActionPrintCalls += 1
}
func (sp *SpyPrinter) Score(_ float32) {
func (sp *SpyPrinter) Score(score float32) {
sp.ScoreCalls += 1
}
func TestResultsHandlerHandleResultsPrintsResultsToUI(t *testing.T) {
reporter := &DummyReporter{}
var printers []printer.IPrinter
printers := []printer.IPrinter{}
uiPrinter := &SpyPrinter{}
fakeScanData := &cautils.OPASessionObj{
Report: &reporthandlingv2.PostureReport{
@@ -50,8 +50,7 @@ func TestResultsHandlerHandleResultsPrintsResultsToUI(t *testing.T) {
rh := NewResultsHandler(reporter, printers, uiPrinter)
rh.SetData(fakeScanData)
err := rh.HandleResults(context.TODO())
assert.NoError(t, err)
rh.HandleResults(context.TODO())
want := 1
got := uiPrinter.ActionPrintCalls
@@ -148,6 +147,12 @@ func TestValidatePrinter(t *testing.T) {
format: printer.SARIFFormat,
expectErr: errors.New("format \"sarif\" is only supported when scanning local files"),
},
{
name: "sarif format for remote url context should return error",
scanContext: cautils.ContextGitURL,
format: printer.SARIFFormat,
expectErr: errors.New("format \"sarif\" is only supported when scanning local files"),
},
{
name: "sarif format for local dir context should not return error",
scanContext: cautils.ContextDir,
@@ -235,7 +240,7 @@ func TestNewPrinter(t *testing.T) {
version: defaultVersion,
},
{
name: "Pretty printer",
name: "Prettry printer",
format: "pretty-printer",
viewType: "control",
version: defaultVersion,
@@ -254,8 +259,8 @@ func TestNewPrinter(t *testing.T) {
FormatVersion: tt.version,
View: tt.viewType,
}
p := NewPrinter(ctx, tt.format, scanInfo, "my-cluster")
assert.NotNil(t, p)
printer := NewPrinter(ctx, tt.format, scanInfo, "my-cluster")
assert.NotNil(t, printer)
})
}
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 558 KiB

View File

@@ -14,19 +14,23 @@ The features serve different stages of the workflow of the users:
The items in the Kubescape roadmap are split into 3 major groups based on the feature planning maturity:
* [Planning](#planning-) - we have tickets open for these issues with a more or less clear vision of design.
* [Backlog](#backlog-) - features that were discussed at a high level but are not ready for development.
* [Backlog](#backlog-) - features that were discussed at a high level but are not ready for development.
* [Wishlist](#wishlist-) - features that we are dreaming of in 😀 and want to push them gradually forward.
## Planning 👷
* ### eBPF based anomaly detection in workloads
* ### Storing scan results in cluster
The introduction of runtime anomaly detection using eBPF (extended Berkeley Packet Filter) events marks an addition to the Kubescape project's development roadmap. This feature aims to leverage the high-performance monitoring capabilities of eBPF to detect abnormal behavior within Kubernetes workloads in real-time. By capturing and analyzing eBPF events, Kubescape will be able to identify deviations from application profiles, such as unexpected network connections, unauthorized process executions, or unusual system calls, which could indicate a security breach. This anomaly detection mechanism is designed to operate with minimal overhead, ensuring that security monitoring does not compromise system performance.
We want the Kubescape scan results (both cluster and image scan) to be stored in the cluster locally as `CRD`s. This will lead to an easier integration with results by other projects as well as with scripting via `kubectl`. Along with this, the image scan based controls will be able to avoid accessing external resources for image vulnerability scan results.
* ### Enriching Vulnerability scan results with advanced prioritization data sources
* ### Vulnerability prioritization based on workload file activity
Integrating EPSS (Exploit Prediction Scoring System) and CISA-KEV (Known Exploited Vulnerabilities) metrics into Kubescape's CLI and Operator vulnerability scan results represents a significant enhancement in the project's roadmap. This integration aims to enrich the vulnerability management process by providing more contextual and predictive insights into the security risks associated with Kubernetes clusters. By leveraging EPSS scores, Kubescape will offer predictions on the likelihood of a vulnerability being exploited, enabling users to prioritize remediations based on risk rather than just vulnerability presence. The addition of CISA-KEV metrics further enhances this capability by flagging vulnerabilities that are actively being exploited in the wild, as identified by the Cybersecurity and Infrastructure Security Agency (CISA). This dual approach ensures that Kubescape users are not only informed about the vulnerabilities in their environments but are also equipped with critical information on which vulnerabilities to remediate first, based on their exploitability and active exploitation trends. This strategic enhancement to Kubescape's vulnerability scan results will provide users with a powerful tool for making informed, risk-based security decisions in their Kubernetes environments.
Implementing an eBPF agent (based on Falco or Tracee) which tracks file activity in workloads to prioritize container image vulnerabilities.
* ### Prioritization engine using MITRE Attack matrix based attack chains
Create a security issue prioritization engine that scores resources based on control based attack chains. All Kubescape controls can be arranged into attack categories of the MITRE Attack matrix. The Attack matrix categories can be connected to each other based on a theoretical attack (ie. you can't have privilege escalation without initial access). Each of the Kubescape controls is to be categorized in these system and Kubescape will calculate a priority score based on the interconnections between failed controls.
* ### Integration with image registries
@@ -35,7 +39,7 @@ Integrating EPSS (Exploit Prediction Scoring System) and CISA-KEV (Known Exploit
* ### Kubescape CLI control over cluster operations
Add functionality to Kubescape CLI to trigger operations in Kubescape cluster components (example: trigger image scans, etc.)
* ### Git integration for pull requests
Create insightful GitHub actions for Kubescape.
@@ -87,14 +91,14 @@ Integrating EPSS (Exploit Prediction Scoring System) and CISA-KEV (Known Exploit
## Completed features 🎓
* Kubelet configuration validation
* Kubelet configuration validation
* API server configuration validation
* Image vulnerability scanning based controls
* Image vulnerability scanning based controls
* Assisted remediation (telling where/what to fix)
* Integration with Prometheus
* Configuration of controls (customizing rules for a given environment)
* Installation in the cluster for continuous monitoring
* Host scanner
* Host scanner
* Cloud vendor API integration
* Custom exceptions
* Custom frameworks

View File

@@ -22,6 +22,7 @@ e.g. When a `kube-system` resource fails and it is ok, simply add the resource t
* `frameworkName` - Framework names can be found [here](https://github.com/armosec/regolibrary/tree/master/frameworks) (regex supported)
* `controlName` - Control names can be found [here](https://github.com/armosec/regolibrary/tree/master/controls) (regex supported)
* `controlID` - Control ID can be found [here](https://github.com/armosec/regolibrary/tree/master/controls) (regex supported)
* `ruleName` - Rule names can be found [here](https://github.com/armosec/regolibrary/tree/master/rules) (regex supported)
You can find [here](https://github.com/kubescape/kubescape/tree/master/examples/exceptions) some examples of exceptions files

View File

@@ -1,29 +0,0 @@
apiVersion: v2
name: kubescape
description:
Kubescape is the first open-source tool for testing whether Kubernetes is deployed securely according to multiple frameworks:
regulatory standards, customized company policies and DevSecOps best practices, such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo) and the [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/).
Kubescape scans K8s clusters, YAML files, and Helm charts, detects misconfigurations and software vulnerabilities at early stages of the CI/CD pipeline, and instantly provides a risk score as well as risk trends over time.
Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI and GitHub workflows.
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.0.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v1.0.128"

View File

@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "kubescape.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "kubescape.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "kubescape.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "kubescape.labels" -}}
helm.sh/chart: {{ include "kubescape.chart" . }}
{{ include "kubescape.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "kubescape.selectorLabels" -}}
app.kubernetes.io/name: {{ include "kubescape.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "kubescape.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "kubescape.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -1,10 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ include "kubescape.fullname" . }}
labels:
{{- include "kubescape.labels" . | nindent 4 }}
rules:
- apiGroups: ["*"]
resources: ["*"]
verbs: ["get", "list", "describe"]

View File

@@ -1,15 +0,0 @@
kind: ClusterRoleBinding
metadata:
name: {{ include "kubescape.fullname" . }}
labels:
{{- include "kubescape.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ include "kubescape.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ include "kubescape.serviceAccountName" . }}
namespace: {{ .Release.Namespace | quote }}

View File

@@ -1,14 +0,0 @@
{{- if .Values.configMap.create -}}
kind: ConfigMap
apiVersion: v1
metadata:
name: {{ include "kubescape.fullname" . }}-configmap
labels:
{{- include "kubescape.labels" . | nindent 4 }}
data:
config.json: |
{
"customerGUID": "{{ .Values.configMap.params.customerGUID }}",
"clusterName": "{{ .Values.configMap.params.clusterName }}"
}
{{- end }}

Some files were not shown because too many files have changed in this diff Show More