Mirror of https://github.com/kubescape/kubescape.git (synced 2026-02-14 18:09:55 +00:00)

Compare commits

61 Commits
| Author | SHA1 | Date |
|---|---|---|
| | b167435c4d | |
| | 9b29321a53 | |
| | 466a11fa1c | |
| | cfe022ff1d | |
| | e0eeb691e6 | |
| | dc65bd4ccc | |
| | 02790da144 | |
| | b97f50ffb5 | |
| | 0841d1d483 | |
| | fbef268f22 | |
| | 427dccadd3 | |
| | 01bb19bf6e | |
| | c0d4bb45eb | |
| | 222c1ec866 | |
| | dc49218c7c | |
| | 3b4585a827 | |
| | 7f79bc2d1d | |
| | 3623e55433 | |
| | 2f7841b5a2 | |
| | f70d81d7c4 | |
| | bd49251234 | |
| | 57addd493f | |
| | 8f009d4698 | |
| | 7c0e38072d | |
| | aa9a610c4c | |
| | 25bd51e8b4 | |
| | 2759beece5 | |
| | 6ce0121a03 | |
| | 09aa1ab866 | |
| | 0ec188b23d | |
| | 090820ba04 | |
| | 0cf24d058f | |
| | c32e665809 | |
| | 82ec11b207 | |
| | 32a15acdea | |
| | 837a50c903 | |
| | bd00d153e9 | |
| | 306050046d | |
| | 413db87e85 | |
| | 4d3b3efb9a | |
| | 7ca609d39f | |
| | 872c0c9fab | |
| | 9353eb5b54 | |
| | aa62fbea68 | |
| | 08d964b631 | |
| | 75fb07efde | |
| | 9445e0aa01 | |
| | ea12643a3c | |
| | 0c42b41dcc | |
| | 351f957083 | |
| | 9d876b14e9 | |
| | 895233630f | |
| | 423d9c5c1f | |
| | 3f3681a4cd | |
| | d6ccc37640 | |
| | 3b6bc00b03 | |
| | 8984f941ab | |
| | 46eb266064 | |
| | 0f2125817b | |
| | 1225540590 | |
| | 0e4ff13276 | |
392 .github/workflows/00-pr-scanner.yaml vendored
@@ -1,191 +1,243 @@
name: 00-pr_scanner
permissions: read-all
on:
workflow_dispatch: {}
pull_request:
types: [opened, reopened, synchronize, ready_for_review]
paths-ignore:
- "**.yaml"
- "**.yml"
- "**.md"
- "**.sh"
- "website/*"
- "examples/*"
- "docs/*"
- "build/*"
- ".github/*"
workflow_dispatch: {}
pull_request:
types: [opened, reopened, synchronize, ready_for_review]
paths-ignore:
- "**.yaml"
- "**.yml"
- "**.md"
- "**.sh"
- "website/*"
- "examples/*"
- "docs/*"
- "build/*"
- ".github/*"

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true

jobs:
pr-scanner:
permissions:
actions: read
attestations: read
checks: read
contents: write
deployments: read
discussions: read
id-token: write
issues: read
models: read
packages: read
pages: read
pull-requests: write
repository-projects: read
security-events: read
statuses: read
uses: ./.github/workflows/a-pr-scanner.yaml
with:
RELEASE: ""
CLIENT: test
CGO_ENABLED: 0
GO111MODULE: ""
secrets: inherit

wf-preparation:
name: secret-validator
runs-on: ubuntu-latest
outputs:
TEST_NAMES: ${{ steps.export_tests_to_env.outputs.TEST_NAMES }}
is-secret-set: ${{ steps.check-secret-set.outputs.is-secret-set }}

steps:
- name: check if the necessary secrets are set in github secrets
id: check-secret-set
env:
CUSTOMER: ${{ secrets.CUSTOMER }}
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.PASSWORD }}
CLIENT_ID: ${{ secrets.CLIENT_ID_PROD }}
SECRET_KEY: ${{ secrets.SECRET_KEY_PROD }}
REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
run: "echo \"is-secret-set=${{ env.CUSTOMER != '' && env.USERNAME != '' && env.PASSWORD != '' && env.CLIENT_ID != '' && env.SECRET_KEY != '' && env.REGISTRY_USERNAME != '' && env.REGISTRY_PASSWORD != '' }}\" >> $GITHUB_OUTPUT\n"

- id: export_tests_to_env
name: set test name
run: |
echo "TEST_NAMES=$input" >> $GITHUB_OUTPUT
env:
input: '[
"scan_nsa",
"scan_mitre",
"scan_with_exceptions",
"scan_repository",
"scan_local_file",
"scan_local_glob_files",
"scan_local_list_of_files",
"scan_nsa_and_submit_to_backend",
"scan_mitre_and_submit_to_backend",
"scan_local_repository_and_submit_to_backend",
"scan_repository_from_url_and_submit_to_backend",
"scan_with_custom_framework",
"scan_customer_configuration",
"scan_compliance_score",
"scan_custom_framework_scanning_file_scope_testing",
"scan_custom_framework_scanning_cluster_scope_testing",
"scan_custom_framework_scanning_cluster_and_file_scope_testing"
]'

run-system-tests:
strategy:
fail-fast: false
matrix:
TEST: ${{ fromJson(needs.wf-preparation.outputs.TEST_NAMES) }}
needs: [wf-preparation, pr-scanner]
if: ${{ (needs.wf-preparation.outputs.is-secret-set == 'true') && (always() && (contains(needs.*.result, 'success') || contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }}
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
pull-requests: write
steps:
- uses: actions/checkout@v4
pr-scanner:
permissions:
actions: read
artifact-metadata: read
attestations: read
checks: read
contents: write
deployments: read
discussions: read
id-token: write
issues: read
models: read
packages: read
pages: read
pull-requests: write
repository-projects: read
security-events: read
statuses: read
uses: ./.github/workflows/a-pr-scanner.yaml
with:
fetch-depth: 0
submodules: recursive
RELEASE: ""
CLIENT: test
CGO_ENABLED: 0
GO111MODULE: ""
secrets: inherit

- uses: actions/setup-go@v4
name: Installing go
with:
go-version: "1.25"
wf-preparation:
name: secret-validator
runs-on: ubuntu-latest
outputs:
is-secret-set: ${{ steps.check-secret-set.outputs.is-secret-set }}

- uses: anchore/sbom-action/download-syft@v0
name: Setup Syft
steps:
- name: check if the necessary secrets are set in github secrets
id: check-secret-set
env:
CUSTOMER: ${{ secrets.CUSTOMER }}
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.PASSWORD }}
CLIENT_ID: ${{ secrets.CLIENT_ID_PROD }}
SECRET_KEY: ${{ secrets.SECRET_KEY_PROD }}
REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
run: "echo \"is-secret-set=${{ env.CUSTOMER != '' && env.USERNAME != '' && env.PASSWORD != '' && env.CLIENT_ID != '' && env.SECRET_KEY != '' && env.REGISTRY_USERNAME != '' && env.REGISTRY_PASSWORD != '' }}\" >> $GITHUB_OUTPUT\n"

- uses: goreleaser/goreleaser-action@v6
name: Build
with:
distribution: goreleaser
version: latest
args: build --clean --snapshot --single-target
env:
RELEASE: ""
CLIENT: test
CGO_ENABLED: 0

- name: chmod +x
run: chmod +x -R ${PWD}/dist/cli_linux_amd64_v1/kubescape
run-system-tests:
needs: [wf-preparation, pr-scanner]
if: ${{ (needs.wf-preparation.outputs.is-secret-set == 'true') && (always() && (contains(needs.*.result, 'success') || contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }}
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
pull-requests: write
steps:
- name: Set dispatch info
id: dispatch-info
run: |
# Correlation ID WITHOUT attempt - so re-runs can find the original run
CORRELATION_ID="${GITHUB_REPOSITORY##*/}-${{ github.run_id }}"
echo "correlation_id=${CORRELATION_ID}" >> "$GITHUB_OUTPUT"
echo "Correlation ID: ${CORRELATION_ID}, Attempt: ${{ github.run_attempt }}"

- name: Checkout systests repo
uses: actions/checkout@v4
with:
repository: armosec/system-tests
path: system-tests
- name: Generate GitHub App token
id: app-token
uses: actions/create-github-app-token@v1
with:
app-id: ${{ secrets.E2E_DISPATCH_APP_ID }}
private-key: ${{ secrets.E2E_DISPATCH_APP_PRIVATE_KEY }}
owner: armosec
repositories: shared-workflows

- uses: actions/setup-python@v4
with:
python-version: "3.9"
cache: "pip"
- name: Dispatch system tests to private repo
if: ${{ github.run_attempt == 1 }}
env:
GH_TOKEN: ${{ steps.app-token.outputs.token }}
CORRELATION_ID: ${{ steps.dispatch-info.outputs.correlation_id }}
KS_BRANCH: ${{ github.head_ref || github.ref_name }}
run: |
echo "Dispatching E2E tests with correlation_id: ${CORRELATION_ID}"
echo "Using test group: KUBESCAPE_CLI_E2E"

- name: create env
run: ./create_env.sh
working-directory: system-tests
gh api "repos/armosec/shared-workflows/dispatches" \
-f event_type="e2e-test-trigger" \
-f "client_payload[correlation_id]=${CORRELATION_ID}" \
-f "client_payload[github_repository]=${GITHUB_REPOSITORY}" \
-f "client_payload[environment]=production" \
-f "client_payload[tests_groups]=KUBESCAPE_CLI_E2E" \
-f "client_payload[systests_branch]=master" \
-f "client_payload[ks_branch]=${KS_BRANCH}"

- name: Generate uuid
id: uuid
run: |
echo "RANDOM_UUID=$(uuidgen)" >> $GITHUB_OUTPUT
echo "Dispatch completed"

- name: Create k8s Kind Cluster
id: kind-cluster-install
uses: helm/kind-action@v1.10.0
with:
cluster_name: ${{ steps.uuid.outputs.RANDOM_UUID }}
- name: Find E2E workflow run
id: find-run
env:
GH_TOKEN: ${{ steps.app-token.outputs.token }}
CORRELATION_ID: ${{ steps.dispatch-info.outputs.correlation_id }}
run: |
for i in {1..15}; do
run_id=$(gh api "repos/armosec/shared-workflows/actions/runs?event=repository_dispatch&per_page=30" \
--jq '.workflow_runs | map(select(.name | contains("'"$CORRELATION_ID"'"))) | first | .id // empty')

- name: run-tests-on-local-built-kubescape
env:
CUSTOMER: ${{ secrets.CUSTOMER }}
USERNAME: ${{ secrets.USERNAME }}
PASSWORD: ${{ secrets.PASSWORD }}
CLIENT_ID: ${{ secrets.CLIENT_ID_PROD }}
SECRET_KEY: ${{ secrets.SECRET_KEY_PROD }}
REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
working-directory: system-tests
run: |
echo "Test history:"
echo " ${{ matrix.TEST }} " >/tmp/testhistory
cat /tmp/testhistory
source systests_python_env/bin/activate
if [ -n "$run_id" ]; then
echo "run_id=${run_id}" >> "$GITHUB_OUTPUT"
gh api "repos/armosec/shared-workflows/actions/runs/${run_id}" --jq '"url=" + .html_url' >> "$GITHUB_OUTPUT"
exit 0
fi
echo "Attempt $i: waiting for run..."
sleep $((i < 5 ? 10 : 30))
done
echo "::error::Could not find workflow run"
exit 1

python3 systest-cli.py \
-t ${{ matrix.TEST }} \
-b production \
-c CyberArmorTests \
--duration 3 \
--logger DEBUG \
--kwargs kubescape=${GITHUB_WORKSPACE}/dist/cli_linux_amd64_v1/kubescape
- name: Re-run failed jobs in private repo
id: rerun
if: ${{ github.run_attempt > 1 }}
env:
GH_TOKEN: ${{ steps.app-token.outputs.token }}
RUN_ID: ${{ steps.find-run.outputs.run_id }}
run: |
conclusion=$(gh api "repos/armosec/shared-workflows/actions/runs/${RUN_ID}" --jq '.conclusion')
echo "Previous conclusion: $conclusion"

deactivate
if [ "$conclusion" = "success" ]; then
echo "Previous run passed. Nothing to re-run."
echo "skip=true" >> "$GITHUB_OUTPUT"
exit 0
fi

- name: Test Report
uses: mikepenz/action-junit-report@v5
if: always()
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
report_paths: "system-tests/**/results_xml_format/**.xml"
commit: ${{github.event.workflow_run.head_sha}}
# Full rerun if cancelled, partial if failed
if [ "$conclusion" = "cancelled" ]; then
echo "Run was cancelled - triggering full re-run"
gh api --method POST "repos/armosec/shared-workflows/actions/runs/${RUN_ID}/rerun"
else
echo "Re-running failed jobs only"
gh api --method POST "repos/armosec/shared-workflows/actions/runs/${RUN_ID}/rerun-failed-jobs"
fi

# Wait for status to flip from 'completed'
for i in {1..30}; do
[ "$(gh api "repos/armosec/shared-workflows/actions/runs/${RUN_ID}" --jq '.status')" != "completed" ] && break
sleep 2
done

- name: Wait for E2E tests to complete
if: ${{ steps.rerun.outputs.skip != 'true' }}
env:
GH_TOKEN: ${{ steps.app-token.outputs.token }}
RUN_ID: ${{ steps.find-run.outputs.run_id }}
URL: ${{ steps.find-run.outputs.url }}
run: |
echo "Monitoring: ${URL}"

for i in {1..60}; do # 60 iterations × 60s = 1 hour max
read status conclusion < <(gh api "repos/armosec/shared-workflows/actions/runs/${RUN_ID}" \
--jq '[.status, .conclusion // "null"] | @tsv')

echo "Status: ${status} | Conclusion: ${conclusion}"

if [ "$status" = "completed" ]; then
if [ "$conclusion" = "success" ]; then
echo "E2E tests passed!"
exit 0
fi

echo "::error::E2E tests failed: ${conclusion}"
echo ""

# Get failed job IDs to a file first
gh api "repos/armosec/shared-workflows/actions/runs/${RUN_ID}/jobs" \
--jq '.jobs[] | select(.conclusion == "failure") | [.id, .name, (.steps[] | select(.conclusion == "failure") | .name)] | @tsv' > /tmp/failed_jobs.txt

# Process each failed job
while IFS=$'\t' read -r job_id job_name step_name; do
# Extract test name: "run-helm-e2e / ST (relevancy_python)" → "relevancy_python"
test_name=$(echo "$job_name" | sed 's/.*(\(.*\))/\1/')

echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "${job_name}"
echo " Step: ${step_name}"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

# Fetch logs to temp file
gh api "repos/armosec/shared-workflows/actions/jobs/${job_id}/logs" 2>/dev/null > /tmp/job_logs.txt

# Show summary in console
grep -E "(ERROR|FAILURE)" /tmp/job_logs.txt | tail -10
echo ""

# Save to separate file per test
log_file="failed_${test_name}.txt"
echo "════════════════════════════════════════" > "$log_file"
echo "${job_name}" >> "$log_file"
echo " Step: ${step_name}" >> "$log_file"
echo "════════════════════════════════════════" >> "$log_file"
last_endgroup=$(grep -n "##\\[endgroup\\]" /tmp/job_logs.txt | tail -1 | cut -d: -f1)
if [ -n "$last_endgroup" ]; then
tail -n +$((last_endgroup + 1)) /tmp/job_logs.txt >> "$log_file"
else
tail -500 /tmp/job_logs.txt >> "$log_file"
fi
done < /tmp/failed_jobs.txt

echo "View full logs: ${URL}"
exit 1
fi

sleep 60
done

echo "::error::Timeout waiting for tests"
exit 1

- name: Upload failed step logs
if: failure()
uses: actions/upload-artifact@v4
with:
name: failed-e2e-logs-attempt-${{ github.run_attempt }}
path: failed_*.txt
retention-days: 7
6 .github/workflows/02-release.yaml vendored
@@ -103,6 +103,12 @@ jobs:
REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}

- name: Update new version in krew-index
if: github.event_name != 'workflow_dispatch' || inputs.skip_publish != true
uses: rajatjindal/krew-release-bot@v0.0.47
with:
krew_template_file: .krew.yaml

- name: List collected system-test results (debug)
if: always()
run: |
3 .github/workflows/a-pr-scanner.yaml vendored
@@ -70,8 +70,7 @@ jobs:

- name: golangci-lint
continue-on-error: false
uses: golangci/golangci-lint-action@v8
uses: golangci/golangci-lint-action@v9
with:
version: v2.1
args: --timeout 10m
only-new-issues: true
2 .github/workflows/scorecard.yml vendored
@@ -37,7 +37,7 @@ jobs:
persist-credentials: false

- name: "Run analysis"
uses: ossf/scorecard-action@v2.4.0
uses: ossf/scorecard-action@v2.4.3
with:
results_file: results.sarif
results_format: sarif
.goreleaser.yaml
@@ -18,6 +18,7 @@ archives:
- id: cli
ids:
- cli

formats:
- binary
- tar.gz
@@ -35,9 +36,10 @@ builds:
- amd64
- arm64
ldflags:
- -s -w
- -X "github.com/kubescape/kubescape/v3/core/cautils.BuildNumber={{.Env.RELEASE}}"
- -X "github.com/kubescape/kubescape/v3/core/cautils.Client={{.Env.CLIENT}}"
- -X main.version={{.Version}}
- -X main.commit={{.Commit}}
- -X main.date={{.Date}}
- -X github.com/kubescape/backend/pkg/versioncheck.Client={{.Env.CLIENT}}
hooks:
post:
- cmd: >
60 .krew.yaml Normal file
@@ -0,0 +1,60 @@
apiVersion: krew.googlecontainertools.github.com/v1alpha2
kind: Plugin
metadata:
  name: kubescape
spec:
  version: {{ .TagName }}
  platforms:
  - selector:
      matchLabels:
        os: linux
        arch: amd64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_linux_amd64.tar.gz" .TagName) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: linux
        arch: arm64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_linux_arm64.tar.gz" .TagName) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: darwin
        arch: amd64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_darwin_amd64.tar.gz" .TagName) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: darwin
        arch: arm64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_darwin_arm64.tar.gz" .TagName) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: windows
        arch: amd64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_windows_amd64.tar.gz" .TagName) .TagName }}
    bin: kubescape.exe
  - selector:
      matchLabels:
        os: windows
        arch: arm64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_windows_arm64.tar.gz" .TagName) .TagName }}
    bin: kubescape.exe
  shortDescription: Scan resources and cluster configs against security frameworks.
  description: |
    Kubescape is the first tool for testing if Kubernetes is deployed securely
    according to mitigations and best practices. It includes risk analysis,
    security compliance, and misconfiguration scanning with an easy-to-use
    CLI interface, flexible output formats, and automated scanning capabilities.

    Features:
    - Risk analysis: Identify vulnerabilities and security risks in your cluster
    - Security compliance: Check your cluster against multiple security frameworks
    - Misconfiguration scanning: Detect security misconfigurations in your workloads
    - Flexible output: Results in JSON, SARIF, HTML, JUnit, and Prometheus formats
    - CI/CD integration: Easily integrate into your CI/CD pipeline
  homepage: https://kubescape.io/
  caveats: |
    Requires kubectl and basic knowledge of Kubernetes.
    Run 'kubescape scan' to scan your Kubernetes cluster or manifests.
273 KREW_RELEASE.md Normal file
@@ -0,0 +1,273 @@
# Krew Release Automation Guide

This document explains how kubescape automates publishing to krew, the Kubernetes plugin package manager.

## What is Krew?

Krew is a plugin manager for `kubectl`. It allows users to discover and install `kubectl` plugins easily. You can learn more about krew at [https://krew.sigs.k8s.io/](https://krew.sigs.k8s.io/).

## How kubescape publishes to krew

We use the [krew-release-bot](https://github.com/rajatjindal/krew-release-bot) to automatically create pull requests to the [kubernetes-sigs/krew-index](https://github.com/kubernetes-sigs/krew-index) repository whenever a new release of kubescape is published.

### Setup Overview

The automation consists of three components:

1. **`.krew.yaml`** - A template file that the bot uses to generate the krew plugin manifest
2. **`.github/workflows/02-release.yaml`** - GitHub Actions workflow that runs the krew-release-bot after a successful release (the relevant step is shown below)
3. **`.goreleaser.yaml`** - GoReleaser configuration that defines the krew manifest (though upload is skipped)
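
For reference, the workflow step that invokes the bot, taken verbatim from the `.github/workflows/02-release.yaml` change shown earlier on this page:

```yaml
- name: Update new version in krew-index
  if: github.event_name != 'workflow_dispatch' || inputs.skip_publish != true
  uses: rajatjindal/krew-release-bot@v0.0.47
  with:
    krew_template_file: .krew.yaml
```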

### Why Use krew-release-bot Instead of GoReleaser's Built-in Krew Support?

You might have noticed that **GoReleaser has built-in krew support** in its `krews` section. However, almost all projects (including stern) use `skip_upload: true` and rely on **krew-release-bot** instead. Here's why:

#### Problems with GoReleaser's Built-in Krew Publishing

To use GoReleaser's direct krew publishing, you would need to:

```yaml
krews:
  - name: kubescape
    skip_upload: false # Instead of true
    repository:
      owner: kubernetes-sigs
      name: krew-index
      token: "{{ .Env.KREW_INDEX_TOKEN }}" # Required!
      pull_request:
        enabled: true # Requires GoReleaser Pro for cross-repo PRs
```

This approach has several critical issues:

1. **Permission Barrier**: Almost no one has write access to `kubernetes-sigs/krew-index`. You would need special permissions from the Krew maintainers, which is rarely granted.

2. **Security Risk**: You'd need to store a GitHub personal access token with write access to the krew-index in your repository secrets. This token could be compromised and used to make unauthorized changes to the krew-index.

3. **GoReleaser Pro Required**: Creating pull requests to a different repository (cross-repository) requires GoReleaser Pro, which is a paid product.

4. **Manual Work**: Even if you had access, you'd need to configure and maintain the repository settings and tokens yourself, and potentially deal with rate limits and authentication issues.

#### Why krew-release-bot is the Right Solution

The **krew-release-bot** was created by the Kubernetes community (in collaboration with the Krew team) specifically to solve these problems:

- **No Repository Access Required**: The bot acts as an intermediary with pre-configured access to krew-index. You don't need write permissions.

- **No Tokens Needed**: It uses GitHub's `GITHUB_TOKEN` (automatically available in GitHub Actions) via webhooks and events. No personal access tokens required.

- **Designed for Krew**: It's specifically built for the krew-index workflow and integrates with Krew's automation.

- **Automatic Merging**: The Krew team has configured their CI to automatically test and merge PRs from krew-release-bot (usually within 5-10 minutes).

- **Officially Recommended**: The Krew team explicitly recommends this approach in their documentation as the standard way to automate plugin updates.

- **Free and Open Source**: No paid subscriptions required.

#### The Real-World Evidence

Looking at recent pull requests to `kubernetes-sigs/krew-index`, **almost all automated plugin updates are created by krew-release-bot**. You'll see patterns like:

```
Author: krew-release-bot
Title: "release new version v0.6.11 of radar"
```

This demonstrates that the Kubernetes ecosystem has standardized on krew-release-bot, not GoReleaser's built-in publishing.

#### Summary

While GoReleaser's built-in krew support exists in the code, it's **practically unusable for the krew-index repository** due to permission and security constraints. The krew-release-bot is the de facto standard because:
- It works without special permissions
- It's more secure
- It integrates with Krew's automation
- It's free and recommended by the Krew team

This is why we use `skip_upload: true` in GoReleaser and let krew-release-bot handle the actual publishing.

### The Template File

The `.krew.yaml` file in the repository root is a Go template that contains placeholders for dynamic values:

```yaml
apiVersion: krew.googlecontainertools.github.com/v1alpha2
kind: Plugin
metadata:
  name: kubescape
spec:
  version: {{ .TagName }}
  platforms:
  - selector:
      matchLabels:
        os: linux
        arch: amd64
    {{ $version := trimPrefix "v" .TagName }}{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_linux_amd64.tar.gz" $version) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: linux
        arch: arm64
    {{ $version := trimPrefix "v" .TagName }}{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_linux_arm64.tar.gz" $version) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: darwin
        arch: amd64
    {{ $version := trimPrefix "v" .TagName }}{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_darwin_amd64.tar.gz" $version) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: darwin
        arch: arm64
    {{ $version := trimPrefix "v" .TagName }}{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_darwin_arm64.tar.gz" $version) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: windows
        arch: amd64
    {{ $version := trimPrefix "v" .TagName }}{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_windows_amd64.tar.gz" $version) .TagName }}
    bin: kubescape.exe
  - selector:
      matchLabels:
        os: windows
        arch: arm64
    {{ $version := trimPrefix "v" .TagName }}{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_windows_arm64.tar.gz" $version) .TagName }}
    bin: kubescape.exe
  shortDescription: Scan resources and cluster configs against security frameworks.
  description: |
    Kubescape is the first tool for testing if Kubernetes is deployed securely
    according to mitigations and best practices. It includes risk analysis,
    security compliance, and misconfiguration scanning with an easy-to-use
    CLI interface, flexible output formats, and automated scanning capabilities.

    Features:
    - Risk analysis: Identify vulnerabilities and security risks in your cluster
    - Security compliance: Check your cluster against multiple security frameworks
    - Misconfiguration scanning: Detect security misconfigurations in your workloads
    - Flexible output: Results in JSON, SARIF, HTML, JUnit, and Prometheus formats
    - CI/CD integration: Easily integrate into your CI/CD pipeline
  homepage: https://kubescape.io/
  caveats: |
    Requires kubectl and basic knowledge of Kubernetes.
    Run 'kubescape scan' to scan your Kubernetes cluster or manifests.
```

The `{{ .TagName }}` placeholder is replaced with the release tag (e.g., `v3.0.0`), `{{ trimPrefix "v" .TagName }}` strips the `v` prefix to match the release archive names, and `{{ addURIAndSha ... }}` inserts the download URI of each platform archive together with its SHA256 checksum.
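
For a hypothetical `v3.0.0` tag, the rendered linux/amd64 entry would look roughly like this; the digest below is a placeholder, since the real value is computed from the released archive:

```yaml
- selector:
    matchLabels:
      os: linux
      arch: amd64
  uri: https://github.com/kubescape/kubescape/releases/download/v3.0.0/kubescape_3.0.0_linux_amd64.tar.gz
  sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855  # placeholder, not a real digest
  bin: kubescape
```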

### Release Workflow

The release workflow (`.github/workflows/02-release.yaml`) can be triggered in two ways (a sketch of the trigger block follows this list):

1. **Automatic**: When a new tag matching the pattern `v[0-9]+.[0-9]+.[0-9]+` is pushed to the repository
2. **Manual**: Via `workflow_dispatch` with an optional `skip_publish` input
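
A minimal sketch of what such a trigger block looks like in standard GitHub Actions syntax; the tag pattern and the `skip_publish` default come from this document, while the exact `description` wording is an assumption:

```yaml
on:
  push:
    tags:
      - "v[0-9]+.[0-9]+.[0-9]+"
  workflow_dispatch:
    inputs:
      skip_publish:
        description: "Skip publishing artifacts and the krew-index PR"  # assumed wording
        type: boolean
        default: true  # matches the default described under Manual Release Testing
```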

When the workflow is triggered:

1. GoReleaser builds and publishes the release artifacts (unless `skip_publish=true` is set)
2. The krew-release-bot step runs conditionally:
   - It **runs** when triggered by a tag push OR by `workflow_dispatch` with `skip_publish=false`
   - It **skips** when triggered by `workflow_dispatch` with `skip_publish=true` (default)
3. When it runs, the bot:
   - Reads the `.krew.yaml` template
   - Fills in the template with release information
   - Creates a pull request to the `kubernetes-sigs/krew-index` repository
   - The PR is automatically tested and merged by krew's infrastructure

### Workflow Permissions

The release job has the following permissions:

```yaml
permissions:
  actions: read
  checks: read
  contents: write
  deployments: read
  discussions: read
  id-token: write
  issues: read
  models: read
  packages: write
  pages: read
  pull-requests: read
  repository-projects: read
  statuses: read
  security-events: read
  attestations: read
  artifact-metadata: read
```

These permissions are necessary for GoReleaser to create releases and upload artifacts.

### Testing the Template

Before committing changes to `.krew.yaml`, you can test how the template will be rendered using Docker:

```bash
docker run -v $(pwd)/.krew.yaml:/tmp/.krew.yaml ghcr.io/rajatjindal/krew-release-bot:v0.0.47 \
  krew-release-bot template --tag v3.0.0 --template-file /tmp/.krew.yaml
```

This will output the generated krew manifest, allowing you to verify that:
- The version field is correct
- All download URLs are properly formatted
- The SHA256 checksums are calculated correctly

### Why skip_upload in GoReleaser?

In `.goreleaser.yaml`, the `krews` section has `skip_upload: true`:

```yaml
krews:
  - name: kubescape
    ids:
      - cli
    skip_upload: true # We use krew-release-bot instead
    homepage: https://kubescape.io/
    description: It includes risk analysis, security compliance, and misconfiguration scanning with an easy-to-use CLI interface, flexible output formats, and automated scanning capabilities.
    short_description: Scan resources and cluster configs against security frameworks.
```

This is intentional because:
- GoReleaser can generate the manifest, but submitting a cross-repository PR to krew-index would require a privileged token and GoReleaser Pro
- krew-release-bot is the tool the Krew team recommends for krew automation
- Using krew-release-bot provides automatic testing and merging of version bump PRs

### Manual Release Testing

You can test the release workflow manually without publishing to krew by using `workflow_dispatch`:

1. Go to the Actions tab in GitHub
2. Select the "02-create_release" workflow
3. Click "Run workflow"
4. The `skip_publish` input defaults to `true` (publishing will be skipped)
5. Set `skip_publish` to `false` if you want to test the full release process, including krew indexing
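
The same dispatch can be issued from a terminal with the GitHub CLI. This is a minimal sketch assuming `gh` is authenticated against this repository; the workflow file name is the one in `.github/workflows/`:

```bash
# Dry run: build everything but skip publishing (matches the default input)
gh workflow run 02-release.yaml -f skip_publish=true

# Full test, including the krew-index PR
gh workflow run 02-release.yaml -f skip_publish=false
```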

### Making Changes to the Template

If you need to update the krew manifest (e.g., change the description, add platforms, or update the binary location):

1. Edit the `.krew.yaml` file
2. Test your changes with the Docker command shown above
3. Commit and push the changes
4. The next release will use the updated template

### Installing kubescape via krew

Once the new version is merged into krew-index, users can refresh their local index and install it with:

```bash
kubectl krew update
kubectl krew install kubescape
```

### Further Reading

- [Krew official documentation](https://krew.sigs.k8s.io/docs/developer-guide/)
- [krew-release-bot repository](https://github.com/rajatjindal/krew-release-bot)
- [Krew plugin submission guide](https://krew.sigs.k8s.io/docs/developer-guide/develop/plugins/)
@@ -37,10 +37,10 @@ _Please [star ⭐](https://github.com/kubescape/kubescape/stargazers) the repo i
- [Demo](#-demo)
- [Quick Start](#-quick-start)
- [Installation](#-installation)
- [CLI Commands](#-cli-commands)
- [CLI Commands](#%EF%B8%8F-cli-commands)
- [Usage Examples](#-usage-examples)
- [Architecture](#-architecture)
- [In-Cluster Operator](#-in-cluster-operator)
- [Architecture](#%EF%B8%8F-architecture)
- [In-Cluster Operator](#%EF%B8%8F-in-cluster-operator)
- [Integrations](#-integrations)
- [Community](#-community)
- [Changelog](#changelog)

@@ -25,14 +25,16 @@ This guide covers how to build Kubescape from source.

- **Docker** - [Installation Guide](https://docs.docker.com/get-docker/)
- **Docker Buildx** - For multi-platform builds (included with Docker Desktop)
- **GoReleaser** - [Installation Guide](https://goreleaser.com/install/)

### Verify Prerequisites

```bash
go version # Should be 1.21 or higher
go version # Should be 1.23 or higher
git --version
make --version
docker --version # Optional
docker --version # Optional
goreleaser --version # Optional
```

---

@@ -62,6 +64,13 @@ make build

go build -o kubescape .
```

### Build with GoReleaser

```bash
# Build for your current platform
RELEASE=v0.0.1 CLIENT=local goreleaser build --snapshot --clean --single-target
```
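
GoReleaser writes the snapshot binary under `dist/`. On linux/amd64 the CI in this repository references `dist/cli_linux_amd64_v1/kubescape`, so a quick smoke check of a local build might be (the path varies by OS and architecture):

```bash
# Verify the freshly built snapshot binary runs
./dist/cli_linux_amd64_v1/kubescape version
```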

### Cross-Compilation

Build for different platforms:
@@ -87,50 +96,19 @@ GOOS=windows GOARCH=amd64 go build -o kubescape-windows-amd64.exe .

## Building Docker Images

### Build All Images
Kubescape uses [GoReleaser](https://goreleaser.com/) to build its Docker images. The Dockerfiles are specifically designed to work with GoReleaser's build pipeline, which handles cross-compilation and places binaries in the expected directory structure.

### Build with GoReleaser

The recommended way to build Docker images locally is using GoReleaser. Note that the `RELEASE`, `CLIENT`, and `RUN_E2E` environment variables are required:

```bash
make all
# Build all artifacts and Docker images locally without publishing
# --skip=before,krew,nfpm,sbom skips unnecessary steps for faster local builds
RELEASE=v0.0.1 CLIENT=local RUN_E2E=false goreleaser release --snapshot --clean --skip=before,nfpm,sbom
```

### Build CLI Docker Image

Build a Docker image containing only the Kubescape CLI:

```bash
# First build the binary
make build

# Then build the Docker image
docker buildx build \
-t kubescape-cli:latest \
-f build/kubescape-cli.Dockerfile \
--build-arg="ks_binary=kubescape" \
--load .
```

### Build Full Kubescape Image

Build the complete Kubescape image (includes HTTP handler):

```bash
docker buildx build \
-t kubescape:latest \
-f build/Dockerfile \
--load .
```

### Multi-Platform Build

Build for multiple architectures:

```bash
docker buildx build \
-t kubescape:latest \
-f build/Dockerfile \
--platform linux/amd64,linux/arm64 \
--push .
```
Please read the [GoReleaser documentation](https://goreleaser.com/customization/dockers_v2/#testing-locally) for more details on using it for local testing.

---

@@ -227,14 +205,13 @@ CGO_ENABLED=0 go build -o kubescape .

### Docker Build Fails

Ensure the Docker daemon is running and you have sufficient permissions:
Ensure the Docker daemon is running and you have sufficient permissions.

If you encounter an error like `failed to calculate checksum ... "/linux/amd64/kubescape": not found`, it usually means you are trying to run `docker build` manually. Because the Dockerfiles are optimized for GoReleaser, you should use the `goreleaser release --snapshot` command described in the [Building Docker Images](#building-docker-images) section instead.

```bash
# Check Docker status
docker info

# Run with sudo if needed (Linux)
sudo docker buildx build ...
```

### Out of Memory During Build

@@ -128,7 +128,7 @@ gha_group_start "Smoke tests"
log "Running smoke tests with $PYTHON $SMOKE_RUNNER \"$ART_PATH\""
# Run the test runner, propagate exit code
set +e
"$PYTHON" "$SMOKE_RUNNER" "$ART_PATH"
RELEASE="${RELEASE:-}" "$PYTHON" "$SMOKE_RUNNER" "$ART_PATH"
rc=$?
set -e

@@ -233,9 +233,10 @@ func (ksServer *KubescapeMcpserver) CallTool(name string, arguments map[string]i

// Get workload-level manifests
labelSelector := ""
if level == "workload" {
switch level {
case "workload":
labelSelector = "kubescape.io/context=filtered"
} else if level == "image" {
case "image":
labelSelector = "kubescape.io/context=non-filtered"
}

@@ -294,11 +295,19 @@
if !ok {
namespace = "kubescape"
}
namespaceStr, ok := namespace.(string)
if !ok {
return nil, fmt.Errorf("namespace must be a string")
}
manifestName, ok := arguments["manifest_name"]
if !ok {
return nil, fmt.Errorf("manifest_name is required")
}
manifest, err := ksServer.ksClient.VulnerabilityManifests(namespace.(string)).Get(context.Background(), manifestName.(string), metav1.GetOptions{})
manifestNameStr, ok := manifestName.(string)
if !ok {
return nil, fmt.Errorf("manifest_name must be a string")
}
manifest, err := ksServer.ksClient.VulnerabilityManifests(namespaceStr).Get(context.Background(), manifestNameStr, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get vulnerability manifest: %s", err)
}
@@ -323,21 +332,33 @@
if !ok {
namespace = "kubescape"
}
namespaceStr, ok := namespace.(string)
if !ok {
return nil, fmt.Errorf("namespace must be a string")
}
manifestName, ok := arguments["manifest_name"]
if !ok {
return nil, fmt.Errorf("manifest_name is required")
}
manifestNameStr, ok := manifestName.(string)
if !ok {
return nil, fmt.Errorf("manifest_name must be a string")
}
cveID, ok := arguments["cve_id"]
if !ok {
return nil, fmt.Errorf("cve_id is required")
}
manifest, err := ksServer.ksClient.VulnerabilityManifests(namespace.(string)).Get(context.Background(), manifestName.(string), metav1.GetOptions{})
cveIDStr, ok := cveID.(string)
if !ok {
return nil, fmt.Errorf("cve_id must be a string")
}
manifest, err := ksServer.ksClient.VulnerabilityManifests(namespaceStr).Get(context.Background(), manifestNameStr, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get vulnerability manifest: %s", err)
}
var match []v1beta1.Match
for _, m := range manifest.Spec.Payload.Matches {
if m.Vulnerability.ID == cveID.(string) {
if m.Vulnerability.ID == cveIDStr {
match = append(match, m)
}
}
@@ -358,7 +379,11 @@
if !ok {
namespace = "kubescape"
}
manifests, err := ksServer.ksClient.WorkloadConfigurationScans(namespace.(string)).List(context.Background(), metav1.ListOptions{})
namespaceStr, ok := namespace.(string)
if !ok {
return nil, fmt.Errorf("namespace must be a string")
}
manifests, err := ksServer.ksClient.WorkloadConfigurationScans(namespaceStr).List(context.Background(), metav1.ListOptions{})
if err != nil {
return nil, err
}
@@ -394,11 +419,19 @@
if !ok {
namespace = "kubescape"
}
namespaceStr, ok := namespace.(string)
if !ok {
return nil, fmt.Errorf("namespace must be a string")
}
manifestName, ok := arguments["manifest_name"]
if !ok {
return nil, fmt.Errorf("manifest_name is required")
}
manifest, err := ksServer.ksClient.WorkloadConfigurationScans(namespace.(string)).Get(context.Background(), manifestName.(string), metav1.GetOptions{})
manifestNameStr, ok := manifestName.(string)
if !ok {
return nil, fmt.Errorf("manifest_name must be a string")
}
manifest, err := ksServer.ksClient.WorkloadConfigurationScans(namespaceStr).Get(context.Background(), manifestNameStr, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get configuration manifest: %s", err)
}
@@ -448,7 +481,7 @@ func mcpServerEntrypoint() error {

// Start the server
if err := server.ServeStdio(s); err != nil {
return fmt.Errorf("Server error: %v\n", err)
return fmt.Errorf("server error: %v", err)
}
return nil
}

@@ -14,7 +14,7 @@ const (
)

var operatorExamples = fmt.Sprintf(`

# Trigger a configuration scan
%[1]s operator scan configurations

@@ -34,16 +34,16 @@ func GetOperatorCmd(ks meta.IKubescape) *cobra.Command {
Args: func(cmd *cobra.Command, args []string) error {
operatorInfo.Subcommands = append(operatorInfo.Subcommands, "operator")
if len(args) < 2 {
return errors.New("For the operator sub-command, you need to provide at least one additional sub-command. Refer to the examples above.")
return errors.New("for the operator sub-command, you need to provide at least one additional sub-command. Refer to the examples above")
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 2 {
return errors.New("For the operator sub-command, you need to provide at least one additional sub-command. Refer to the examples above.")
return errors.New("for the operator sub-command, you need to provide at least one additional sub-command. Refer to the examples above")
}
if args[0] != scanSubCommand {
return errors.New(fmt.Sprintf("For the operator sub-command, only %s is supported. Refer to the examples above.", scanSubCommand))
return fmt.Errorf("for the operator sub-command, only %s is supported. Refer to the examples above", scanSubCommand)
}
return nil
},

@@ -21,7 +21,7 @@ func TestGetOperatorCmd(t *testing.T) {
assert.Equal(t, operatorExamples, cmd.Example)

err := cmd.Args(&cobra.Command{}, []string{})
expectedErrorMessage := "For the operator sub-command, you need to provide at least one additional sub-command. Refer to the examples above."
expectedErrorMessage := "for the operator sub-command, you need to provide at least one additional sub-command. Refer to the examples above"
assert.Equal(t, expectedErrorMessage, err.Error())

err = cmd.Args(&cobra.Command{}, []string{"scan", "configurations"})
@@ -37,6 +37,6 @@ func TestGetOperatorCmd(t *testing.T) {
assert.Equal(t, expectedErrorMessage, err.Error())

err = cmd.RunE(&cobra.Command{}, []string{"random-subcommand", "random-config"})
expectedErrorMessage = "For the operator sub-command, only " + scanSubCommand + " is supported. Refer to the examples above."
expectedErrorMessage = "for the operator sub-command, only " + scanSubCommand + " is supported. Refer to the examples above"
assert.Equal(t, expectedErrorMessage, err.Error())
}

@@ -32,7 +32,7 @@ func getOperatorScanCmd(ks meta.IKubescape, operatorInfo cautils.OperatorInfo) *
return errors.New("for operator scan sub command, you must pass at least 1 more sub commands, see above examples")
}
if (args[0] != vulnerabilitiesSubCommand) && (args[0] != configurationsSubCommand) {
return errors.New(fmt.Sprintf("For the operator sub-command, only %s and %s are supported. Refer to the examples above.", vulnerabilitiesSubCommand, configurationsSubCommand))
return fmt.Errorf("for the operator sub-command, only %s and %s are supported. Refer to the examples above", vulnerabilitiesSubCommand, configurationsSubCommand)
}
return nil
},

@@ -41,6 +41,6 @@ func TestGetOperatorScanCmd(t *testing.T) {
assert.Nil(t, err)

err = cmd.RunE(&cobra.Command{}, []string{"random"})
expectedErrorMessage = "For the operator sub-command, only " + vulnerabilitiesSubCommand + " and " + configurationsSubCommand + " are supported. Refer to the examples above."
expectedErrorMessage = "for the operator sub-command, only " + vulnerabilitiesSubCommand + " and " + configurationsSubCommand + " are supported. Refer to the examples above"
assert.Equal(t, expectedErrorMessage, err.Error())
}

@@ -73,7 +73,7 @@ We will demonstrate how to use the patch command with an example of [nginx](http
sudo buildkitd
```

2. In a seperate terminal, run the `kubescape patch` command:
2. In a separate terminal, run the `kubescape patch` command:

```bash
sudo kubescape patch --image docker.io/library/nginx:1.22
14 cmd/root.go
@@ -44,12 +44,12 @@ var ksExamples = fmt.Sprintf(`
|
||||
%[1]s config view
|
||||
`, cautils.ExecName())
|
||||
|
||||
func NewDefaultKubescapeCommand(ctx context.Context) *cobra.Command {
|
||||
func NewDefaultKubescapeCommand(ctx context.Context, ksVersion, ksCommit, ksDate string) *cobra.Command {
|
||||
ks := core.NewKubescape(ctx)
|
||||
return getRootCmd(ks)
|
||||
return getRootCmd(ks, ksVersion, ksCommit, ksDate)
|
||||
}
|
||||
|
||||
func getRootCmd(ks meta.IKubescape) *cobra.Command {
|
||||
func getRootCmd(ks meta.IKubescape, ksVersion, ksCommit, ksDate string) *cobra.Command {
|
||||
|
||||
rootCmd := &cobra.Command{
|
||||
Use: "kubescape",
|
||||
@@ -93,7 +93,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
|
||||
rootCmd.AddCommand(download.GetDownloadCmd(ks))
|
||||
rootCmd.AddCommand(list.GetListCmd(ks))
|
||||
rootCmd.AddCommand(completion.GetCompletionCmd())
|
||||
rootCmd.AddCommand(version.GetVersionCmd(ks))
|
||||
rootCmd.AddCommand(version.GetVersionCmd(ks, ksVersion, ksCommit, ksDate))
|
||||
rootCmd.AddCommand(config.GetConfigCmd(ks))
|
||||
rootCmd.AddCommand(update.GetUpdateCmd(ks))
|
||||
rootCmd.AddCommand(fix.GetFixCmd(ks))
|
||||
@@ -116,7 +116,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
|
||||
return rootCmd
|
||||
}
|
||||
|
||||
func Execute(ctx context.Context) error {
|
||||
ks := NewDefaultKubescapeCommand(ctx)
|
||||
func Execute(ctx context.Context, ksVersion, ksCommit, ksDate string) error {
|
||||
ks := NewDefaultKubescapeCommand(ctx, ksVersion, ksCommit, ksDate)
|
||||
return ks.Execute()
|
||||
}
|
||||
}
|
||||
@@ -9,16 +9,16 @@ import (
|
||||
|
||||
func TestNewDefaultKubescapeCommand(t *testing.T) {
|
||||
t.Run("NewDefaultKubescapeCommand", func(t *testing.T) {
|
||||
cmd := NewDefaultKubescapeCommand(context.Background())
|
||||
cmd := NewDefaultKubescapeCommand(context.Background(), "", "", "")
|
||||
assert.NotNil(t, cmd)
|
||||
})
|
||||
}
|
||||
|
||||
func TestExecute(t *testing.T) {
|
||||
t.Run("Execute", func(t *testing.T) {
|
||||
err := Execute(context.Background())
|
||||
err := Execute(context.Background(), "", "", "")
|
||||
if err != nil {
|
||||
assert.EqualErrorf(t, err, "unknown command \"^\\\\QTestExecute\\\\E$\" for \"kubescape\"", err.Error())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -93,6 +93,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.EnableRegoPrint, "enable-rego-prints", "", false, "Enable sending to rego prints to the logs (use with debug log level: -l debug)")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.ScanImages, "scan-images", "", false, "Scan resources images")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.UseDefaultMatchers, "use-default-matchers", "", true, "Use default matchers (true) or CPE matchers (false) for image scanning")
|
||||
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.LabelsToCopy, "labels-to-copy", nil, "Labels to copy from workloads to scan reports for easy identification. e.g: --labels-to-copy=app,team,environment")
|
||||
|
||||
scanCmd.PersistentFlags().MarkDeprecated("fail-threshold", "use '--compliance-threshold' flag instead. Flag will be removed at 1.Dec.2023")
|
||||
scanCmd.PersistentFlags().MarkDeprecated("create-account", "Create account is no longer supported. In case of a missing Account ID and a configured backend server, a new account id will be generated automatically by Kubescape. Feel free to contact the Kubescape maintainers for more information.")
|
||||
|
||||
@@ -35,7 +35,7 @@ func Test_validateControlScanInfo(t *testing.T) {
|
||||
t.Run(
|
||||
tc.Description,
|
||||
func(t *testing.T) {
|
||||
var want error = tc.Want
|
||||
var want = tc.Want
|
||||
|
||||
got := validateControlScanInfo(tc.ScanInfo)
|
||||
|
||||
@@ -85,7 +85,7 @@ func Test_validateFrameworkScanInfo(t *testing.T) {
|
||||
t.Run(
|
||||
tc.Description,
|
||||
func(t *testing.T) {
|
||||
var want error = tc.Want
|
||||
var want = tc.Want
|
||||
|
||||
got := validateFrameworkScanInfo(tc.ScanInfo)
|
||||
|
||||
|
||||
@@ -95,7 +95,7 @@ func setWorkloadScanInfo(scanInfo *cautils.ScanInfo, kind string, name string) {
|
||||
scanInfo.ScanObject.SetKind(kind)
|
||||
scanInfo.ScanObject.SetName(name)
|
||||
|
||||
scanInfo.SetPolicyIdentifiers([]string{"workloadscan"}, v1.KindFramework)
|
||||
scanInfo.SetPolicyIdentifiers([]string{"workloadscan", "allcontrols"}, v1.KindFramework)
|
||||
|
||||
if scanInfo.FilePath != "" {
|
||||
scanInfo.InputPatterns = []string{scanInfo.FilePath}
|
||||
|
||||
@@ -28,6 +28,10 @@ func TestSetWorkloadScanInfo(t *testing.T) {
|
||||
Identifier: "workloadscan",
|
||||
Kind: v1.KindFramework,
|
||||
},
|
||||
{
|
||||
Identifier: "allcontrols",
|
||||
Kind: v1.KindFramework,
|
||||
},
|
||||
},
|
||||
ScanType: cautils.ScanTypeWorkload,
|
||||
ScanObject: &objectsenvelopes.ScanObject{
|
||||
@@ -59,12 +63,19 @@ func TestSetWorkloadScanInfo(t *testing.T) {
|
||||
t.Errorf("got: %v, want: %v", scanInfo.ScanObject.Metadata.Name, tc.want.ScanObject.Metadata.Name)
|
||||
}
|
||||
|
||||
if len(scanInfo.PolicyIdentifier) != 1 {
|
||||
t.Errorf("got: %v, want: %v", len(scanInfo.PolicyIdentifier), 1)
|
||||
if len(scanInfo.PolicyIdentifier) != len(tc.want.PolicyIdentifier) {
|
||||
t.Errorf("got: %v policy identifiers, want: %v", len(scanInfo.PolicyIdentifier), len(tc.want.PolicyIdentifier))
|
||||
}
|
||||
|
||||
if scanInfo.PolicyIdentifier[0].Identifier != tc.want.PolicyIdentifier[0].Identifier {
|
||||
t.Errorf("got: %v, want: %v", scanInfo.PolicyIdentifier[0].Identifier, tc.want.PolicyIdentifier[0].Identifier)
|
||||
for i, wantPolicy := range tc.want.PolicyIdentifier {
|
||||
if i < len(scanInfo.PolicyIdentifier) {
|
||||
if scanInfo.PolicyIdentifier[i].Identifier != wantPolicy.Identifier {
|
||||
t.Errorf("got: %v, want: %v", scanInfo.PolicyIdentifier[i].Identifier, wantPolicy.Identifier)
|
||||
}
|
||||
if scanInfo.PolicyIdentifier[i].Kind != wantPolicy.Kind {
|
||||
t.Errorf("got: %v, want: %v", scanInfo.PolicyIdentifier[i].Kind, wantPolicy.Kind)
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
)
|
||||
|
||||
@@ -50,7 +50,7 @@ func TestValidateImageScanInfo(t *testing.T) {
|
||||
t.Run(
|
||||
tc.Description,
|
||||
func(t *testing.T) {
|
||||
var want error = tc.Want
|
||||
var want = tc.Want
|
||||
|
||||
got := ValidateImageScanInfo(tc.ScanInfo)
|
||||
|
||||
|
||||
@@ -9,21 +9,29 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func GetVersionCmd(ks meta.IKubescape) *cobra.Command {
|
||||
func GetVersionCmd(ks meta.IKubescape, version, commit, date string) *cobra.Command {
|
||||
versionCmd := &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Get current version",
|
||||
Long: ``,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
v := versioncheck.NewIVersionCheckHandler(ks.Context())
|
||||
_ = v.CheckLatestVersion(ks.Context(), versioncheck.NewVersionCheckRequest("", versioncheck.BuildNumber, "", "", "version", nil))
|
||||
_ = v.CheckLatestVersion(ks.Context(), versioncheck.NewVersionCheckRequest("", version, "", "", "version", nil))
|
||||
|
||||
_, _ = fmt.Fprintf(cmd.OutOrStdout(),
|
||||
"Your current version is: %s\n",
|
||||
versioncheck.BuildNumber,
|
||||
version,
|
||||
)
|
||||
_, _ = fmt.Fprintf(cmd.OutOrStdout(),
|
||||
"Build commit: %s\n",
|
||||
commit,
|
||||
)
|
||||
_, _ = fmt.Fprintf(cmd.OutOrStdout(),
|
||||
"Build date: %s\n",
|
||||
date,
|
||||
)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
return versionCmd
|
||||
}
|
||||
}
|
||||
@@ -21,12 +21,12 @@ func TestGetVersionCmd(t *testing.T) {
 		{
 			name:        "Undefined Build Number",
 			buildNumber: "unknown",
-			want:        "Your current version is: unknown\n",
+			want:        "Your current version is: unknown\nBuild commit: \nBuild date: \n",
 		},
 		{
 			name:        "Defined Build Number: v3.0.1",
 			buildNumber: "v3.0.1",
-			want:        "Your current version is: v3.0.1\n",
+			want:        "Your current version is: v3.0.1\nBuild commit: \nBuild date: \n",
 		},
 	}
 	for _, tt := range tests {
@@ -34,7 +34,7 @@ func TestGetVersionCmd(t *testing.T) {
 		versioncheck.BuildNumber = tt.buildNumber

 		ks := core.NewKubescape(context.TODO())
-		if cmd := GetVersionCmd(ks); cmd != nil {
+		if cmd := GetVersionCmd(ks, tt.buildNumber, "", ""); cmd != nil {
 			buf := bytes.NewBufferString("")
 			cmd.SetOut(buf)
 			cmd.Execute()
@@ -46,4 +46,4 @@ func TestGetVersionCmd(t *testing.T) {
 			}
 		})
 	}
 }
@@ -1,21 +0,0 @@
-package cautils
-
-import (
-	"os"
-
-	"github.com/kubescape/backend/pkg/versioncheck"
-)
-
-var BuildNumber string
-var Client string
-
-func init() {
-	if BuildNumber != "" {
-		versioncheck.BuildNumber = BuildNumber
-	} else {
-		versioncheck.BuildNumber = os.Getenv("RELEASE")
-	}
-	if Client != "" {
-		versioncheck.Client = Client
-	}
-}
@@ -521,9 +521,3 @@ func GetTenantConfig(accountID, accessKey, clusterName, customClusterName string
 }

-// firstNonEmpty returns the first non-empty string
-func firstNonEmpty(s1, s2 string) string {
-	if s1 != "" {
-		return s1
-	}
-	return s2
-}

@@ -69,27 +69,42 @@ type OPASessionObj struct {
 	TopWorkloadsByScore []reporthandling.IResource
 	TemplateMapping     map[string]MappingNodes // Map chart obj to template (only for rendering from path)
 	TriggeredByCLI      bool
+	LabelsToCopy        []string // Labels to copy from workloads to scan reports
 }

 func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework, k8sResources K8SResources, scanInfo *ScanInfo) *OPASessionObj {
+	clusterSize := estimateClusterSize(k8sResources)
+	if clusterSize < 100 {
+		clusterSize = 100
+	}
+
 	return &OPASessionObj{
 		Report:                &reporthandlingv2.PostureReport{},
 		Policies:              frameworks,
 		K8SResources:          k8sResources,
-		AllResources:          make(map[string]workloadinterface.IMetadata),
-		ResourcesResult:       make(map[string]resourcesresults.Result),
-		ResourcesPrioritized:  make(map[string]prioritization.PrioritizedResource),
-		InfoMap:               make(map[string]apis.StatusInfo),
-		ResourceToControlsMap: make(map[string][]string),
-		ResourceSource:        make(map[string]reporthandling.Source),
+		AllResources:          make(map[string]workloadinterface.IMetadata, clusterSize),
+		ResourcesResult:       make(map[string]resourcesresults.Result, clusterSize),
+		ResourcesPrioritized:  make(map[string]prioritization.PrioritizedResource, clusterSize/10),
+		InfoMap:               make(map[string]apis.StatusInfo, clusterSize/10),
+		ResourceToControlsMap: make(map[string][]string, clusterSize/2),
+		ResourceSource:        make(map[string]reporthandling.Source, clusterSize),
 		SessionID:             scanInfo.ScanID,
 		Metadata:              scanInfoToScanMetadata(ctx, scanInfo),
 		OmitRawResources:      scanInfo.OmitRawResources,
 		TriggeredByCLI:        scanInfo.TriggeredByCLI,
-		TemplateMapping:       make(map[string]MappingNodes),
+		TemplateMapping:       make(map[string]MappingNodes, clusterSize/10),
+		LabelsToCopy:          scanInfo.LabelsToCopy,
 	}
 }

+func estimateClusterSize(k8sResources K8SResources) int {
+	total := 0
+	for _, resourceIDs := range k8sResources {
+		total += len(resourceIDs)
+	}
+	return total
+}
+
 // SetTopWorkloads sets the top workloads by score
 func (sessionObj *OPASessionObj) SetTopWorkloads() {
 	count := 0
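Presizing the session maps is a small but real win on large clusters: a Go map created with a capacity hint allocates its buckets up front, so inserts up to roughly that size avoid incremental growth and rehashing. A minimal standalone illustration of the idiom (not kubescape code):

package main

import "fmt"

func main() {
	const n = 5000
	// The capacity hint preallocates buckets for ~n entries,
	// so the insert loop below triggers no incremental map growth.
	m := make(map[int]string, n)
	for i := 0; i < n; i++ {
		m[i] = "resource"
	}
	fmt.Println(len(m))
}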
@@ -76,14 +76,18 @@ func ShouldSkipRule(control reporthandling.Control, rule reporthandling.PolicyRu
 // In local build (BuildNumber = ""):
 // returns true only if rule doesn't have the "until" attribute
 func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version string) bool {
+	normalizedVersion := version
+	if version != "" && !semver.IsValid(version) {
+		normalizedVersion = "v" + version
+	}
+
 	if from, ok := attributes["useFromKubescapeVersion"]; ok && from != nil {
 		switch sfrom := from.(type) {
 		case string:
-			if version != "" && semver.Compare(version, sfrom) == -1 {
+			if normalizedVersion != "" && semver.IsValid(normalizedVersion) && semver.Compare(normalizedVersion, sfrom) == -1 {
 				return false
 			}
 		default:
 			// Handle case where useFromKubescapeVersion is not a string
 			return false
 		}
 	}
@@ -91,11 +95,10 @@ func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version
 	if until, ok := attributes["useUntilKubescapeVersion"]; ok && until != nil {
 		switch suntil := until.(type) {
 		case string:
-			if version == "" || semver.Compare(version, suntil) >= 0 {
+			if normalizedVersion == "" || (semver.IsValid(normalizedVersion) && semver.Compare(normalizedVersion, suntil) >= 0) {
 				return false
 			}
 		default:
 			// Handle case where useUntilKubescapeVersion is not a string
 			return false
 		}
 	}
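The normalization matters because golang.org/x/mod/semver only accepts versions with a leading "v": without it, IsValid returns false and Compare treats the string as invalid, sorting it before any valid version. A quick standalone check of that behavior:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(semver.IsValid("3.0.34"))            // false: missing the "v" prefix
	fmt.Println(semver.IsValid("v3.0.34"))           // true
	fmt.Println(semver.Compare("v3.0.34", "v3.1.0")) // -1: left-hand version is older
}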
@@ -322,7 +322,7 @@ func glob(root, pattern string, onlyDirectories bool) ([]string, error) {
 		return nil
 	}
 	fileFormat := getFileFormat(path)
-	if !(fileFormat == JSON_FILE_FORMAT || fileFormat == YAML_FILE_FORMAT) {
+	if fileFormat != JSON_FILE_FORMAT && fileFormat != YAML_FILE_FORMAT {
 		return nil
 	}
 	if matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {
@@ -1,7 +1,7 @@
 package getter

 import (
-	"io/ioutil"
+	"io"
 	"net/http"
 	"os"
 	"path/filepath"
@@ -102,7 +102,7 @@ func TestHttpRespToString_NilResponse(t *testing.T) {

 func TestHttpRespToString_ValidResponse(t *testing.T) {
 	resp := &http.Response{
-		Body:       ioutil.NopCloser(strings.NewReader("test response")),
+		Body:       io.NopCloser(strings.NewReader("test response")),
 		Status:     "200 OK",
 		StatusCode: 200,
 	}
@@ -114,7 +114,7 @@ func TestHttpRespToString_ValidResponse(t *testing.T) {
 // Returns an error with status and reason when unable to read response body.
 func TestHttpRespToString_ReadError(t *testing.T) {
 	resp := &http.Response{
-		Body: ioutil.NopCloser(strings.NewReader("test response")),
+		Body: io.NopCloser(strings.NewReader("test response")),
 	}
 	resp.Body.Close()
 	result, err := httpRespToString(resp)
@@ -125,7 +125,7 @@ func TestHttpRespToString_ReadError(t *testing.T) {
 // Returns an error with status and reason when unable to read response body.
 func TestHttpRespToString_ErrorCodeLessThan200(t *testing.T) {
 	resp := &http.Response{
-		Body:       ioutil.NopCloser(strings.NewReader("test response")),
+		Body:       io.NopCloser(strings.NewReader("test response")),
 		StatusCode: 100,
 	}
 	resp.Body.Close()
@@ -5,7 +5,6 @@ import (
 	"io"
 	"net/http"
 	"net/http/httptest"
-	"os"
 	"strings"
 	"sync"
 	"testing"
@@ -25,10 +24,6 @@ const (

 var (
 	globalMx sync.Mutex // a mutex to avoid data races on package globals while testing
-
-	testOptions = []v1.KSCloudOption{
-		v1.WithTrace(os.Getenv("DEBUG_TEST") != ""),
-	}
 )

 func TestGlobalKSCloudAPIConnector(t *testing.T) {
@@ -113,8 +108,6 @@ func mockAPIServer(t testing.TB) *testServer {
 		defer func() { _ = r.Body.Close() }()
 		_, _ = io.Copy(w, r.Body)
-
-		return
 	})

 	return server
@@ -226,7 +226,7 @@ func (lp *LoadPolicy) GetControlsInputs(_ /* clusterName */ string) (map[string]
 	buf, err := os.ReadFile(filePath)
 	if err != nil {
 		formattedError := fmt.Errorf(
-			`Error opening %s file, "controls-config" will be downloaded from ARMO management portal`,
+			`error opening %s file, "controls-config" will be downloaded from ARMO management portal`,
 			fileName,
 		)
@@ -236,7 +236,7 @@ func (lp *LoadPolicy) GetControlsInputs(_ /* clusterName */ string) (map[string]
 	controlInputs := make(map[string][]string, 100) // from armotypes.Settings.PostureControlInputs
 	if err = json.Unmarshal(buf, &controlInputs); err != nil {
 		formattedError := fmt.Errorf(
-			`Error reading %s file, %v, "controls-config" will be downloaded from ARMO management portal`,
+			`error reading %s file, %v, "controls-config" will be downloaded from ARMO management portal`,
 			fileName, err,
 		)
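This lowercasing, repeated in many hunks below, follows the standard Go convention checked by go vet and staticcheck (ST1005): error strings should not be capitalized or end with punctuation, because they are routinely wrapped into longer messages. A small illustration of why the convention exists:

package main

import (
	"errors"
	"fmt"
)

func main() {
	inner := errors.New("error opening config file") // lowercase, so it composes cleanly
	wrapped := fmt.Errorf("scan aborted: %w", inner)
	fmt.Println(wrapped) // scan aborted: error opening config file
}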
57298 core/cautils/getter/testdata/policy.json (vendored)
File diff suppressed because one or more lines are too long
@@ -9,6 +9,7 @@ import (
 	"github.com/kubescape/k8s-interface/workloadinterface"
 	"github.com/kubescape/opa-utils/objectsenvelopes/localworkload"
 	"sigs.k8s.io/kustomize/api/krusty"
+	"sigs.k8s.io/kustomize/api/types"
 	"sigs.k8s.io/kustomize/kyaml/filesys"
 )

@@ -75,7 +76,11 @@ func getKustomizeDirectoryName(path string) string {
 func (kd *KustomizeDirectory) GetWorkloads(kustomizeDirectoryPath string) (map[string][]workloadinterface.IMetadata, []error) {

 	fSys := filesys.MakeFsOnDisk()
-	kustomizer := krusty.MakeKustomizer(krusty.MakeDefaultOptions())
+	// Use LoadRestrictionsNone to allow loading resources from outside the kustomize directory.
+	// This is necessary for overlays that reference base configurations in parent directories.
+	opts := krusty.MakeDefaultOptions()
+	opts.LoadRestrictions = types.LoadRestrictionsNone
+	kustomizer := krusty.MakeKustomizer(opts)
 	resmap, err := kustomizer.Run(fSys, kustomizeDirectoryPath)

 	if err != nil {
@@ -4,6 +4,8 @@ import (
 	"os"
 	"path/filepath"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
 )

 func TestGetKustomizeDirectoryName(t *testing.T) {
@@ -52,7 +54,7 @@ func TestGetKustomizeDirectoryName(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			tempFile := filepath.Join(tt.args.path, "kustomization.yaml")
 			if tt.createKustomization {
-				_ = os.WriteFile(tempFile, []byte(""), 0644)
+				_ = os.WriteFile(tempFile, []byte(""), 0600)
 			}
 			if got := getKustomizeDirectoryName(tt.args.path); got != tt.want {
 				t.Errorf("GetKustomizeDirectoryName() = %v, want %v", got, tt.want)
@@ -61,3 +63,83 @@ func TestGetKustomizeDirectoryName(t *testing.T) {
 		})
 	}
 }
+
+func kustomizeTestdataPath() string {
+	o, _ := os.Getwd()
+	return filepath.Join(o, "testdata", "kustomize")
+}
+
+// TestKustomizeOverlayWithBase tests that kustomize overlays can properly load
+// resources from base directories. This is the main fix for issue #1617.
+func TestKustomizeOverlayWithBase(t *testing.T) {
+	overlayPath := filepath.Join(kustomizeTestdataPath(), "overlays", "prod")
+
+	// Verify it's detected as a kustomize directory
+	assert.True(t, isKustomizeDirectory(overlayPath), "overlay should be detected as kustomize directory")
+
+	// Create kustomize directory and get workloads
+	kd := NewKustomizeDirectory(overlayPath)
+	workloads, errs := kd.GetWorkloads(overlayPath)
+
+	// Should not have errors - this was failing before the fix because
+	// overlays couldn't load resources from parent base directories
+	assert.Empty(t, errs, "should not have errors loading overlay with base reference")
+
+	// Should have workloads from the rendered overlay
+	assert.NotEmpty(t, workloads, "should have workloads from rendered kustomize overlay")
+
+	// The overlay should have produced exactly one deployment with the merged configuration
+	var deploymentFound bool
+	for _, wls := range workloads {
+		for _, wl := range wls {
+			if wl.GetKind() == "Deployment" && wl.GetName() == "test-app" {
+				deploymentFound = true
+
+				// Verify the deployment has the resource limits from the base
+				obj := wl.GetObject()
+				spec, ok := obj["spec"].(map[string]interface{})
+				assert.True(t, ok, "deployment should have spec")
+
+				template, ok := spec["template"].(map[string]interface{})
+				assert.True(t, ok, "deployment should have template")
+
+				templateSpec, ok := template["spec"].(map[string]interface{})
+				assert.True(t, ok, "template should have spec")
+
+				containers, ok := templateSpec["containers"].([]interface{})
+				assert.True(t, ok, "template spec should have containers")
+				assert.NotEmpty(t, containers, "should have at least one container")
+
+				container, ok := containers[0].(map[string]interface{})
+				assert.True(t, ok, "container should be a map")
+
+				resources, ok := container["resources"].(map[string]interface{})
+				assert.True(t, ok, "container should have resources (from base)")
+
+				limits, ok := resources["limits"].(map[string]interface{})
+				assert.True(t, ok, "resources should have limits")
+				assert.Equal(t, "500m", limits["cpu"], "cpu limit should be from base")
+				assert.Equal(t, "256Mi", limits["memory"], "memory limit should be from base")
+
+				// Verify overlay modifications were applied
+				replicas, ok := spec["replicas"].(int)
+				assert.True(t, ok, "replicas should be an int")
+				assert.Equal(t, 3, replicas, "replicas should be modified by overlay")
+			}
+		}
+	}
+	assert.True(t, deploymentFound, "deployment should be found in rendered output")
+}
+
+// TestKustomizeBaseDirectory tests that base directories work on their own
+func TestKustomizeBaseDirectory(t *testing.T) {
+	basePath := filepath.Join(kustomizeTestdataPath(), "base")
+
+	assert.True(t, isKustomizeDirectory(basePath), "base should be detected as kustomize directory")
+
+	kd := NewKustomizeDirectory(basePath)
+	workloads, errs := kd.GetWorkloads(basePath)

+	assert.Empty(t, errs, "should not have errors loading base directory")
+	assert.NotEmpty(t, workloads, "should have workloads from base directory")
+}
@@ -81,7 +81,7 @@ func Test_GetRequestPayload(t *testing.T) {

 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			result := tc.OperatorScanInfo.GetRequestPayload()
+			result := tc.GetRequestPayload()
 			assert.Equal(t, tc.result, result)
 		})
 	}
@@ -136,8 +136,8 @@ func Test_ValidatePayload(t *testing.T) {

 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			payload := tc.OperatorScanInfo.GetRequestPayload()
-			result := tc.OperatorScanInfo.ValidatePayload(payload)
+			payload := tc.GetRequestPayload()
+			result := tc.ValidatePayload(payload)
 			assert.Equal(t, tc.result, result)
 		})
 	}
@@ -170,7 +170,6 @@ func getInfoFromOne(output string, lastNumber int, isMapType bool) (value string
 		if isMapType {
 			lineNumber = lineNumber - 1
 		}
 		lastNumber = lineNumber
-		// save to structure
 	} else {
 		lineNumber = lastNumber
@@ -78,7 +78,7 @@ func (p *portForward) StopPortForwarder() {

 func (p *portForward) StartPortForwarder() error {
 	go func() {
-		p.PortForwarder.ForwardPorts()
+		p.ForwardPorts()
 	}()
 	p.waitForPortForwardReadiness()
@@ -64,7 +64,7 @@ func Test_CreatePortForwarder(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			k8sClient := k8sinterface.KubernetesApi{
-				KubernetesClient: fake.NewSimpleClientset(),
+				KubernetesClient: fake.NewClientset(),
 				K8SConfig: &rest.Config{
 					Host: "any",
 				},
@@ -105,7 +105,7 @@ func Test_GetPortForwardLocalhost(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			k8sClient := k8sinterface.KubernetesApi{
-				KubernetesClient: fake.NewSimpleClientset(),
+				KubernetesClient: fake.NewClientset(),
 				K8SConfig: &rest.Config{
 					Host: "any",
 				},
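fake.NewClientset is the constructor client-go introduced to replace the now-deprecated fake.NewSimpleClientset; as far as I know the newer one also tracks managed fields, but usage is otherwise identical. A minimal sketch, assuming a recent client-go:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	clientset := fake.NewClientset() // in-memory clientset, no cluster needed
	pods, err := clientset.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
	fmt.Println(len(pods.Items), err) // 0 <nil>
}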
@@ -140,6 +140,7 @@ type ScanInfo struct {
 	UseDefaultMatchers bool
 	ChartPath          string
 	FilePath           string
+	LabelsToCopy       []string // Labels to copy from workloads to scan reports
 	scanningContext    *ScanningContext
 	cleanups           []func()
 }
28 core/cautils/testdata/kustomize/base/deployment.yaml (vendored, new file)
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: test-app
+  labels:
+    app: test-app
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: test-app
+  template:
+    metadata:
+      labels:
+        app: test-app
+    spec:
+      containers:
+      - name: test-container
+        image: nginx:1.19
+        resources:
+          limits:
+            cpu: "500m"
+            memory: "256Mi"
+          requests:
+            cpu: "100m"
+            memory: "128Mi"
+        ports:
+        - containerPort: 80
5 core/cautils/testdata/kustomize/base/kustomization.yaml (vendored, new file)
@@ -0,0 +1,5 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- deployment.yaml
13 core/cautils/testdata/kustomize/overlays/prod/kustomization.yaml (vendored, new file)
@@ -0,0 +1,13 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../../base
+
+images:
+- name: nginx
+  newTag: "1.21"
+
+replicas:
+- name: test-app
+  count: 3
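Rendering this overlay merges the base deployment with the overlay's image and replica transformers. Based on the testdata above and the assertions in TestKustomizeOverlayWithBase, the rendered output should look roughly like the following sketch (abridged, not captured from an actual run):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-app
spec:
  replicas: 3                  # patched by the overlay's replicas transformer
  template:
    spec:
      containers:
      - name: test-container
        image: nginx:1.21      # retagged by the overlay's images transformer
        resources:
          limits:              # inherited unchanged from the base
            cpu: "500m"
            memory: "256Mi"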
@@ -36,7 +36,7 @@ func getOperatorPod(k8sClient *k8sinterface.KubernetesApi, ns string) (*v1.Pod,
 		return nil, err
 	}
 	if len(pods.Items) != 1 {
-		return nil, errors.New("Could not find the Kubescape Operator chart, please validate that the Kubescape Operator helm chart is installed and running -> https://github.com/kubescape/helm-charts")
+		return nil, errors.New("could not find the Kubescape Operator chart, please validate that the Kubescape Operator helm chart is installed and running -> https://github.com/kubescape/helm-charts")
 	}

 	return &pods.Items[0], nil
@@ -90,8 +90,8 @@ func (a *OperatorAdapter) httpPostOperatorScanRequest(body apis.Commands) (strin
 }

 func (a *OperatorAdapter) OperatorScan() (string, error) {
-	payload := a.OperatorScanInfo.GetRequestPayload()
-	if err := a.OperatorScanInfo.ValidatePayload(payload); err != nil {
+	payload := a.GetRequestPayload()
+	if err := a.ValidatePayload(payload); err != nil {
 		return "", err
 	}
 	res, err := a.httpPostOperatorScanRequest(*payload)
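These call-site changes (here and in the payload tests above) rely on Go's method promotion: when a struct embeds a field anonymously, the embedded type's methods can be called on the outer value directly. A self-contained sketch of the pattern, with hypothetical names rather than the repository's actual types:

package main

import "fmt"

type OperatorScanInfo struct{}

func (s OperatorScanInfo) GetRequestPayload() string { return "payload" }

// OperatorScanInfo is embedded anonymously, so its methods are promoted.
type OperatorAdapter struct {
	OperatorScanInfo
}

func main() {
	a := OperatorAdapter{}
	fmt.Println(a.GetRequestPayload())                  // promoted call
	fmt.Println(a.OperatorScanInfo.GetRequestPayload()) // equivalent explicit form
}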
@@ -23,13 +23,13 @@ func Test_getOperatorPod(t *testing.T) {
 			name:                                  "test error no operator exist",
 			createOperatorPod:                     false,
 			createAnotherOperatorPodWithSameLabel: false,
-			expectedError:                         fmt.Errorf("Could not find the Kubescape Operator chart, please validate that the Kubescape Operator helm chart is installed and running -> https://github.com/kubescape/helm-charts"),
+			expectedError:                         fmt.Errorf("could not find the Kubescape Operator chart, please validate that the Kubescape Operator helm chart is installed and running -> https://github.com/kubescape/helm-charts"),
 		},
 		{
 			name:                                  "test error several operators exist",
 			createOperatorPod:                     true,
 			createAnotherOperatorPodWithSameLabel: true,
-			expectedError:                         fmt.Errorf("Could not find the Kubescape Operator chart, please validate that the Kubescape Operator helm chart is installed and running -> https://github.com/kubescape/helm-charts"),
+			expectedError:                         fmt.Errorf("could not find the Kubescape Operator chart, please validate that the Kubescape Operator helm chart is installed and running -> https://github.com/kubescape/helm-charts"),
 		},
 		{
 			name: "test no error",
@@ -42,7 +42,7 @@ func Test_getOperatorPod(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			k8sClient := k8sinterface.KubernetesApi{
-				KubernetesClient: fake.NewSimpleClientset(),
+				KubernetesClient: fake.NewClientset(),
 				Context:          context.TODO(),
 			}
@@ -48,7 +48,7 @@ func (ks *Kubescape) Fix(fixInfo *metav1.FixInfo) error {
 		for _, err := range errors {
 			logger.L().Ctx(ks.Context()).Warning(err.Error())
 		}
-		return fmt.Errorf("Failed to fix some resources, check the logs for more details")
+		return fmt.Errorf("failed to fix some resources, check the logs for more details")
 	}

 	return nil
@@ -64,9 +64,10 @@ func userConfirmed() bool {
 		}

 		input = strings.ToLower(input)
-		if input == "y" || input == "yes" {
+		switch input {
+		case "y", "yes":
 			return true
-		} else if input == "n" || input == "no" {
+		case "n", "no":
 			return false
 		}
 	}
@@ -82,7 +82,7 @@ func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, report
 }

 func getResourceHandler(ctx context.Context, scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor) resourcehandler.IResourceHandler {
-	ctx, span := otel.Tracer("").Start(ctx, "getResourceHandler")
+	_, span := otel.Tracer("").Start(ctx, "getResourceHandler")
 	defer span.End()

 	if len(scanInfo.InputPatterns) > 0 || k8s == nil {
@@ -48,7 +48,7 @@ func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
 	if listFormatFunction, ok := listFormatFunc[listPolicies.Format]; ok {
 		listFormatFunction(ks.Context(), listPolicies.Target, policies)
 	} else {
-		return fmt.Errorf("Invalid format \"%s\", Supported formats: 'pretty-print'/'json' ", listPolicies.Format)
+		return fmt.Errorf("invalid format \"%s\", supported formats: 'pretty-print'/'json' ", listPolicies.Format)
 	}

 	return nil
@@ -168,7 +168,7 @@ func generateControlRows(policies []string) []table.Row {

 		docs := cautils.GetControlLink(id)

-		currentRow := table.Row{id, control, docs, strings.Replace(framework, " ", "\n", -1)}
+		currentRow := table.Row{id, control, docs, strings.ReplaceAll(framework, " ", "\n")}

 		rows = append(rows, currentRow)
 	}
@@ -188,7 +188,7 @@ func generatePolicyRows(policies []string) []table.Row {
 func shortFormatControlRows(controlRows []table.Row) []table.Row {
 	rows := make([]table.Row, 0, len(controlRows))
 	for _, controlRow := range controlRows {
-		rows = append(rows, table.Row{fmt.Sprintf("Control ID"+strings.Repeat(" ", 3)+": %+v\nControl Name"+strings.Repeat(" ", 1)+": %+v\nDocs"+strings.Repeat(" ", 9)+": %+v\nFrameworks"+strings.Repeat(" ", 3)+": %+v", controlRow[0], controlRow[1], controlRow[2], strings.Replace(controlRow[3].(string), "\n", " ", -1))})
+		rows = append(rows, table.Row{fmt.Sprintf("Control ID"+strings.Repeat(" ", 3)+": %+v\nControl Name"+strings.Repeat(" ", 1)+": %+v\nDocs"+strings.Repeat(" ", 9)+": %+v\nFrameworks"+strings.Repeat(" ", 3)+": %+v", controlRow[0], controlRow[1], controlRow[2], strings.ReplaceAll(controlRow[3].(string), "\n", " "))})
 	}
 	return rows
 }
@@ -313,7 +313,7 @@ func patchWithContext(ctx context.Context, buildkitAddr, image, reportFile, patc
 		return res, nil
 	}, buildChannel)

-	return nil
+	return err
 }

 func getOSType(ctx context.Context, osreleaseBytes []byte) (string, error) {
@@ -3,8 +3,8 @@ package core
 import (
 	"context"
 	"fmt"
-	"slices"

+	mapset "github.com/deckarep/golang-set/v2"
 	"github.com/kubescape/backend/pkg/versioncheck"
 	"github.com/kubescape/go-logger"
 	"github.com/kubescape/go-logger/helpers"
@@ -145,14 +145,14 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
 	}

 	// set policy getter only after setting the customerGUID
-	scanInfo.Getters.PolicyGetter = getPolicyGetter(ctxInit, scanInfo.UseFrom, interfaces.tenantConfig.GetAccountID(), scanInfo.FrameworkScan, downloadReleasedPolicy)
-	scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(ctxInit, scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
-	scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(ctxInit, scanInfo.UseExceptions, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
-	scanInfo.Getters.AttackTracksGetter = getAttackTracksGetter(ctxInit, scanInfo.AttackTracks, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
+	scanInfo.PolicyGetter = getPolicyGetter(ctxInit, scanInfo.UseFrom, interfaces.tenantConfig.GetAccountID(), scanInfo.FrameworkScan, downloadReleasedPolicy)
+	scanInfo.ControlsInputsGetter = getConfigInputsGetter(ctxInit, scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
+	scanInfo.ExceptionsGetter = getExceptionsGetter(ctxInit, scanInfo.UseExceptions, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
+	scanInfo.AttackTracksGetter = getAttackTracksGetter(ctxInit, scanInfo.AttackTracks, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)

 	// TODO - list supported frameworks/controls
 	if scanInfo.ScanAll {
-		scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), apisv1.KindFramework)
+		scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.PolicyGetter), apisv1.KindFramework)
 	}

 	// remove host scanner components
@@ -200,7 +200,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
 	// ======================== prioritization ===================
 	if scanInfo.PrintAttackTree || isPrioritizationScanType(scanInfo.ScanType) {
 		_, spanPrioritization := otel.Tracer("").Start(ctxOpa, "prioritization")
-		if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(ctxOpa, scanInfo.Getters.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
+		if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(ctxOpa, scanInfo.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
 			logger.L().Ctx(ks.Context()).Warning("failed to get attack tracks, this may affect the scanning results", helpers.Error(err))
 		} else if err := priotizationHandler.PrioritizeResources(scanData); err != nil {
 			return resultsHandling, fmt.Errorf("%w", err)
@@ -225,7 +225,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
 }

 func scanImages(scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, ctx context.Context, resultsHandling *resultshandling.ResultsHandler, scanInfo *cautils.ScanInfo) {
-	var imagesToScan []string
+	imagesToScan := mapset.NewSet[string]()

 	if scanType == cautils.ScanTypeWorkload {
 		containers, err := workloadinterface.NewWorkloadObj(scanData.SingleResourceScan.GetObject()).GetContainers()
@@ -234,9 +234,7 @@ func scanImages(scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, ctx
 			return
 		}
 		for _, container := range containers {
-			if !slices.Contains(imagesToScan, container.Image) {
-				imagesToScan = append(imagesToScan, container.Image)
-			}
+			imagesToScan.Add(container.Image)
 		}
 	} else {
 		for _, workload := range scanData.AllResources {
@@ -246,9 +244,7 @@ func scanImages(scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, ctx
 				continue
 			}
 			for _, container := range containers {
-				if !slices.Contains(imagesToScan, container.Image) {
-					imagesToScan = append(imagesToScan, container.Image)
-				}
+				imagesToScan.Add(container.Image)
 			}
 		}
 	}
@@ -261,7 +257,7 @@ func scanImages(scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, ctx
 	}
 	defer svc.Close()

-	for _, img := range imagesToScan {
+	for img := range imagesToScan.Iter() {
 		logger.L().Start("Scanning", helpers.String("image", img))
 		if err := scanSingleImage(ctx, img, svc, resultsHandling); err != nil {
 			logger.L().StopError("failed to scan", helpers.String("image", img), helpers.Error(err))
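Swapping the slice-plus-slices.Contains pattern for a set turns each image de-duplication check from O(n) into O(1). deckarep/golang-set v2 is generic; a minimal sketch of the small part of its API used above:

package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set/v2"
)

func main() {
	images := mapset.NewSet[string]()
	images.Add("nginx:1.21")
	images.Add("nginx:1.21") // duplicate, silently ignored by the set
	images.Add("redis:7")

	// Iter returns a channel, so ranging over it yields each unique element once.
	for img := range images.Iter() {
		fmt.Println(img)
	}
	fmt.Println(images.Cardinality()) // 2
}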
@@ -46,8 +46,6 @@ var hash = []rune("abcdef0123456789")
 var nums = []rune("0123456789")

 func randSeq(n int, bank []rune) string {
-	rand.Seed(time.Now().UnixNano())
-
 	b := make([]rune, n)
 	for i := range b {
 		b[i] = bank[rand.Intn(len(bank))] //nolint:gosec
@@ -88,6 +88,6 @@ type PkgFiles []PackageFile

 func (v *ScanResultReport) AsFNVHash() string {
 	hasher := fnv.New64a()
-	hasher.Write([]byte(fmt.Sprintf("%v", *v)))
+	fmt.Fprintf(hasher, "%v", *v)
 	return fmt.Sprintf("%v", hasher.Sum64())
 }
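Both changes lean on standard library behavior: the global math/rand source has been seeded automatically since Go 1.20 (rand.Seed is deprecated), and hash.Hash implements io.Writer, so fmt.Fprintf can stream the formatted value straight into the hasher without building an intermediate []byte. A small standalone version of the second idiom:

package main

import (
	"fmt"
	"hash/fnv"
)

type report struct {
	Name  string
	Count int
}

func main() {
	hasher := fnv.New64a()
	// hash.Hash satisfies io.Writer, so the formatted bytes go directly in.
	fmt.Fprintf(hasher, "%v", report{Name: "scan", Count: 3})
	fmt.Println(hasher.Sum64())
}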
@@ -209,7 +209,7 @@ func (h *FixHandler) ApplyChanges(ctx context.Context, resourcesToFix []Resource
 		fixedYamlString, err := ApplyFixToContent(ctx, fileAsString, yamlExpression)

 		if err != nil {
-			errors = append(errors, fmt.Errorf("Failed to fix file %s: %w ", filepath, err))
+			errors = append(errors, fmt.Errorf("failed to fix file %s: %w ", filepath, err))
 			continue
 		} else {
 			updatedFiles[filepath] = true
@@ -344,7 +344,7 @@ func GetFileString(filepath string) (string, error) {
 	bytes, err := os.ReadFile(filepath)

 	if err != nil {
-		return "", fmt.Errorf("Error reading file %s", filepath)
+		return "", fmt.Errorf("error reading file %s", filepath)
 	}

 	return string(bytes), nil
@@ -354,7 +354,7 @@ func writeFixesToFile(filepath, content string) error {
 	err := os.WriteFile(filepath, []byte(content), 0644) //nolint:gosec

 	if err != nil {
-		return fmt.Errorf("Error writing fixes to file: %w", err)
+		return fmt.Errorf("error writing fixes to file: %w", err)
 	}

 	return nil
@@ -26,7 +26,7 @@ func decodeDocumentRoots(yamlAsString string) ([]yaml.Node, error) {
 			break
 		}
 		if err != nil {
-			return nil, fmt.Errorf("Cannot Decode File as YAML")
+			return nil, fmt.Errorf("cannot decode file as YAML")
 		}
@@ -55,7 +55,7 @@ func getFixedNodes(ctx context.Context, yamlAsString, yamlExpression string) ([]
 	fixedCandidateNodes, err := allAtOnceEvaluator.EvaluateCandidateNodes(yamlExpression, allDocuments)

 	if err != nil {
-		return nil, fmt.Errorf("Error fixing YAML, %w", err)
+		return nil, fmt.Errorf("error fixing YAML, %w", err)
 	}

 	fixedNodes := make([]yaml.Node, 0)

@@ -86,7 +86,7 @@ func adjustFixedListLines(originalList, fixedList *[]nodeInfo) {
 func enocodeIntoYaml(parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) (string, error) {

 	if tracker < 0 || tracker >= len(*nodeList) {
-		return "", fmt.Errorf("Index out of range for nodeList: tracker=%d, length=%d", tracker, len(*nodeList))
+		return "", fmt.Errorf("index out of range for nodeList: tracker=%d, length=%d", tracker, len(*nodeList))
 	}

 	content := make([]*yaml.Node, 0)
@@ -112,11 +112,11 @@ func enocodeIntoYaml(parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) (

 	errorEncoding := encoder.Encode(parentForContent)
 	if errorEncoding != nil {
-		return "", fmt.Errorf("Error debugging node, %v", errorEncoding.Error())
+		return "", fmt.Errorf("error debugging node, %v", errorEncoding.Error())
 	}
 	errorClosingEncoder := encoder.Close()
 	if errorClosingEncoder != nil {
-		return "", fmt.Errorf("Error closing encoder: %v", errorClosingEncoder.Error())
+		return "", fmt.Errorf("error closing encoder: %v", errorClosingEncoder.Error())
 	}
 	return fmt.Sprintf(`%v`, buf.String()), nil
 }
@@ -216,7 +216,7 @@ func getLastLineOfResource(linesSlice *[]string, currentLine int) (int, error) {
 		}
 	}

-	return 0, fmt.Errorf("Provided line is greater than the length of YAML file")
+	return 0, fmt.Errorf("provided line is greater than the length of YAML file")
 }

 func getNodeLine(nodeList *[]nodeInfo, tracker int) int {
@@ -300,7 +300,7 @@ func isEmptyLineOrComment(lineContent string) bool {
 func readDocuments(ctx context.Context, reader io.Reader, decoder yqlib.Decoder) (*list.List, error) {
 	err := decoder.Init(reader)
 	if err != nil {
-		return nil, fmt.Errorf("Error Initializing the decoder, %w", err)
+		return nil, fmt.Errorf("error initializing the decoder, %w", err)
 	}
 	inputList := list.New()

@@ -316,7 +316,7 @@ func readDocuments(ctx context.Context, reader io.Reader, decoder yqlib.Decoder)
 		}
 		return inputList, nil
 	} else if errorReading != nil {
-		return nil, fmt.Errorf("Error Decoding YAML file, %w", errorReading)
+		return nil, fmt.Errorf("error decoding yaml file, %w", errorReading)
 	}

 	candidateNode.Document = currentIndex

@@ -434,9 +434,9 @@ func TestRemoveOutOfRangeLines(t *testing.T) {
 func TestShouldCalculateTotalNumberOfChildrenAndAddToCurrentTracker(t *testing.T) {
 	node := &yaml.Node{
 		Content: []*yaml.Node{
-			&yaml.Node{},
-			&yaml.Node{},
-			&yaml.Node{},
+			{},
+			{},
+			{},
 		},
 	}
 	currentTracker := 5
@@ -1,74 +0,0 @@
-apiVersion: v1
-kind: Namespace
-metadata:
-  labels:
-    app: kubescape-host-scanner
-    k8s-app: kubescape-host-scanner
-    kubernetes.io/metadata.name: kubescape-host-scanner
-    tier: kubescape-host-scanner-control-plane
-  name: kubescape
----
-apiVersion: apps/v1
-kind: DaemonSet
-metadata:
-  name: host-scanner
-  namespace: kubescape
-  labels:
-    app: host-scanner
-    k8s-app: kubescape-host-scanner
-    otel: enabled
-spec:
-  selector:
-    matchLabels:
-      name: host-scanner
-  template:
-    metadata:
-      labels:
-        name: host-scanner
-    spec:
-      tolerations:
-      # this toleration is to have the DaemonSet runnable on all nodes (including masters)
-      # remove it if your masters can't run pods
-      - operator: Exists
-      containers:
-      - name: host-sensor
-        image: quay.io/kubescape/host-scanner:v1.0.61
-        securityContext:
-          allowPrivilegeEscalation: true
-          privileged: true
-          readOnlyRootFilesystem: true
-        ports:
-        - name: scanner # Do not change port name
-          containerPort: 7888
-          protocol: TCP
-        resources:
-          limits:
-            cpu: 0.1m
-            memory: 200Mi
-          requests:
-            cpu: 1m
-            memory: 200Mi
-        volumeMounts:
-        - mountPath: /host_fs
-          name: host-filesystem
-        startupProbe:
-          httpGet:
-            path: /readyz
-            port: 7888
-          failureThreshold: 30
-          periodSeconds: 1
-        livenessProbe:
-          httpGet:
-            path: /healthz
-            port: 7888
-          periodSeconds: 10
-      terminationGracePeriodSeconds: 120
-      dnsPolicy: ClusterFirstWithHostNet
-      automountServiceAccountToken: false
-      volumes:
-      - hostPath:
-          path: /
-          type: Directory
-        name: host-filesystem
-      hostPID: true
-      hostIPC: true
@@ -18,6 +18,5 @@ func TestHostSensorHandlerMock(t *testing.T) {
 	require.Nil(t, status)
 	require.NoError(t, err)

-	require.Empty(t, h.GetNamespace())
 	require.NoError(t, h.TearDown())
 }
235 core/pkg/hostsensorutils/hostsensorcollectcrds.go (new file)
@@ -0,0 +1,235 @@
+package hostsensorutils
+
+import (
+	"context"
+	stdjson "encoding/json"
+	"fmt"
+	"reflect"
+
+	"github.com/kubescape/go-logger"
+	"github.com/kubescape/go-logger/helpers"
+	k8shostsensor "github.com/kubescape/k8s-interface/hostsensor"
+	"github.com/kubescape/k8s-interface/k8sinterface"
+	"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
+	"github.com/kubescape/opa-utils/reporthandling/apis"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+// getCRDResources retrieves resources from CRDs and converts them to HostSensorDataEnvelope format
+func (hsh *HostSensorHandler) getCRDResources(ctx context.Context, resourceType k8shostsensor.HostSensorResource) ([]hostsensor.HostSensorDataEnvelope, error) {
+	pluralName := k8shostsensor.MapResourceToPlural(resourceType)
+	if pluralName == "" {
+		return nil, fmt.Errorf("unsupported resource type: %s", resourceType)
+	}
+
+	// List CRD resources
+	items, err := hsh.listCRDResources(ctx, pluralName, resourceType.String())
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert to HostSensorDataEnvelope format
+	result := make([]hostsensor.HostSensorDataEnvelope, 0, len(items))
+	for _, item := range items {
+		envelope, err := hsh.convertCRDToEnvelope(item, resourceType)
+		if err != nil {
+			logger.L().Warning("Failed to convert CRD to envelope",
+				helpers.String("kind", resourceType.String()),
+				helpers.String("name", item.GetName()),
+				helpers.Error(err))
+			continue
+		}
+		result = append(result, envelope)
+	}
+
+	logger.L().Debug("Retrieved resources from CRDs",
+		helpers.String("kind", resourceType.String()),
+		helpers.Int("count", len(result)))
+
+	return result, nil
+}
+
+// convertCRDToEnvelope converts a CRD unstructured object to HostSensorDataEnvelope
+func (hsh *HostSensorHandler) convertCRDToEnvelope(item unstructured.Unstructured, resourceType k8shostsensor.HostSensorResource) (hostsensor.HostSensorDataEnvelope, error) {
+	envelope := hostsensor.HostSensorDataEnvelope{}
+
+	// Set API version and kind
+	envelope.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
+	envelope.SetKind(resourceType.String())
+
+	// Set name (node name)
+	nodeName := item.GetName()
+	envelope.SetName(nodeName)
+
+	// Extract content from spec.content
+	content, found, err := unstructured.NestedString(item.Object, "spec", "content")
+	if err != nil {
+		return envelope, fmt.Errorf("failed to extract spec.content: %w", err)
+	}
+	if !found {
+		// fallback to "spec" itself
+		contentI, found, err := unstructured.NestedFieldNoCopy(item.Object, "spec")
+		if err != nil {
+			return envelope, fmt.Errorf("failed to extract spec: %w", err)
+		}
+		if !found {
+			return envelope, fmt.Errorf("spec not found in CRD")
+		}
+		contentBytes, err := stdjson.Marshal(contentI)
+		if err != nil {
+			return envelope, fmt.Errorf("failed to marshal spec: %w", err)
+		}
+		content = string(contentBytes)
+	}
+
+	// Set data as raw bytes
+	envelope.SetData([]byte(content))
+
+	return envelope, nil
+}
+
+// getOsReleaseFile returns the list of osRelease metadata from CRDs.
+func (hsh *HostSensorHandler) getOsReleaseFile(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
+	return hsh.getCRDResources(ctx, k8shostsensor.OsReleaseFile)
+}
+
+// getKernelVersion returns the list of kernelVersion metadata from CRDs.
+func (hsh *HostSensorHandler) getKernelVersion(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
+	return hsh.getCRDResources(ctx, k8shostsensor.KernelVersion)
+}
+
+// getLinuxSecurityHardeningStatus returns the list of LinuxSecurityHardeningStatus metadata from CRDs.
+func (hsh *HostSensorHandler) getLinuxSecurityHardeningStatus(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
+	return hsh.getCRDResources(ctx, k8shostsensor.LinuxSecurityHardeningStatus)
+}
+
+// getOpenPortsList returns the list of open ports from CRDs.
+func (hsh *HostSensorHandler) getOpenPortsList(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
+	return hsh.getCRDResources(ctx, k8shostsensor.OpenPortsList)
+}
+
+// getKernelVariables returns the list of Linux Kernel variables from CRDs.
+func (hsh *HostSensorHandler) getKernelVariables(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
+	return hsh.getCRDResources(ctx, k8shostsensor.LinuxKernelVariables)
+}
+
+// getKubeletInfo returns the list of kubelet metadata from CRDs.
+func (hsh *HostSensorHandler) getKubeletInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
+	return hsh.getCRDResources(ctx, k8shostsensor.KubeletInfo)
+}
+
+// getKubeProxyInfo returns the list of kubeProxy metadata from CRDs.
+func (hsh *HostSensorHandler) getKubeProxyInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
+	return hsh.getCRDResources(ctx, k8shostsensor.KubeProxyInfo)
+}
+
+// getControlPlaneInfo returns the list of controlPlaneInfo metadata from CRDs.
+func (hsh *HostSensorHandler) getControlPlaneInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
+	return hsh.getCRDResources(ctx, k8shostsensor.ControlPlaneInfo)
+}
+
+// getCloudProviderInfo returns the list of cloudProviderInfo metadata from CRDs.
+func (hsh *HostSensorHandler) getCloudProviderInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
+	return hsh.getCRDResources(ctx, k8shostsensor.CloudProviderInfo)
+}
+
+// getCNIInfo returns the list of CNI metadata from CRDs.
+func (hsh *HostSensorHandler) getCNIInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
+	return hsh.getCRDResources(ctx, k8shostsensor.CNIInfo)
+}
+
+// hasCloudProviderInfo iterates over the []hostsensor.HostSensorDataEnvelope list to find info about the cloud provider.
+//
+// If information is found, it returns true; false otherwise.
+func hasCloudProviderInfo(cpi []hostsensor.HostSensorDataEnvelope) bool {
+	for index := range cpi {
+		if !reflect.DeepEqual(cpi[index].GetData(), stdjson.RawMessage("{}\\n")) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// CollectResources collects all required information from CRDs.
+func (hsh *HostSensorHandler) CollectResources(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
+	res := make([]hostsensor.HostSensorDataEnvelope, 0)
+	infoMap := make(map[string]apis.StatusInfo)
+
+	logger.L().Debug("Collecting host sensor data from CRDs")
+
+	var hasCloudProvider bool
+	for _, toPin := range []struct {
+		Resource k8shostsensor.HostSensorResource
+		Query    func(context.Context) ([]hostsensor.HostSensorDataEnvelope, error)
+	}{
+		// queries to CRDs
+		{
+			Resource: k8shostsensor.OsReleaseFile,
+			Query:    hsh.getOsReleaseFile,
+		},
+		{
+			Resource: k8shostsensor.KernelVersion,
+			Query:    hsh.getKernelVersion,
+		},
+		{
+			Resource: k8shostsensor.LinuxSecurityHardeningStatus,
+			Query:    hsh.getLinuxSecurityHardeningStatus,
+		},
+		{
+			Resource: k8shostsensor.OpenPortsList,
+			Query:    hsh.getOpenPortsList,
+		},
+		{
+			Resource: k8shostsensor.LinuxKernelVariables,
+			Query:    hsh.getKernelVariables,
+		},
+		{
+			Resource: k8shostsensor.KubeletInfo,
+			Query:    hsh.getKubeletInfo,
+		},
+		{
+			Resource: k8shostsensor.KubeProxyInfo,
+			Query:    hsh.getKubeProxyInfo,
+		},
+		{
+			Resource: k8shostsensor.CloudProviderInfo,
+			Query:    hsh.getCloudProviderInfo,
+		},
+		{
+			Resource: k8shostsensor.CNIInfo,
+			Query:    hsh.getCNIInfo,
+		},
+		{
+			// ControlPlaneInfo is queried _after_ CloudProviderInfo.
+			Resource: k8shostsensor.ControlPlaneInfo,
+			Query:    hsh.getControlPlaneInfo,
+		},
+	} {
+		k8sInfo := toPin
+
+		if k8sInfo.Resource == k8shostsensor.ControlPlaneInfo && hasCloudProvider {
+			// we retrieve control plane info only if we are not using a cloud provider
+			continue
+		}
+
+		kcData, err := k8sInfo.Query(ctx)
+		if err != nil {
+			addInfoToMap(k8sInfo.Resource, infoMap, err)
+			logger.L().Ctx(ctx).Warning("Failed to get resource from CRD",
+				helpers.String("resource", k8sInfo.Resource.String()),
+				helpers.Error(err))
+		}
+
+		if k8sInfo.Resource == k8shostsensor.CloudProviderInfo {
+			hasCloudProvider = hasCloudProviderInfo(kcData)
+		}
+
+		if len(kcData) > 0 {
+			res = append(res, kcData...)
+		}
+	}
+
+	logger.L().Debug("Done collecting information from CRDs", helpers.Int("totalResources", len(res)))
+	return res, infoMap, nil
+}
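For reference, convertCRDToEnvelope expects per-node objects shaped roughly like the sketch below: group hostdata.kubescape.cloud, version v1beta1, one object per node, with the payload serialized under spec.content (falling back to the whole spec). The field values are illustrative, not captured from a real cluster:

apiVersion: hostdata.kubescape.cloud/v1beta1
kind: OsReleaseFile
metadata:
  name: worker-node-1        # the node name becomes the envelope name
spec:
  content: |                 # read first; the handler falls back to marshaling the whole spec
    NAME="Ubuntu"
    VERSION_ID="22.04"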
122 core/pkg/hostsensorutils/hostsensorcrdshandler.go (new file)
@@ -0,0 +1,122 @@
+package hostsensorutils
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/kubescape/go-logger"
+	"github.com/kubescape/go-logger/helpers"
+	"github.com/kubescape/k8s-interface/k8sinterface"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
+)
+
+const (
+	// Host data CRD API group and version
+	hostDataGroup   = "hostdata.kubescape.cloud"
+	hostDataVersion = "v1beta1"
+)
+
+// HostSensorHandler is a client that reads host sensor data from Kubernetes CRDs.
+//
+// The CRDs are created by the node-agent daemonset running on each node.
+type HostSensorHandler struct {
+	k8sObj        *k8sinterface.KubernetesApi
+	dynamicClient dynamic.Interface
+}
+
+// NewHostSensorHandler builds a new CRD-based host sensor handler.
+func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, _ string) (*HostSensorHandler, error) {
+	if k8sObj == nil {
+		return nil, fmt.Errorf("nil k8s interface received")
+	}
+	config := k8sinterface.GetK8sConfig()
+	if config == nil {
+		return nil, fmt.Errorf("failed to get k8s config")
+	}
+	// force protobuf encoding for requests to the API server
+	config.AcceptContentTypes = "application/vnd.kubernetes.protobuf"
+	config.ContentType = "application/vnd.kubernetes.protobuf"
+
+	// Create dynamic client for CRD access
+	dynamicClient, err := dynamic.NewForConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create dynamic client: %w", err)
+	}
+
+	hsh := &HostSensorHandler{
+		k8sObj:        k8sObj,
+		dynamicClient: dynamicClient,
+	}
+
+	// Verify we can access nodes (basic sanity check)
+	if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
+		if err == nil {
+			err = fmt.Errorf("no nodes to scan")
+		}
+		return hsh, fmt.Errorf("in NewHostSensorHandler, failed to get nodes list: %v", err)
+	}
+
+	return hsh, nil
+}
+
+// Init is a no-op for the CRD-based implementation.
+// The node-agent daemonset is expected to be already deployed and creating CRDs.
+func (hsh *HostSensorHandler) Init(ctx context.Context) error {
+	logger.L().Info("Using CRD-based host sensor data collection (no deployment needed)")
+
+	// Verify that at least one CRD type exists
+	gvr := schema.GroupVersionResource{
+		Group:    hostDataGroup,
+		Version:  hostDataVersion,
+		Resource: "osreleasefiles",
+	}
+
+	list, err := hsh.dynamicClient.Resource(gvr).List(ctx, metav1.ListOptions{Limit: 1})
+	if err != nil {
+		logger.L().Warning("node-agent status: Failed to list OsReleaseFile CRDs - node-agent may not be deployed",
+			helpers.Error(err))
+		return fmt.Errorf("failed to verify CRD access: %w (ensure node-agent is deployed)", err)
+	}
+
+	if len(list.Items) == 0 {
+		logger.L().Warning("node-agent status: No OsReleaseFile CRDs found - node-agent may not be running or sensing yet")
+	} else {
+		logger.L().Info("node-agent status: Successfully verified CRD access", helpers.Int("osReleaseFiles", len(list.Items)))
+	}
+
+	return nil
+}
+
+// TearDown is a no-op for the CRD-based implementation.
+// CRDs are managed by the node-agent daemonset lifecycle.
+func (hsh *HostSensorHandler) TearDown() error {
+	logger.L().Debug("CRD-based host sensor teardown (no-op)")
+	return nil
+}
+
+// listCRDResources is a generic function to list CRD resources and convert them to the expected format.
+func (hsh *HostSensorHandler) listCRDResources(ctx context.Context, resourceName, kind string) ([]unstructured.Unstructured, error) {
+	gvr := schema.GroupVersionResource{
+		Group:    hostDataGroup,
+		Version:  hostDataVersion,
+		Resource: resourceName,
+	}
+
+	logger.L().Debug("Listing CRD resources",
+		helpers.String("resource", resourceName),
+		helpers.String("kind", kind))
+
+	list, err := hsh.dynamicClient.Resource(gvr).List(ctx, metav1.ListOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to list %s CRDs: %w", kind, err)
+	}
+
+	logger.L().Debug("Retrieved CRD resources",
+		helpers.String("kind", kind),
+		helpers.Int("count", len(list.Items)))
+
+	return list.Items, nil
+}
@@ -1,457 +0,0 @@
|
||||
package hostsensorutils
|
||||
|
||||
import (
|
||||
"context"
|
||||
_ "embed"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/kubescape/v3/core/cautils"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
)
|
||||
|
||||
var (
|
||||
//go:embed hostsensor.yaml
|
||||
hostSensorYAML string
|
||||
namespaceWasPresent bool
|
||||
)
|
||||
|
||||
const portName string = "scanner"
|
||||
|
||||
// HostSensorHandler is a client that interacts with a host-scanner component deployed on nodes.
|
||||
//
|
||||
// The API exposed by the host sensor is defined here: https://github.com/kubescape/host-scanner
|
||||
type HostSensorHandler struct {
|
||||
hostSensorPort int32
|
||||
hostSensorPodNames map[string]string //map from pod names to node names
|
||||
hostSensorUnscheduledPodNames map[string]string //map from pod names to node names
|
||||
k8sObj *k8sinterface.KubernetesApi
|
||||
daemonSet *appsv1.DaemonSet
|
||||
podListLock sync.RWMutex
|
||||
gracePeriod int64
|
||||
workerPool workerPool
|
||||
}
|
||||
|
||||
// NewHostSensorHandler builds a new http client to the host-scanner API.
|
||||
func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile string) (*HostSensorHandler, error) {
|
||||
if k8sObj == nil {
|
||||
return nil, fmt.Errorf("nil k8s interface received")
|
||||
}
|
||||
|
||||
if hostSensorYAMLFile != "" {
|
||||
d, err := loadHostSensorFromFile(hostSensorYAMLFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load host-scanner yaml file, reason: %w", err)
|
||||
}
|
||||
hostSensorYAML = d
|
||||
}
|
||||
|
||||
hsh := &HostSensorHandler{
|
||||
k8sObj: k8sObj,
|
||||
hostSensorPodNames: map[string]string{},
|
||||
hostSensorUnscheduledPodNames: map[string]string{},
|
||||
gracePeriod: int64(15),
|
||||
workerPool: newWorkerPool(),
|
||||
}
|
||||
|
||||
// Don't deploy on a cluster with no nodes. Some cloud providers prevent the termination of K8s objects for cluster with no nodes!!!
|
||||
if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
|
||||
if err == nil {
|
||||
err = fmt.Errorf("no nodes to scan")
|
||||
}
|
||||
return hsh, fmt.Errorf("in NewHostSensorHandler, failed to get nodes list: %v", err)
|
||||
}
|
||||
|
||||
return hsh, nil
|
||||
}
|
||||
|
||||
// Init deploys the host-scanner and start watching the pods on the host.
|
||||
func (hsh *HostSensorHandler) Init(ctx context.Context) error {
|
||||
// deploy the YAML
|
||||
// store namespace + port
|
||||
// store pod names
|
||||
// make sure all pods are running, after X seconds treat has running anyway, and log an error on the pods not running yet
|
||||
logger.L().Info("Installing host scanner")
|
||||
|
||||
// log is used to avoid log duplication
|
||||
// coming from the different host-scanner instances
|
||||
log := NewLogCoupling()
|
||||
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
|
||||
if err := hsh.applyYAML(ctx); err != nil {
|
||||
return fmt.Errorf("failed to apply host scanner YAML, reason: %v", err)
|
||||
}
|
||||
|
||||
hsh.populatePodNamesToNodeNames(ctx, log)
|
||||
if err := hsh.checkPodForEachNode(); err != nil {
|
||||
return fmt.Errorf("%s: %v", failedToValidateHostSensorPodStatus, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkNamespaceWasPresent check if the given namespace was already present on kubernetes and in "Active" state.
|
||||
// Return true in case it find the namespace on the list, false otherwise.
|
||||
// In case we have some error with the kubernetes APIs, it returns an error.
|
||||
func (hsh *HostSensorHandler) checkNamespaceWasPresent(namespace string) bool {
|
||||
ns, err := hsh.k8sObj.KubernetesClient.
|
||||
CoreV1().
|
||||
Namespaces().
|
||||
Get(hsh.k8sObj.Context, namespace, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// check also if it is in "Active" state.
|
||||
if ns.Status.Phase != corev1.NamespaceActive {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// namespaceWasPresent return the namespaceWasPresent variable value.
|
||||
func (hsh *HostSensorHandler) namespaceWasPresent() bool {
|
||||
return namespaceWasPresent
|
||||
}
|
||||
|
||||

func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
	workloads, err := cautils.ReadFile([]byte(hostSensorYAML), cautils.YAML_FILE_FORMAT)
	if err != nil {
		return fmt.Errorf("failed to read YAML files, reason: %v", err)
	}

	// Get namespace name
	namespaceName := cautils.GetConfigMapNamespace()
	for i := range workloads {
		if workloads[i].GetKind() == "Namespace" {
			namespaceName = workloads[i].GetName()
			break
		}
	}
	// check if namespace was already present on kubernetes
	namespaceWasPresent = hsh.checkNamespaceWasPresent(namespaceName)

	// Update workload data before applying
	for i := range workloads {
		w := workloadinterface.NewWorkloadObj(workloads[i].GetObject())
		if w == nil {
			return fmt.Errorf("invalid workload: %v", workloads[i].GetObject())
		}
		// set namespace in all objects
		if w.GetKind() != "Namespace" {
			logger.L().Debug("Setting namespace", helpers.String("kind", w.GetKind()), helpers.String("name", w.GetName()), helpers.String("namespace", namespaceName))
			w.SetNamespace(namespaceName)
		}
		// Get container port
		if w.GetKind() == "DaemonSet" {
			containers, err := w.GetContainers()
			if err != nil {
				if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
					logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
				}
				return fmt.Errorf("container not found in DaemonSet: %v", err)
			}
			for j := range containers {
				for k := range containers[j].Ports {
					if containers[j].Ports[k].Name == portName {
						hsh.hostSensorPort = containers[j].Ports[k].ContainerPort
					}
				}
			}
		}

		// Apply workload
		var newWorkload k8sinterface.IWorkload
		var e error

		if g, err := hsh.k8sObj.GetWorkload(w.GetNamespace(), w.GetKind(), w.GetName()); err == nil && g != nil {
			newWorkload, e = hsh.k8sObj.UpdateWorkload(w)
		} else {
			newWorkload, e = hsh.k8sObj.CreateWorkload(w)
		}
		if e != nil {
			if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
				logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
			}
			return fmt.Errorf("failed to create/update YAML, reason: %v", e)
		}

		// Save DaemonSet
		if newWorkload.GetKind() == "DaemonSet" {
			b, err := json.Marshal(newWorkload.GetObject())
			if err != nil {
				if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
					logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
				}
				return fmt.Errorf("failed to Marshal YAML of DaemonSet, reason: %v", err)
			}
			var ds appsv1.DaemonSet
			if err := json.Unmarshal(b, &ds); err != nil {
				if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
					logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
				}
				return fmt.Errorf("failed to Unmarshal YAML of DaemonSet, reason: %v", err)
			}
			hsh.daemonSet = &ds
		}
	}

	return nil
}

func (hsh *HostSensorHandler) checkPodForEachNode() error {
	deadline := time.Now().Add(time.Second * 100)
	for {
		nodesList, err := hsh.k8sObj.KubernetesClient.CoreV1().Nodes().List(hsh.k8sObj.Context, metav1.ListOptions{})
		if err != nil {
			return fmt.Errorf("in checkPodForEachNode, failed to get nodes list: %v", err)
		}

		hsh.podListLock.RLock()
		podsNum := len(hsh.hostSensorPodNames)
		unschedPodNum := len(hsh.hostSensorUnscheduledPodNames)
		hsh.podListLock.RUnlock()
		if len(nodesList.Items) <= podsNum+unschedPodNum {
			break
		}

		if time.Now().After(deadline) {
			hsh.podListLock.RLock()
			podsMap := hsh.hostSensorPodNames
			hsh.podListLock.RUnlock()
			return fmt.Errorf("host-scanner pods number (%d) differs from nodes number (%d) after deadline exceeded. Kubescape will take data only from the pods below: %v",
				podsNum, len(nodesList.Items), podsMap)
		}
		time.Sleep(100 * time.Millisecond)
	}

	return nil
}

// populatePodNamesToNodeNames initiates a routine that keeps the pod list updated.
func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context, log *LogsMap) {
	go func() {
		var watchRes watch.Interface
		var err error
		watchRes, err = hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.daemonSet.Namespace).Watch(hsh.k8sObj.Context, metav1.ListOptions{
			Watch:         true,
			LabelSelector: fmt.Sprintf("name=%s", hsh.daemonSet.Spec.Template.Labels["name"]),
		})
		if err != nil {
			logger.L().Ctx(ctx).Warning(failedToWatchOverDaemonSetPods, helpers.Error(err))
		}
		if watchRes == nil {
			logger.L().Ctx(ctx).Error("failed to watch over DaemonSet pods, will not be able to get host-scanner data")
			return
		}

		for eve := range watchRes.ResultChan() {
			pod, ok := eve.Object.(*corev1.Pod)
			if !ok {
				continue
			}
			go hsh.updatePodInListAtomic(ctx, eve.Type, pod, log)
		}
	}()
}

func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventType watch.EventType, podObj *corev1.Pod, log *LogsMap) {
	hsh.podListLock.Lock()
	defer hsh.podListLock.Unlock()

	switch eventType {
	case watch.Added, watch.Modified:
		if podObj.Status.Phase == corev1.PodRunning && len(podObj.Status.ContainerStatuses) > 0 &&
			podObj.Status.ContainerStatuses[0].Ready {
			hsh.hostSensorPodNames[podObj.ObjectMeta.Name] = podObj.Spec.NodeName
			delete(hsh.hostSensorUnscheduledPodNames, podObj.ObjectMeta.Name)
		} else {
			if podObj.Status.Phase == corev1.PodPending && len(podObj.Status.Conditions) > 0 &&
				podObj.Status.Conditions[0].Reason == corev1.PodReasonUnschedulable {
				nodeName := ""
				if podObj.Spec.Affinity != nil && podObj.Spec.Affinity.NodeAffinity != nil &&
					podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
					len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) > 0 &&
					len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields) > 0 &&
					len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values) > 0 {
					nodeName = podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
				}
				if !log.isDuplicated(oneHostSensorPodIsUnabledToSchedule) {
					logger.L().Ctx(ctx).Warning(oneHostSensorPodIsUnabledToSchedule,
						helpers.String("message", podObj.Status.Conditions[0].Message))
					log.update(oneHostSensorPodIsUnabledToSchedule)
				}
				if nodeName != "" {
					hsh.hostSensorUnscheduledPodNames[podObj.ObjectMeta.Name] = nodeName
				}
			} else {
				delete(hsh.hostSensorPodNames, podObj.ObjectMeta.Name)
			}
		}
	default:
		delete(hsh.hostSensorPodNames, podObj.ObjectMeta.Name)
	}
}

// tearDownHostScanner manages the host-scanner deletion.
func (hsh *HostSensorHandler) tearDownHostScanner(namespace string) error {
	client := hsh.k8sObj.KubernetesClient

	// delete the host-scanner DaemonSet
	err := client.AppsV1().
		DaemonSets(namespace).
		Delete(
			hsh.k8sObj.Context,
			hsh.daemonSet.Name,
			metav1.DeleteOptions{
				GracePeriodSeconds: &hsh.gracePeriod,
			},
		)
	if err != nil {
		return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
	}

	// wait for the DaemonSet to be deleted
	err = hsh.waitHostScannerDeleted(hsh.k8sObj.Context)
	if err != nil {
		return fmt.Errorf("failed waiting for host-scanner DaemonSet deletion: %v", err)
	}

	return nil
}

// tearDownNamespace manages the given namespace deletion.
// First, it checks whether the namespace was already present before installing the host-scanner;
// in that case it skips the deletion.
// If it was not, it patches the namespace to remove the finalizers,
// and finally deletes it.
func (hsh *HostSensorHandler) tearDownNamespace(namespace string) error {
	// if the namespace was already present on Kubernetes (before installing the host-scanner),
	// then we shouldn't delete it.
	if hsh.namespaceWasPresent() {
		return nil
	}
	// to make it more readable we store the object client in a variable
	client := hsh.k8sObj.KubernetesClient

	// prepare the JSON patch that removes finalizers from the namespace
	patchData := `
	[
		{
			"op": "replace",
			"path": "/metadata/finalizers",
			"value": []
		}
	]
	`
	// patch the namespace object, removing finalizers
	_, err := client.CoreV1().
		Namespaces().
		Patch(
			hsh.k8sObj.Context,
			namespace,
			types.JSONPatchType,
			[]byte(patchData),
			metav1.PatchOptions{},
		)
	if err != nil {
		return fmt.Errorf("failed to remove finalizers from Namespace: %v", err)
	}

	// delete the namespace object
	err = client.CoreV1().
		Namespaces().
		Delete(
			hsh.k8sObj.Context,
			namespace,
			metav1.DeleteOptions{
				GracePeriodSeconds: &hsh.gracePeriod,
			},
		)
	if err != nil {
		return fmt.Errorf("failed to delete %s Namespace: %v", namespace, err)
	}

	return nil
}

func (hsh *HostSensorHandler) TearDown() error {
	namespace := hsh.GetNamespace()
	// delete DaemonSet
	if err := hsh.tearDownHostScanner(namespace); err != nil {
		return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
	}
	// delete Namespace
	if err := hsh.tearDownNamespace(namespace); err != nil {
		return fmt.Errorf("failed to delete host-scanner Namespace: %v", err)
	}

	return nil
}

func (hsh *HostSensorHandler) GetNamespace() string {
	if hsh.daemonSet == nil {
		return ""
	}
	return hsh.daemonSet.Namespace
}

func loadHostSensorFromFile(hostSensorYAMLFile string) (string, error) {
	dat, err := os.ReadFile(hostSensorYAMLFile)
	if err != nil {
		return "", err
	}

	if len(dat) == 0 {
		return "", fmt.Errorf("empty file")
	}

	if !cautils.IsYaml(hostSensorYAMLFile) {
		return "", fmt.Errorf("invalid file format")
	}

	return string(dat), nil
}

// waitHostScannerDeleted watches for the host-scanner deletion.
// In case it fails, it returns an error.
func (hsh *HostSensorHandler) waitHostScannerDeleted(ctx context.Context) error {
	labelSelector := fmt.Sprintf("name=%s", hsh.daemonSet.Name)
	opts := metav1.ListOptions{
		TypeMeta:      metav1.TypeMeta{},
		LabelSelector: labelSelector,
		FieldSelector: "",
	}
	watcher, err := hsh.k8sObj.KubernetesClient.CoreV1().
		Pods(hsh.daemonSet.Namespace).
		Watch(ctx, opts)
	if err != nil {
		return err
	}
	defer watcher.Stop()

	for {
		select {
		case event := <-watcher.ResultChan():
			if event.Type == watch.Deleted {
				return nil
			}
		case <-ctx.Done():
			return nil
		}
	}
}

@@ -1,232 +0,0 @@
package hostsensorutils

import (
	"context"
	"os"
	"path/filepath"
	"testing"

	"github.com/kubescape/kubescape/v3/internal/testutils"
	"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestHostSensorHandler(t *testing.T) {
	t.Parallel()
	ctx := context.Background()

	t.Run("with default manifest", func(t *testing.T) {
		t.Run("should build host sensor", func(t *testing.T) {
			k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponses()))
			h, err := NewHostSensorHandler(k8s, "")
			require.NoError(t, err)
			require.NotNil(t, h)

			t.Run("should initialize host sensor", func(t *testing.T) {
				require.NoError(t, h.Init(ctx))

				w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
				require.NoError(t, err)
				w.Stop()

				require.Len(t, h.hostSensorPodNames, 2)
			})

			t.Run("should return namespace", func(t *testing.T) {
				require.Equal(t, "kubescape", h.GetNamespace())
			})

			t.Run("should collect resources from pods - happy path", func(t *testing.T) {
				envelope, status, err := h.CollectResources(ctx)
				require.NoError(t, err)

				require.Len(t, envelope, 9*2) // has cloud provider, no control plane requested
				require.Len(t, status, 0)

				foundControl, foundProvider := false, false
				for _, sensed := range envelope {
					if sensed.Kind == ControlPlaneInfo.String() {
						foundControl = true
					}
					if sensed.Kind == CloudProviderInfo.String() {
						foundProvider = hasCloudProviderInfo([]hostsensor.HostSensorDataEnvelope{sensed})
					}
				}

				require.False(t, foundControl)
				require.True(t, foundProvider)
			})
		})

		t.Run("should build host sensor without cloud provider", func(t *testing.T) {
			k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponsesNoCloudProvider()))
			h, err := NewHostSensorHandler(k8s, "")
			require.NoError(t, err)
			require.NotNil(t, h)

			t.Run("should initialize host sensor", func(t *testing.T) {
				require.NoError(t, h.Init(ctx))

				w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
				require.NoError(t, err)
				w.Stop()

				require.Len(t, h.hostSensorPodNames, 2)
			})

			t.Run("should get version", func(t *testing.T) {
				version, err := h.getVersion()
				require.NoError(t, err)
				require.Equal(t, "v1.0.45", version)
			})

			t.Run("ForwardToPod is a stub, not implemented", func(t *testing.T) {
				resp, err := h.forwardToPod("pod1", "/version")
				require.Contains(t, err.Error(), "not implemented")
				require.Nil(t, resp)
			})

			t.Run("should collect resources from pods", func(t *testing.T) {
				envelope, status, err := h.CollectResources(ctx)
				require.NoError(t, err)

				require.Len(t, envelope, 10*2) // has empty cloud provider, has control plane info
				require.Len(t, status, 0)

				foundControl, foundProvider := false, false
				for _, sensed := range envelope {
					if sensed.Kind == ControlPlaneInfo.String() {
						foundControl = true
					}
					if sensed.Kind == CloudProviderInfo.String() {
						foundProvider = hasCloudProviderInfo([]hostsensor.HostSensorDataEnvelope{sensed})
					}
				}

				require.True(t, foundControl)
				require.False(t, foundProvider)
			})
		})

		t.Run("should build host sensor with error in response from /version", func(t *testing.T) {
			k8s := NewKubernetesApiMock(WithNode(mockNode1()),
				WithPod(mockPod1()),
				WithPod(mockPod2()),
				WithResponses(mockResponsesNoCloudProvider()),
				WithErrorResponse(RestURL{"http", "pod1", "7888", "/version"}), // this endpoint will return an error from this pod
				WithErrorResponse(RestURL{"http", "pod2", "7888", "/version"}), // this endpoint will return an error from this pod
			)

			h, err := NewHostSensorHandler(k8s, "")
			require.NoError(t, err)
			require.NotNil(t, h)

			t.Run("should initialize host sensor", func(t *testing.T) {
				require.NoError(t, h.Init(ctx))

				w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
				require.NoError(t, err)
				w.Stop()

				require.Len(t, h.hostSensorPodNames, 2)
			})

			t.Run("should NOT be able to get version", func(t *testing.T) {
				// NOTE: GetVersion might be successful if only one pod responds successfully.
				// In order to ensure an error, we need ALL pods to error.
				_, err := h.getVersion()
				require.Error(t, err)
				require.Contains(t, err.Error(), "mock")
			})
		})

		t.Run("should FAIL to build host sensor because there are no nodes", func(t *testing.T) {
			h, err := NewHostSensorHandler(NewKubernetesApiMock(), "")
			require.Error(t, err)
			require.NotNil(t, h)
			require.Contains(t, err.Error(), "no nodes to scan")
		})
	})

	t.Run("should NOT build host sensor with nil k8s API", func(t *testing.T) {
		h, err := NewHostSensorHandler(nil, "")
		require.Error(t, err)
		require.Nil(t, h)
	})

	t.Run("with manifest from YAML file", func(t *testing.T) {
		t.Run("should build host sensor", func(t *testing.T) {
			k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponses()))
			h, err := NewHostSensorHandler(k8s, filepath.Join(testutils.CurrentDir(), "hostsensor.yaml"))
			require.NoError(t, err)
			require.NotNil(t, h)

			t.Run("should initialize host sensor", func(t *testing.T) {
				require.NoError(t, h.Init(ctx))

				w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
				require.NoError(t, err)
				w.Stop()

				require.Len(t, h.hostSensorPodNames, 2)
			})
		})
	})

	t.Run("with manifest from invalid YAML file", func(t *testing.T) {
		t.Run("should NOT build host sensor", func(t *testing.T) {
			var invalid string
			t.Run("should create temp file", func(t *testing.T) {
				file, err := os.CreateTemp("", "*.yaml")
				require.NoError(t, err)
				t.Cleanup(func() {
					_ = os.Remove(file.Name())
				})
				_, err = file.Write([]byte(" x: 1"))
				require.NoError(t, err)

				invalid = file.Name()
				require.NoError(t, file.Close())
			})

			k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponses()))
			_, err := NewHostSensorHandler(k8s, filepath.Join(testutils.CurrentDir(), invalid))
			require.Error(t, err)
		})
	})

	// TODO(test coverage): the following cases are not covered by tests yet.
	//
	// * applyYAML fails
	// * checkPodForEachNode fails, or times out
	// * non-active namespace
	// * getPodList fails when GetVersion
	// * getPodList fails when CollectResources
	// * error cases that trigger a namespace tear-down
	// * watch pods with a Delete event
	// * explicit TearDown()
	//
	// Notice that the package doesn't currently pass tests with the race detector enabled.
}

func TestLoadHostSensorFromFile_NoError(t *testing.T) {
	content, err := loadHostSensorFromFile("testdata/hostsensor.yaml")
	assert.NotEqual(t, "", content)
	assert.Nil(t, err)
}

func TestLoadHostSensorFromFile_Error(t *testing.T) {
	content, err := loadHostSensorFromFile("testdata/hostsensor_invalid.yaml")
	assert.Equal(t, "", content)
	assert.NotNil(t, err)

	content, err = loadHostSensorFromFile("testdata/empty_hostsensor.yaml")
	assert.Equal(t, "", content)
	assert.NotNil(t, err)

	content, err = loadHostSensorFromFile("testdata/notAYamlFile.txt")
	assert.Equal(t, "", content)
	assert.NotNil(t, err)
}

@@ -1,293 +0,0 @@
package hostsensorutils

import (
	"context"
	stdjson "encoding/json"
	"errors"
	"fmt"
	"reflect"
	"strings"
	"sync"

	"github.com/kubescape/go-logger"
	"github.com/kubescape/k8s-interface/k8sinterface"
	"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
	"github.com/kubescape/opa-utils/reporthandling/apis"
)

// getPodList clones the internal list of pods being watched as a map of pod names.
func (hsh *HostSensorHandler) getPodList() map[string]string {
	hsh.podListLock.RLock()
	res := make(map[string]string, len(hsh.hostSensorPodNames))
	for k, v := range hsh.hostSensorPodNames {
		res[k] = v
	}
	hsh.podListLock.RUnlock()

	return res
}

// httpGetToPod sends the request to a pod using the HostSensorPort.
func (hsh *HostSensorHandler) httpGetToPod(podName, path string) ([]byte, error) {
	restProxy := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.daemonSet.Namespace).ProxyGet("http", podName, fmt.Sprintf("%d", hsh.hostSensorPort), path, map[string]string{})
	return restProxy.DoRaw(hsh.k8sObj.Context)
}

// getResourcesFromPod sends the request and packs the response as a HostSensorDataEnvelope.
func (hsh *HostSensorHandler) getResourcesFromPod(podName, nodeName string, resourceKind scannerResource, path string) (hostsensor.HostSensorDataEnvelope, error) {
	resBytes, err := hsh.httpGetToPod(podName, path)
	if err != nil {
		return hostsensor.HostSensorDataEnvelope{}, err
	}

	hostSensorDataEnvelope := hostsensor.HostSensorDataEnvelope{}
	hostSensorDataEnvelope.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
	hostSensorDataEnvelope.SetKind(resourceKind.String())
	hostSensorDataEnvelope.SetName(nodeName)
	hostSensorDataEnvelope.SetData(resBytes)

	return hostSensorDataEnvelope, nil
}

// forwardToPod is currently not implemented.
func (hsh *HostSensorHandler) forwardToPod(podName, path string) ([]byte, error) {
	// NOT IN USE:
	// ---
	// spawn port forwarding
	// req := hsh.k8sObj.KubernetesClient.CoreV1().RESTClient().Post()
	// req = req.Name(podName)
	// req = req.Namespace(hsh.DaemonSet.Namespace)
	// req = req.Resource("pods")
	// req = req.SubResource("portforward")
	// ----
	// https://github.com/gianarb/kube-port-forward
	// fullPath := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward",
	//	hsh.DaemonSet.Namespace, podName)
	// transport, upgrader, err := spdy.RoundTripperFor(hsh.k8sObj.KubernetesClient.config)
	// if err != nil {
	//	return nil, err
	// }
	// hostIP := strings.TrimLeft(req.RestConfig.Host, "htps:/")
	// dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, &url.URL{Scheme: "http", Path: path, Host: hostIP})
	return nil, errors.New("not implemented")
}

// sendAllPodsHTTPGETRequest fills the raw bytes response in the envelope and the node name, but not the GroupVersionKind,
// so the caller is responsible for converting the raw data to structured data and adding the GroupVersionKind details.
//
// The function produces a worker pool with a fixed number of workers.
//
// For each node, the request is pushed to the jobs channel; a worker sends the request and pushes the result to the results channel.
// When all workers have finished, the function returns the list of results.
func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(ctx context.Context, path string, requestKind scannerResource) ([]hostsensor.HostSensorDataEnvelope, error) {
	podList := hsh.getPodList()
	res := make([]hostsensor.HostSensorDataEnvelope, 0, len(podList))
	var wg sync.WaitGroup

	// initialization of the channels
	hsh.workerPool.init(len(podList))

	// log is used to avoid log duplication
	// coming from the different host-scanner instances
	log := NewLogCoupling()

	hsh.workerPool.hostSensorApplyJobs(podList, path, requestKind)
	hsh.workerPool.hostSensorGetResults(&res)
	hsh.workerPool.createWorkerPool(ctx, hsh, &wg, log)
	hsh.workerPool.waitForDone(&wg)

	return res, nil
}

// getVersion returns the version of the deployed host scanner.
//
// NOTE: we pick the version from the first responding pod.
func (hsh *HostSensorHandler) getVersion() (string, error) {
	// loop over pods and send the request to each of them via the API-server proxy
	podList := hsh.getPodList()

	// initialization of the channels
	hsh.workerPool.init(len(podList))
	hsh.workerPool.hostSensorApplyJobs(podList, "/version", "version")
	for job := range hsh.workerPool.jobs {
		resBytes, err := hsh.httpGetToPod(job.podName, job.path)
		if err != nil {
			return "", err
		}
		version := strings.ReplaceAll(string(resBytes), "\"", "")
		version = strings.ReplaceAll(version, "\n", "")

		return version, nil
	}

	return "", nil
}

// getKernelVariables returns the list of Linux kernel variables.
func (hsh *HostSensorHandler) getKernelVariables(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and send the request to each of them
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/LinuxKernelVariables", LinuxKernelVariables)
}

// getOpenPortsList returns the list of open ports.
func (hsh *HostSensorHandler) getOpenPortsList(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and send the request to each of them
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/openedPorts", OpenPortsList)
}

// getLinuxSecurityHardeningStatus returns the list of LinuxSecurityHardeningStatus metadata.
func (hsh *HostSensorHandler) getLinuxSecurityHardeningStatus(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and send the request to each of them
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/linuxSecurityHardening", LinuxSecurityHardeningStatus)
}

// getKubeletInfo returns the list of kubelet metadata.
func (hsh *HostSensorHandler) getKubeletInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and send the request to each of them
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeletInfo", KubeletInfo)
}

// getKubeProxyInfo returns the list of kubeProxy metadata.
func (hsh *HostSensorHandler) getKubeProxyInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and send the request to each of them
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeProxyInfo", KubeProxyInfo)
}

// getControlPlaneInfo returns the list of controlPlaneInfo metadata.
func (hsh *HostSensorHandler) getControlPlaneInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and send the request to each of them
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/controlPlaneInfo", ControlPlaneInfo)
}

// getCloudProviderInfo returns the list of cloudProviderInfo metadata.
func (hsh *HostSensorHandler) getCloudProviderInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and send the request to each of them
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/cloudProviderInfo", CloudProviderInfo)
}

// getCNIInfo returns the list of CNI metadata.
func (hsh *HostSensorHandler) getCNIInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and send the request to each of them
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/CNIInfo", CNIInfo)
}

// getKernelVersion returns the list of kernelVersion metadata.
func (hsh *HostSensorHandler) getKernelVersion(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and send the request to each of them
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/kernelVersion", "KernelVersion")
}

// getOsReleaseFile returns the list of osRelease metadata.
func (hsh *HostSensorHandler) getOsReleaseFile(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and send the request to each of them
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/osRelease", "OsReleaseFile")
}

// hasCloudProviderInfo iterates over the []hostsensor.HostSensorDataEnvelope list to find info about the cloud provider.
//
// If information is found, it returns true; false otherwise.
func hasCloudProviderInfo(cpi []hostsensor.HostSensorDataEnvelope) bool {
	for index := range cpi {
		if !reflect.DeepEqual(cpi[index].GetData(), stdjson.RawMessage("{}\n")) {
			return true
		}
	}

	return false
}
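
// NOTE (illustrative sketch, not part of the source above): the check hinges on the
// host-scanner returning a literal "{}\n" payload when no cloud provider is detected.
// A minimal standalone demonstration of that comparison, assuming the envelope's
// SetData/GetData round-trip raw JSON as they are used above; the populated payload's
// field name is made up for the example.
package main

import (
	stdjson "encoding/json"
	"fmt"
	"reflect"

	"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
)

func main() {
	empty := hostsensor.HostSensorDataEnvelope{}
	empty.SetData([]byte("{}\n")) // what the scanner returns with no provider

	populated := hostsensor.HostSensorDataEnvelope{}
	populated.SetData([]byte(`{"provider":"illustrative"}`)) // hypothetical field

	for _, e := range []hostsensor.HostSensorDataEnvelope{empty, populated} {
		hasInfo := !reflect.DeepEqual(e.GetData(), stdjson.RawMessage("{}\n"))
		fmt.Println(hasInfo) // false, then true
	}
}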

// CollectResources collects all required information about all the pods for this host.
func (hsh *HostSensorHandler) CollectResources(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
	res := make([]hostsensor.HostSensorDataEnvelope, 0)
	infoMap := make(map[string]apis.StatusInfo)
	if hsh.daemonSet == nil {
		return res, nil, nil
	}

	logger.L().Debug("Accessing host scanner")
	version, err := hsh.getVersion()
	if err != nil {
		logger.L().Ctx(ctx).Warning(err.Error())
	}

	if len(version) > 0 {
		logger.L().Info("Host scanner version : " + version)
	} else {
		logger.L().Info("Unknown host scanner version")
	}

	var hasCloudProvider bool
	for _, toPin := range []struct {
		Resource scannerResource
		Query    func(context.Context) ([]hostsensor.HostSensorDataEnvelope, error)
	}{
		// queries to the deployed host-scanner
		{
			Resource: OsReleaseFile,
			Query:    hsh.getOsReleaseFile,
		},
		{
			Resource: KernelVersion,
			Query:    hsh.getKernelVersion,
		},
		{
			Resource: LinuxSecurityHardeningStatus,
			Query:    hsh.getLinuxSecurityHardeningStatus,
		},
		{
			Resource: OpenPortsList,
			Query:    hsh.getOpenPortsList,
		},
		{
			Resource: LinuxKernelVariables,
			Query:    hsh.getKernelVariables,
		},
		{
			Resource: KubeletInfo,
			Query:    hsh.getKubeletInfo,
		},
		{
			Resource: KubeProxyInfo,
			Query:    hsh.getKubeProxyInfo,
		},
		{
			Resource: CloudProviderInfo,
			Query:    hsh.getCloudProviderInfo,
		},
		{
			Resource: CNIInfo,
			Query:    hsh.getCNIInfo,
		},
		{
			// ControlPlaneInfo is queried _after_ CloudProviderInfo.
			Resource: ControlPlaneInfo,
			Query:    hsh.getControlPlaneInfo,
		},
	} {
		k8sInfo := toPin

		if k8sInfo.Resource == ControlPlaneInfo && hasCloudProvider {
			// we retrieve control plane info only if we are not using a cloud provider
			continue
		}

		kcData, err := k8sInfo.Query(ctx)
		if err != nil {
			addInfoToMap(k8sInfo.Resource, infoMap, err)
			logger.L().Ctx(ctx).Warning(err.Error())
		}

		if k8sInfo.Resource == CloudProviderInfo {
			hasCloudProvider = hasCloudProviderInfo(kcData)
		}

		if len(kcData) > 0 {
			res = append(res, kcData...)
		}
	}

	logger.L().Debug("Done reading information from host scanner")
	return res, infoMap, nil
}
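
// For orientation, a minimal sketch of how a caller could drive the handler through
// the IHostSensor interface shown in the next hunk. collectHostData is a hypothetical
// helper, not part of the package, and error handling is trimmed for brevity.
func collectHostData(ctx context.Context, sensor IHostSensor) error {
	if err := sensor.Init(ctx); err != nil {
		return err
	}
	// best-effort cleanup: removes the DaemonSet and, if we created it, the namespace
	defer func() { _ = sensor.TearDown() }()

	envelopes, statuses, err := sensor.CollectResources(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("collected %d envelopes, %d skipped resources\n", len(envelopes), len(statuses))
	return nil
}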

@@ -11,5 +11,4 @@ type IHostSensor interface {
	Init(ctx context.Context) error
	TearDown() error
	CollectResources(context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error)
	GetNamespace() string
}

@@ -27,7 +27,3 @@ func (hshm *HostSensorHandlerMock) TearDown() error {
func (hshm *HostSensorHandlerMock) CollectResources(_ context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
	return []hostsensor.HostSensorDataEnvelope{}, nil, nil
}

func (hshm *HostSensorHandlerMock) GetNamespace() string {
	return ""
}

@@ -1,98 +0,0 @@
package hostsensorutils

import (
	"context"
	"sync"

	"github.com/kubescape/go-logger"
	"github.com/kubescape/go-logger/helpers"
	"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
)

const noOfWorkers int = 10

type job struct {
	podName     string
	nodeName    string
	requestKind scannerResource
	path        string
}

type workerPool struct {
	jobs        chan job
	results     chan hostsensor.HostSensorDataEnvelope
	done        chan bool
	noOfWorkers int
}

func newWorkerPool() workerPool {
	wp := workerPool{}
	wp.noOfWorkers = noOfWorkers
	wp.init()
	return wp
}

func (wp *workerPool) init(noOfPods ...int) {
	if len(noOfPods) > 0 && noOfPods[0] < noOfWorkers {
		wp.noOfWorkers = noOfPods[0]
	}
	// init the channels
	wp.jobs = make(chan job, noOfWorkers)
	wp.results = make(chan hostsensor.HostSensorDataEnvelope, noOfWorkers)
	wp.done = make(chan bool)
}

// The worker takes a job out of the chan, executes the request, and pushes the result to the results chan
func (wp *workerPool) hostSensorWorker(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup, log *LogsMap) {
	defer wg.Done()
	for job := range wp.jobs {
		hostSensorDataEnvelope, err := hsh.getResourcesFromPod(job.podName, job.nodeName, job.requestKind, job.path)
		if err != nil && !log.isDuplicated(failedToGetData) {
			logger.L().Ctx(ctx).Warning(failedToGetData, helpers.String("path", job.path), helpers.Error(err))
			log.update(failedToGetData)
			continue
		}
		wp.results <- hostSensorDataEnvelope
	}
}

func (wp *workerPool) createWorkerPool(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup, log *LogsMap) {
	for i := 0; i < noOfWorkers; i++ {
		wg.Add(1)
		go wp.hostSensorWorker(ctx, hsh, wg, log)
	}
}

func (wp *workerPool) waitForDone(wg *sync.WaitGroup) {
	// Waiting for workers to finish
	wg.Wait()
	close(wp.results)

	// Waiting for the results to be processed
	<-wp.done
}

func (wp *workerPool) hostSensorGetResults(result *[]hostsensor.HostSensorDataEnvelope) {
	go func() {
		for res := range wp.results {
			*result = append(*result, res)
		}
		wp.done <- true
	}()
}

func (wp *workerPool) hostSensorApplyJobs(podList map[string]string, path string, requestKind scannerResource) {
	go func() {
		for podName, nodeName := range podList {
			thisJob := job{
				podName:     podName,
				nodeName:    nodeName,
				requestKind: requestKind,
				path:        path,
			}
			wp.jobs <- thisJob
		}
		close(wp.jobs)
	}()
}
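
// NOTE (illustrative sketch, not part of the source above): the pool terminates
// cleanly only because of its call ordering — hostSensorApplyJobs queues jobs and
// closes the jobs channel, hostSensorGetResults drains results concurrently, and
// waitForDone closes results once every worker has returned. A self-contained
// sketch of the same fan-out/fan-in shape with plain integers:
package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := make(chan int, 10)
	results := make(chan int, 10)
	done := make(chan bool)
	var wg sync.WaitGroup

	go func() { // producer: queue jobs, then close the channel
		for i := 1; i <= 5; i++ {
			jobs <- i
		}
		close(jobs)
	}()

	sum := 0
	go func() { // single consumer: drain results until closed
		for r := range results {
			sum += r
		}
		done <- true
	}()

	for w := 0; w < 3; w++ { // fixed pool of workers
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range jobs {
				results <- j * j
			}
		}()
	}

	wg.Wait()        // all workers finished
	close(results)   // lets the consumer exit its range loop
	<-done           // consumer has processed everything
	fmt.Println(sum) // 55
}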

@@ -1,16 +0,0 @@
package hostsensorutils

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Initializes a workerPool struct with default values and returns it
func TestNewWorkerPoolDefaultValues(t *testing.T) {
	wp := newWorkerPool()
	assert.Equal(t, noOfWorkers, wp.noOfWorkers)
	assert.NotNil(t, wp.jobs)
	assert.NotNil(t, wp.results)
	assert.NotNil(t, wp.done)
}

@@ -1,15 +1 @@
package hostsensorutils

import (
	jsoniter "github.com/json-iterator/go"
)

var (
	json jsoniter.API
)

func init() {
	// NOTE(fredbi): attention, this configuration rounds floats down to 6 digits
	// For finer-grained config, see: https://pkg.go.dev/github.com/json-iterator/go#section-readme
	json = jsoniter.ConfigFastest
}
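
// NOTE (illustrative sketch, not part of the source above): the precision caveat
// in the comment is observable — jsoniter.ConfigFastest marshals floats with only
// 6 significant digits, unlike encoding/json. The exact printed digits may vary;
// the point is the truncation.
package main

import (
	stdjson "encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	v := 1.0 / 3.0

	fast, _ := jsoniter.ConfigFastest.Marshal(v)
	std, _ := stdjson.Marshal(v)

	fmt.Println(string(fast)) // roughly 0.333333 — digits beyond six are dropped
	fmt.Println(string(std))  // 0.3333333333333333
}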

File diff suppressed because one or more lines are too long

@@ -1,10 +0,0 @@
package hostsensorutils

// messages used for warnings
var (
	failedToGetData                     = "failed to get data"
	failedToTeardownNamespace           = "failed to teardown Namespace"
	oneHostSensorPodIsUnabledToSchedule = "One host-sensor pod is unable to schedule on node. We will fail to collect the data from this node"
	failedToWatchOverDaemonSetPods      = "failed to watch over DaemonSet pods"
	failedToValidateHostSensorPodStatus = "failed to validate host-scanner pods status"
)

@@ -1,74 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app: kubescape-host-scanner
    k8s-app: kubescape-host-scanner
    kubernetes.io/metadata.name: kubescape-host-scanner
    tier: kubescape-host-scanner-control-plane
  name: kubescape
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: host-scanner
  namespace: kubescape
  labels:
    app: host-scanner
    k8s-app: kubescape-host-scanner
    otel: enabled
spec:
  selector:
    matchLabels:
      name: host-scanner
  template:
    metadata:
      labels:
        name: host-scanner
    spec:
      tolerations:
        # this toleration is to have the DaemonSet runnable on all nodes (including masters)
        # remove it if your masters can't run pods
        - operator: Exists
      containers:
        - name: host-sensor
          image: quay.io/kubescape/host-scanner:v1.0.61
          securityContext:
            allowPrivilegeEscalation: true
            privileged: true
            readOnlyRootFilesystem: true
          ports:
            - name: scanner # Do not change port name
              containerPort: 7888
              protocol: TCP
          resources:
            limits:
              cpu: 0.1m
              memory: 200Mi
            requests:
              cpu: 1m
              memory: 200Mi
          volumeMounts:
            - mountPath: /host_fs
              name: host-filesystem
          startupProbe:
            httpGet:
              path: /readyz
              port: 7888
            failureThreshold: 30
            periodSeconds: 1
          livenessProbe:
            httpGet:
              path: /healthz
              port: 7888
            periodSeconds: 10
      terminationGracePeriodSeconds: 120
      dnsPolicy: ClusterFirstWithHostNet
      automountServiceAccountToken: false
      volumes:
        - hostPath:
            path: /
            type: Directory
          name: host-filesystem
      hostPID: true
      hostIPC: true

@@ -1 +0,0 @@
Kubescape is Awesome!

@@ -1,57 +1,13 @@
 package hostsensorutils

 import (
+	"github.com/kubescape/k8s-interface/hostsensor"
 	"github.com/kubescape/k8s-interface/k8sinterface"
 	"github.com/kubescape/opa-utils/reporthandling/apis"
 )

-// scannerResource is the enumerated type listing all resources from the host-scanner.
-type scannerResource string
-
-const (
-	// host-scanner resources
-
-	KubeletConfiguration         scannerResource = "KubeletConfiguration"
-	OsReleaseFile                scannerResource = "OsReleaseFile"
-	KernelVersion                scannerResource = "KernelVersion"
-	LinuxSecurityHardeningStatus scannerResource = "LinuxSecurityHardeningStatus"
-	OpenPortsList                scannerResource = "OpenPortsList"
-	LinuxKernelVariables         scannerResource = "LinuxKernelVariables"
-	KubeletCommandLine           scannerResource = "KubeletCommandLine"
-	KubeletInfo                  scannerResource = "KubeletInfo"
-	KubeProxyInfo                scannerResource = "KubeProxyInfo"
-	ControlPlaneInfo             scannerResource = "ControlPlaneInfo"
-	CloudProviderInfo            scannerResource = "CloudProviderInfo"
-	CNIInfo                      scannerResource = "CNIInfo"
-)
-
-func mapHostSensorResourceToApiGroup(r scannerResource) string {
-	switch r {
-	case
-		KubeletConfiguration,
-		OsReleaseFile,
-		KubeletCommandLine,
-		KernelVersion,
-		LinuxSecurityHardeningStatus,
-		OpenPortsList,
-		LinuxKernelVariables,
-		KubeletInfo,
-		KubeProxyInfo,
-		ControlPlaneInfo,
-		CloudProviderInfo,
-		CNIInfo:
-		return "hostdata.kubescape.cloud/v1beta0"
-	default:
-		return ""
-	}
-}
-
-func (r scannerResource) String() string {
-	return string(r)
-}
-
-func addInfoToMap(resource scannerResource, infoMap map[string]apis.StatusInfo, err error) {
-	group, version := k8sinterface.SplitApiVersion(mapHostSensorResourceToApiGroup(resource))
+func addInfoToMap(resource hostsensor.HostSensorResource, infoMap map[string]apis.StatusInfo, err error) {
+	group, version := k8sinterface.SplitApiVersion(hostsensor.MapHostSensorResourceToApiGroup(resource))
 	r := k8sinterface.JoinResourceTriplets(group, version, resource.String())
 	infoMap[r] = apis.StatusInfo{
 		InnerStatus: apis.StatusSkipped,

@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"testing"

+	"github.com/kubescape/k8s-interface/hostsensor"
 	"github.com/kubescape/opa-utils/reporthandling/apis"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

@@ -20,12 +21,12 @@ func TestAddInfoToMap(t *testing.T) {
 	testErr := errors.New("test error")

 	for _, toPin := range []struct {
-		Resource scannerResource
+		Resource hostsensor.HostSensorResource
 		Err      error
 		Expected map[string]apis.StatusInfo
 	}{
 		{
-			Resource: KubeletConfiguration,
+			Resource: hostsensor.KubeletConfiguration,
 			Err:      testErr,
 			Expected: map[string]apis.StatusInfo{
 				"hostdata.kubescape.cloud/v1beta0/KubeletConfiguration": {
@@ -35,7 +36,7 @@ func TestAddInfoToMap(t *testing.T) {
 			},
 		},
 		{
-			Resource: CNIInfo,
+			Resource: hostsensor.CNIInfo,
 			Err:      testErr,
 			Expected: map[string]apis.StatusInfo{
 				"hostdata.kubescape.cloud/v1beta0/CNIInfo": {
@@ -45,7 +46,7 @@ func TestAddInfoToMap(t *testing.T) {
 			},
 		},
 		{
-			Resource: scannerResource("invalid"),
+			Resource: hostsensor.HostSensorResource("invalid"),
 			Err:      testErr,
 			Expected: map[string]apis.StatusInfo{
 				"//invalid": { // no group, no version

@@ -72,55 +73,55 @@ func TestMapHostSensorResourceToApiGroup(t *testing.T) {
 	url := "hostdata.kubescape.cloud/v1beta0"

 	tests := []struct {
-		resource scannerResource
+		resource hostsensor.HostSensorResource
 		want     string
 	}{
 		{
-			resource: KubeletConfiguration,
+			resource: hostsensor.KubeletConfiguration,
 			want:     url,
 		},
 		{
-			resource: OsReleaseFile,
+			resource: hostsensor.OsReleaseFile,
 			want:     url,
 		},
 		{
-			resource: KubeletCommandLine,
+			resource: hostsensor.KubeletCommandLine,
 			want:     url,
 		},
 		{
-			resource: KernelVersion,
+			resource: hostsensor.KernelVersion,
 			want:     url,
 		},
 		{
-			resource: LinuxSecurityHardeningStatus,
+			resource: hostsensor.LinuxSecurityHardeningStatus,
 			want:     url,
 		},
 		{
-			resource: OpenPortsList,
+			resource: hostsensor.OpenPortsList,
 			want:     url,
 		},
 		{
-			resource: LinuxKernelVariables,
+			resource: hostsensor.LinuxKernelVariables,
 			want:     url,
 		},
 		{
-			resource: KubeletInfo,
+			resource: hostsensor.KubeletInfo,
 			want:     url,
 		},
 		{
-			resource: KubeProxyInfo,
+			resource: hostsensor.KubeProxyInfo,
 			want:     url,
 		},
 		{
-			resource: ControlPlaneInfo,
+			resource: hostsensor.ControlPlaneInfo,
 			want:     url,
 		},
 		{
-			resource: CloudProviderInfo,
+			resource: hostsensor.CloudProviderInfo,
 			want:     url,
 		},
 		{
-			resource: CNIInfo,
+			resource: hostsensor.CNIInfo,
 			want:     url,
 		},
 		{

@@ -131,7 +132,7 @@ func TestMapHostSensorResourceToApiGroup(t *testing.T) {

 	for _, tt := range tests {
 		t.Run(tt.want, func(t *testing.T) {
-			assert.Equal(t, tt.want, mapHostSensorResourceToApiGroup(tt.resource))
+			assert.Equal(t, tt.want, hostsensor.MapHostSensorResourceToApiGroup(tt.resource))
 		})
 	}
 }

@@ -8,6 +8,7 @@ import (
 	"sync"

 	"github.com/armosec/armoapi-go/armotypes"
+	mapset "github.com/deckarep/golang-set/v2"
 	"github.com/kubescape/go-logger"
 	"github.com/kubescape/go-logger/helpers"
 	"github.com/kubescape/k8s-interface/workloadinterface"

@@ -43,6 +44,8 @@ type OPAProcessor struct {
 	excludeNamespaces []string
 	includeNamespaces []string
 	printEnabled      bool
+	compiledModules   map[string]*ast.Compiler
+	compiledMu        sync.RWMutex
 }

@@ -58,17 +61,18 @@ func NewOPAProcessor(sessionObj *cautils.OPASessionObj, regoDependenciesData *resources.RegoDependenciesData, clusterName string, excludeNamespaces string, includeNamespaces string, enableRegoPrint bool) *OPAProcessor {
 		excludeNamespaces: split(excludeNamespaces),
 		includeNamespaces: split(includeNamespaces),
 		printEnabled:      enableRegoPrint,
+		compiledModules:   make(map[string]*ast.Compiler),
 	}
 }

 func (opap *OPAProcessor) ProcessRulesListener(ctx context.Context, progressListener IJobProgressNotificationClient) error {
 	scanningScope := cautils.GetScanningScope(opap.Metadata.ContextMetadata)
-	opap.OPASessionObj.AllPolicies = convertFrameworksToPolicies(opap.Policies, opap.ExcludedRules, scanningScope)
+	opap.AllPolicies = convertFrameworksToPolicies(opap.Policies, opap.ExcludedRules, scanningScope)

-	ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Policies, opap.OPASessionObj.AllPolicies)
+	ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Policies, opap.AllPolicies)

 	// process
-	if err := opap.Process(ctx, opap.OPASessionObj.AllPolicies, progressListener); err != nil {
+	if err := opap.Process(ctx, opap.AllPolicies, progressListener); err != nil {
 		logger.L().Ctx(ctx).Warning(err.Error())
 		// Return error?
 	}

@@ -126,7 +130,7 @@ func (opap *OPAProcessor) Process(ctx context.Context, policies *cautils.Policie
 }

 func (opap *OPAProcessor) loggerStartScanning() {
-	targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
+	targetScan := opap.Metadata.ScanMetadata.ScanningTarget
 	if reporthandlingv2.Cluster == targetScan {
 		logger.L().Start("Scanning", helpers.String(targetScan.String(), opap.clusterName))
 	} else {

@@ -135,7 +139,7 @@ func (opap *OPAProcessor) loggerStartScanning() {
 }

 func (opap *OPAProcessor) loggerDoneScanning() {
-	targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
+	targetScan := opap.Metadata.ScanMetadata.ScanningTarget
 	if reporthandlingv2.Cluster == targetScan {
 		logger.L().StopSuccess("Done scanning", helpers.String(targetScan.String(), opap.clusterName))
 	} else {

@@ -256,13 +260,14 @@ func (opap *OPAProcessor) processRule(ctx context.Context, rule *reporthandling.
 	ruleResult.Paths = appendPaths(ruleResult.Paths, ruleResponse.AssistedRemediation, failedResource.GetID())
 	// if ruleResponse has relatedObjects, add it to ruleResult
 	if len(ruleResponse.RelatedObjects) > 0 {
+		relatedResourcesSet := mapset.NewSet[string](ruleResult.RelatedResourcesIDs...)
 		for _, relatedObject := range ruleResponse.RelatedObjects {
 			wl := objectsenvelopes.NewObject(relatedObject.Object)
 			if wl != nil {
-				// avoid adding duplicate related resource IDs
-				if !slices.Contains(ruleResult.RelatedResourcesIDs, wl.GetID()) {
+				if !relatedResourcesSet.Contains(wl.GetID()) {
 					ruleResult.RelatedResourcesIDs = append(ruleResult.RelatedResourcesIDs, wl.GetID())
 				}
+				relatedResourcesSet.Add(wl.GetID())
 				ruleResult.Paths = appendPaths(ruleResult.Paths, relatedObject.AssistedRemediation, wl.GetID())
 			}
 		}
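
// NOTE (illustrative sketch, not part of the diff above): the change swaps a linear
// slices.Contains rescan for O(1) set membership via golang-set. A self-contained
// sketch of the same dedup pattern with made-up data:
package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set/v2"
)

func main() {
	ids := []string{"a", "b"}
	seen := mapset.NewSet[string](ids...) // seed the set from the existing IDs

	for _, candidate := range []string{"b", "c", "c"} {
		if !seen.Contains(candidate) {
			ids = append(ids, candidate) // first sighting: keep it
		}
		seen.Add(candidate)
	}

	fmt.Println(ids) // [a b c]
}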

@@ -307,27 +312,16 @@ func (opap *OPAProcessor) runOPAOnSingleRule(ctx context.Context, rule *reportha

 // runRegoOnK8s compiles an OPA PolicyRule and evaluates it against k8s
 func (opap *OPAProcessor) runRegoOnK8s(ctx context.Context, rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, ruleRegoDependenciesData resources.RegoDependenciesData) ([]reporthandling.RuleResponse, error) {
-	modules, err := getRuleDependencies(ctx)
-	if err != nil {
-		return nil, fmt.Errorf("rule: '%s', %s", rule.Name, err.Error())
-	}
-
 	opap.opaRegisterOnce.Do(func() {
 		// register signature verification methods for the OPA ast engine (since these are package level symbols, we do it only once)
 		rego.RegisterBuiltin2(cosignVerifySignatureDeclaration, cosignVerifySignatureDefinition)
 		rego.RegisterBuiltin1(cosignHasSignatureDeclaration, cosignHasSignatureDefinition)
 		rego.RegisterBuiltin1(imageNameNormalizeDeclaration, imageNameNormalizeDefinition)
 	})

-	modules[rule.Name] = getRuleData(rule)
-
-	// NOTE: OPA module compilation is the most resource-intensive operation.
-	compiled, err := ast.CompileModulesWithOpt(modules, ast.CompileOpts{
-		EnablePrintStatements: opap.printEnabled,
-		ParserOptions:         ast.ParserOptions{RegoVersion: ast.RegoV0},
-	})
+	ruleData := getRuleData(rule)
+	compiled, err := opap.getCompiledRule(ctx, rule.Name, ruleData, opap.printEnabled)
 	if err != nil {
-		return nil, fmt.Errorf("in 'runRegoOnK8s', failed to compile rule, name: %s, reason: %w", rule.Name, err)
+		return nil, fmt.Errorf("rule: '%s', %w", rule.Name, err)
 	}

 	store, err := ruleRegoDependenciesData.TOStorage()

@@ -335,7 +329,6 @@ func (opap *OPAProcessor) runRegoOnK8s(ctx context.Context, rule *reporthandling
 		return nil, err
 	}

-	// Eval
 	results, err := opap.regoEval(ctx, k8sObjects, compiled, &store)
 	if err != nil {
 		logger.L().Ctx(ctx).Warning(err.Error())

@@ -405,7 +398,7 @@ func (opap *OPAProcessor) makeRegoDeps(configInputs []reporthandling.ControlConf
 	}

 	dataControlInputs := map[string]string{
-		"cloudProvider": opap.OPASessionObj.Report.ClusterCloudProvider,
+		"cloudProvider": opap.Report.ClusterCloudProvider,
 	}

 	return resources.RegoDependenciesData{

@@ -435,3 +428,43 @@ func split(namespaces string) []string {
 	}
 	return strings.Split(namespaces, ",")
 }
+
+func (opap *OPAProcessor) getCompiledRule(ctx context.Context, ruleName, ruleData string, printEnabled bool) (*ast.Compiler, error) {
+	cacheKey := ruleName + "|" + ruleData
+
+	opap.compiledMu.RLock()
+	if compiled, ok := opap.compiledModules[cacheKey]; ok {
+		opap.compiledMu.RUnlock()
+		return compiled, nil
+	}
+	opap.compiledMu.RUnlock()
+
+	opap.compiledMu.Lock()
+	defer opap.compiledMu.Unlock()
+
+	if compiled, ok := opap.compiledModules[cacheKey]; ok {
+		return compiled, nil
+	}
+
+	baseModules, err := getRuleDependencies(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get rule dependencies: %w", err)
+	}
+
+	modules := make(map[string]string, len(baseModules)+1)
+	for k, v := range baseModules {
+		modules[k] = v
+	}
+	modules[ruleName] = ruleData
+
+	compiled, err := ast.CompileModulesWithOpt(modules, ast.CompileOpts{
+		EnablePrintStatements: printEnabled,
+		ParserOptions:         ast.ParserOptions{RegoVersion: ast.RegoV0},
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to compile rule '%s': %w", ruleName, err)
+	}
+
+	opap.compiledModules[cacheKey] = compiled
+	return compiled, nil
+}
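
// NOTE (illustrative sketch, not part of the diff above): getCompiledRule uses
// double-checked locking — readers share an RLock on the hot path, and the write
// path re-checks the cache after taking the exclusive lock, so two goroutines racing
// on the same key compile only once. The same pattern in isolation:
package main

import (
	"fmt"
	"sync"
)

type cache struct {
	mu      sync.RWMutex
	entries map[string]int
}

func (c *cache) getOrCompute(key string, compute func() int) int {
	c.mu.RLock()
	if v, ok := c.entries[key]; ok {
		c.mu.RUnlock()
		return v // fast path: shared lock only
	}
	c.mu.RUnlock()

	c.mu.Lock()
	defer c.mu.Unlock()
	if v, ok := c.entries[key]; ok {
		return v // another goroutine filled it while we waited
	}
	v := compute()
	c.entries[key] = v
	return v
}

func main() {
	c := &cache{entries: map[string]int{}}
	fmt.Println(c.getOrCompute("k", func() int { return 42 }))
	fmt.Println(c.getOrCompute("k", func() int { return 0 })) // cached: still 42
}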

@@ -20,6 +20,7 @@ import (
 	"github.com/kubescape/opa-utils/reporthandling"
 	"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
 	"github.com/kubescape/opa-utils/resources"
+	"github.com/open-policy-agent/opa/v1/ast"
 	"github.com/stretchr/testify/assert"
 )

@@ -49,10 +50,6 @@ func unzipAllResourcesTestDataAndSetVar(zipFilePath, destFilePath string) error
 	os.RemoveAll(destFilePath)

 	f := archive.File[0]
-	if err != nil {
-		return err
-	}
-
 	dstFile, err := os.OpenFile(destFilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
 	if err != nil {
 		return err

@@ -64,6 +61,12 @@ func unzipAllResourcesTestDataAndSetVar(zipFilePath, destFilePath string) error
 	}

 	_, err = io.Copy(dstFile, fileInArchive) //nolint:gosec
+	if err != nil {
+		dstFile.Close()
+		fileInArchive.Close()
+		archive.Close()
+		return err
+	}

 	dstFile.Close()
 	fileInArchive.Close()

@@ -83,7 +86,9 @@ func unzipAllResourcesTestDataAndSetVar(zipFilePath, destFilePath string) error
 }

 func NewOPAProcessorMock(opaSessionObjMock string, resourcesMock []byte) *OPAProcessor {
-	opap := &OPAProcessor{}
+	opap := &OPAProcessor{
+		compiledModules: make(map[string]*ast.Compiler),
+	}
 	if err := json.Unmarshal([]byte(regoDependenciesData), &opap.regoDependenciesData); err != nil {
 		panic(err)
 	}

@@ -165,12 +170,12 @@ func BenchmarkProcess(b *testing.B) {
 	go monitorHeapSpace(&maxHeap, quitChan)

 	// test
-	opap.Process(context.Background(), opap.OPASessionObj.AllPolicies, nil)
+	opap.Process(context.Background(), opap.AllPolicies, nil)

 	// teardown
 	quitChan <- true
-	b.Log(fmt.Sprintf("%s_max_heap_space_gb: %.2f", testName, float64(maxHeap)/(1024*1024*1024)))
-	b.Log(fmt.Sprintf("%s_execution_time_sec: %f", testName, b.Elapsed().Seconds()))
+	b.Logf("%s_max_heap_space_gb: %.2f", testName, float64(maxHeap)/(1024*1024*1024))
+	b.Logf("%s_execution_time_sec: %f", testName, b.Elapsed().Seconds())
 	})
 }

@@ -77,7 +77,7 @@ var cosignVerifySignatureDefinition = func(bctx rego.BuiltinContext, a, b *ast.T
 		return nil, fmt.Errorf("invalid parameter type: %v", err)
 	}
 	// Replace escaped newline sequences with actual newlines
-	bbStr := strings.Replace(string(bStr), "\\n", "\n", -1)
+	bbStr := strings.ReplaceAll(string(bStr), "\\n", "\n")
 	result, err := verify(string(aStr), bbStr)
 	if err != nil {
 		// Do not change this log from debug level. We might find a lot of images without signature
@@ -3,7 +3,6 @@ package resourcehandler
|
||||
import (
|
||||
"context"
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
@@ -16,21 +15,8 @@ import (
|
||||
"k8s.io/client-go/dynamic/fake"
|
||||
fakeclientset "k8s.io/client-go/kubernetes/fake"
|
||||
"k8s.io/client-go/rest"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
)
|
||||
|
||||
var (
|
||||
//go:embed testdata/kubeconfig_mock.json
|
||||
kubeConfigMock string
|
||||
)
|
||||
|
||||
func getKubeConfigMock() *clientcmdapi.Config {
|
||||
kubeConfig := clientcmdapi.Config{}
|
||||
if err := json.Unmarshal([]byte(kubeConfigMock), &kubeConfig); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &kubeConfig
|
||||
}
|
||||
func Test_getCloudMetadata(t *testing.T) {
|
||||
|
||||
tests := []struct {
|
||||
@@ -73,7 +59,7 @@ func Test_getCloudMetadata(t *testing.T) {
// https://github.com/kubescape/kubescape/pull/1004
// Cluster named .*eks.* config without a cloudconfig panics whereas we just want to scan a file
func getResourceHandlerMock() *K8sResourceHandler {
-   client := fakeclientset.NewSimpleClientset()
+   client := fakeclientset.NewClientset()
    fakeDiscovery := client.Discovery()

    k8s := &k8sinterface.KubernetesApi{
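On the fake clientset swap above: recent client-go versions mark NewSimpleClientset as deprecated in favor of NewClientset, which additionally tracks managed fields so server-side apply works against the fake. A hedged test sketch (assumes a client-go version new enough to ship NewClientset, roughly v0.30+):

    package resourcehandler

    import (
        "context"
        "testing"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        fakeclientset "k8s.io/client-go/kubernetes/fake"
    )

    func TestFakeClientsetSketch(t *testing.T) {
        client := fakeclientset.NewClientset() // variadic: pass runtime.Objects to preseed state
        pods, err := client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
        if err != nil {
            t.Fatal(err)
        }
        if len(pods.Items) != 0 {
            t.Fatalf("expected empty pod list, got %d items", len(pods.Items))
        }
    }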
@@ -35,7 +35,7 @@ func CollectResources(ctx context.Context, rsrcHandler IResourceHandler, opaSess
    opaSessionObj.ExternalResources = externalResources
    opaSessionObj.ExcludedRules = excludedRulesMap

-   if (opaSessionObj.K8SResources == nil || len(opaSessionObj.K8SResources) == 0) && (opaSessionObj.ExternalResources == nil || len(opaSessionObj.ExternalResources) == 0) || len(opaSessionObj.AllResources) == 0 {
+   if len(opaSessionObj.K8SResources) == 0 && len(opaSessionObj.ExternalResources) == 0 || len(opaSessionObj.AllResources) == 0 {
        return fmt.Errorf("no resources found to scan")
    }
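The simplified condition above leans on a language guarantee: len of a nil map or nil slice is defined to be 0, so the explicit nil comparisons were dead weight. A quick demonstration:

    package main

    import "fmt"

    func main() {
        var m map[string][]string // nil map
        var s []string            // nil slice
        fmt.Println(len(m), len(s))             // 0 0 — len never panics here
        fmt.Println(len(m) == 0 && len(s) == 0) // true, no nil checks required
    }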
@@ -3,13 +3,13 @@ package resourcehandler

import (
    "encoding/json"
    "fmt"
+   "io"
    "net/http"
    "os"
    "path/filepath"
    "strings"

    giturls "github.com/chainguard-dev/git-urls"
    "github.com/kubescape/kubescape/v3/core/cautils/getter"
    "k8s.io/utils/strings/slices"
)
@@ -167,7 +167,7 @@ func (g *GitHubRepository) setBranch(branchOptional string) error {
    if g.branch != "" {
        return nil
    }
-   body, err := getter.HttpGetter(&http.Client{}, g.defaultBranchAPI(), g.getHeaders())
+   body, err := httpGet(&http.Client{}, g.defaultBranchAPI(), g.getHeaders())
    if err != nil {
        return err
    }
@@ -193,12 +193,27 @@ func (g *GitHubRepository) getHeaders() map[string]string {
    }
    return map[string]string{"Authorization": fmt.Sprintf("token %s", g.token)}
}
+func httpGet(client *http.Client, url string, headers map[string]string) ([]byte, error) {
+   req, err := http.NewRequest(http.MethodGet, url, nil)
+   if err != nil {
+       return nil, err
+   }
+   for k, v := range headers {
+       req.Header.Set(k, v)
+   }
+   resp, err := client.Do(req)
+   if err != nil {
+       return nil, err
+   }
+   defer resp.Body.Close()
+   return io.ReadAll(resp.Body)
+}
func (g *GitHubRepository) setTree() error {
    if g.isFile {
        return nil
    }

-   body, err := getter.HttpGetter(&http.Client{}, g.treeAPI(), g.getHeaders())
+   body, err := httpGet(&http.Client{}, g.treeAPI(), g.getHeaders())
    if err != nil {
        return err
    }
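One property of the new httpGet helper worth noting: it returns the response body for any status code, which matches the minimal contract the two call sites shown here need. If a caller ever had to reject non-2xx responses, a variant along these lines would do it (an illustrative extension reusing the same imports as the file above, not part of this changeset):

    func httpGetChecked(client *http.Client, url string, headers map[string]string) ([]byte, error) {
        req, err := http.NewRequest(http.MethodGet, url, nil)
        if err != nil {
            return nil, err
        }
        for k, v := range headers {
            req.Header.Set(k, v)
        }
        resp, err := client.Do(req)
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()
        if resp.StatusCode < 200 || resp.StatusCode > 299 { // reject non-2xx responses
            return nil, fmt.Errorf("GET %s: unexpected status %s", url, resp.Status)
        }
        return io.ReadAll(resp.Body)
    }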
@@ -138,7 +138,7 @@ func updateQueryableResourcesMapFromRuleMatchObject(match *reporthandling.RuleMa
    }
    queryableResource.AddFieldSelector(globalFieldSelector)

-   if match.FieldSelector == nil || len(match.FieldSelector) == 0 {
+   if len(match.FieldSelector) == 0 {
        queryableResources.Add(queryableResource)
        continue
    }
@@ -37,7 +37,7 @@ func TestResolveLocation(t *testing.T) {

    resolver, _ := NewFixPathLocationResolver(yamlFilePath)

-   for fixPath, _ := range fixPathToExpectedLineAndColumn {
+   for fixPath := range fixPathToExpectedLineAndColumn {
        location, err := resolver.ResolveLocation(fixPath, 100000)
        assert.Contains(t, err.Error(), "node index [100000] out of range ")
        assert.Empty(t, location)
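The loop fix above drops an unused blank variable: `for k := range m` is the keys-only form of range, and `for k, _ := range m` is exactly the pattern gofmt -s rewrites away. Side by side:

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1, "b": 2}
        for k, v := range m { // keys and values
            fmt.Println(k, v)
        }
        for k := range m { // keys only; no need for `for k, _ := range m`
            fmt.Println(k)
        }
    }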
@@ -70,7 +70,7 @@ func (jp *JsonPrinter) convertToImageScanSummary(imageScanData []cautils.ImageSc
        imageScanSummary.Images = append(imageScanSummary.Images, imageScanData[i].Image)
    }

-   CVEs := extractCVEs(imageScanData[i].Matches)
+   CVEs := extractCVEs(imageScanData[i].Matches, imageScanData[i].Image)
    imageScanSummary.CVEs = append(imageScanSummary.CVEs, CVEs...)

    setPkgNameToScoreMap(imageScanData[i].Matches, imageScanSummary.PackageScores)
@@ -121,10 +121,14 @@ func printConfigurationsScanning(opaSessionObj *cautils.OPASessionObj, imageScan
    }

    // Convert to PostureReportWithSeverity to add severity field to controls
+   // and extract specified labels from workloads
    finalizedReport := FinalizeResults(opaSessionObj)
-   reportWithSeverity := ConvertToPostureReportWithSeverity(finalizedReport)
+   reportWithSeverity := ConvertToPostureReportWithSeverityAndLabels(finalizedReport, opaSessionObj.LabelsToCopy, opaSessionObj.AllResources)

    r, err := json.Marshal(reportWithSeverity)
    if err != nil {
        return err
    }
    _, err = jp.writer.Write(r)

    return err
@@ -1,7 +1,7 @@
package printer

import (
-   "io/ioutil"
+   "io"
    "os"
    "testing"
@@ -79,7 +79,7 @@ func TestScore_Json(t *testing.T) {

    // Read the contents of the temporary file
    f.Seek(0, 0)
-   got, err := ioutil.ReadAll(f)
+   got, err := io.ReadAll(f)
    if err != nil {
        panic(err)
    }
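Context for the ioutil replacements in this and the next few hunks: the io/ioutil package has been deprecated since Go 1.16, and ioutil.ReadAll is now just a forwarding call to io.ReadAll, so the swap is behavior-preserving. Minimal migration example:

    package main

    import (
        "io"
        "strings"
    )

    func main() {
        r := strings.NewReader("hello")
        data, err := io.ReadAll(r) // drop-in for ioutil.ReadAll(r)
        if err != nil {
            panic(err)
        }
        _ = data
    }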
@@ -169,22 +169,22 @@ func TestConvertToPackageScores(t *testing.T) {

func TestConvertToReportSummary(t *testing.T) {
    input := map[string]*imageprinter.SeveritySummary{
-       "High": &imageprinter.SeveritySummary{
+       "High": {
            NumberOfCVEs: 10,
            NumberOfFixableCVEs: 5,
        },
-       "Medium": &imageprinter.SeveritySummary{
+       "Medium": {
            NumberOfCVEs: 5,
            NumberOfFixableCVEs: 2,
        },
    }

    want := map[string]*reportsummary.SeveritySummary{
-       "High": &reportsummary.SeveritySummary{
+       "High": {
            NumberOfCVEs: 10,
            NumberOfFixableCVEs: 5,
        },
-       "Medium": &reportsummary.SeveritySummary{
+       "Medium": {
            NumberOfCVEs: 5,
            NumberOfFixableCVEs: 2,
        },
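The literal rewrite above uses composite-literal elision: inside a map or slice literal whose element type is *T or T, the inner &T{...}/T{...} can be dropped, and gofmt -s performs exactly this simplification. A sketch with a stand-in type (not the real imageprinter/reportsummary types):

    package main

    import "fmt"

    type SeveritySummary struct {
        NumberOfCVEs        int
        NumberOfFixableCVEs int
    }

    func main() {
        m := map[string]*SeveritySummary{
            "High": {NumberOfCVEs: 10, NumberOfFixableCVEs: 5}, // elided &SeveritySummary{...}
        }
        fmt.Println(m["High"].NumberOfCVEs)
    }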
@@ -125,11 +125,12 @@ func (pp *PdfPrinter) getTableObjects(summaryDetails *reportsummary.SummaryDetai
}

func getSeverityColor(severity string) *props.Color {
-   if severity == "Critical" {
+   switch severity {
+   case "Critical":
        return &props.Color{Red: 255, Green: 0, Blue: 0}
-   } else if severity == "High" {
+   case "High":
        return &props.Color{Red: 0, Green: 0, Blue: 255}
-   } else if severity == "Medium" {
+   case "Medium":
        return &props.Color{Red: 252, Green: 186, Blue: 3}
    }
    return &props.BlackColor
@@ -2,7 +2,7 @@ package printer

import (
    "context"
-   "io/ioutil"
+   "io"
    "os"
    "testing"
@@ -76,7 +76,7 @@ func TestScore_Pdf(t *testing.T) {

    // Read the contents of the temporary file
    f.Seek(0, 0)
-   got, err := ioutil.ReadAll(f)
+   got, err := io.ReadAll(f)
    if err != nil {
        panic(err)
    }
@@ -24,11 +24,6 @@ import (
    "k8s.io/utils/strings/slices"
)

const (
    prettyPrinterOutputFile = "report"
-   clusterScanningScopeInformationLink = "https://github.com/kubescape/regolibrary/tree/master#add-a-framework"
)

var _ printer.IPrinter = &PrettyPrinter{}

type PrettyPrinter struct {
@@ -60,13 +55,13 @@ func NewPrettyPrinter(verboseMode bool, formatVersion string, attackTree bool, v
func (pp *PrettyPrinter) SetMainPrinter() {
    switch pp.scanType {
    case cautils.ScanTypeCluster:
-       pp.mainPrinter = prettyprinter.NewClusterPrinter(pp.writer)
+       pp.mainPrinter = prettyprinter.NewClusterPrinter(pp.writer, pp.verboseMode)
    case cautils.ScanTypeRepo:
-       pp.mainPrinter = prettyprinter.NewRepoPrinter(pp.writer, pp.inputPatterns)
+       pp.mainPrinter = prettyprinter.NewRepoPrinter(pp.writer, pp.inputPatterns, pp.verboseMode)
    case cautils.ScanTypeImage:
        pp.mainPrinter = prettyprinter.NewImagePrinter(pp.writer, pp.verboseMode)
    case cautils.ScanTypeWorkload:
-       pp.mainPrinter = prettyprinter.NewWorkloadPrinter(pp.writer)
+       pp.mainPrinter = prettyprinter.NewWorkloadPrinter(pp.writer, pp.verboseMode)
    default:
        pp.mainPrinter = prettyprinter.NewSummaryPrinter(pp.writer, pp.verboseMode)
    }
@@ -89,7 +84,7 @@ func (pp *PrettyPrinter) convertToImageScanSummary(imageScanData []cautils.Image
        imageScanSummary.Images = append(imageScanSummary.Images, imageScanData[i].Image)
    }

-   CVEs := extractCVEs(imageScanData[i].Matches)
+   CVEs := extractCVEs(imageScanData[i].Matches, imageScanData[i].Image)
    imageScanSummary.CVEs = append(imageScanSummary.CVEs, CVEs...)

    setPkgNameToScoreMap(imageScanData[i].Matches, imageScanSummary.PackageScores)
@@ -157,12 +152,13 @@ func (pp *PrettyPrinter) printOverview(opaSessionObj *cautils.OPASessionObj, pri
}

func (pp *PrettyPrinter) printHeader(opaSessionObj *cautils.OPASessionObj) {
-   if pp.scanType == cautils.ScanTypeCluster {
+   switch pp.scanType {
+   case cautils.ScanTypeCluster:
        cautils.InfoDisplay(pp.writer, fmt.Sprintf("\nSecurity posture overview for cluster: '%s'\n\n", pp.clusterName))
        cautils.SimpleDisplay(pp.writer, "In this overview, Kubescape shows you a summary of your cluster security posture, including the number of users who can perform administrative actions. For each result greater than 0, you should evaluate its need, and then define an exception to allow it. This baseline can be used to detect drift in future.\n\n")
-   } else if pp.scanType == cautils.ScanTypeRepo {
+   case cautils.ScanTypeRepo:
        cautils.InfoDisplay(pp.writer, fmt.Sprintf("\nSecurity posture overview for repo: '%s'\n\n", strings.Join(pp.inputPatterns, ", ")))
-   } else if pp.scanType == cautils.ScanTypeWorkload {
+   case cautils.ScanTypeWorkload:
        cautils.InfoDisplay(pp.writer, "Workload security posture overview for:\n")
        ns := opaSessionObj.SingleResourceScan.GetNamespace()
        var rows []table.Row
@@ -14,20 +14,30 @@ import (
type ClusterPrinter struct {
    writer *os.File
    categoriesTablePrinter configurationprinter.TablePrinter
    imageTablePrinter imageprinter.TablePrinter
+   verboseMode bool
}

-func NewClusterPrinter(writer *os.File) *ClusterPrinter {
+func NewClusterPrinter(writer *os.File, verboseMode bool) *ClusterPrinter {
    return &ClusterPrinter{
        writer: writer,
        categoriesTablePrinter: configurationprinter.NewClusterPrinter(),
        imageTablePrinter: imageprinter.NewTableWriter(),
+       verboseMode: verboseMode,
    }
}

var _ MainPrinter = &ClusterPrinter{}

func (cp *ClusterPrinter) PrintImageScanning(summary *imageprinter.ImageScanSummary) {
-   printImageScanningSummary(cp.writer, *summary, false)
-   printImagesCommands(cp.writer, *summary)
+   if cp.verboseMode {
+       cp.imageTablePrinter.PrintImageScanningTable(cp.writer, *summary)
+       cautils.SimpleDisplay(cp.writer, "\n")
+   }
+   printImageScanningSummary(cp.writer, *summary, cp.verboseMode)
+   if !cp.verboseMode {
+       printImagesCommands(cp.writer, *summary)
+   }
}

func (cp *ClusterPrinter) PrintConfigurationsScanning(summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string, topWorkloadsByScore []reporthandling.IResource) {
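Taken together, the printer changes in this section thread one verboseMode flag from the top-level PrettyPrinter down into each concrete printer: in verbose mode the full per-CVE table is printed and the per-image rescan commands are suppressed; in the default mode the one-line summary plus commands are kept. A hedged usage sketch (assumes a populated ImageScanSummary; a zero value is used here only so the snippet compiles):

    package main

    import (
        "os"

        "github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer/v2/prettyprinter"
        "github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/imageprinter"
    )

    func main() {
        summary := imageprinter.ImageScanSummary{} // normally filled in by an image scan
        prettyprinter.NewClusterPrinter(os.Stdout, true).PrintImageScanning(&summary)  // verbose: full CVE table
        prettyprinter.NewClusterPrinter(os.Stdout, false).PrintImageScanning(&summary) // default: summary + commands
    }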
@@ -42,17 +42,21 @@ func TestClusterScan_getWorkloadScanCommand(t *testing.T) {

func TestNewClusterPrinter(t *testing.T) {
    // Test case 1: Valid writer
-   cp := NewClusterPrinter(os.Stdout)
+   cp := NewClusterPrinter(os.Stdout, false)
    assert.NotNil(t, cp)
    assert.Equal(t, os.Stdout, cp.writer)
    assert.NotNil(t, cp.categoriesTablePrinter)
    assert.NotNil(t, cp.imageTablePrinter)
+   assert.False(t, cp.verboseMode)

    // Test case 2: Nil writer
    var writer *os.File
-   cp = NewClusterPrinter(writer)
+   cp = NewClusterPrinter(writer, true)
    assert.NotNil(t, cp)
    assert.Nil(t, cp.writer)
    assert.NotNil(t, cp.categoriesTablePrinter)
    assert.NotNil(t, cp.imageTablePrinter)
+   assert.True(t, cp.verboseMode)
}

func TestPrintNextSteps(t *testing.T) {
@@ -63,7 +67,7 @@ func TestPrintNextSteps(t *testing.T) {
    }
    defer f.Close()

-   cp := NewClusterPrinter(f)
+   cp := NewClusterPrinter(f, false)

    // Redirect stderr to the temporary file
    oldStderr := os.Stderr
@@ -88,7 +92,7 @@ func TestPrintNextSteps(t *testing.T) {
}

func TestGetWorkloadScanCommand(t *testing.T) {
-   cp := NewClusterPrinter(os.Stdout)
+   cp := NewClusterPrinter(os.Stdout, false)
    assert.NotNil(t, cp)
    assert.Equal(t, os.Stdout, cp.writer)
    assert.NotNil(t, cp.categoriesTablePrinter)
@@ -3,6 +3,7 @@ package prettyprinter

import (
    "os"

+   "github.com/kubescape/kubescape/v3/core/cautils"
    "github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/configurationprinter"
    "github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/imageprinter"
    "github.com/kubescape/opa-utils/reporthandling"
@@ -15,6 +16,7 @@ type SummaryPrinter struct {
    writer *os.File
    verboseMode bool
    summaryTablePrinter configurationprinter.TablePrinter
+   imageTablePrinter imageprinter.TablePrinter
}

func NewSummaryPrinter(writer *os.File, verboseMode bool) *SummaryPrinter {
@@ -22,12 +24,21 @@ func NewSummaryPrinter(writer *os.File, verboseMode bool) *SummaryPrinter {
        writer: writer,
        verboseMode: verboseMode,
        summaryTablePrinter: configurationprinter.NewFrameworkPrinter(verboseMode),
+       imageTablePrinter: imageprinter.NewTableWriter(),
    }
}

var _ MainPrinter = &RepoPrinter{}

-func (sp *SummaryPrinter) PrintImageScanning(*imageprinter.ImageScanSummary) {}
+func (sp *SummaryPrinter) PrintImageScanning(summary *imageprinter.ImageScanSummary) {
+   if sp.verboseMode {
+       sp.imageTablePrinter.PrintImageScanningTable(sp.writer, *summary)
+       cautils.SimpleDisplay(sp.writer, "\n")
+   }
+   printImageScanningSummary(sp.writer, *summary, sp.verboseMode)
+   if !sp.verboseMode {
+       printImagesCommands(sp.writer, *summary)
+   }
+   printTopComponents(sp.writer, *summary)
+}

func (sp *SummaryPrinter) PrintNextSteps() {}
@@ -15,6 +15,7 @@ func TestNewSummaryPrinter(t *testing.T) {
    assert.Equal(t, os.Stdout, printer.writer)
    assert.Equal(t, verbose, printer.verboseMode)
    assert.NotNil(t, printer.summaryTablePrinter)
+   assert.NotNil(t, printer.imageTablePrinter)

    // Test case 2: Valid writer and non-verbose mode
    verbose = false
@@ -23,6 +24,7 @@ func TestNewSummaryPrinter(t *testing.T) {
    assert.Equal(t, os.Stdout, printer.writer)
    assert.Equal(t, verbose, printer.verboseMode)
    assert.NotNil(t, printer.summaryTablePrinter)
+   assert.NotNil(t, printer.imageTablePrinter)

    // Test case 3: Nil writer and verbose mode
    var writer *os.File
@@ -32,6 +34,7 @@ func TestNewSummaryPrinter(t *testing.T) {
    assert.Nil(t, printer.writer)
    assert.Equal(t, verbose, printer.verboseMode)
    assert.NotNil(t, printer.summaryTablePrinter)
+   assert.NotNil(t, printer.imageTablePrinter)

    // Test case 4: Nil writer and non-verbose mode
    verbose = false
@@ -40,6 +43,7 @@ func TestNewSummaryPrinter(t *testing.T) {
    assert.Nil(t, printer.writer)
    assert.Equal(t, verbose, printer.verboseMode)
    assert.NotNil(t, printer.summaryTablePrinter)
+   assert.NotNil(t, printer.imageTablePrinter)
}

func TestGetVerboseMode(t *testing.T) {
@@ -15,20 +15,30 @@ import (
type RepoPrinter struct {
    writer *os.File
    categoriesTablePrinter configurationprinter.TablePrinter
    imageTablePrinter imageprinter.TablePrinter
+   verboseMode bool
}

-func NewRepoPrinter(writer *os.File, inputPatterns []string) *RepoPrinter {
+func NewRepoPrinter(writer *os.File, inputPatterns []string, verboseMode bool) *RepoPrinter {
    return &RepoPrinter{
        writer: writer,
        categoriesTablePrinter: configurationprinter.NewRepoPrinter(inputPatterns),
        imageTablePrinter: imageprinter.NewTableWriter(),
+       verboseMode: verboseMode,
    }
}

var _ MainPrinter = &RepoPrinter{}

func (rp *RepoPrinter) PrintImageScanning(summary *imageprinter.ImageScanSummary) {
-   printImageScanningSummary(rp.writer, *summary, false)
-   printImagesCommands(rp.writer, *summary)
+   if rp.verboseMode {
+       rp.imageTablePrinter.PrintImageScanningTable(rp.writer, *summary)
+       cautils.SimpleDisplay(rp.writer, "\n")
+   }
+   printImageScanningSummary(rp.writer, *summary, rp.verboseMode)
+   if !rp.verboseMode {
+       printImagesCommands(rp.writer, *summary)
+   }
+   printTopComponents(rp.writer, *summary)
}
@@ -67,11 +67,11 @@ func generateCategoryStatusRow(controlSummary reportsummary.IControlSummary) tab

    rows[0] = utils.GetStatusIcon(controlSummary.GetStatus().Status())

-   rows[1] = controlSummary.GetName()
-   if len(controlSummary.GetName()) > 50 {
-       rows[1] = controlSummary.GetName()[:50] + "..."
+   name := controlSummary.GetName()
+   if len(name) > 50 {
+       rows[1] = name[:50] + "..." //nolint:gosec // Safe: rows has length 3, accessing index 1
    } else {
-       rows[1] = controlSummary.GetName()
+       rows[1] = name //nolint:gosec // Safe: rows has length 3, accessing index 1
    }

    rows[2] = getDocsForControl(controlSummary)
@@ -8,7 +8,6 @@ import (
    "github.com/jedib0t/go-pretty/v6/table"
    "github.com/jwalton/gchalk"
    "github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/utils"
-   "github.com/kubescape/opa-utils/reporthandling"
    "github.com/kubescape/opa-utils/reporthandling/apis"
    "github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
)
@@ -102,19 +101,6 @@ func (rp *RepoPrinter) generateCountingCategoryRow(controlSummary reportsummary.
    return rows
}

-func (rp *RepoPrinter) getWorkloadScanCommand(ns, kind, name string, source reporthandling.Source) string {
-   cmd := fmt.Sprintf("$ kubescape scan workload %s/%s/%s", ns, kind, name)
-   if ns == "" {
-       cmd = fmt.Sprintf("$ kubescape scan workload %s/%s", kind, name)
-   }
-   if source.FileType == "Helm" {
-       return fmt.Sprintf("%s --chart-path=%s", cmd, source.RelativePath)
-
-   } else {
-       return fmt.Sprintf("%s --file-path=%s", cmd, source.RelativePath)
-   }
-}
-
func (rp *RepoPrinter) generateTableNextSteps(controlSummary reportsummary.IControlSummary, inputPatterns []string) string {
    return fmt.Sprintf("$ kubescape scan control %s %s -v", controlSummary.GetID(), strings.Join(inputPatterns, ","))
}
@@ -72,9 +72,9 @@ func GenerateRow(controlSummary reportsummary.IControlSummary, infoToPrintInfo [

    row[summaryColumnSeverity] = GetSeverityColumn(controlSummary)
    if len(controlSummary.GetName()) > 50 {
-       row[summaryColumnName] = controlSummary.GetName()[:50] + "..."
+       row[summaryColumnName] = controlSummary.GetName()[:50] + "..." //nolint:gosec // Safe: row has length _summaryRowLen (5), accessing index 1
    } else {
-       row[summaryColumnName] = controlSummary.GetName()
+       row[summaryColumnName] = controlSummary.GetName() //nolint:gosec // Safe: row has length _summaryRowLen (5), accessing index 1
    }
    row[summaryColumnCounterFailed] = fmt.Sprintf("%d", controlSummary.NumberOfResources().Failed())
    row[summaryColumnCounterAll] = fmt.Sprintf("%d", controlSummary.NumberOfResources().All())
@@ -19,6 +19,7 @@ type CVE struct {
    Version string
    FixVersions []string
    FixedState string
+   Image string
}

type PackageScore struct {
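The new Image field is what allows the extractCVEs(matches, image) call sites earlier in this diff to tag each finding with its source image, and it feeds the new Image column exercised by the table tests below. Constructing a value (only fields visible in this diff are used; FixedState strings normally come from the v5 constants used in those tests):

    cve := imageprinter.CVE{
        ID:          "CVE-2020-0001",
        Package:     "package1",
        Version:     "1.0.0",
        FixVersions: []string{"v1"},
        FixedState:  "fixed",        // e.g. string(v5.FixedState)
        Image:       "nginx:latest", // new: attributes the finding to its image
    }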
@@ -10,6 +10,7 @@ const (
    imageColumnComponent = iota
    imageColumnVersion = iota
    imageColumnFixedIn = iota
+   imageColumnImage = iota
)

type TableWriter struct {
@@ -25,6 +25,7 @@ func TestPrintImageScanningTable(t *testing.T) {
                    Package: "package1",
                    Version: "1.0.0",
                    FixedState: string(v5.NotFixedState),
+                   Image: "nginx:latest",
                },
                {
                    ID: "CVE-2020-0002",
@@ -32,6 +33,7 @@ func TestPrintImageScanningTable(t *testing.T) {
                    Package: "package2",
                    Version: "1.0.0",
                    FixedState: string(v5.NotFixedState),
+                   Image: "alpine:3.18",
                },
                {
                    ID: "CVE-2020-0003",
@@ -39,10 +41,11 @@ func TestPrintImageScanningTable(t *testing.T) {
                    Package: "package3",
                    Version: "1.0.0",
                    FixedState: string(v5.NotFixedState),
+                   Image: "ubuntu:22.04",
                },
            },
        },
-       want: "╭──────────┬───────────────┬───────────┬─────────┬──────────╮\n│ Severity │ Vulnerability │ Component │ Version │ Fixed in │\n├──────────┼───────────────┼───────────┼─────────┼──────────┤\n│ High │ CVE-2020-0002 │ package2 │ 1.0.0 │ │\n│ Medium │ CVE-2020-0003 │ package3 │ 1.0.0 │ │\n│ Low │ CVE-2020-0001 │ package1 │ 1.0.0 │ │\n╰──────────┴───────────────┴───────────┴─────────┴──────────╯\n",
+       want: "╭──────────┬───────────────┬───────────┬─────────┬──────────┬──────────────╮\n│ Severity │ Vulnerability │ Component │ Version │ Fixed in │ Image │\n├──────────┼───────────────┼───────────┼─────────┼──────────┼──────────────┤\n│ High │ CVE-2020-0002 │ package2 │ 1.0.0 │ │ alpine:3.18 │\n│ Medium │ CVE-2020-0003 │ package3 │ 1.0.0 │ │ ubuntu:22.04 │\n│ Low │ CVE-2020-0001 │ package1 │ 1.0.0 │ │ nginx:latest │\n╰──────────┴───────────────┴───────────┴─────────┴──────────┴──────────────╯\n",
    },
    {
        name: "check fixed CVEs show versions",
@@ -54,6 +57,7 @@ func TestPrintImageScanningTable(t *testing.T) {
                    Package: "package1",
                    Version: "1.0.0",
                    FixedState: string(v5.NotFixedState),
+                   Image: "test:latest",
                },
                {
                    ID: "CVE-2020-0002",
@@ -62,10 +66,11 @@ func TestPrintImageScanningTable(t *testing.T) {
                    Version: "1.0.0",
                    FixVersions: []string{"v1", "v2"},
                    FixedState: string(v5.FixedState),
+                   Image: "golang:1.24",
                },
            },
        },
-       want: "╭──────────┬───────────────┬───────────┬─────────┬──────────╮\n│ Severity │ Vulnerability │ Component │ Version │ Fixed in │\n├──────────┼───────────────┼───────────┼─────────┼──────────┤\n│ High │ CVE-2020-0002 │ package2 │ 1.0.0 │ v1,v2 │\n│ Low │ CVE-2020-0001 │ package1 │ 1.0.0 │ │\n╰──────────┴───────────────┴───────────┴─────────┴──────────╯\n",
+       want: "╭──────────┬───────────────┬───────────┬─────────┬──────────┬─────────────╮\n│ Severity │ Vulnerability │ Component │ Version │ Fixed in │ Image │\n├──────────┼───────────────┼───────────┼─────────┼──────────┼─────────────┤\n│ High │ CVE-2020-0002 │ package2 │ 1.0.0 │ v1,v2 │ golang:1.24 │\n│ Low │ CVE-2020-0001 │ package1 │ 1.0.0 │ │ test:latest │\n╰──────────┴───────────────┴───────────┴─────────┴──────────┴─────────────╯\n",
    },
}