Mirror of https://github.com/kubescape/kubescape.git (synced 2026-02-14 18:09:55 +00:00)

Compare commits: 27 commits
| SHA1 |
|---|
| b167435c4d |
| 9b29321a53 |
| 466a11fa1c |
| cfe022ff1d |
| e0eeb691e6 |
| dc65bd4ccc |
| 02790da144 |
| b97f50ffb5 |
| 0841d1d483 |
| fbef268f22 |
| 427dccadd3 |
| 01bb19bf6e |
| c0d4bb45eb |
| 222c1ec866 |
| dc49218c7c |
| 3b4585a827 |
| 7f79bc2d1d |
| 3623e55433 |
| 2f7841b5a2 |
| f70d81d7c4 |
| bd49251234 |
| 57addd493f |
| 8f009d4698 |
| 7c0e38072d |
| aa9a610c4c |
| 25bd51e8b4 |
| 2759beece5 |
`.github/workflows/00-pr-scanner.yaml` (vendored, 258 lines changed)
```diff
@@ -50,7 +50,6 @@ jobs:
     name: secret-validator
     runs-on: ubuntu-latest
     outputs:
-      TEST_NAMES: ${{ steps.export_tests_to_env.outputs.TEST_NAMES }}
       is-secret-set: ${{ steps.check-secret-set.outputs.is-secret-set }}

     steps:
@@ -66,33 +65,8 @@ jobs:
           REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
         run: "echo \"is-secret-set=${{ env.CUSTOMER != '' && env.USERNAME != '' && env.PASSWORD != '' && env.CLIENT_ID != '' && env.SECRET_KEY != '' && env.REGISTRY_USERNAME != '' && env.REGISTRY_PASSWORD != '' }}\" >> $GITHUB_OUTPUT\n"

-      - id: export_tests_to_env
-        name: set test name
-        run: |
-          echo "TEST_NAMES=$input" >> $GITHUB_OUTPUT
-        env:
-          input: '[
-            "scan_nsa",
-            "scan_mitre",
-            "scan_with_exceptions",
-            "scan_repository",
-            "scan_local_file",
-            "scan_local_glob_files",
-            "scan_local_list_of_files",
-            "scan_git_repository_and_submit_to_backend",
-            "scan_and_submit_to_backend",
-            "scan_customer_configuration",
-            "scan_compliance_score",
-            "scan_custom_framework_scanning_file_scope_testing",
-            "scan_custom_framework_scanning_cluster_scope_testing",
-            "scan_custom_framework_scanning_cluster_and_file_scope_testing"
-          ]'

   run-system-tests:
-    strategy:
-      fail-fast: false
-      matrix:
-        TEST: ${{ fromJson(needs.wf-preparation.outputs.TEST_NAMES) }}
     needs: [wf-preparation, pr-scanner]
     if: ${{ (needs.wf-preparation.outputs.is-secret-set == 'true') && (always() && (contains(needs.*.result, 'success') || contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }}
     runs-on: ubuntu-latest
@@ -101,89 +75,169 @@ jobs:
       contents: read
       pull-requests: write
     steps:
       - uses: actions/checkout@v4
         with:
           fetch-depth: 0
           submodules: recursive

       - uses: actions/setup-go@v4
         name: Installing go
         with:
           go-version: "1.25"

       - uses: anchore/sbom-action/download-syft@v0
         name: Setup Syft

       - uses: goreleaser/goreleaser-action@v6
         name: Build
         with:
           distribution: goreleaser
           version: latest
           args: build --clean --snapshot --single-target
         env:
           RELEASE: ""
           CLIENT: test
           CGO_ENABLED: 0

       - name: chmod +x
         run: chmod +x -R ${PWD}/dist/cli_linux_amd64_v1/kubescape

       - name: Checkout systests repo
         uses: actions/checkout@v4
         with:
           repository: armosec/system-tests
           path: system-tests

       - uses: actions/setup-python@v4
         with:
           python-version: "3.9"
           cache: "pip"

       - name: create env
         run: ./create_env.sh
         working-directory: system-tests

-      - name: Generate uuid
-        id: uuid
+      - name: Set dispatch info
+        id: dispatch-info
         run: |
-          echo "RANDOM_UUID=$(uuidgen)" >> $GITHUB_OUTPUT
+          # Correlation ID WITHOUT attempt - so re-runs can find the original run
+          CORRELATION_ID="${GITHUB_REPOSITORY##*/}-${{ github.run_id }}"
+          echo "correlation_id=${CORRELATION_ID}" >> "$GITHUB_OUTPUT"
+          echo "Correlation ID: ${CORRELATION_ID}, Attempt: ${{ github.run_attempt }}"

-      - name: Create k8s Kind Cluster
-        id: kind-cluster-install
-        uses: helm/kind-action@v1.10.0
+      - name: Generate GitHub App token
+        id: app-token
+        uses: actions/create-github-app-token@v1
         with:
-          cluster_name: ${{ steps.uuid.outputs.RANDOM_UUID }}
+          app-id: ${{ secrets.E2E_DISPATCH_APP_ID }}
+          private-key: ${{ secrets.E2E_DISPATCH_APP_PRIVATE_KEY }}
+          owner: armosec
+          repositories: shared-workflows

-      - name: run-tests-on-local-built-kubescape
+      - name: Dispatch system tests to private repo
+        if: ${{ github.run_attempt == 1 }}
         env:
-          CUSTOMER: ${{ secrets.CUSTOMER }}
-          USERNAME: ${{ secrets.USERNAME }}
-          PASSWORD: ${{ secrets.PASSWORD }}
-          CLIENT_ID: ${{ secrets.CLIENT_ID_PROD }}
-          SECRET_KEY: ${{ secrets.SECRET_KEY_PROD }}
-          REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
-          REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
-        working-directory: system-tests
+          GH_TOKEN: ${{ steps.app-token.outputs.token }}
+          CORRELATION_ID: ${{ steps.dispatch-info.outputs.correlation_id }}
+          KS_BRANCH: ${{ github.head_ref || github.ref_name }}
         run: |
-          echo "Test history:"
-          echo " ${{ matrix.TEST }} " >/tmp/testhistory
-          cat /tmp/testhistory
-          source systests_python_env/bin/activate
+          echo "Dispatching E2E tests with correlation_id: ${CORRELATION_ID}"
+          echo "Using test group: KUBESCAPE_CLI_E2E"

-          python3 systest-cli.py \
-            -t ${{ matrix.TEST }} \
-            -b production \
-            -c CyberArmorTests \
-            --duration 3 \
-            --logger DEBUG \
-            --kwargs kubescape=${GITHUB_WORKSPACE}/dist/cli_linux_amd64_v1/kubescape
+          gh api "repos/armosec/shared-workflows/dispatches" \
+            -f event_type="e2e-test-trigger" \
+            -f "client_payload[correlation_id]=${CORRELATION_ID}" \
+            -f "client_payload[github_repository]=${GITHUB_REPOSITORY}" \
+            -f "client_payload[environment]=production" \
+            -f "client_payload[tests_groups]=KUBESCAPE_CLI_E2E" \
+            -f "client_payload[systests_branch]=master" \
+            -f "client_payload[ks_branch]=${KS_BRANCH}"

-          deactivate
+          echo "Dispatch completed"

-      - name: Test Report
-        uses: mikepenz/action-junit-report@v5
-        if: always()
+      - name: Find E2E workflow run
+        id: find-run
+        env:
+          GH_TOKEN: ${{ steps.app-token.outputs.token }}
+          CORRELATION_ID: ${{ steps.dispatch-info.outputs.correlation_id }}
+        run: |
+          for i in {1..15}; do
+            run_id=$(gh api "repos/armosec/shared-workflows/actions/runs?event=repository_dispatch&per_page=30" \
+              --jq '.workflow_runs | map(select(.name | contains("'"$CORRELATION_ID"'"))) | first | .id // empty')
+
+            if [ -n "$run_id" ]; then
+              echo "run_id=${run_id}" >> "$GITHUB_OUTPUT"
+              gh api "repos/armosec/shared-workflows/actions/runs/${run_id}" --jq '"url=" + .html_url' >> "$GITHUB_OUTPUT"
+              exit 0
+            fi
+            echo "Attempt $i: waiting for run..."
+            sleep $((i < 5 ? 10 : 30))
+          done
+          echo "::error::Could not find workflow run"
+          exit 1
+
+      - name: Re-run failed jobs in private repo
+        id: rerun
+        if: ${{ github.run_attempt > 1 }}
+        env:
+          GH_TOKEN: ${{ steps.app-token.outputs.token }}
+          RUN_ID: ${{ steps.find-run.outputs.run_id }}
+        run: |
+          conclusion=$(gh api "repos/armosec/shared-workflows/actions/runs/${RUN_ID}" --jq '.conclusion')
+          echo "Previous conclusion: $conclusion"
+
+          if [ "$conclusion" = "success" ]; then
+            echo "Previous run passed. Nothing to re-run."
+            echo "skip=true" >> "$GITHUB_OUTPUT"
+            exit 0
+          fi
+
+          # Full rerun if cancelled, partial if failed
+          if [ "$conclusion" = "cancelled" ]; then
+            echo "Run was cancelled - triggering full re-run"
+            gh api --method POST "repos/armosec/shared-workflows/actions/runs/${RUN_ID}/rerun"
+          else
+            echo "Re-running failed jobs only"
+            gh api --method POST "repos/armosec/shared-workflows/actions/runs/${RUN_ID}/rerun-failed-jobs"
+          fi
+
+          # Wait for status to flip from 'completed'
+          for i in {1..30}; do
+            [ "$(gh api "repos/armosec/shared-workflows/actions/runs/${RUN_ID}" --jq '.status')" != "completed" ] && break
+            sleep 2
+          done
+
+      - name: Wait for E2E tests to complete
+        if: ${{ steps.rerun.outputs.skip != 'true' }}
+        env:
+          GH_TOKEN: ${{ steps.app-token.outputs.token }}
+          RUN_ID: ${{ steps.find-run.outputs.run_id }}
+          URL: ${{ steps.find-run.outputs.url }}
+        run: |
+          echo "Monitoring: ${URL}"
+
+          for i in {1..60}; do # 60 iterations × 60s = 1 hour max
+            read status conclusion < <(gh api "repos/armosec/shared-workflows/actions/runs/${RUN_ID}" \
+              --jq '[.status, .conclusion // "null"] | @tsv')
+
+            echo "Status: ${status} | Conclusion: ${conclusion}"
+
+            if [ "$status" = "completed" ]; then
+              if [ "$conclusion" = "success" ]; then
+                echo "E2E tests passed!"
+                exit 0
+              fi
+
+              echo "::error::E2E tests failed: ${conclusion}"
+              echo ""
+
+              # Get failed job IDs to a file first
+              gh api "repos/armosec/shared-workflows/actions/runs/${RUN_ID}/jobs" \
+                --jq '.jobs[] | select(.conclusion == "failure") | [.id, .name, (.steps[] | select(.conclusion == "failure") | .name)] | @tsv' > /tmp/failed_jobs.txt
+
+              # Process each failed job
+              while IFS=$'\t' read -r job_id job_name step_name; do
+                # Extract test name: "run-helm-e2e / ST (relevancy_python)" → "relevancy_python"
+                test_name=$(echo "$job_name" | sed 's/.*(\(.*\))/\1/')
+
+                echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+                echo "${job_name}"
+                echo "  Step: ${step_name}"
+                echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
+
+                # Fetch logs to temp file
+                gh api "repos/armosec/shared-workflows/actions/jobs/${job_id}/logs" 2>/dev/null > /tmp/job_logs.txt
+
+                # Show summary in console
+                grep -E "(ERROR|FAILURE)" /tmp/job_logs.txt | tail -10
+                echo ""
+
+                # Save to separate file per test
+                log_file="failed_${test_name}.txt"
+                echo "════════════════════════════════════════" > "$log_file"
+                echo "${job_name}" >> "$log_file"
+                echo "  Step: ${step_name}" >> "$log_file"
+                echo "════════════════════════════════════════" >> "$log_file"
+                last_endgroup=$(grep -n "##\\[endgroup\\]" /tmp/job_logs.txt | tail -1 | cut -d: -f1)
+                if [ -n "$last_endgroup" ]; then
+                  tail -n +$((last_endgroup + 1)) /tmp/job_logs.txt >> "$log_file"
+                else
+                  tail -500 /tmp/job_logs.txt >> "$log_file"
+                fi
+              done < /tmp/failed_jobs.txt
+
+              echo "View full logs: ${URL}"
+              exit 1
+            fi
+
+            sleep 60
+          done
+
+          echo "::error::Timeout waiting for tests"
+          exit 1

+      - name: Upload failed step logs
+        if: failure()
+        uses: actions/upload-artifact@v4
         with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          report_paths: "system-tests/**/results_xml_format/**.xml"
-          commit: ${{github.event.workflow_run.head_sha}}
+          name: failed-e2e-logs-attempt-${{ github.run_attempt }}
+          path: failed_*.txt
+          retention-days: 7
```
`.github/workflows/02-release.yaml` (vendored, 6 lines changed)

```diff
@@ -103,6 +103,12 @@ jobs:
           REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
           REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}

+      - name: Update new version in krew-index
+        if: github.event_name != 'workflow_dispatch' || inputs.skip_publish != true
+        uses: rajatjindal/krew-release-bot@v0.0.47
+        with:
+          krew_template_file: .krew.yaml
+
       - name: List collected system-test results (debug)
         if: always()
         run: |
```
`.goreleaser.yaml`

```diff
@@ -18,6 +18,7 @@ archives:
   - id: cli
     ids:
       - cli
     formats:
       - binary
+      - tar.gz
@@ -35,8 +36,10 @@ builds:
       - amd64
       - arm64
     ldflags:
       - -s -w
-      - -X "github.com/kubescape/kubescape/v3/core/cautils.Client={{.Env.CLIENT}}"
+      - -X main.version={{.Version}}
+      - -X main.commit={{.Commit}}
+      - -X main.date={{.Date}}
+      - -X github.com/kubescape/backend/pkg/versioncheck.Client={{.Env.CLIENT}}
     hooks:
       post:
         - cmd: >
```
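These `-X` linker flags set package-level string variables at link time; that is how the new `main.version`, `main.commit`, and `main.date` values reach the reworked `version` command further down in this compare. A minimal sketch of the equivalent manual build, with made-up values standing in for GoReleaser's `{{.Version}}`, `{{.Commit}}`, and `{{.Date}}`:

```bash
# Illustrative only: GoReleaser substitutes the real tag, commit, and date.
CLIENT=test go build \
  -ldflags '-s -w -X main.version=v3.0.0 -X main.commit=abcdef1 -X main.date=2026-01-01T00:00:00Z' \
  -o kubescape .
```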
`.krew.yaml` (new file, 60 lines)

```yaml
apiVersion: krew.googlecontainertools.github.com/v1alpha2
kind: Plugin
metadata:
  name: kubescape
spec:
  version: {{ .TagName }}
  platforms:
  - selector:
      matchLabels:
        os: linux
        arch: amd64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_linux_amd64.tar.gz" .TagName) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: linux
        arch: arm64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_linux_arm64.tar.gz" .TagName) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: darwin
        arch: amd64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_darwin_amd64.tar.gz" .TagName) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: darwin
        arch: arm64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_darwin_arm64.tar.gz" .TagName) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: windows
        arch: amd64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_windows_amd64.tar.gz" .TagName) .TagName }}
    bin: kubescape.exe
  - selector:
      matchLabels:
        os: windows
        arch: arm64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_windows_arm64.tar.gz" .TagName) .TagName }}
    bin: kubescape.exe
  shortDescription: Scan resources and cluster configs against security frameworks.
  description: |
    Kubescape is the first tool for testing if Kubernetes is deployed securely
    according to mitigations and best practices. It includes risk analysis,
    security compliance, and misconfiguration scanning with an easy-to-use
    CLI interface, flexible output formats, and automated scanning capabilities.

    Features:
    - Risk analysis: Identify vulnerabilities and security risks in your cluster
    - Security compliance: Check your cluster against multiple security frameworks
    - Misconfiguration scanning: Detect security misconfigurations in your workloads
    - Flexible output: Results in JSON, SARIF, HTML, JUnit, and Prometheus formats
    - CI/CD integration: Easily integrate into your CI/CD pipeline
  homepage: https://kubescape.io/
  caveats: |
    Requires kubectl and basic knowledge of Kubernetes.
    Run 'kubescape scan' to scan your Kubernetes cluster or manifests.
```
`KREW_RELEASE.md` (new file, 273 lines)
# Krew Release Automation Guide

This document explains how kubescape automates publishing to krew, the plugin package manager for `kubectl`.

## What is Krew?

Krew is a plugin manager for `kubectl`. It allows users to discover and install `kubectl` plugins easily. You can learn more about krew at [https://krew.sigs.k8s.io/](https://krew.sigs.k8s.io/).

## How kubescape publishes to krew

We use the [krew-release-bot](https://github.com/rajatjindal/krew-release-bot) to automatically create pull requests to the [kubernetes-sigs/krew-index](https://github.com/kubernetes-sigs/krew-index) repository whenever a new release of kubescape is published.

### Setup Overview

The automation consists of three components:

1. **`.krew.yaml`** - A template file that the bot uses to generate the krew plugin manifest
2. **`.github/workflows/02-release.yaml`** - The GitHub Actions workflow that runs the krew-release-bot after a successful release
3. **`.goreleaser.yaml`** - The GoReleaser configuration that defines the krew manifest (though its upload is skipped)

### Why Use krew-release-bot Instead of GoReleaser's Built-in Krew Support?

You might have noticed that **GoReleaser has built-in krew support** in its `krews` section. However, almost all projects (including stern) set `skip_upload: true` and rely on **krew-release-bot** instead. Here's why:

#### Problems with GoReleaser's Built-in Krew Publishing

To use GoReleaser's direct krew publishing, you would need:

```yaml
krews:
  - name: kubescape
    skip_upload: false # Instead of true
    repository:
      owner: kubernetes-sigs
      name: krew-index
      token: "{{ .Env.KREW_INDEX_TOKEN }}" # Required!
    pull_request:
      enabled: true # Requires GoReleaser Pro for cross-repo PRs
```

This approach has several critical issues:

1. **Permission barrier**: Almost no one has write access to `kubernetes-sigs/krew-index`. You would need special permissions from the Krew maintainers, which is rarely granted.

2. **Security risk**: You would need to store a GitHub personal access token with write access to the krew-index in your repository secrets. If that token were compromised, it could be used to make unauthorized changes to the krew-index.

3. **GoReleaser Pro required**: Creating pull requests against a different repository (cross-repository PRs) requires GoReleaser Pro, a paid product.

4. **Manual work**: Even with access, you would have to configure and maintain the repository settings and tokens yourself, and potentially deal with rate limits and authentication issues.

#### Why krew-release-bot is the Right Solution

The **krew-release-bot** was created by the Kubernetes community (in collaboration with the Krew team) specifically to solve these problems:

- **No repository access required**: The bot acts as an intermediary with pre-configured access to krew-index. You don't need write permissions.

- **No tokens needed**: It works through webhooks and events using the `GITHUB_TOKEN` that is automatically available in GitHub Actions. No personal access tokens are required.

- **Designed for krew**: It is built specifically for the krew-index workflow and integrates with Krew's automation.

- **Automatic merging**: The Krew team has configured their CI to automatically test and merge PRs from krew-release-bot (usually within 5-10 minutes).

- **Officially recommended**: The Krew team explicitly recommends this approach in their documentation as the standard way to automate plugin updates.

- **Free and open source**: No paid subscriptions required.

#### The Real-World Evidence

Looking at recent pull requests to `kubernetes-sigs/krew-index`, **almost all automated plugin updates are created by krew-release-bot**. You'll see patterns like:

```
Author: krew-release-bot
Title: "release new version v0.6.11 of radar"
```

This shows that the Kubernetes ecosystem has standardized on krew-release-bot rather than GoReleaser's built-in publishing.

#### Summary

While GoReleaser's built-in krew support exists in the code, it is **practically unusable for the krew-index repository** because of permission and security constraints. krew-release-bot is the de facto standard because:
- It works without special permissions
- It's more secure
- It integrates with Krew's automation
- It's free and recommended by the Krew team

This is why we set `skip_upload: true` in GoReleaser and let krew-release-bot handle the actual publishing.

### The Template File

The `.krew.yaml` file in the repository root is a Go template with placeholders for dynamic values:

```yaml
apiVersion: krew.googlecontainertools.github.com/v1alpha2
kind: Plugin
metadata:
  name: kubescape
spec:
  version: {{ .TagName }}
  platforms:
  - selector:
      matchLabels:
        os: linux
        arch: amd64
    {{ $version := trimPrefix "v" .TagName }}{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_linux_amd64.tar.gz" $version) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: linux
        arch: arm64
    {{ $version := trimPrefix "v" .TagName }}{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_linux_arm64.tar.gz" $version) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: darwin
        arch: amd64
    {{ $version := trimPrefix "v" .TagName }}{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_darwin_amd64.tar.gz" $version) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: darwin
        arch: arm64
    {{ $version := trimPrefix "v" .TagName }}{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_darwin_arm64.tar.gz" $version) .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: windows
        arch: amd64
    {{ $version := trimPrefix "v" .TagName }}{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_windows_amd64.tar.gz" $version) .TagName }}
    bin: kubescape.exe
  - selector:
      matchLabels:
        os: windows
        arch: arm64
    {{ $version := trimPrefix "v" .TagName }}{{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/" .TagName (printf "kubescape_%s_windows_arm64.tar.gz" $version) .TagName }}
    bin: kubescape.exe
  shortDescription: Scan resources and cluster configs against security frameworks.
  description: |
    Kubescape is the first tool for testing if Kubernetes is deployed securely
    according to mitigations and best practices. It includes risk analysis,
    security compliance, and misconfiguration scanning with an easy-to-use
    CLI interface, flexible output formats, and automated scanning capabilities.

    Features:
    - Risk analysis: Identify vulnerabilities and security risks in your cluster
    - Security compliance: Check your cluster against multiple security frameworks
    - Misconfiguration scanning: Detect security misconfigurations in your workloads
    - Flexible output: Results in JSON, SARIF, HTML, JUnit, and Prometheus formats
    - CI/CD integration: Easily integrate into your CI/CD pipeline
  homepage: https://kubescape.io/
  caveats: |
    Requires kubectl and basic knowledge of Kubernetes.
    Run 'kubescape scan' to scan your Kubernetes cluster or manifests.
```

`{{ .TagName }}` is replaced with the release tag (e.g., `v3.0.0`), `{{ trimPrefix "v" .TagName }}` strips the `v` prefix for the archive file names, and `{{ addURIAndSha ... }}` expands to the download URI together with the SHA256 checksum of the corresponding archive.
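For a hypothetical tag such as `v3.0.34`, the first platform entry would render to roughly the following; the `sha256` value is a placeholder for the checksum the bot computes from the real archive:

```yaml
- selector:
    matchLabels:
      os: linux
      arch: amd64
  uri: https://github.com/kubescape/kubescape/releases/download/v3.0.34/kubescape_3.0.34_linux_amd64.tar.gz
  sha256: <computed from the published archive>
  bin: kubescape
```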
### Release Workflow

The release workflow (`.github/workflows/02-release.yaml`) can be triggered in two ways; a sketch of the corresponding trigger block follows the list:

1. **Automatic**: When a new tag matching the pattern `v[0-9]+.[0-9]+.[0-9]+` is pushed to the repository
2. **Manual**: Via `workflow_dispatch` with an optional `skip_publish` input
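Reconstructed from the description above (not copied from the workflow file), the trigger block looks roughly like this:

```yaml
on:
  push:
    tags:
      - "v[0-9]+.[0-9]+.[0-9]+"
  workflow_dispatch:
    inputs:
      skip_publish:
        description: "Skip publishing artifacts and the krew-index update"
        type: boolean
        default: true
```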
When the workflow is triggered:

1. GoReleaser builds and publishes the release artifacts (unless `skip_publish=true` is set)
2. The krew-release-bot step runs conditionally:
   - It **runs** when triggered by a tag push OR by `workflow_dispatch` with `skip_publish=false`
   - It **skips** when triggered by `workflow_dispatch` with `skip_publish=true` (the default)
3. When it runs, the bot:
   - Reads the `.krew.yaml` template
   - Fills in the template with release information
   - Creates a pull request to the `kubernetes-sigs/krew-index` repository
   - The PR is automatically tested and merged by krew's infrastructure

### Workflow Permissions

The release job has the following permissions:

```yaml
permissions:
  actions: read
  checks: read
  contents: write
  deployments: read
  discussions: read
  id-token: write
  issues: read
  models: read
  packages: write
  pages: read
  pull-requests: read
  repository-projects: read
  statuses: read
  security-events: read
  attestations: read
  artifact-metadata: read
```

These permissions are necessary for GoReleaser to create releases and upload artifacts.

### Testing the Template

Before committing changes to `.krew.yaml`, you can test how the template will be rendered using Docker:

```bash
docker run -v $(pwd)/.krew.yaml:/tmp/.krew.yaml ghcr.io/rajatjindal/krew-release-bot:v0.0.47 \
  krew-release-bot template --tag v3.0.0 --template-file /tmp/.krew.yaml
```

This outputs the generated krew manifest, letting you verify that:
- The version field is correct
- All download URLs are properly formatted
- The SHA256 checksum will be calculated correctly

### Why skip_upload in GoReleaser?

In `.goreleaser.yaml`, the `krews` section has `skip_upload: true`:

```yaml
krews:
  - name: kubescape
    ids:
      - cli
    skip_upload: true # We use krew-release-bot instead
    homepage: https://kubescape.io/
    description: It includes risk analysis, security compliance, and misconfiguration scanning with an easy-to-use CLI interface, flexible output formats, and automated scanning capabilities.
    short_description: Scan resources and cluster configs against security frameworks.
```

This is intentional because:
- GoReleaser still generates the manifest, but (as explained above) submitting PRs to krew-index directly is impractical without special permissions and GoReleaser Pro
- krew-release-bot is the tool the Krew team recommends for krew automation
- krew-release-bot PRs are automatically tested and merged by krew's CI

### Manual Release Testing

You can test the release workflow manually, without publishing to krew, using `workflow_dispatch`:

1. Go to the Actions tab in GitHub
2. Select the "02-create_release" workflow
3. Click "Run workflow"
4. The `skip_publish` input defaults to `true` (publishing is skipped)
5. Set `skip_publish` to `false` if you want to test the full release process, including the krew-index update
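The same manual run can also be triggered from a terminal with the GitHub CLI, assuming `gh` is authenticated against this repository:

```bash
# skip_publish=false exercises the full release path, including the krew-index PR
gh workflow run 02-release.yaml -f skip_publish=false
```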
### Making Changes to the Template

If you need to update the krew manifest (e.g., change the description, add platforms, or update the binary location):

1. Edit the `.krew.yaml` file
2. Test your changes with the Docker command shown above
3. Commit and push the changes
4. The next release will use the updated template

### Installing kubescape via krew

Once the version-bump PR is merged into the krew index, users can install the plugin with:

```bash
kubectl krew update
kubectl krew install kubescape
```

### Further Reading

- [Krew official documentation](https://krew.sigs.k8s.io/docs/developer-guide/)
- [krew-release-bot repository](https://github.com/rajatjindal/krew-release-bot)
- [Krew plugin submission guide](https://krew.sigs.k8s.io/docs/developer-guide/develop/plugins/)
README.md

```diff
@@ -37,10 +37,10 @@ _Please [star ⭐](https://github.com/kubescape/kubescape/stargazers) the repo i
 - [Demo](#-demo)
 - [Quick Start](#-quick-start)
 - [Installation](#-installation)
-- [CLI Commands](#-cli-commands)
+- [CLI Commands](#%EF%B8%8F-cli-commands)
 - [Usage Examples](#-usage-examples)
-- [Architecture](#-architecture)
-- [In-Cluster Operator](#-in-cluster-operator)
+- [Architecture](#%EF%B8%8F-architecture)
+- [In-Cluster Operator](#%EF%B8%8F-in-cluster-operator)
 - [Integrations](#-integrations)
 - [Community](#-community)
 - [Changelog](#changelog)
```
```diff
@@ -128,7 +128,7 @@ gha_group_start "Smoke tests"
 log "Running smoke tests with $PYTHON $SMOKE_RUNNER \"$ART_PATH\""
 # Run the test runner, propagate exit code
 set +e
-"$PYTHON" "$SMOKE_RUNNER" "$ART_PATH"
+RELEASE="${RELEASE:-}" "$PYTHON" "$SMOKE_RUNNER" "$ART_PATH"
 rc=$?
 set -e
```
```diff
@@ -233,9 +233,10 @@ func (ksServer *KubescapeMcpserver) CallTool(name string, arguments map[string]i
 	// Get workload-level manifests
 	labelSelector := ""
-	if level == "workload" {
+	switch level {
+	case "workload":
 		labelSelector = "kubescape.io/context=filtered"
-	} else if level == "image" {
+	case "image":
 		labelSelector = "kubescape.io/context=non-filtered"
 	}

@@ -480,7 +481,7 @@ func mcpServerEntrypoint() error {
 	// Start the server
 	if err := server.ServeStdio(s); err != nil {
-		return fmt.Errorf("Server error: %v\n", err)
+		return fmt.Errorf("server error: %v", err)
 	}
 	return nil
 }

@@ -14,7 +14,7 @@ const (
 )

 var operatorExamples = fmt.Sprintf(`

   # Trigger a configuration scan
   %[1]s operator scan configurations

@@ -34,16 +34,16 @@ func GetOperatorCmd(ks meta.IKubescape) *cobra.Command {
 		Args: func(cmd *cobra.Command, args []string) error {
 			operatorInfo.Subcommands = append(operatorInfo.Subcommands, "operator")
 			if len(args) < 2 {
-				return errors.New("For the operator sub-command, you need to provide at least one additional sub-command. Refer to the examples above.")
+				return errors.New("for the operator sub-command, you need to provide at least one additional sub-command. Refer to the examples above")
 			}
 			return nil
 		},
 		RunE: func(cmd *cobra.Command, args []string) error {
 			if len(args) < 2 {
-				return errors.New("For the operator sub-command, you need to provide at least one additional sub-command. Refer to the examples above.")
+				return errors.New("for the operator sub-command, you need to provide at least one additional sub-command. Refer to the examples above")
 			}
 			if args[0] != scanSubCommand {
-				return errors.New(fmt.Sprintf("For the operator sub-command, only %s is supported. Refer to the examples above.", scanSubCommand))
+				return fmt.Errorf("for the operator sub-command, only %s is supported. Refer to the examples above", scanSubCommand)
 			}
 			return nil
 		},

@@ -21,7 +21,7 @@ func TestGetOperatorCmd(t *testing.T) {
 	assert.Equal(t, operatorExamples, cmd.Example)

 	err := cmd.Args(&cobra.Command{}, []string{})
-	expectedErrorMessage := "For the operator sub-command, you need to provide at least one additional sub-command. Refer to the examples above."
+	expectedErrorMessage := "for the operator sub-command, you need to provide at least one additional sub-command. Refer to the examples above"
 	assert.Equal(t, expectedErrorMessage, err.Error())

 	err = cmd.Args(&cobra.Command{}, []string{"scan", "configurations"})
@@ -37,6 +37,6 @@ func TestGetOperatorCmd(t *testing.T) {
 	assert.Equal(t, expectedErrorMessage, err.Error())

 	err = cmd.RunE(&cobra.Command{}, []string{"random-subcommand", "random-config"})
-	expectedErrorMessage = "For the operator sub-command, only " + scanSubCommand + " is supported. Refer to the examples above."
+	expectedErrorMessage = "for the operator sub-command, only " + scanSubCommand + " is supported. Refer to the examples above"
 	assert.Equal(t, expectedErrorMessage, err.Error())
 }

@@ -32,7 +32,7 @@ func getOperatorScanCmd(ks meta.IKubescape, operatorInfo cautils.OperatorInfo) *
 		return errors.New("for operator scan sub command, you must pass at least 1 more sub commands, see above examples")
 	}
 	if (args[0] != vulnerabilitiesSubCommand) && (args[0] != configurationsSubCommand) {
-		return errors.New(fmt.Sprintf("For the operator sub-command, only %s and %s are supported. Refer to the examples above.", vulnerabilitiesSubCommand, configurationsSubCommand))
+		return fmt.Errorf("for the operator sub-command, only %s and %s are supported. Refer to the examples above", vulnerabilitiesSubCommand, configurationsSubCommand)
 	}
 	return nil
 },

@@ -41,6 +41,6 @@ func TestGetOperatorScanCmd(t *testing.T) {
 	assert.Nil(t, err)

 	err = cmd.RunE(&cobra.Command{}, []string{"random"})
-	expectedErrorMessage = "For the operator sub-command, only " + vulnerabilitiesSubCommand + " and " + configurationsSubCommand + " are supported. Refer to the examples above."
+	expectedErrorMessage = "for the operator sub-command, only " + vulnerabilitiesSubCommand + " and " + configurationsSubCommand + " are supported. Refer to the examples above"
 	assert.Equal(t, expectedErrorMessage, err.Error())
 }
```
`cmd/root.go` (14 lines changed)
```diff
@@ -44,12 +44,12 @@ var ksExamples = fmt.Sprintf(`
   %[1]s config view
 `, cautils.ExecName())

-func NewDefaultKubescapeCommand(ctx context.Context) *cobra.Command {
+func NewDefaultKubescapeCommand(ctx context.Context, ksVersion, ksCommit, ksDate string) *cobra.Command {
 	ks := core.NewKubescape(ctx)
-	return getRootCmd(ks)
+	return getRootCmd(ks, ksVersion, ksCommit, ksDate)
 }

-func getRootCmd(ks meta.IKubescape) *cobra.Command {
+func getRootCmd(ks meta.IKubescape, ksVersion, ksCommit, ksDate string) *cobra.Command {

 	rootCmd := &cobra.Command{
 		Use: "kubescape",
@@ -93,7 +93,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
 	rootCmd.AddCommand(download.GetDownloadCmd(ks))
 	rootCmd.AddCommand(list.GetListCmd(ks))
 	rootCmd.AddCommand(completion.GetCompletionCmd())
-	rootCmd.AddCommand(version.GetVersionCmd(ks))
+	rootCmd.AddCommand(version.GetVersionCmd(ks, ksVersion, ksCommit, ksDate))
 	rootCmd.AddCommand(config.GetConfigCmd(ks))
 	rootCmd.AddCommand(update.GetUpdateCmd(ks))
 	rootCmd.AddCommand(fix.GetFixCmd(ks))
@@ -116,7 +116,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
 	return rootCmd
 }

-func Execute(ctx context.Context) error {
-	ks := NewDefaultKubescapeCommand(ctx)
+func Execute(ctx context.Context, ksVersion, ksCommit, ksDate string) error {
+	ks := NewDefaultKubescapeCommand(ctx, ksVersion, ksCommit, ksDate)
 	return ks.Execute()
 }

@@ -9,16 +9,16 @@ import (

 func TestNewDefaultKubescapeCommand(t *testing.T) {
 	t.Run("NewDefaultKubescapeCommand", func(t *testing.T) {
-		cmd := NewDefaultKubescapeCommand(context.Background())
+		cmd := NewDefaultKubescapeCommand(context.Background(), "", "", "")
 		assert.NotNil(t, cmd)
 	})
 }

 func TestExecute(t *testing.T) {
 	t.Run("Execute", func(t *testing.T) {
-		err := Execute(context.Background())
+		err := Execute(context.Background(), "", "", "")
 		if err != nil {
 			assert.EqualErrorf(t, err, "unknown command \"^\\\\QTestExecute\\\\E$\" for \"kubescape\"", err.Error())
 		}
 	})
 }

@@ -35,7 +35,7 @@ func Test_validateControlScanInfo(t *testing.T) {
 	t.Run(
 		tc.Description,
 		func(t *testing.T) {
-			var want error = tc.Want
+			var want = tc.Want

 			got := validateControlScanInfo(tc.ScanInfo)

@@ -85,7 +85,7 @@ func Test_validateFrameworkScanInfo(t *testing.T) {
 	t.Run(
 		tc.Description,
 		func(t *testing.T) {
-			var want error = tc.Want
+			var want = tc.Want

 			got := validateFrameworkScanInfo(tc.ScanInfo)

@@ -50,7 +50,7 @@ func TestValidateImageScanInfo(t *testing.T) {
 	t.Run(
 		tc.Description,
 		func(t *testing.T) {
-			var want error = tc.Want
+			var want = tc.Want

 			got := ValidateImageScanInfo(tc.ScanInfo)

@@ -9,21 +9,29 @@ import (
 	"github.com/spf13/cobra"
 )

-func GetVersionCmd(ks meta.IKubescape) *cobra.Command {
+func GetVersionCmd(ks meta.IKubescape, version, commit, date string) *cobra.Command {
 	versionCmd := &cobra.Command{
 		Use:   "version",
 		Short: "Get current version",
 		Long:  ``,
 		RunE: func(cmd *cobra.Command, args []string) error {
 			v := versioncheck.NewIVersionCheckHandler(ks.Context())
-			_ = v.CheckLatestVersion(ks.Context(), versioncheck.NewVersionCheckRequest("", versioncheck.BuildNumber, "", "", "version", nil))
+			_ = v.CheckLatestVersion(ks.Context(), versioncheck.NewVersionCheckRequest("", version, "", "", "version", nil))

 			_, _ = fmt.Fprintf(cmd.OutOrStdout(),
 				"Your current version is: %s\n",
-				versioncheck.BuildNumber,
+				version,
 			)
+			_, _ = fmt.Fprintf(cmd.OutOrStdout(),
+				"Build commit: %s\n",
+				commit,
+			)
+			_, _ = fmt.Fprintf(cmd.OutOrStdout(),
+				"Build date: %s\n",
+				date,
+			)
 			return nil
 		},
 	}
 	return versionCmd
 }

@@ -21,12 +21,12 @@ func TestGetVersionCmd(t *testing.T) {
 		{
 			name:        "Undefined Build Number",
 			buildNumber: "unknown",
-			want:        "Your current version is: unknown\n",
+			want:        "Your current version is: unknown\nBuild commit: \nBuild date: \n",
 		},
 		{
 			name:        "Defined Build Number: v3.0.1",
 			buildNumber: "v3.0.1",
-			want:        "Your current version is: v3.0.1\n",
+			want:        "Your current version is: v3.0.1\nBuild commit: \nBuild date: \n",
 		},
 	}
 	for _, tt := range tests {
@@ -34,7 +34,7 @@ func TestGetVersionCmd(t *testing.T) {
 		versioncheck.BuildNumber = tt.buildNumber

 		ks := core.NewKubescape(context.TODO())
-		if cmd := GetVersionCmd(ks); cmd != nil {
+		if cmd := GetVersionCmd(ks, tt.buildNumber, "", ""); cmd != nil {
 			buf := bytes.NewBufferString("")
 			cmd.SetOut(buf)
 			cmd.Execute()
@@ -46,4 +46,4 @@ func TestGetVersionCmd(t *testing.T) {
 		}
 	})
 }
```
Deleted file (the version bootstrap in package `cautils`):

```diff
@@ -1,27 +0,0 @@
-package cautils
-
-import (
-	"os"
-	"runtime/debug"
-
-	"github.com/kubescape/backend/pkg/versioncheck"
-)
-
-var Client string
-
-func init() {
-	// Try to get version from build info (Go 1.24+ automatically populates this from VCS tags)
-	if info, ok := debug.ReadBuildInfo(); ok && info.Main.Version != "" && info.Main.Version != "(devel)" {
-		versioncheck.BuildNumber = info.Main.Version
-	}
-
-	// Fallback to RELEASE environment variable
-	if versioncheck.BuildNumber == "" {
-		versioncheck.BuildNumber = os.Getenv("RELEASE")
-	}
-
-	// Client is typically set via ldflags: -X "github.com/kubescape/kubescape/v3/core/cautils.Client=..."
-	if Client != "" {
-		versioncheck.Client = Client
-	}
-}

@@ -521,9 +521,3 @@ func GetTenantConfig(accountID, accessKey, clusterName, customClusterName string
 }

-// firstNonEmpty returns the first non-empty string
-func firstNonEmpty(s1, s2 string) string {
-	if s1 != "" {
-		return s1
-	}
-	return s2
-}
```
```diff
@@ -73,25 +73,38 @@ type OPASessionObj struct {
 }

 func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework, k8sResources K8SResources, scanInfo *ScanInfo) *OPASessionObj {
+	clusterSize := estimateClusterSize(k8sResources)
+	if clusterSize < 100 {
+		clusterSize = 100
+	}
+
 	return &OPASessionObj{
 		Report:                &reporthandlingv2.PostureReport{},
 		Policies:              frameworks,
 		K8SResources:          k8sResources,
-		AllResources:          make(map[string]workloadinterface.IMetadata),
-		ResourcesResult:       make(map[string]resourcesresults.Result),
-		ResourcesPrioritized:  make(map[string]prioritization.PrioritizedResource),
-		InfoMap:               make(map[string]apis.StatusInfo),
-		ResourceToControlsMap: make(map[string][]string),
-		ResourceSource:        make(map[string]reporthandling.Source),
+		AllResources:          make(map[string]workloadinterface.IMetadata, clusterSize),
+		ResourcesResult:       make(map[string]resourcesresults.Result, clusterSize),
+		ResourcesPrioritized:  make(map[string]prioritization.PrioritizedResource, clusterSize/10),
+		InfoMap:               make(map[string]apis.StatusInfo, clusterSize/10),
+		ResourceToControlsMap: make(map[string][]string, clusterSize/2),
+		ResourceSource:        make(map[string]reporthandling.Source, clusterSize),
 		SessionID:             scanInfo.ScanID,
 		Metadata:              scanInfoToScanMetadata(ctx, scanInfo),
 		OmitRawResources:      scanInfo.OmitRawResources,
 		TriggeredByCLI:        scanInfo.TriggeredByCLI,
-		TemplateMapping:       make(map[string]MappingNodes),
+		TemplateMapping:       make(map[string]MappingNodes, clusterSize/10),
 		LabelsToCopy:          scanInfo.LabelsToCopy,
 	}
 }

+func estimateClusterSize(k8sResources K8SResources) int {
+	total := 0
+	for _, resourceIDs := range k8sResources {
+		total += len(resourceIDs)
+	}
+	return total
+}
+
 // SetTopWorkloads sets the top workloads by score
 func (sessionObj *OPASessionObj) SetTopWorkloads() {
 	count := 0
```
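The capacity hints above presize each map for the estimated number of resources in the cluster, so a large scan no longer pays for repeated map growth. A self-contained sketch of the idea:

```go
package main

import "fmt"

func main() {
	const clusterSize = 10000

	// Without a hint the map starts near-empty and rehashes several
	// times on the way to clusterSize entries.
	plain := make(map[int]struct{})

	// With a hint the runtime allocates buckets for clusterSize
	// entries up front, so the fill loop never triggers growth.
	hinted := make(map[int]struct{}, clusterSize)

	for i := 0; i < clusterSize; i++ {
		plain[i] = struct{}{}
		hinted[i] = struct{}{}
	}
	fmt.Println(len(plain), len(hinted))
}
```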
```diff
@@ -76,14 +76,18 @@ func ShouldSkipRule(control reporthandling.Control, rule reporthandling.PolicyRu
 // In local build (BuildNumber = ""):
 // returns true only if rule doesn't have the "until" attribute
 func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version string) bool {
+	normalizedVersion := version
+	if version != "" && !semver.IsValid(version) {
+		normalizedVersion = "v" + version
+	}
+
 	if from, ok := attributes["useFromKubescapeVersion"]; ok && from != nil {
 		switch sfrom := from.(type) {
 		case string:
-			if version != "" && semver.Compare(version, sfrom) == -1 {
+			if normalizedVersion != "" && semver.IsValid(normalizedVersion) && semver.Compare(normalizedVersion, sfrom) == -1 {
 				return false
 			}
 		default:
 			// Handle case where useFromKubescapeVersion is not a string
 			return false
 		}
 	}
@@ -91,11 +95,10 @@ func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version
 	if until, ok := attributes["useUntilKubescapeVersion"]; ok && until != nil {
 		switch suntil := until.(type) {
 		case string:
-			if version == "" || semver.Compare(version, suntil) >= 0 {
+			if normalizedVersion == "" || (semver.IsValid(normalizedVersion) && semver.Compare(normalizedVersion, suntil) >= 0) {
 				return false
 			}
 		default:
 			// Handle case where useUntilKubescapeVersion is not a string
 			return false
 		}
 	}
```
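The normalization matters because `golang.org/x/mod/semver` only treats strings with a leading `v` as valid, and comparisons against invalid versions are not meaningful. A quick standalone check:

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	// Without the "v" prefix the string is not valid semver for this package.
	fmt.Println(semver.IsValid("3.0.1"))  // false
	fmt.Println(semver.IsValid("v3.0.1")) // true

	// After normalizing, comparisons behave as expected.
	fmt.Println(semver.Compare("v3.0.1", "v3.0.2")) // -1
}
```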
@@ -322,7 +322,7 @@ func glob(root, pattern string, onlyDirectories bool) ([]string, error) {
|
||||
return nil
|
||||
}
|
||||
fileFormat := getFileFormat(path)
|
||||
if !(fileFormat == JSON_FILE_FORMAT || fileFormat == YAML_FILE_FORMAT) {
|
||||
if fileFormat != JSON_FILE_FORMAT && fileFormat != YAML_FILE_FORMAT {
|
||||
return nil
|
||||
}
|
||||
if matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -102,7 +102,7 @@ func TestHttpRespToString_NilResponse(t *testing.T) {
|
||||
|
||||
func TestHttpRespToString_ValidResponse(t *testing.T) {
|
||||
resp := &http.Response{
|
||||
Body: ioutil.NopCloser(strings.NewReader("test response")),
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
Status: "200 OK",
|
||||
StatusCode: 200,
|
||||
}
|
||||
@@ -114,7 +114,7 @@ func TestHttpRespToString_ValidResponse(t *testing.T) {
|
||||
// Returns an error with status and reason when unable to read response body.
|
||||
func TestHttpRespToString_ReadError(t *testing.T) {
|
||||
resp := &http.Response{
|
||||
Body: ioutil.NopCloser(strings.NewReader("test response")),
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
}
|
||||
resp.Body.Close()
|
||||
result, err := httpRespToString(resp)
|
||||
@@ -125,7 +125,7 @@ func TestHttpRespToString_ReadError(t *testing.T) {
|
||||
// Returns an error with status and reason when unable to read response body.
|
||||
func TestHttpRespToString_ErrorCodeLessThan200(t *testing.T) {
|
||||
resp := &http.Response{
|
||||
Body: ioutil.NopCloser(strings.NewReader("test response")),
|
||||
Body: io.NopCloser(strings.NewReader("test response")),
|
||||
StatusCode: 100,
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
@@ -25,10 +24,6 @@ const (
|
||||
|
||||
var (
|
||||
globalMx sync.Mutex // a mutex to avoid data races on package globals while testing
|
||||
|
||||
testOptions = []v1.KSCloudOption{
|
||||
v1.WithTrace(os.Getenv("DEBUG_TEST") != ""),
|
||||
}
|
||||
)
|
||||
|
||||
func TestGlobalKSCloudAPIConnector(t *testing.T) {
|
||||
@@ -113,8 +108,6 @@ func mockAPIServer(t testing.TB) *testServer {
|
||||
defer func() { _ = r.Body.Close() }()
|
||||
_, _ = io.Copy(w, r.Body)
|
||||
|
||||
return
|
||||
|
||||
})
|
||||
|
||||
return server
|
||||
|
||||
@@ -226,7 +226,7 @@ func (lp *LoadPolicy) GetControlsInputs(_ /* clusterName */ string) (map[string]
|
||||
buf, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
formattedError := fmt.Errorf(
|
||||
`Error opening %s file, "controls-config" will be downloaded from ARMO management portal`,
|
||||
`error opening %s file, "controls-config" will be downloaded from ARMO management portal`,
|
||||
fileName,
|
||||
)
|
||||
|
||||
@@ -236,7 +236,7 @@ func (lp *LoadPolicy) GetControlsInputs(_ /* clusterName */ string) (map[string]
|
||||
controlInputs := make(map[string][]string, 100) // from armotypes.Settings.PostureControlInputs
|
||||
if err = json.Unmarshal(buf, &controlInputs); err != nil {
|
||||
formattedError := fmt.Errorf(
|
||||
`Error reading %s file, %v, "controls-config" will be downloaded from ARMO management portal`,
|
||||
`error reading %s file, %v, "controls-config" will be downloaded from ARMO management portal`,
|
||||
fileName, err,
|
||||
)
|
||||
|
||||
|
||||
57298
core/cautils/getter/testdata/policy.json
vendored
57298
core/cautils/getter/testdata/policy.json
vendored
File diff suppressed because one or more lines are too long
@@ -54,7 +54,7 @@ func TestGetKustomizeDirectoryName(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tempFile := filepath.Join(tt.args.path, "kustomization.yaml")
|
||||
if tt.createKustomization {
|
||||
_ = os.WriteFile(tempFile, []byte(""), 0644)
|
||||
_ = os.WriteFile(tempFile, []byte(""), 0600)
|
||||
}
|
||||
if got := getKustomizeDirectoryName(tt.args.path); got != tt.want {
|
||||
t.Errorf("GetKustomizeDirectoryName() = %v, want %v", got, tt.want)
|
||||
|
||||
@@ -81,7 +81,7 @@ func Test_GetRequestPayload(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
result := tc.OperatorScanInfo.GetRequestPayload()
|
||||
result := tc.GetRequestPayload()
|
||||
assert.Equal(t, tc.result, result)
|
||||
})
|
||||
}
|
||||
@@ -136,8 +136,8 @@ func Test_ValidatePayload(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
payload := tc.OperatorScanInfo.GetRequestPayload()
|
||||
result := tc.OperatorScanInfo.ValidatePayload(payload)
|
||||
payload := tc.GetRequestPayload()
|
||||
result := tc.ValidatePayload(payload)
|
||||
assert.Equal(t, tc.result, result)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -170,7 +170,6 @@ func getInfoFromOne(output string, lastNumber int, isMapType bool) (value string
|
||||
if isMapType {
|
||||
lineNumber = lineNumber - 1
|
||||
}
|
||||
lastNumber = lineNumber
|
||||
// save to structure
|
||||
} else {
|
||||
lineNumber = lastNumber
|
||||
|
||||
@@ -78,7 +78,7 @@ func (p *portForward) StopPortForwarder() {
|
||||
|
||||
func (p *portForward) StartPortForwarder() error {
|
||||
go func() {
|
||||
p.PortForwarder.ForwardPorts()
|
||||
p.ForwardPorts()
|
||||
}()
|
||||
p.waitForPortForwardReadiness()
|
||||
|
||||
|
||||
@@ -64,7 +64,7 @@ func Test_CreatePortForwarder(t *testing.T) {
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
k8sClient := k8sinterface.KubernetesApi{
|
||||
KubernetesClient: fake.NewSimpleClientset(),
|
||||
KubernetesClient: fake.NewClientset(),
|
||||
K8SConfig: &rest.Config{
|
||||
Host: "any",
|
||||
},
|
||||
@@ -105,7 +105,7 @@ func Test_GetPortForwardLocalhost(t *testing.T) {
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
k8sClient := k8sinterface.KubernetesApi{
|
||||
KubernetesClient: fake.NewSimpleClientset(),
|
||||
KubernetesClient: fake.NewClientset(),
|
||||
K8SConfig: &rest.Config{
|
||||
Host: "any",
|
||||
},
|
||||
|
||||
@@ -36,7 +36,7 @@ func getOperatorPod(k8sClient *k8sinterface.KubernetesApi, ns string) (*v1.Pod,
|
||||
return nil, err
|
||||
}
|
||||
if len(pods.Items) != 1 {
|
||||
return nil, errors.New("Could not find the Kubescape Operator chart, please validate that the Kubescape Operator helm chart is installed and running -> https://github.com/kubescape/helm-charts")
|
||||
return nil, errors.New("could not find the Kubescape Operator chart, please validate that the Kubescape Operator helm chart is installed and running -> https://github.com/kubescape/helm-charts")
|
||||
}
|
||||
|
||||
return &pods.Items[0], nil
|
||||
@@ -90,8 +90,8 @@ func (a *OperatorAdapter) httpPostOperatorScanRequest(body apis.Commands) (strin
|
||||
}
|
||||
|
||||
func (a *OperatorAdapter) OperatorScan() (string, error) {
|
||||
payload := a.OperatorScanInfo.GetRequestPayload()
|
||||
if err := a.OperatorScanInfo.ValidatePayload(payload); err != nil {
|
||||
payload := a.GetRequestPayload()
|
||||
if err := a.ValidatePayload(payload); err != nil {
|
||||
return "", err
|
||||
}
|
||||
res, err := a.httpPostOperatorScanRequest(*payload)
|
||||
|
||||
@@ -23,13 +23,13 @@ func Test_getOperatorPod(t *testing.T) {
|
||||
name: "test error no operator exist",
|
||||
createOperatorPod: false,
|
||||
createAnotherOperatorPodWithSameLabel: false,
|
||||
expectedError: fmt.Errorf("Could not find the Kubescape Operator chart, please validate that the Kubescape Operator helm chart is installed and running -> https://github.com/kubescape/helm-charts"),
|
||||
expectedError: fmt.Errorf("could not find the Kubescape Operator chart, please validate that the Kubescape Operator helm chart is installed and running -> https://github.com/kubescape/helm-charts"),
|
||||
},
|
||||
{
|
||||
name: "test error several operators exist",
|
||||
createOperatorPod: true,
|
||||
createAnotherOperatorPodWithSameLabel: true,
|
||||
expectedError: fmt.Errorf("Could not find the Kubescape Operator chart, please validate that the Kubescape Operator helm chart is installed and running -> https://github.com/kubescape/helm-charts"),
|
||||
expectedError: fmt.Errorf("could not find the Kubescape Operator chart, please validate that the Kubescape Operator helm chart is installed and running -> https://github.com/kubescape/helm-charts"),
|
||||
},
|
||||
{
|
||||
name: "test no error",
|
||||
@@ -42,7 +42,7 @@ func Test_getOperatorPod(t *testing.T) {
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
k8sClient := k8sinterface.KubernetesApi{
|
||||
KubernetesClient: fake.NewSimpleClientset(),
|
||||
KubernetesClient: fake.NewClientset(),
|
||||
Context: context.TODO(),
|
||||
}
|
||||
|
||||
|
||||
@@ -48,7 +48,7 @@ func (ks *Kubescape) Fix(fixInfo *metav1.FixInfo) error {
|
||||
for _, err := range errors {
|
||||
logger.L().Ctx(ks.Context()).Warning(err.Error())
|
||||
}
|
||||
return fmt.Errorf("Failed to fix some resources, check the logs for more details")
|
||||
return fmt.Errorf("failed to fix some resources, check the logs for more details")
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -64,9 +64,10 @@ func userConfirmed() bool {
|
||||
}
|
||||
|
||||
input = strings.ToLower(input)
|
||||
if input == "y" || input == "yes" {
|
||||
switch input {
|
||||
case "y", "yes":
|
||||
return true
|
||||
} else if input == "n" || input == "no" {
|
||||
case "n", "no":
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -82,7 +82,7 @@ func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, report
|
||||
}
|
||||
|
||||
func getResourceHandler(ctx context.Context, scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor) resourcehandler.IResourceHandler {
|
||||
ctx, span := otel.Tracer("").Start(ctx, "getResourceHandler")
|
||||
_, span := otel.Tracer("").Start(ctx, "getResourceHandler")
|
||||
defer span.End()
|
||||
|
||||
if len(scanInfo.InputPatterns) > 0 || k8s == nil {
|
||||
|
||||
@@ -48,7 +48,7 @@ func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
|
||||
if listFormatFunction, ok := listFormatFunc[listPolicies.Format]; ok {
|
||||
listFormatFunction(ks.Context(), listPolicies.Target, policies)
|
||||
} else {
|
||||
return fmt.Errorf("Invalid format \"%s\", Supported formats: 'pretty-print'/'json' ", listPolicies.Format)
|
||||
return fmt.Errorf("invalid format \"%s\", supported formats: 'pretty-print'/'json' ", listPolicies.Format)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -168,7 +168,7 @@ func generateControlRows(policies []string) []table.Row {
|
||||
|
||||
docs := cautils.GetControlLink(id)
|
||||
|
||||
currentRow := table.Row{id, control, docs, strings.Replace(framework, " ", "\n", -1)}
|
||||
currentRow := table.Row{id, control, docs, strings.ReplaceAll(framework, " ", "\n")}
|
||||
|
||||
rows = append(rows, currentRow)
|
||||
}
|
||||
@@ -188,7 +188,7 @@ func generatePolicyRows(policies []string) []table.Row {
func shortFormatControlRows(controlRows []table.Row) []table.Row {
    rows := make([]table.Row, 0, len(controlRows))
    for _, controlRow := range controlRows {
-        rows = append(rows, table.Row{fmt.Sprintf("Control ID"+strings.Repeat(" ", 3)+": %+v\nControl Name"+strings.Repeat(" ", 1)+": %+v\nDocs"+strings.Repeat(" ", 9)+": %+v\nFrameworks"+strings.Repeat(" ", 3)+": %+v", controlRow[0], controlRow[1], controlRow[2], strings.Replace(controlRow[3].(string), "\n", " ", -1))})
+        rows = append(rows, table.Row{fmt.Sprintf("Control ID"+strings.Repeat(" ", 3)+": %+v\nControl Name"+strings.Repeat(" ", 1)+": %+v\nDocs"+strings.Repeat(" ", 9)+": %+v\nFrameworks"+strings.Repeat(" ", 3)+": %+v", controlRow[0], controlRow[1], controlRow[2], strings.ReplaceAll(controlRow[3].(string), "\n", " "))})
    }
    return rows
}
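strings.ReplaceAll(s, old, new) is defined as exactly strings.Replace(s, old, new, -1); the dedicated name just spells out the intent. For example:

package main

import (
    "fmt"
    "strings"
)

func main() {
    frameworks := "NSA MITRE DevOpsBest"
    // Both calls produce the same output; ReplaceAll states the intent directly.
    a := strings.Replace(frameworks, " ", "\n", -1)
    b := strings.ReplaceAll(frameworks, " ", "\n")
    fmt.Println(a == b) // true
}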
@@ -313,7 +313,7 @@ func patchWithContext(ctx context.Context, buildkitAddr, image, reportFile, patc
        return res, nil
    }, buildChannel)

-    return nil
+    return err
}

func getOSType(ctx context.Context, osreleaseBytes []byte) (string, error) {
@@ -3,8 +3,8 @@ package core
import (
    "context"
    "fmt"
-    "slices"

+    mapset "github.com/deckarep/golang-set/v2"
    "github.com/kubescape/backend/pkg/versioncheck"
    "github.com/kubescape/go-logger"
    "github.com/kubescape/go-logger/helpers"
@@ -145,14 +145,14 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
    }

    // set policy getter only after setting the customerGUID
-    scanInfo.Getters.PolicyGetter = getPolicyGetter(ctxInit, scanInfo.UseFrom, interfaces.tenantConfig.GetAccountID(), scanInfo.FrameworkScan, downloadReleasedPolicy)
-    scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(ctxInit, scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
-    scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(ctxInit, scanInfo.UseExceptions, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
-    scanInfo.Getters.AttackTracksGetter = getAttackTracksGetter(ctxInit, scanInfo.AttackTracks, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
+    scanInfo.PolicyGetter = getPolicyGetter(ctxInit, scanInfo.UseFrom, interfaces.tenantConfig.GetAccountID(), scanInfo.FrameworkScan, downloadReleasedPolicy)
+    scanInfo.ControlsInputsGetter = getConfigInputsGetter(ctxInit, scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
+    scanInfo.ExceptionsGetter = getExceptionsGetter(ctxInit, scanInfo.UseExceptions, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
+    scanInfo.AttackTracksGetter = getAttackTracksGetter(ctxInit, scanInfo.AttackTracks, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)

    // TODO - list supported frameworks/controls
    if scanInfo.ScanAll {
-        scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), apisv1.KindFramework)
+        scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.PolicyGetter), apisv1.KindFramework)
    }

    // remove host scanner components
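Dropping the explicit .Getters selector works because Go promotes the fields of an embedded struct to the outer type; assuming ScanInfo embeds a Getters struct (which is what this change implies), both spellings reach the same field. A minimal illustration with hypothetical types:

package main

import "fmt"

type Getters struct {
    PolicyGetter string
}

type ScanInfo struct {
    Getters // embedded: its fields are promoted to ScanInfo
}

func main() {
    si := ScanInfo{}
    si.PolicyGetter = "release"                             // promoted access
    fmt.Println(si.Getters.PolicyGetter == si.PolicyGetter) // true — same field
}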
@@ -200,7 +200,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
    // ======================== prioritization ===================
    if scanInfo.PrintAttackTree || isPrioritizationScanType(scanInfo.ScanType) {
        _, spanPrioritization := otel.Tracer("").Start(ctxOpa, "prioritization")
-        if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(ctxOpa, scanInfo.Getters.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
+        if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(ctxOpa, scanInfo.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
            logger.L().Ctx(ks.Context()).Warning("failed to get attack tracks, this may affect the scanning results", helpers.Error(err))
        } else if err := priotizationHandler.PrioritizeResources(scanData); err != nil {
            return resultsHandling, fmt.Errorf("%w", err)
@@ -225,7 +225,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
}

func scanImages(scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, ctx context.Context, resultsHandling *resultshandling.ResultsHandler, scanInfo *cautils.ScanInfo) {
-    var imagesToScan []string
+    imagesToScan := mapset.NewSet[string]()

    if scanType == cautils.ScanTypeWorkload {
        containers, err := workloadinterface.NewWorkloadObj(scanData.SingleResourceScan.GetObject()).GetContainers()
@@ -234,9 +234,7 @@ func scanImages(scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, ctx
            return
        }
        for _, container := range containers {
-            if !slices.Contains(imagesToScan, container.Image) {
-                imagesToScan = append(imagesToScan, container.Image)
-            }
+            imagesToScan.Add(container.Image)
        }
    } else {
        for _, workload := range scanData.AllResources {
@@ -246,9 +244,7 @@ func scanImages(scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, ctx
                continue
            }
            for _, container := range containers {
-                if !slices.Contains(imagesToScan, container.Image) {
-                    imagesToScan = append(imagesToScan, container.Image)
-                }
+                imagesToScan.Add(container.Image)
            }
        }
    }
@@ -261,7 +257,7 @@ func scanImages(scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, ctx
    }
    defer svc.Close()

-    for _, img := range imagesToScan {
+    for img := range imagesToScan.Iter() {
        logger.L().Start("Scanning", helpers.String("image", img))
        if err := scanSingleImage(ctx, img, svc, resultsHandling); err != nil {
            logger.L().StopError("failed to scan", helpers.String("image", img), helpers.Error(err))
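Swapping the slice-plus-slices.Contains pattern for a golang-set set turns each deduplicating insert from a linear scan into an O(1) operation and removes the manual membership check. A self-contained sketch of the same idiom:

package main

import (
    "fmt"

    mapset "github.com/deckarep/golang-set/v2"
)

func main() {
    images := mapset.NewSet[string]()
    for _, img := range []string{"nginx:1.25", "redis:7", "nginx:1.25"} {
        images.Add(img) // duplicates are silently ignored by the set
    }
    for img := range images.Iter() { // iteration order is unspecified
        fmt.Println(img)
    }
    fmt.Println("unique images:", images.Cardinality()) // 2
}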
@@ -46,8 +46,6 @@ var hash = []rune("abcdef0123456789")
var nums = []rune("0123456789")

func randSeq(n int, bank []rune) string {
-    rand.Seed(time.Now().UnixNano())
-
    b := make([]rune, n)
    for i := range b {
        b[i] = bank[rand.Intn(len(bank))] //nolint:gosec
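The rand.Seed call can simply be dropped: since Go 1.20 the global math/rand source is automatically seeded, and rand.Seed is deprecated. A sketch of the reduced helper (the closing lines are reconstructed from the obvious intent, since the diff truncates the function):

package main

import (
    "fmt"
    "math/rand"
)

// randSeq draws n runes from bank using the auto-seeded global source (Go 1.20+).
func randSeq(n int, bank []rune) string {
    b := make([]rune, n)
    for i := range b {
        b[i] = bank[rand.Intn(len(bank))] //nolint:gosec // non-cryptographic IDs
    }
    return string(b)
}

func main() {
    fmt.Println(randSeq(8, []rune("abcdef0123456789")))
}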
@@ -88,6 +88,6 @@ type PkgFiles []PackageFile

func (v *ScanResultReport) AsFNVHash() string {
    hasher := fnv.New64a()
-    hasher.Write([]byte(fmt.Sprintf("%v", *v)))
+    fmt.Fprintf(hasher, "%v", *v)
    return fmt.Sprintf("%v", hasher.Sum64())
}
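Because hash.Hash implements io.Writer, fmt.Fprintf can format straight into the hasher, skipping the intermediate string and []byte conversions. Both forms yield the same digest:

package main

import (
    "fmt"
    "hash/fnv"
)

func main() {
    v := struct{ A, B int }{1, 2}

    h1 := fnv.New64a()
    h1.Write([]byte(fmt.Sprintf("%v", v))) // allocates an intermediate string

    h2 := fnv.New64a()
    fmt.Fprintf(h2, "%v", v) // writes directly into the hasher

    fmt.Println(h1.Sum64() == h2.Sum64()) // true
}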
@@ -209,7 +209,7 @@ func (h *FixHandler) ApplyChanges(ctx context.Context, resourcesToFix []Resource
        fixedYamlString, err := ApplyFixToContent(ctx, fileAsString, yamlExpression)

        if err != nil {
-            errors = append(errors, fmt.Errorf("Failed to fix file %s: %w ", filepath, err))
+            errors = append(errors, fmt.Errorf("failed to fix file %s: %w ", filepath, err))
            continue
        } else {
            updatedFiles[filepath] = true
@@ -344,7 +344,7 @@ func GetFileString(filepath string) (string, error) {
    bytes, err := os.ReadFile(filepath)

    if err != nil {
-        return "", fmt.Errorf("Error reading file %s", filepath)
+        return "", fmt.Errorf("error reading file %s", filepath)
    }

    return string(bytes), nil
@@ -354,7 +354,7 @@ func writeFixesToFile(filepath, content string) error {
    err := os.WriteFile(filepath, []byte(content), 0644) //nolint:gosec

    if err != nil {
-        return fmt.Errorf("Error writing fixes to file: %w", err)
+        return fmt.Errorf("error writing fixes to file: %w", err)
    }

    return nil
@@ -26,7 +26,7 @@ func decodeDocumentRoots(yamlAsString string) ([]yaml.Node, error) {
            break
        }
        if err != nil {
-            return nil, fmt.Errorf("Cannot Decode File as YAML")
+            return nil, fmt.Errorf("cannot decode file as YAML")

        }
@@ -55,7 +55,7 @@ func getFixedNodes(ctx context.Context, yamlAsString, yamlExpression string) ([]
    fixedCandidateNodes, err := allAtOnceEvaluator.EvaluateCandidateNodes(yamlExpression, allDocuments)

    if err != nil {
-        return nil, fmt.Errorf("Error fixing YAML, %w", err)
+        return nil, fmt.Errorf("error fixing YAML, %w", err)
    }

    fixedNodes := make([]yaml.Node, 0)
@@ -86,7 +86,7 @@ func adjustFixedListLines(originalList, fixedList *[]nodeInfo) {
func enocodeIntoYaml(parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) (string, error) {

    if tracker < 0 || tracker >= len(*nodeList) {
-        return "", fmt.Errorf("Index out of range for nodeList: tracker=%d, length=%d", tracker, len(*nodeList))
+        return "", fmt.Errorf("index out of range for nodeList: tracker=%d, length=%d", tracker, len(*nodeList))
    }

    content := make([]*yaml.Node, 0)
@@ -112,11 +112,11 @@ func enocodeIntoYaml(parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) (

    errorEncoding := encoder.Encode(parentForContent)
    if errorEncoding != nil {
-        return "", fmt.Errorf("Error debugging node, %v", errorEncoding.Error())
+        return "", fmt.Errorf("error debugging node, %v", errorEncoding.Error())
    }
    errorClosingEncoder := encoder.Close()
    if errorClosingEncoder != nil {
-        return "", fmt.Errorf("Error closing encoder: %v", errorClosingEncoder.Error())
+        return "", fmt.Errorf("error closing encoder: %v", errorClosingEncoder.Error())
    }
    return fmt.Sprintf(`%v`, buf.String()), nil
}
@@ -216,7 +216,7 @@ func getLastLineOfResource(linesSlice *[]string, currentLine int) (int, error) {
        }
    }

-    return 0, fmt.Errorf("Provided line is greater than the length of YAML file")
+    return 0, fmt.Errorf("provided line is greater than the length of YAML file")
}

func getNodeLine(nodeList *[]nodeInfo, tracker int) int {
@@ -300,7 +300,7 @@ func isEmptyLineOrComment(lineContent string) bool {
func readDocuments(ctx context.Context, reader io.Reader, decoder yqlib.Decoder) (*list.List, error) {
    err := decoder.Init(reader)
    if err != nil {
-        return nil, fmt.Errorf("Error Initializing the decoder, %w", err)
+        return nil, fmt.Errorf("error initializing the decoder, %w", err)
    }
    inputList := list.New()
@@ -316,7 +316,7 @@ func readDocuments(ctx context.Context, reader io.Reader, decoder yqlib.Decoder)
        }
        return inputList, nil
    } else if errorReading != nil {
-        return nil, fmt.Errorf("Error Decoding YAML file, %w", errorReading)
+        return nil, fmt.Errorf("error decoding yaml file, %w", errorReading)
    }

    candidateNode.Document = currentIndex
@@ -434,9 +434,9 @@ func TestRemoveOutOfRangeLines(t *testing.T) {
func TestShouldCalculateTotalNumberOfChildrenAndAddToCurrentTracker(t *testing.T) {
    node := &yaml.Node{
        Content: []*yaml.Node{
-            &yaml.Node{},
-            &yaml.Node{},
-            &yaml.Node{},
+            {},
+            {},
+            {},
        },
    }
    currentTracker := 5
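Inside a composite literal of type []*yaml.Node, Go lets you omit the repeated &yaml.Node element type; a bare {} allocates the element and takes its address implicitly (gofmt -s suggests the same simplification). For instance:

package main

import "fmt"

type Node struct{ Value string }

func main() {
    // The element type (&Node) is implied by the slice type, so {} suffices.
    nodes := []*Node{
        {},
        {Value: "x"},
    }
    fmt.Println(len(nodes), nodes[1].Value) // 2 x
}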
@@ -1,74 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app: kubescape-host-scanner
    k8s-app: kubescape-host-scanner
    kubernetes.io/metadata.name: kubescape-host-scanner
    tier: kubescape-host-scanner-control-plane
  name: kubescape
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: host-scanner
  namespace: kubescape
  labels:
    app: host-scanner
    k8s-app: kubescape-host-scanner
    otel: enabled
spec:
  selector:
    matchLabels:
      name: host-scanner
  template:
    metadata:
      labels:
        name: host-scanner
    spec:
      tolerations:
        # this toleration is to have the DaemonSet runnable on all nodes (including masters)
        # remove it if your masters can't run pods
        - operator: Exists
      containers:
        - name: host-sensor
          image: quay.io/kubescape/host-scanner:v1.0.61
          securityContext:
            allowPrivilegeEscalation: true
            privileged: true
            readOnlyRootFilesystem: true
          ports:
            - name: scanner # Do not change port name
              containerPort: 7888
              protocol: TCP
          resources:
            limits:
              cpu: 0.1m
              memory: 200Mi
            requests:
              cpu: 1m
              memory: 200Mi
          volumeMounts:
            - mountPath: /host_fs
              name: host-filesystem
          startupProbe:
            httpGet:
              path: /readyz
              port: 7888
            failureThreshold: 30
            periodSeconds: 1
          livenessProbe:
            httpGet:
              path: /healthz
              port: 7888
            periodSeconds: 10
      terminationGracePeriodSeconds: 120
      dnsPolicy: ClusterFirstWithHostNet
      automountServiceAccountToken: false
      volumes:
        - hostPath:
            path: /
            type: Directory
          name: host-filesystem
      hostPID: true
      hostIPC: true
@@ -18,6 +18,5 @@ func TestHostSensorHandlerMock(t *testing.T) {
    require.Nil(t, status)
    require.NoError(t, err)

    require.Empty(t, h.GetNamespace())
    require.NoError(t, h.TearDown())
}
core/pkg/hostsensorutils/hostsensorcollectcrds.go (new file, 235 lines)
@@ -0,0 +1,235 @@
package hostsensorutils

import (
    "context"
    stdjson "encoding/json"
    "fmt"
    "reflect"

    "github.com/kubescape/go-logger"
    "github.com/kubescape/go-logger/helpers"
    k8shostsensor "github.com/kubescape/k8s-interface/hostsensor"
    "github.com/kubescape/k8s-interface/k8sinterface"
    "github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
    "github.com/kubescape/opa-utils/reporthandling/apis"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// getCRDResources retrieves resources from CRDs and converts them to HostSensorDataEnvelope format
func (hsh *HostSensorHandler) getCRDResources(ctx context.Context, resourceType k8shostsensor.HostSensorResource) ([]hostsensor.HostSensorDataEnvelope, error) {
    pluralName := k8shostsensor.MapResourceToPlural(resourceType)
    if pluralName == "" {
        return nil, fmt.Errorf("unsupported resource type: %s", resourceType)
    }

    // List CRD resources
    items, err := hsh.listCRDResources(ctx, pluralName, resourceType.String())
    if err != nil {
        return nil, err
    }

    // Convert to HostSensorDataEnvelope format
    result := make([]hostsensor.HostSensorDataEnvelope, 0, len(items))
    for _, item := range items {
        envelope, err := hsh.convertCRDToEnvelope(item, resourceType)
        if err != nil {
            logger.L().Warning("Failed to convert CRD to envelope",
                helpers.String("kind", resourceType.String()),
                helpers.String("name", item.GetName()),
                helpers.Error(err))
            continue
        }
        result = append(result, envelope)
    }

    logger.L().Debug("Retrieved resources from CRDs",
        helpers.String("kind", resourceType.String()),
        helpers.Int("count", len(result)))

    return result, nil
}

// convertCRDToEnvelope converts a CRD unstructured object to HostSensorDataEnvelope
func (hsh *HostSensorHandler) convertCRDToEnvelope(item unstructured.Unstructured, resourceType k8shostsensor.HostSensorResource) (hostsensor.HostSensorDataEnvelope, error) {
    envelope := hostsensor.HostSensorDataEnvelope{}

    // Set API version and kind
    envelope.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
    envelope.SetKind(resourceType.String())

    // Set name (node name)
    nodeName := item.GetName()
    envelope.SetName(nodeName)

    // Extract content from spec.content
    content, found, err := unstructured.NestedString(item.Object, "spec", "content")
    if err != nil {
        return envelope, fmt.Errorf("failed to extract spec.content: %w", err)
    }
    if !found {
        // fallback to "spec" itself
        contentI, found, err := unstructured.NestedFieldNoCopy(item.Object, "spec")
        if err != nil {
            return envelope, fmt.Errorf("failed to extract spec: %w", err)
        }
        if !found {
            return envelope, fmt.Errorf("spec not found in CRD")
        }
        contentBytes, err := stdjson.Marshal(contentI)
        if err != nil {
            return envelope, fmt.Errorf("failed to marshal spec: %w", err)
        }
        content = string(contentBytes)
    }

    // Set data as raw bytes
    envelope.SetData([]byte(content))

    return envelope, nil
}

// getOsReleaseFile returns the list of osRelease metadata from CRDs.
func (hsh *HostSensorHandler) getOsReleaseFile(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
    return hsh.getCRDResources(ctx, k8shostsensor.OsReleaseFile)
}

// getKernelVersion returns the list of kernelVersion metadata from CRDs.
func (hsh *HostSensorHandler) getKernelVersion(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
    return hsh.getCRDResources(ctx, k8shostsensor.KernelVersion)
}

// getLinuxSecurityHardeningStatus returns the list of LinuxSecurityHardeningStatus metadata from CRDs.
func (hsh *HostSensorHandler) getLinuxSecurityHardeningStatus(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
    return hsh.getCRDResources(ctx, k8shostsensor.LinuxSecurityHardeningStatus)
}

// getOpenPortsList returns the list of open ports from CRDs.
func (hsh *HostSensorHandler) getOpenPortsList(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
    return hsh.getCRDResources(ctx, k8shostsensor.OpenPortsList)
}

// getKernelVariables returns the list of Linux Kernel variables from CRDs.
func (hsh *HostSensorHandler) getKernelVariables(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
    return hsh.getCRDResources(ctx, k8shostsensor.LinuxKernelVariables)
}

// getKubeletInfo returns the list of kubelet metadata from CRDs.
func (hsh *HostSensorHandler) getKubeletInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
    return hsh.getCRDResources(ctx, k8shostsensor.KubeletInfo)
}

// getKubeProxyInfo returns the list of kubeProxy metadata from CRDs.
func (hsh *HostSensorHandler) getKubeProxyInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
    return hsh.getCRDResources(ctx, k8shostsensor.KubeProxyInfo)
}

// getControlPlaneInfo returns the list of controlPlaneInfo metadata from CRDs.
func (hsh *HostSensorHandler) getControlPlaneInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
    return hsh.getCRDResources(ctx, k8shostsensor.ControlPlaneInfo)
}

// getCloudProviderInfo returns the list of cloudProviderInfo metadata from CRDs.
func (hsh *HostSensorHandler) getCloudProviderInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
    return hsh.getCRDResources(ctx, k8shostsensor.CloudProviderInfo)
}

// getCNIInfo returns the list of CNI metadata from CRDs.
func (hsh *HostSensorHandler) getCNIInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
    return hsh.getCRDResources(ctx, k8shostsensor.CNIInfo)
}

// hasCloudProviderInfo iterates over the []hostsensor.HostSensorDataEnvelope list to find info about the cloud provider.
//
// If information is found, it returns true; otherwise it returns false.
func hasCloudProviderInfo(cpi []hostsensor.HostSensorDataEnvelope) bool {
    for index := range cpi {
        if !reflect.DeepEqual(cpi[index].GetData(), stdjson.RawMessage("{}\n")) {
            return true
        }
    }

    return false
}

// CollectResources collects all required information from CRDs.
func (hsh *HostSensorHandler) CollectResources(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
    res := make([]hostsensor.HostSensorDataEnvelope, 0)
    infoMap := make(map[string]apis.StatusInfo)

    logger.L().Debug("Collecting host sensor data from CRDs")

    var hasCloudProvider bool
    for _, toPin := range []struct {
        Resource k8shostsensor.HostSensorResource
        Query    func(context.Context) ([]hostsensor.HostSensorDataEnvelope, error)
    }{
        // queries to CRDs
        {
            Resource: k8shostsensor.OsReleaseFile,
            Query:    hsh.getOsReleaseFile,
        },
        {
            Resource: k8shostsensor.KernelVersion,
            Query:    hsh.getKernelVersion,
        },
        {
            Resource: k8shostsensor.LinuxSecurityHardeningStatus,
            Query:    hsh.getLinuxSecurityHardeningStatus,
        },
        {
            Resource: k8shostsensor.OpenPortsList,
            Query:    hsh.getOpenPortsList,
        },
        {
            Resource: k8shostsensor.LinuxKernelVariables,
            Query:    hsh.getKernelVariables,
        },
        {
            Resource: k8shostsensor.KubeletInfo,
            Query:    hsh.getKubeletInfo,
        },
        {
            Resource: k8shostsensor.KubeProxyInfo,
            Query:    hsh.getKubeProxyInfo,
        },
        {
            Resource: k8shostsensor.CloudProviderInfo,
            Query:    hsh.getCloudProviderInfo,
        },
        {
            Resource: k8shostsensor.CNIInfo,
            Query:    hsh.getCNIInfo,
        },
        {
            // ControlPlaneInfo is queried _after_ CloudProviderInfo.
            Resource: k8shostsensor.ControlPlaneInfo,
            Query:    hsh.getControlPlaneInfo,
        },
    } {
        k8sInfo := toPin

        if k8sInfo.Resource == k8shostsensor.ControlPlaneInfo && hasCloudProvider {
            // we retrieve control plane info only if we are not using a cloud provider
            continue
        }

        kcData, err := k8sInfo.Query(ctx)
        if err != nil {
            addInfoToMap(k8sInfo.Resource, infoMap, err)
            logger.L().Ctx(ctx).Warning("Failed to get resource from CRD",
                helpers.String("resource", k8sInfo.Resource.String()),
                helpers.Error(err))
        }

        if k8sInfo.Resource == k8shostsensor.CloudProviderInfo {
            hasCloudProvider = hasCloudProviderInfo(kcData)
        }

        if len(kcData) > 0 {
            res = append(res, kcData...)
        }
    }

    logger.L().Debug("Done collecting information from CRDs", helpers.Int("totalResources", len(res)))
    return res, infoMap, nil
}
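The spec.content / spec fallback above mirrors how unstructured objects are usually unpacked: try the precise field first, then fall back to serializing the broader one. A standalone sketch of that pattern (the object literal is illustrative):

package main

import (
    "encoding/json"
    "fmt"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
    obj := map[string]interface{}{
        "spec": map[string]interface{}{"kernelVersion": "6.8.0"},
    }

    // Preferred: a plain string at spec.content.
    content, found, err := unstructured.NestedString(obj, "spec", "content")
    if err != nil || !found {
        // Fallback: marshal the whole spec as JSON.
        specI, _, _ := unstructured.NestedFieldNoCopy(obj, "spec")
        b, _ := json.Marshal(specI)
        content = string(b)
    }
    fmt.Println(content) // {"kernelVersion":"6.8.0"}
}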
core/pkg/hostsensorutils/hostsensorcrdshandler.go (new file, 122 lines)
@@ -0,0 +1,122 @@
package hostsensorutils

import (
    "context"
    "fmt"

    "github.com/kubescape/go-logger"
    "github.com/kubescape/go-logger/helpers"
    "github.com/kubescape/k8s-interface/k8sinterface"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
)

const (
    // Host data CRD API group and version
    hostDataGroup   = "hostdata.kubescape.cloud"
    hostDataVersion = "v1beta1"
)

// HostSensorHandler is a client that reads host sensor data from Kubernetes CRDs.
//
// The CRDs are created by the node-agent daemonset running on each node.
type HostSensorHandler struct {
    k8sObj        *k8sinterface.KubernetesApi
    dynamicClient dynamic.Interface
}

// NewHostSensorHandler builds a new CRD-based host sensor handler.
func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, _ string) (*HostSensorHandler, error) {
    if k8sObj == nil {
        return nil, fmt.Errorf("nil k8s interface received")
    }
    config := k8sinterface.GetK8sConfig()
    if config == nil {
        return nil, fmt.Errorf("failed to get k8s config")
    }
    // force the protobuf content type
    config.AcceptContentTypes = "application/vnd.kubernetes.protobuf"
    config.ContentType = "application/vnd.kubernetes.protobuf"

    // Create dynamic client for CRD access
    dynamicClient, err := dynamic.NewForConfig(config)
    if err != nil {
        return nil, fmt.Errorf("failed to create dynamic client: %w", err)
    }

    hsh := &HostSensorHandler{
        k8sObj:        k8sObj,
        dynamicClient: dynamicClient,
    }

    // Verify we can access nodes (basic sanity check)
    if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
        if err == nil {
            err = fmt.Errorf("no nodes to scan")
        }
        return hsh, fmt.Errorf("in NewHostSensorHandler, failed to get nodes list: %v", err)
    }

    return hsh, nil
}

// Init is a no-op for the CRD-based implementation.
// The node-agent daemonset is expected to be already deployed and creating CRDs.
func (hsh *HostSensorHandler) Init(ctx context.Context) error {
    logger.L().Info("Using CRD-based host sensor data collection (no deployment needed)")

    // Verify that at least one CRD type exists
    gvr := schema.GroupVersionResource{
        Group:    hostDataGroup,
        Version:  hostDataVersion,
        Resource: "osreleasefiles",
    }

    list, err := hsh.dynamicClient.Resource(gvr).List(ctx, metav1.ListOptions{Limit: 1})
    if err != nil {
        logger.L().Warning("node-agent status: Failed to list OsReleaseFile CRDs - node-agent may not be deployed",
            helpers.Error(err))
        return fmt.Errorf("failed to verify CRD access: %w (ensure node-agent is deployed)", err)
    }

    if len(list.Items) == 0 {
        logger.L().Warning("node-agent status: No OsReleaseFile CRDs found - node-agent may not be running or sensing yet")
    } else {
        logger.L().Info("node-agent status: Successfully verified CRD access", helpers.Int("osReleaseFiles", len(list.Items)))
    }

    return nil
}

// TearDown is a no-op for the CRD-based implementation.
// CRDs are managed by the node-agent daemonset lifecycle.
func (hsh *HostSensorHandler) TearDown() error {
    logger.L().Debug("CRD-based host sensor teardown (no-op)")
    return nil
}

// listCRDResources is a generic function to list CRD resources and convert them to the expected format.
func (hsh *HostSensorHandler) listCRDResources(ctx context.Context, resourceName, kind string) ([]unstructured.Unstructured, error) {
    gvr := schema.GroupVersionResource{
        Group:    hostDataGroup,
        Version:  hostDataVersion,
        Resource: resourceName,
    }

    logger.L().Debug("Listing CRD resources",
        helpers.String("resource", resourceName),
        helpers.String("kind", kind))

    list, err := hsh.dynamicClient.Resource(gvr).List(ctx, metav1.ListOptions{})
    if err != nil {
        return nil, fmt.Errorf("failed to list %s CRDs: %w", kind, err)
    }

    logger.L().Debug("Retrieved CRD resources",
        helpers.String("kind", kind),
        helpers.Int("count", len(list.Items)))

    return list.Items, nil
}
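Listing arbitrary CRDs through the dynamic client only needs the GroupVersionResource triple; no typed clientset has to be generated. A minimal standalone sketch against the same group/version (the kubeconfig handling is illustrative):

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client, err := dynamic.NewForConfig(config)
    if err != nil {
        panic(err)
    }

    gvr := schema.GroupVersionResource{
        Group:    "hostdata.kubescape.cloud",
        Version:  "v1beta1",
        Resource: "osreleasefiles",
    }
    list, err := client.Resource(gvr).List(context.Background(), metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    for _, item := range list.Items {
        fmt.Println(item.GetName()) // one object per node
    }
}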
@@ -1,457 +0,0 @@
package hostsensorutils

import (
    "context"
    _ "embed"
    "fmt"
    "os"
    "sync"
    "time"

    "github.com/kubescape/go-logger"
    "github.com/kubescape/go-logger/helpers"
    "github.com/kubescape/k8s-interface/k8sinterface"
    "github.com/kubescape/k8s-interface/workloadinterface"
    "github.com/kubescape/kubescape/v3/core/cautils"
    appsv1 "k8s.io/api/apps/v1"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/watch"
)

var (
    //go:embed hostsensor.yaml
    hostSensorYAML string

    namespaceWasPresent bool
)

const portName string = "scanner"

// HostSensorHandler is a client that interacts with a host-scanner component deployed on nodes.
//
// The API exposed by the host sensor is defined here: https://github.com/kubescape/host-scanner
type HostSensorHandler struct {
    hostSensorPort                int32
    hostSensorPodNames            map[string]string // map from pod names to node names
    hostSensorUnscheduledPodNames map[string]string // map from pod names to node names
    k8sObj                        *k8sinterface.KubernetesApi
    daemonSet                     *appsv1.DaemonSet
    podListLock                   sync.RWMutex
    gracePeriod                   int64
    workerPool                    workerPool
}

// NewHostSensorHandler builds a new http client to the host-scanner API.
func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile string) (*HostSensorHandler, error) {
    if k8sObj == nil {
        return nil, fmt.Errorf("nil k8s interface received")
    }

    if hostSensorYAMLFile != "" {
        d, err := loadHostSensorFromFile(hostSensorYAMLFile)
        if err != nil {
            return nil, fmt.Errorf("failed to load host-scanner yaml file, reason: %w", err)
        }
        hostSensorYAML = d
    }

    hsh := &HostSensorHandler{
        k8sObj:                        k8sObj,
        hostSensorPodNames:            map[string]string{},
        hostSensorUnscheduledPodNames: map[string]string{},
        gracePeriod:                   int64(15),
        workerPool:                    newWorkerPool(),
    }

    // Don't deploy on a cluster with no nodes. Some cloud providers prevent the termination of K8s objects for clusters with no nodes!
    if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
        if err == nil {
            err = fmt.Errorf("no nodes to scan")
        }
        return hsh, fmt.Errorf("in NewHostSensorHandler, failed to get nodes list: %v", err)
    }

    return hsh, nil
}

// Init deploys the host-scanner and starts watching the pods on the host.
func (hsh *HostSensorHandler) Init(ctx context.Context) error {
    // deploy the YAML
    // store namespace + port
    // store pod names
    // make sure all pods are running; after X seconds treat them as running anyway, and log an error on the pods not running yet
    logger.L().Info("Installing host scanner")

    // log is used to avoid log duplication
    // coming from the different host-scanner instances
    log := NewLogCoupling()

    cautils.StartSpinner()
    defer cautils.StopSpinner()

    if err := hsh.applyYAML(ctx); err != nil {
        return fmt.Errorf("failed to apply host scanner YAML, reason: %v", err)
    }

    hsh.populatePodNamesToNodeNames(ctx, log)
    if err := hsh.checkPodForEachNode(); err != nil {
        return fmt.Errorf("%s: %v", failedToValidateHostSensorPodStatus, err)
    }

    return nil
}

// checkNamespaceWasPresent checks whether the given namespace was already present on kubernetes and in "Active" state.
// It returns true if it finds the namespace and the namespace is Active; false otherwise (including on kubernetes API errors).
func (hsh *HostSensorHandler) checkNamespaceWasPresent(namespace string) bool {
    ns, err := hsh.k8sObj.KubernetesClient.
        CoreV1().
        Namespaces().
        Get(hsh.k8sObj.Context, namespace, metav1.GetOptions{})
    if err != nil {
        return false
    }

    // check also if it is in "Active" state.
    if ns.Status.Phase != corev1.NamespaceActive {
        return false
    }

    return true
}

// namespaceWasPresent returns the namespaceWasPresent variable value.
func (hsh *HostSensorHandler) namespaceWasPresent() bool {
    return namespaceWasPresent
}

func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
    workloads, err := cautils.ReadFile([]byte(hostSensorYAML), cautils.YAML_FILE_FORMAT)
    if err != nil {
        return fmt.Errorf("failed to read YAML files, reason: %v", err)
    }

    // Get namespace name
    namespaceName := cautils.GetConfigMapNamespace()
    for i := range workloads {
        if workloads[i].GetKind() == "Namespace" {
            namespaceName = workloads[i].GetName()
            break
        }
    }
    // check if namespace was already present on kubernetes
    namespaceWasPresent = hsh.checkNamespaceWasPresent(namespaceName)

    // Update workload data before applying
    for i := range workloads {
        w := workloadinterface.NewWorkloadObj(workloads[i].GetObject())
        if w == nil {
            return fmt.Errorf("invalid workload: %v", workloads[i].GetObject())
        }
        // set namespace in all objects
        if w.GetKind() != "Namespace" {
            logger.L().Debug("Setting namespace", helpers.String("kind", w.GetKind()), helpers.String("name", w.GetName()), helpers.String("namespace", namespaceName))
            w.SetNamespace(namespaceName)
        }
        // Get container port
        if w.GetKind() == "DaemonSet" {
            containers, err := w.GetContainers()
            if err != nil {
                if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
                    logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
                }
                return fmt.Errorf("container not found in DaemonSet: %v", err)
            }
            for j := range containers {
                for k := range containers[j].Ports {
                    if containers[j].Ports[k].Name == portName {
                        hsh.hostSensorPort = containers[j].Ports[k].ContainerPort
                    }
                }
            }
        }

        // Apply workload
        var newWorkload k8sinterface.IWorkload
        var e error

        if g, err := hsh.k8sObj.GetWorkload(w.GetNamespace(), w.GetKind(), w.GetName()); err == nil && g != nil {
            newWorkload, e = hsh.k8sObj.UpdateWorkload(w)
        } else {
            newWorkload, e = hsh.k8sObj.CreateWorkload(w)
        }
        if e != nil {
            if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
                logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
            }
            return fmt.Errorf("failed to create/update YAML, reason: %v", e)
        }

        // Save DaemonSet
        if newWorkload.GetKind() == "DaemonSet" {
            b, err := json.Marshal(newWorkload.GetObject())
            if err != nil {
                if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
                    logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
                }
                return fmt.Errorf("failed to Marshal YAML of DaemonSet, reason: %v", err)
            }
            var ds appsv1.DaemonSet
            if err := json.Unmarshal(b, &ds); err != nil {
                if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
                    logger.L().Ctx(ctx).Warning(failedToTeardownNamespace, helpers.Error(erra))
                }
                return fmt.Errorf("failed to Unmarshal YAML of DaemonSet, reason: %v", err)
            }
            hsh.daemonSet = &ds
        }
    }

    return nil
}

func (hsh *HostSensorHandler) checkPodForEachNode() error {
    deadline := time.Now().Add(time.Second * 100)
    for {
        nodesList, err := hsh.k8sObj.KubernetesClient.CoreV1().Nodes().List(hsh.k8sObj.Context, metav1.ListOptions{})
        if err != nil {
            return fmt.Errorf("in checkPodsForEveryNode, failed to get nodes list: %v", nodesList)
        }

        hsh.podListLock.RLock()
        podsNum := len(hsh.hostSensorPodNames)
        unschedPodNum := len(hsh.hostSensorUnscheduledPodNames)
        hsh.podListLock.RUnlock()
        if len(nodesList.Items) <= podsNum+unschedPodNum {
            break
        }

        if time.Now().After(deadline) {
            hsh.podListLock.RLock()
            podsMap := hsh.hostSensorPodNames
            hsh.podListLock.RUnlock()
            return fmt.Errorf("host-scanner pods number (%d) differs from nodes number (%d) after deadline exceeded. Kubescape will take data only from the pods below: %v",
                podsNum, len(nodesList.Items), podsMap)
        }
        time.Sleep(100 * time.Millisecond)
    }

    return nil
}

// populatePodNamesToNodeNames initiates a routine to keep the pod list updated.
func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context, log *LogsMap) {
    go func() {
        var watchRes watch.Interface
        var err error
        watchRes, err = hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.daemonSet.Namespace).Watch(hsh.k8sObj.Context, metav1.ListOptions{
            Watch:         true,
            LabelSelector: fmt.Sprintf("name=%s", hsh.daemonSet.Spec.Template.Labels["name"]),
        })
        if err != nil {
            logger.L().Ctx(ctx).Warning(failedToWatchOverDaemonSetPods, helpers.Error(err))
        }
        if watchRes == nil {
            logger.L().Ctx(ctx).Error("failed to watch over DaemonSet pods, will not be able to get host-scanner data")
            return
        }

        for eve := range watchRes.ResultChan() {
            pod, ok := eve.Object.(*corev1.Pod)
            if !ok {
                continue
            }
            go hsh.updatePodInListAtomic(ctx, eve.Type, pod, log)
        }
    }()
}

func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventType watch.EventType, podObj *corev1.Pod, log *LogsMap) {
    hsh.podListLock.Lock()
    defer hsh.podListLock.Unlock()

    switch eventType {
    case watch.Added, watch.Modified:
        if podObj.Status.Phase == corev1.PodRunning && len(podObj.Status.ContainerStatuses) > 0 &&
            podObj.Status.ContainerStatuses[0].Ready {
            hsh.hostSensorPodNames[podObj.ObjectMeta.Name] = podObj.Spec.NodeName
            delete(hsh.hostSensorUnscheduledPodNames, podObj.ObjectMeta.Name)
        } else {
            if podObj.Status.Phase == corev1.PodPending && len(podObj.Status.Conditions) > 0 &&
                podObj.Status.Conditions[0].Reason == corev1.PodReasonUnschedulable {
                nodeName := ""
                if podObj.Spec.Affinity != nil && podObj.Spec.Affinity.NodeAffinity != nil &&
                    podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
                    len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) > 0 &&
                    len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields) > 0 &&
                    len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values) > 0 {
                    nodeName = podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
                }
                if !log.isDuplicated(oneHostSensorPodIsUnabledToSchedule) {
                    logger.L().Ctx(ctx).Warning(oneHostSensorPodIsUnabledToSchedule,
                        helpers.String("message", podObj.Status.Conditions[0].Message))
                    log.update(oneHostSensorPodIsUnabledToSchedule)
                }
                if nodeName != "" {
                    hsh.hostSensorUnscheduledPodNames[podObj.ObjectMeta.Name] = nodeName
                }
            } else {
                delete(hsh.hostSensorPodNames, podObj.ObjectMeta.Name)
            }
        }
    default:
        delete(hsh.hostSensorPodNames, podObj.ObjectMeta.Name)
    }
}

// tearDownHostScanner manages the host-scanner deletion.
func (hsh *HostSensorHandler) tearDownHostScanner(namespace string) error {
    client := hsh.k8sObj.KubernetesClient

    // delete host-scanner DaemonSet
    err := client.AppsV1().
        DaemonSets(namespace).
        Delete(
            hsh.k8sObj.Context,
            hsh.daemonSet.Name,
            metav1.DeleteOptions{
                GracePeriodSeconds: &hsh.gracePeriod,
            },
        )
    if err != nil {
        return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
    }

    // wait for DaemonSet to be deleted
    err = hsh.waitHostScannerDeleted(hsh.k8sObj.Context)
    if err != nil {
        return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
    }

    return nil
}

// tearDownNamespace manages the given namespace deletion.
// First, it checks whether the namespace was already present before installing host-scanner;
// in that case it skips the deletion.
// If it was not, it patches the namespace to remove the finalizers,
// and finally deletes it.
func (hsh *HostSensorHandler) tearDownNamespace(namespace string) error {
    // if namespace was already present on kubernetes (before installing host-scanner),
    // then we shouldn't delete it.
    if hsh.namespaceWasPresent() {
        return nil
    }
    // to make it more readable we store the object client in a variable
    client := hsh.k8sObj.KubernetesClient

    // prepare patch json to remove finalizers from namespace
    patchData := `
[
    {
        "op": "replace",
        "path": "/metadata/finalizers",
        "value": []
    }
]
`
    // patch namespace object removing finalizers
    _, err := client.CoreV1().
        Namespaces().
        Patch(
            hsh.k8sObj.Context,
            namespace,
            types.JSONPatchType,
            []byte(patchData),
            metav1.PatchOptions{},
        )
    if err != nil {
        return fmt.Errorf("failed to remove finalizers from Namespace: %v", err)
    }

    // delete namespace object
    err = client.CoreV1().
        Namespaces().
        Delete(
            hsh.k8sObj.Context,
            namespace,
            metav1.DeleteOptions{
                GracePeriodSeconds: &hsh.gracePeriod,
            },
        )
    if err != nil {
        return fmt.Errorf("failed to delete %s Namespace: %v", namespace, err)
    }

    return nil
}

func (hsh *HostSensorHandler) TearDown() error {
    namespace := hsh.GetNamespace()
    // delete DaemonSet
    if err := hsh.tearDownHostScanner(namespace); err != nil {
        return fmt.Errorf("failed to delete host-scanner DaemonSet: %v", err)
    }
    // delete Namespace
    if err := hsh.tearDownNamespace(namespace); err != nil {
        return fmt.Errorf("failed to delete host-scanner Namespace: %v", err)
    }

    return nil
}

func (hsh *HostSensorHandler) GetNamespace() string {
    if hsh.daemonSet == nil {
        return ""
    }
    return hsh.daemonSet.Namespace
}

func loadHostSensorFromFile(hostSensorYAMLFile string) (string, error) {
    dat, err := os.ReadFile(hostSensorYAMLFile)
    if err != nil {
        return "", err
    }

    if len(dat) == 0 {
        return "", fmt.Errorf("empty file")
    }

    if !cautils.IsYaml(hostSensorYAMLFile) {
        return "", fmt.Errorf("invalid file format")
    }

    return string(dat), err
}

// waitHostScannerDeleted watches for host-scanner deletion.
// If the watch cannot be created, it returns an error.
func (hsh *HostSensorHandler) waitHostScannerDeleted(ctx context.Context) error {
    labelSelector := fmt.Sprintf("name=%s", hsh.daemonSet.Name)
    opts := metav1.ListOptions{
        TypeMeta:      metav1.TypeMeta{},
        LabelSelector: labelSelector,
        FieldSelector: "",
    }
    watcher, err := hsh.k8sObj.KubernetesClient.CoreV1().
        Pods(hsh.daemonSet.Namespace).
        Watch(ctx, opts)
    if err != nil {
        return err
    }
    defer watcher.Stop()

    for {
        select {
        case event := <-watcher.ResultChan():
            if event.Type == watch.Deleted {
                return nil
            }
        case <-ctx.Done():
            return nil
        }
    }
}
@@ -1,232 +0,0 @@
package hostsensorutils

import (
    "context"
    "os"
    "path/filepath"
    "testing"

    "github.com/kubescape/kubescape/v3/internal/testutils"
    "github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestHostSensorHandler(t *testing.T) {
    t.Parallel()
    ctx := context.Background()

    t.Run("with default manifest", func(t *testing.T) {
        t.Run("should build host sensor", func(t *testing.T) {
            k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponses()))
            h, err := NewHostSensorHandler(k8s, "")
            require.NoError(t, err)
            require.NotNil(t, h)

            t.Run("should initialize host sensor", func(t *testing.T) {
                require.NoError(t, h.Init(ctx))

                w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
                require.NoError(t, err)
                w.Stop()

                require.Len(t, h.hostSensorPodNames, 2)
            })

            t.Run("should return namespace", func(t *testing.T) {
                require.Equal(t, "kubescape", h.GetNamespace())
            })

            t.Run("should collect resources from pods - happy path", func(t *testing.T) {
                envelope, status, err := h.CollectResources(ctx)
                require.NoError(t, err)

                require.Len(t, envelope, 9*2) // has cloud provider, no control plane requested
                require.Len(t, status, 0)

                foundControl, foundProvider := false, false
                for _, sensed := range envelope {
                    if sensed.Kind == ControlPlaneInfo.String() {
                        foundControl = true
                    }
                    if sensed.Kind == CloudProviderInfo.String() {
                        foundProvider = hasCloudProviderInfo([]hostsensor.HostSensorDataEnvelope{sensed})
                    }
                }

                require.False(t, foundControl)
                require.True(t, foundProvider)
            })
        })

        t.Run("should build host sensor without cloud provider", func(t *testing.T) {
            k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponsesNoCloudProvider()))
            h, err := NewHostSensorHandler(k8s, "")
            require.NoError(t, err)
            require.NotNil(t, h)

            t.Run("should initialize host sensor", func(t *testing.T) {
                require.NoError(t, h.Init(ctx))

                w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
                require.NoError(t, err)
                w.Stop()

                require.Len(t, h.hostSensorPodNames, 2)
            })

            t.Run("should get version", func(t *testing.T) {
                version, err := h.getVersion()
                require.NoError(t, err)
                require.Equal(t, "v1.0.45", version)
            })

            t.Run("ForwardToPod is a stub, not implemented", func(t *testing.T) {
                resp, err := h.forwardToPod("pod1", "/version")
                require.Contains(t, err.Error(), "not implemented")
                require.Nil(t, resp)
            })

            t.Run("should collect resources from pods", func(t *testing.T) {
                envelope, status, err := h.CollectResources(ctx)
                require.NoError(t, err)

                require.Len(t, envelope, 10*2) // has empty cloud provider, has control plane info
                require.Len(t, status, 0)

                foundControl, foundProvider := false, false
                for _, sensed := range envelope {
                    if sensed.Kind == ControlPlaneInfo.String() {
                        foundControl = true
                    }
                    if sensed.Kind == CloudProviderInfo.String() {
                        foundProvider = hasCloudProviderInfo([]hostsensor.HostSensorDataEnvelope{sensed})
                    }
                }

                require.True(t, foundControl)
                require.False(t, foundProvider)
            })
        })

        t.Run("should build host sensor with error in response from /version", func(t *testing.T) {
            k8s := NewKubernetesApiMock(WithNode(mockNode1()),
                WithPod(mockPod1()),
                WithPod(mockPod2()),
                WithResponses(mockResponsesNoCloudProvider()),
                WithErrorResponse(RestURL{"http", "pod1", "7888", "/version"}), // this endpoint will return an error from this pod
                WithErrorResponse(RestURL{"http", "pod2", "7888", "/version"}), // this endpoint will return an error from this pod
            )

            h, err := NewHostSensorHandler(k8s, "")
            require.NoError(t, err)
            require.NotNil(t, h)

            t.Run("should initialize host sensor", func(t *testing.T) {
                require.NoError(t, h.Init(ctx))

                w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
                require.NoError(t, err)
                w.Stop()

                require.Len(t, h.hostSensorPodNames, 2)
            })

            t.Run("should NOT be able to get version", func(t *testing.T) {
                // NOTE: GetVersion might be successful if only one pod responds successfully.
                // In order to ensure an error, we need ALL pods to error.
                _, err := h.getVersion()
                require.Error(t, err)
                require.Contains(t, err.Error(), "mock")
            })
        })

        t.Run("should FAIL to build host sensor because there are no nodes", func(t *testing.T) {
            h, err := NewHostSensorHandler(NewKubernetesApiMock(), "")
            require.Error(t, err)
            require.NotNil(t, h)
            require.Contains(t, err.Error(), "no nodes to scan")
        })
    })

    t.Run("should NOT build host sensor with nil k8s API", func(t *testing.T) {
        h, err := NewHostSensorHandler(nil, "")
        require.Error(t, err)
        require.Nil(t, h)
    })

    t.Run("with manifest from YAML file", func(t *testing.T) {
        t.Run("should build host sensor", func(t *testing.T) {
            k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponses()))
            h, err := NewHostSensorHandler(k8s, filepath.Join(testutils.CurrentDir(), "hostsensor.yaml"))
            require.NoError(t, err)
            require.NotNil(t, h)

            t.Run("should initialize host sensor", func(t *testing.T) {
                require.NoError(t, h.Init(ctx))

                w, err := k8s.KubernetesClient.CoreV1().Pods(h.daemonSet.Namespace).Watch(ctx, metav1.ListOptions{})
                require.NoError(t, err)
                w.Stop()

                require.Len(t, h.hostSensorPodNames, 2)
            })
        })
    })

    t.Run("with manifest from invalid YAML file", func(t *testing.T) {
        t.Run("should NOT build host sensor", func(t *testing.T) {
            var invalid string
            t.Run("should create temp file", func(t *testing.T) {
                file, err := os.CreateTemp("", "*.yaml")
                require.NoError(t, err)
                t.Cleanup(func() {
                    _ = os.Remove(file.Name())
                })
                _, err = file.Write([]byte(" x: 1"))
                require.NoError(t, err)

                invalid = file.Name()
                require.NoError(t, file.Close())
            })

            k8s := NewKubernetesApiMock(WithNode(mockNode1()), WithPod(mockPod1()), WithPod(mockPod2()), WithResponses(mockResponses()))
            _, err := NewHostSensorHandler(k8s, filepath.Join(testutils.CurrentDir(), invalid))
            require.Error(t, err)
        })
    })

    // TODO(test coverage): the following cases are not covered by tests yet.
    //
    // * applyYAML fails
    // * checkPodForEachNode fails, or times out
    // * non-active namespace
    // * getPodList fails when GetVersion
    // * getPodList fails when CollectResources
    // * error cases that trigger a namespace tear-down
    // * watch pods with a Delete event
    // * explicit TearDown()
    //
    // Notice that the package doesn't currently pass tests with the race detector enabled.
}

func TestLoadHostSensorFromFile_NoError(t *testing.T) {
    content, err := loadHostSensorFromFile("testdata/hostsensor.yaml")
    assert.NotEqual(t, "", content)
    assert.Nil(t, err)
}

func TestLoadHostSensorFromFile_Error(t *testing.T) {
    content, err := loadHostSensorFromFile("testdata/hostsensor_invalid.yaml")
    assert.Equal(t, "", content)
    assert.NotNil(t, err)

    content, err = loadHostSensorFromFile("testdata/empty_hostsensor.yaml")
    assert.Equal(t, "", content)
    assert.NotNil(t, err)

    content, err = loadHostSensorFromFile("testdata/notAYamlFile.txt")
    assert.Equal(t, "", content)
    assert.NotNil(t, err)
}
@@ -1,293 +0,0 @@
|
||||
package hostsensorutils
|
||||
|
||||
import (
|
||||
"context"
|
||||
stdjson "encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
)
|
||||
|
||||
// getPodList clones the internal list of pods being watched as a map of pod names.
|
||||
func (hsh *HostSensorHandler) getPodList() map[string]string {
|
||||
hsh.podListLock.RLock()
|
||||
res := make(map[string]string, len(hsh.hostSensorPodNames))
|
||||
for k, v := range hsh.hostSensorPodNames {
|
||||
res[k] = v
|
||||
}
|
||||
hsh.podListLock.RUnlock()
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
// httpGetToPod sends the request to a pod using the HostSensorPort.
|
||||
func (hsh *HostSensorHandler) httpGetToPod(podName, path string) ([]byte, error) {
|
||||
restProxy := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.daemonSet.Namespace).ProxyGet("http", podName, fmt.Sprintf("%d", hsh.hostSensorPort), path, map[string]string{})
|
||||
return restProxy.DoRaw(hsh.k8sObj.Context)
|
||||
}
|
||||
|
||||
func (hsh *HostSensorHandler) getResourcesFromPod(podName, nodeName string, resourceKind scannerResource, path string) (hostsensor.HostSensorDataEnvelope, error) {
|
||||
// send the request and pack the response as an hostSensorDataEnvelope
|
||||
|
||||
resBytes, err := hsh.httpGetToPod(podName, path)
|
||||
if err != nil {
|
||||
return hostsensor.HostSensorDataEnvelope{}, err
|
||||
}
|
||||
|
||||
hostSensorDataEnvelope := hostsensor.HostSensorDataEnvelope{}
|
||||
hostSensorDataEnvelope.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
|
||||
hostSensorDataEnvelope.SetKind(resourceKind.String())
|
||||
hostSensorDataEnvelope.SetName(nodeName)
|
||||
hostSensorDataEnvelope.SetData(resBytes)
|
||||
|
||||
return hostSensorDataEnvelope, nil
|
||||
}
|
||||
|
||||
// forwardToPod is currently not implemented.
|
||||
func (hsh *HostSensorHandler) forwardToPod(podName, path string) ([]byte, error) {
|
||||
// NOT IN USE:
|
||||
// ---
|
||||
// spawn port forwarding
|
||||
// req := hsh.k8sObj.KubernetesClient.CoreV1().RESTClient().Post()
|
||||
// req = req.Name(podName)
|
||||
// req = req.Namespace(hsh.DaemonSet.Namespace)
|
||||
// req = req.Resource("pods")
|
||||
// req = req.SubResource("portforward")
|
||||
// ----
|
||||
// https://github.com/gianarb/kube-port-forward
|
||||
// fullPath := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward",
|
||||
// hsh.DaemonSet.Namespace, podName)
|
||||
// transport, upgrader, err := spdy.RoundTripperFor(hsh.k8sObj.KubernetesClient.config)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// hostIP := strings.TrimLeft(req.RestConfig.Host, "htps:/")
|
||||
// dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, &url.URL{Scheme: "http", Path: path, Host: hostIP})
|
||||
return nil, errors.New("not implemented")
|
||||
}
|
||||
|
||||
// sendAllPodsHTTPGETRequest fills the raw bytes response in the envelope and the node name, but not the GroupVersionKind,
// so the caller is responsible for converting the raw data to structured data and adding the GroupVersionKind details.
//
// The function produces a worker pool with a fixed number of workers.
//
// For each node, a request is pushed to the jobs channel; a worker sends the request and pushes the result to the results channel.
// When all workers have finished, the function returns the list of results.
func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(ctx context.Context, path string, requestKind scannerResource) ([]hostsensor.HostSensorDataEnvelope, error) {
	podList := hsh.getPodList()
	res := make([]hostsensor.HostSensorDataEnvelope, 0, len(podList))
	var wg sync.WaitGroup

	// initialization of the channels
	hsh.workerPool.init(len(podList))

	// log is used to avoid log duplication
	// coming from the different host-scanner instances
	log := NewLogCoupling()

	hsh.workerPool.hostSensorApplyJobs(podList, path, requestKind)
	hsh.workerPool.hostSensorGetResults(&res)
	hsh.workerPool.createWorkerPool(ctx, hsh, &wg, log)
	hsh.workerPool.waitForDone(&wg)

	return res, nil
}

// getVersion returns the version of the deployed host scanner.
//
// NOTE: we pick the version from the first responding pod.
func (hsh *HostSensorHandler) getVersion() (string, error) {
	// query the pods through the API-server proxy
	podList := hsh.getPodList()

	// initialization of the channels
	hsh.workerPool.init(len(podList))
	hsh.workerPool.hostSensorApplyJobs(podList, "/version", "version")
	for job := range hsh.workerPool.jobs {
		resBytes, err := hsh.httpGetToPod(job.podName, job.path)
		if err != nil {
			return "", err
		}

		version := strings.ReplaceAll(string(resBytes), "\"", "")
		version = strings.ReplaceAll(version, "\n", "")

		return version, nil
	}

	return "", nil
}

// getKernelVariables returns the list of Linux kernel variables.
func (hsh *HostSensorHandler) getKernelVariables(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// query all pods through the API-server proxy
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/LinuxKernelVariables", LinuxKernelVariables)
}

// getOpenPortsList returns the list of open ports.
func (hsh *HostSensorHandler) getOpenPortsList(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// query all pods through the API-server proxy
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/openedPorts", OpenPortsList)
}

// getLinuxSecurityHardeningStatus returns the list of LinuxSecurityHardeningStatus metadata.
func (hsh *HostSensorHandler) getLinuxSecurityHardeningStatus(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// query all pods through the API-server proxy
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/linuxSecurityHardening", LinuxSecurityHardeningStatus)
}

// getKubeletInfo returns the list of kubelet metadata.
func (hsh *HostSensorHandler) getKubeletInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// query all pods through the API-server proxy
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeletInfo", KubeletInfo)
}

// getKubeProxyInfo returns the list of kubeProxy metadata.
func (hsh *HostSensorHandler) getKubeProxyInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// query all pods through the API-server proxy
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeProxyInfo", KubeProxyInfo)
}

// getControlPlaneInfo returns the list of controlPlaneInfo metadata.
func (hsh *HostSensorHandler) getControlPlaneInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// query all pods through the API-server proxy
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/controlPlaneInfo", ControlPlaneInfo)
}

// getCloudProviderInfo returns the list of cloudProviderInfo metadata.
func (hsh *HostSensorHandler) getCloudProviderInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// query all pods through the API-server proxy
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/cloudProviderInfo", CloudProviderInfo)
}

// getCNIInfo returns the list of CNI metadata.
func (hsh *HostSensorHandler) getCNIInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// query all pods through the API-server proxy
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/CNIInfo", CNIInfo)
}

// getKernelVersion returns the list of kernelVersion metadata.
func (hsh *HostSensorHandler) getKernelVersion(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// query all pods through the API-server proxy
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/kernelVersion", "KernelVersion")
}

// getOsReleaseFile returns the list of osRelease metadata.
func (hsh *HostSensorHandler) getOsReleaseFile(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
	// query all pods through the API-server proxy
	return hsh.sendAllPodsHTTPGETRequest(ctx, "/osRelease", "OsReleaseFile")
}

// hasCloudProviderInfo iterates over the []hostsensor.HostSensorDataEnvelope list to find info about the cloud provider.
//
// It returns true if such information is found, and false otherwise.
func hasCloudProviderInfo(cpi []hostsensor.HostSensorDataEnvelope) bool {
	for index := range cpi {
		if !reflect.DeepEqual(cpi[index].GetData(), stdjson.RawMessage("{}\n")) {
			return true
		}
	}

	return false
}

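Since the envelope data is a raw JSON byte slice, the reflect.DeepEqual check above reduces to a plain byte comparison: an "empty" host-scanner payload is the literal object `{}` plus a trailing newline. A minimal, self-contained sketch (values illustrative) showing that bytes.Equal behaves identically here without reflection:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"reflect"
)

func main() {
	// An "empty" host-scanner payload is the literal JSON object plus a newline.
	payload := json.RawMessage("{}\n")
	empty := json.RawMessage("{}\n")

	// json.RawMessage is a []byte alias, so both checks agree on non-nil values;
	// bytes.Equal just avoids the reflection overhead.
	fmt.Println(reflect.DeepEqual(payload, empty)) // true
	fmt.Println(bytes.Equal(payload, empty))       // true
}
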
// CollectResources collects all required information about all the pods for this host.
func (hsh *HostSensorHandler) CollectResources(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
	res := make([]hostsensor.HostSensorDataEnvelope, 0)
	infoMap := make(map[string]apis.StatusInfo)
	if hsh.daemonSet == nil {
		return res, nil, nil
	}

	logger.L().Debug("Accessing host scanner")
	version, err := hsh.getVersion()
	if err != nil {
		logger.L().Ctx(ctx).Warning(err.Error())
	}

	if len(version) > 0 {
		logger.L().Info("Host scanner version: " + version)
	} else {
		logger.L().Info("Unknown host scanner version")
	}

	var hasCloudProvider bool
	for _, toPin := range []struct {
		Resource scannerResource
		Query    func(context.Context) ([]hostsensor.HostSensorDataEnvelope, error)
	}{
		// queries to the deployed host-scanner
		{
			Resource: OsReleaseFile,
			Query:    hsh.getOsReleaseFile,
		},
		{
			Resource: KernelVersion,
			Query:    hsh.getKernelVersion,
		},
		{
			Resource: LinuxSecurityHardeningStatus,
			Query:    hsh.getLinuxSecurityHardeningStatus,
		},
		{
			Resource: OpenPortsList,
			Query:    hsh.getOpenPortsList,
		},
		{
			Resource: LinuxKernelVariables,
			Query:    hsh.getKernelVariables,
		},
		{
			Resource: KubeletInfo,
			Query:    hsh.getKubeletInfo,
		},
		{
			Resource: KubeProxyInfo,
			Query:    hsh.getKubeProxyInfo,
		},
		{
			Resource: CloudProviderInfo,
			Query:    hsh.getCloudProviderInfo,
		},
		{
			Resource: CNIInfo,
			Query:    hsh.getCNIInfo,
		},
		{
			// ControlPlaneInfo is queried _after_ CloudProviderInfo.
			Resource: ControlPlaneInfo,
			Query:    hsh.getControlPlaneInfo,
		},
	} {
		k8sInfo := toPin

		if k8sInfo.Resource == ControlPlaneInfo && hasCloudProvider {
			// we retrieve control plane info only if we are not using a cloud provider
			continue
		}

		kcData, err := k8sInfo.Query(ctx)
		if err != nil {
			addInfoToMap(k8sInfo.Resource, infoMap, err)
			logger.L().Ctx(ctx).Warning(err.Error())
		}

		if k8sInfo.Resource == CloudProviderInfo {
			hasCloudProvider = hasCloudProviderInfo(kcData)
		}

		if len(kcData) > 0 {
			res = append(res, kcData...)
		}
	}

	logger.L().Debug("Done reading information from host scanner")
	return res, infoMap, nil
}

@@ -11,5 +11,4 @@ type IHostSensor interface {
	Init(ctx context.Context) error
	TearDown() error
	CollectResources(context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error)
	GetNamespace() string
}

@@ -27,7 +27,3 @@ func (hshm *HostSensorHandlerMock) TearDown() error {
func (hshm *HostSensorHandlerMock) CollectResources(_ context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
	return []hostsensor.HostSensorDataEnvelope{}, nil, nil
}

func (hshm *HostSensorHandlerMock) GetNamespace() string {
	return ""
}

@@ -1,98 +0,0 @@
package hostsensorutils

import (
	"context"
	"sync"

	"github.com/kubescape/go-logger"
	"github.com/kubescape/go-logger/helpers"
	"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
)

const noOfWorkers int = 10

type job struct {
	podName     string
	nodeName    string
	requestKind scannerResource
	path        string
}

type workerPool struct {
	jobs        chan job
	results     chan hostsensor.HostSensorDataEnvelope
	done        chan bool
	noOfWorkers int
}

func newWorkerPool() workerPool {
	wp := workerPool{}
	wp.noOfWorkers = noOfWorkers
	wp.init()
	return wp
}

func (wp *workerPool) init(noOfPods ...int) {
	if len(noOfPods) > 0 && noOfPods[0] < noOfWorkers {
		wp.noOfWorkers = noOfPods[0]
	}
	// init the channels
	wp.jobs = make(chan job, noOfWorkers)
	wp.results = make(chan hostsensor.HostSensorDataEnvelope, noOfWorkers)
	wp.done = make(chan bool)
}

// The worker takes a job out of the chan, executes the request, and pushes the result to the results chan.
func (wp *workerPool) hostSensorWorker(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup, log *LogsMap) {
	defer wg.Done()
	for job := range wp.jobs {
		hostSensorDataEnvelope, err := hsh.getResourcesFromPod(job.podName, job.nodeName, job.requestKind, job.path)
		if err != nil {
			if !log.isDuplicated(failedToGetData) {
				logger.L().Ctx(ctx).Warning(failedToGetData, helpers.String("path", job.path), helpers.Error(err))
				log.update(failedToGetData)
			}
			continue
		}
		wp.results <- hostSensorDataEnvelope
	}
}

func (wp *workerPool) createWorkerPool(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup, log *LogsMap) {
	for i := 0; i < wp.noOfWorkers; i++ {
		wg.Add(1)
		go wp.hostSensorWorker(ctx, hsh, wg, log)
	}
}

func (wp *workerPool) waitForDone(wg *sync.WaitGroup) {
	// Waiting for workers to finish
	wg.Wait()
	close(wp.results)

	// Waiting for the results to be processed
	<-wp.done
}

func (wp *workerPool) hostSensorGetResults(result *[]hostsensor.HostSensorDataEnvelope) {
	go func() {
		for res := range wp.results {
			*result = append(*result, res)
		}
		wp.done <- true
	}()
}

func (wp *workerPool) hostSensorApplyJobs(podList map[string]string, path string, requestKind scannerResource) {
	go func() {
		for podName, nodeName := range podList {
			thisJob := job{
				podName:     podName,
				nodeName:    nodeName,
				requestKind: requestKind,
				path:        path,
			}
			wp.jobs <- thisJob
		}
		close(wp.jobs)
	}()
}

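The pool above is standard producer/workers/collector wiring. A self-contained sketch of the same shape (generic types and values, not the kubescape API) that runs standalone:

package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := make(chan int, 10)
	results := make(chan int, 10)
	done := make(chan bool)

	var wg sync.WaitGroup

	// producer: feed the jobs channel, then close it so the workers' range loops end
	go func() {
		for i := 0; i < 25; i++ {
			jobs <- i
		}
		close(jobs)
	}()

	// collector: drain results until the channel is closed, then signal done
	var collected []int
	go func() {
		for r := range results {
			collected = append(collected, r)
		}
		done <- true
	}()

	// workers: consume jobs and push results
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range jobs {
				results <- j * j
			}
		}()
	}

	wg.Wait()      // all workers have returned
	close(results) // lets the collector finish its range loop
	<-done         // the collector has appended everything

	fmt.Println(len(collected)) // 25
}

This mirrors waitForDone: closing results only after wg.Wait() guarantees no worker writes to a closed channel, and blocking on done guarantees the caller sees a fully populated slice.
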
@@ -1,16 +0,0 @@
package hostsensorutils

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Initializes a workerPool struct with default values and returns it
func TestNewWorkerPoolDefaultValues(t *testing.T) {
	wp := newWorkerPool()
	assert.Equal(t, noOfWorkers, wp.noOfWorkers)
	assert.NotNil(t, wp.jobs)
	assert.NotNil(t, wp.results)
	assert.NotNil(t, wp.done)
}

@@ -1,15 +1 @@
package hostsensorutils

import (
	jsoniter "github.com/json-iterator/go"
)

var (
	json jsoniter.API
)

func init() {
	// NOTE(fredbi): attention, this configuration rounds floats down to 6 digits
	// For finer-grained config, see: https://pkg.go.dev/github.com/json-iterator/go#section-readme
	json = jsoniter.ConfigFastest
}

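The NOTE above refers to jsoniter.ConfigFastest trading float precision for speed (roughly six significant digits). A small illustrative sketch; the exact rendered output is an assumption and may vary by jsoniter version:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	fast := jsoniter.ConfigFastest
	std := jsoniter.ConfigCompatibleWithStandardLibrary

	v := 3.14159265358979
	a, _ := fast.Marshal(v)
	b, _ := std.Marshal(v)
	fmt.Println(string(a)) // e.g. 3.141593 — precision capped at ~6 digits
	fmt.Println(string(b)) // 3.14159265358979
}
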
@@ -1,10 +0,0 @@
package hostsensorutils

// messages used for warnings
var (
	failedToGetData                     = "failed to get data"
	failedToTeardownNamespace           = "failed to teardown Namespace"
	oneHostSensorPodIsUnabledToSchedule = "One host-sensor pod is unable to schedule on node. We will fail to collect the data from this node"
	failedToWatchOverDaemonSetPods      = "failed to watch over DaemonSet pods"
	failedToValidateHostSensorPodStatus = "failed to validate host-scanner pods status"
)

@@ -1,74 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app: kubescape-host-scanner
    k8s-app: kubescape-host-scanner
    kubernetes.io/metadata.name: kubescape-host-scanner
    tier: kubescape-host-scanner-control-plane
  name: kubescape
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: host-scanner
  namespace: kubescape
  labels:
    app: host-scanner
    k8s-app: kubescape-host-scanner
    otel: enabled
spec:
  selector:
    matchLabels:
      name: host-scanner
  template:
    metadata:
      labels:
        name: host-scanner
    spec:
      tolerations:
      # this toleration is to have the DaemonSet runnable on all nodes (including masters)
      # remove it if your masters can't run pods
      - operator: Exists
      containers:
      - name: host-sensor
        image: quay.io/kubescape/host-scanner:v1.0.61
        securityContext:
          allowPrivilegeEscalation: true
          privileged: true
          readOnlyRootFilesystem: true
        ports:
        - name: scanner # Do not change port name
          containerPort: 7888
          protocol: TCP
        resources:
          limits:
            cpu: 0.1m
            memory: 200Mi
          requests:
            cpu: 1m
            memory: 200Mi
        volumeMounts:
        - mountPath: /host_fs
          name: host-filesystem
        startupProbe:
          httpGet:
            path: /readyz
            port: 7888
          failureThreshold: 30
          periodSeconds: 1
        livenessProbe:
          httpGet:
            path: /healthz
            port: 7888
          periodSeconds: 10
      terminationGracePeriodSeconds: 120
      dnsPolicy: ClusterFirstWithHostNet
      automountServiceAccountToken: false
      volumes:
      - hostPath:
          path: /
          type: Directory
        name: host-filesystem
      hostPID: true
      hostIPC: true

@@ -1 +0,0 @@
Kubescape is Awesome!

@@ -1,57 +1,13 @@
package hostsensorutils

import (
	"github.com/kubescape/k8s-interface/hostsensor"
	"github.com/kubescape/k8s-interface/k8sinterface"
	"github.com/kubescape/opa-utils/reporthandling/apis"
)

// scannerResource is the enumerated type listing all resources from the host-scanner.
type scannerResource string

const (
	// host-scanner resources

	KubeletConfiguration         scannerResource = "KubeletConfiguration"
	OsReleaseFile                scannerResource = "OsReleaseFile"
	KernelVersion                scannerResource = "KernelVersion"
	LinuxSecurityHardeningStatus scannerResource = "LinuxSecurityHardeningStatus"
	OpenPortsList                scannerResource = "OpenPortsList"
	LinuxKernelVariables         scannerResource = "LinuxKernelVariables"
	KubeletCommandLine           scannerResource = "KubeletCommandLine"
	KubeletInfo                  scannerResource = "KubeletInfo"
	KubeProxyInfo                scannerResource = "KubeProxyInfo"
	ControlPlaneInfo             scannerResource = "ControlPlaneInfo"
	CloudProviderInfo            scannerResource = "CloudProviderInfo"
	CNIInfo                      scannerResource = "CNIInfo"
)

func mapHostSensorResourceToApiGroup(r scannerResource) string {
	switch r {
	case
		KubeletConfiguration,
		OsReleaseFile,
		KubeletCommandLine,
		KernelVersion,
		LinuxSecurityHardeningStatus,
		OpenPortsList,
		LinuxKernelVariables,
		KubeletInfo,
		KubeProxyInfo,
		ControlPlaneInfo,
		CloudProviderInfo,
		CNIInfo:
		return "hostdata.kubescape.cloud/v1beta0"
	default:
		return ""
	}
}

func (r scannerResource) String() string {
	return string(r)
}

func addInfoToMap(resource scannerResource, infoMap map[string]apis.StatusInfo, err error) {
	group, version := k8sinterface.SplitApiVersion(mapHostSensorResourceToApiGroup(resource))
func addInfoToMap(resource hostsensor.HostSensorResource, infoMap map[string]apis.StatusInfo, err error) {
	group, version := k8sinterface.SplitApiVersion(hostsensor.MapHostSensorResourceToApiGroup(resource))
	r := k8sinterface.JoinResourceTriplets(group, version, resource.String())
	infoMap[r] = apis.StatusInfo{
		InnerStatus: apis.StatusSkipped,

@@ -5,6 +5,7 @@ import (
	"fmt"
	"testing"

	"github.com/kubescape/k8s-interface/hostsensor"
	"github.com/kubescape/opa-utils/reporthandling/apis"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

@@ -20,12 +21,12 @@ func TestAddInfoToMap(t *testing.T) {
	testErr := errors.New("test error")

	for _, toPin := range []struct {
		Resource scannerResource
		Resource hostsensor.HostSensorResource
		Err      error
		Expected map[string]apis.StatusInfo
	}{
		{
			Resource: KubeletConfiguration,
			Resource: hostsensor.KubeletConfiguration,
			Err:      testErr,
			Expected: map[string]apis.StatusInfo{
				"hostdata.kubescape.cloud/v1beta0/KubeletConfiguration": {

@@ -35,7 +36,7 @@ func TestAddInfoToMap(t *testing.T) {
			},
		},
		{
			Resource: CNIInfo,
			Resource: hostsensor.CNIInfo,
			Err:      testErr,
			Expected: map[string]apis.StatusInfo{
				"hostdata.kubescape.cloud/v1beta0/CNIInfo": {

@@ -45,7 +46,7 @@ func TestAddInfoToMap(t *testing.T) {
			},
		},
		{
			Resource: scannerResource("invalid"),
			Resource: hostsensor.HostSensorResource("invalid"),
			Err:      testErr,
			Expected: map[string]apis.StatusInfo{
				"//invalid": { // no group, no version

@@ -72,55 +73,55 @@ func TestMapHostSensorResourceToApiGroup(t *testing.T) {
	url := "hostdata.kubescape.cloud/v1beta0"

	tests := []struct {
		resource scannerResource
		resource hostsensor.HostSensorResource
		want     string
	}{
		{
			resource: KubeletConfiguration,
			resource: hostsensor.KubeletConfiguration,
			want:     url,
		},
		{
			resource: OsReleaseFile,
			resource: hostsensor.OsReleaseFile,
			want:     url,
		},
		{
			resource: KubeletCommandLine,
			resource: hostsensor.KubeletCommandLine,
			want:     url,
		},
		{
			resource: KernelVersion,
			resource: hostsensor.KernelVersion,
			want:     url,
		},
		{
			resource: LinuxSecurityHardeningStatus,
			resource: hostsensor.LinuxSecurityHardeningStatus,
			want:     url,
		},
		{
			resource: OpenPortsList,
			resource: hostsensor.OpenPortsList,
			want:     url,
		},
		{
			resource: LinuxKernelVariables,
			resource: hostsensor.LinuxKernelVariables,
			want:     url,
		},
		{
			resource: KubeletInfo,
			resource: hostsensor.KubeletInfo,
			want:     url,
		},
		{
			resource: KubeProxyInfo,
			resource: hostsensor.KubeProxyInfo,
			want:     url,
		},
		{
			resource: ControlPlaneInfo,
			resource: hostsensor.ControlPlaneInfo,
			want:     url,
		},
		{
			resource: CloudProviderInfo,
			resource: hostsensor.CloudProviderInfo,
			want:     url,
		},
		{
			resource: CNIInfo,
			resource: hostsensor.CNIInfo,
			want:     url,
		},
		{

@@ -131,7 +132,7 @@ func TestMapHostSensorResourceToApiGroup(t *testing.T) {

	for _, tt := range tests {
		t.Run(tt.want, func(t *testing.T) {
			assert.Equal(t, tt.want, mapHostSensorResourceToApiGroup(tt.resource))
			assert.Equal(t, tt.want, hostsensor.MapHostSensorResourceToApiGroup(tt.resource))
		})
	}
}

@@ -8,6 +8,7 @@ import (
	"sync"

	"github.com/armosec/armoapi-go/armotypes"
	mapset "github.com/deckarep/golang-set/v2"
	"github.com/kubescape/go-logger"
	"github.com/kubescape/go-logger/helpers"
	"github.com/kubescape/k8s-interface/workloadinterface"

@@ -43,6 +44,8 @@ type OPAProcessor struct {
	excludeNamespaces []string
	includeNamespaces []string
	printEnabled      bool
	compiledModules   map[string]*ast.Compiler
	compiledMu        sync.RWMutex
}

func NewOPAProcessor(sessionObj *cautils.OPASessionObj, regoDependenciesData *resources.RegoDependenciesData, clusterName string, excludeNamespaces string, includeNamespaces string, enableRegoPrint bool) *OPAProcessor {

@@ -58,17 +61,18 @@ func NewOPAProcessor(sessionObj *cautils.OPASessionObj, regoDependenciesData *re
		excludeNamespaces: split(excludeNamespaces),
		includeNamespaces: split(includeNamespaces),
		printEnabled:      enableRegoPrint,
		compiledModules:   make(map[string]*ast.Compiler),
	}
}

func (opap *OPAProcessor) ProcessRulesListener(ctx context.Context, progressListener IJobProgressNotificationClient) error {
	scanningScope := cautils.GetScanningScope(opap.Metadata.ContextMetadata)
	opap.OPASessionObj.AllPolicies = convertFrameworksToPolicies(opap.Policies, opap.ExcludedRules, scanningScope)
	opap.AllPolicies = convertFrameworksToPolicies(opap.Policies, opap.ExcludedRules, scanningScope)

	ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Policies, opap.OPASessionObj.AllPolicies)
	ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Policies, opap.AllPolicies)

	// process
	if err := opap.Process(ctx, opap.OPASessionObj.AllPolicies, progressListener); err != nil {
	if err := opap.Process(ctx, opap.AllPolicies, progressListener); err != nil {
		logger.L().Ctx(ctx).Warning(err.Error())
		// Return error?
	}

@@ -126,7 +130,7 @@ func (opap *OPAProcessor) Process(ctx context.Context, policies *cautils.Policie
}

func (opap *OPAProcessor) loggerStartScanning() {
	targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
	targetScan := opap.Metadata.ScanMetadata.ScanningTarget
	if reporthandlingv2.Cluster == targetScan {
		logger.L().Start("Scanning", helpers.String(targetScan.String(), opap.clusterName))
	} else {

@@ -135,7 +139,7 @@ func (opap *OPAProcessor) loggerStartScanning() {
}

func (opap *OPAProcessor) loggerDoneScanning() {
	targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
	targetScan := opap.Metadata.ScanMetadata.ScanningTarget
	if reporthandlingv2.Cluster == targetScan {
		logger.L().StopSuccess("Done scanning", helpers.String(targetScan.String(), opap.clusterName))
	} else {

@@ -256,13 +260,14 @@ func (opap *OPAProcessor) processRule(ctx context.Context, rule *reporthandling.
	ruleResult.Paths = appendPaths(ruleResult.Paths, ruleResponse.AssistedRemediation, failedResource.GetID())
	// if ruleResponse has relatedObjects, add it to ruleResult
	if len(ruleResponse.RelatedObjects) > 0 {
		relatedResourcesSet := mapset.NewSet[string](ruleResult.RelatedResourcesIDs...)
		for _, relatedObject := range ruleResponse.RelatedObjects {
			wl := objectsenvelopes.NewObject(relatedObject.Object)
			if wl != nil {
				// avoid adding duplicate related resource IDs
				if !slices.Contains(ruleResult.RelatedResourcesIDs, wl.GetID()) {
				if !relatedResourcesSet.Contains(wl.GetID()) {
					ruleResult.RelatedResourcesIDs = append(ruleResult.RelatedResourcesIDs, wl.GetID())
				}
				relatedResourcesSet.Add(wl.GetID())
				ruleResult.Paths = appendPaths(ruleResult.Paths, relatedObject.AssistedRemediation, wl.GetID())
			}
		}

@@ -307,27 +312,16 @@ func (opap *OPAProcessor) runOPAOnSingleRule(ctx context.Context, rule *reportha

// runRegoOnK8s compiles an OPA PolicyRule and evaluates it against k8s
func (opap *OPAProcessor) runRegoOnK8s(ctx context.Context, rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, ruleRegoDependenciesData resources.RegoDependenciesData) ([]reporthandling.RuleResponse, error) {
	modules, err := getRuleDependencies(ctx)
	if err != nil {
		return nil, fmt.Errorf("rule: '%s', %s", rule.Name, err.Error())
	}

	opap.opaRegisterOnce.Do(func() {
		// register signature verification methods for the OPA ast engine (since these are package level symbols, we do it only once)
		rego.RegisterBuiltin2(cosignVerifySignatureDeclaration, cosignVerifySignatureDefinition)
		rego.RegisterBuiltin1(cosignHasSignatureDeclaration, cosignHasSignatureDefinition)
		rego.RegisterBuiltin1(imageNameNormalizeDeclaration, imageNameNormalizeDefinition)
	})

	modules[rule.Name] = getRuleData(rule)

	// NOTE: OPA module compilation is the most resource-intensive operation.
	compiled, err := ast.CompileModulesWithOpt(modules, ast.CompileOpts{
		EnablePrintStatements: opap.printEnabled,
		ParserOptions:         ast.ParserOptions{RegoVersion: ast.RegoV0},
	})
	ruleData := getRuleData(rule)
	compiled, err := opap.getCompiledRule(ctx, rule.Name, ruleData, opap.printEnabled)
	if err != nil {
		return nil, fmt.Errorf("in 'runRegoOnK8s', failed to compile rule, name: %s, reason: %w", rule.Name, err)
		return nil, fmt.Errorf("rule: '%s', %w", rule.Name, err)
	}

	store, err := ruleRegoDependenciesData.TOStorage()

@@ -335,7 +329,6 @@ func (opap *OPAProcessor) runRegoOnK8s(ctx context.Context, rule *reporthandling
		return nil, err
	}

	// Eval
	results, err := opap.regoEval(ctx, k8sObjects, compiled, &store)
	if err != nil {
		logger.L().Ctx(ctx).Warning(err.Error())

@@ -405,7 +398,7 @@ func (opap *OPAProcessor) makeRegoDeps(configInputs []reporthandling.ControlConf
	}

	dataControlInputs := map[string]string{
		"cloudProvider": opap.OPASessionObj.Report.ClusterCloudProvider,
		"cloudProvider": opap.Report.ClusterCloudProvider,
	}

	return resources.RegoDependenciesData{

@@ -435,3 +428,43 @@ func split(namespaces string) []string {
	}
	return strings.Split(namespaces, ",")
}

func (opap *OPAProcessor) getCompiledRule(ctx context.Context, ruleName, ruleData string, printEnabled bool) (*ast.Compiler, error) {
	cacheKey := ruleName + "|" + ruleData

	opap.compiledMu.RLock()
	if compiled, ok := opap.compiledModules[cacheKey]; ok {
		opap.compiledMu.RUnlock()
		return compiled, nil
	}
	opap.compiledMu.RUnlock()

	opap.compiledMu.Lock()
	defer opap.compiledMu.Unlock()

	if compiled, ok := opap.compiledModules[cacheKey]; ok {
		return compiled, nil
	}

	baseModules, err := getRuleDependencies(ctx)
	if err != nil {
		return nil, fmt.Errorf("failed to get rule dependencies: %w", err)
	}

	modules := make(map[string]string, len(baseModules)+1)
	for k, v := range baseModules {
		modules[k] = v
	}
	modules[ruleName] = ruleData

	compiled, err := ast.CompileModulesWithOpt(modules, ast.CompileOpts{
		EnablePrintStatements: printEnabled,
		ParserOptions:         ast.ParserOptions{RegoVersion: ast.RegoV0},
	})
	if err != nil {
		return nil, fmt.Errorf("failed to compile rule '%s': %w", ruleName, err)
	}

	opap.compiledModules[cacheKey] = compiled
	return compiled, nil
}

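getCompiledRule uses the classic double-checked locking idiom around an RWMutex: a cheap read-locked fast path, then a write-locked slow path that re-checks the key, because another goroutine may have populated the cache between RUnlock and Lock. A stripped-down, runnable sketch of the same pattern (generic names, not the kubescape types):

package main

import (
	"fmt"
	"sync"
)

type cache struct {
	mu    sync.RWMutex
	items map[string]string
}

func (c *cache) get(key string, build func() string) string {
	// fast path: shared read lock
	c.mu.RLock()
	if v, ok := c.items[key]; ok {
		c.mu.RUnlock()
		return v
	}
	c.mu.RUnlock()

	// slow path: exclusive lock
	c.mu.Lock()
	defer c.mu.Unlock()
	if v, ok := c.items[key]; ok { // re-check: another goroutine may have won the race
		return v
	}
	v := build()
	c.items[key] = v
	return v
}

func main() {
	c := &cache{items: make(map[string]string)}
	fmt.Println(c.get("rule-a", func() string { return "compiled once" }))
	fmt.Println(c.get("rule-a", func() string { return "never called" }))
}

The re-check after taking the write lock is what makes the idiom safe: without it, two goroutines that both miss on the read lock would each compile and overwrite the entry.
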
@@ -20,6 +20,7 @@ import (
	"github.com/kubescape/opa-utils/reporthandling"
	"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
	"github.com/kubescape/opa-utils/resources"
	"github.com/open-policy-agent/opa/v1/ast"
	"github.com/stretchr/testify/assert"
)

@@ -49,10 +50,6 @@ func unzipAllResourcesTestDataAndSetVar(zipFilePath, destFilePath string) error
	os.RemoveAll(destFilePath)

	f := archive.File[0]
	if err != nil {
		return err
	}

	dstFile, err := os.OpenFile(destFilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
	if err != nil {
		return err

@@ -64,6 +61,12 @@ func unzipAllResourcesTestDataAndSetVar(zipFilePath, destFilePath string) error
	}

	_, err = io.Copy(dstFile, fileInArchive) //nolint:gosec
	if err != nil {
		dstFile.Close()
		fileInArchive.Close()
		archive.Close()
		return err
	}

	dstFile.Close()
	fileInArchive.Close()

@@ -83,7 +86,9 @@ func unzipAllResourcesTestDataAndSetVar(zipFilePath, destFilePath string) error
}

func NewOPAProcessorMock(opaSessionObjMock string, resourcesMock []byte) *OPAProcessor {
	opap := &OPAProcessor{}
	opap := &OPAProcessor{
		compiledModules: make(map[string]*ast.Compiler),
	}
	if err := json.Unmarshal([]byte(regoDependenciesData), &opap.regoDependenciesData); err != nil {
		panic(err)
	}

@@ -165,12 +170,12 @@ func BenchmarkProcess(b *testing.B) {
		go monitorHeapSpace(&maxHeap, quitChan)

		// test
		opap.Process(context.Background(), opap.OPASessionObj.AllPolicies, nil)
		opap.Process(context.Background(), opap.AllPolicies, nil)

		// teardown
		quitChan <- true
		b.Log(fmt.Sprintf("%s_max_heap_space_gb: %.2f", testName, float64(maxHeap)/(1024*1024*1024)))
		b.Log(fmt.Sprintf("%s_execution_time_sec: %f", testName, b.Elapsed().Seconds()))
		b.Logf("%s_max_heap_space_gb: %.2f", testName, float64(maxHeap)/(1024*1024*1024))
		b.Logf("%s_execution_time_sec: %f", testName, b.Elapsed().Seconds())
	})
	}
}

@@ -77,7 +77,7 @@ var cosignVerifySignatureDefinition = func(bctx rego.BuiltinContext, a, b *ast.T
		return nil, fmt.Errorf("invalid parameter type: %v", err)
	}
	// Replace escaped "\n" sequences with real newlines
	bbStr := strings.Replace(string(bStr), "\\n", "\n", -1)
	bbStr := strings.ReplaceAll(string(bStr), "\\n", "\n")
	result, err := verify(string(aStr), bbStr)
	if err != nil {
		// Do not change this log from debug level. We might find a lot of images without signature

@@ -3,7 +3,6 @@ package resourcehandler

import (
	"context"
	_ "embed"
	"encoding/json"
	"testing"

	"github.com/kubescape/k8s-interface/k8sinterface"

@@ -16,21 +15,8 @@ import (
	"k8s.io/client-go/dynamic/fake"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/rest"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

var (
	//go:embed testdata/kubeconfig_mock.json
	kubeConfigMock string
)

func getKubeConfigMock() *clientcmdapi.Config {
	kubeConfig := clientcmdapi.Config{}
	if err := json.Unmarshal([]byte(kubeConfigMock), &kubeConfig); err != nil {
		panic(err)
	}
	return &kubeConfig
}
func Test_getCloudMetadata(t *testing.T) {

	tests := []struct {

@@ -73,7 +59,7 @@ func Test_getCloudMetadata(t *testing.T) {
// https://github.com/kubescape/kubescape/pull/1004
// Cluster named .*eks.* config without a cloudconfig panics whereas we just want to scan a file
func getResourceHandlerMock() *K8sResourceHandler {
	client := fakeclientset.NewSimpleClientset()
	client := fakeclientset.NewClientset()
	fakeDiscovery := client.Discovery()

	k8s := &k8sinterface.KubernetesApi{

@@ -35,7 +35,7 @@ func CollectResources(ctx context.Context, rsrcHandler IResourceHandler, opaSess
	opaSessionObj.ExternalResources = externalResources
	opaSessionObj.ExcludedRules = excludedRulesMap

	if (opaSessionObj.K8SResources == nil || len(opaSessionObj.K8SResources) == 0) && (opaSessionObj.ExternalResources == nil || len(opaSessionObj.ExternalResources) == 0) || len(opaSessionObj.AllResources) == 0 {
	if len(opaSessionObj.K8SResources) == 0 && len(opaSessionObj.ExternalResources) == 0 || len(opaSessionObj.AllResources) == 0 {
		return fmt.Errorf("no resources found to scan")
	}

@@ -3,13 +3,13 @@ package resourcehandler

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"

	giturls "github.com/chainguard-dev/git-urls"
	"github.com/kubescape/kubescape/v3/core/cautils/getter"
	"k8s.io/utils/strings/slices"
)

@@ -167,7 +167,7 @@ func (g *GitHubRepository) setBranch(branchOptional string) error {
	if g.branch != "" {
		return nil
	}
	body, err := getter.HttpGetter(&http.Client{}, g.defaultBranchAPI(), g.getHeaders())
	body, err := httpGet(&http.Client{}, g.defaultBranchAPI(), g.getHeaders())
	if err != nil {
		return err
	}

@@ -193,12 +193,27 @@ func (g *GitHubRepository) getHeaders() map[string]string {
	}
	return map[string]string{"Authorization": fmt.Sprintf("token %s", g.token)}
}
func httpGet(client *http.Client, url string, headers map[string]string) ([]byte, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	for k, v := range headers {
		req.Header.Set(k, v)
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

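One thing httpGet deliberately leaves to its callers is status-code handling: a 403 or 404 body is returned as-is. An illustrative stricter variant (not part of this change; the name httpGetChecked is hypothetical) that rejects non-2xx responses up front:

package resourcehandler

import (
	"fmt"
	"io"
	"net/http"
)

// httpGetChecked behaves like httpGet but fails fast on non-2xx responses
// instead of handing an error body to the JSON decoder downstream.
func httpGetChecked(client *http.Client, url string, headers map[string]string) ([]byte, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	for k, v := range headers {
		req.Header.Set(k, v)
	}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return nil, fmt.Errorf("GET %s: unexpected status %s", url, resp.Status)
	}
	return io.ReadAll(resp.Body)
}
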
func (g *GitHubRepository) setTree() error {
	if g.isFile {
		return nil
	}

	body, err := getter.HttpGetter(&http.Client{}, g.treeAPI(), g.getHeaders())
	body, err := httpGet(&http.Client{}, g.treeAPI(), g.getHeaders())
	if err != nil {
		return err
	}

@@ -138,7 +138,7 @@ func updateQueryableResourcesMapFromRuleMatchObject(match *reporthandling.RuleMa
	}
	queryableResource.AddFieldSelector(globalFieldSelector)

	if match.FieldSelector == nil || len(match.FieldSelector) == 0 {
	if len(match.FieldSelector) == 0 {
		queryableResources.Add(queryableResource)
		continue
	}

@@ -37,7 +37,7 @@ func TestResolveLocation(t *testing.T) {

	resolver, _ := NewFixPathLocationResolver(yamlFilePath)

	for fixPath, _ := range fixPathToExpectedLineAndColumn {
	for fixPath := range fixPathToExpectedLineAndColumn {
		location, err := resolver.ResolveLocation(fixPath, 100000)
		assert.Contains(t, err.Error(), "node index [100000] out of range ")
		assert.Empty(t, location)

@@ -70,7 +70,7 @@ func (jp *JsonPrinter) convertToImageScanSummary(imageScanData []cautils.ImageSc
		imageScanSummary.Images = append(imageScanSummary.Images, imageScanData[i].Image)
	}

	CVEs := extractCVEs(imageScanData[i].Matches)
	CVEs := extractCVEs(imageScanData[i].Matches, imageScanData[i].Image)
	imageScanSummary.CVEs = append(imageScanSummary.CVEs, CVEs...)

	setPkgNameToScoreMap(imageScanData[i].Matches, imageScanSummary.PackageScores)

@@ -126,6 +126,9 @@ func printConfigurationsScanning(opaSessionObj *cautils.OPASessionObj, imageScan
	reportWithSeverity := ConvertToPostureReportWithSeverityAndLabels(finalizedReport, opaSessionObj.LabelsToCopy, opaSessionObj.AllResources)

	r, err := json.Marshal(reportWithSeverity)
	if err != nil {
		return err
	}
	_, err = jp.writer.Write(r)

	return err

@@ -1,7 +1,7 @@
package printer

import (
	"io/ioutil"
	"io"
	"os"
	"testing"

@@ -79,7 +79,7 @@ func TestScore_Json(t *testing.T) {

	// Read the contents of the temporary file
	f.Seek(0, 0)
	got, err := ioutil.ReadAll(f)
	got, err := io.ReadAll(f)
	if err != nil {
		panic(err)
	}

@@ -169,22 +169,22 @@ func TestConvertToPackageScores(t *testing.T) {

func TestConvertToReportSummary(t *testing.T) {
	input := map[string]*imageprinter.SeveritySummary{
		"High": &imageprinter.SeveritySummary{
		"High": {
			NumberOfCVEs:        10,
			NumberOfFixableCVEs: 5,
		},
		"Medium": &imageprinter.SeveritySummary{
		"Medium": {
			NumberOfCVEs:        5,
			NumberOfFixableCVEs: 2,
		},
	}

	want := map[string]*reportsummary.SeveritySummary{
		"High": &reportsummary.SeveritySummary{
		"High": {
			NumberOfCVEs:        10,
			NumberOfFixableCVEs: 5,
		},
		"Medium": &reportsummary.SeveritySummary{
		"Medium": {
			NumberOfCVEs:        5,
			NumberOfFixableCVEs: 2,
		},

@@ -125,11 +125,12 @@ func (pp *PdfPrinter) getTableObjects(summaryDetails *reportsummary.SummaryDetai
}

func getSeverityColor(severity string) *props.Color {
	if severity == "Critical" {
	switch severity {
	case "Critical":
		return &props.Color{Red: 255, Green: 0, Blue: 0}
	} else if severity == "High" {
	case "High":
		return &props.Color{Red: 0, Green: 0, Blue: 255}
	} else if severity == "Medium" {
	case "Medium":
		return &props.Color{Red: 252, Green: 186, Blue: 3}
	}
	return &props.BlackColor

@@ -2,7 +2,7 @@ package printer

import (
	"context"
	"io/ioutil"
	"io"
	"os"
	"testing"

@@ -76,7 +76,7 @@ func TestScore_Pdf(t *testing.T) {

	// Read the contents of the temporary file
	f.Seek(0, 0)
	got, err := ioutil.ReadAll(f)
	got, err := io.ReadAll(f)
	if err != nil {
		panic(err)
	}

@@ -24,11 +24,6 @@ import (
	"k8s.io/utils/strings/slices"
)

const (
	prettyPrinterOutputFile             = "report"
	clusterScanningScopeInformationLink = "https://github.com/kubescape/regolibrary/tree/master#add-a-framework"
)

var _ printer.IPrinter = &PrettyPrinter{}

type PrettyPrinter struct {

@@ -60,13 +55,13 @@ func NewPrettyPrinter(verboseMode bool, formatVersion string, attackTree bool, v
func (pp *PrettyPrinter) SetMainPrinter() {
	switch pp.scanType {
	case cautils.ScanTypeCluster:
		pp.mainPrinter = prettyprinter.NewClusterPrinter(pp.writer)
		pp.mainPrinter = prettyprinter.NewClusterPrinter(pp.writer, pp.verboseMode)
	case cautils.ScanTypeRepo:
		pp.mainPrinter = prettyprinter.NewRepoPrinter(pp.writer, pp.inputPatterns)
		pp.mainPrinter = prettyprinter.NewRepoPrinter(pp.writer, pp.inputPatterns, pp.verboseMode)
	case cautils.ScanTypeImage:
		pp.mainPrinter = prettyprinter.NewImagePrinter(pp.writer, pp.verboseMode)
	case cautils.ScanTypeWorkload:
		pp.mainPrinter = prettyprinter.NewWorkloadPrinter(pp.writer)
		pp.mainPrinter = prettyprinter.NewWorkloadPrinter(pp.writer, pp.verboseMode)
	default:
		pp.mainPrinter = prettyprinter.NewSummaryPrinter(pp.writer, pp.verboseMode)
	}

@@ -89,7 +84,7 @@ func (pp *PrettyPrinter) convertToImageScanSummary(imageScanData []cautils.Image
		imageScanSummary.Images = append(imageScanSummary.Images, imageScanData[i].Image)
	}

	CVEs := extractCVEs(imageScanData[i].Matches)
	CVEs := extractCVEs(imageScanData[i].Matches, imageScanData[i].Image)
	imageScanSummary.CVEs = append(imageScanSummary.CVEs, CVEs...)

	setPkgNameToScoreMap(imageScanData[i].Matches, imageScanSummary.PackageScores)

@@ -157,12 +152,13 @@ func (pp *PrettyPrinter) printOverview(opaSessionObj *cautils.OPASessionObj, pri
}

func (pp *PrettyPrinter) printHeader(opaSessionObj *cautils.OPASessionObj) {
	if pp.scanType == cautils.ScanTypeCluster {
	switch pp.scanType {
	case cautils.ScanTypeCluster:
		cautils.InfoDisplay(pp.writer, fmt.Sprintf("\nSecurity posture overview for cluster: '%s'\n\n", pp.clusterName))
		cautils.SimpleDisplay(pp.writer, "In this overview, Kubescape shows you a summary of your cluster security posture, including the number of users who can perform administrative actions. For each result greater than 0, you should evaluate its need, and then define an exception to allow it. This baseline can be used to detect drift in future.\n\n")
	} else if pp.scanType == cautils.ScanTypeRepo {
	case cautils.ScanTypeRepo:
		cautils.InfoDisplay(pp.writer, fmt.Sprintf("\nSecurity posture overview for repo: '%s'\n\n", strings.Join(pp.inputPatterns, ", ")))
	} else if pp.scanType == cautils.ScanTypeWorkload {
	case cautils.ScanTypeWorkload:
		cautils.InfoDisplay(pp.writer, "Workload security posture overview for:\n")
		ns := opaSessionObj.SingleResourceScan.GetNamespace()
		var rows []table.Row

@@ -14,20 +14,30 @@ import (
type ClusterPrinter struct {
	writer                 *os.File
	categoriesTablePrinter configurationprinter.TablePrinter
	imageTablePrinter      imageprinter.TablePrinter
	verboseMode            bool
}

func NewClusterPrinter(writer *os.File) *ClusterPrinter {
func NewClusterPrinter(writer *os.File, verboseMode bool) *ClusterPrinter {
	return &ClusterPrinter{
		writer:                 writer,
		categoriesTablePrinter: configurationprinter.NewClusterPrinter(),
		imageTablePrinter:      imageprinter.NewTableWriter(),
		verboseMode:            verboseMode,
	}
}

var _ MainPrinter = &ClusterPrinter{}

func (cp *ClusterPrinter) PrintImageScanning(summary *imageprinter.ImageScanSummary) {
	printImageScanningSummary(cp.writer, *summary, false)
	printImagesCommands(cp.writer, *summary)
	if cp.verboseMode {
		cp.imageTablePrinter.PrintImageScanningTable(cp.writer, *summary)
		cautils.SimpleDisplay(cp.writer, "\n")
	}
	printImageScanningSummary(cp.writer, *summary, cp.verboseMode)
	if !cp.verboseMode {
		printImagesCommands(cp.writer, *summary)
	}
}

func (cp *ClusterPrinter) PrintConfigurationsScanning(summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string, topWorkloadsByScore []reporthandling.IResource) {

@@ -42,17 +42,21 @@ func TestClusterScan_getWorkloadScanCommand(t *testing.T) {

func TestNewClusterPrinter(t *testing.T) {
	// Test case 1: Valid writer
	cp := NewClusterPrinter(os.Stdout)
	cp := NewClusterPrinter(os.Stdout, false)
	assert.NotNil(t, cp)
	assert.Equal(t, os.Stdout, cp.writer)
	assert.NotNil(t, cp.categoriesTablePrinter)
	assert.NotNil(t, cp.imageTablePrinter)
	assert.False(t, cp.verboseMode)

	// Test case 2: Nil writer
	var writer *os.File
	cp = NewClusterPrinter(writer)
	cp = NewClusterPrinter(writer, true)
	assert.NotNil(t, cp)
	assert.Nil(t, cp.writer)
	assert.NotNil(t, cp.categoriesTablePrinter)
	assert.NotNil(t, cp.imageTablePrinter)
	assert.True(t, cp.verboseMode)
}

func TestPrintNextSteps(t *testing.T) {

@@ -63,7 +67,7 @@ func TestPrintNextSteps(t *testing.T) {
	}
	defer f.Close()

	cp := NewClusterPrinter(f)
	cp := NewClusterPrinter(f, false)

	// Redirect stderr to the temporary file
	oldStderr := os.Stderr

@@ -88,7 +92,7 @@ func TestPrintNextSteps(t *testing.T) {
}

func TestGetWorkloadScanCommand(t *testing.T) {
	cp := NewClusterPrinter(os.Stdout)
	cp := NewClusterPrinter(os.Stdout, false)
	assert.NotNil(t, cp)
	assert.Equal(t, os.Stdout, cp.writer)
	assert.NotNil(t, cp.categoriesTablePrinter)

@@ -3,6 +3,7 @@ package prettyprinter
import (
	"os"

	"github.com/kubescape/kubescape/v3/core/cautils"
	"github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/configurationprinter"
	"github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/imageprinter"
	"github.com/kubescape/opa-utils/reporthandling"

@@ -15,6 +16,7 @@ type SummaryPrinter struct {
	writer              *os.File
	verboseMode         bool
	summaryTablePrinter configurationprinter.TablePrinter
	imageTablePrinter   imageprinter.TablePrinter
}

func NewSummaryPrinter(writer *os.File, verboseMode bool) *SummaryPrinter {

@@ -22,12 +24,21 @@ func NewSummaryPrinter(writer *os.File, verboseMode bool) *SummaryPrinter {
		writer:              writer,
		verboseMode:         verboseMode,
		summaryTablePrinter: configurationprinter.NewFrameworkPrinter(verboseMode),
		imageTablePrinter:   imageprinter.NewTableWriter(),
	}
}

var _ MainPrinter = &RepoPrinter{}

func (sp *SummaryPrinter) PrintImageScanning(*imageprinter.ImageScanSummary) {}
func (sp *SummaryPrinter) PrintImageScanning(summary *imageprinter.ImageScanSummary) {
	if sp.verboseMode {
		sp.imageTablePrinter.PrintImageScanningTable(sp.writer, *summary)
		cautils.SimpleDisplay(sp.writer, "\n")
	}
	printImageScanningSummary(sp.writer, *summary, sp.verboseMode)
	if !sp.verboseMode {
		printImagesCommands(sp.writer, *summary)
	}
	printTopComponents(sp.writer, *summary)
}

func (sp *SummaryPrinter) PrintNextSteps() {}

@@ -15,6 +15,7 @@ func TestNewSummaryPrinter(t *testing.T) {
	assert.Equal(t, os.Stdout, printer.writer)
	assert.Equal(t, verbose, printer.verboseMode)
	assert.NotNil(t, printer.summaryTablePrinter)
	assert.NotNil(t, printer.imageTablePrinter)

	// Test case 2: Valid writer and non-verbose mode
	verbose = false

@@ -23,6 +24,7 @@ func TestNewSummaryPrinter(t *testing.T) {
	assert.Equal(t, os.Stdout, printer.writer)
	assert.Equal(t, verbose, printer.verboseMode)
	assert.NotNil(t, printer.summaryTablePrinter)
	assert.NotNil(t, printer.imageTablePrinter)

	// Test case 3: Nil writer and verbose mode
	var writer *os.File

@@ -32,6 +34,7 @@ func TestNewSummaryPrinter(t *testing.T) {
	assert.Nil(t, printer.writer)
	assert.Equal(t, verbose, printer.verboseMode)
	assert.NotNil(t, printer.summaryTablePrinter)
	assert.NotNil(t, printer.imageTablePrinter)

	// Test case 4: Nil writer and non-verbose mode
	verbose = false

@@ -40,6 +43,7 @@ func TestNewSummaryPrinter(t *testing.T) {
	assert.Nil(t, printer.writer)
	assert.Equal(t, verbose, printer.verboseMode)
	assert.NotNil(t, printer.summaryTablePrinter)
	assert.NotNil(t, printer.imageTablePrinter)
}

func TestGetVerboseMode(t *testing.T) {

@@ -15,20 +15,30 @@ import (
type RepoPrinter struct {
	writer                 *os.File
	categoriesTablePrinter configurationprinter.TablePrinter
	imageTablePrinter      imageprinter.TablePrinter
	verboseMode            bool
}

func NewRepoPrinter(writer *os.File, inputPatterns []string) *RepoPrinter {
func NewRepoPrinter(writer *os.File, inputPatterns []string, verboseMode bool) *RepoPrinter {
	return &RepoPrinter{
		writer:                 writer,
		categoriesTablePrinter: configurationprinter.NewRepoPrinter(inputPatterns),
		imageTablePrinter:      imageprinter.NewTableWriter(),
		verboseMode:            verboseMode,
	}
}

var _ MainPrinter = &RepoPrinter{}

func (rp *RepoPrinter) PrintImageScanning(summary *imageprinter.ImageScanSummary) {
	printImageScanningSummary(rp.writer, *summary, false)
	printImagesCommands(rp.writer, *summary)
	if rp.verboseMode {
		rp.imageTablePrinter.PrintImageScanningTable(rp.writer, *summary)
		cautils.SimpleDisplay(rp.writer, "\n")
	}
	printImageScanningSummary(rp.writer, *summary, rp.verboseMode)
	if !rp.verboseMode {
		printImagesCommands(rp.writer, *summary)
	}
	printTopComponents(rp.writer, *summary)
}

@@ -67,11 +67,11 @@ func generateCategoryStatusRow(controlSummary reportsummary.IControlSummary) tab

	rows[0] = utils.GetStatusIcon(controlSummary.GetStatus().Status())

	rows[1] = controlSummary.GetName()
	if len(controlSummary.GetName()) > 50 {
		rows[1] = controlSummary.GetName()[:50] + "..."
	name := controlSummary.GetName()
	if len(name) > 50 {
		rows[1] = name[:50] + "..." //nolint:gosec // Safe: rows has length 3, accessing index 1
	} else {
		rows[1] = controlSummary.GetName()
		rows[1] = name //nolint:gosec // Safe: rows has length 3, accessing index 1
	}

	rows[2] = getDocsForControl(controlSummary)

@@ -8,7 +8,6 @@ import (
	"github.com/jedib0t/go-pretty/v6/table"
	"github.com/jwalton/gchalk"
	"github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/utils"
	"github.com/kubescape/opa-utils/reporthandling"
	"github.com/kubescape/opa-utils/reporthandling/apis"
	"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
)

@@ -102,19 +101,6 @@ func (rp *RepoPrinter) generateCountingCategoryRow(controlSummary reportsummary.
	return rows
}

func (rp *RepoPrinter) getWorkloadScanCommand(ns, kind, name string, source reporthandling.Source) string {
	cmd := fmt.Sprintf("$ kubescape scan workload %s/%s/%s", ns, kind, name)
	if ns == "" {
		cmd = fmt.Sprintf("$ kubescape scan workload %s/%s", kind, name)
	}
	if source.FileType == "Helm" {
		return fmt.Sprintf("%s --chart-path=%s", cmd, source.RelativePath)
	} else {
		return fmt.Sprintf("%s --file-path=%s", cmd, source.RelativePath)
	}
}

func (rp *RepoPrinter) generateTableNextSteps(controlSummary reportsummary.IControlSummary, inputPatterns []string) string {
	return fmt.Sprintf("$ kubescape scan control %s %s -v", controlSummary.GetID(), strings.Join(inputPatterns, ","))
}

@@ -72,9 +72,9 @@ func GenerateRow(controlSummary reportsummary.IControlSummary, infoToPrintInfo [

	row[summaryColumnSeverity] = GetSeverityColumn(controlSummary)
	if len(controlSummary.GetName()) > 50 {
		row[summaryColumnName] = controlSummary.GetName()[:50] + "..."
		row[summaryColumnName] = controlSummary.GetName()[:50] + "..." //nolint:gosec // Safe: row has length _summaryRowLen (5), accessing index 1
	} else {
		row[summaryColumnName] = controlSummary.GetName()
		row[summaryColumnName] = controlSummary.GetName() //nolint:gosec // Safe: row has length _summaryRowLen (5), accessing index 1
	}
	row[summaryColumnCounterFailed] = fmt.Sprintf("%d", controlSummary.NumberOfResources().Failed())
	row[summaryColumnCounterAll] = fmt.Sprintf("%d", controlSummary.NumberOfResources().All())

@@ -19,6 +19,7 @@ type CVE struct {
	Version     string
	FixVersions []string
	FixedState  string
	Image       string
}

type PackageScore struct {

@@ -10,6 +10,7 @@ const (
	imageColumnComponent = iota
	imageColumnVersion   = iota
	imageColumnFixedIn   = iota
	imageColumnImage     = iota
)

type TableWriter struct {

@@ -25,6 +25,7 @@ func TestPrintImageScanningTable(t *testing.T) {
					Package:    "package1",
					Version:    "1.0.0",
					FixedState: string(v5.NotFixedState),
					Image:      "nginx:latest",
				},
				{
					ID: "CVE-2020-0002",

@@ -32,6 +33,7 @@ func TestPrintImageScanningTable(t *testing.T) {
					Package:    "package2",
					Version:    "1.0.0",
					FixedState: string(v5.NotFixedState),
					Image:      "alpine:3.18",
				},
				{
					ID: "CVE-2020-0003",

@@ -39,10 +41,11 @@ func TestPrintImageScanningTable(t *testing.T) {
					Package:    "package3",
					Version:    "1.0.0",
					FixedState: string(v5.NotFixedState),
					Image:      "ubuntu:22.04",
				},
			},
		},
		want: "╭──────────┬───────────────┬───────────┬─────────┬──────────╮\n│ Severity │ Vulnerability │ Component │ Version │ Fixed in │\n├──────────┼───────────────┼───────────┼─────────┼──────────┤\n│ High │ CVE-2020-0002 │ package2 │ 1.0.0 │ │\n│ Medium │ CVE-2020-0003 │ package3 │ 1.0.0 │ │\n│ Low │ CVE-2020-0001 │ package1 │ 1.0.0 │ │\n╰──────────┴───────────────┴───────────┴─────────┴──────────╯\n",
		want: "╭──────────┬───────────────┬───────────┬─────────┬──────────┬──────────────╮\n│ Severity │ Vulnerability │ Component │ Version │ Fixed in │ Image │\n├──────────┼───────────────┼───────────┼─────────┼──────────┼──────────────┤\n│ High │ CVE-2020-0002 │ package2 │ 1.0.0 │ │ alpine:3.18 │\n│ Medium │ CVE-2020-0003 │ package3 │ 1.0.0 │ │ ubuntu:22.04 │\n│ Low │ CVE-2020-0001 │ package1 │ 1.0.0 │ │ nginx:latest │\n╰──────────┴───────────────┴───────────┴─────────┴──────────┴──────────────╯\n",
	},
	{
		name: "check fixed CVEs show versions",

@@ -54,6 +57,7 @@ func TestPrintImageScanningTable(t *testing.T) {
					Package:    "package1",
					Version:    "1.0.0",
					FixedState: string(v5.NotFixedState),
					Image:      "test:latest",
				},
				{
					ID: "CVE-2020-0002",

@@ -62,10 +66,11 @@ func TestPrintImageScanningTable(t *testing.T) {
					Version:     "1.0.0",
					FixVersions: []string{"v1", "v2"},
					FixedState:  string(v5.FixedState),
					Image:       "golang:1.24",
				},
			},
		},
		want: "╭──────────┬───────────────┬───────────┬─────────┬──────────╮\n│ Severity │ Vulnerability │ Component │ Version │ Fixed in │\n├──────────┼───────────────┼───────────┼─────────┼──────────┤\n│ High │ CVE-2020-0002 │ package2 │ 1.0.0 │ v1,v2 │\n│ Low │ CVE-2020-0001 │ package1 │ 1.0.0 │ │\n╰──────────┴───────────────┴───────────┴─────────┴──────────╯\n",
		want: "╭──────────┬───────────────┬───────────┬─────────┬──────────┬─────────────╮\n│ Severity │ Vulnerability │ Component │ Version │ Fixed in │ Image │\n├──────────┼───────────────┼───────────┼─────────┼──────────┼─────────────┤\n│ High │ CVE-2020-0002 │ package2 │ 1.0.0 │ v1,v2 │ golang:1.24 │\n│ Low │ CVE-2020-0001 │ package1 │ 1.0.0 │ │ test:latest │\n╰──────────┴───────────────┴───────────┴─────────┴──────────┴─────────────╯\n",
	},
	}

@@ -29,9 +29,12 @@ func renderTable(writer io.Writer, headers table.Row, columnAlignments []table.C
func generateRows(summary ImageScanSummary) []table.Row {
	rows := make([]table.Row, 0, len(summary.CVEs))

	// sort CVEs by severity
	// sort CVEs by severity (descending) and then by CVE ID (ascending)
	sort.Slice(summary.CVEs, func(i, j int) bool {
		return utils.ImageSeverityToInt(summary.CVEs[i].Severity) > utils.ImageSeverityToInt(summary.CVEs[j].Severity)
		if utils.ImageSeverityToInt(summary.CVEs[i].Severity) != utils.ImageSeverityToInt(summary.CVEs[j].Severity) {
			return utils.ImageSeverityToInt(summary.CVEs[i].Severity) > utils.ImageSeverityToInt(summary.CVEs[j].Severity)
		}
		return summary.CVEs[i].ID < summary.CVEs[j].ID
	})

	for _, cve := range summary.CVEs {

@@ -42,11 +45,12 @@ func generateRows(summary ImageScanSummary) []table.Row {
}

func generateRow(cve CVE) table.Row {
	row := make(table.Row, 5)
	row := make(table.Row, 6)
	row[imageColumnSeverity] = utils.GetColorForVulnerabilitySeverity(cve.Severity)(cve.Severity)
	row[imageColumnName] = cve.ID
	row[imageColumnComponent] = cve.Package
	row[imageColumnVersion] = cve.Version
	row[imageColumnImage] = cve.Image

	// if the CVE is fixed, show all the versions that fix it
	if cve.FixedState == string(v5.FixedState) {

@@ -62,12 +66,13 @@ func generateRow(cve CVE) table.Row {
}

func getImageScanningHeaders() table.Row {
	headers := make(table.Row, 5)
	headers := make(table.Row, 6)
	headers[imageColumnSeverity] = "Severity"
	headers[imageColumnName] = "Vulnerability"
	headers[imageColumnComponent] = "Component"
	headers[imageColumnVersion] = "Version"
	headers[imageColumnFixedIn] = "Fixed in"
	headers[imageColumnImage] = "Image"
	return headers
}

@@ -78,5 +83,6 @@ func getImageScanningColumnsAlignments() []table.ColumnConfig {
		{Number: 3, Align: text.AlignLeft},
		{Number: 4, Align: text.AlignLeft},
		{Number: 5, Align: text.AlignLeft},
		{Number: 6, Align: text.AlignLeft},
	}
}

@@ -28,6 +28,7 @@ func TestRenderTable(t *testing.T) {
 			Package:    "package1",
 			Version:    "1.0.0",
 			FixedState: string(v5.NotFixedState),
+			Image:      "nginx:latest",
 		},
 		{
 			ID: "CVE-2020-0002",
@@ -35,6 +36,7 @@ func TestRenderTable(t *testing.T) {
 			Package:    "package2",
 			Version:    "1.0.0",
 			FixedState: string(v5.NotFixedState),
+			Image:      "alpine:3.18",
 		},
 		{
 			ID: "CVE-2020-0003",
@@ -42,10 +44,11 @@ func TestRenderTable(t *testing.T) {
 			Package:    "package3",
 			Version:    "1.0.0",
 			FixedState: string(v5.NotFixedState),
+			Image:      "ubuntu:22.04",
 		},
 	},
 },
-want: "╭──────────┬───────────────┬───────────┬─────────┬──────────╮\n│ Severity │ Vulnerability │ Component │ Version │ Fixed in │\n├──────────┼───────────────┼───────────┼─────────┼──────────┤\n│ High │ CVE-2020-0002 │ package2 │ 1.0.0 │ │\n│ Medium │ CVE-2020-0003 │ package3 │ 1.0.0 │ │\n│ Low │ CVE-2020-0001 │ package1 │ 1.0.0 │ │\n╰──────────┴───────────────┴───────────┴─────────┴──────────╯\n",
+want: "╭──────────┬───────────────┬───────────┬─────────┬──────────┬──────────────╮\n│ Severity │ Vulnerability │ Component │ Version │ Fixed in │ Image │\n├──────────┼───────────────┼───────────┼─────────┼──────────┼──────────────┤\n│ High │ CVE-2020-0002 │ package2 │ 1.0.0 │ │ alpine:3.18 │\n│ Medium │ CVE-2020-0003 │ package3 │ 1.0.0 │ │ ubuntu:22.04 │\n│ Low │ CVE-2020-0001 │ package1 │ 1.0.0 │ │ nginx:latest │\n╰──────────┴───────────────┴───────────┴─────────┴──────────┴──────────────╯\n",
 },
 {
 	name: "check fixed CVEs show versions",
@@ -57,6 +60,7 @@ func TestRenderTable(t *testing.T) {
 			Package:    "package1",
 			Version:    "1.0.0",
 			FixedState: string(v5.NotFixedState),
+			Image:      "test:latest",
 		},
 		{
 			ID: "CVE-2020-0002",
@@ -65,10 +69,11 @@ func TestRenderTable(t *testing.T) {
 			Version:     "1.0.0",
 			FixVersions: []string{"v1", "v2"},
 			FixedState:  string(v5.FixedState),
+			Image:       "golang:1.24",
 		},
 	},
 },
-want: "╭──────────┬───────────────┬───────────┬─────────┬──────────╮\n│ Severity │ Vulnerability │ Component │ Version │ Fixed in │\n├──────────┼───────────────┼───────────┼─────────┼──────────┤\n│ High │ CVE-2020-0002 │ package2 │ 1.0.0 │ v1,v2 │\n│ Low │ CVE-2020-0001 │ package1 │ 1.0.0 │ │\n╰──────────┴───────────────┴───────────┴─────────┴──────────╯\n",
+want: "╭──────────┬───────────────┬───────────┬─────────┬──────────┬─────────────╮\n│ Severity │ Vulnerability │ Component │ Version │ Fixed in │ Image │\n├──────────┼───────────────┼───────────┼─────────┼──────────┼─────────────┤\n│ High │ CVE-2020-0002 │ package2 │ 1.0.0 │ v1,v2 │ golang:1.24 │\n│ Low │ CVE-2020-0001 │ package1 │ 1.0.0 │ │ test:latest │\n╰──────────┴───────────────┴───────────┴─────────┴──────────┴─────────────╯\n",
 },
}

@@ -121,6 +126,7 @@ func TestGenerateRows(t *testing.T) {
 			Package:    "package1",
 			Version:    "1.0.0",
 			FixedState: string(v5.NotFixedState),
+			Image:      "nginx:latest",
 		},
 		{
 			ID: "CVE-2020-0002",
@@ -128,6 +134,7 @@ func TestGenerateRows(t *testing.T) {
 			Package:    "package2",
 			Version:    "1.0.0",
 			FixedState: string(v5.NotFixedState),
+			Image:      "alpine:3.18",
 		},
 		{
 			ID: "CVE-2020-0003",
@@ -135,13 +142,14 @@ func TestGenerateRows(t *testing.T) {
 			Package:    "package3",
 			Version:    "1.0.0",
 			FixedState: string(v5.NotFixedState),
+			Image:      "ubuntu:22.04",
 		},
 	},
 },
 expectedRows: [][]string{
-	{"High", "CVE-2020-0002", "package2", "1.0.0", ""},
-	{"Medium", "CVE-2020-0003", "package3", "1.0.0", ""},
-	{"Low", "CVE-2020-0001", "package1", "1.0.0", ""},
+	{"High", "CVE-2020-0002", "package2", "1.0.0", "", "alpine:3.18"},
+	{"Medium", "CVE-2020-0003", "package3", "1.0.0", "", "ubuntu:22.04"},
+	{"Low", "CVE-2020-0001", "package1", "1.0.0", "", "nginx:latest"},
 },
 },
 {
@@ -154,6 +162,7 @@ func TestGenerateRows(t *testing.T) {
 			Package:    "package1",
 			Version:    "1.0.0",
 			FixedState: string(v5.NotFixedState),
+			Image:      "test:latest",
 		},
 		{
 			ID: "CVE-2020-0002",
@@ -162,12 +171,13 @@ func TestGenerateRows(t *testing.T) {
 			Version:     "1.0.0",
 			FixVersions: []string{"v1", "v2"},
 			FixedState:  string(v5.FixedState),
+			Image:       "golang:1.24",
 		},
 	},
 },
 expectedRows: [][]string{
-	{"High", "CVE-2020-0002", "package2", "1.0.0", "v1,v2"},
-	{"Low", "CVE-2020-0001", "package1", "1.0.0", ""},
+	{"High", "CVE-2020-0002", "package2", "1.0.0", "v1,v2", "golang:1.24"},
+	{"Low", "CVE-2020-0001", "package1", "1.0.0", "", "test:latest"},
 },
 },
}
@@ -206,8 +216,9 @@ func TestGenerateRow(t *testing.T) {
 	Version:     "1.0.0",
 	FixVersions: []string{"v1", "v2"},
 	FixedState:  string(v5.FixedState),
+	Image:       "golang:1.24",
 },
-want: []string{"High", "CVE-2020-0001", "package1", "1.0.0", "v1,v2"},
+want: []string{"High", "CVE-2020-0001", "package1", "1.0.0", "v1,v2", "golang:1.24"},
 },
 {
 	name: "check row with not fixed version",
@@ -217,8 +228,9 @@ func TestGenerateRow(t *testing.T) {
 	Package:    "package1",
 	Version:    "1.0.0",
 	FixedState: string(v5.NotFixedState),
+	Image:      "nginx:latest",
 },
-want: []string{"High", "CVE-2020-0001", "package1", "1.0.0", ""},
+want: []string{"High", "CVE-2020-0001", "package1", "1.0.0", "", "nginx:latest"},
 },
}

@@ -238,7 +250,7 @@ func TestGenerateRow(t *testing.T) {
 func TestGetImageScanningHeaders(t *testing.T) {
 	headers := getImageScanningHeaders()

-	expectedHeaders := []string{"Severity", "Vulnerability", "Component", "Version", "Fixed in"}
+	expectedHeaders := []string{"Severity", "Vulnerability", "Component", "Version", "Fixed in", "Image"}

 	for i := range headers {
 		if headers[i] != expectedHeaders[i] {
@@ -1,7 +1,7 @@
 package utils

 import (
-	"io/ioutil"
+	"io"
 	"os"
 	"testing"

@@ -127,7 +127,7 @@ func TestPrintInfo(t *testing.T) {
 {
 	name: "Critical info",
 	infoToPrintInfo: []InfoStars{
-		InfoStars{
+		{
 			Stars: "5",
 			Info:  "Critical Info",
 		},
@@ -137,11 +137,11 @@ func TestPrintInfo(t *testing.T) {
 {
 	name: "Medium and high info",
 	infoToPrintInfo: []InfoStars{
-		InfoStars{
+		{
 			Stars: "3",
 			Info:  "Medium Info",
 		},
-		InfoStars{
+		{
 			Stars: "4",
 			Info:  "High Info",
 		},
@@ -151,11 +151,11 @@ func TestPrintInfo(t *testing.T) {
 {
 	name: "Negligible and low info",
 	infoToPrintInfo: []InfoStars{
-		InfoStars{
+		{
 			Stars: "1",
 			Info:  "Negligible Info",
 		},
-		InfoStars{
+		{
 			Stars: "2",
 			Info:  "Low Info",
 		},
@@ -184,7 +184,7 @@ func TestPrintInfo(t *testing.T) {

 	// Read the contents of the temporary file
 	f.Seek(0, 0)
-	got, err := ioutil.ReadAll(f)
+	got, err := io.ReadAll(f)
 	if err != nil {
 		panic(err)
 	}

@@ -136,7 +136,7 @@ func filterCVEsBySeverities(cves []imageprinter.CVE, severities []string) []imag
 // getSortPackageScores returns a slice of package names sorted by score
 func getSortPackageScores(pkgScores map[string]*imageprinter.PackageScore) []string {
 	sortedSlice := make([]string, 0, len(pkgScores))
-	for pkgName, _ := range pkgScores {
+	for pkgName := range pkgScores {
 		sortedSlice = append(sortedSlice, pkgName)
 	}

@@ -203,8 +203,6 @@ func printTopComponents(writer *os.File, summary imageprinter.ImageScanSummary)
 	}

 	cautils.SimpleDisplay(writer, "\n")
-
-	return
 }

 func printImageScanningSummary(writer *os.File, summary imageprinter.ImageScanSummary, verboseMode bool) {
@@ -3,6 +3,7 @@ package prettyprinter
 import (
 	"os"

+	"github.com/kubescape/kubescape/v3/core/cautils"
 	"github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/configurationprinter"
 	"github.com/kubescape/kubescape/v3/core/pkg/resultshandling/printer/v2/prettyprinter/tableprinter/imageprinter"
 	"github.com/kubescape/opa-utils/reporthandling"
@@ -12,20 +13,30 @@ import (
 type WorkloadPrinter struct {
 	writer                 *os.File
 	categoriesTablePrinter configurationprinter.TablePrinter
 	imageTablePrinter      imageprinter.TablePrinter
+	verboseMode            bool
 }

-func NewWorkloadPrinter(writer *os.File) *WorkloadPrinter {
+func NewWorkloadPrinter(writer *os.File, verboseMode bool) *WorkloadPrinter {
 	return &WorkloadPrinter{
 		writer:                 writer,
 		categoriesTablePrinter: configurationprinter.NewWorkloadPrinter(),
 		imageTablePrinter:      imageprinter.NewTableWriter(),
+		verboseMode:            verboseMode,
 	}
 }

 var _ MainPrinter = &WorkloadPrinter{}

 func (wp *WorkloadPrinter) PrintImageScanning(summary *imageprinter.ImageScanSummary) {
-	printImageScanningSummary(wp.writer, *summary, false)
-	printImagesCommands(wp.writer, *summary)
+	if wp.verboseMode {
+		wp.imageTablePrinter.PrintImageScanningTable(wp.writer, *summary)
+		cautils.SimpleDisplay(wp.writer, "\n")
+	}
+	printImageScanningSummary(wp.writer, *summary, wp.verboseMode)
+	if !wp.verboseMode {
+		printImagesCommands(wp.writer, *summary)
+	}
 }

 func (wp *WorkloadPrinter) PrintNextSteps() {
@@ -122,7 +122,7 @@ func generateResourceHeader(short bool) table.Row {
 func shortFormatResource(resourceRows []table.Row) []table.Row {
 	rows := make([]table.Row, len(resourceRows))
 	for i, resourceRow := range resourceRows {
-		rows[i] = table.Row{fmt.Sprintf("Severity"+strings.Repeat(" ", 13)+": %+v\nControl Name"+strings.Repeat(" ", 9)+": %+v\nDocs"+strings.Repeat(" ", 17)+": %+v\nAssisted Remediation"+strings.Repeat(" ", 1)+": %+v", resourceRow[resourceColumnSeverity], resourceRow[resourceColumnName], resourceRow[resourceColumnURL], strings.Replace(resourceRow[resourceColumnPath].(string), "\n", "\n"+strings.Repeat(" ", 23), -1))}
+		rows[i] = table.Row{fmt.Sprintf("Severity"+strings.Repeat(" ", 13)+": %+v\nControl Name"+strings.Repeat(" ", 9)+": %+v\nDocs"+strings.Repeat(" ", 17)+": %+v\nAssisted Remediation"+strings.Repeat(" ", 1)+": %+v", resourceRow[resourceColumnSeverity], resourceRow[resourceColumnName], resourceRow[resourceColumnURL], strings.ReplaceAll(resourceRow[resourceColumnPath].(string), "\n", "\n"+strings.Repeat(" ", 23)))}
 	}
 	return rows
 }
@@ -152,7 +152,7 @@ func (sp *SARIFPrinter) printImageScan(ctx context.Context, scanResults cautils.
 		return err
 	}

-	return os.WriteFile(sp.writer.Name(), updatedSarifReport, os.ModePerm)
+	return os.WriteFile(sp.writer.Name(), updatedSarifReport, 0644) //nolint:gosec // Read-only report output, acceptable permissions
 }

 func (sp *SARIFPrinter) PrintNextSteps() {
@@ -519,7 +519,7 @@ func formReplaceFixedYamlString(node cautils.MappingNode, fileAsString string, l
 		yamlLines[location.Line] = yamlLines[location.Line] + " # This is the suggested modification, the value for " + fixPath + " is " + fixValue + "\n"
 	} else {
 		replacedLine := "# This is the suggested modification\n" + yamlLines[location.Line]
-		newLine := strings.Replace(replacedLine, replcaedValue, fixValue, -1)
+		newLine := strings.ReplaceAll(replacedLine, replcaedValue, fixValue)
 		yamlLines[location.Line] = newLine
 	}
 	fixedYamlString := strings.Join(yamlLines, "\n")
@@ -289,7 +289,7 @@ func setPkgNameToScoreMap(matches match.Matches, pkgScores map[string]*imageprin
 	}
 }

-func extractCVEs(matches match.Matches) []imageprinter.CVE {
+func extractCVEs(matches match.Matches, image string) []imageprinter.CVE {
 	var CVEs []imageprinter.CVE
 	for _, m := range matches.Sorted() {
 		cve := imageprinter.CVE{
@@ -299,6 +299,7 @@ func extractCVEs(matches match.Matches) []imageprinter.CVE {
 			Version:     m.Package.Version,
 			FixVersions: m.Vulnerability.Fix.Versions,
 			FixedState:  m.Vulnerability.Fix.State.String(),
+			Image:       image,
 		}
 		CVEs = append(CVEs, cve)
 	}
@@ -17,6 +17,7 @@ func TestExtractCVEs(t *testing.T) {
 	tests := []struct {
 		name    string
 		matches match.Matches
+		image   string
 		want    []imageprinter.CVE
 	}{
 		{
@@ -40,6 +41,7 @@ func TestExtractCVEs(t *testing.T) {
 			},
 		},
 	}...),
+	image: "nginx:latest",
 	want: []imageprinter.CVE{
 		{
 			ID: "CVE-2020-1234",
@@ -48,6 +50,7 @@ func TestExtractCVEs(t *testing.T) {
 			Version:     "1.2.3",
 			FixVersions: []string{"1.2.3"},
 			FixedState:  "Fixed",
+			Image:       "nginx:latest",
 		},
 	},
 },
@@ -106,6 +109,7 @@ func TestExtractCVEs(t *testing.T) {
 			},
 		},
 	}...),
+	image: "golang:1.24.6",
 	want: []imageprinter.CVE{
 		{
 			ID: "CVE-2020-1234",
@@ -114,6 +118,7 @@ func TestExtractCVEs(t *testing.T) {
 			Version:     "1.2.3",
 			FixVersions: []string{"1.2.3"},
 			FixedState:  "Fixed",
+			Image:       "golang:1.24.6",
 		},
 		{
 			ID: "CVE-2020-1235",
@@ -122,6 +127,7 @@ func TestExtractCVEs(t *testing.T) {
 			Version:     "1",
 			FixVersions: []string{"1"},
 			FixedState:  "Fixed",
+			Image:       "golang:1.24.6",
 		},
 		{
 			ID: "CVE-2020-1236",
@@ -130,19 +136,21 @@ func TestExtractCVEs(t *testing.T) {
 			Version:     "3",
 			FixVersions: []string{"2", "3", "4"},
 			FixedState:  "Not fixed",
+			Image:       "golang:1.24.6",
 		},
 	},
 },
 {
 	name:    "empty vulns",
 	matches: match.NewMatches([]match.Match{}...),
+	image:   "test:latest",
 	want:    []imageprinter.CVE{},
 },
}

 for _, tt := range tests {
 	t.Run(tt.name, func(t *testing.T) {
-		actual := extractCVEs(tt.matches)
+		actual := extractCVEs(tt.matches, tt.image)
 		if len(actual) != len(tt.want) {
 			t.Errorf("extractCVEs() = %v, want %v", actual, tt.want)
 		}
@@ -170,6 +178,9 @@ func TestExtractCVEs(t *testing.T) {
 				t.Errorf("extractCVEs() = %v, want %v", actual, tt.want)
 			}
 		}
+		if actual[i].Image != tt.want[i].Image {
+			t.Errorf("extractCVEs() image = %v, want %v", actual[i].Image, tt.want[i].Image)
+		}
 	}
 	})
 }
docs/optimization-plan.md (new file, 748 lines)
@@ -0,0 +1,748 @@
# Kubescape CPU/Memory Optimization Plan

**Issue:** #1793 - High CPU and Memory Usage on System-Constrained Environments
**Date:** February 3, 2026
**Root Cause Analysis:** Completed
**Proposed Solution:** Combined optimization approach across multiple components

---

## Executive Summary

Investigation into issue #1793 revealed that the original worker pool proposal addressed the symptoms but not the root causes. The actual sources of resource exhaustion are:

- **Memory:** Unbounded data structures loading the entire cluster state into memory
- **CPU:** Repeated expensive operations (OPA compilation) and nested loop complexity

This document outlines a phased approach to reduce memory usage by 40-60% and CPU usage by 30-50%.

---

## Root Cause Analysis

### Memory Hotspots

1. **AllResources Map** (`core/cautils/datastructures.go:53`)
   - Loads ALL Kubernetes resources into memory at once
   - No pre-sizing, so the map is reallocated repeatedly as it grows (illustrated below)
   - Contains every pod, deployment, service, etc. in the cluster
   - **Impact:** Hundreds of MBs to several GBs for large clusters

2. **ResourcesResult Map** (`core/cautils/datastructures.go:54`)
   - Stores scan results for every resource
   - Grows dynamically without capacity hints
   - **Impact:** Proportional to the number of resources scanned

3. **Temporary Data Structures**
   - Nested loops create temporary slices in `getKubernetesObjects`
   - Repeated allocation per rule evaluation
   - **Impact:** Memory churn and GC pressure
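
The reallocation cost is easy to demonstrate. The benchmark below is an illustrative sketch (synthetic sizes, not Kubescape code, placed in any `_test.go` file): the unsized map is rehashed several times as it grows, while the capacity hint allocates its buckets once.

```go
package cautils

import "testing"

// BenchmarkMapGrowUnsized grows a map with no capacity hint; the runtime
// rehashes and reallocates buckets repeatedly as the map crosses size
// thresholds.
func BenchmarkMapGrowUnsized(b *testing.B) {
	for n := 0; n < b.N; n++ {
		m := make(map[int]struct{})
		for i := 0; i < 10000; i++ {
			m[i] = struct{}{}
		}
	}
}

// BenchmarkMapGrowPresized passes the final size up front, so buckets are
// allocated once and no rehashing occurs during the fill.
func BenchmarkMapGrowPresized(b *testing.B) {
	for n := 0; n < b.N; n++ {
		m := make(map[int]struct{}, 10000)
		for i := 0; i < 10000; i++ {
			m[i] = struct{}{}
		}
	}
}
```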

### CPU Hotspots

1. **OPA Module Compilation** (`core/pkg/opaprocessor/processorhandler.go:324-330`)
   - A comment in the code explicitly states: *"OPA module compilation is the most resource-intensive operation"*
   - Compiles EVERY rule from scratch (no caching)
   - Typical scan: ~100 controls × 5 rules = 500+ compilations
   - **Impact:** High CPU, repeated compilation overhead

2. **6-Level Nested Loops** (`core/pkg/opaprocessor/processorhandlerutils.go:136-167`)
   - Creates temporary data structures for each rule
   - Iterates all matched resources multiple times
   - **Impact:** O(n×m×...) complexity

3. **O(n) Slice Operations**
   - `slices.Contains()` for deduplication in image scanning
   - `RelatedResourcesIDs` slice growth with O(n) membership checks
   - **Impact:** Degraded performance with larger datasets

### Codebase Evidence

The team is already aware of this issue; a comment in the codebase acknowledges the problem:

```go
// isLargeCluster returns true if the cluster size is larger than the largeClusterSize
// This code is a workaround for large clusters. The final solution will be to scan resources individually
// Source: core/pkg/opaprocessor/processorhandlerutils.go:279
```

---

## Proposed Solutions: Six-Phase Implementation

### Phase 1: OPA Module Caching

**Objective:** Eliminate redundant rule compilations

**Files Modified:**
- `core/pkg/opaprocessor/processorhandler.go`
- `core/pkg/opaprocessor/processorhandler_test.go`

**Changes:**
```go
type OPAProcessor struct {
	// existing fields...
	compiledModules map[string]*ast.Compiler
	compiledMu      sync.RWMutex
}

func (opap *OPAProcessor) getCompiledRule(ctx context.Context, rule reporthandling.Rule, modules map[string]string) (*ast.Compiler, error) {
	// Check cache with read lock
	cacheKey := rule.Name + "|" + rule.Rule
	opap.compiledMu.RLock()
	if compiled, ok := opap.compiledModules[cacheKey]; ok {
		opap.compiledMu.RUnlock()
		return compiled, nil
	}
	opap.compiledMu.RUnlock()

	// Compile new module with write lock
	opap.compiledMu.Lock()
	defer opap.compiledMu.Unlock()

	// Double-check pattern (cache might have been filled)
	if compiled, ok := opap.compiledModules[cacheKey]; ok {
		return compiled, nil
	}

	compiled, err := ast.CompileModulesWithOpt(modules, ast.CompileOpts{
		EnablePrintStatements: opap.printEnabled,
		ParserOptions:         ast.ParserOptions{RegoVersion: ast.RegoV0},
	})
	if err != nil {
		return nil, fmt.Errorf("failed to compile rule '%s': %w", rule.Name, err)
	}

	// Lazily initialize the cache so a zero-value processor cannot panic
	// on a nil map write
	if opap.compiledModules == nil {
		opap.compiledModules = make(map[string]*ast.Compiler)
	}
	opap.compiledModules[cacheKey] = compiled
	return compiled, nil
}
```

**Integration Point:** Replace the direct compilation call in `runRegoOnK8s` (around line 338) with the cached retrieval
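
For illustration, the call site would look roughly like the sketch below. The surrounding names (`evalRule`, the `"data"` query, the input object) are assumptions about the processor's internals, not verbatim Kubescape code; the point is only that the per-rule `ast.CompileModulesWithOpt` call is replaced by the cached lookup and the resulting `*ast.Compiler` is handed to rego via the `rego.Compiler` option.

```go
// Hypothetical call-site change inside the OPA processor.
func (opap *OPAProcessor) evalRule(ctx context.Context, rule reporthandling.Rule, modules map[string]string, input interface{}) (rego.ResultSet, error) {
	compiled, err := opap.getCompiledRule(ctx, rule, modules) // cached (Phase 1)
	if err != nil {
		return nil, err
	}
	r := rego.New(
		rego.Query("data"),      // query string is an assumption
		rego.Compiler(compiled), // reuse the cached compiler instead of recompiling
		rego.Input(input),
	)
	return r.Eval(ctx)
}
```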

**Testing:**
- Unit test: Verify cache hit for identical rules
- Unit test: Verify cache miss for different rules
- Integration test: Measure scan time before/after

**Expected Savings:** 30-40% CPU reduction

**Risk:** Low - caching is a well-known pattern, minimal behavior change

**Dependencies:** None

---

### Phase 2: Map Pre-sizing

**Objective:** Reduce memory allocations and fragmentation

**Files Modified:**
- `core/cautils/datastructures.go`
- `core/cautils/datastructures_test.go`
- `core/pkg/resourcehandler/handlerpullresources.go`
- `core/pkg/resourcehandler/k8sresources.go`

**Changes:**

1. Update the constructor to pre-size maps (a sketch of `estimateClusterSize` follows the block):
```go
func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework, k8sResources K8SResources, scanInfo *ScanInfo) *OPASessionObj {
	clusterSize := estimateClusterSize(k8sResources)
	if clusterSize < 100 {
		clusterSize = 100
	}
	return &OPASessionObj{
		AllResources:    make(map[string]workloadinterface.IMetadata, clusterSize),
		ResourcesResult: make(map[string]resourcesresults.Result, clusterSize),
		// ... other pre-sized collections
	}
}
```
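
`estimateClusterSize` is referenced above but not defined in this plan. A minimal sketch, assuming `K8SResources` maps group/version/resource strings to slices of resource IDs (its current shape):

```go
// estimateClusterSize sums the resource IDs collected per GVR to get a
// capacity hint for the session maps.
func estimateClusterSize(k8sResources K8SResources) int {
	total := 0
	for _, ids := range k8sResources {
		total += len(ids)
	}
	return total
}
```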

2. Update resource collection to return a count:
```go
func (k8sHandler *K8sResourceHandler) pullResources(queryableResources QueryableResources, ...) (K8SResources, map[string]workloadinterface.IMetadata, map[string]workloadinterface.IMetadata, map[string]map[string]bool, int, error) {
	// ... existing code ...
	return k8sResources, allResources, externalResources, excludedRulesMap, estimatedCount, nil
}
```

3. Pass the size during initialization:
```go
func CollectResources(ctx context.Context, rsrcHandler IResourceHandler, opaSessionObj *cautils.OPASessionObj, ...) error {
	resourcesMap, allResources, externalResources, excludedRulesMap, estimatedCount, err := rsrcHandler.GetResources(ctx, opaSessionObj, scanInfo)

	// Re-initialize with the proper size (constructor shown schematically)
	if opaSessionObj.AllResources == nil {
		opaSessionObj = cautils.NewOPASessionObj(estimatedCount)
	}

	opaSessionObj.K8SResources = resourcesMap
	opaSessionObj.AllResources = allResources
	// ...
}
```

**Testing:**
- Unit test: Verify pre-sized maps with expected content
- Performance test: Compare memory usage before/after
- Integration test: Scan with varying cluster sizes

**Expected Savings:** 10-20% memory reduction, reduced GC pressure

**Risk:** Low - Go's make() with capacity hint is well-tested

**Dependencies:** None

---

### Phase 3: Set-based Deduplication

**Objective:** Replace O(n) slice operations with O(1) set operations

**Files Modified:**
- `core/pkg/utils/dedup.go` (new file)
- `core/core/scan.go`
- `core/pkg/opaprocessor/processorhandler.go`

**Changes:**

1. Create a new utility:
```go
// core/pkg/utils/dedup.go
package utils

import "sync"

type StringSet struct {
	items map[string]struct{}
	mu    sync.RWMutex
}

func NewStringSet() *StringSet {
	return &StringSet{
		items: make(map[string]struct{}),
	}
}

func (s *StringSet) Add(item string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.items[item] = struct{}{}
}

func (s *StringSet) AddAll(items []string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	for _, item := range items {
		s.items[item] = struct{}{}
	}
}

func (s *StringSet) Contains(item string) bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	_, ok := s.items[item]
	return ok
}

func (s *StringSet) ToSlice() []string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	result := make([]string, 0, len(s.items))
	for item := range s.items {
		result = append(result, item)
	}
	return result
}
```

2. Update image scanning (`core/core/scan.go:249`):
```go
func scanImages(scanType cautils.ScanTypes, scanData *cautils.OPASessionObj, ...) {
	imagesToScan := utils.NewStringSet()

	for _, workload := range scanData.AllResources {
		containers, err := workloadinterface.NewWorkloadObj(workload.GetObject()).GetContainers()
		if err != nil {
			logger.L().Error(...)
			continue
		}
		for _, container := range containers {
			// Add is idempotent, so no Contains check is needed
			imagesToScan.Add(container.Image)
		}
	}

	// Use imagesToScan.ToSlice() for iteration
}
```

3. Update related resources (`core/pkg/opaprocessor/processorhandler.go:261`):
```go
relatedResourcesIDs := utils.NewStringSet()

// Inside the loop
if !relatedResourcesIDs.Contains(wl.GetID()) {
	relatedResourcesIDs.Add(wl.GetID())
	// ... process related resource
}
```

**Testing:**
- Unit tests for StringSet operations
- Benchmark tests comparing slice.Contains vs set.Contains (see the sketch below)
- Integration tests with real scan scenarios
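
A minimal version of that benchmark, assuming the `StringSet` above lives in package `utils` (data and sizes are synthetic):

```go
package utils

import (
	"fmt"
	"slices"
	"testing"
)

func members(n int) []string {
	out := make([]string, n)
	for i := range out {
		out[i] = fmt.Sprintf("item-%d", i)
	}
	return out
}

// Worst case for the slice: the needle is the last element, so every
// lookup walks all n entries.
func BenchmarkSliceContains(b *testing.B) {
	items := members(5000)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = slices.Contains(items, "item-4999")
	}
}

// The set answers the same query with a single hash lookup.
func BenchmarkSetContains(b *testing.B) {
	s := NewStringSet()
	s.AddAll(members(5000))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_ = s.Contains("item-4999")
	}
}
```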

**Expected Savings:** 5-10% CPU reduction for large clusters

**Risk:** Low - thread-safe set implementation, minimal behavior change

**Dependencies:** None

---

### Phase 4: Cache getKubernetesObjects

**Objective:** Eliminate repeated computation of resource groupings

**Files Modified:**
- `core/pkg/opaprocessor/processorhandler.go`
- `core/pkg/opaprocessor/processorhandlerutils.go`
- `core/pkg/opaprocessor/processorhandler_test.go`

**Changes:**

1. Add a cache to the processor:
```go
type OPAProcessor struct {
	// existing fields...
	k8sObjectsCache map[string]map[string][]workloadinterface.IMetadata
	k8sObjectsMu    sync.RWMutex
}
```

2. Add cache key generation:
```go
func (opap *OPAProcessor) getCacheKey(match []reporthandling.RuleMatchObjects) string {
	var keys []string // named "keys" so the strings package is not shadowed
	for _, m := range match {
		for _, group := range m.APIGroups {
			for _, version := range m.APIVersions {
				for _, resource := range m.Resources {
					keys = append(keys, fmt.Sprintf("%s/%s/%s", group, version, resource))
				}
			}
		}
	}
	sort.Strings(keys)
	return strings.Join(keys, "|")
}
```

3. Wrap getKubernetesObjects with caching:
```go
func (opap *OPAProcessor) getKubernetesObjectsCached(k8sResources cautils.K8SResources, match []reporthandling.RuleMatchObjects) map[string][]workloadinterface.IMetadata {
	cacheKey := opap.getCacheKey(match)

	// Try cache
	opap.k8sObjectsMu.RLock()
	if cached, ok := opap.k8sObjectsCache[cacheKey]; ok {
		opap.k8sObjectsMu.RUnlock()
		return cached
	}
	opap.k8sObjectsMu.RUnlock()

	// Compute new value
	result := getKubernetesObjects(k8sResources, opap.AllResources, match)

	// Store in cache (lazily initialized to avoid a nil map write)
	opap.k8sObjectsMu.Lock()
	if opap.k8sObjectsCache == nil {
		opap.k8sObjectsCache = make(map[string]map[string][]workloadinterface.IMetadata)
	}
	opap.k8sObjectsCache[cacheKey] = result
	opap.k8sObjectsMu.Unlock()

	return result
}
```

**Testing:**
- Unit test: Verify cache correctness
- Benchmark: Compare execution time with/without cache
- Integration test: Measure scan time on a large cluster

**Expected Savings:** 10-15% CPU reduction

**Risk:** Low-Medium - would normally need cache invalidation logic, but resources are static for the duration of a scan, so none is required in practice

**Dependencies:** None

---

### Phase 5: Resource Streaming

**Objective:** Process resources in batches instead of loading all at once

**Files Modified:**
- `core/pkg/resourcehandler/k8sresources.go`
- `core/pkg/resourcehandler/interface.go`
- `core/pkg/resourcehandler/filesloader.go`
- `core/pkg/opaprocessor/processorhandler.go`
- `cmd/scan/scan.go`

**Changes:**

1. Add a streaming interface:
```go
// core/pkg/resourcehandler/interface.go
type IResourceHandler interface {
	GetResources(...) (...)
	StreamResources(ctx context.Context, batchSize int) (<-chan workloadinterface.IMetadata, error)
}
```

2. Implement streaming for Kubernetes resources:
```go
func (k8sHandler *K8sResourceHandler) StreamResources(ctx context.Context, batchSize int) (<-chan workloadinterface.IMetadata, error) {
	ch := make(chan workloadinterface.IMetadata, batchSize)

	go func() {
		defer close(ch)

		queryableResources := k8sHandler.getQueryableResources()

		for i := range queryableResources {
			select {
			case <-ctx.Done():
				return
			default:
				apiGroup, apiVersion, resource := k8sinterface.StringToResourceGroup(queryableResources[i].GroupVersionResourceTriplet)
				gvr := schema.GroupVersionResource{Group: apiGroup, Version: apiVersion, Resource: resource}

				result, err := k8sHandler.pullSingleResource(&gvr, nil, queryableResources[i].FieldSelectors, nil)
				if err != nil {
					continue
				}

				metaObjs := ConvertMapListToMeta(k8sinterface.ConvertUnstructuredSliceToMap(result))

				for _, metaObj := range metaObjs {
					select {
					case ch <- metaObj:
					case <-ctx.Done():
						return
					}
				}
			}
		}
	}()

	return ch, nil
}
```

3. Update the OPA processor to handle streaming:
```go
func (opap *OPAProcessor) ProcessWithStreaming(ctx context.Context, policies *cautils.Policies, resourceStream <-chan workloadinterface.IMetadata, batchSize int) error {
	batch := make([]workloadinterface.IMetadata, 0, batchSize)
	opaSessionObj := cautils.NewOPASessionObj(batchSize)

	// Collect batches
	done := false
	for !done {
		select {
		case resource, ok := <-resourceStream:
			if !ok {
				done = true
				break
			}
			batch = append(batch, resource)

			if len(batch) >= batchSize {
				opaSessionObj.AllResources = batchToMap(batch) // batchToMap: see the sketch below
				if err := opap.ProcessBatch(ctx, policies); err != nil {
					return err
				}
				batch = batch[:0] // Clear the batch, keeping its capacity
			}
		case <-ctx.Done():
			return ctx.Err()
		}
	}

	// Process the remaining partial batch
	if len(batch) > 0 {
		opaSessionObj.AllResources = batchToMap(batch)
		if err := opap.ProcessBatch(ctx, policies); err != nil {
			return err
		}
	}

	return nil
}
```
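
`batchToMap` is used above but not defined in this plan. A minimal sketch, assuming the session map is keyed by resource ID, as `AllResources` is today:

```go
// batchToMap converts a batch slice into the map shape AllResources uses.
func batchToMap(batch []workloadinterface.IMetadata) map[string]workloadinterface.IMetadata {
	m := make(map[string]workloadinterface.IMetadata, len(batch))
	for _, r := range batch {
		m[r.GetID()] = r
	}
	return m
}
```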

4. Add CLI flags:
```go
// cmd/scan/scan.go
scanCmd.PersistentFlags().BoolVar(&scanInfo.StreamMode, "stream-resources", false, "Process resources in batches (lower memory, slightly slower)")
scanCmd.PersistentFlags().IntVar(&scanInfo.StreamBatchSize, "stream-batch-size", 100, "Batch size for resource streaming (lower = less memory)")
```

5. Auto-enable for large clusters:
```go
func shouldEnableStreaming(scanInfo *cautils.ScanInfo, estimatedClusterSize int) bool {
	if scanInfo.StreamMode {
		return true
	}

	largeClusterSize, _ := cautils.ParseIntEnvVar("LARGE_CLUSTER_SIZE", 2500)
	if estimatedClusterSize > largeClusterSize {
		logger.L().Info("Large cluster detected, enabling streaming mode")
		return true
	}

	return false
}
```

**Testing:**
- Unit test: Verify streaming produces the same results as batch mode
- Performance test: Compare memory usage on a large cluster
- Integration test: Test with various batch sizes
- End-to-end test: Verify scan results match existing behavior

**Expected Savings:** 30-50% memory reduction for large clusters

**Risk:** Medium - significant behavior change, needs thorough testing

**Dependencies:** Phase 2 (map pre-sizing)

---

### Phase 6: Early Cleanup

**Objective:** Free memory promptly after resources are processed

**Files Modified:**
- `core/pkg/opaprocessor/processorhandler.go`
- `core/pkg/opaprocessor/processorhandlerutils.go`

**Changes:**

```go
func (opap *OPAProcessor) Process(ctx context.Context, policies *cautils.Policies, progressListener IJobProgressNotificationClient) error {
	resourcesRemaining := make(map[string]bool)
	for id := range opap.AllResources {
		resourcesRemaining[id] = true
	}

	for _, toPin := range policies.Controls {
		control := toPin

		resourcesAssociatedControl, err := opap.processControl(ctx, &control)
		if err != nil {
			logger.L().Ctx(ctx).Warning(err.Error())
		}

		// Clean up processed resources if not needed for future controls
		if len(policies.Controls) > 10 && !isLargeCluster(len(opap.AllResources)) {
			for id := range resourcesAssociatedControl {
				if resourcesRemaining[id] {
					delete(resourcesRemaining, id)

					// Remove from AllResources
					if resource, ok := opap.AllResources[id]; ok {
						removeData(resource)
						delete(opap.AllResources, id)
					}
				}
			}
		}
	}

	return nil
}
```

**Testing:**
- Unit test: Verify cleanup doesn't affect scan results
- Memory test: Verify memory decreases during the scan
- Integration test: Test with policies that reference the same resources

**Expected Savings:** 10-20% memory reduction, reduced peak memory usage

**Risk:** Medium - needs careful tracking of which resources are still needed

**Dependencies:** Phase 5 (resource streaming)

---

## Implementation Timeline

### Iteration 1 (Quick Wins)
- **Week 1:** Phase 1 - OPA Module Caching
- **Week 1:** Phase 2 - Map Pre-sizing
- **Week 2:** Phase 3 - Set-based Deduplication

### Iteration 2 (Mid-Term)
- **Week 3:** Phase 4 - Cache getKubernetesObjects

### Iteration 3 (Long-Term)
- **Weeks 4-5:** Phase 5 - Resource Streaming
- **Week 6:** Phase 6 - Early Cleanup

### Total Duration: 6 weeks

---

## Risk Assessment

| Phase | Risk Level | Mitigation Strategy |
|-------|------------|---------------------|
| 1 - OPA Caching | Low | Comprehensive unit tests, fallback to uncached mode |
| 2 - Map Pre-sizing | Low | Backward compatible, capacity hints are safe |
| 3 - Set Dedup | Low | Thread-safe implementation, comprehensive tests |
| 4 - getKubernetesObjects Cache | Low-Medium | Cache key validation; invalidation not needed during a scan |
| 5 - Streaming | Medium | Feature flag (disabled by default), extensive integration tests |
| 6 - Early Cleanup | Medium | Track resource dependencies, thorough validation |

---

## Performance Targets

### Memory Usage
- **Current (Large Cluster >2500 resources):** ~2-4 GB
- **Target:** ~1-2 GB (50% reduction)

### CPU Usage
- **Current:** High peaks during OPA evaluation
- **Target:** 30-50% reduction in peak CPU

### Scan Time
- **Expected:** Neutral to a slight improvement (streaming may add 5-10% overhead on small clusters; large clusters benefit from reduced GC)

---

## CLI Flags (Phase 5)

```bash
# Manual streaming mode
kubescape scan framework all --stream-resources --stream-batch-size 50

# Auto-detection (default)
kubescape scan framework all # Automatically enables streaming for large clusters

# Environment variable
export KUBESCAPE_STREAM_BATCH_SIZE=100
```

---

## Backward Compatibility

All changes are backward compatible:

1. Default behavior is unchanged for small clusters (<2500 resources)
2. Streaming mode requires an explicit flag or auto-detection
3. Cache changes are transparent to users
4. No breaking API changes

---

## Dependencies on External Packages

- `github.com/open-policy-agent/opa/ast` - OPA compilation (Phase 1)
- `github.com/kubescape/opa-utils` - Existing dependencies maintained

No new external dependencies are required.

---

## Testing Strategy

### Unit Tests
- Each phase includes comprehensive unit tests
- Mock-based testing for components without external dependencies
- Property-based testing where applicable

### Integration Tests
- End-to-end scan validation
- Test clusters of varying sizes (100, 1000, 5000 resources)
- Validate identical results with and without optimizations

### Performance Tests
- Benchmark suite before/after each phase
- Memory profiling (pprof) for memory validation (see the sketch below)
- CPU profiling for CPU validation
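
One way to hook the memory validation into the benchmark suite is an in-process heap snapshot; the sketch below uses only the standard library (`writeHeapProfile` and where it gets called from are assumptions, not existing Kubescape helpers):

```go
package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

// writeHeapProfile dumps the current heap to a file that can be inspected
// with `go tool pprof`.
func writeHeapProfile(path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	runtime.GC() // flush pending frees so the profile reflects live memory
	return pprof.WriteHeapProfile(f)
}
```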

### Regression Tests
- Compare scan results before/after all phases
- Validate that all controls produce identical findings
- Test across different Kubernetes versions

---

## Success Criteria

1. **CPU Usage:** ≥30% reduction in peak CPU during scanning (measured with profiling)
2. **Memory Usage:** ≥40% reduction in peak memory for clusters >2500 resources
3. **Functional Correctness:** 100% of control findings identical to the current implementation
4. **Scan Time:** No degradation >15% on small clusters; improvement on large clusters
5. **Stability:** Zero new race conditions or panics in production-style testing

---

## Alternative Approaches Considered

### Alternative 1: Worker Pool (Original #1793 Proposal)
- **Problem:** Addresses symptoms (concurrency), not root causes (data structures)
- **Conclusion:** Rejected - would not solve memory accumulation

### Alternative 2: Offload to Managed Service
- **Problem:** Shifts the problem to infrastructure, doesn't solve the core architecture
- **Conclusion:** Not appropriate for a CLI tool use case

### Alternative 3: External Database for State
- **Problem:** Adds complexity, requires additional dependencies
- **Conclusion:** Overkill for single-scan operations

---

## Open Questions

1. **Cache Eviction Policy:** Should the OPA module cache expire after N scans? (Current: process-scoped)
2. **Batch Size Tuning:** What default batch size balances memory vs. performance? (Proposed: 100)
3. **Early Cleanup Threshold:** What minimum control count enables early cleanup? (Proposed: 10)
4. **Large Cluster Threshold:** Keep the existing 2500 or adjust based on optimization results?

---

## Recommendations

1. **Start with Phases 1-4** (low risk, good ROI) for immediate improvement
2. **Evaluate Phases 5-6** based on the actual memory gains from the earlier phases
3. **Add monitoring** to track real-world resource usage after deployment
4. **Consider making streaming opt-in** initially, then opt-out after validation

---

## Appendix: Key Code Locations

| Component | File | Line | Notes |
|-----------|------|------|-------|
| AllResources initialization | `core/cautils/datastructures.go` | 80-81 | Map pre-sizing target |
| OPA compilation | `core/pkg/opaprocessor/processorhandler.go` | 324-330 | Most CPU-intensive operation |
| getKubernetesObjects | `core/pkg/opaprocessor/processorhandlerutils.go` | 136-167 | 6-level nested loops |
| Resource collection | `core/pkg/resourcehandler/k8sresources.go` | 313-355 | Loads all resources |
| Image deduplication | `core/core/scan.go` | 249 | O(n) slices.Contains |
| Throttle package (unused) | `core/pkg/throttle/throttle.go` | - | Could be repurposed |

---

**Document Version:** 1.0
**Prepared by:** Code Investigation Team
**Review Status:** Awaiting stakeholder approval
go.mod (9 changed lines)
@@ -9,12 +9,13 @@ require (
 	github.com/anchore/stereoscope v0.1.9
 	github.com/anchore/syft v1.32.0
 	github.com/anubhav06/copa-grype v1.0.3-alpha.1
-	github.com/armosec/armoapi-go v0.0.562
+	github.com/armosec/armoapi-go v0.0.667
 	github.com/armosec/utils-go v0.0.58
 	github.com/armosec/utils-k8s-go v0.0.30
 	github.com/briandowns/spinner v1.23.2
 	github.com/chainguard-dev/git-urls v1.0.2
 	github.com/containerd/platforms v1.0.0-rc.2
+	github.com/deckarep/golang-set/v2 v2.8.0
 	github.com/distribution/reference v0.6.0
 	github.com/docker/buildx v0.30.1
 	github.com/docker/cli v29.0.3+incompatible
@@ -31,7 +32,7 @@ require (
 	github.com/kubescape/backend v0.0.20
 	github.com/kubescape/go-git-url v0.0.31
 	github.com/kubescape/go-logger v0.0.25
-	github.com/kubescape/k8s-interface v0.0.195
+	github.com/kubescape/k8s-interface v0.0.202
 	github.com/kubescape/opa-utils v0.0.288
 	github.com/kubescape/rbac-utils v0.0.21-0.20230806101615-07e36f555520
 	github.com/kubescape/regolibrary/v2 v2.0.1
@@ -470,7 +471,7 @@ require (
 	github.com/sigstore/protobuf-specs v0.5.0 // indirect
 	github.com/sigstore/rekor v1.5.0 // indirect
 	github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect
-	github.com/sigstore/sigstore v1.10.3 // indirect
+	github.com/sigstore/sigstore v1.10.4 // indirect
 	github.com/sigstore/sigstore-go v1.1.4 // indirect
 	github.com/sigstore/timestamp-authority/v2 v2.0.4 // indirect
 	github.com/skeema/knownhosts v1.3.1 // indirect
@@ -492,7 +493,7 @@ require (
 	github.com/thales-e-security/pool v0.0.2 // indirect
 	github.com/therootcompany/xz v1.0.1 // indirect
 	github.com/theupdateframework/go-tuf v0.7.0 // indirect
-	github.com/theupdateframework/go-tuf/v2 v2.3.1 // indirect
+	github.com/theupdateframework/go-tuf/v2 v2.4.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/tjfoc/gmsm v1.4.1 // indirect
 	github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323 // indirect
go.sum (18 changed lines)
@@ -874,8 +874,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
 github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/armosec/armoapi-go v0.0.562 h1:Ks8XHfD1WgqKGriSC/XVgBKQf80TNORGQp8De2EFj5g=
-github.com/armosec/armoapi-go v0.0.562/go.mod h1:/j4fBpolPI6U2PndLDlfjBsf0aRvcp4SQwra4LMOaz4=
+github.com/armosec/armoapi-go v0.0.667 h1:LrFowKvthnL676Gx+hjhvqP4pQ2+CjykFO9SdIYDc/c=
+github.com/armosec/armoapi-go v0.0.667/go.mod h1:9jAH0g8ZsryhiBDd/aNMX4+n10bGwTx/doWCyyjSxts=
 github.com/armosec/gojay v1.2.17 h1:VSkLBQzD1c2V+FMtlGFKqWXNsdNvIKygTKJI9ysY8eM=
 github.com/armosec/gojay v1.2.17/go.mod h1:vuvX3DlY0nbVrJ0qCklSS733AWMoQboq3cFyuQW9ybc=
 github.com/armosec/utils-go v0.0.58 h1:g9RnRkxZAmzTfPe2ruMo2OXSYLwVSegQSkSavOfmaIE=
@@ -1121,6 +1121,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/deckarep/golang-set/v2 v2.8.0 h1:swm0rlPCmdWn9mESxKOjWk8hXSqoxOp+ZlfuyaAdFlQ=
+github.com/deckarep/golang-set/v2 v2.8.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
 github.com/deitch/magic v0.0.0-20240306090643-c67ab88f10cb h1:4W/2rQ3wzEimF5s+J6OY3ODiQtJZ5W1sForSgogVXkY=
@@ -1791,8 +1793,8 @@ github.com/kubescape/go-git-url v0.0.31 h1:VZnvtdGLVc42cQaR7llQeGZz0PnOxcs+eDig2
 github.com/kubescape/go-git-url v0.0.31/go.mod h1:3ddc1HEflms1vMhD9owt/3FBES070UaYTUarcjx8jDk=
 github.com/kubescape/go-logger v0.0.25 h1:Bi6F0856LOlvjrbSKD+ZtKKzbfRXDifhVCjK8s3kI6U=
 github.com/kubescape/go-logger v0.0.25/go.mod h1:lk+R5/lAVJo4AgD4eYUJJfVTHf7ZChS73X1MFFbeInY=
-github.com/kubescape/k8s-interface v0.0.195 h1:pJ1PT3x3fd1WatLjyZbKAfE64PWtEbvxiFjOBKSBwuU=
-github.com/kubescape/k8s-interface v0.0.195/go.mod h1:j9snZbH+RxOaa1yG/bWgTClj90q7To0rGgQepxy4b+k=
+github.com/kubescape/k8s-interface v0.0.202 h1:yu9x+07crFQAgrBatFFU2WuuxMJfHUMHVuCzuHE9Q4M=
+github.com/kubescape/k8s-interface v0.0.202/go.mod h1:d4NVhL81bVXe8yEXlkT4ZHrt3iEppEIN39b8N1oXm5s=
 github.com/kubescape/opa-utils v0.0.288 h1:X6kebUaVrM/fZt+XmeRw1mVJYkIrkyBdcbmjBs66gSc=
 github.com/kubescape/opa-utils v0.0.288/go.mod h1:9ZmBd4xni0OLffuvcp4fKMmBo/glvgbwkCY5zggIKSw=
 github.com/kubescape/rbac-utils v0.0.21-0.20230806101615-07e36f555520 h1:SqlwF8G+oFazeYmZQKoPczLEflBQpwpHCU8DoLLyfj8=
@@ -2237,8 +2239,8 @@ github.com/sigstore/rekor v1.5.0 h1:rL7SghHd5HLCtsCrxw0yQg+NczGvM75EjSPPWuGjaiQ=
 github.com/sigstore/rekor v1.5.0/go.mod h1:D7JoVCUkxwQOpPDNYeu+CE8zeBC18Y5uDo6tF8s2rcQ=
 github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo=
 github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU=
-github.com/sigstore/sigstore v1.10.3 h1:s7fBYYOzW/2Vd0nND2ZdpWySb5vRF2u9eix/NZMHJm0=
-github.com/sigstore/sigstore v1.10.3/go.mod h1:T26vXIkpnGEg391v3TaZ8EERcXbnjtZb/1erh5jbIQk=
+github.com/sigstore/sigstore v1.10.4 h1:ytOmxMgLdcUed3w1SbbZOgcxqwMG61lh1TmZLN+WeZE=
+github.com/sigstore/sigstore v1.10.4/go.mod h1:tDiyrdOref3q6qJxm2G+JHghqfmvifB7hw+EReAfnbI=
 github.com/sigstore/sigstore-go v1.1.4 h1:wTTsgCHOfqiEzVyBYA6mDczGtBkN7cM8mPpjJj5QvMg=
 github.com/sigstore/sigstore-go v1.1.4/go.mod h1:2U/mQOT9cjjxrtIUeKDVhL+sHBKsnWddn8URlswdBsg=
 github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.3 h1:D/FRl5J9UYAJPGZRAJbP0dH78pfwWnKsyCSBwFBU8CI=
@@ -2339,8 +2341,8 @@ github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+x
 github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY=
 github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI=
 github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug=
-github.com/theupdateframework/go-tuf/v2 v2.3.1 h1:fReZUTLvPdqIL8Rd9xEKPmaxig8GIXe0kS4RSEaRfaM=
-github.com/theupdateframework/go-tuf/v2 v2.3.1/go.mod h1:9S0Srkf3c13FelsOyt5OyG3ZZDq9OJDA4IILavrt72Y=
+github.com/theupdateframework/go-tuf/v2 v2.4.1 h1:K6ewW064rKZCPkRo1W/CTbTtm/+IB4+coG1iNURAGCw=
+github.com/theupdateframework/go-tuf/v2 v2.4.1/go.mod h1:Nex2enPVYDFCklrnbTzl3OVwD7fgIAj0J5++z/rvCj8=
 github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
 github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
 github.com/tink-crypto/tink-go-awskms/v2 v2.1.0 h1:N9UxlsOzu5mttdjhxkDLbzwtEecuXmlxZVo/ds7JKJI=
Some files were not shown because too many files have changed in this diff.