Mirror of https://github.com/kubescape/kubescape.git (synced 2026-02-14 18:09:55 +00:00)

Compare commits: 133 commits
| SHA1 |
|---|
| b772588e96 |
| 5d6ac80c38 |
| 33df0e5462 |
| 26ab049622 |
| ac2aa764a4 |
| d02bef62d3 |
| 46682dfe16 |
| 01c65194a8 |
| 25e42ee4b6 |
| 7e5abbdd73 |
| 56183ba369 |
| a9c1ecd3b8 |
| d900ce6146 |
| 3a80ff00b6 |
| b989c4c21f |
| 65c26e22cf |
| 915fa919b2 |
| 8102dd93ba |
| 35cafa9eb4 |
| cc823d7559 |
| eaa74487c2 |
| e8a4c2033f |
| 8fd9258efa |
| 159d3907b5 |
| cde916bec8 |
| 8d289bd924 |
| fda1c83d01 |
| 31b6a3c571 |
| 31a693e9b6 |
| 5de228ce0f |
| ed27641f04 |
| df39e10300 |
| c7d1292c7d |
| a52f13b8c9 |
| 16e34002f5 |
| 3242de8a28 |
| ca2730cd85 |
| 88b55cd6c3 |
| 46ca5036c4 |
| d8f1a25ab7 |
| 56cfb4fcef |
| 894d436274 |
| 39166d40bf |
| 2ba3f78bfc |
| 1d68d1ba67 |
| 6cc5116999 |
| 7706c1264c |
| 2f299b6201 |
| f1af9d5687 |
| d3abd66aa3 |
| 2a0a2cf95a |
| e90f08968f |
| e6b7086961 |
| 4ea35eec00 |
| e8253d4193 |
| 8b8fe92072 |
| bcf9a10131 |
| b6d21ffd01 |
| 086144c3da |
| a45ee8ed42 |
| 129b0f3ee3 |
| 01a8a34637 |
| bcb6c06e73 |
| da03022b94 |
| 17f313177c |
| a81353aa15 |
| e0b82edd1e |
| b675d09fe2 |
| 29b9448dc0 |
| e1020dd1a6 |
| 9b734b1fa4 |
| 9f97f91f32 |
| c6eff8cbaa |
| af9df548d6 |
| 786f3e6b41 |
| 904751e117 |
| ce43661307 |
| cd4b601557 |
| f34f1449db |
| 16c74a228f |
| ad01f01a6c |
| da0b9883ea |
| ac60dbed5e |
| 3a90682c9e |
| 160ac0db7c |
| 7ec4fb75e3 |
| 7e88357940 |
| 1ac808a935 |
| 45fcc59b5f |
| 7875c14adf |
| 5cddba77aa |
| f3058bf168 |
| 0d1b92c2ee |
| 8de308a5b1 |
| a7f810f0d1 |
| e4e3071f5f |
| 9a7e61edd1 |
| 5368330df9 |
| 5e6a4cfb3f |
| 052773b0dc |
| d462224b7a |
| de1d8a9d86 |
| d346b05b76 |
| a3a61d65e9 |
| 606b0e77ca |
| 2a82d6cd21 |
| 530ffde50d |
| 7cf23e9730 |
| 8d5a8f8e22 |
| b820ce1311 |
| dae2458867 |
| d45e636b52 |
| 8810631d5c |
| 6cddce7399 |
| 5d5c4f2c9f |
| e37049f68e |
| 0622a474eb |
| c357f12c82 |
| 2cec58384a |
| 5e4bc5ddb8 |
| f30752d9c3 |
| a586549c57 |
| 7c67a54230 |
| 0006d7d8e7 |
| 63083ae48a |
| 571a68fb58 |
| ef306ca0bf |
| 1a011f4968 |
| 3cece6cf35 |
| 7fc10e8213 |
| bb8f0e3c46 |
| cfd85eadab |
| 87e2986024 |
5 .github/PULL_REQUEST_TEMPLATE.md vendored

@@ -41,7 +41,4 @@ put an [x] in the box to get it checked
- [ ] If it is a core feature, I have added thorough tests.
- [ ] New and existing unit tests pass locally with my changes

**Please open the PR against the `dev` branch (Unless the PR contains only documentation changes)**

-->

-->
37 .github/actions/tag-action/action.yaml vendored Normal file

@@ -0,0 +1,37 @@
name: 'Tag validator and retag'
description: 'This action will check if the tag is rc and create a new tag for release'
inputs:
  ORIGINAL_TAG: # id of input
    description: 'Original tag'
    required: true
    default: ${{ github.ref_name }}
  SUB_STRING:
    description: 'Sub string for rc tag'
    required: true
    default: "-rc"
outputs:
  NEW_TAG:
    description: "The new tag for release"
    value: ${{ steps.retag.outputs.NEW_TAG }}
runs:
  using: "composite"
  steps:
    - run: |
        SUB='-rc'
        if [[ "${{ inputs.ORIGINAL_TAG }}" == *"${{ inputs.SUB_STRING }}"* ]]; then
          echo "Release candidate tag found."
        else
          echo "Release candidate tag not found."
          exit 1
        fi
      shell: bash

    - id: retag
      run: |
        NEW_TAG=
        echo "Original tag: ${{ inputs.ORIGINAL_TAG }}"
        NEW_TAG=$(echo ${{ inputs.ORIGINAL_TAG }} | awk -F '-rc' '{print $1}')
        echo "New tag: $NEW_TAG"
        echo "NEW_TAG=$NEW_TAG" >> $GITHUB_OUTPUT
      shell: bash
33 .github/workflows/00-pr-scanner.yaml vendored Normal file

@@ -0,0 +1,33 @@
name: 00-pr_scanner

on:
  pull_request:
    types: [opened, reopened, synchronize, ready_for_review]
    branches:
      - 'master'
      - 'main'
      - 'dev'
    paths-ignore:
      - '**.yaml'
      - '**.md'
      - '**.sh'
      - 'website/*'
      - 'examples/*'
      - 'docs/*'
      - 'build/*'
      - '.github/*'

concurrency:
  group: ${{ github.head_ref }}
  cancel-in-progress: true

jobs:
  pr-scanner:
    permissions:
      pull-requests: write
    uses: ./.github/workflows/a-pr-scanner.yaml
    with:
      RELEASE: ""
      CLIENT: test
    secrets: inherit
57 .github/workflows/01-code-review-approved.yaml vendored Normal file

@@ -0,0 +1,57 @@
name: 01-code_review_approved
on:
  pull_request_review:
    types: [submitted]
    branches:
      - 'master'
      - 'main'
    paths-ignore:
      - '**.yaml'
      - '**.md'
      - '**.sh'
      - 'website/*'
      - 'examples/*'
      - 'docs/*'
      - 'build/*'
      - '.github/*'

concurrency:
  group: code-review-approved
  cancel-in-progress: true

jobs:

  binary-build:
    if: ${{ github.event.review.state == 'approved' &&
      contains( github.event.pull_request.labels.*.name, 'trigger-integration-test') &&
      github.event.pull_request.base.ref == 'master' }} ## run only if labeled as "trigger-integration-test" and base branch is master
    uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
    with:
      COMPONENT_NAME: kubescape
      CGO_ENABLED: 1
      GO111MODULE: ""
      GO_VERSION: "1.19"
      RELEASE: ""
      CLIENT: test
    secrets: inherit

  merge-to-master:
    needs: binary-build
    env:
      GH_PERSONAL_ACCESS_TOKEN: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
    if: ${{ (github.event.review.state == 'approved' && github.event.pull_request.base.ref == 'master') &&
      (always() && (contains(needs.*.result, 'success') || contains(needs.*.result, 'skipped')) && !(contains(needs.*.result, 'failure')) && !(contains(needs.*.result, 'cancelled'))) }}
    runs-on: ubuntu-latest
    steps:
      - name: merge-to-master
        if: ${{ env.GH_PERSONAL_ACCESS_TOKEN }}
        uses: pascalgn/automerge-action@v0.15.5
        env:
          GITHUB_TOKEN: "${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}"
          MERGE_COMMIT_MESSAGE: "Merge to master - PR number: {pullRequest.number}"
          MERGE_ERROR_FAIL: "true"
          MERGE_METHOD: "merge"
          MERGE_LABELS: ""
          UPDATE_LABELS: ""
64 .github/workflows/01-golang-lint.yaml vendored (deleted)

@@ -1,64 +0,0 @@
name: golangci-lint
on:
  push:
    branches:
      - dev
  pull_request:
    types: [ edited, opened, synchronize, reopened ]
    branches:
      - 'master'
      - 'main'
      - 'dev'
    paths-ignore:
      - '**.yaml'
      - '**.md'
      - '**.sh'
      - 'website/*'
      - 'examples/*'
      - 'docs/*'
      - 'build/*'
      - '.github/*'
permissions:
  contents: read
  # Optional: allow read access to pull request. Use with `only-new-issues` option.
  pull-requests: read
jobs:
  golangci:
    name: lint
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: 1.19
      - uses: actions/checkout@v3
        with:
          submodules: recursive
      - name: Install libgit2
        run: make libgit2
      - name: golangci-lint
        continue-on-error: true
        uses: golangci/golangci-lint-action@v3
        with:
          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
          version: latest

          # Optional: working directory, useful for monorepos
          # working-directory: somedir

          # Optional: golangci-lint command line arguments.
          # args: --issues-exit-code=0
          args: --timeout 10m --build-tags=static
          #--new-from-rev dev

          # Optional: show only new issues if it's a pull request. The default value is `false`.
          only-new-issues: true

          # Optional: if set to true then the all caching functionality will be complete disabled,
          # takes precedence over all other caching options.
          # skip-cache: true

          # Optional: if set to true then the action don't cache or restore ~/go/pkg.
          # skip-pkg-cache: true

          # Optional: if set to true then the action don't cache or restore ~/.cache/go-build.
          # skip-build-cache: true
72 .github/workflows/02-release.yaml vendored Normal file

@@ -0,0 +1,72 @@
name: 02-create_release

on:
  push:
    tags:
      - 'v*.*.*-rc.*'

jobs:
  retag:
    outputs:
      NEW_TAG: ${{ steps.tag-calculator.outputs.NEW_TAG }}
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3

      - id: tag-calculator
        uses: ./.github/actions/tag-action
        with:
          SUB_STRING: "-rc"

  binary-build:
    needs: [retag]
    uses: ./.github/workflows/b-binary-build-and-e2e-tests.yaml
    with:
      COMPONENT_NAME: kubescape
      CGO_ENABLED: 1
      GO111MODULE: ""
      GO_VERSION: "1.19"
      RELEASE: ${{ needs.retag.outputs.NEW_TAG }}
      CLIENT: release
    secrets: inherit

  create-release:
    permissions:
      contents: write
    needs: [retag, binary-build]
    uses: ./.github/workflows/c-create-release.yaml
    with:
      RELEASE_NAME: "Release ${{ needs.retag.outputs.NEW_TAG }}"
      TAG: ${{ needs.retag.outputs.NEW_TAG }}
      DRAFT: false
    secrets: inherit

  publish-krew-plugin:
    name: Publish Krew plugin
    runs-on: ubuntu-latest
    if: "${{ github.repository_owner }} == kubescape"
    needs: create-release
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: recursive
      - name: Update new version in krew-index
        env:
          # overriding the GITHUB_REF so the action can extract the right tag -> https://github.com/rajatjindal/krew-release-bot/blob/v0.0.43/pkg/cicd/github/actions.go#L25
          GITHUB_REF: refs/tags/${{ needs.retag.outputs.NEW_TAG }}
        uses: rajatjindal/krew-release-bot@v0.0.43

  publish-image:
    permissions:
      id-token: write
      packages: write
      contents: read
    uses: ./.github/workflows/d-publish-image.yaml
    needs: [ create-release, retag ]
    with:
      client: "image-release"
      image_name: "quay.io/${{ github.repository_owner }}/kubescape"
      image_tag: ${{ needs.retag.outputs.NEW_TAG }}
      support_platforms: true
      cosign: true
    secrets: inherit
@@ -1,14 +1,14 @@
name: create release digests
name: 03-create_release_digests

on:
  release:
    types: [ published]
    types: [ published ]
    branches:
      - 'master'
      - 'main'

jobs:
  once:
  create_release_digests:
    name: Creating digests
    runs-on: ubuntu-latest
    steps:
52 .github/workflows/README.md vendored Normal file

@@ -0,0 +1,52 @@
# Kubescape workflows

Tag terminology: `v<major>.<minor>.<patch>`

## Developing process

Kubescape's main branch is `main`, any PR will be opened against the main branch.

### Opening a PR

When a user opens a PR, this will trigger some basic tests (units, license, etc.)

### Reviewing a PR

The reviewer/maintainer of a PR will decide whether the PR introduces changes that require running the E2E system tests. If so, the reviewer will add the `trigger-integration-test` label.

### Approving a PR

Once a maintainer approves the PR, if the `trigger-integration-test` label was added to the PR, the GitHub actions will trigger the system test. The PR will be merged only after the system tests passed successfully. If the label was not added, the PR can be merged.

### Merging a PR

The code is merged, no other actions are needed

## Release process

Every two weeks, we will create a new tag by bumping the minor version, this will create the release and publish the artifacts.
If we are introducing breaking changes, we will update the `major` version instead.

When we wish to push a hot-fix/feature within the two weeks, we will bump the `patch`.

### Creating a new tag
Every two weeks or upon the decision of the maintainers, a maintainer can create a tag.

The tag should look as follows: `v<A>.<B>.<C>-rc.D` (release candidate).

When creating a tag, GitHub will trigger the following actions:
1. Basic tests - unit tests, license, etc.
2. System tests (integration tests). If the tests fail, the actions will stop here.
3. Create a new tag: `v<A>.<B>.<C>` (same tag just without the `rc` suffix)
4. Create a release
5. Publish artifacts
6. Build and publish the docker image (this is meanwhile until we separate the microservice code from the LCI codebase)

## Additional Information

The "callers" have the alphabetic prefix and the "executes" have the numeric prefix

## Screenshot

<img width="1469" alt="image" src="https://user-images.githubusercontent.com/64066841/212532727-e82ec9e7-263d-408b-b4b0-a8c943f0109a.png">
181 .github/workflows/a-pr-scanner.yaml vendored Normal file

@@ -0,0 +1,181 @@
name: a-pr-scanner

on:
  workflow_call:
    inputs:
      RELEASE:
        description: 'release'
        required: true
        type: string
      CLIENT:
        description: 'Client name'
        required: true
        type: string

jobs:
  scanners:
    env:
      GITGUARDIAN_API_KEY: ${{ secrets.GITGUARDIAN_API_KEY }}
      SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
    name: PR Scanner
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          submodules: recursive

      - uses: actions/setup-go@v3 # Install go because go-licenses use it
        name: Installing go
        with:
          go-version: '1.19'
          cache: true

      - name: Scanning - Forbidden Licenses (go-licenses)
        id: licenses-scan
        continue-on-error: true
        run: |
          echo "## Installing go-licenses tool"
          go install github.com/google/go-licenses@latest
          echo "## Scanning for forbiden licenses ##"
          go-licenses check .

      - name: Scanning - Credentials (GitGuardian)
        if: ${{ env.GITGUARDIAN_API_KEY }}
        continue-on-error: true
        id: credentials-scan
        uses: GitGuardian/ggshield-action@master
        with:
          args: -v --all-policies
        env:
          GITHUB_PUSH_BEFORE_SHA: ${{ github.event.before }}
          GITHUB_PUSH_BASE_SHA: ${{ github.event.base }}
          GITHUB_PULL_BASE_SHA: ${{ github.event.pull_request.base.sha }}
          GITHUB_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
          GITGUARDIAN_API_KEY: ${{ secrets.GITGUARDIAN_API_KEY }}

      - name: Scanning - Vulnerabilities (Snyk)
        if: ${{ env.SNYK_TOKEN }}
        id: vulnerabilities-scan
        continue-on-error: true
        uses: snyk/actions/golang@master
        with:
          command: test --all-projects
        env:
          SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}

      - name: Comment results to PR
        continue-on-error: true # Warning: This might break opening PRs from forks
        uses: peter-evans/create-or-update-comment@v2.1.0
        with:
          issue-number: ${{ github.event.pull_request.number }}
          body: |
            Scan results:
            - License scan: ${{ steps.licenses-scan.outcome }}
            - Credentials scan: ${{ steps.credentials-scan.outcome }}
            - Vulnerabilities scan: ${{ steps.vulnerabilities-scan.outcome }}
          reactions: 'eyes'

  basic-tests:
    needs: scanners
    name: Create cross-platform build
    runs-on: ${{ matrix.os }}
    env:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      RELEASE: ${{ inputs.RELEASE }}
      CLIENT: ${{ inputs.CLIENT }}
    strategy:
      matrix:
        os: [ubuntu-20.04, macos-latest, windows-latest]
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: recursive

      - name: Cache Go modules (Linux)
        if: matrix.os == 'ubuntu-latest'
        uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Cache Go modules (macOS)
        if: matrix.os == 'macos-latest'
        uses: actions/cache@v3
        with:
          path: |
            ~/Library/Caches/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Cache Go modules (Windows)
        if: matrix.os == 'windows-latest'
        uses: actions/cache@v3
        with:
          path: |
            ~\AppData\Local\go-build
            ~\go\pkg\mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: 1.19

      - name: Install MSYS2 & libgit2 (Windows)
        shell: cmd
        run: .\build.bat all
        if: matrix.os == 'windows-latest'

      - name: Install pkg-config (macOS)
        run: brew install pkg-config
        if: matrix.os == 'macos-latest'

      - name: Install libgit2 (Linux/macOS)
        run: make libgit2
        if: matrix.os != 'windows-latest'

      - name: Test core pkg
        run: go test "-tags=static,gitenabled" -v ./...

      - name: Test httphandler pkg
        run: cd httphandler && go test "-tags=static,gitenabled" -v ./...

      - name: Build
        env:
          RELEASE: ${{ inputs.RELEASE }}
          CLIENT: ${{ inputs.CLIENT }}
          CGO_ENABLED: 1
        run: python3 --version && python3 build.py

      - name: Smoke Testing (Windows / MacOS)
        env:
          RELEASE: ${{ inputs.RELEASE }}
          KUBESCAPE_SKIP_UPDATE_CHECK: "true"
        run: python3 smoke_testing/init.py ${PWD}/build/kubescape-${{ matrix.os }}
        if: matrix.os != 'ubuntu-20.04'

      - name: Smoke Testing (Linux)
        env:
          RELEASE: ${{ inputs.RELEASE }}
          KUBESCAPE_SKIP_UPDATE_CHECK: "true"
        run: python3 smoke_testing/init.py ${PWD}/build/kubescape-ubuntu-latest
        if: matrix.os == 'ubuntu-20.04'

      - name: golangci-lint
        if: matrix.os == 'ubuntu-20.04'
        continue-on-error: true
        uses: golangci/golangci-lint-action@v3
        with:
          version: latest
          args: --timeout 10m --build-tags=static
          only-new-issues: true
279 .github/workflows/b-binary-build-and-e2e-tests.yaml vendored Normal file

@@ -0,0 +1,279 @@
name: b-binary-build-and-e2e-tests
on:
  workflow_call:
    inputs:
      COMPONENT_NAME:
        required: true
        type: string
      RELEASE:
        required: true
        type: string
      CLIENT:
        required: true
        type: string
      GO_VERSION:
        type: string
        default: "1.19"
      GO111MODULE:
        required: true
        type: string
      CGO_ENABLED:
        type: number
        default: 1
      BINARY_TESTS:
        type: string
        default: '[
          "scan_nsa",
          "scan_mitre",
          "scan_with_exceptions",
          "scan_repository",
          "scan_local_file",
          "scan_local_glob_files",
          "scan_local_list_of_files",
          "scan_nsa_and_submit_to_backend",
          "scan_mitre_and_submit_to_backend",
          "scan_local_repository_and_submit_to_backend",
          "scan_repository_from_url_and_submit_to_backend",
          "scan_with_exception_to_backend",
          "scan_with_custom_framework",
          "scan_customer_configuration",
          "host_scanner"
          ]'

jobs:

  check-secret:
    name: secret-validator
    runs-on: ubuntu-latest
    outputs:
      is-secret-set: ${{ steps.check-secret-set.outputs.is-secret-set }}
    steps:
      - name: check if the necessary secrets are set in github secrets
        id: check-secret-set
        env:
          CUSTOMER: ${{ secrets.CUSTOMER }}
          USERNAME: ${{ secrets.USERNAME }}
          PASSWORD: ${{ secrets.PASSWORD }}
          CLIENT_ID: ${{ secrets.CLIENT_ID_PROD }}
          SECRET_KEY: ${{ secrets.SECRET_KEY_PROD }}
          REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
          REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}
        run: |
          echo "is-secret-set=${{ env.CUSTOMER != '' &&
            env.USERNAME != '' &&
            env.PASSWORD != '' &&
            env.CLIENT_ID != '' &&
            env.SECRET_KEY != '' &&
            env.REGISTRY_USERNAME != '' &&
            env.REGISTRY_PASSWORD != ''
            }}" >> $GITHUB_OUTPUT

  binary-build:
    name: Create cross-platform build
    outputs:
      TEST_NAMES: ${{ steps.export_tests_to_env.outputs.TEST_NAMES }}
    env:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-20.04, macos-latest, windows-latest]
    steps:

      - uses: actions/checkout@v3
        with:
          fetch-depth: 0
          submodules: recursive

      - name: Cache Go modules (Linux)
        if: matrix.os == 'ubuntu-20.04'
        uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Cache Go modules (macOS)
        if: matrix.os == 'macos-latest'
        uses: actions/cache@v3
        with:
          path: |
            ~/Library/Caches/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Cache Go modules (Windows)
        if: matrix.os == 'windows-latest'
        uses: actions/cache@v3
        with:
          path: |
            ~\AppData\Local\go-build
            ~\go\pkg\mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - uses: actions/setup-go@v3
        name: Installing go
        with:
          go-version: ${{ inputs.GO_VERSION }}
          cache: true

      - name: Install MSYS2 & libgit2 (Windows)
        shell: cmd
        run: .\build.bat all
        if: matrix.os == 'windows-latest'

      - name: Install pkg-config (macOS)
        run: brew install pkg-config
        if: matrix.os == 'macos-latest'

      - name: Install libgit2 (Linux/macOS)
        run: make libgit2
        if: matrix.os != 'windows-latest'

      - name: Test core pkg
        run: go test "-tags=static,gitenabled" -v ./...

      - name: Test httphandler pkg
        run: cd httphandler && go test "-tags=static,gitenabled" -v ./...

      - name: Build
        env:
          RELEASE: ${{ inputs.RELEASE }}
          CLIENT: ${{ inputs.CLIENT }}
          CGO_ENABLED: ${{ inputs.CGO_ENABLED }}
        run: python3 --version && python3 build.py

      - name: Smoke Testing (Windows / MacOS)
        env:
          RELEASE: ${{ inputs.RELEASE }}
          KUBESCAPE_SKIP_UPDATE_CHECK: "true"
        run: python3 smoke_testing/init.py ${PWD}/build/kubescape-${{ matrix.os }}
        if: matrix.os != 'ubuntu-20.04'

      - name: Smoke Testing (Linux)
        env:
          RELEASE: ${{ inputs.RELEASE }}
          KUBESCAPE_SKIP_UPDATE_CHECK: "true"
        run: python3 smoke_testing/init.py ${PWD}/build/kubescape-ubuntu-latest
        if: matrix.os == 'ubuntu-20.04'

      - name: golangci-lint
        if: matrix.os == 'ubuntu-20.04'
        continue-on-error: true
        uses: golangci/golangci-lint-action@v3
        with:
          version: latest
          args: --timeout 10m --build-tags=static
          only-new-issues: true

      - id: export_tests_to_env
        name: set test name
        run: |
          echo "TEST_NAMES=$input" >> $GITHUB_OUTPUT
        env:
          input: ${{ inputs.BINARY_TESTS }}

      - uses: actions/upload-artifact@v3.1.1
        name: Upload artifact (Linux)
        if: matrix.os == 'ubuntu-20.04'
        with:
          name: kubescape-ubuntu-latest
          path: build/
          if-no-files-found: error

      - uses: actions/upload-artifact@v3.1.1
        name: Upload artifact (MacOS, Win)
        if: matrix.os != 'ubuntu-20.04'
        with:
          name: kubescape-${{ matrix.os }}
          path: build/
          if-no-files-found: error

  run-tests:
    strategy:
      fail-fast: false
      matrix:
        TEST: ${{ fromJson(needs.binary-build.outputs.TEST_NAMES) }}
    needs: [check-secret, binary-build]
    if: needs.check-secret.outputs.is-secret-set == 'true'
    runs-on: ubuntu-latest # This cannot change
    steps:

      - uses: actions/download-artifact@v3.0.2
        id: download-artifact
        with:
          name: kubescape-ubuntu-latest
          path: "~"

      - run: ls -laR

      - name: chmod +x
        run: chmod +x -R ${{steps.download-artifact.outputs.download-path}}/kubescape-ubuntu-latest

      - name: Checkout systests repo
        uses: actions/checkout@v3
        with:
          repository: armosec/system-tests
          path: .

      - uses: actions/setup-python@v4
        with:
          python-version: '3.8.13'
          cache: 'pip'

      - name: create env
        run: ./create_env.sh

      - name: Generate uuid
        id: uuid
        run: |
          echo "RANDOM_UUID=$(uuidgen)" >> $GITHUB_OUTPUT

      - name: Create k8s Kind Cluster
        id: kind-cluster-install
        uses: helm/kind-action@v1.3.0
        with:
          cluster_name: ${{ steps.uuid.outputs.RANDOM_UUID }}

      - name: run-tests
        env:
          CUSTOMER: ${{ secrets.CUSTOMER }}
          USERNAME: ${{ secrets.USERNAME }}
          PASSWORD: ${{ secrets.PASSWORD }}
          CLIENT_ID: ${{ secrets.CLIENT_ID_PROD }}
          SECRET_KEY: ${{ secrets.SECRET_KEY_PROD }}
          REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }}
          REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }}

        run: |
          echo "Test history:"
          echo " ${{ matrix.TEST }} " >/tmp/testhistory
          cat /tmp/testhistory
          source systests_python_env/bin/activate

          python3 systest-cli.py \
            -t ${{ matrix.TEST }} \
            -b production \
            -c CyberArmorTests \
            --duration 3 \
            --logger DEBUG \
            --kwargs kubescape=${{steps.download-artifact.outputs.download-path}}/kubescape-ubuntu-latest

          deactivate

      - name: Test Report
        uses: mikepenz/action-junit-report@v3.6.1
        if: always() # always run even if the previous step fails
        with:
          report_paths: '**/results_xml_format/**.xml'
          commit: ${{github.event.workflow_run.head_sha}}
124 .github/workflows/build.yaml vendored (deleted)

@@ -1,124 +0,0 @@
name: build

on:
  push:
    branches:
      - 'master'
      - 'main'
    paths-ignore:
      - '**.yaml'
      - '**.md'
      - '**.sh'
      - 'website/*'
      - 'examples/*'
      - 'docs/*'
      - 'build/*'
      - '.github/*'
jobs:
  test:
    uses: ./.github/workflows/test.yaml
    with:
      release: "v2.0.${{ github.run_number }}"
      client: test

  create-release:
    uses: ./.github/workflows/release.yaml
    needs: test
    with:
      release_name: "Release v2.0.${{ github.run_number }}"
      tag_name: "v2.0.${{ github.run_number }}"
    secrets: inherit

  publish-artifacts:
    name: Build and publish artifacts
    needs: create-release
    runs-on: ${{ matrix.os }}
    env:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    strategy:
      matrix:
        os: [ubuntu-20.04, macos-latest, windows-latest]
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: recursive

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: 1.19

      - name: Install MSYS2 & libgit2 (Windows)
        shell: cmd
        run: .\build.bat all
        if: matrix.os == 'windows-latest'

      - name: Install libgit2 (Linux/macOS)
        run: make libgit2
        if: matrix.os != 'windows-latest'

      - name: Build
        env:
          RELEASE: v2.0.${{ github.run_number }}
          CLIENT: release
          CGO_ENABLED: 1
        run: python3 --version && python3 build.py

      - name: Upload release binaries (Windows / MacOS)
        id: upload-release-asset-win-macos
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ needs.create-release.outputs.upload_url }}
          asset_path: build/${{ matrix.os }}/kubescape
          asset_name: kubescape-${{ matrix.os }}
          asset_content_type: application/octet-stream
        if: matrix.os != 'ubuntu-20.04'

      - name: Upload release binaries (Linux)
        id: upload-release-asset-linux
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ needs.create-release.outputs.upload_url }}
          asset_path: build/ubuntu-latest/kubescape
          asset_name: kubescape-ubuntu-latest
          asset_content_type: application/octet-stream
        if: matrix.os == 'ubuntu-20.04'

      - name: Upload release hash (Windows / MacOS)
        id: upload-release-hash-win-macos
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ needs.create-release.outputs.upload_url }}
          asset_path: build/${{ matrix.os }}/kubescape.sha256
          asset_name: kubescape-${{ matrix.os }}-sha256
          asset_content_type: application/octet-stream
        if: matrix.os != 'ubuntu-20.04'

      - name: Upload release hash (Linux)
        id: upload-release-hash-linux
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ needs.create-release.outputs.upload_url }}
          asset_path: build/ubuntu-latest/kubescape.sha256
          asset_name: kubescape-ubuntu-latest-sha256
          asset_content_type: application/octet-stream
        if: matrix.os == 'ubuntu-20.04'

  publish-image:
    uses: ./.github/workflows/build-image.yaml
    needs: create-release
    with:
      client: "image-release"
      image_name: "quay.io/${{ github.repository_owner }}/kubescape"
      image_tag: "v2.0.${{ github.run_number }}"
      support_platforms: true
      cosign: true
    secrets: inherit
31 .github/workflows/build_dev.yaml vendored (deleted)

@@ -1,31 +0,0 @@
name: build-dev

on:
  push:
    branches: [ dev ]
    paths-ignore:
      - '**.yaml'
      - '**.md'
      - '**.sh'
      - 'website/*'
      - 'examples/*'
      - 'docs/*'
      - 'build/*'
      - '.github/*'
jobs:
  test:
    uses: ./.github/workflows/test.yaml
    with:
      release: "v2.0.${{ github.run_number }}"
      client: test

  # publish-dev-image:
  #   uses: ./.github/workflows/build-image.yaml
  #   needs: test
  #   with:
  #     client: "image-dev"
  #     image_name: "quay.io/${{ github.repository_owner }}/kubescape"
  #     image_tag: "dev-v2.0.${{ github.run_number }}"
  #     support_platforms: true
  #     cosign: true
  #   secrets: inherit
57 .github/workflows/c-create-release.yaml vendored Normal file

@@ -0,0 +1,57 @@
name: c-create_release
on:
  workflow_call:
    inputs:
      RELEASE_NAME:
        description: 'Release name'
        required: true
        type: string
      TAG:
        description: 'Tag name'
        required: true
        type: string
      DRAFT:
        description: 'Create draft release'
        required: false
        type: boolean
        default: false

jobs:

  create-release:
    name: create-release
    runs-on: ubuntu-latest
    # permissions:
    #   contents: write
    steps:
      - uses: actions/download-artifact@v3.0.2
        id: download-artifact
        with:
          path: .

      - name: Release
        uses: softprops/action-gh-release@v1
        env:
          MAC_OS: macos-latest
          UBUNTU_OS: ubuntu-latest
          WINDOWS_OS: windows-latest
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          name: ${{ inputs.RELEASE_NAME }}
          tag_name: ${{ inputs.TAG }}
          body: ${{ github.event.pull_request.body }}
          draft: ${{ inputs.DRAFT }}
          fail_on_unmatched_files: true
          prerelease: false
          files: |
            ./kubescape-${{ env.MAC_OS }}/kubescape-${{ env.MAC_OS }}
            ./kubescape-${{ env.MAC_OS }}/kubescape-${{ env.MAC_OS }}.sha256
            ./kubescape-${{ env.MAC_OS }}/kubescape-${{ env.MAC_OS }}.tar.gz
            ./kubescape-${{ env.UBUNTU_OS }}/kubescape-${{ env.UBUNTU_OS }}
            ./kubescape-${{ env.UBUNTU_OS }}/kubescape-${{ env.UBUNTU_OS }}.sha256
            ./kubescape-${{ env.UBUNTU_OS }}/kubescape-${{ env.UBUNTU_OS }}.tar.gz
            ./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}
            ./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}.sha256
            ./kubescape-${{ env.WINDOWS_OS }}/kubescape-${{ env.WINDOWS_OS }}.tar.gz
@@ -1,4 +1,4 @@
name: build
name: d-publish-image

on:
  workflow_call:
@@ -33,7 +33,7 @@ jobs:
    outputs:
      is-secret-set: ${{ steps.check-secret-set.outputs.is-secret-set }}
    steps:
      - name: Check whether unity activation requests should be done
      - name: check if QUAYIO_REGISTRY_USERNAME & QUAYIO_REGISTRY_PASSWORD is set in github secrets
        id: check-secret-set
        env:
          QUAYIO_REGISTRY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
@@ -46,10 +46,6 @@ jobs:
    if: needs.check-secret.outputs.is-secret-set == 'true'
    name: Build image and upload to registry
    runs-on: ubuntu-latest
    permissions:
      id-token: write
      packages: write
      contents: read

    steps:
      - uses: actions/checkout@v3
24 .github/workflows/pr_checks.yaml vendored (deleted)

@@ -1,24 +0,0 @@
name: pr-checks

on:
  pull_request:
    types: [ edited, opened, synchronize, reopened ]
    branches:
      - 'master'
      - 'main'
      - 'dev'
    paths-ignore:
      - '**.yaml'
      - '**.md'
      - '**.sh'
      - 'website/*'
      - 'examples/*'
      - 'docs/*'
      - 'build/*'
      - '.github/*'
jobs:
  test:
    uses: ./.github/workflows/test.yaml
    with:
      release: "v2.0.${{ github.run_number }}"
      client: test
41 .github/workflows/release.yaml vendored (deleted)

@@ -1,41 +0,0 @@
name: build

on:
  workflow_call:
    inputs:
      release_name:
        description: 'release'
        required: true
        type: string
      tag_name:
        description: 'tag'
        required: true
        type: string
      draft:
        description: 'create draft release'
        required: false
        type: boolean
        default: false
    outputs:
      upload_url:
        description: "The first output string"
        value: ${{ jobs.release.outputs.upload_url }}

jobs:
  release:
    name: Create release
    runs-on: ubuntu-latest
    outputs:
      upload_url: ${{ steps.create_release.outputs.upload_url }}
    steps:
      - name: Create a release
        id: create_release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ inputs.tag_name }}
          release_name: ${{ inputs.release_name }}
          draft: ${{ inputs.draft }}
          prerelease: false
100 .github/workflows/test.yaml vendored (deleted)

@@ -1,100 +0,0 @@
name: test

on:
  workflow_call:
    inputs:
      release:
        description: 'release'
        required: true
        type: string
      client:
        description: 'Client name'
        required: true
        type: string
jobs:
  build:
    name: Create cross-platform build
    runs-on: ${{ matrix.os }}
    env:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    strategy:
      matrix:
        os: [ubuntu-20.04, macos-latest, windows-latest]
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: recursive

      - name: Cache Go modules (Linux)
        if: matrix.os == 'ubuntu-20.04'
        uses: actions/cache@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Cache Go modules (macOS)
        if: matrix.os == 'macos-latest'
        uses: actions/cache@v3
        with:
          path: |
            ~/Library/Caches/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Cache Go modules (Windows)
        if: matrix.os == 'windows-latest'
        uses: actions/cache@v3
        with:
          path: |
            ~\AppData\Local\go-build
            ~\go\pkg\mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: 1.19

      - name: Install MSYS2 & libgit2 (Windows)
        shell: cmd
        run: .\build.bat all
        if: matrix.os == 'windows-latest'

      - name: Install libgit2 (Linux/macOS)
        run: make libgit2
        if: matrix.os != 'windows-latest'

      - name: Test core pkg
        run: go test "-tags=static,gitenabled" -v ./...

      - name: Test httphandler pkg
        run: cd httphandler && go test "-tags=static,gitenabled" -v ./...

      - name: Build
        env:
          RELEASE: ${{ inputs.release }}
          CLIENT: test
          CGO_ENABLED: 1
        run: python3 --version && python3 build.py

      - name: Smoke Testing (Windows / MacOS)
        env:
          RELEASE: ${{ inputs.release }}
          KUBESCAPE_SKIP_UPDATE_CHECK: "true"
        run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
        if: matrix.os != 'ubuntu-20.04'

      - name: Smoke Testing (Linux)
        env:
          RELEASE: ${{ inputs.release }}
          KUBESCAPE_SKIP_UPDATE_CHECK: "true"
        run: python3 smoke_testing/init.py ${PWD}/build/ubuntu-latest/kubescape
        if: matrix.os == 'ubuntu-20.04'
1 .gitignore vendored

@@ -7,3 +7,4 @@
.history
ca.srl
*.out
ks
30 .krew.yaml Normal file

@@ -0,0 +1,30 @@
apiVersion: krew.googlecontainertools.github.com/v1alpha2
kind: Plugin
metadata:
  name: kubescape
spec:
  homepage: https://github.com/kubescape/kubescape/
  shortDescription: Scan resources and cluster configs against security frameworks.
  version: {{ .TagName }}
  description: |
    It includes risk analysis, security compliance, and misconfiguration scanning
    with an easy-to-use CLI interface, flexible output formats, and automated scanning capabilities.
  platforms:
  - selector:
      matchLabels:
        os: darwin
        arch: amd64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-macos-latest.tar.gz" .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: linux
        arch: amd64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-ubuntu-latest.tar.gz" .TagName }}
    bin: kubescape
  - selector:
      matchLabels:
        os: windows
        arch: amd64
    {{ addURIAndSha "https://github.com/kubescape/kubescape/releases/download/{{ .TagName }}/kubescape-windows-latest.tar.gz" .TagName }}
    bin: kubescape
@@ -21,7 +21,7 @@ Please follow our [code of conduct](CODE_OF_CONDUCT.md) in all of your interacti
   build.
2. Update the README.md with details of changes to the interface, this includes new environment
   variables, exposed ports, useful file locations and container parameters.
3. Open Pull Request to `dev` branch - we test the component before merging into the `master` branch
3. Open Pull Request to the `master` branch.
4. We will merge the Pull Request once you have the sign-off.

## Developer Certificate of Origin

@@ -59,6 +59,36 @@ curl -Ls https://gist.githubusercontent.com/dixudx/7d7edea35b4d91e1a2a8fbf41d095
chmod +x .git/hooks/prepare-commit-msg
```

### Use semantic commit messages (optional)

When contributing, you could consider using [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/), in order to improve logs readability and help us to automatically generate `CHANGELOG`s.

Format: `<type>(<scope>): <subject>`

`<scope>` is optional

#### Example

```
feat(cmd): add kubectl plugin
^--^ ^-^   ^----------------^
|    |     |
|    |     +-> subject: summary in present tense.
|    |
|    +-------> scope: point of interest
|
+-------> type: chore, docs, feat, fix, refactor, style, or test.
```

More Examples:
* `feat`: new feature for the user, not a new feature for build script
* `fix`: bug fix for the user, not a fix to a build script
* `docs`: changes to the documentation
* `style`: formatting, missing semi colons, etc; no production code change
* `refactor`: refactoring production code, eg. renaming a variable
* `test`: adding missing tests, refactoring tests; no production code change
* `chore`: updating grunt tasks etc; no production code change

## Fixing a commit where the DCO failed

Check out [this guide](https://github.com/src-d/guide/blob/master/developer-community/fix-DCO.md).
@@ -1,5 +1,5 @@
[](releases)
[](https://github.com/kubescape/kubescape/actions/workflows/build.yaml)
[](https://github.com/kubescape/kubescape/releases)
[](https://github.com/kubescape/kubescape/actions/workflows/02-release.yaml)
[](https://goreportcard.com/report/github.com/kubescape/kubescape)
[](https://gitpod.io/#https://github.com/kubescape/kubescape)
[](https://github.com/kubescape/kubescape/blob/master/LICENSE)
28 build.py

@@ -3,9 +3,16 @@ import sys
import hashlib
import platform
import subprocess
import tarfile

BASE_GETTER_CONST = "github.com/kubescape/kubescape/v2/core/cautils/getter"

platformSuffixes = {
    "Windows": "windows-latest",
    "Linux": "ubuntu-latest",
    "Darwin": "macos-latest",
}

def check_status(status, msg):
    if status != 0:
        sys.stderr.write(msg)
@@ -13,21 +20,15 @@


def get_build_dir():
    current_platform = platform.system()
    build_dir = ""

    if current_platform == "Windows": build_dir = "windows-latest"
    elif current_platform == "Linux": build_dir = "ubuntu-latest"
    elif current_platform == "Darwin": build_dir = "macos-latest"
    else: raise OSError("Platform %s is not supported!" % (current_platform))

    return os.path.join("build", build_dir)
    return "build"


def get_package_name():
    package_name = "kubescape"
    current_platform = platform.system()

    return package_name
    if current_platform not in platformSuffixes: raise OSError("Platform %s is not supported!" % (current_platform))

    return "kubescape-" + platformSuffixes[current_platform]


def main():
@@ -46,6 +47,7 @@

    ks_file = os.path.join(build_dir, package_name)
    hash_file = ks_file + ".sha256"
    tar_file = ks_file + ".tar.gz"

    if not os.path.isdir(build_dir):
        os.makedirs(build_dir)
@@ -73,6 +75,10 @@
        print("kubescape hash: {}, file: {}".format(hash, hash_file))
        kube_sha.write(sha256.hexdigest())

    with tarfile.open(tar_file, 'w:gz') as archive:
        archive.add(ks_file, "kubescape")
        archive.add("LICENSE", "LICENSE")

    print("Build Done")
@@ -25,13 +25,13 @@ RUN rm -rf git2go && make libgit2

# build kubescape server
WORKDIR /work/httphandler
RUN python build.py
RUN ls -ltr build/ubuntu-latest
RUN ls -ltr build/

# build kubescape cmd
WORKDIR /work
RUN python build.py

RUN /work/build/ubuntu-latest/kubescape download artifacts -o /work/artifacts
RUN /work/build/kubescape-ubuntu-latest download artifacts -o /work/artifacts

FROM alpine:3.16.2

@@ -45,7 +45,7 @@ USER ks

WORKDIR /home/ks

COPY --from=builder /work/httphandler/build/ubuntu-latest/kubescape /usr/bin/ksserver
COPY --from=builder /work/build/ubuntu-latest/kubescape /usr/bin/kubescape
COPY --from=builder /work/httphandler/build/kubescape-ubuntu-latest /usr/bin/ksserver
COPY --from=builder /work/build/kubescape-ubuntu-latest /usr/bin/kubescape

ENTRYPOINT ["ksserver"]
@@ -1,23 +1,23 @@
package completion

import (
    "fmt"
    "os"
    "strings"

    "github.com/kubescape/kubescape/v2/core/cautils"
    "github.com/spf13/cobra"
)

var completionCmdExamples = `

var completionCmdExamples = fmt.Sprintf(`
  # Enable BASH shell autocompletion
  $ source <(kubescape completion bash)
  $ echo 'source <(kubescape completion bash)' >> ~/.bashrc
  $ source <(%[1]s completion bash)
  $ echo 'source <(%[1]s completion bash)' >> ~/.bashrc

  # Enable ZSH shell autocompletion
  $ source <(kubectl completion zsh)
  $ echo 'source <(kubectl completion zsh)' >> "${fpath[1]}/_kubectl"

`
`, cautils.ExecName())

func GetCompletionCmd() *cobra.Command {
    completionCmd := &cobra.Command{
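A note on the pattern these example strings introduce, repeated in several of the files below: `%[1]s` is an indexed format verb, so `fmt.Sprintf` reuses the same first argument at every occurrence, letting one `cautils.ExecName()` value replace the hard-coded `kubescape` throughout a help text. A minimal, hypothetical sketch (the `execName` value is illustrative and not taken from the repository):

```go
package main

import "fmt"

func main() {
	// %[1]s always refers to argument 1, so a single value fills every
	// placeholder in the template string.
	execName := "kubectl kubescape" // hypothetical value, e.g. when invoked as a kubectl plugin
	example := fmt.Sprintf(`
# Scan command
%[1]s scan

# List supported frameworks
%[1]s list frameworks
`, execName)
	fmt.Print(example)
}
```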
@@ -1,34 +1,37 @@
package config

import (
    "fmt"

    "github.com/kubescape/kubescape/v2/core/cautils"
    "github.com/kubescape/kubescape/v2/core/meta"
    "github.com/spf13/cobra"
)

var (
    configExample = `
    configExample = fmt.Sprintf(`
  # View cached configurations
  kubescape config view
  %[1]s config view

  # Delete cached configurations
  kubescape config delete
  %[1]s config delete

  # Set cached configurations
  kubescape config set --help
`
    setConfigExample = `
  %[1]s config set --help
`, cautils.ExecName())
    setConfigExample = fmt.Sprintf(`
  # Set account id
  kubescape config set accountID <account id>
  %[1]s config set accountID <account id>

  # Set client id
  kubescape config set clientID <client id>
  %[1]s config set clientID <client id>

  # Set access key
  kubescape config set secretKey <access key>
  %[1]s config set secretKey <access key>

  # Set cloudAPIURL
  kubescape config set cloudAPIURL <cloud API URL>
`
  %[1]s config set cloudAPIURL <cloud API URL>
`, cautils.ExecName())
)

func GetConfigCmd(ks meta.IKubescape) *cobra.Command {
@@ -1,6 +1,8 @@
package config

import (
    "context"

    logger "github.com/kubescape/go-logger"
    "github.com/kubescape/kubescape/v2/core/meta"
    v1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
@@ -13,7 +15,7 @@ func getDeleteCmd(ks meta.IKubescape) *cobra.Command {
        Short: "Delete cached configurations",
        Long:  ``,
        Run: func(cmd *cobra.Command, args []string) {
            if err := ks.DeleteCachedConfig(&v1.DeleteConfig{}); err != nil {
            if err := ks.DeleteCachedConfig(context.TODO(), &v1.DeleteConfig{}); err != nil {
                logger.L().Fatal(err.Error())
            }
        },
@@ -1,18 +1,21 @@
package delete

import (
    "fmt"

    "github.com/kubescape/kubescape/v2/core/cautils"
    "github.com/kubescape/kubescape/v2/core/meta"
    v1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
    "github.com/spf13/cobra"
)

var deleteExceptionsExamples = `
var deleteExceptionsExamples = fmt.Sprintf(`
  # Delete single exception
  kubescape delete exceptions "exception name"
  %[1]s delete exceptions "exception name"

  # Delete multiple exceptions
  kubescape delete exceptions "first exception;second exception;third exception"
`
  %[1]s delete exceptions "first exception;second exception;third exception"
`, cautils.ExecName())

func GetDeleteCmd(ks meta.IKubescape) *cobra.Command {
    var deleteInfo v1.Delete
@@ -5,6 +5,7 @@ import (
    "strings"

    logger "github.com/kubescape/go-logger"
    "github.com/kubescape/kubescape/v2/core/cautils"
    "github.com/kubescape/kubescape/v2/core/meta"
    v1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
    "github.com/spf13/cobra"
@@ -13,7 +14,7 @@ import (
func getExceptionsCmd(ks meta.IKubescape, deleteInfo *v1.Delete) *cobra.Command {
    return &cobra.Command{
        Use:     "exceptions <exception name>",
        Short:   "Delete exceptions from Kubescape SaaS version. Run 'kubescape list exceptions' for all exceptions names",
        Short:   fmt.Sprintf("Delete exceptions from Kubescape SaaS version. Run '%[1]s list exceptions' for all exceptions names", cautils.ExecName()),
        Example: deleteExceptionsExamples,
        Args: func(cmd *cobra.Command, args []string) error {
            if len(args) != 1 {
@@ -1,6 +1,7 @@
package download

import (
    "context"
    "fmt"
    "path/filepath"
    "strings"
@@ -14,34 +15,34 @@ import (
)

var (
    downloadExample = `
    downloadExample = fmt.Sprintf(`
  # Download all artifacts and save them in the default path (~/.kubescape)
  kubescape download artifacts
  %[1]s download artifacts

  # Download all artifacts and save them in /tmp path
  kubescape download artifacts --output /tmp
  %[1]s download artifacts --output /tmp

  # Download the NSA framework. Run 'kubescape list frameworks' for all frameworks names
  kubescape download framework nsa
  # Download the NSA framework. Run '%[1]s list frameworks' for all frameworks names
  %[1]s download framework nsa

  # Download the "C-0001" control. Run 'kubescape list controls --id' for all controls ids
  kubescape download control "C-0001"
  # Download the "C-0001" control. Run '%[1]s list controls --id' for all controls ids
  %[1]s download control "C-0001"

  # Download the "C-0001" control. Run 'kubescape list controls --id' for all controls ids
  kubescape download control C-0001
  # Download the "C-0001" control. Run '%[1]s list controls --id' for all controls ids
  %[1]s download control C-0001

  # Download the configured exceptions
  kubescape download exceptions
  %[1]s download exceptions

  # Download the configured controls-inputs
  kubescape download controls-inputs
  %[1]s download controls-inputs

  # Download the attack tracks
  kubescape download attack-tracks
`
  %[1]s download attack-tracks
`, cautils.ExecName())
)

func GeDownloadCmd(ks meta.IKubescape) *cobra.Command {
func GetDownloadCmd(ks meta.IKubescape) *cobra.Command {
    var downloadInfo = v1.DownloadInfo{}

    downloadCmd := &cobra.Command{
@@ -74,7 +75,7 @@ func GeDownloadCmd(ks meta.IKubescape) *cobra.Command {
                downloadInfo.Identifier = args[1]

            }
            if err := ks.Download(&downloadInfo); err != nil {
            if err := ks.Download(context.TODO(), &downloadInfo); err != nil {
                logger.L().Fatal(err.Error())
            }
            return nil
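Alongside the rename to `GetDownloadCmd`, these hunks thread a `context.Context` as the first argument of the `IKubescape` methods (`Download` here, and `DeleteCachedConfig`, `Fix`, `List` in the neighboring files), with the CLI passing `context.TODO()` for now. A hedged sketch of what that signature change enables for a caller that wants cancellation; the interface stub, timeout value, and names below are illustrative only:

```go
package sketch

import (
	"context"
	"time"
)

// downloader stands in for the context-first method shown in the diff;
// it is not the real meta.IKubescape interface.
type downloader interface {
	Download(ctx context.Context, info *DownloadInfo) error
}

// DownloadInfo is a placeholder for the real datastructures/v1 type.
type DownloadInfo struct{ Target string }

func downloadWithTimeout(ks downloader, info *DownloadInfo) error {
	// Because the method accepts a context, a caller can bound or cancel the
	// call instead of always passing context.TODO() as the CLI currently does.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return ks.Download(ctx, info)
}
```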
@@ -1,23 +1,26 @@
package fix

import (
    "context"
    "errors"
    "fmt"

    "github.com/kubescape/kubescape/v2/core/cautils"
    "github.com/kubescape/kubescape/v2/core/meta"
    metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"

    "github.com/spf13/cobra"
)

var fixCmdExamples = `
var fixCmdExamples = fmt.Sprintf(`
  Fix command is for fixing kubernetes manifest files based on a scan command output.
  Use with caution, this command will change your files in-place.

  # Fix kubernetes YAML manifest files based on a scan command output (output.json)
  1) kubescape scan --format json --format-version v2 --output output.json
  2) kubescape fix output.json
  1) %[1]s scan --format json --format-version v2 --output output.json
  2) %[1]s fix output.json

`
`, cautils.ExecName())

func GetFixCmd(ks meta.IKubescape) *cobra.Command {
    var fixInfo metav1.FixInfo
@@ -33,7 +36,7 @@ func GetFixCmd(ks meta.IKubescape) *cobra.Command {
            }
            fixInfo.ReportFile = args[0]

            return ks.Fix(&fixInfo)
            return ks.Fix(context.TODO(), &fixInfo)
        },
    }
@@ -1,6 +1,7 @@
package list

import (
    "context"
    "fmt"
    "strings"

@@ -13,19 +14,19 @@ import (
)

var (
    listExample = `
    listExample = fmt.Sprintf(`
  # List default supported frameworks names
  kubescape list frameworks
  %[1]s list frameworks

  # List all supported frameworks names
  kubescape list frameworks --account <account id>
  %[1]s list frameworks --account <account id>

  # List all supported controls names with ids
  kubescape list controls
  %[1]s list controls

  Control documentation:
  https://hub.armosec.io/docs/controls
`
`, cautils.ExecName())
)

func GetListCmd(ks meta.IKubescape) *cobra.Command {
@@ -55,7 +56,7 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {

            listPolicies.Target = args[0]

            if err := ks.List(&listPolicies); err != nil {
            if err := ks.List(context.TODO(), &listPolicies); err != nil {
                logger.L().Fatal(err.Error())
            }
            return nil
@@ -65,7 +66,7 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
    listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
    listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
    listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-print'/'json'")
    listCmd.PersistentFlags().MarkDeprecated("id", "Control ID's are included in list outpus")
    listCmd.PersistentFlags().MarkDeprecated("id", "Control ID's are included in list outputs")

    return listCmd
}
cmd/root.go (24 changes)
@@ -26,19 +26,19 @@ import (

var rootInfo cautils.RootInfo

var ksExamples = `
var ksExamples = fmt.Sprintf(`
# Scan command
kubescape scan
%[1]s scan

# List supported frameworks
kubescape list frameworks
%[1]s list frameworks

# Download artifacts (air-gapped environment support)
kubescape download artifacts
%[1]s download artifacts

# View cached configurations
kubescape config view
`
%[1]s config view
`, cautils.ExecName())

func NewDefaultKubescapeCommand() *cobra.Command {
ks := core.NewKubescape()
@@ -53,6 +53,16 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
Example: ksExamples,
}

if cautils.IsKrewPlugin() {
// Invoked as a kubectl plugin.

// Cobra doesn't have a way to specify a two word command (i.e. "kubectl kubescape"), so set a custom usage template
// with kubectl in it. Cobra will use this template for the root and all child commands.
oldUsageTemplate := rootCmd.UsageTemplate()
newUsageTemplate := strings.NewReplacer("{{.UseLine}}", "kubectl {{.UseLine}}", "{{.CommandPath}}", "kubectl {{.CommandPath}}").Replace(oldUsageTemplate)
rootCmd.SetUsageTemplate(newUsageTemplate)
}
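The branch above handles the case where kubescape is installed as a kubectl krew plugin: cobra has no notion of a two-word command, so the default usage template is rewritten to prefix every use line and command path with "kubectl". A rough illustration of what the replacer does (the template fragment below is abbreviated, not cobra's full usage template):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// abbreviated stand-in for rootCmd.UsageTemplate()
	oldUsageTemplate := "Usage:\n  {{.UseLine}}\n\nSee also:\n  {{.CommandPath}} help\n"
	newUsageTemplate := strings.NewReplacer(
		"{{.UseLine}}", "kubectl {{.UseLine}}",
		"{{.CommandPath}}", "kubectl {{.CommandPath}}",
	).Replace(oldUsageTemplate)
	fmt.Print(newUsageTemplate) // both placeholders are now prefixed with "kubectl"
}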

rootCmd.PersistentFlags().StringVar(&rootInfo.KSCloudBEURLsDep, "environment", "", envFlagUsage)
rootCmd.PersistentFlags().StringVar(&rootInfo.KSCloudBEURLs, "env", "", envFlagUsage)
rootCmd.PersistentFlags().MarkDeprecated("environment", "use 'env' instead")
@@ -71,7 +81,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {

// Supported commands
rootCmd.AddCommand(scan.GetScanCommand(ks))
rootCmd.AddCommand(download.GeDownloadCmd(ks))
rootCmd.AddCommand(download.GetDownloadCmd(ks))
rootCmd.AddCommand(delete.GetDeleteCmd(ks))
rootCmd.AddCommand(list.GetListCmd(ks))
rootCmd.AddCommand(submit.GetSubmitCmd(ks))

@@ -1,6 +1,7 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
@@ -18,28 +19,28 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
controlExample = `
|
||||
controlExample = fmt.Sprintf(`
|
||||
# Scan the 'privileged container' control
|
||||
kubescape scan control "privileged container"
|
||||
%[1]s scan control "privileged container"
|
||||
|
||||
# Scan list of controls separated with a comma
|
||||
kubescape scan control "privileged container","HostPath mount"
|
||||
%[1]s scan control "privileged container","HostPath mount"
|
||||
|
||||
# Scan list of controls using the control ID separated with a comma
|
||||
kubescape scan control C-0058,C-0057
|
||||
%[1]s scan control C-0058,C-0057
|
||||
|
||||
Run 'kubescape list controls' for the list of supported controls
|
||||
Run '%[1]s list controls' for the list of supported controls
|
||||
|
||||
Control documentation:
|
||||
https://hub.armosec.io/docs/controls
|
||||
`
|
||||
`, cautils.ExecName())
|
||||
)
|
||||
|
||||
// controlCmd represents the control command
|
||||
func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "control <control names list>/<control ids list>",
|
||||
Short: "The controls you wish to use. Run 'kubescape list controls' for the list of supported controls",
|
||||
Short: fmt.Sprintf("The controls you wish to use. Run '%[1]s list controls' for the list of supported controls", cautils.ExecName()),
|
||||
Example: controlExample,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
@@ -67,7 +68,7 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
|
||||
|
||||
if len(args) == 0 {
|
||||
scanInfo.ScanAll = true
|
||||
} else { // expected control or list of control sepparated by ","
|
||||
} else { // expected control or list of control separated by ","
|
||||
|
||||
// Read controls from input args
|
||||
scanInfo.SetPolicyIdentifiers(strings.Split(args[0], ","), apisv1.KindControl)
|
||||
@@ -96,11 +97,12 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
|
||||
return err
|
||||
}
|
||||
|
||||
results, err := ks.Scan(scanInfo)
|
||||
ctx := context.TODO()
|
||||
results, err := ks.Scan(ctx, scanInfo)
|
||||
if err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
if err := results.HandleResults(); err != nil {
|
||||
if err := results.HandleResults(ctx); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
if !scanInfo.VerboseMode {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -14,30 +15,31 @@ import (
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
"github.com/kubescape/kubescape/v2/core/meta"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
frameworkExample = `
|
||||
frameworkExample = fmt.Sprintf(`
|
||||
# Scan all frameworks
|
||||
kubescape scan framework all
|
||||
%[1]s scan framework all
|
||||
|
||||
# Scan the NSA framework
|
||||
kubescape scan framework nsa
|
||||
%[1]s scan framework nsa
|
||||
|
||||
# Scan the NSA and MITRE framework
|
||||
kubescape scan framework nsa,mitre
|
||||
%[1]s scan framework nsa,mitre
|
||||
|
||||
# Scan all frameworks
|
||||
kubescape scan framework all
|
||||
%[1]s scan framework all
|
||||
|
||||
# Scan kubernetes YAML manifest files (single file or glob)
|
||||
kubescape scan framework nsa .
|
||||
%[1]s scan framework nsa .
|
||||
|
||||
Run 'kubescape list frameworks' for the list of supported frameworks
|
||||
`
|
||||
Run '%[1]s list frameworks' for the list of supported frameworks
|
||||
`, cautils.ExecName())
|
||||
|
||||
ErrUnknownSeverity = errors.New("unknown severity")
|
||||
)
|
||||
@@ -46,7 +48,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
|
||||
|
||||
return &cobra.Command{
|
||||
Use: "framework <framework names list> [`<glob pattern>`/`-`] [flags]",
|
||||
Short: "The framework you wish to use. Run 'kubescape list frameworks' for the list of supported frameworks",
|
||||
Short: fmt.Sprintf("The framework you wish to use. Run '%[1]s list frameworks' for the list of supported frameworks", cautils.ExecName()),
|
||||
Example: frameworkExample,
|
||||
Long: "Execute a scan on a running Kubernetes cluster or `yaml`/`json` files (use glob) or `-` for stdin",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -71,6 +73,9 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
|
||||
}
|
||||
scanInfo.FrameworkScan = true
|
||||
|
||||
// We do not scan all frameworks by default when triggering scan from the CLI
|
||||
scanInfo.ScanAll = false
|
||||
|
||||
var frameworks []string
|
||||
|
||||
if len(args) == 0 { // scan all frameworks
|
||||
@@ -80,11 +85,12 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
|
||||
frameworks = strings.Split(args[0], ",")
|
||||
if cautils.StringInSlice(frameworks, "all") != cautils.ValueNotFound {
|
||||
scanInfo.ScanAll = true
|
||||
frameworks = []string{}
|
||||
frameworks = getter.NativeFrameworks
|
||||
}
|
||||
if len(args) > 1 {
|
||||
if len(args[1:]) == 0 || args[1] != "-" {
|
||||
scanInfo.InputPatterns = args[1:]
|
||||
logger.L().Debug("List of input files", helpers.Interface("patterns", scanInfo.InputPatterns))
|
||||
} else { // store stdin to file - do NOT move to separate function !!
|
||||
tempFile, err := os.CreateTemp(".", "tmp-kubescape*.yaml")
|
||||
if err != nil {
|
||||
@@ -103,12 +109,13 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
|
||||
|
||||
scanInfo.SetPolicyIdentifiers(frameworks, apisv1.KindFramework)
|
||||
|
||||
results, err := ks.Scan(scanInfo)
|
||||
ctx := context.TODO()
|
||||
results, err := ks.Scan(ctx, scanInfo)
|
||||
if err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
|
||||
if err = results.HandleResults(); err != nil {
|
||||
if err = results.HandleResults(ctx); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
if !scanInfo.VerboseMode {
|
||||
@@ -161,14 +168,14 @@ func countersExceedSeverityThreshold(severityCounters reportsummary.ISeverityCou
|
||||
}
|
||||
|
||||
// terminateOnExceedingSeverity terminates the application on exceeding severity
|
||||
func terminateOnExceedingSeverity(scanInfo *cautils.ScanInfo, l logger.ILogger) {
|
||||
func terminateOnExceedingSeverity(scanInfo *cautils.ScanInfo, l helpers.ILogger) {
|
||||
l.Fatal("result exceeds severity threshold", helpers.String("set severity threshold", scanInfo.FailThresholdSeverity))
|
||||
}
|
||||
|
||||
// enforceSeverityThresholds ensures that the scan results are below the defined severity threshold
|
||||
//
|
||||
// The function forces the application to terminate with an exit code 1 if at least one control failed control that exceeds the set severity threshold
|
||||
func enforceSeverityThresholds(severityCounters reportsummary.ISeverityCounters, scanInfo *cautils.ScanInfo, onExceed func(*cautils.ScanInfo, logger.ILogger)) {
|
||||
func enforceSeverityThresholds(severityCounters reportsummary.ISeverityCounters, scanInfo *cautils.ScanInfo, onExceed func(*cautils.ScanInfo, helpers.ILogger)) {
|
||||
// If a severity threshold is not set, we don’t need to enforce it
|
||||
if scanInfo.FailThresholdSeverity == "" {
|
||||
return
|
||||
|
||||
@@ -3,32 +3,33 @@ package scan
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
"github.com/kubescape/kubescape/v2/core/meta"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var scanCmdExamples = `
|
||||
var scanCmdExamples = fmt.Sprintf(`
|
||||
Scan command is for scanning an existing cluster or kubernetes manifest files based on pre-defined frameworks
|
||||
|
||||
# Scan current cluster with all frameworks
|
||||
kubescape scan --enable-host-scan --verbose
|
||||
%[1]s scan --enable-host-scan --verbose
|
||||
|
||||
# Scan kubernetes YAML manifest files
|
||||
kubescape scan .
|
||||
%[1]s scan .
|
||||
|
||||
# Scan and save the results in the JSON format
|
||||
kubescape scan --format json --output results.json --format-version=v2
|
||||
%[1]s scan --format json --output results.json --format-version=v2
|
||||
|
||||
# Display all resources
|
||||
kubescape scan --verbose
|
||||
%[1]s scan --verbose
|
||||
|
||||
# Scan different clusters from the kubectl context
|
||||
kubescape scan --kube-context <kubernetes context>
|
||||
|
||||
`
|
||||
%[1]s scan --kube-context <kubernetes context>
|
||||
`, cautils.ExecName())
|
||||
|
||||
func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
var scanInfo cautils.ScanInfo
|
||||
@@ -42,7 +43,6 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
if args[0] != "framework" && args[0] != "control" {
|
||||
scanInfo.ScanAll = true
|
||||
return getFrameworkCmd(ks, &scanInfo).RunE(cmd, append([]string{"all"}, args...))
|
||||
}
|
||||
}
|
||||
@@ -51,8 +51,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
if len(args) == 0 {
|
||||
scanInfo.ScanAll = true
|
||||
return getFrameworkCmd(ks, &scanInfo).RunE(cmd, []string{"all"})
|
||||
return getFrameworkCmd(ks, &scanInfo).RunE(cmd, []string{strings.Join(getter.NativeFrameworks, ",")})
|
||||
}
|
||||
return nil
|
||||
},
|
||||
@@ -87,7 +86,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.HostSensorYamlPath, "host-scan-yaml", "", "Override default host scanner DaemonSet. Use this flag cautiously")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.FormatVersion, "format-version", "v1", "Output object can be different between versions, this is for maintaining backward and forward compatibility. Supported:'v1'/'v2'")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.FormatVersion, "format-version", "v2", "Output object can be different between versions, this is for maintaining backward and forward compatibility. Supported:'v1'/'v2'")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.CustomClusterName, "cluster-name", "", "Set the custom name of the cluster. Not same as the kube-context flag")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Submit the scan results to Kubescape SaaS where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.OmitRawResources, "omit-raw-resources", "", false, "Omit raw resources from the output. By default the raw resources are included in the output")
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"context"
|
||||
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
@@ -160,7 +161,7 @@ func Test_enforceSeverityThresholds(t *testing.T) {
|
||||
want := tc.Want
|
||||
|
||||
got := false
|
||||
onExceed := func(*cautils.ScanInfo, logger.ILogger) {
|
||||
onExceed := func(*cautils.ScanInfo, helpers.ILogger) {
|
||||
got = true
|
||||
}
|
||||
|
||||
@@ -193,6 +194,7 @@ func (l *spyLogger) GetLevel() string { return ""
|
||||
func (l *spyLogger) SetWriter(w *os.File) {}
|
||||
func (l *spyLogger) GetWriter() *os.File { return &os.File{} }
|
||||
func (l *spyLogger) LoggerName() string { return "" }
|
||||
func (l *spyLogger) Ctx(_ context.Context) helpers.ILogger { return l }
|
||||
|
||||
func (l *spyLogger) Fatal(msg string, details ...helpers.IDetails) {
|
||||
firstDetail := details[0]
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package submit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
@@ -26,7 +27,7 @@ func getExceptionsCmd(ks meta.IKubescape, submitInfo *metav1.Submit) *cobra.Comm
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
|
||||
if err := ks.SubmitExceptions(&submitInfo.Credentials, args[0]); err != nil {
|
||||
if err := ks.SubmitExceptions(context.TODO(), &submitInfo.Credentials, args[0]); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
},
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package submit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
@@ -19,13 +20,13 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
rbacExamples = `
|
||||
rbacExamples = fmt.Sprintf(`
|
||||
# Submit cluster's Role-Based Access Control(RBAC)
|
||||
kubescape submit rbac
|
||||
%[1]s submit rbac
|
||||
|
||||
# Submit cluster's Role-Based Access Control(RBAC) with account ID
|
||||
kubescape submit rbac --account <account-id>
|
||||
`
|
||||
%[1]s submit rbac --account <account-id>
|
||||
`, cautils.ExecName())
|
||||
)
|
||||
|
||||
// getRBACCmd represents the RBAC command
|
||||
@@ -36,7 +37,7 @@ func getRBACCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
|
||||
Example: rbacExamples,
|
||||
Short: "Submit cluster's Role-Based Access Control(RBAC)",
|
||||
Long: ``,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
RunE: func(_ *cobra.Command, args []string) error {
|
||||
|
||||
if err := flagValidationSubmit(submitInfo); err != nil {
|
||||
return err
|
||||
@@ -51,7 +52,7 @@ func getRBACCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
|
||||
}
|
||||
|
||||
if clusterConfig.GetAccountID() == "" {
|
||||
return fmt.Errorf("account ID is not set, run 'kubescape submit rbac --account <account-id>'")
|
||||
return fmt.Errorf("account ID is not set, run '%[1]s submit rbac --account <account-id>'", cautils.ExecName())
|
||||
}
|
||||
|
||||
// list RBAC
|
||||
@@ -66,7 +67,7 @@ func getRBACCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
|
||||
Reporter: r,
|
||||
}
|
||||
|
||||
if err := ks.Submit(submitInterfaces); err != nil {
|
||||
if err := ks.Submit(context.TODO(), submitInterfaces); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
package submit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
@@ -50,7 +52,7 @@ func (resultsObject *ResultsObject) ListAllResources() (map[string]workloadinter
|
||||
|
||||
func getResultsCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
|
||||
var resultsCmd = &cobra.Command{
|
||||
Use: "results <json file>\nExample:\n$ kubescape submit results path/to/results.json --format-version v2",
|
||||
Use: fmt.Sprintf("results <json file>\nExample:\n$ %[1]s submit results path/to/results.json --format-version v2", cautils.ExecName()),
|
||||
Short: "Submit a pre scanned results file. The file must be in json format",
|
||||
Long: ``,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -81,13 +83,13 @@ func getResultsCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
|
||||
Reporter: r,
|
||||
}
|
||||
|
||||
if err := ks.Submit(submitInterfaces); err != nil {
|
||||
if err := ks.Submit(context.TODO(), submitInterfaces); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
resultsCmd.PersistentFlags().StringVar(&formatVersion, "format-version", "v1", "Output object can be differnet between versions, this is for maintaining backward and forward compatibility. Supported:'v1'/'v2'")
|
||||
resultsCmd.PersistentFlags().StringVar(&formatVersion, "format-version", "v2", "Output object can be different between versions, this is for maintaining backward and forward compatibility. Supported:'v1'/'v2'")
|
||||
|
||||
return resultsCmd
|
||||
}
|
||||
|
||||
@@ -1,18 +1,21 @@
|
||||
package submit
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/meta"
|
||||
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var submitCmdExamples = `
|
||||
var submitCmdExamples = fmt.Sprintf(`
|
||||
# Submit Kubescape scan results file
|
||||
kubescape submit results
|
||||
%[1]s submit results
|
||||
|
||||
# Submit exceptions file to Kubescape SaaS
|
||||
kubescape submit exceptions
|
||||
`
|
||||
%[1]s submit exceptions
|
||||
`, cautils.ExecName())
|
||||
|
||||
func GetSubmitCmd(ks meta.IKubescape) *cobra.Command {
|
||||
var submitInfo metav1.Submit
|
||||
|
||||
@@ -5,6 +5,7 @@ package update
|
||||
// kubescape update
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
|
||||
@@ -13,11 +14,17 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var updateCmdExamples = fmt.Sprintf(`
|
||||
# Update to the latest kubescape release
|
||||
%[1]s update
|
||||
`, cautils.ExecName())
|
||||
|
||||
func GetUpdateCmd() *cobra.Command {
|
||||
updateCmd := &cobra.Command{
|
||||
Use: "update",
|
||||
Short: "Update your version",
|
||||
Long: ``,
|
||||
Use: "update",
|
||||
Short: "Update your version",
|
||||
Long: ``,
|
||||
Example: updateCmdExamples,
|
||||
RunE: func(_ *cobra.Command, args []string) error {
|
||||
//Checking the user's version of kubescape to the latest release
|
||||
if cautils.BuildNumber == cautils.LatestReleaseVersion {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package version
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
@@ -14,8 +15,9 @@ func GetVersionCmd() *cobra.Command {
|
||||
Short: "Get current version",
|
||||
Long: ``,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, "", "", "version"))
|
||||
ctx := context.TODO()
|
||||
v := cautils.NewIVersionCheckHandler(ctx)
|
||||
v.CheckLatestVersion(ctx, cautils.NewVersionCheckRequest(cautils.BuildNumber, "", "", "version"))
|
||||
fmt.Fprintf(os.Stdout,
|
||||
"Your current version is: %s [git enabled in build: %t]\n",
|
||||
cautils.BuildNumber,
|
||||
|
||||
@@ -70,7 +70,7 @@ type ITenantConfig interface {
|
||||
// set
|
||||
SetTenant() error
|
||||
UpdateCachedConfig() error
|
||||
DeleteCachedConfig() error
|
||||
DeleteCachedConfig(ctx context.Context) error
|
||||
|
||||
// getters
|
||||
GetContextName() string
|
||||
@@ -175,9 +175,9 @@ func (lc *LocalConfig) UpdateCachedConfig() error {
|
||||
return updateConfigFile(lc.configObj)
|
||||
}
|
||||
|
||||
func (lc *LocalConfig) DeleteCachedConfig() error {
|
||||
func (lc *LocalConfig) DeleteCachedConfig(ctx context.Context) error {
|
||||
if err := DeleteConfigFile(); err != nil {
|
||||
logger.L().Warning(err.Error())
|
||||
logger.L().Ctx(ctx).Warning(err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -330,12 +330,12 @@ func (c *ClusterConfig) UpdateCachedConfig() error {
|
||||
return updateConfigFile(c.configObj)
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) DeleteCachedConfig() error {
|
||||
func (c *ClusterConfig) DeleteCachedConfig(ctx context.Context) error {
|
||||
if err := c.deleteConfigMap(); err != nil {
|
||||
logger.L().Warning(err.Error())
|
||||
logger.L().Ctx(ctx).Warning(err.Error())
|
||||
}
|
||||
if err := DeleteConfigFile(); err != nil {
|
||||
logger.L().Warning(err.Error())
|
||||
logger.L().Ctx(ctx).Warning(err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
@@ -36,7 +38,7 @@ type OPASessionObj struct {
|
||||
OmitRawResources bool // omit raw resources from output
|
||||
}
|
||||
|
||||
func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources, scanInfo *ScanInfo) *OPASessionObj {
|
||||
func NewOPASessionObj(ctx context.Context, frameworks []reporthandling.Framework, k8sResources *K8SResources, scanInfo *ScanInfo) *OPASessionObj {
|
||||
return &OPASessionObj{
|
||||
Report: &reporthandlingv2.PostureReport{},
|
||||
Policies: frameworks,
|
||||
@@ -48,7 +50,7 @@ func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SRe
|
||||
ResourceToControlsMap: make(map[string][]string),
|
||||
ResourceSource: make(map[string]reporthandling.Source),
|
||||
SessionID: scanInfo.ScanID,
|
||||
Metadata: scanInfoToScanMetadata(scanInfo),
|
||||
Metadata: scanInfoToScanMetadata(ctx, scanInfo),
|
||||
OmitRawResources: scanInfo.OmitRawResources,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/armosec/utils-go/boolutils"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
)
|
||||
|
||||
func NewPolicies() *Policies {
|
||||
@@ -29,7 +30,16 @@ func (policies *Policies) Set(frameworks []reporthandling.Framework, version str
|
||||
if len(compatibleRules) > 0 {
|
||||
frameworks[i].Controls[j].Rules = compatibleRules
|
||||
policies.Controls[frameworks[i].Controls[j].ControlID] = frameworks[i].Controls[j]
|
||||
} else { // if the control type is manual review, add it to the list of controls
|
||||
actionRequiredStr := frameworks[i].Controls[j].GetActionRequiredAttribute()
|
||||
if actionRequiredStr == "" {
|
||||
continue
|
||||
}
|
||||
if actionRequiredStr == string(apis.SubStatusManualReview) {
|
||||
policies.Controls[frameworks[i].Controls[j].ControlID] = frameworks[i].Controls[j]
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
spinnerpkg "github.com/briandowns/spinner"
|
||||
"github.com/fatih/color"
|
||||
"github.com/mattn/go-isatty"
|
||||
"github.com/schollz/progressbar/v3"
|
||||
)
|
||||
|
||||
var FailureDisplay = color.New(color.Bold, color.FgHiRed).FprintfFunc()
|
||||
@@ -39,3 +40,28 @@ func StopSpinner() {
|
||||
}
|
||||
spinner.Stop()
|
||||
}
|
||||
|
||||
type ProgressHandler struct {
title string
pb *progressbar.ProgressBar
}

func NewProgressHandler(title string) *ProgressHandler {
return &ProgressHandler{title: title}
}

func (p *ProgressHandler) Start(allSteps int) {
if isatty.IsTerminal(os.Stderr.Fd()) {
p.pb = progressbar.Default(int64(allSteps), p.title)
} else {
p.pb = progressbar.DefaultSilent(int64(allSteps), p.title)
}
}

func (p *ProgressHandler) ProgressJob(step int, message string) {
p.pb.Add(step)
p.pb.Describe(message)
}

func (p *ProgressHandler) Stop() {
}
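The new ProgressHandler above wraps github.com/schollz/progressbar/v3 and picks a silent bar when stderr is not a terminal, so progress output does not leak into piped or redirected runs. A hedged usage sketch of the same pattern in isolation (the step names are illustrative, not the actual kubescape scan phases):

package main

import (
	"os"

	"github.com/mattn/go-isatty"
	"github.com/schollz/progressbar/v3"
)

func main() {
	// mirror of the TTY check above: fall back to a silent bar when not interactive
	var pb *progressbar.ProgressBar
	if isatty.IsTerminal(os.Stderr.Fd()) {
		pb = progressbar.Default(3, "scanning")
	} else {
		pb = progressbar.DefaultSilent(3, "scanning")
	}
	for _, step := range []string{"loading policies", "collecting resources", "evaluating"} {
		pb.Describe(step)
		pb.Add(1)
	}
}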
|
||||
@@ -2,6 +2,7 @@ package cautils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
@@ -31,7 +32,7 @@ const (
|
||||
)
|
||||
|
||||
// LoadResourcesFromHelmCharts scans a given path (recursively) for helm charts, renders the templates and returns a map of workloads and a map of chart names
|
||||
func LoadResourcesFromHelmCharts(basePath string) (map[string][]workloadinterface.IMetadata, map[string]string) {
|
||||
func LoadResourcesFromHelmCharts(ctx context.Context, basePath string) (map[string][]workloadinterface.IMetadata, map[string]string) {
|
||||
directories, _ := listDirs(basePath)
|
||||
helmDirectories := make([]string, 0)
|
||||
for _, dir := range directories {
|
||||
@@ -47,7 +48,7 @@ func LoadResourcesFromHelmCharts(basePath string) (map[string][]workloadinterfac
|
||||
if err == nil {
|
||||
wls, errs := chart.GetWorkloadsWithDefaultValues()
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("Rendering of Helm chart template '%s', failed: %v", chart.GetName(), errs))
|
||||
logger.L().Ctx(ctx).Error(fmt.Sprintf("Rendering of Helm chart template '%s', failed: %v", chart.GetName(), errs))
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -63,7 +64,7 @@ func LoadResourcesFromHelmCharts(basePath string) (map[string][]workloadinterfac
|
||||
|
||||
// If the contents at given path is a Kustomize Directory, LoadResourcesFromKustomizeDirectory will
|
||||
// generate yaml files using "Kustomize" & renders a map of workloads from those yaml files
|
||||
func LoadResourcesFromKustomizeDirectory(basePath string) (map[string][]workloadinterface.IMetadata, string) {
|
||||
func LoadResourcesFromKustomizeDirectory(ctx context.Context, basePath string) (map[string][]workloadinterface.IMetadata, string) {
|
||||
isKustomizeDirectory := IsKustomizeDirectory(basePath)
|
||||
isKustomizeFile := IsKustomizeFile(basePath)
|
||||
if ok := isKustomizeDirectory || isKustomizeFile; !ok {
|
||||
@@ -87,7 +88,7 @@ func LoadResourcesFromKustomizeDirectory(basePath string) (map[string][]workload
|
||||
kustomizeDirectoryName := GetKustomizeDirectoryName(newBasePath)
|
||||
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("Rendering yaml from Kustomize failed: %v", errs))
|
||||
logger.L().Ctx(ctx).Error(fmt.Sprintf("Rendering yaml from Kustomize failed: %v", errs))
|
||||
}
|
||||
|
||||
for k, v := range wls {
|
||||
@@ -96,10 +97,10 @@ func LoadResourcesFromKustomizeDirectory(basePath string) (map[string][]workload
|
||||
return sourceToWorkloads, kustomizeDirectoryName
|
||||
}
|
||||
|
||||
func LoadResourcesFromFiles(input, rootPath string) map[string][]workloadinterface.IMetadata {
|
||||
func LoadResourcesFromFiles(ctx context.Context, input, rootPath string) map[string][]workloadinterface.IMetadata {
|
||||
files, errs := listFiles(input)
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("%v", errs))
|
||||
logger.L().Ctx(ctx).Error(fmt.Sprintf("%v", errs))
|
||||
}
|
||||
if len(files) == 0 {
|
||||
return nil
|
||||
@@ -107,7 +108,7 @@ func LoadResourcesFromFiles(input, rootPath string) map[string][]workloadinterfa
|
||||
|
||||
workloads, errs := loadFiles(rootPath, files)
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("%v", errs))
|
||||
logger.L().Ctx(ctx).Error(fmt.Sprintf("%v", errs))
|
||||
}
|
||||
|
||||
return workloads
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -30,7 +31,7 @@ func TestListFiles(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLoadResourcesFromFiles(t *testing.T) {
|
||||
workloads := LoadResourcesFromFiles(onlineBoutiquePath(), "")
|
||||
workloads := LoadResourcesFromFiles(context.TODO(), onlineBoutiquePath(), "")
|
||||
assert.Equal(t, 12, len(workloads))
|
||||
|
||||
for i, w := range workloads {
|
||||
@@ -44,7 +45,7 @@ func TestLoadResourcesFromFiles(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLoadResourcesFromHelmCharts(t *testing.T) {
|
||||
sourceToWorkloads, sourceToChartName := LoadResourcesFromHelmCharts(helmChartPath())
|
||||
sourceToWorkloads, sourceToChartName := LoadResourcesFromHelmCharts(context.TODO(), helmChartPath())
|
||||
assert.Equal(t, 6, len(sourceToWorkloads))
|
||||
|
||||
for file, workloads := range sourceToWorkloads {
|
||||
|
||||
@@ -5,9 +5,10 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/kubescape/opa-utils/gitregostore"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
|
||||
|
||||
"github.com/kubescape/regolibrary/gitregostore"
|
||||
)
|
||||
|
||||
// =======================================================================================================================
|
||||
@@ -105,19 +106,6 @@ func (drp *DownloadReleasedPolicy) SetRegoObjects() error {
|
||||
return drp.gs.SetRegoObjects()
|
||||
}
|
||||
|
||||
func isNativeFramework(framework string) bool {
|
||||
return contains(NativeFrameworks, framework)
|
||||
}
|
||||
|
||||
func contains(s []string, str string) bool {
|
||||
for _, v := range s {
|
||||
if strings.EqualFold(v, str) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (drp *DownloadReleasedPolicy) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
|
||||
exceptions, err := drp.gs.GetSystemPostureExceptionPolicies()
|
||||
if err != nil {
|
||||
|
||||
core/cautils/getter/downloadreleasedpolicy_test.go (new file, 170 lines)
@@ -0,0 +1,170 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestReleasedPolicy(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
p := NewDownloadReleasedPolicy()
|
||||
|
||||
t.Run("should initialize objects", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// acquire from github or from local fixture
|
||||
hydrateReleasedPolicyFromMock(t, p)
|
||||
|
||||
require.NoError(t, p.SetRegoObjects())
|
||||
|
||||
t.Run("with ListControls", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
controlIDs, err := p.ListControls()
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, controlIDs)
|
||||
|
||||
sampleSize := min(len(controlIDs), 10)
|
||||
|
||||
for _, toPin := range controlIDs[:sampleSize] {
|
||||
// Example of a returned "ID": `C-0154|Ensure_that_the_--client-cert-auth_argument_is_set_to_true|`
|
||||
controlString := toPin
|
||||
parts := strings.Split(controlString, "|")
|
||||
controlID := parts[0]
|
||||
|
||||
t.Run(fmt.Sprintf("with GetControl(%q)", controlID), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctrl, err := p.GetControl(controlID)
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, ctrl)
|
||||
require.Equal(t, controlID, ctrl.ControlID)
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("with unknown GetControl()", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctrl, err := p.GetControl("zork")
|
||||
require.Error(t, err)
|
||||
require.Nil(t, ctrl)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("with GetFrameworks", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
frameworks, err := p.GetFrameworks()
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, frameworks)
|
||||
|
||||
for _, toPin := range frameworks {
|
||||
framework := toPin
|
||||
require.NotEmpty(t, framework)
|
||||
require.NotEmpty(t, framework.Name)
|
||||
|
||||
t.Run(fmt.Sprintf("with GetFramework(%q)", framework.Name), func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fw, err := p.GetFramework(framework.Name)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, fw)
|
||||
|
||||
require.EqualValues(t, framework, *fw)
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("with unknown GetFramework()", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
ctrl, err := p.GetFramework("zork")
|
||||
require.Error(t, err)
|
||||
require.Nil(t, ctrl)
|
||||
})
|
||||
|
||||
t.Run("with ListFrameworks", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
frameworkIDs, err := p.ListFrameworks()
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, frameworkIDs)
|
||||
|
||||
require.Len(t, frameworkIDs, len(frameworks))
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
t.Run("with GetControlsInput", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
controlInputs, err := p.GetControlsInputs("") // NOTE: cluster name currently unused
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, controlInputs)
|
||||
})
|
||||
|
||||
t.Run("with GetAttackTracks", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
attackTracks, err := p.GetAttackTracks()
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, attackTracks)
|
||||
})
|
||||
|
||||
t.Run("with GetExceptions", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
exceptions, err := p.GetExceptions("") // NOTE: cluster name currently unused
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, exceptions)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a > b {
|
||||
return b
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
func hydrateReleasedPolicyFromMock(t testing.TB, p *DownloadReleasedPolicy) {
|
||||
regoFile := testRegoFile("policy")
|
||||
|
||||
if _, err := os.Stat(regoFile); errors.Is(err, fs.ErrNotExist) {
|
||||
// retrieve fixture from latest released policy from github.
|
||||
//
|
||||
// NOTE: to update the mock, just delete the testdata/policy.json file and run the tests again.
|
||||
t.Logf("updating fixture file %q from github", regoFile)
|
||||
|
||||
require.NoError(t, p.SetRegoObjects())
|
||||
require.NotNil(t, p.gs)
|
||||
|
||||
require.NoError(t,
|
||||
SaveInFile(p.gs, regoFile),
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// we have a mock fixture: load this rather than calling github
|
||||
t.Logf("populating rego policy from fixture file %q", regoFile)
|
||||
buf, err := os.ReadFile(regoFile)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t,
|
||||
json.Unmarshal(buf, p.gs),
|
||||
)
|
||||
}
|
||||
|
||||
func testRegoFile(framework string) string {
|
||||
return filepath.Join(currentDir(), "testdata", fmt.Sprintf("%s.json", framework))
|
||||
}
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
@@ -23,7 +22,7 @@ func SaveInFile(policy interface{}, pathStr string) error {
|
||||
err = os.WriteFile(pathStr, encodedData, 0644) //nolint:gosec
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
pathDir := path.Dir(pathStr)
|
||||
pathDir := filepath.Dir(pathStr)
|
||||
// pathDir could contain subdirectories
|
||||
if erm := os.MkdirAll(pathDir, 0755); erm != nil {
|
||||
return erm
|
||||
|
||||
core/cautils/getter/getpoliciesutils_test.go (new file, 68 lines)
@@ -0,0 +1,68 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestGetDefaultPath(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const name = "mine"
|
||||
|
||||
pth := GetDefaultPath(name)
|
||||
require.Equal(t, name, filepath.Base(pth))
|
||||
require.Equal(t, ".kubescape", filepath.Base(filepath.Dir(pth)))
|
||||
}
|
||||
|
||||
func TestSaveInFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
dir, err := os.MkdirTemp(".", "test")
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
_ = os.RemoveAll(dir)
|
||||
}()
|
||||
|
||||
policy := map[string]interface{}{
|
||||
"key": "value",
|
||||
"number": 1.00,
|
||||
}
|
||||
|
||||
t.Run("should save data as JSON (target folder exists)", func(t *testing.T) {
|
||||
target := filepath.Join(dir, "target.json")
|
||||
require.NoError(t, SaveInFile(policy, target))
|
||||
|
||||
buf, err := os.ReadFile(target)
|
||||
require.NoError(t, err)
|
||||
var retrieved interface{}
|
||||
require.NoError(t, json.Unmarshal(buf, &retrieved))
|
||||
|
||||
require.EqualValues(t, policy, retrieved)
|
||||
})
|
||||
|
||||
t.Run("should save data as JSON (new target folder)", func(t *testing.T) {
|
||||
target := filepath.Join(dir, "subdir", "target.json")
|
||||
require.NoError(t, SaveInFile(policy, target))
|
||||
|
||||
buf, err := os.ReadFile(target)
|
||||
require.NoError(t, err)
|
||||
var retrieved interface{}
|
||||
require.NoError(t, json.Unmarshal(buf, &retrieved))
|
||||
|
||||
require.EqualValues(t, policy, retrieved)
|
||||
})
|
||||
|
||||
t.Run("should error", func(t *testing.T) {
|
||||
badPolicy := map[string]interface{}{
|
||||
"key": "value",
|
||||
"number": 1.00,
|
||||
"err": func() {},
|
||||
}
|
||||
target := filepath.Join(dir, "error.json")
|
||||
require.Error(t, SaveInFile(badPolicy, target))
|
||||
})
|
||||
}
|
||||
core/cautils/getter/kscloudapi_mocks_test.go (new file, 273 lines)
@@ -0,0 +1,273 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
|
||||
)
|
||||
|
||||
func mockAttackTracks() []v1alpha1.AttackTrack {
|
||||
return []v1alpha1.AttackTrack{
|
||||
{
|
||||
ApiVersion: "v1",
|
||||
Kind: "track",
|
||||
Metadata: map[string]interface{}{"label": "name"},
|
||||
Spec: v1alpha1.AttackTrackSpecification{
|
||||
Version: "v2",
|
||||
Description: "a mock",
|
||||
Data: v1alpha1.AttackTrackStep{
|
||||
Name: "track1",
|
||||
Description: "mock-step",
|
||||
SubSteps: []v1alpha1.AttackTrackStep{
|
||||
{
|
||||
Name: "track1",
|
||||
Description: "mock-step",
|
||||
Controls: []v1alpha1.IAttackTrackControl{
|
||||
mockControlPtr("control-1"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Controls: []v1alpha1.IAttackTrackControl{
|
||||
mockControlPtr("control-2"),
|
||||
mockControlPtr("control-3"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ApiVersion: "v1",
|
||||
Kind: "track",
|
||||
Metadata: map[string]interface{}{"label": "stuff"},
|
||||
Spec: v1alpha1.AttackTrackSpecification{
|
||||
Version: "v1",
|
||||
Description: "another mock",
|
||||
Data: v1alpha1.AttackTrackStep{
|
||||
Name: "track2",
|
||||
Description: "mock-step2",
|
||||
SubSteps: []v1alpha1.AttackTrackStep{
|
||||
{
|
||||
Name: "track3",
|
||||
Description: "mock-step",
|
||||
Controls: []v1alpha1.IAttackTrackControl{
|
||||
mockControlPtr("control-4"),
|
||||
},
|
||||
},
|
||||
},
|
||||
Controls: []v1alpha1.IAttackTrackControl{
|
||||
mockControlPtr("control-5"),
|
||||
mockControlPtr("control-6"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func mockFrameworks() []reporthandling.Framework {
|
||||
id1s := []string{"control-1", "control-2"}
|
||||
id2s := []string{"control-3", "control-4"}
|
||||
id3s := []string{"control-5", "control-6"}
|
||||
|
||||
return []reporthandling.Framework{
|
||||
{
|
||||
PortalBase: armotypes.PortalBase{
|
||||
Name: "mock-1",
|
||||
},
|
||||
CreationTime: "now",
|
||||
Description: "mock-1",
|
||||
Controls: []reporthandling.Control{
|
||||
mockControl("control-1"),
|
||||
mockControl("control-2"),
|
||||
},
|
||||
ControlsIDs: &id1s,
|
||||
SubSections: map[string]*reporthandling.FrameworkSubSection{
|
||||
"section1": {
|
||||
ID: "section-id",
|
||||
ControlIDs: id1s,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
PortalBase: armotypes.PortalBase{
|
||||
Name: "mock-2",
|
||||
},
|
||||
CreationTime: "then",
|
||||
Description: "mock-2",
|
||||
Controls: []reporthandling.Control{
|
||||
mockControl("control-3"),
|
||||
mockControl("control-4"),
|
||||
},
|
||||
ControlsIDs: &id2s,
|
||||
SubSections: map[string]*reporthandling.FrameworkSubSection{
|
||||
"section2": {
|
||||
ID: "section-id",
|
||||
ControlIDs: id2s,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
PortalBase: armotypes.PortalBase{
|
||||
Name: "nsa",
|
||||
},
|
||||
CreationTime: "tomorrow",
|
||||
Description: "nsa mock",
|
||||
Controls: []reporthandling.Control{
|
||||
mockControl("control-5"),
|
||||
mockControl("control-6"),
|
||||
},
|
||||
ControlsIDs: &id3s,
|
||||
SubSections: map[string]*reporthandling.FrameworkSubSection{
|
||||
"section2": {
|
||||
ID: "section-id",
|
||||
ControlIDs: id3s,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func mockControl(controlID string) reporthandling.Control {
|
||||
return reporthandling.Control{
|
||||
ControlID: controlID,
|
||||
}
|
||||
}
|
||||
func mockControlPtr(controlID string) *reporthandling.Control {
|
||||
val := mockControl(controlID)
|
||||
|
||||
return &val
|
||||
}
|
||||
|
||||
func mockExceptions() []armotypes.PostureExceptionPolicy {
|
||||
return []armotypes.PostureExceptionPolicy{
|
||||
{
|
||||
PolicyType: "postureExceptionPolicy",
|
||||
CreationTime: "now",
|
||||
Actions: []armotypes.PostureExceptionPolicyActions{
|
||||
"alertOnly",
|
||||
},
|
||||
Resources: []armotypes.PortalDesignator{
|
||||
{
|
||||
DesignatorType: "Attributes",
|
||||
Attributes: map[string]string{
|
||||
"kind": "Pod",
|
||||
"name": "coredns-[A-Za-z0-9]+-[A-Za-z0-9]+",
|
||||
"namespace": "kube-system",
|
||||
},
|
||||
},
|
||||
{
|
||||
DesignatorType: "Attributes",
|
||||
Attributes: map[string]string{
|
||||
"kind": "Pod",
|
||||
"name": "etcd-.*",
|
||||
"namespace": "kube-system",
|
||||
},
|
||||
},
|
||||
},
|
||||
PosturePolicies: []armotypes.PosturePolicy{
|
||||
{
|
||||
FrameworkName: "MITRE",
|
||||
ControlID: "C-.*",
|
||||
},
|
||||
{
|
||||
FrameworkName: "another-framework",
|
||||
ControlID: "a regexp",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
PolicyType: "postureExceptionPolicy",
|
||||
CreationTime: "then",
|
||||
Actions: []armotypes.PostureExceptionPolicyActions{
|
||||
"alertOnly",
|
||||
},
|
||||
Resources: []armotypes.PortalDesignator{
|
||||
{
|
||||
DesignatorType: "Attributes",
|
||||
Attributes: map[string]string{
|
||||
"kind": "Deployment",
|
||||
"name": "my-regexp",
|
||||
},
|
||||
},
|
||||
{
|
||||
DesignatorType: "Attributes",
|
||||
Attributes: map[string]string{
|
||||
"kind": "Secret",
|
||||
"name": "another-regexp",
|
||||
},
|
||||
},
|
||||
},
|
||||
PosturePolicies: []armotypes.PosturePolicy{
|
||||
{
|
||||
FrameworkName: "yet-another-framework",
|
||||
ControlID: "a regexp",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func mockTenantResponse() *TenantResponse {
|
||||
return &TenantResponse{
|
||||
TenantID: "id",
|
||||
Token: "token",
|
||||
Expires: "expiry-time",
|
||||
AdminMail: "admin@example.com",
|
||||
}
|
||||
}
|
||||
|
||||
func mockCustomerConfig(cluster, scope string) func() *armotypes.CustomerConfig {
|
||||
if cluster == "" {
|
||||
cluster = "my-cluster"
|
||||
}
|
||||
|
||||
if scope == "" {
|
||||
scope = "default"
|
||||
}
|
||||
|
||||
return func() *armotypes.CustomerConfig {
|
||||
return &armotypes.CustomerConfig{
|
||||
Name: "user",
|
||||
Attributes: map[string]interface{}{
|
||||
"label": "value",
|
||||
},
|
||||
Scope: armotypes.PortalDesignator{
|
||||
DesignatorType: "Attributes",
|
||||
Attributes: map[string]string{
|
||||
"kind": "Cluster",
|
||||
"name": cluster,
|
||||
"scope": scope,
|
||||
},
|
||||
},
|
||||
Settings: armotypes.Settings{
|
||||
PostureControlInputs: map[string][]string{
|
||||
"inputs-1": {"x1", "y2"},
|
||||
"inputs-2": {"x2", "y2"},
|
||||
},
|
||||
PostureScanConfig: armotypes.PostureScanConfig{
|
||||
ScanFrequency: armotypes.ScanFrequency("weekly"),
|
||||
},
|
||||
VulnerabilityScanConfig: armotypes.VulnerabilityScanConfig{
|
||||
ScanFrequency: armotypes.ScanFrequency("daily"),
|
||||
CriticalPriorityThreshold: 1,
|
||||
HighPriorityThreshold: 2,
|
||||
MediumPriorityThreshold: 3,
|
||||
ScanNewDeployment: true,
|
||||
AllowlistRegistries: []string{"a", "b"},
|
||||
BlocklistRegistries: []string{"c", "d"},
|
||||
},
|
||||
SlackConfigurations: armotypes.SlackSettings{
|
||||
Token: "slack-token",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func mockLoginResponse() *FeLoginResponse {
|
||||
return &FeLoginResponse{
|
||||
Token: "access-token",
|
||||
RefreshToken: "refresh-token",
|
||||
Expires: "expiry-time",
|
||||
ExpiresIn: 123,
|
||||
}
|
||||
}
|
||||
core/cautils/getter/kscloudapi_test.go (new file, 1096 lines; diff suppressed because it is too large)
@@ -8,7 +8,7 @@ import (
"strings"
)

var NativeFrameworks = []string{"nsa", "mitre", "armobest", "devopsbest"}
var NativeFrameworks = []string{"allcontrols", "nsa", "mitre"}

func (api *KSCloudAPI) getFrameworkURL(frameworkName string) string {
u := url.URL{}
@@ -184,3 +184,16 @@ func parseHost(host string) (string, string) {
// default scheme
return "https", strings.Replace(host, "https://", "", 1)
}

func isNativeFramework(framework string) bool {
return contains(NativeFrameworks, framework)
}

func contains(s []string, str string) bool {
for _, v := range s {
if strings.EqualFold(v, str) {
return true
}
}
return false
}

@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -386,7 +387,7 @@ func TestLoadPolicy(t *testing.T) {
|
||||
}
|
||||
|
||||
func testFrameworkFile(framework string) string {
|
||||
return filepath.Join(".", "testdata", fmt.Sprintf("%s.json", framework))
|
||||
return filepath.Join(currentDir(), "testdata", fmt.Sprintf("%s.json", framework))
|
||||
}
|
||||
|
||||
func writeTempJSONControlInputs(t testing.TB) (string, map[string][]string) {
|
||||
@@ -407,3 +408,9 @@ func writeTempJSONControlInputs(t testing.TB) (string, map[string][]string) {
|
||||
|
||||
return fileName, mock
|
||||
}
|
||||
|
||||
func currentDir() string {
|
||||
_, filename, _, _ := runtime.Caller(1)
|
||||
|
||||
return filepath.Dir(filename)
|
||||
}
|
||||
|
||||
core/cautils/getter/testdata/policy.json (vendored new file, 25821 lines; diff suppressed because one or more lines are too long)
core/cautils/krewutils.go (new file, 20 lines)
@@ -0,0 +1,20 @@
package cautils

import (
"os"
"path/filepath"
"strings"
)

// ExecName returns the correct name to use in examples depending on how kubescape is invoked
func ExecName() string {
n := "kubescape"
if IsKrewPlugin() {
return "kubectl " + n
}
return n
}

func IsKrewPlugin() bool {
return strings.HasPrefix(filepath.Base(os.Args[0]), "kubectl-")
}
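The two helpers above are the basis for the %[1]s example rewrites throughout this change set: IsKrewPlugin inspects the invoked binary name, since krew installs plugins as "kubectl-<name>", and ExecName picks the matching prefix. A self-contained sketch of the same detection logic (standalone, not an import of the kubescape package):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	// same check as IsKrewPlugin: krew-installed plugins run as "kubectl-<name>"
	name := "kubescape"
	if strings.HasPrefix(filepath.Base(os.Args[0]), "kubectl-") {
		name = "kubectl " + name
	}
	fmt.Println("examples will be rendered with:", name)
}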
@@ -3,7 +3,6 @@ package cautils
|
||||
import (
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
helpersv1 "github.com/kubescape/opa-utils/reporthandling/helpers/v1"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
@@ -72,9 +71,9 @@ func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, con
|
||||
}
|
||||
|
||||
rulev1 := rulesv1[rulev2.GetName()]
|
||||
status := rulev2.GetStatus(&helpersv1.Filters{FrameworkNames: []string{frameworkName}})
|
||||
status := rulev2.GetStatus(nil)
|
||||
|
||||
if status.IsFailed() || status.IsExcluded() {
|
||||
if status.IsFailed() {
|
||||
|
||||
// rule response
|
||||
ruleResponse := reporthandling.RuleResponse{}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
@@ -8,13 +9,12 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
|
||||
giturl "github.com/kubescape/go-git-url"
|
||||
"github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
|
||||
@@ -112,7 +112,7 @@ type ScanInfo struct {
|
||||
View string // Display all of the input resources and not only failed resources
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
FormatVersion string // Output object can be differnet between versions, this is for testing and backward compatibility
|
||||
FormatVersion string // Output object can be different between versions, this is for testing and backward compatibility
|
||||
CustomClusterName string // Set the custom name of the cluster
|
||||
ExcludedNamespaces string // used for host scanner namespace
|
||||
IncludeNamespaces string //
|
||||
@@ -141,16 +141,16 @@ type Getters struct {
|
||||
AttackTracksGetter getter.IAttackTracksGetter
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) Init() {
|
||||
func (scanInfo *ScanInfo) Init(ctx context.Context) {
|
||||
scanInfo.setUseFrom()
|
||||
scanInfo.setUseArtifactsFrom()
|
||||
scanInfo.setUseArtifactsFrom(ctx)
|
||||
if scanInfo.ScanID == "" {
|
||||
scanInfo.ScanID = uuid.NewString()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) setUseArtifactsFrom() {
|
||||
func (scanInfo *ScanInfo) setUseArtifactsFrom(ctx context.Context) {
|
||||
if scanInfo.UseArtifactsFrom == "" {
|
||||
return
|
||||
}
|
||||
@@ -164,7 +164,7 @@ func (scanInfo *ScanInfo) setUseArtifactsFrom() {
|
||||
// set frameworks files
|
||||
files, err := os.ReadDir(scanInfo.UseArtifactsFrom)
|
||||
if err != nil {
|
||||
logger.L().Fatal("failed to read files from directory", helpers.String("dir", scanInfo.UseArtifactsFrom), helpers.Error(err))
|
||||
logger.L().Ctx(ctx).Fatal("failed to read files from directory", helpers.String("dir", scanInfo.UseArtifactsFrom), helpers.Error(err))
|
||||
}
|
||||
framework := &reporthandling.Framework{}
|
||||
for _, f := range files {
|
||||
@@ -223,7 +223,7 @@ func (scanInfo *ScanInfo) contains(policyName string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func scanInfoToScanMetadata(scanInfo *ScanInfo) *reporthandlingv2.Metadata {
|
||||
func scanInfoToScanMetadata(ctx context.Context, scanInfo *ScanInfo) *reporthandlingv2.Metadata {
|
||||
metadata := &reporthandlingv2.Metadata{}
|
||||
|
||||
metadata.ScanMetadata.Format = scanInfo.Format
|
||||
@@ -277,7 +277,7 @@ func scanInfoToScanMetadata(scanInfo *ScanInfo) *reporthandlingv2.Metadata {
|
||||
|
||||
}
|
||||
|
||||
setContextMetadata(&metadata.ContextMetadata, inputFiles)
|
||||
setContextMetadata(ctx, &metadata.ContextMetadata, inputFiles)
|
||||
|
||||
return metadata
|
||||
}
|
||||
@@ -321,7 +321,7 @@ func GetScanningContext(input string) ScanningContext {
|
||||
// dir/glob
|
||||
return ContextDir
|
||||
}
|
||||
func setContextMetadata(contextMetadata *reporthandlingv2.ContextMetadata, input string) {
|
||||
func setContextMetadata(ctx context.Context, contextMetadata *reporthandlingv2.ContextMetadata, input string) {
|
||||
switch GetScanningContext(input) {
|
||||
case ContextCluster:
|
||||
contextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{
|
||||
@@ -331,7 +331,7 @@ func setContextMetadata(contextMetadata *reporthandlingv2.ContextMetadata, input
|
||||
// url
|
||||
context, err := metadataGitURL(input)
|
||||
if err != nil {
|
||||
logger.L().Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
|
||||
logger.L().Ctx(ctx).Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
|
||||
}
|
||||
contextMetadata.RepoContextMetadata = context
|
||||
case ContextDir:
|
||||
@@ -348,7 +348,7 @@ func setContextMetadata(contextMetadata *reporthandlingv2.ContextMetadata, input
|
||||
// local
|
||||
context, err := metadataGitLocal(input)
|
||||
if err != nil {
|
||||
logger.L().Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
|
||||
logger.L().Ctx(ctx).Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
|
||||
}
|
||||
contextMetadata.RepoContextMetadata = context
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
package cautils

import (
"context"
"testing"

reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
@@ -10,7 +11,7 @@ import (
func TestSetContextMetadata(t *testing.T) {
{
ctx := reporthandlingv2.ContextMetadata{}
setContextMetadata(&ctx, "")
setContextMetadata(context.TODO(), &ctx, "")

assert.NotNil(t, ctx.ClusterContextMetadata)
assert.Nil(t, ctx.DirectoryContextMetadata)
@@ -1,6 +1,7 @@
package cautils

import (
"context"
"encoding/json"
"fmt"
"net/http"
@@ -10,7 +11,7 @@ import (
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v2/core/cautils/getter"

"go.opentelemetry.io/otel"
"golang.org/x/mod/semver"
)

@@ -25,12 +26,12 @@ var LatestReleaseVersion string
const UnknownBuildNumber = "unknown"

type IVersionCheckHandler interface {
CheckLatestVersion(*VersionCheckRequest) error
CheckLatestVersion(context.Context, *VersionCheckRequest) error
}

func NewIVersionCheckHandler() IVersionCheckHandler {
func NewIVersionCheckHandler(ctx context.Context) IVersionCheckHandler {
if BuildNumber == "" {
logger.L().Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
logger.L().Ctx(ctx).Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
}

if v, ok := os.LookupEnv(CLIENT_ENV); ok && v != "" {
@@ -98,15 +99,17 @@ func NewVersionCheckRequest(buildNumber, frameworkName, frameworkVersion, scanni
}
}

func (v *VersionCheckHandlerMock) CheckLatestVersion(versionData *VersionCheckRequest) error {
func (v *VersionCheckHandlerMock) CheckLatestVersion(_ context.Context, _ *VersionCheckRequest) error {
logger.L().Info("Skipping version check")
return nil
}

func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckRequest) error {
func (v *VersionCheckHandler) CheckLatestVersion(ctx context.Context, versionData *VersionCheckRequest) error {
ctx, span := otel.Tracer("").Start(ctx, "versionCheckHandler.CheckLatestVersion")
defer span.End()
defer func() {
if err := recover(); err != nil {
logger.L().Warning("failed to get latest version", helpers.Interface("error", err))
logger.L().Ctx(ctx).Warning("failed to get latest version", helpers.Interface("error", err))
}
}()

@@ -119,7 +122,7 @@ func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckReques

if latestVersion.ClientUpdate != "" {
if BuildNumber != "" && semver.Compare(BuildNumber, LatestReleaseVersion) == -1 {
logger.L().Warning(warningMessage(LatestReleaseVersion))
logger.L().Ctx(ctx).Warning(warningMessage(LatestReleaseVersion))
}
}

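CheckLatestVersion now receives the context, opens an OpenTelemetry span, and routes the recover() warning through the context-aware logger. A small sketch of that wrapper shape under the same assumptions; the HTTP call and the kubescape logger are stubbed, and only the otel calls that appear in the diff are used:

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
)

type versionCheckRequest struct{ Client string }

// checkLatestVersion mirrors the refactored shape: accept a ctx, start a span,
// and make sure a panic in the version lookup only produces a warning.
func checkLatestVersion(ctx context.Context, req *versionCheckRequest) error {
	ctx, span := otel.Tracer("").Start(ctx, "versionCheckHandler.CheckLatestVersion")
	defer span.End()
	defer func() {
		if err := recover(); err != nil {
			// the real code uses logger.L().Ctx(ctx).Warning(...)
			log.Printf("failed to get latest version: %v", err)
		}
	}()

	_ = ctx // the real implementation posts req to the version service here
	_ = req
	return nil
}

func main() {
	_ = checkLatestVersion(context.Background(), &versionCheckRequest{Client: "kubescape"})
}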
@@ -4,6 +4,7 @@ import (
"strings"

"github.com/kubescape/k8s-interface/cloudsupport"
cloudapis "github.com/kubescape/k8s-interface/cloudsupport/apis"
"github.com/kubescape/opa-utils/reporthandling/apis"
)

@@ -20,9 +21,12 @@ var (
"KubeProxyInfo",
"ControlPlaneInfo",
"CloudProviderInfo",
"CNIInfo",
}
CloudResources = []string{
"ClusterDescribe",
cloudapis.CloudProviderDescribeKind,
cloudapis.CloudProviderDescribeRepositoriesKind,
cloudapis.CloudProviderListEntitiesForPoliciesKind,
string(cloudsupport.TypeApiServerInfo),
}
)
@@ -1,6 +1,7 @@
package core

import (
"context"
"fmt"

metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
@@ -42,8 +43,8 @@ func (ks *Kubescape) ViewCachedConfig(viewConfig *metav1.ViewConfig) error {
return nil
}

func (ks *Kubescape) DeleteCachedConfig(deleteConfig *metav1.DeleteConfig) error {
func (ks *Kubescape) DeleteCachedConfig(ctx context.Context, deleteConfig *metav1.DeleteConfig) error {

tenant := getTenantConfig(nil, "", "", getKubernetesApi()) // change k8sinterface
return tenant.DeleteCachedConfig()
return tenant.DeleteCachedConfig(ctx)
}
@@ -1,6 +1,7 @@
package core

import (
"context"
"fmt"
"os"
"path/filepath"
@@ -21,7 +22,7 @@ const (
TargetAttackTracks = "attack-tracks"
)

var downloadFunc = map[string]func(*metav1.DownloadInfo) error{
var downloadFunc = map[string]func(context.Context, *metav1.DownloadInfo) error{
TargetControlsInputs: downloadConfigInputs,
TargetExceptions: downloadExceptions,
TargetControl: downloadControl,
@@ -38,20 +39,20 @@ func DownloadSupportCommands() []string {
return commands
}

func (ks *Kubescape) Download(downloadInfo *metav1.DownloadInfo) error {
func (ks *Kubescape) Download(ctx context.Context, downloadInfo *metav1.DownloadInfo) error {
setPathandFilename(downloadInfo)
if err := os.MkdirAll(downloadInfo.Path, os.ModePerm); err != nil {
return err
}
if err := downloadArtifact(downloadInfo, downloadFunc); err != nil {
if err := downloadArtifact(ctx, downloadInfo, downloadFunc); err != nil {
return err
}
return nil
}

func downloadArtifact(downloadInfo *metav1.DownloadInfo, downloadArtifactFunc map[string]func(*metav1.DownloadInfo) error) error {
func downloadArtifact(ctx context.Context, downloadInfo *metav1.DownloadInfo, downloadArtifactFunc map[string]func(context.Context, *metav1.DownloadInfo) error) error {
if f, ok := downloadArtifactFunc[downloadInfo.Target]; ok {
if err := f(downloadInfo); err != nil {
if err := f(ctx, downloadInfo); err != nil {
return err
}
return nil
@@ -73,26 +74,26 @@ func setPathandFilename(downloadInfo *metav1.DownloadInfo) {
}
}

func downloadArtifacts(downloadInfo *metav1.DownloadInfo) error {
func downloadArtifacts(ctx context.Context, downloadInfo *metav1.DownloadInfo) error {
downloadInfo.FileName = ""
var artifacts = map[string]func(*metav1.DownloadInfo) error{
var artifacts = map[string]func(context.Context, *metav1.DownloadInfo) error{
"controls-inputs": downloadConfigInputs,
"exceptions": downloadExceptions,
"framework": downloadFramework,
"attack-tracks": downloadAttackTracks,
}
for artifact := range artifacts {
if err := downloadArtifact(&metav1.DownloadInfo{Target: artifact, Path: downloadInfo.Path, FileName: fmt.Sprintf("%s.json", artifact)}, artifacts); err != nil {
logger.L().Error("error downloading", helpers.String("artifact", artifact), helpers.Error(err))
if err := downloadArtifact(ctx, &metav1.DownloadInfo{Target: artifact, Path: downloadInfo.Path, FileName: fmt.Sprintf("%s.json", artifact)}, artifacts); err != nil {
logger.L().Ctx(ctx).Error("error downloading", helpers.String("artifact", artifact), helpers.Error(err))
}
}
return nil
}

func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
func downloadConfigInputs(ctx context.Context, downloadInfo *metav1.DownloadInfo) error {
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())

controlsInputsGetter := getConfigInputsGetter(downloadInfo.Identifier, tenant.GetAccountID(), nil)
controlsInputsGetter := getConfigInputsGetter(ctx, downloadInfo.Identifier, tenant.GetAccountID(), nil)
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetContextName())
if err != nil {
return err
@@ -112,9 +113,9 @@ func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
return nil
}

func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
func downloadExceptions(ctx context.Context, downloadInfo *metav1.DownloadInfo) error {
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())
exceptionsGetter := getExceptionsGetter("", tenant.GetAccountID(), nil)
exceptionsGetter := getExceptionsGetter(ctx, "", tenant.GetAccountID(), nil)

exceptions, err := exceptionsGetter.GetExceptions(tenant.GetContextName())
if err != nil {
@@ -129,15 +130,15 @@ func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
if err != nil {
return err
}
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
logger.L().Ctx(ctx).Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
return nil
}

func downloadAttackTracks(downloadInfo *metav1.DownloadInfo) error {
func downloadAttackTracks(ctx context.Context, downloadInfo *metav1.DownloadInfo) error {
var err error
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())

attackTracksGetter := getAttackTracksGetter("", tenant.GetAccountID(), nil)
attackTracksGetter := getAttackTracksGetter(ctx, "", tenant.GetAccountID(), nil)

attackTracks, err := attackTracksGetter.GetAttackTracks()
if err != nil {
@@ -157,11 +158,11 @@ func downloadAttackTracks(downloadInfo *metav1.DownloadInfo) error {

}

func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
func downloadFramework(ctx context.Context, downloadInfo *metav1.DownloadInfo) error {

tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())

g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
g := getPolicyGetter(ctx, nil, tenant.GetTenantEmail(), true, nil)

if downloadInfo.Identifier == "" {
// if framework name not specified - download all frameworks
@@ -199,11 +200,11 @@ func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
return nil
}

func downloadControl(downloadInfo *metav1.DownloadInfo) error {
func downloadControl(ctx context.Context, downloadInfo *metav1.DownloadInfo) error {

tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())

g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
g := getPolicyGetter(ctx, nil, tenant.GetTenantEmail(), false, nil)

if downloadInfo.Identifier == "" {
// TODO - support
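Download dispatches on the artifact name through a map of downloader functions, and the map's value type now takes the context first. A minimal sketch of that dispatch; downloadInfo is a stand-in for metav1.DownloadInfo and the downloader bodies are placeholders:

package main

import (
	"context"
	"fmt"
)

type downloadInfo struct{ Target, Path string }

// Each downloader takes the context first, matching the new map value type
// map[string]func(context.Context, *metav1.DownloadInfo) error from the diff.
var downloadFunc = map[string]func(context.Context, *downloadInfo) error{
	"controls-inputs": func(ctx context.Context, d *downloadInfo) error { return stub(ctx, d) },
	"exceptions":      func(ctx context.Context, d *downloadInfo) error { return stub(ctx, d) },
	"framework":       func(ctx context.Context, d *downloadInfo) error { return stub(ctx, d) },
	"attack-tracks":   func(ctx context.Context, d *downloadInfo) error { return stub(ctx, d) },
}

func stub(ctx context.Context, d *downloadInfo) error {
	fmt.Printf("downloading %s into %s\n", d.Target, d.Path)
	return ctx.Err()
}

// downloadArtifact looks up the downloader for the requested target and runs it.
func downloadArtifact(ctx context.Context, d *downloadInfo) error {
	f, ok := downloadFunc[d.Target]
	if !ok {
		return fmt.Errorf("unknown download target %q", d.Target)
	}
	return f(ctx, d)
}

func main() {
	_ = downloadArtifact(context.Background(), &downloadInfo{Target: "framework", Path: "/tmp"})
}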
@@ -1,6 +1,7 @@
package core

import (
"context"
"fmt"
"strings"

@@ -14,14 +15,14 @@ const NoChangesApplied = "No changes were applied."
const NoResourcesToFix = "No issues to fix."
const ConfirmationQuestion = "Would you like to apply the changes to the files above? [y|n]: "

func (ks *Kubescape) Fix(fixInfo *metav1.FixInfo) error {
func (ks *Kubescape) Fix(ctx context.Context, fixInfo *metav1.FixInfo) error {
logger.L().Info("Reading report file...")
handler, err := fixhandler.NewFixHandler(fixInfo)
if err != nil {
return err
}

resourcesToFix := handler.PrepareResourcesToFix()
resourcesToFix := handler.PrepareResourcesToFix(ctx)

if len(resourcesToFix) == 0 {
logger.L().Info(NoResourcesToFix)
@@ -40,12 +41,12 @@ func (ks *Kubescape) Fix(fixInfo *metav1.FixInfo) error {
return nil
}

updatedFilesCount, errors := handler.ApplyChanges(resourcesToFix)
updatedFilesCount, errors := handler.ApplyChanges(ctx, resourcesToFix)
logger.L().Info(fmt.Sprintf("Fixed resources in %d files.", updatedFilesCount))

if len(errors) > 0 {
for _, err := range errors {
logger.L().Error(err.Error())
logger.L().Ctx(ctx).Error(err.Error())
}
return fmt.Errorf("Failed to fix some resources, check the logs for more details")
}
@@ -1,6 +1,7 @@
package core

import (
"context"
"fmt"
"os"

@@ -15,6 +16,7 @@ import (
printerv2 "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/reporter"
reporterv2 "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/reporter/v2"
"go.opentelemetry.io/otel"

"github.com/google/uuid"

@@ -35,7 +37,7 @@ func getTenantConfig(credentials *cautils.Credentials, clusterName string, custo
return cautils.NewClusterConfig(k8s, getter.GetKSCloudAPIConnector(), credentials, clusterName, customClusterName)
}

func getExceptionsGetter(useExceptions string, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IExceptionsGetter {
func getExceptionsGetter(ctx context.Context, useExceptions string, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IExceptionsGetter {
if useExceptions != "" {
// load exceptions from file
return getter.NewLoadPolicy([]string{useExceptions})
@@ -49,7 +51,7 @@ func getExceptionsGetter(useExceptions string, accountID string, downloadRelease
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
}
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull attack tracks, fallback to cache
logger.L().Warning("failed to get exceptions from github release, loading attack tracks from cache", helpers.Error(err))
logger.L().Ctx(ctx).Warning("failed to get exceptions from github release, loading attack tracks from cache", helpers.Error(err))
return getter.NewLoadPolicy([]string{getter.GetDefaultPath(cautils.LocalExceptionsFilename)})
}
return downloadReleasedPolicy
@@ -63,7 +65,9 @@ func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.Kubern
return nil
}

func getReporter(tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan bool, scanningContext cautils.ScanningContext) reporter.IReport {
func getReporter(ctx context.Context, tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan bool, scanningContext cautils.ScanningContext) reporter.IReport {
ctx, span := otel.Tracer("").Start(ctx, "getReporter")
defer span.End()
if submit {
submitData := reporterv2.SubmitContextScan
if scanningContext != cautils.ContextCluster {
@@ -83,17 +87,19 @@ func getReporter(tenantConfig cautils.ITenantConfig, reportID string, submit, fw
return reporterv2.NewReportMock("", message)
}

func getResourceHandler(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor, registryAdaptors *resourcehandler.RegistryAdaptors) resourcehandler.IResourceHandler {
func getResourceHandler(ctx context.Context, scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor, registryAdaptors *resourcehandler.RegistryAdaptors) resourcehandler.IResourceHandler {
ctx, span := otel.Tracer("").Start(ctx, "getResourceHandler")
defer span.End()
if len(scanInfo.InputPatterns) > 0 || k8s == nil {
// scanInfo.HostSensor.SetBool(false)
return resourcehandler.NewFileResourceHandler(scanInfo.InputPatterns, registryAdaptors)
return resourcehandler.NewFileResourceHandler(ctx, scanInfo.InputPatterns, registryAdaptors)
}
getter.GetKSCloudAPIConnector()
rbacObjects := getRBACHandler(tenantConfig, k8s, scanInfo.Submit)
return resourcehandler.NewK8sResourceHandler(k8s, getFieldSelector(scanInfo), hostSensorHandler, rbacObjects, registryAdaptors)
}

func getHostSensorHandler(scanInfo *cautils.ScanInfo, k8s *k8sinterface.KubernetesApi) hostsensorutils.IHostSensor {
func getHostSensorHandler(ctx context.Context, scanInfo *cautils.ScanInfo, k8s *k8sinterface.KubernetesApi) hostsensorutils.IHostSensor {
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
return &hostsensorutils.HostSensorHandlerMock{}
}
@@ -102,12 +108,11 @@ func getHostSensorHandler(scanInfo *cautils.ScanInfo, k8s *k8sinterface.Kubernet
// we need to determined which controls needs host scanner
if scanInfo.HostSensorEnabled.Get() == nil && hasHostSensorControls {
scanInfo.HostSensorEnabled.SetBool(false) // default - do not run host scanner
logger.L().Warning("Kubernetes cluster nodes scanning is disabled. This is required to collect valuable data for certain controls. You can enable it using the --enable-host-scan flag")
}
if hostSensorVal := scanInfo.HostSensorEnabled.Get(); hostSensorVal != nil && *hostSensorVal {
hostSensorHandler, err := hostsensorutils.NewHostSensorHandler(k8s, scanInfo.HostSensorYamlPath)
if err != nil {
logger.L().Warning(fmt.Sprintf("failed to create host scanner: %s", err.Error()))
logger.L().Ctx(ctx).Warning(fmt.Sprintf("failed to create host scanner: %s", err.Error()))
return &hostsensorutils.HostSensorHandlerMock{}
}
return hostSensorHandler
@@ -189,7 +194,7 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
}

// setPolicyGetter set the policy getter - local file/github release/Kubescape Cloud API
func getPolicyGetter(loadPoliciesFromFile []string, tenantEmail string, frameworkScope bool, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
func getPolicyGetter(ctx context.Context, loadPoliciesFromFile []string, tenantEmail string, frameworkScope bool, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
if len(loadPoliciesFromFile) > 0 {
return getter.NewLoadPolicy(loadPoliciesFromFile)
}
@@ -200,12 +205,12 @@ func getPolicyGetter(loadPoliciesFromFile []string, tenantEmail string, framewor
if downloadReleasedPolicy == nil {
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
}
return getDownloadReleasedPolicy(downloadReleasedPolicy)
return getDownloadReleasedPolicy(ctx, downloadReleasedPolicy)

}

// setConfigInputsGetter sets the config input getter - local file/github release/Kubescape Cloud API
func getConfigInputsGetter(ControlsInputs string, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IControlsInputsGetter {
func getConfigInputsGetter(ctx context.Context, ControlsInputs string, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IControlsInputsGetter {
if len(ControlsInputs) > 0 {
return getter.NewLoadPolicy([]string{ControlsInputs})
}
@@ -217,14 +222,14 @@ func getConfigInputsGetter(ControlsInputs string, accountID string, downloadRele
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
}
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull config inputs, fallback to BE
logger.L().Warning("failed to get config inputs from github release, this may affect the scanning results", helpers.Error(err))
logger.L().Ctx(ctx).Warning("failed to get config inputs from github release, this may affect the scanning results", helpers.Error(err))
}
return downloadReleasedPolicy
}

func getDownloadReleasedPolicy(downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
func getDownloadReleasedPolicy(ctx context.Context, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull policy, fallback to cache
logger.L().Warning("failed to get policies from github release, loading policies from cache", helpers.Error(err))
logger.L().Ctx(ctx).Warning("failed to get policies from github release, loading policies from cache", helpers.Error(err))
return getter.NewLoadPolicy(getDefaultFrameworksPaths())
} else {
return downloadReleasedPolicy
@@ -247,7 +252,7 @@ func listFrameworksNames(policyGetter getter.IPolicyGetter) []string {
return getter.NativeFrameworks
}

func getAttackTracksGetter(attackTracks, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IAttackTracksGetter {
func getAttackTracksGetter(ctx context.Context, attackTracks, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IAttackTracksGetter {
if len(attackTracks) > 0 {
return getter.NewLoadPolicy([]string{attackTracks})
}
@@ -260,18 +265,23 @@ func getAttackTracksGetter(attackTracks, accountID string, downloadReleasedPolic
}

if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull attack tracks, fallback to cache
logger.L().Warning("failed to get attack tracks from github release, loading attack tracks from cache", helpers.Error(err))
logger.L().Ctx(ctx).Warning("failed to get attack tracks from github release, loading attack tracks from cache", helpers.Error(err))
return getter.NewLoadPolicy([]string{getter.GetDefaultPath(cautils.LocalAttackTracksFilename)})
}
return downloadReleasedPolicy
}

// getUIPrinter returns a printer that will be used to print to the program’s UI (terminal)
func getUIPrinter(verboseMode bool, formatVersion string, attackTree bool, viewType cautils.ViewTypes) printer.IPrinter {
p := printerv2.NewPrettyPrinter(verboseMode, formatVersion, attackTree, viewType)
func getUIPrinter(ctx context.Context, verboseMode bool, formatVersion string, attackTree bool, viewType cautils.ViewTypes) printer.IPrinter {
var p printer.IPrinter
if helpers.ToLevel(logger.L().GetLevel()) >= helpers.WarningLevel {
p = &printerv2.SilentPrinter{}
} else {
p = printerv2.NewPrettyPrinter(verboseMode, formatVersion, attackTree, viewType)

// Since the UI of the program is a CLI (Stdout), it means that it should always print to Stdout
p.SetWriter(os.Stdout.Name())
// Since the UI of the program is a CLI (Stdout), it means that it should always print to Stdout
p.SetWriter(ctx, os.Stdout.Name())
}

return p
}
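getUIPrinter now returns a silent printer whenever the logger level is Warning or higher, so UI output does not interleave with warnings. A sketch of the selection logic behind that branch; the printer types here are stand-ins, not the real printerv2 structs:

package main

import (
	"context"
	"fmt"
	"os"
)

type printer interface{ PrintNextSteps() }

type prettyPrinter struct{ out *os.File }

func (p *prettyPrinter) PrintNextSteps() { fmt.Fprintln(p.out, "next steps ...") }

type silentPrinter struct{}

func (s *silentPrinter) PrintNextSteps() {}

// getUIPrinter mirrors the new branch: quiet log levels keep the pretty UI,
// Warning and above swap in a printer that writes nothing.
func getUIPrinter(ctx context.Context, logLevelIsWarningOrHigher bool) printer {
	_ = ctx // the real code also passes ctx to SetWriter for error reporting
	if logLevelIsWarningOrHigher {
		return &silentPrinter{}
	}
	return &prettyPrinter{out: os.Stdout}
}

func main() {
	getUIPrinter(context.Background(), false).PrintNextSteps()
	getUIPrinter(context.Background(), true).PrintNextSteps() // prints nothing
}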
@@ -1,10 +1,14 @@
package core

import (
"context"
"reflect"
"testing"

"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/stretchr/testify/assert"
)

func Test_getUIPrinter(t *testing.T) {
@@ -13,27 +17,91 @@ func Test_getUIPrinter(t *testing.T) {
VerboseMode: true,
View: "control",
}
wantFormatVersion := scanInfo.FormatVersion
wantVerboseMode := scanInfo.VerboseMode
wantViewType := cautils.ViewTypes(scanInfo.View)

got := getUIPrinter(scanInfo.VerboseMode, scanInfo.FormatVersion, scanInfo.PrintAttackTree, cautils.ViewTypes(scanInfo.View))

gotValue := reflect.ValueOf(got).Elem()
gotFormatVersion := gotValue.FieldByName("formatVersion").String()
gotVerboseMode := gotValue.FieldByName("verboseMode").Bool()
gotViewType := cautils.ViewTypes(gotValue.FieldByName("viewType").String())

if gotFormatVersion != wantFormatVersion {
t.Errorf("Got: %s, want: %s", gotFormatVersion, wantFormatVersion)
type args struct {
ctx context.Context
formatVersion string
viewType cautils.ViewTypes
verboseMode bool
printAttack bool
loggerLevel helpers.Level
}
type wantTypes struct {
structType string
formatVersion string
viewType cautils.ViewTypes
verboseMode bool
}
tests := []struct {
name string
args args
want wantTypes
testAllFields bool
}{
{
name: "Test getUIPrinter PrettyPrinter",
args: args{
ctx: context.TODO(),
verboseMode: scanInfo.VerboseMode,
formatVersion: scanInfo.FormatVersion,
printAttack: scanInfo.PrintAttackTree,
viewType: cautils.ViewTypes(scanInfo.View),
loggerLevel: helpers.InfoLevel,
},
want: wantTypes{
structType: "*printer.PrettyPrinter",
formatVersion: scanInfo.FormatVersion,
verboseMode: scanInfo.VerboseMode,
viewType: cautils.ViewTypes(scanInfo.View),
},
testAllFields: true,
},
{
name: "Test getUIPrinter SilentPrinter",
args: args{
ctx: context.TODO(),
verboseMode: scanInfo.VerboseMode,
formatVersion: scanInfo.FormatVersion,
printAttack: scanInfo.PrintAttackTree,
viewType: cautils.ViewTypes(scanInfo.View),
loggerLevel: helpers.WarningLevel,
},
want: wantTypes{
structType: "*printer.SilentPrinter",
formatVersion: "",
verboseMode: false,
viewType: cautils.ViewTypes(""),
},
testAllFields: false,
},
}

if gotVerboseMode != wantVerboseMode {
t.Errorf("Got: %t, want: %t", gotVerboseMode, wantVerboseMode)
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
logger.L().SetLevel(tt.args.loggerLevel.String())
got := getUIPrinter(tt.args.ctx, tt.args.verboseMode, tt.args.formatVersion, tt.args.printAttack, tt.args.viewType)

if gotViewType != wantViewType {
t.Errorf("Got: %v, want: %v", gotViewType, wantViewType)
}
assert.Equal(t, tt.want.structType, reflect.TypeOf(got).String())

if !tt.testAllFields {
return
}

gotValue := reflect.ValueOf(got).Elem()
gotFormatVersion := gotValue.FieldByName("formatVersion").String()
gotVerboseMode := gotValue.FieldByName("verboseMode").Bool()
gotViewType := cautils.ViewTypes(gotValue.FieldByName("viewType").String())

if gotFormatVersion != tt.want.formatVersion {
t.Errorf("Got: %s, want: %s", gotFormatVersion, tt.want.formatVersion)
}

if gotVerboseMode != tt.want.verboseMode {
t.Errorf("Got: %t, want: %t", gotVerboseMode, tt.want.verboseMode)
}

if gotViewType != tt.want.viewType {
t.Errorf("Got: %v, want: %v", gotViewType, tt.want.viewType)
}
})
}
}
@@ -1,6 +1,7 @@
package core

import (
"context"
"encoding/json"
"fmt"
"sort"
@@ -13,13 +14,13 @@ import (
"github.com/olekukonko/tablewriter"
)

var listFunc = map[string]func(*metav1.ListPolicies) ([]string, error){
var listFunc = map[string]func(context.Context, *metav1.ListPolicies) ([]string, error){
"controls": listControls,
"frameworks": listFrameworks,
"exceptions": listExceptions,
}

var listFormatFunc = map[string]func(string, []string){
var listFormatFunc = map[string]func(context.Context, string, []string){
"pretty-print": prettyPrintListFormat,
"json": jsonListFormat,
}
@@ -31,16 +32,16 @@ func ListSupportActions() []string {
}
return commands
}
func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
func (ks *Kubescape) List(ctx context.Context, listPolicies *metav1.ListPolicies) error {
if policyListerFunc, ok := listFunc[listPolicies.Target]; ok {
policies, err := policyListerFunc(listPolicies)
policies, err := policyListerFunc(ctx, listPolicies)
if err != nil {
return err
}
sort.Strings(policies)

if listFormatFunction, ok := listFormatFunc[listPolicies.Format]; ok {
listFormatFunction(listPolicies.Target, policies)
listFormatFunction(ctx, listPolicies.Target, policies)
} else {
return fmt.Errorf("Invalid format \"%s\", Supported formats: 'pretty-print'/'json' ", listPolicies.Format)
}
@@ -50,26 +51,26 @@ func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
return fmt.Errorf("unknown command to download")
}

func listFrameworks(listPolicies *metav1.ListPolicies) ([]string, error) {
func listFrameworks(ctx context.Context, listPolicies *metav1.ListPolicies) ([]string, error) {
tenant := getTenantConfig(&listPolicies.Credentials, "", "", getKubernetesApi()) // change k8sinterface
policyGetter := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
policyGetter := getPolicyGetter(ctx, nil, tenant.GetTenantEmail(), true, nil)

return listFrameworksNames(policyGetter), nil
}

func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {
func listControls(ctx context.Context, listPolicies *metav1.ListPolicies) ([]string, error) {
tenant := getTenantConfig(&listPolicies.Credentials, "", "", getKubernetesApi()) // change k8sinterface

policyGetter := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
policyGetter := getPolicyGetter(ctx, nil, tenant.GetTenantEmail(), false, nil)
return policyGetter.ListControls()
}

func listExceptions(listPolicies *metav1.ListPolicies) ([]string, error) {
func listExceptions(ctx context.Context, listPolicies *metav1.ListPolicies) ([]string, error) {
// load tenant metav1
tenant := getTenantConfig(&listPolicies.Credentials, "", "", getKubernetesApi())

var exceptionsNames []string
ksCloudAPI := getExceptionsGetter("", tenant.GetAccountID(), nil)
ksCloudAPI := getExceptionsGetter(ctx, "", tenant.GetAccountID(), nil)
exceptions, err := ksCloudAPI.GetExceptions("")
if err != nil {
return exceptionsNames, err
@@ -80,15 +81,15 @@ func listExceptions(listPolicies *metav1.ListPolicies) ([]string, error) {
return exceptionsNames, nil
}

func prettyPrintListFormat(targetPolicy string, policies []string) {
func prettyPrintListFormat(ctx context.Context, targetPolicy string, policies []string) {
if targetPolicy == "controls" {
prettyPrintControls(policies)
prettyPrintControls(ctx, policies)
return
}

header := fmt.Sprintf("Supported %s", targetPolicy)

policyTable := tablewriter.NewWriter(printer.GetWriter(""))
policyTable := tablewriter.NewWriter(printer.GetWriter(ctx, ""))
policyTable.SetAutoWrapText(true)
policyTable.SetHeader([]string{header})
policyTable.SetHeaderLine(true)
@@ -103,14 +104,14 @@ func prettyPrintListFormat(targetPolicy string, policies []string) {
policyTable.Render()
}

func jsonListFormat(targetPolicy string, policies []string) {
func jsonListFormat(_ context.Context, _ string, policies []string) {
j, _ := json.MarshalIndent(policies, "", " ")

fmt.Printf("%s\n", j)
}

func prettyPrintControls(policies []string) {
controlsTable := tablewriter.NewWriter(printer.GetWriter(""))
func prettyPrintControls(ctx context.Context, policies []string) {
controlsTable := tablewriter.NewWriter(printer.GetWriter(ctx, ""))
controlsTable.SetAutoWrapText(true)
controlsTable.SetHeader([]string{"Control ID", "Control Name", "Docs", "Frameworks"})
controlsTable.SetHeaderLine(true)
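prettyPrintListFormat renders the policy names with olekukonko/tablewriter. A small usage sketch of the same table calls seen in the diff, writing directly to stdout instead of going through printer.GetWriter(ctx, ""); the framework names are only sample data:

package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

// renderPolicyList sketches the pretty-print branch of List: one header row
// and one row per policy name, rendered to stdout.
func renderPolicyList(target string, policies []string) {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetAutoWrapText(true)
	table.SetHeader([]string{"Supported " + target})
	table.SetHeaderLine(true)
	for _, p := range policies {
		table.Append([]string{p})
	}
	table.Render()
}

func main() {
	renderPolicyList("frameworks", []string{"AllControls", "NSA", "MITRE"})
}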
@@ -1,14 +1,12 @@
package core

import (
"context"
"fmt"

apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"

"github.com/kubescape/k8s-interface/k8sinterface"

"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
"github.com/kubescape/kubescape/v2/core/pkg/hostsensorutils"
@@ -19,6 +17,8 @@ import (
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/reporter"
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
"go.opentelemetry.io/otel"

"github.com/kubescape/opa-utils/resources"
)
@@ -32,20 +32,22 @@ type componentInterfaces struct {
hostSensorHandler hostsensorutils.IHostSensor
}

func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInterfaces {
ctx, span := otel.Tracer("").Start(ctx, "getInterfaces")
defer span.End()

// ================== setup k8s interface object ======================================
var k8s *k8sinterface.KubernetesApi
if scanInfo.GetScanningContext() == cautils.ContextCluster {
k8s = getKubernetesApi()
if k8s == nil {
logger.L().Fatal("failed connecting to Kubernetes cluster")
logger.L().Ctx(ctx).Fatal("failed connecting to Kubernetes cluster")
}
}

// ================== setup tenant object ======================================

tenantConfig := getTenantConfig(&scanInfo.Credentials, scanInfo.KubeContext, scanInfo.CustomClusterName, k8s)
ctxTenant, spanTenant := otel.Tracer("").Start(ctx, "setup tenant")
tenantConfig := getTenantConfig(&scanInfo.Credentials, k8sinterface.GetContextName(), scanInfo.CustomClusterName, k8s)

// Set submit behavior AFTER loading tenant config
setSubmitBehavior(scanInfo, tenantConfig)
@@ -53,58 +55,56 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
if scanInfo.Submit {
// submit - Create tenant & Submit report
if err := tenantConfig.SetTenant(); err != nil {
logger.L().Error(err.Error())
logger.L().Ctx(ctxTenant).Error(err.Error())
}

if scanInfo.OmitRawResources {
logger.L().Warning("omit-raw-resources flag will be ignored in submit mode")
logger.L().Ctx(ctx).Warning("omit-raw-resources flag will be ignored in submit mode")
}
}
spanTenant.End()

// ================== version testing ======================================

v := cautils.NewIVersionCheckHandler()
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierIdentities(scanInfo.PolicyIdentifier), "", cautils.ScanningContextToScanningScope(scanInfo.GetScanningContext())))
v := cautils.NewIVersionCheckHandler(ctx)
v.CheckLatestVersion(ctx, cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierIdentities(scanInfo.PolicyIdentifier), "", cautils.ScanningContextToScanningScope(scanInfo.GetScanningContext())))

// ================== setup host scanner object ======================================

hostSensorHandler := getHostSensorHandler(scanInfo, k8s)
if err := hostSensorHandler.Init(); err != nil {
logger.L().Error("failed to init host scanner", helpers.Error(err))
ctxHostScanner, spanHostScanner := otel.Tracer("").Start(ctx, "setup host scanner")
hostSensorHandler := getHostSensorHandler(ctx, scanInfo, k8s)
if err := hostSensorHandler.Init(ctxHostScanner); err != nil {
logger.L().Ctx(ctxHostScanner).Error("failed to init host scanner", helpers.Error(err))
hostSensorHandler = &hostsensorutils.HostSensorHandlerMock{}
}
// excluding hostsensor namespace
if len(scanInfo.IncludeNamespaces) == 0 && hostSensorHandler.GetNamespace() != "" {
scanInfo.ExcludedNamespaces = fmt.Sprintf("%s,%s", scanInfo.ExcludedNamespaces, hostSensorHandler.GetNamespace())
}
spanHostScanner.End()

// ================== setup registry adaptors ======================================

registryAdaptors, err := resourcehandler.NewRegistryAdaptors()
if err != nil {
logger.L().Error("failed to initialize registry adaptors", helpers.Error(err))
logger.L().Ctx(ctx).Error("failed to initialize registry adaptors", helpers.Error(err))
}

// ================== setup resource collector object ======================================

resourceHandler := getResourceHandler(scanInfo, tenantConfig, k8s, hostSensorHandler, registryAdaptors)
resourceHandler := getResourceHandler(ctx, scanInfo, tenantConfig, k8s, hostSensorHandler, registryAdaptors)

// ================== setup reporter & printer objects ======================================

// reporting behavior - setup reporter
reportHandler := getReporter(tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan, scanInfo.GetScanningContext())
reportHandler := getReporter(ctx, tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan, scanInfo.GetScanningContext())

// setup printers
formats := scanInfo.Formats()

outputPrinters := make([]printer.IPrinter, 0)
for _, format := range formats {
printerHandler := resultshandling.NewPrinter(format, scanInfo.FormatVersion, scanInfo.PrintAttackTree, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
printerHandler.SetWriter(scanInfo.Output)
printerHandler := resultshandling.NewPrinter(ctx, format, scanInfo.FormatVersion, scanInfo.PrintAttackTree, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
printerHandler.SetWriter(ctx, scanInfo.Output)
outputPrinters = append(outputPrinters, printerHandler)
}

uiPrinter := getUIPrinter(scanInfo.VerboseMode, scanInfo.FormatVersion, scanInfo.PrintAttackTree, cautils.ViewTypes(scanInfo.View))
uiPrinter := getUIPrinter(ctx, scanInfo.VerboseMode, scanInfo.FormatVersion, scanInfo.PrintAttackTree, cautils.ViewTypes(scanInfo.View))

// ================== return interface ======================================

@@ -118,13 +118,16 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
}
}

func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsHandler, error) {
func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*resultshandling.ResultsHandler, error) {
ctx, spanScan := otel.Tracer("").Start(ctx, "kubescape.Scan")
defer spanScan.End()
logger.L().Info("Kubescape scanner starting")

// ===================== Initialization =====================
scanInfo.Init() // initialize scan info
ctxInit, spanInit := otel.Tracer("").Start(ctx, "initialization")
scanInfo.Init(ctxInit) // initialize scan info

interfaces := getInterfaces(scanInfo)
interfaces := getInterfaces(ctxInit, scanInfo)

cautils.ClusterName = interfaces.tenantConfig.GetContextName() // TODO - Deprecated
cautils.CustomerGUID = interfaces.tenantConfig.GetAccountID() // TODO - Deprecated
@@ -134,10 +137,10 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
downloadReleasedPolicy := getter.NewDownloadReleasedPolicy() // download config inputs from github release

// set policy getter only after setting the customerGUID
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetTenantEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
scanInfo.Getters.AttackTracksGetter = getAttackTracksGetter(scanInfo.AttackTracks, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
scanInfo.Getters.PolicyGetter = getPolicyGetter(ctx, scanInfo.UseFrom, interfaces.tenantConfig.GetTenantEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(ctx, scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(ctx, scanInfo.UseExceptions, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
scanInfo.Getters.AttackTracksGetter = getAttackTracksGetter(ctx, scanInfo.AttackTracks, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)

// TODO - list supported frameworks/controls
if scanInfo.ScanAll {
@@ -147,34 +150,40 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
// remove host scanner components
defer func() {
if err := interfaces.hostSensorHandler.TearDown(); err != nil {
logger.L().Error("failed to tear down host scanner", helpers.Error(err))
logger.L().Ctx(ctxInit).Error("failed to tear down host scanner", helpers.Error(err))
}
}()

resultsHandling := resultshandling.NewResultsHandler(interfaces.report, interfaces.outputPrinters, interfaces.uiPrinter)
spanInit.End()

// ===================== policies & resources =====================
ctxPolicies, spanPolicies := otel.Tracer("").Start(ctx, "policies & resources")
policyHandler := policyhandler.NewPolicyHandler(interfaces.resourceHandler)
scanData, err := policyHandler.CollectResources(scanInfo.PolicyIdentifier, scanInfo)
scanData, err := policyHandler.CollectResources(ctxPolicies, scanInfo.PolicyIdentifier, scanInfo)
if err != nil {
return resultsHandling, err
}
spanPolicies.End()

// ========================= opa testing =====================
ctxOpa, spanOpa := otel.Tracer("").Start(ctx, "opa testing")
deps := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), interfaces.tenantConfig.GetContextName())
reportResults := opaprocessor.NewOPAProcessor(scanData, deps)
if err := reportResults.ProcessRulesListenner(); err != nil {
if err := reportResults.ProcessRulesListenner(ctxOpa, cautils.NewProgressHandler("")); err != nil {
// TODO - do something
return resultsHandling, fmt.Errorf("%w", err)
}
spanOpa.End()

// ======================== prioritization ===================

if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(scanInfo.Getters.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
logger.L().Warning("failed to get attack tracks, this may affect the scanning results", helpers.Error(err))
_, spanPrioritization := otel.Tracer("").Start(ctx, "prioritization")
if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(ctx, scanInfo.Getters.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
logger.L().Ctx(ctx).Warning("failed to get attack tracks, this may affect the scanning results", helpers.Error(err))
} else if err := priotizationHandler.PrioritizeResources(scanData); err != nil {
return resultsHandling, fmt.Errorf("%w", err)
}
spanPrioritization.End()

// ========================= results handling =====================
resultsHandling.SetData(scanData)
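Scan is now split into named tracing phases (initialization, policies & resources, opa testing, prioritization), each with its own child span under kubescape.Scan. A sketch of that span layout with placeholder phase bodies; the phase names match the diff, everything else is illustrative:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel"
)

// scan sketches the phase layout added in the diff: one parent span for the
// whole scan and a child span per stage, ended as soon as the stage completes.
func scan(ctx context.Context) error {
	ctx, spanScan := otel.Tracer("").Start(ctx, "kubescape.Scan")
	defer spanScan.End()

	ctxInit, spanInit := otel.Tracer("").Start(ctx, "initialization")
	fmt.Println("collecting interfaces, tenant config, host scanner ...")
	_ = ctxInit
	spanInit.End()

	ctxPolicies, spanPolicies := otel.Tracer("").Start(ctx, "policies & resources")
	fmt.Println("collecting resources and policies ...")
	_ = ctxPolicies
	spanPolicies.End()

	ctxOpa, spanOpa := otel.Tracer("").Start(ctx, "opa testing")
	fmt.Println("running rego rules ...")
	_ = ctxOpa
	spanOpa.End()

	_, spanPrio := otel.Tracer("").Start(ctx, "prioritization")
	fmt.Println("prioritizing resources ...")
	spanPrio.End()

	return nil
}

func main() {
	_ = scan(context.Background())
}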
@@ -1,6 +1,8 @@
package core

import (
"context"

"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
"github.com/kubescape/kubescape/v2/core/meta/cliinterfaces"
@@ -9,7 +11,7 @@ import (
"github.com/kubescape/go-logger/helpers"
)

func (ks *Kubescape) Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error {
func (ks *Kubescape) Submit(ctx context.Context, submitInterfaces cliinterfaces.SubmitInterfaces) error {

// list resources
report, err := submitInterfaces.SubmitObjects.SetResourcesReport()
@@ -26,7 +28,7 @@ func (ks *Kubescape) Submit(submitInterfaces cliinterfaces.SubmitInterfaces) err
AllResources: allresources,
Metadata: &report.Metadata,
}
if err := submitInterfaces.Reporter.Submit(o); err != nil {
if err := submitInterfaces.Reporter.Submit(ctx, o); err != nil {
return err
}
logger.L().Success("Data has been submitted successfully")
@@ -35,13 +37,13 @@ func (ks *Kubescape) Submit(submitInterfaces cliinterfaces.SubmitInterfaces) err
return nil
}

func (ks *Kubescape) SubmitExceptions(credentials *cautils.Credentials, excPath string) error {
func (ks *Kubescape) SubmitExceptions(ctx context.Context, credentials *cautils.Credentials, excPath string) error {
logger.L().Info("submitting exceptions", helpers.String("path", excPath))

// load cached config
tenantConfig := getTenantConfig(credentials, "", "", getKubernetesApi())
if err := tenantConfig.SetTenant(); err != nil {
logger.L().Error("failed setting account ID", helpers.Error(err))
logger.L().Ctx(ctx).Error("failed setting account ID", helpers.Error(err))
}

// load exceptions from file
@@ -1,6 +1,8 @@
package meta

import (
"context"

"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/meta/cliinterfaces"
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
@@ -8,24 +10,24 @@ import (
)

type IKubescape interface {
Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsHandler, error) // TODO - use scanInfo from v1
Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*resultshandling.ResultsHandler, error) // TODO - use scanInfo from v1

// policies
List(listPolicies *metav1.ListPolicies) error // TODO - return list response
Download(downloadInfo *metav1.DownloadInfo) error // TODO - return downloaded policies
List(ctx context.Context, listPolicies *metav1.ListPolicies) error // TODO - return list response
Download(ctx context.Context, downloadInfo *metav1.DownloadInfo) error // TODO - return downloaded policies

// submit
Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error // TODO - func should receive object
SubmitExceptions(credentials *cautils.Credentials, excPath string) error // TODO - remove
Submit(ctx context.Context, submitInterfaces cliinterfaces.SubmitInterfaces) error // TODO - func should receive object
SubmitExceptions(ctx context.Context, credentials *cautils.Credentials, excPath string) error // TODO - remove

// config
SetCachedConfig(setConfig *metav1.SetConfig) error
ViewCachedConfig(viewConfig *metav1.ViewConfig) error
DeleteCachedConfig(deleteConfig *metav1.DeleteConfig) error
DeleteCachedConfig(ctx context.Context, deleteConfig *metav1.DeleteConfig) error

// delete
DeleteExceptions(deleteexceptions *metav1.DeleteExceptions) error

// fix
Fix(fixInfo *metav1.FixInfo) error
Fix(ctx context.Context, fixInfo *metav1.FixInfo) error
}
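Every IKubescape method now takes a context as its first argument, so callers can cancel long scans and keep one trace per request. A sketch of what a caller looks like after the change; the interface below is a trimmed, hypothetical subset, not the real core/meta definition:

package main

import (
	"context"
	"fmt"
	"time"
)

// trimmedKubescape is an illustrative subset of the refactored interface.
type trimmedKubescape interface {
	Scan(ctx context.Context, target string) error
	List(ctx context.Context, kind string) error
}

type fakeKubescape struct{}

func (fakeKubescape) Scan(ctx context.Context, target string) error {
	select {
	case <-time.After(10 * time.Millisecond): // pretend to do work
		fmt.Println("scanned", target)
		return nil
	case <-ctx.Done():
		return ctx.Err() // caller cancelled or timed out
	}
}

func (fakeKubescape) List(ctx context.Context, kind string) error {
	fmt.Println("listing", kind)
	return ctx.Err()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	var ks trimmedKubescape = fakeKubescape{}
	_ = ks.Scan(ctx, "cluster")
	_ = ks.List(ctx, "frameworks")
}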
@@ -1,6 +1,7 @@
package fixhandler

import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -119,7 +120,7 @@ func (h *FixHandler) getPathFromRawResource(obj map[string]interface{}) string {
return ""
}

func (h *FixHandler) PrepareResourcesToFix() []ResourceFixInfo {
func (h *FixHandler) PrepareResourcesToFix(ctx context.Context) []ResourceFixInfo {
resourceIdToResource := h.buildResourcesMap()

resourcesToFix := make([]ResourceFixInfo, 0)
@@ -141,13 +142,13 @@ func (h *FixHandler) PrepareResourcesToFix() []ResourceFixInfo {

relativePath, documentIndex, err := h.getFilePathAndIndex(resourcePath)
if err != nil {
logger.L().Error("Skipping invalid resource path: " + resourcePath)
logger.L().Ctx(ctx).Error("Skipping invalid resource path: " + resourcePath)
continue
}

absolutePath := path.Join(h.localBasePath, relativePath)
if _, err := os.Stat(absolutePath); err != nil {
logger.L().Error("Skipping missing file: " + absolutePath)
logger.L().Ctx(ctx).Error("Skipping missing file: " + absolutePath)
continue
}

@@ -193,7 +194,7 @@ func (h *FixHandler) PrintExpectedChanges(resourcesToFix []ResourceFixInfo) {
logger.L().Info(sb.String())
}

func (h *FixHandler) ApplyChanges(resourcesToFix []ResourceFixInfo) (int, []error) {
func (h *FixHandler) ApplyChanges(ctx context.Context, resourcesToFix []ResourceFixInfo) (int, []error) {
updatedFiles := make(map[string]bool)
errors := make([]error, 0)

@@ -207,7 +208,7 @@ func (h *FixHandler) ApplyChanges(resourcesToFix []ResourceFixInfo) (int, []erro
continue
}

fixedYamlString, err := h.ApplyFixToContent(fileAsString, yamlExpression)
fixedYamlString, err := h.ApplyFixToContent(ctx, fileAsString, yamlExpression)

if err != nil {
errors = append(errors, fmt.Errorf("Failed to fix file %s: %w ", filepath, err))
@@ -219,7 +220,7 @@ func (h *FixHandler) ApplyChanges(resourcesToFix []ResourceFixInfo) (int, []erro
err = writeFixesToFile(filepath, fixedYamlString)

if err != nil {
logger.L().Error(fmt.Sprintf("Failed to write fixes to file %s, %v", filepath, err.Error()))
logger.L().Ctx(ctx).Error(fmt.Sprintf("Failed to write fixes to file %s, %v", filepath, err.Error()))
errors = append(errors, err)
}
}
@@ -241,7 +242,7 @@ func (h *FixHandler) getFilePathAndIndex(filePathWithIndex string) (filePath str
}
}

func (h *FixHandler) ApplyFixToContent(yamlAsString, yamlExpression string) (fixedString string, err error) {
func (h *FixHandler) ApplyFixToContent(ctx context.Context, yamlAsString, yamlExpression string) (fixedString string, err error) {
newline := determineNewlineSeparator(yamlAsString)

yamlLines := strings.Split(yamlAsString, newline)
@@ -252,13 +253,13 @@ func (h *FixHandler) ApplyFixToContent(yamlAsString, yamlExpression string) (fix
return "", err
}

fixedRootNodes, err := getFixedNodes(yamlAsString, yamlExpression)
fixedRootNodes, err := getFixedNodes(ctx, yamlAsString, yamlExpression)

if err != nil {
return "", err
}

fileFixInfo := getFixInfo(originalRootNodes, fixedRootNodes)
fileFixInfo := getFixInfo(ctx, originalRootNodes, fixedRootNodes)

fixedYamlLines := getFixedYamlLines(yamlLines, fileFixInfo, newline)
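ApplyChanges keeps going when a single file fails: it collects per-file errors, logs them through the context-aware logger, and reports how many files were updated. A minimal sketch of that accumulation pattern with a fake fixer function:

package main

import (
	"context"
	"fmt"
	"log"
)

// applyChanges sketches the ApplyChanges flow: try every file, remember which
// ones were updated, and collect (rather than abort on) per-file errors.
func applyChanges(ctx context.Context, files []string, fix func(string) error) (int, []error) {
	updated := make(map[string]bool)
	var errs []error
	for _, f := range files {
		if err := fix(f); err != nil {
			// the real code logs via logger.L().Ctx(ctx).Error(...)
			log.Printf("failed to fix %s: %v", f, err)
			errs = append(errs, err)
			continue
		}
		updated[f] = true
	}
	_ = ctx
	return len(updated), errs
}

func main() {
	count, errs := applyChanges(context.Background(),
		[]string{"deployment.yaml", "broken.yaml"},
		func(f string) error {
			if f == "broken.yaml" {
				return fmt.Errorf("unfixable")
			}
			return nil
		})
	fmt.Printf("fixed %d files, %d errors\n", count, len(errs))
}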
@@ -1,6 +1,7 @@
package fixhandler

import (
"context"
"os"
"path/filepath"
"testing"
@@ -188,7 +189,7 @@ func TestApplyFixKeepsFormatting(t *testing.T) {

h, _ := NewFixHandlerMock()

got, _ := h.ApplyFixToContent(string(input), expression)
got, _ := h.ApplyFixToContent(context.TODO(), string(input), expression)

assert.Equalf(
t, want, got,
@@ -2,6 +2,7 @@ package fixhandler

import (
"container/list"
"context"
"errors"
"fmt"
"io"
@@ -36,7 +37,7 @@ func decodeDocumentRoots(yamlAsString string) ([]yaml.Node, error) {
return nodes, nil
}

func getFixedNodes(yamlAsString, yamlExpression string) ([]yaml.Node, error) {
func getFixedNodes(ctx context.Context, yamlAsString, yamlExpression string) ([]yaml.Node, error) {
preferences := yqlib.ConfiguredYamlPreferences
preferences.EvaluateTogether = true
decoder := yqlib.NewYamlDecoder(preferences)
@@ -44,7 +45,7 @@ func getFixedNodes(yamlAsString, yamlExpression string) ([]yaml.Node, error) {
var allDocuments = list.New()
reader := strings.NewReader(yamlAsString)

fileDocuments, err := readDocuments(reader, decoder)
fileDocuments, err := readDocuments(ctx, reader, decoder)
if err != nil {
return nil, err
}
@@ -87,14 +88,14 @@ func flattenWithDFSHelper(node *yaml.Node, parent *yaml.Node, dfsOrder *[]nodeIn
}
}

func getFixInfo(originalRootNodes, fixedRootNodes []yaml.Node) fileFixInfo {
func getFixInfo(ctx context.Context, originalRootNodes, fixedRootNodes []yaml.Node) fileFixInfo {
contentToAdd := make([]contentToAdd, 0)
linesToRemove := make([]linesToRemove, 0)

for idx := 0; idx < len(fixedRootNodes); idx++ {
originalList := flattenWithDFS(&originalRootNodes[idx])
fixedList := flattenWithDFS(&fixedRootNodes[idx])
nodeContentToAdd, nodeLinesToRemove := getFixInfoHelper(*originalList, *fixedList)
nodeContentToAdd, nodeLinesToRemove := getFixInfoHelper(ctx, *originalList, *fixedList)
contentToAdd = append(contentToAdd, nodeContentToAdd...)
linesToRemove = append(linesToRemove, nodeLinesToRemove...)
}
@@ -105,7 +106,7 @@ func getFixInfo(originalRootNodes, fixedRootNodes []yaml.Node) fileFixInfo {
}
}

func getFixInfoHelper(originalList, fixedList []nodeInfo) ([]contentToAdd, []linesToRemove) {
func getFixInfoHelper(ctx context.Context, originalList, fixedList []nodeInfo) ([]contentToAdd, []linesToRemove) {

// While obtaining fixedYamlNode, comments and empty lines at the top are ignored.
// This causes a difference in Line numbers across the tree structure. In order to
@@ -138,20 +139,20 @@ func getFixInfoHelper(originalList, fixedList []nodeInfo) ([]contentToAdd, []lin
fixedListTracker += 1

case removedNode:
originalListTracker, fixedListTracker = addLinesToRemove(fixInfoMetadata)
originalListTracker, fixedListTracker = addLinesToRemove(ctx, fixInfoMetadata)

case insertedNode:
originalListTracker, fixedListTracker = addLinesToInsert(fixInfoMetadata)
originalListTracker, fixedListTracker = addLinesToInsert(ctx, fixInfoMetadata)

case replacedNode:
originalListTracker, fixedListTracker = updateLinesToReplace(fixInfoMetadata)
originalListTracker, fixedListTracker = updateLinesToReplace(ctx, fixInfoMetadata)
}
}

// Some nodes are still not visited if they are removed at the end of the list
for originalListTracker < len(originalList) {
fixInfoMetadata.originalListTracker = originalListTracker
originalListTracker, _ = addLinesToRemove(fixInfoMetadata)
originalListTracker, _ = addLinesToRemove(ctx, fixInfoMetadata)
}

// Some nodes are still not visited if they are inserted at the end of the list
@@ -159,7 +160,7 @@ func getFixInfoHelper(originalList, fixedList []nodeInfo) ([]contentToAdd, []lin
// Use negative index of last node in original list as a placeholder to determine the last line number later
fixInfoMetadata.originalListTracker = -(len(originalList) - 1)
fixInfoMetadata.fixedListTracker = fixedListTracker
_, fixedListTracker = addLinesToInsert(fixInfoMetadata)
_, fixedListTracker = addLinesToInsert(ctx, fixInfoMetadata)
}

return contentToAdd, linesToRemove
@@ -167,13 +168,13 @@ func getFixInfoHelper(originalList, fixedList []nodeInfo) ([]contentToAdd, []lin
}

// Adds the lines to remove and returns the updated originalListTracker
func addLinesToRemove(fixInfoMetadata *fixInfoMetadata) (int, int) {
func addLinesToRemove(ctx context.Context, fixInfoMetadata *fixInfoMetadata) (int, int) {
isOneLine, line := isOneLineSequenceNode(fixInfoMetadata.originalList, fixInfoMetadata.originalListTracker)

if isOneLine {
// Remove the entire line and replace it with the sequence node in fixed info. This way,
// the original formatting is not lost.
return replaceSingleLineSequence(fixInfoMetadata, line)
return replaceSingleLineSequence(ctx, fixInfoMetadata, line)
}

currentDFSNode := (*fixInfoMetadata.originalList)[fixInfoMetadata.originalListTracker]
@@ -188,18 +189,18 @@ func addLinesToRemove(fixInfoMetadata *fixInfoMetadata) (int, int) {
}

// Adds the lines to insert and returns the updated fixedListTracker
func addLinesToInsert(fixInfoMetadata *fixInfoMetadata) (int, int) {
func addLinesToInsert(ctx context.Context, fixInfoMetadata *fixInfoMetadata) (int, int) {

isOneLine, line := isOneLineSequenceNode(fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)

if isOneLine {
return replaceSingleLineSequence(fixInfoMetadata, line)
return replaceSingleLineSequence(ctx, fixInfoMetadata, line)
}

currentDFSNode := (*fixInfoMetadata.fixedList)[fixInfoMetadata.fixedListTracker]

lineToInsert := getLineToInsert(fixInfoMetadata)
contentToInsert := getContent(currentDFSNode.parent, fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)
contentToInsert := getContent(ctx, currentDFSNode.parent, fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)

newFixedTracker := updateTracker(fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)

@@ -212,12 +213,12 @@ func addLinesToInsert(fixInfoMetadata *fixInfoMetadata) (int, int) {
}

// Adds the lines to remove and insert and updates the fixedListTracker and originalListTracker
func updateLinesToReplace(fixInfoMetadata *fixInfoMetadata) (int, int) {
func updateLinesToReplace(ctx context.Context, fixInfoMetadata *fixInfoMetadata) (int, int) {

isOneLine, line := isOneLineSequenceNode(fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)

if isOneLine {
return replaceSingleLineSequence(fixInfoMetadata, line)
return replaceSingleLineSequence(ctx, fixInfoMetadata, line)
}

currentDFSNode := (*fixInfoMetadata.fixedList)[fixInfoMetadata.fixedListTracker]
@@ -228,8 +229,8 @@ func updateLinesToReplace(fixInfoMetadata *fixInfoMetadata) (int, int) {
fixInfoMetadata.fixedListTracker -= 1
}

addLinesToRemove(fixInfoMetadata)
|
||||
updatedOriginalTracker, updatedFixedTracker := addLinesToInsert(fixInfoMetadata)
|
||||
addLinesToRemove(ctx, fixInfoMetadata)
|
||||
updatedOriginalTracker, updatedFixedTracker := addLinesToInsert(ctx, fixInfoMetadata)
|
||||
|
||||
return updatedOriginalTracker, updatedFixedTracker
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
"bufio"
"bytes"
"container/list"
"context"
"errors"
"fmt"
"io"
@@ -111,10 +112,10 @@ func enocodeIntoYaml(parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) (
return fmt.Sprintf(`%v`, buf.String()), nil
}

func getContent(parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) string {
func getContent(ctx context.Context, parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) string {
content, err := enocodeIntoYaml(parentNode, nodeList, tracker)
if err != nil {
logger.L().Fatal("Cannot Encode into YAML")
logger.L().Ctx(ctx).Fatal("Cannot Encode into YAML")
}

indentationSpaces := parentNode.Column - 1
@@ -274,7 +275,7 @@ func isEmptyLineOrComment(lineContent string) bool {
return false
}

func readDocuments(reader io.Reader, decoder yqlib.Decoder) (*list.List, error) {
func readDocuments(ctx context.Context, reader io.Reader, decoder yqlib.Decoder) (*list.List, error) {
err := decoder.Init(reader)
if err != nil {
return nil, fmt.Errorf("Error Initializing the decoder, %w", err)
@@ -289,7 +290,7 @@ func readDocuments(reader io.Reader, decoder yqlib.Decoder) (*list.List, error)
if errors.Is(errorReading, io.EOF) {
switch reader := reader.(type) {
case *os.File:
safelyCloseFile(reader)
safelyCloseFile(ctx, reader)
}
return inputList, nil
} else if errorReading != nil {
@@ -305,21 +306,21 @@ func readDocuments(reader io.Reader, decoder yqlib.Decoder) (*list.List, error)
}
}

func safelyCloseFile(file *os.File) {
func safelyCloseFile(ctx context.Context, file *os.File) {
err := file.Close()
if err != nil {
logger.L().Error("Error Closing File")
logger.L().Ctx(ctx).Error("Error Closing File")
}
}

// Remove the entire line and replace it with the sequence node in fixed info. This way,
// the original formatting is lost.
func replaceSingleLineSequence(fixInfoMetadata *fixInfoMetadata, line int) (int, int) {
func replaceSingleLineSequence(ctx context.Context, fixInfoMetadata *fixInfoMetadata, line int) (int, int) {
originalListTracker := getFirstNodeInLine(fixInfoMetadata.originalList, line)
fixedListTracker := getFirstNodeInLine(fixInfoMetadata.fixedList, line)

currentDFSNode := (*fixInfoMetadata.fixedList)[fixedListTracker]
contentToInsert := getContent(currentDFSNode.parent, fixInfoMetadata.fixedList, fixedListTracker)
contentToInsert := getContent(ctx, currentDFSNode.parent, fixInfoMetadata.fixedList, fixedListTracker)

// Remove the Single line
*fixInfoMetadata.linesToRemove = append(*fixInfoMetadata.linesToRemove, linesToRemove{
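The recurring change across the hunks above is threading a context.Context down the call chain so that every log record carries trace metadata. A minimal, self-contained sketch of that pattern, assuming the kubescape go-logger and OpenTelemetry calls shown elsewhere in this diff (the fixFile/doFix function names and span name are illustrative only, not kubescape APIs):

package main

import (
	"context"

	logger "github.com/kubescape/go-logger"
	"github.com/kubescape/go-logger/helpers"
	"go.opentelemetry.io/otel"
)

// fixFile is a hypothetical caller that shows the ctx-threading pattern used in this diff:
// start a span, pass ctx down, and attach it to every log call via logger.L().Ctx(ctx).
func fixFile(ctx context.Context, path string) error {
	ctx, span := otel.Tracer("").Start(ctx, "fixFile") // span name is illustrative
	defer span.End()

	if err := doFix(ctx, path); err != nil {
		// the ctx carries the active span, so the error is correlated with the trace
		logger.L().Ctx(ctx).Error("failed to fix file", helpers.Error(err))
		return err
	}
	return nil
}

func doFix(ctx context.Context, path string) error {
	_ = ctx // lower-level helpers keep accepting ctx even if they only forward it
	_ = path
	return nil
}

func main() {
	_ = fixFile(context.Background(), "deployment.yaml") // hypothetical usage
}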
@@ -16,6 +16,7 @@ metadata:
labels:
app: host-scanner
k8s-app: kubescape-host-scanner
otel: enabled
spec:
selector:
matchLabels:
@@ -36,7 +37,7 @@ spec:
effect: NoSchedule
containers:
- name: host-sensor
image: quay.io/kubescape/host-scanner:v1.0.39
image: quay.io/kubescape/host-scanner:v1.0.45
securityContext:
allowPrivilegeEscalation: true
privileged: true

@@ -1,6 +1,7 @@
package hostsensorutils

import (
"context"
_ "embed"
"encoding/json"
"fmt"
@@ -22,7 +23,8 @@ import (

var (
//go:embed hostsensor.yaml
hostSensorYAML string
hostSensorYAML string
namespaceWasPresent bool
)

const PortName string = "scanner"
@@ -69,7 +71,7 @@ func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile
return hsh, nil
}

func (hsh *HostSensorHandler) Init() error {
func (hsh *HostSensorHandler) Init(ctx context.Context) error {
// deploy the YAML
// store namespace + port
// store pod names
@@ -79,19 +81,42 @@ func (hsh *HostSensorHandler) Init() error {

cautils.StartSpinner()

if err := hsh.applyYAML(); err != nil {
if err := hsh.applyYAML(ctx); err != nil {
cautils.StopSpinner()
return fmt.Errorf("failed to apply host scanner YAML, reason: %v", err)
}
hsh.populatePodNamesToNodeNames()
hsh.populatePodNamesToNodeNames(ctx)
if err := hsh.checkPodForEachNode(); err != nil {
logger.L().Error("failed to validate host-sensor pods status", helpers.Error(err))
logger.L().Ctx(ctx).Error("failed to validate host-sensor pods status", helpers.Error(err))
}
cautils.StopSpinner()
return nil
}

func (hsh *HostSensorHandler) applyYAML() error {
// checkNamespaceWasPresent checks if the given namespace was already present on kubernetes and in "Active" state.
// Return true in case it finds the namespace in the list, false otherwise.
// In case we have some error with the kubernetes APIs, it returns an error.
func (hsh *HostSensorHandler) checkNamespaceWasPresent(namespace string) bool {
ns, err := hsh.k8sObj.KubernetesClient.
CoreV1().
Namespaces().
Get(hsh.k8sObj.Context, namespace, metav1.GetOptions{})
if err != nil {
return false
}
// check also if it is in "Active" state.
if ns.Status.Phase != corev1.NamespaceActive {
return false
}
return true
}

// namespaceWasPresent return the namespaceWasPresent variable value.
func (hsh *HostSensorHandler) namespaceWasPresent() bool {
return namespaceWasPresent
}

func (hsh *HostSensorHandler) applyYAML(ctx context.Context) error {
workloads, err := cautils.ReadFile([]byte(hostSensorYAML), cautils.YAML_FILE_FORMAT)
if err != nil {
return fmt.Errorf("failed to read YAML files, reason: %v", err)
@@ -105,6 +130,8 @@ func (hsh *HostSensorHandler) applyYAML() error {
break
}
}
// check if namespace was already present on kubernetes
namespaceWasPresent = hsh.checkNamespaceWasPresent(namespaceName)

// Update workload data before applying
for i := range workloads {
@@ -121,7 +148,7 @@ func (hsh *HostSensorHandler) applyYAML() error {
containers, err := w.GetContainers()
if err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Warning("failed to tear down namespace", helpers.Error(erra))
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
}
return fmt.Errorf("container not found in DaemonSet: %v", err)
}
@@ -146,7 +173,7 @@ func (hsh *HostSensorHandler) applyYAML() error {
}
if e != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Warning("failed to tear down namespace", helpers.Error(erra))
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
}
return fmt.Errorf("failed to create/update YAML, reason: %v", e)
}
@@ -156,14 +183,14 @@ func (hsh *HostSensorHandler) applyYAML() error {
b, err := json.Marshal(newWorkload.GetObject())
if err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Warning("failed to tear down namespace", helpers.Error(erra))
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
}
return fmt.Errorf("failed to Marshal YAML of DaemonSet, reason: %v", err)
}
var ds appsv1.DaemonSet
if err := json.Unmarshal(b, &ds); err != nil {
if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
logger.L().Warning("failed to tear down namespace", helpers.Error(erra))
logger.L().Ctx(ctx).Warning("failed to tear down namespace", helpers.Error(erra))
}
return fmt.Errorf("failed to Unmarshal YAML of DaemonSet, reason: %v", err)
}
@@ -200,7 +227,7 @@ func (hsh *HostSensorHandler) checkPodForEachNode() error {
}

// initiating routine to keep pod list updated
func (hsh *HostSensorHandler) populatePodNamesToNodeNames() {
func (hsh *HostSensorHandler) populatePodNamesToNodeNames(ctx context.Context) {

go func() {
var watchRes watch.Interface
@@ -210,7 +237,7 @@ func (hsh *HostSensorHandler) populatePodNamesToNodeNames() {
LabelSelector: fmt.Sprintf("name=%s", hsh.DaemonSet.Spec.Template.Labels["name"]),
})
if err != nil {
logger.L().Error("failed to watch over daemonset pods - are we missing watch pods permissions?", helpers.Error(err))
logger.L().Ctx(ctx).Error("failed to watch over daemonset pods - are we missing watch pods permissions?", helpers.Error(err))
}
if watchRes == nil {
return
@@ -220,12 +247,12 @@ func (hsh *HostSensorHandler) populatePodNamesToNodeNames() {
if !ok {
continue
}
go hsh.updatePodInListAtomic(eve.Type, pod)
go hsh.updatePodInListAtomic(ctx, eve.Type, pod)
}
}()
}

func (hsh *HostSensorHandler) updatePodInListAtomic(eventType watch.EventType, podObj *corev1.Pod) {
func (hsh *HostSensorHandler) updatePodInListAtomic(ctx context.Context, eventType watch.EventType, podObj *corev1.Pod) {
hsh.podListLock.Lock()
defer hsh.podListLock.Unlock()

@@ -246,7 +273,7 @@ func (hsh *HostSensorHandler) updatePodInListAtomic(eventType watch.EventType, p
len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values) > 0 {
nodeName = podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
}
logger.L().Warning("One host-sensor pod is unable to schedule on node. We will fail to collect the data from this node",
logger.L().Ctx(ctx).Warning("One host-sensor pod is unable to schedule on node. We will fail to collect the data from this node",
helpers.String("message", podObj.Status.Conditions[0].Message),
helpers.String("nodeName", nodeName),
helpers.String("podName", podObj.ObjectMeta.Name))
@@ -263,7 +290,11 @@ func (hsh *HostSensorHandler) updatePodInListAtomic(eventType watch.EventType, p
}

func (hsh *HostSensorHandler) tearDownNamespace(namespace string) error {

// if namespace was already present on kubernetes (before installing host-scanner),
// then we shouldn't delete it.
if hsh.namespaceWasPresent() {
return nil
}
if err := hsh.k8sObj.KubernetesClient.CoreV1().Namespaces().Delete(hsh.k8sObj.Context, namespace, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
return fmt.Errorf("failed to delete host-sensor namespace: %v", err)
}
@@ -272,9 +303,11 @@ func (hsh *HostSensorHandler) tearDownNamespace(namespace string) error {

func (hsh *HostSensorHandler) TearDown() error {
namespace := hsh.GetNamespace()
// delete DaemonSet
if err := hsh.k8sObj.KubernetesClient.AppsV1().DaemonSets(hsh.GetNamespace()).Delete(hsh.k8sObj.Context, hsh.DaemonSet.Name, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
return fmt.Errorf("failed to delete host-sensor daemonset: %v", err)
}
// delete Namespace
if err := hsh.tearDownNamespace(namespace); err != nil {
return fmt.Errorf("failed to delete host-sensor daemonset: %v", err)
}

@@ -1,8 +1,10 @@
package hostsensorutils

import (
"context"
"encoding/json"
"fmt"
"reflect"
"strings"
"sync"

@@ -81,7 +83,7 @@ func (hsh *HostSensorHandler) ForwardToPod(podName, path string) ([]byte, error)
// The function produces a worker-pool with a fixed number of workers.
// For each node the request is pushed to the jobs channel, the worker sends the request and pushes the result to the result channel.
// When all workers have finished, the function returns a list of results
func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(path, requestKind string) ([]hostsensor.HostSensorDataEnvelope, error) {
func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(ctx context.Context, path, requestKind string) ([]hostsensor.HostSensorDataEnvelope, error) {
podList, err := hsh.getPodList()
if err != nil {
return nil, fmt.Errorf("failed to sendAllPodsHTTPGETRequest: %v", err)
@@ -94,7 +96,7 @@ func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(path, requestKind string

hsh.workerPool.hostSensorApplyJobs(podList, path, requestKind)
hsh.workerPool.hostSensorGetResults(&res)
hsh.workerPool.createWorkerPool(hsh, &wg)
hsh.workerPool.createWorkerPool(ctx, hsh, &wg)
hsh.workerPool.waitForDone(&wg)

return res, nil
@@ -125,51 +127,51 @@ func (hsh *HostSensorHandler) GetVersion() (string, error) {
}

// return list of LinuxKernelVariables
func (hsh *HostSensorHandler) GetKernelVariables() ([]hostsensor.HostSensorDataEnvelope, error) {
func (hsh *HostSensorHandler) GetKernelVariables(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/LinuxKernelVariables", "LinuxKernelVariables")
return hsh.sendAllPodsHTTPGETRequest(ctx, "/LinuxKernelVariables", LinuxKernelVariables)
}

// return list of OpenPortsList
func (hsh *HostSensorHandler) GetOpenPortsList() ([]hostsensor.HostSensorDataEnvelope, error) {
func (hsh *HostSensorHandler) GetOpenPortsList(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/openedPorts", "OpenPortsList")
return hsh.sendAllPodsHTTPGETRequest(ctx, "/openedPorts", OpenPortsList)
}

// return list of LinuxSecurityHardeningStatus
func (hsh *HostSensorHandler) GetLinuxSecurityHardeningStatus() ([]hostsensor.HostSensorDataEnvelope, error) {
func (hsh *HostSensorHandler) GetLinuxSecurityHardeningStatus(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/linuxSecurityHardening", "LinuxSecurityHardeningStatus")
return hsh.sendAllPodsHTTPGETRequest(ctx, "/linuxSecurityHardening", LinuxSecurityHardeningStatus)
}

// return list of KubeletInfo
func (hsh *HostSensorHandler) GetKubeletInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
func (hsh *HostSensorHandler) GetKubeletInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/kubeletInfo", "KubeletInfo")
return hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeletInfo", KubeletInfo)
}

// return list of KubeProxyInfo
func (hsh *HostSensorHandler) GetKubeProxyInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
// return list of kubeProxyInfo
func (hsh *HostSensorHandler) GetKubeProxyInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/kubeProxyInfo", "KubeProxyInfo")
return hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeProxyInfo", KubeProxyInfo)
}

// return list of KubeProxyInfo
func (hsh *HostSensorHandler) GetControlPlaneInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
// return list of controlPlaneInfo
func (hsh *HostSensorHandler) GetControlPlaneInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/controlPlaneInfo", ControlPlaneInfo)
return hsh.sendAllPodsHTTPGETRequest(ctx, "/controlPlaneInfo", ControlPlaneInfo)
}

// return list of KubeProxyInfo
func (hsh *HostSensorHandler) GetCloudProviderInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
// return list of cloudProviderInfo
func (hsh *HostSensorHandler) GetCloudProviderInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/cloudProviderInfo", CloudProviderInfo)
return hsh.sendAllPodsHTTPGETRequest(ctx, "/cloudProviderInfo", CloudProviderInfo)
}

// return list of KubeletCommandLine
func (hsh *HostSensorHandler) GetKubeletCommandLine() ([]hostsensor.HostSensorDataEnvelope, error) {
func (hsh *HostSensorHandler) GetKubeletCommandLine(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
resps, err := hsh.sendAllPodsHTTPGETRequest("/kubeletCommandLine", "KubeletCommandLine")
resps, err := hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeletCommandLine", KubeletCommandLine)
if err != nil {
return resps, err
}
@@ -187,26 +189,32 @@ func (hsh *HostSensorHandler) GetKubeletCommandLine() ([]hostsensor.HostSensorDa

}

// return list of
func (hsh *HostSensorHandler) GetKernelVersion() ([]hostsensor.HostSensorDataEnvelope, error) {
// return list of CNIInfo
func (hsh *HostSensorHandler) GetCNIInfo(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/kernelVersion", "KernelVersion")
return hsh.sendAllPodsHTTPGETRequest(ctx, "/CNIInfo", CNIInfo)
}

// return list of
func (hsh *HostSensorHandler) GetOsReleaseFile() ([]hostsensor.HostSensorDataEnvelope, error) {
// return list of kernelVersion
func (hsh *HostSensorHandler) GetKernelVersion(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/osRelease", "OsReleaseFile")
return hsh.sendAllPodsHTTPGETRequest(ctx, "/kernelVersion", "KernelVersion")
}

// return list of
func (hsh *HostSensorHandler) GetKubeletConfigurations() ([]hostsensor.HostSensorDataEnvelope, error) {
// return list of osRelease
func (hsh *HostSensorHandler) GetOsReleaseFile(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
res, err := hsh.sendAllPodsHTTPGETRequest("/kubeletConfigurations", "KubeletConfiguration") // empty kind, will be overridden
return hsh.sendAllPodsHTTPGETRequest(ctx, "/osRelease", "OsReleaseFile")
}

// return list of kubeletConfigurations
func (hsh *HostSensorHandler) GetKubeletConfigurations(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
res, err := hsh.sendAllPodsHTTPGETRequest(ctx, "/kubeletConfigurations", "KubeletConfiguration") // empty kind, will be overridden
for resIdx := range res {
jsonBytes, ery := yaml.YAMLToJSON(res[resIdx].Data)
if ery != nil {
logger.L().Error("failed to convert kubelet configurations from yaml to json", helpers.Error(ery))
logger.L().Ctx(ctx).Error("failed to convert kubelet configurations from yaml to json", helpers.Error(ery))
continue
}
res[resIdx].SetData(jsonBytes)
@@ -214,7 +222,18 @@ func (hsh *HostSensorHandler) GetKubeletConfigurations() ([]hostsensor.HostSenso
return res, err
}

func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
// hasCloudProviderInfo iterates over the []hostsensor.HostSensorDataEnvelope list to find info about the cloud provider.
// If information is found, it returns true. Return false otherwise.
func hasCloudProviderInfo(cpi []hostsensor.HostSensorDataEnvelope) bool {
for index := range cpi {
if !reflect.DeepEqual(cpi[index].GetData(), json.RawMessage("{}\n")) {
return true
}
}
return false
}

func (hsh *HostSensorHandler) CollectResources(ctx context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
res := make([]hostsensor.HostSensorDataEnvelope, 0)
infoMap := make(map[string]apis.StatusInfo)
if hsh.DaemonSet == nil {
@@ -225,7 +244,7 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
logger.L().Debug("Accessing host scanner")
version, err := hsh.GetVersion()
if err != nil {
logger.L().Warning(err.Error())
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(version) > 0 {
logger.L().Info("Host scanner version : " + version)
@@ -233,103 +252,116 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
logger.L().Info("Unknown host scanner version")
}
//
kcData, err = hsh.GetKubeletConfigurations()
kcData, err = hsh.GetKubeletConfigurations(ctx)
if err != nil {
addInfoToMap(KubeletConfiguration, infoMap, err)
logger.L().Warning(err.Error())
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
//
kcData, err = hsh.GetKubeletCommandLine()
kcData, err = hsh.GetKubeletCommandLine(ctx)
if err != nil {
addInfoToMap(KubeletCommandLine, infoMap, err)
logger.L().Warning(err.Error())
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
//
kcData, err = hsh.GetOsReleaseFile()
kcData, err = hsh.GetOsReleaseFile(ctx)
if err != nil {
addInfoToMap(OsReleaseFile, infoMap, err)
logger.L().Warning(err.Error())
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
//
kcData, err = hsh.GetKernelVersion()
kcData, err = hsh.GetKernelVersion(ctx)
if err != nil {
addInfoToMap(KernelVersion, infoMap, err)
logger.L().Warning(err.Error())
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
//
kcData, err = hsh.GetLinuxSecurityHardeningStatus()
kcData, err = hsh.GetLinuxSecurityHardeningStatus(ctx)
if err != nil {
addInfoToMap(LinuxSecurityHardeningStatus, infoMap, err)
logger.L().Warning(err.Error())
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
//
kcData, err = hsh.GetOpenPortsList()
kcData, err = hsh.GetOpenPortsList(ctx)
if err != nil {
addInfoToMap(OpenPortsList, infoMap, err)
logger.L().Warning(err.Error())
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
// GetKernelVariables
kcData, err = hsh.GetKernelVariables()
kcData, err = hsh.GetKernelVariables(ctx)
if err != nil {
addInfoToMap(LinuxKernelVariables, infoMap, err)
logger.L().Warning(err.Error())
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}

// GetKubeletInfo
kcData, err = hsh.GetKubeletInfo()
kcData, err = hsh.GetKubeletInfo(ctx)
if err != nil {
addInfoToMap(KubeletInfo, infoMap, err)
logger.L().Warning(err.Error())
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}

// GetKubeProxyInfo
kcData, err = hsh.GetKubeProxyInfo()
kcData, err = hsh.GetKubeProxyInfo(ctx)
if err != nil {
addInfoToMap(KubeProxyInfo, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}

// GetControlPlaneInfo
kcData, err = hsh.GetControlPlaneInfo()
if err != nil {
addInfoToMap(ControlPlaneInfo, infoMap, err)
logger.L().Warning(err.Error())
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}

// GetCloudProviderInfo
kcData, err = hsh.GetCloudProviderInfo()
kcData, err = hsh.GetCloudProviderInfo(ctx)
isCloudProvider := hasCloudProviderInfo(kcData)
if err != nil {
addInfoToMap(CloudProviderInfo, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}

// GetControlPlaneInfo
if !isCloudProvider { // we retrieve control plane info only if we are not using a cloud provider
kcData, err = hsh.GetControlPlaneInfo(ctx)
if err != nil {
addInfoToMap(ControlPlaneInfo, infoMap, err)
logger.L().Ctx(ctx).Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
}

// GetCNIInfo
kcData, err = hsh.GetCNIInfo(ctx)
if err != nil {
addInfoToMap(CNIInfo, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {

@@ -1,13 +1,15 @@
package hostsensorutils

import (
"context"

"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
"github.com/kubescape/opa-utils/reporthandling/apis"
)

type IHostSensor interface {
Init() error
Init(ctx context.Context) error
TearDown() error
CollectResources() ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error)
CollectResources(context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error)
GetNamespace() string
}

@@ -1,6 +1,8 @@
package hostsensorutils

import (
"context"

"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
"github.com/kubescape/opa-utils/reporthandling/apis"
)
@@ -8,7 +10,7 @@ import (
type HostSensorHandlerMock struct {
}

func (hshm *HostSensorHandlerMock) Init() error {
func (hshm *HostSensorHandlerMock) Init(_ context.Context) error {
return nil
}

@@ -16,7 +18,7 @@ func (hshm *HostSensorHandlerMock) TearDown() error {
return nil
}

func (hshm *HostSensorHandlerMock) CollectResources() ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
func (hshm *HostSensorHandlerMock) CollectResources(_ context.Context) ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
return []hostsensor.HostSensorDataEnvelope{}, nil, nil
}

@@ -1,6 +1,7 @@
package hostsensorutils

import (
"context"
"sync"

logger "github.com/kubescape/go-logger"
@@ -42,22 +43,22 @@ func (wp *workerPool) init(noOfPods ...int) {
}

// The worker takes a job out of the chan, executes the request, and pushes the result to the results chan
func (wp *workerPool) hostSensorWorker(hsh *HostSensorHandler, wg *sync.WaitGroup) {
func (wp *workerPool) hostSensorWorker(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup) {
defer wg.Done()
for job := range wp.jobs {
hostSensorDataEnvelope, err := hsh.getResourcesFromPod(job.podName, job.nodeName, job.requestKind, job.path)
if err != nil {
logger.L().Error("failed to get data", helpers.String("path", job.path), helpers.String("podName", job.podName), helpers.Error(err))
logger.L().Ctx(ctx).Error("failed to get data", helpers.String("path", job.path), helpers.String("podName", job.podName), helpers.Error(err))
} else {
wp.results <- hostSensorDataEnvelope
}
}
}

func (wp *workerPool) createWorkerPool(hsh *HostSensorHandler, wg *sync.WaitGroup) {
func (wp *workerPool) createWorkerPool(ctx context.Context, hsh *HostSensorHandler, wg *sync.WaitGroup) {
for i := 0; i < noOfWorkers; i++ {
wg.Add(1)
go wp.hostSensorWorker(hsh, wg)
go wp.hostSensorWorker(ctx, hsh, wg)
}
}

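The worker-pool change above only threads the context through for logging. For readers unfamiliar with the shape, here is a stripped-down sketch of the same fan-out/fan-in pattern with hypothetical job and result types (not the kubescape ones):

package main

import (
	"context"
	"fmt"
	"sync"
)

type job struct{ podName, path string }

// runPool fans jobs out to a fixed number of workers and collects results.
// The context is only propagated so workers can stop or log with trace metadata,
// mirroring the createWorkerPool/hostSensorWorker split in the diff above.
func runPool(ctx context.Context, jobs []job, workers int) []string {
	jobCh := make(chan job)
	resCh := make(chan string, len(jobs))
	var wg sync.WaitGroup

	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range jobCh {
				select {
				case <-ctx.Done(): // hypothetical cancellation handling
					return
				default:
					resCh <- fmt.Sprintf("GET %s from %s", j.path, j.podName)
				}
			}
		}()
	}

	for _, j := range jobs {
		jobCh <- j
	}
	close(jobCh)
	wg.Wait()
	close(resCh)

	var out []string
	for r := range resCh {
		out = append(out, r)
	}
	return out
}

func main() {
	out := runPool(context.Background(), []job{{"pod-a", "/kubeletInfo"}, {"pod-b", "/kubeletInfo"}}, 2)
	fmt.Println(out)
}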
@@ -17,6 +17,7 @@ var (
KubeProxyInfo = "KubeProxyInfo"
ControlPlaneInfo = "ControlPlaneInfo"
CloudProviderInfo = "CloudProviderInfo"
CNIInfo = "CNIInfo"

MapHostSensorResourceToApiGroup = map[string]string{
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
@@ -30,6 +31,7 @@ var (
KubeProxyInfo: "hostdata.kubescape.cloud/v1beta0",
ControlPlaneInfo: "hostdata.kubescape.cloud/v1beta0",
CloudProviderInfo: "hostdata.kubescape.cloud/v1beta0",
CNIInfo: "hostdata.kubescape.cloud/v1beta0",
}
)

core/pkg/opaprocessor/cosign_has_signature.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package opaprocessor

import (
"context"

"github.com/google/go-containerregistry/pkg/name"
"github.com/sigstore/cosign/pkg/cosign"
)

func has_signature(img string) bool {
ref, err := name.ParseReference(img)
if err != nil {
return false
}
sins, err := cosign.FetchSignaturesForReference(context.Background(), ref)

if err != nil {
return false
}

return len(sins) > 0
}
core/pkg/opaprocessor/cosign_has_signature_test.go (new file, 27 lines)
@@ -0,0 +1,27 @@
package opaprocessor

import (
"testing"

"github.com/stretchr/testify/assert"
)

func Test_has_signature(t *testing.T) {

tests := []struct {
name string
img string
want bool
}{
{
name: "valid signature",
img: "quay.io/kubescape/gateway",
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
assert.Equal(t, tt.want, has_signature(tt.img), tt.name)
})
}
}
core/pkg/opaprocessor/cosign_verify.go (new file, 76 lines)
@@ -0,0 +1,76 @@
package opaprocessor

import (
"context"
"crypto"
"fmt"

"github.com/google/go-containerregistry/pkg/name"
"github.com/sigstore/cosign/cmd/cosign/cli/options"
"github.com/sigstore/cosign/cmd/cosign/cli/sign"
"github.com/sigstore/cosign/pkg/cosign"
"github.com/sigstore/cosign/pkg/cosign/pkcs11key"
ociremote "github.com/sigstore/cosign/pkg/oci/remote"
sigs "github.com/sigstore/cosign/pkg/signature"
)

// VerifyCommand verifies a signature on a supplied container image
type VerifyCommand struct {
options.RegistryOptions
Annotations sigs.AnnotationsMap
CertChain string
CertEmail string
CertOidcProvider string
CertIdentity string
CertOidcIssuer string
CertGithubWorkflowTrigger string
CertGithubWorkflowSha string
CertGithubWorkflowName string
KeyRef string
CertGithubWorkflowRef string
SignatureRef string
CertRef string
CertGithubWorkflowRepository string
Attachment string
Slot string
Output string
RekorURL string
HashAlgorithm crypto.Hash
Sk bool
CheckClaims bool
LocalImage bool
EnforceSCT bool
}

// Exec runs the verification command
func verify(img string, key string) (bool, error) {

co := &cosign.CheckOpts{}
var ociremoteOpts []ociremote.Option
attachment := ""

pubKey, err := sigs.LoadPublicKeyRaw([]byte(key), crypto.SHA256)
if err != nil {
return false, fmt.Errorf("loading public key: %w", err)
}
pkcs11Key, ok := pubKey.(*pkcs11key.Key)
if ok {
defer pkcs11Key.Close()
}
co.SigVerifier = pubKey
ref, err := name.ParseReference(img)
if err != nil {
return false, fmt.Errorf("parsing reference: %w", err)
}
ref, err = sign.GetAttachedImageRef(ref, attachment, ociremoteOpts...)
if err != nil {
return false, fmt.Errorf("resolving attachment type %s for image %s: %w", attachment, img, err)
}

_, _, err = cosign.VerifyImageSignatures(context.TODO(), ref, co)
if err != nil {
return false, fmt.Errorf("verifying signature: %w", err)
}

return true, nil
}
core/pkg/opaprocessor/cosign_verify_test.go (new file, 49 lines)
@@ -0,0 +1,49 @@
package opaprocessor

import (
"fmt"
"testing"

"github.com/stretchr/testify/assert"
)

func Test_verify(t *testing.T) {
type args struct {
img string
key string
}
tests := []struct {
name string
args args
want bool
wantErr assert.ErrorAssertionFunc
}{
{
"valid signature",
args{
img: "hisu/cosign-tests:signed",
key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
},
true,
assert.NoError,
},
{
"no signature",
args{
img: "hisu/cosign-tests:unsigned",
key: "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGnMCUU0jGe6r4mPsPuyTXf61PE4e\nNwB/31SvUMmnoyd/1UxSqd+MRPXPU6pcub4k6E9G9SprVCuf6Sydcbyiqw==\n-----END PUBLIC KEY-----",
},
false,
assert.Error,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := verify(tt.args.img, tt.args.key)
if !tt.wantErr(t, err, fmt.Sprintf("verify(%v, %v)", tt.args.img, tt.args.key)) {
return
}
assert.Equalf(t, tt.want, got, "verify(%v, %v)", tt.args.img, tt.args.key)
})
}
}
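For orientation, here is a minimal, self-contained sketch of how a custom builtin in the style of the `cosign.verify` function registered later in this diff can be wired into an OPA evaluation. The builtin body below is a placeholder that only inspects its arguments; the real definition calls `verify(img, key)` from the file above. The module text and query are illustrative, not kubescape rules.

package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/rego"
	"github.com/open-policy-agent/opa/topdown/builtins"
	"github.com/open-policy-agent/opa/types"
)

func main() {
	// Register a two-argument builtin, mirroring cosignVerifySignatureDeclaration/Definition.
	rego.RegisterBuiltin2(
		&rego.Function{
			Name:    "cosign.verify",
			Decl:    types.NewFunction(types.Args(types.S, types.A), types.B),
			Memoize: true,
		},
		func(bctx rego.BuiltinContext, a, b *ast.Term) (*ast.Term, error) {
			img, err := builtins.StringOperand(a.Value, 1)
			if err != nil {
				return nil, err
			}
			key, err := builtins.StringOperand(b.Value, 2)
			if err != nil {
				return nil, err
			}
			// Placeholder logic: the real builtin calls verify(string(img), string(key)).
			return ast.BooleanTerm(len(img) > 0 && len(key) > 0), nil
		},
	)

	module := `package example

allow { cosign.verify(input.image, input.key) }`

	query, err := rego.New(
		rego.Query("data.example.allow"),
		rego.Module("example.rego", module),
	).PrepareForEval(context.Background())
	if err != nil {
		panic(err)
	}

	rs, err := query.Eval(context.Background(), rego.EvalInput(map[string]string{
		"image": "quay.io/kubescape/gateway", // image taken from the test above; key is a dummy value
		"key":   "dummy",
	}))
	fmt.Println(rs, err)
}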
@@ -14,8 +14,8 @@ import (
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"

"github.com/open-policy-agent/opa/storage"
"go.opentelemetry.io/otel"

"github.com/kubescape/k8s-interface/workloadinterface"

@@ -27,6 +27,12 @@ import (

const ScoreConfigPath = "/resources/config"

type IJobProgressNotificationClient interface {
Start(allSteps int)
ProgressJob(step int, message string)
Stop()
}

type OPAProcessor struct {
regoDependenciesData *resources.RegoDependenciesData
*cautils.OPASessionObj
@@ -42,20 +48,20 @@ func NewOPAProcessor(sessionObj *cautils.OPASessionObj, regoDependenciesData *re
regoDependenciesData: regoDependenciesData,
}
}
func (opap *OPAProcessor) ProcessRulesListenner() error {
func (opap *OPAProcessor) ProcessRulesListenner(ctx context.Context, progressListener IJobProgressNotificationClient) error {

opap.OPASessionObj.AllPolicies = ConvertFrameworksToPolicies(opap.Policies, cautils.BuildNumber)

ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Policies, opap.OPASessionObj.AllPolicies)

// process
if err := opap.Process(opap.OPASessionObj.AllPolicies); err != nil {
logger.L().Error(err.Error())
if err := opap.Process(ctx, opap.OPASessionObj.AllPolicies, progressListener); err != nil {
logger.L().Ctx(ctx).Error(err.Error())
// Return error?
}

// edit results
opap.updateResults()
opap.updateResults(ctx)

//TODO: review this location
scorewrapper := score.NewScoreWrapper(opap.OPASessionObj)
@@ -64,17 +70,29 @@ func (opap *OPAProcessor) ProcessRulesListenner() error {
return nil
}

func (opap *OPAProcessor) Process(policies *cautils.Policies) error {
func (opap *OPAProcessor) Process(ctx context.Context, policies *cautils.Policies, progressListener IJobProgressNotificationClient) error {
ctx, span := otel.Tracer("").Start(ctx, "OPAProcessor.Process")
defer span.End()
opap.loggerStartScanning()

defer opap.loggerDoneScanning()
cautils.StartSpinner()
defer cautils.StopSpinner()

if progressListener != nil {
progressListener.Start(len(policies.Controls))
defer progressListener.Stop()
}

for _, toPin := range policies.Controls {
if progressListener != nil {
progressListener.ProgressJob(1, fmt.Sprintf("Control %s", toPin.ControlID))
}

control := toPin

resourcesAssociatedControl, err := opap.processControl(&control)
resourcesAssociatedControl, err := opap.processControl(ctx, &control)
if err != nil {
logger.L().Error(err.Error())
logger.L().Ctx(ctx).Error(err.Error())
}

if len(resourcesAssociatedControl) == 0 {
@@ -94,10 +112,6 @@ func (opap *OPAProcessor) Process(policies *cautils.Policies) error {

opap.Report.ReportGenerationTime = time.Now().UTC()

cautils.StopSpinner()

opap.loggerDoneScanning()

return nil
}

@@ -119,16 +133,16 @@ func (opap *OPAProcessor) loggerDoneScanning() {
}
}

func (opap *OPAProcessor) processControl(control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, error) {
func (opap *OPAProcessor) processControl(ctx context.Context, control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, error) {
var errs error

resourcesAssociatedControl := make(map[string]resourcesresults.ResourceAssociatedControl)

// ruleResults := make(map[string][]resourcesresults.ResourceAssociatedRule)
for i := range control.Rules {
resourceAssociatedRule, err := opap.processRule(&control.Rules[i], control.FixedInput)
resourceAssociatedRule, err := opap.processRule(ctx, &control.Rules[i], control.FixedInput)
if err != nil {
logger.L().Error(err.Error())
logger.L().Ctx(ctx).Error(err.Error())
continue
}

@@ -146,6 +160,9 @@ func (opap *OPAProcessor) processControl(control *reporthandling.Control) (map[s
if ruleResponse != nil {
controlResult.ResourceAssociatedRules = append(controlResult.ResourceAssociatedRules, *ruleResponse)
}
if control, ok := opap.AllPolicies.Controls[control.ControlID]; ok {
controlResult.SetStatus(control)
}
resourcesAssociatedControl[resourceID] = controlResult
}
}
@@ -154,7 +171,7 @@ func (opap *OPAProcessor) processControl(control *reporthandling.Control) (map[s
return resourcesAssociatedControl, errs
}

func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule, fixedControlInputs map[string][]string) (map[string]*resourcesresults.ResourceAssociatedRule, error) {
func (opap *OPAProcessor) processRule(ctx context.Context, rule *reporthandling.PolicyRule, fixedControlInputs map[string][]string) (map[string]*resourcesresults.ResourceAssociatedRule, error) {

postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs) // get store
dataControlInputs := map[string]string{"cloudProvider": opap.OPASessionObj.Report.ClusterCloudProvider}
@@ -179,7 +196,7 @@ func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule, fixedCont

resources := map[string]*resourcesresults.ResourceAssociatedRule{}
// the failed resources are a subgroup of the enumeratedData, so we store the enumeratedData like it was the input data
enumeratedData, err := opap.enumerateData(rule, inputRawResources)
enumeratedData, err := opap.enumerateData(ctx, rule, inputRawResources)
if err != nil {
return nil, err
}
@@ -193,10 +210,10 @@ func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule, fixedCont
opap.AllResources[inputResources[i].GetID()] = inputResources[i]
}

ruleResponses, err := opap.runOPAOnSingleRule(rule, inputRawResources, ruleData, RuleRegoDependenciesData)
ruleResponses, err := opap.runOPAOnSingleRule(ctx, rule, inputRawResources, ruleData, RuleRegoDependenciesData)
if err != nil {
// TODO - Handle error
logger.L().Error(err.Error())
logger.L().Ctx(ctx).Error(err.Error())
} else {
// ruleResponse to ruleResult
for i := range ruleResponses {
@@ -207,7 +224,7 @@ func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule, fixedCont
ruleResult = r
}

ruleResult.Status = apis.StatusFailed
ruleResult.SetStatus(apis.StatusFailed, nil)
for j := range ruleResponses[i].FailedPaths {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FailedPath: ruleResponses[i].FailedPaths[j]})
}
@@ -225,24 +242,27 @@ func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule, fixedCont
return resources, err
}

func (opap *OPAProcessor) runOPAOnSingleRule(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, ruleRegoDependenciesData resources.RegoDependenciesData) ([]reporthandling.RuleResponse, error) {
func (opap *OPAProcessor) runOPAOnSingleRule(ctx context.Context, rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, ruleRegoDependenciesData resources.RegoDependenciesData) ([]reporthandling.RuleResponse, error) {
switch rule.RuleLanguage {
case reporthandling.RegoLanguage, reporthandling.RegoLanguage2:
return opap.runRegoOnK8s(rule, k8sObjects, getRuleData, ruleRegoDependenciesData)
return opap.runRegoOnK8s(ctx, rule, k8sObjects, getRuleData, ruleRegoDependenciesData)
default:
return nil, fmt.Errorf("rule: '%s', language '%v' not supported", rule.Name, rule.RuleLanguage)
}
}

func (opap *OPAProcessor) runRegoOnK8s(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, ruleRegoDependenciesData resources.RegoDependenciesData) ([]reporthandling.RuleResponse, error) {
func (opap *OPAProcessor) runRegoOnK8s(ctx context.Context, rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, ruleRegoDependenciesData resources.RegoDependenciesData) ([]reporthandling.RuleResponse, error) {

// compile modules
modules, err := getRuleDependencies()
modules, err := getRuleDependencies(ctx)
if err != nil {
return nil, fmt.Errorf("rule: '%s', %s", rule.Name, err.Error())
}
rego.RegisterBuiltin2(cosignVerifySignatureDeclaration, cosignVerifySignatureDefinition)
rego.RegisterBuiltin1(cosignHasSignatureDeclaration, cosignHasSignatureDefinition)
modules[rule.Name] = getRuleData(rule)
compiled, err := ast.CompileModules(modules)

if err != nil {
return nil, fmt.Errorf("in 'runRegoOnSingleRule', failed to compile rule, name: %s, reason: %s", rule.Name, err.Error())
}
@@ -255,7 +275,7 @@ func (opap *OPAProcessor) runRegoOnK8s(rule *reporthandling.PolicyRule, k8sObjec
// Eval
results, err := opap.regoEval(k8sObjects, compiled, &store)
if err != nil {
logger.L().Error(err.Error())
logger.L().Ctx(ctx).Error(err.Error())
}

return results, nil
@@ -284,7 +304,7 @@ func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRe
return results, nil
}

func (opap *OPAProcessor) enumerateData(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}) ([]map[string]interface{}, error) {
func (opap *OPAProcessor) enumerateData(ctx context.Context, rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}) ([]map[string]interface{}, error) {

if ruleEnumeratorData(rule) == "" {
return k8sObjects, nil
@@ -295,7 +315,7 @@ func (opap *OPAProcessor) enumerateData(rule *reporthandling.PolicyRule, k8sObje
RuleRegoDependenciesData := resources.RegoDependenciesData{DataControlInputs: dataControlInputs,
PostureControlInputs: postureControlInputs}

ruleResponse, err := opap.runOPAOnSingleRule(rule, k8sObjects, ruleEnumeratorData, RuleRegoDependenciesData)
ruleResponse, err := opap.runOPAOnSingleRule(ctx, rule, k8sObjects, ruleEnumeratorData, RuleRegoDependenciesData)
if err != nil {
return nil, err
}

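The new IJobProgressNotificationClient hook above only requires Start, ProgressJob, and Stop, and Process guards every call with a nil check, so passing nil disables progress reporting. A minimal sketch of a caller-supplied implementation (the consoleProgress type and its output format are illustrative, not part of the kubescape API beyond the interface itself):

package main

import (
	"context"
	"fmt"
)

// consoleProgress is a hypothetical IJobProgressNotificationClient implementation.
type consoleProgress struct {
	total, done int
}

func (c *consoleProgress) Start(allSteps int) { c.total = allSteps; c.done = 0 }

func (c *consoleProgress) ProgressJob(step int, message string) {
	c.done += step
	fmt.Printf("[%d/%d] %s\n", c.done, c.total, message)
}

func (c *consoleProgress) Stop() { fmt.Println("scan finished") }

func main() {
	var p consoleProgress
	p.Start(3)
	for i := 1; i <= 3; i++ {
		p.ProgressJob(1, fmt.Sprintf("Control C-%04d", i)) // control IDs are illustrative
	}
	p.Stop()
	_ = context.Background() // the real Process call also takes a context for tracing
}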
@@ -1,6 +1,7 @@
package opaprocessor

import (
"context"
"testing"

"github.com/armosec/armoapi-go/armotypes"
@@ -38,7 +39,8 @@ func TestProcessResourcesResult(t *testing.T) {
opaSessionObj.AllResources[deployment.GetID()] = deployment

opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock())
opap.Process(policies)
opap.AllPolicies = policies
opap.Process(context.TODO(), policies, nil)

assert.Equal(t, 1, len(opaSessionObj.ResourcesResult))
res := opaSessionObj.ResourcesResult[deployment.GetID()]
@@ -49,10 +51,9 @@ func TestProcessResourcesResult(t *testing.T) {
assert.False(t, res.GetStatus(nil).IsPassed())
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)

opap.updateResults()
opap.updateResults(context.TODO())
res = opaSessionObj.ResourcesResult[deployment.GetID()]
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
assert.True(t, res.GetStatus(nil).IsFailed())
@@ -63,31 +64,29 @@ func TestProcessResourcesResult(t *testing.T) {
summaryDetails := opaSessionObj.Report.SummaryDetails
assert.Equal(t, 1, summaryDetails.NumberOfResources().All())
assert.Equal(t, 1, summaryDetails.NumberOfResources().Failed())
assert.Equal(t, 0, summaryDetails.NumberOfResources().Excluded())
assert.Equal(t, 0, summaryDetails.NumberOfResources().Passed())
assert.Equal(t, 0, summaryDetails.NumberOfResources().Skipped())

// test resource listing
assert.Equal(t, 1, summaryDetails.ListResourcesIDs().All().Len())
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Excluded()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Skipped()))

// test control listing
assert.Equal(t, res.ListControlsIDs(nil).All().Len(), summaryDetails.NumberOfControls().All())
assert.Equal(t, len(res.ListControlsIDs(nil).Passed()), summaryDetails.NumberOfControls().Passed())
assert.Equal(t, len(res.ListControlsIDs(nil).Skipped()), summaryDetails.NumberOfControls().Skipped())
assert.Equal(t, len(res.ListControlsIDs(nil).Failed()), summaryDetails.NumberOfControls().Failed())
assert.Equal(t, len(res.ListControlsIDs(nil).Excluded()), summaryDetails.NumberOfControls().Excluded())
assert.True(t, summaryDetails.GetStatus().IsFailed())

opaSessionObj.Exceptions = []armotypes.PostureExceptionPolicy{*mocks.MockExceptionAllKinds(&armotypes.PosturePolicy{FrameworkName: frameworks[0].Name})}
opap.updateResults()
opap.updateResults(context.TODO())

res = opaSessionObj.ResourcesResult[deployment.GetID()]
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Excluded()))
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
assert.True(t, res.GetStatus(nil).IsExcluded())
assert.False(t, res.GetStatus(nil).IsPassed())
assert.Equal(t, 2, len(res.ListControlsIDs(nil).Passed()))
assert.True(t, res.GetStatus(nil).IsPassed())
assert.False(t, res.GetStatus(nil).IsFailed())
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)

@@ -95,6 +94,6 @@ func TestProcessResourcesResult(t *testing.T) {
summaryDetails = opaSessionObj.Report.SummaryDetails
assert.Equal(t, 1, summaryDetails.ListResourcesIDs().All().Len())
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Excluded()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Skipped()))
}

@@ -1,38 +1,51 @@
package opaprocessor

import (
logger "github.com/kubescape/go-logger"
"github.com/kubescape/kubescape/v2/core/cautils"
"context"

logger "github.com/kubescape/go-logger"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/opa-utils/exceptions"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
resources "github.com/kubescape/opa-utils/resources"
"go.opentelemetry.io/otel"
)

// updateResults updates the results objects and report objects. This is a critical function - DO NOT CHANGE
//
// The function:
// - removes sensible data
// - adds exceptions
// - adds exceptions (and updates controls status)
// - summarizes results
func (opap *OPAProcessor) updateResults() {
func (opap *OPAProcessor) updateResults(ctx context.Context) {
ctx, span := otel.Tracer("").Start(ctx, "OPAProcessor.updateResults")
defer span.End()

// remove data from all objects
for i := range opap.AllResources {
removeData(opap.AllResources[i])
}

processor := exceptions.NewProcessor()

// set exceptions
for i := range opap.ResourcesResult {

t := opap.ResourcesResult[i]

// first set exceptions
// first set exceptions (reuse the same exceptions processor)
if resource, ok := opap.AllResources[i]; ok {
t.SetExceptions(resource, opap.Exceptions, cautils.ClusterName)
t.SetExceptions(
resource,
opap.Exceptions,
cautils.ClusterName,
opap.AllPolicies.Controls, // update status depending on action required
resourcesresults.WithExceptionsProcessor(processor),
)
}

// summarize the resources
@@ -71,7 +84,7 @@ func mapControlToInfo(mapResourceToControls map[string][]string, infoMap map[str
}

func isEmptyResources(counters reportsummary.ICounters) bool {
return counters.Failed() == 0 && counters.Excluded() == 0 && counters.Passed() == 0
return counters.Failed() == 0 && counters.Skipped() == 0 && counters.Passed() == 0
}

func getAllSupportedObjects(k8sResources *cautils.K8SResources, ksResources *cautils.KSResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
@@ -156,10 +169,10 @@ func filterOutChildResources(objects []workloadinterface.IMetadata, match []repo
}
return response
}
func getRuleDependencies() (map[string]string, error) {
func getRuleDependencies(ctx context.Context) (map[string]string, error) {
modules := resources.LoadRegoModules()
if len(modules) == 0 {
logger.L().Warning("failed to load rule dependencies")
logger.L().Ctx(ctx).Warning("failed to load rule dependencies")
}
return modules, nil
}
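The hunks above and below all apply the same refactor: each function gains a context.Context parameter, opens an OpenTelemetry span, and routes warnings and errors through the context-aware logger. A minimal sketch of that shape, assuming a hypothetical doWork/step pair (otel.Tracer, logger.L().Ctx, and helpers.Error are the same calls used in the diff itself):

func (opap *OPAProcessor) doWork(ctx context.Context) {
	// open a span so the work is visible in the trace; close it when the function returns
	ctx, span := otel.Tracer("").Start(ctx, "OPAProcessor.doWork")
	defer span.End()

	// step is a placeholder for any context-aware call made inside the function
	if err := step(ctx); err != nil {
		// route the record through the context-aware logger, as the commits do
		logger.L().Ctx(ctx).Warning("step failed", helpers.Error(err))
	}
}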
@@ -1,9 +1,17 @@
package opaprocessor

import (
"fmt"

logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/rego"
"github.com/open-policy-agent/opa/topdown/builtins"
"github.com/open-policy-agent/opa/types"
)

// ConvertFrameworksToPolicies convert list of frameworks to list of policies
@@ -43,3 +51,38 @@ func ConvertFrameworksToSummaryDetails(summaryDetails *reportsummary.SummaryDeta
}

}

var cosignVerifySignatureDeclaration = &rego.Function{
Name: "cosign.verify",
Decl: types.NewFunction(types.Args(types.S, types.A), types.B),
Memoize: true,
}
var cosignVerifySignatureDefinition = func(bctx rego.BuiltinContext, a, b *ast.Term) (*ast.Term, error) {
aStr, err := builtins.StringOperand(a.Value, 1)
if err != nil {
return nil, fmt.Errorf("invalid parameter type: %v", err)
}
bStr, err := builtins.StringOperand(b.Value, 1)
if err != nil {
return nil, fmt.Errorf("invalid parameter type: %v", err)
}
result, err := verify(string(aStr), string(bStr))
if err != nil {
// Do not change this log from debug level. We might find a lot of images without signature
logger.L().Debug("failed to verify signature", helpers.String("image", string(aStr)), helpers.String("key", string(bStr)), helpers.Error(err))
}
return ast.BooleanTerm(result), nil
}

var cosignHasSignatureDeclaration = &rego.Function{
Name: "cosign.has_signature",
Decl: types.NewFunction(types.Args(types.S), types.B),
Memoize: true,
}
var cosignHasSignatureDefinition = func(bctx rego.BuiltinContext, a *ast.Term) (*ast.Term, error) {
aStr, err := builtins.StringOperand(a.Value, 1)
if err != nil {
return nil, fmt.Errorf("invalid parameter type: %v", err)
}
return ast.BooleanTerm(has_signature(string(aStr))), nil
}
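For orientation, the two declaration/definition pairs above follow OPA's custom-builtin API: a *rego.Function describes the builtin's name and type, and the matching closure implements it. A minimal sketch of how such builtins are typically registered on a query, assuming the declarations from this hunk are in scope (the query string and module name are placeholders, not taken from the commit):

func newRegoWithCosignBuiltins(module string) *rego.Rego {
	return rego.New(
		rego.Query("data.armo_builtins.deny"), // placeholder query
		rego.Module("rule.rego", module),      // placeholder module name
		// register cosign.verify (two arguments) and cosign.has_signature (one argument)
		rego.Function2(cosignVerifySignatureDeclaration, cosignVerifySignatureDefinition),
		rego.Function1(cosignHasSignatureDeclaration, cosignHasSignatureDefinition),
	)
}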
@@ -1,13 +1,14 @@
package policyhandler

import (
"context"
"fmt"
"strings"

logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
helpersv1 "github.com/kubescape/opa-utils/reporthandling/helpers/v1"

"go.opentelemetry.io/otel"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

cloudsupportv1 "github.com/kubescape/k8s-interface/cloudsupport/v1"
@@ -23,7 +24,7 @@ import (
// PolicyHandler -
type PolicyHandler struct {
resourceHandler resourcehandler.IResourceHandler
// we are listening on this chan in opaprocessor/processorhandler.go/ProcessRulesListenner func
// we are listening on this chan in opaprocessor/processorhandler.go/ProcessRulesListener func
getters *cautils.Getters
}

@@ -34,19 +35,19 @@ func NewPolicyHandler(resourceHandler resourcehandler.IResourceHandler) *PolicyH
}
}

func (policyHandler *PolicyHandler) CollectResources(policyIdentifier []cautils.PolicyIdentifier, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
opaSessionObj := cautils.NewOPASessionObj(nil, nil, scanInfo)
func (policyHandler *PolicyHandler) CollectResources(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
opaSessionObj := cautils.NewOPASessionObj(ctx, nil, nil, scanInfo)

// validate notification
// TODO
policyHandler.getters = &scanInfo.Getters

// get policies
if err := policyHandler.getPolicies(policyIdentifier, opaSessionObj); err != nil {
if err := policyHandler.getPolicies(ctx, policyIdentifier, opaSessionObj); err != nil {
return opaSessionObj, err
}

err := policyHandler.getResources(policyIdentifier, opaSessionObj, scanInfo)
err := policyHandler.getResources(ctx, policyIdentifier, opaSessionObj)
if err != nil {
return opaSessionObj, err
}
@@ -58,15 +59,17 @@ func (policyHandler *PolicyHandler) CollectResources(policyIdentifier []cautils.
return opaSessionObj, nil
}

func (policyHandler *PolicyHandler) getResources(policyIdentifier []cautils.PolicyIdentifier, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
opaSessionObj.Report.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
func (policyHandler *PolicyHandler) getResources(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier, opaSessionObj *cautils.OPASessionObj) error {
ctx, span := otel.Tracer("").Start(ctx, "policyHandler.getResources")
defer span.End()
opaSessionObj.Report.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo(ctx)

// set cloud metadata only when scanning a cluster
if opaSessionObj.Metadata.ScanMetadata.ScanningTarget == reportv2.Cluster {
setCloudMetadata(opaSessionObj)
}

resourcesMap, allResources, ksResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, &policyIdentifier[0].Designators)
resourcesMap, allResources, ksResources, err := policyHandler.resourceHandler.GetResources(ctx, opaSessionObj, &policyIdentifier[0].Designators)
if err != nil {
return err
}
@@ -134,7 +137,7 @@ func getCloudMetadata(opaSessionObj *cautils.OPASessionObj, config *clientcmdapi
// check if the server is AKS. e.g. https://XXX.XX.XXX.azmk8s.io:443
func isAKS(config *clientcmdapi.Config) bool {
const serverIdentifierAKS = "azmk8s.io"
if cluster, ok := config.Clusters[config.CurrentContext]; ok {
if cluster, ok := config.Clusters[k8sinterface.GetContextName()]; ok {
return strings.Contains(cluster.Server, serverIdentifierAKS)
}
return false
@@ -142,7 +145,7 @@ func isEKS(config *clientcmdapi.Config) bool {

// check if the server is EKS. e.g. arn:aws:eks:eu-west-1:xxx:cluster/xxxx
func isEKS(config *clientcmdapi.Config) bool {
if context, ok := config.Contexts[config.CurrentContext]; ok {
if context, ok := config.Contexts[k8sinterface.GetContextName()]; ok {
return strings.Contains(context.Cluster, cloudsupportv1.EKS)
}
return false
@@ -150,7 +153,7 @@ func isEKS(config *clientcmdapi.Config) bool {

// check if the server is GKE. e.g. gke_xxx-xx-0000_us-central1-c_xxxx-1
func isGKE(config *clientcmdapi.Config) bool {
if context, ok := config.Contexts[config.CurrentContext]; ok {
if context, ok := config.Contexts[k8sinterface.GetContextName()]; ok {
return strings.Contains(context.Cluster, cloudsupportv1.GKE)
}
return false
@@ -28,7 +28,6 @@ var (
kubeConfigMock string
)


func getKubeConfigMock() *clientcmdapi.Config {
kubeConfig := clientcmdapi.Config{}
if err := json.Unmarshal([]byte(kubeConfigMock), &kubeConfig); err != nil {
@@ -105,7 +104,7 @@ func Test_getCloudMetadata(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.args.kubeConfig.CurrentContext = tt.args.context
k8sinterface.SetClusterContextName(tt.args.context)
got := getCloudMetadata(tt.args.opaSessionObj, tt.args.kubeConfig)
if got == nil {
t.Errorf("getCloudMetadata() = %v, want %v", got, tt.want.Provider())
@@ -141,7 +140,7 @@ func Test_isGKE(t *testing.T) {

t.Run(tt.name, func(t *testing.T) {
// set context
tt.args.config.CurrentContext = tt.args.context
k8sinterface.SetClusterContextName(tt.args.context)
if got := isGKE(tt.args.config); got != tt.want {
t.Errorf("isGKE() = %v, want %v", got, tt.want)
}
@@ -172,7 +171,7 @@ func Test_isEKS(t *testing.T) {

t.Run(tt.name, func(t *testing.T) {
// set context
tt.args.config.CurrentContext = tt.args.context
k8sinterface.SetClusterContextName(tt.args.context)
if got := isEKS(tt.args.config); got != tt.want {
t.Errorf("isEKS() = %v, want %v", got, tt.want)
}
@@ -203,7 +202,7 @@ func Test_isAKS(t *testing.T) {

t.Run(tt.name, func(t *testing.T) {
// set context
tt.args.config.CurrentContext = tt.args.context
k8sinterface.SetClusterContextName(tt.args.context)
if got := isAKS(tt.args.config); got != tt.want {
t.Errorf("isAKS() = %v, want %v", got, tt.want)
}
@@ -250,13 +249,12 @@ func Test_getResources(t *testing.T) {
policyIdentifier := []cautils.PolicyIdentifier{{}}

assert.NotPanics(t, func() {
policyHandler.getResources(policyIdentifier, objSession, &cautils.ScanInfo{})
policyHandler.getResources(context.TODO(), policyIdentifier, objSession)
}, "Cluster named .*eks.* without a cloud config panics on cluster scan !")

assert.NotPanics(t, func() {
objSession.Metadata.ScanMetadata.ScanningTarget = reportv2.File
policyHandler.getResources(policyIdentifier, objSession, &cautils.ScanInfo{})
policyHandler.getResources(context.TODO(), policyIdentifier, objSession)
}, "Cluster named .*eks.* without a cloud config panics on non-cluster scan !")

}
@@ -1,25 +1,28 @@
package policyhandler

import (
"context"
"fmt"
"strings"

apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"

logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
"github.com/kubescape/opa-utils/reporthandling"
"go.opentelemetry.io/otel"
)

func (policyHandler *PolicyHandler) getPolicies(policyIdentifier []cautils.PolicyIdentifier, policiesAndResources *cautils.OPASessionObj) error {
func (policyHandler *PolicyHandler) getPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier, policiesAndResources *cautils.OPASessionObj) error {
ctx, span := otel.Tracer("").Start(ctx, "policyHandler.getPolicies")
defer span.End()
logger.L().Info("Downloading/Loading policy definitions")

cautils.StartSpinner()
defer cautils.StopSpinner()

policies, err := policyHandler.getScanPolicies(policyIdentifier)
policies, err := policyHandler.getScanPolicies(ctx, policyIdentifier)
if err != nil {
return err
}
@@ -34,7 +37,7 @@ func (policyHandler *PolicyHandler) getPolicies(policyIdentifier []cautils.Polic
if err == nil {
policiesAndResources.Exceptions = exceptionPolicies
} else {
logger.L().Error("failed to load exceptions", helpers.Error(err))
logger.L().Ctx(ctx).Error("failed to load exceptions", helpers.Error(err))
}

// get account configuration
@@ -42,7 +45,7 @@ func (policyHandler *PolicyHandler) getPolicies(policyIdentifier []cautils.Polic
if err == nil {
policiesAndResources.RegoInputData.PostureControlInputs = controlsInputs
} else {
logger.L().Error(err.Error())
logger.L().Ctx(ctx).Error(err.Error())
}
cautils.StopSpinner()

@@ -50,12 +53,13 @@ func (policyHandler *PolicyHandler) getPolicies(policyIdentifier []cautils.Polic
return nil
}

func (policyHandler *PolicyHandler) getScanPolicies(policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
func (policyHandler *PolicyHandler) getScanPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
frameworks := []reporthandling.Framework{}

switch getScanKind(policyIdentifier) {
case apisv1.KindFramework: // Download frameworks
for _, rule := range policyIdentifier {
logger.L().Debug("Downloading framework", helpers.String("framework", rule.Identifier))
receivedFramework, err := policyHandler.getters.PolicyGetter.GetFramework(rule.Identifier)
if err != nil {
return frameworks, policyDownloadError(err)
@@ -67,7 +71,7 @@ func (policyHandler *PolicyHandler) getScanPolicies(policyIdentifier []cautils.P
frameworks = append(frameworks, *receivedFramework)
cache := getter.GetDefaultPath(rule.Identifier + ".json")
if err := getter.SaveInFile(receivedFramework, cache); err != nil {
logger.L().Warning("failed to cache file", helpers.String("file", cache), helpers.Error(err))
logger.L().Ctx(ctx).Warning("failed to cache file", helpers.String("file", cache), helpers.Error(err))
}
}
}
@@ -76,6 +80,7 @@ func (policyHandler *PolicyHandler) getScanPolicies(policyIdentifier []cautils.P
var receivedControl *reporthandling.Control
var err error
for _, policy := range policyIdentifier {
logger.L().Debug("Downloading control", helpers.String("control", policy.Identifier))
receivedControl, err = policyHandler.getters.PolicyGetter.GetControl(policy.Identifier)
if err != nil {
return frameworks, policyDownloadError(err)
@@ -85,7 +90,7 @@ func (policyHandler *PolicyHandler) getScanPolicies(policyIdentifier []cautils.P

cache := getter.GetDefaultPath(policy.Identifier + ".json")
if err := getter.SaveInFile(receivedControl, cache); err != nil {
logger.L().Warning("failed to cache file", helpers.String("file", cache), helpers.Error(err))
logger.L().Ctx(ctx).Warning("failed to cache file", helpers.String("file", cache), helpers.Error(err))
}
}
}
@@ -1,6 +1,7 @@
package resourcehandler

import (
"context"
"fmt"
"os"
"path/filepath"
@@ -22,7 +23,7 @@ type FileResourceHandler struct {
registryAdaptors *RegistryAdaptors
}

func NewFileResourceHandler(inputPatterns []string, registryAdaptors *RegistryAdaptors) *FileResourceHandler {
func NewFileResourceHandler(_ context.Context, inputPatterns []string, registryAdaptors *RegistryAdaptors) *FileResourceHandler {
k8sinterface.InitializeMapResourcesMock() // initialize the resource map
return &FileResourceHandler{
inputPatterns: inputPatterns,
@@ -30,7 +31,7 @@ func NewFileResourceHandler(inputPatterns []string, registryAdaptors *RegistryAd
}
}

func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASessionObj, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.KSResources, error) {
func (fileHandler *FileResourceHandler) GetResources(ctx context.Context, sessionObj *cautils.OPASessionObj, _ *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.KSResources, error) {

//
// build resources map
@@ -47,7 +48,7 @@ func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASess
cautils.StartSpinner()

for path := range fileHandler.inputPatterns {
workloadIDToSource, workloads, err := getResourcesFromPath(fileHandler.inputPatterns[path])
workloadIDToSource, workloads, err := getResourcesFromPath(ctx, fileHandler.inputPatterns[path])
if err != nil {
return nil, allResources, nil, err
}
@@ -78,7 +79,7 @@ func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASess

// Should Kubescape scan image related controls when scanning local files?
// if err := fileHandler.registryAdaptors.collectImagesVulnerabilities(k8sResources, allResources, ksResources); err != nil {
// logger.L().Warning("failed to collect images vulnerabilities", helpers.Error(err))
// logger.L().Ctx(ctx).Warning("failed to collect images vulnerabilities", helpers.Error(err))
// }

cautils.StopSpinner()
@@ -87,8 +88,7 @@ func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASess
return k8sResources, allResources, ksResources, nil
}

func getResourcesFromPath(path string) (map[string]reporthandling.Source, []workloadinterface.IMetadata, error) {

func getResourcesFromPath(ctx context.Context, path string) (map[string]reporthandling.Source, []workloadinterface.IMetadata, error) {
workloadIDToSource := make(map[string]reporthandling.Source, 0)
workloads := []workloadinterface.IMetadata{}

@@ -116,7 +116,7 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
}

// load resource from local file system
sourceToWorkloads := cautils.LoadResourcesFromFiles(path, repoRoot)
sourceToWorkloads := cautils.LoadResourcesFromFiles(ctx, path, repoRoot)

// update workloads and workloadIDToSource
var warnIssued bool
@@ -142,7 +142,7 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
if gitRepo != nil {
commitInfo, err := gitRepo.GetFileLastCommit(source)
if err != nil && !warnIssued {
logger.L().Warning("git scan skipped", helpers.Error(err))
logger.L().Ctx(ctx).Warning("git scan skipped", helpers.Error(err))
warnIssued = true // croak only once
}

@@ -173,7 +173,7 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
}

// load resources from helm charts
helmSourceToWorkloads, helmSourceToChartName := cautils.LoadResourcesFromHelmCharts(path)
helmSourceToWorkloads, helmSourceToChartName := cautils.LoadResourcesFromHelmCharts(ctx, path)
for source, ws := range helmSourceToWorkloads {
workloads = append(workloads, ws...)
helmChartName := helmSourceToChartName[source]
@@ -214,7 +214,7 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
}

// Load resources from Kustomize directory
kustomizeSourceToWorkloads, kustomizeDirectoryName := cautils.LoadResourcesFromKustomizeDirectory(path)
kustomizeSourceToWorkloads, kustomizeDirectoryName := cautils.LoadResourcesFromKustomizeDirectory(ctx, path)

// update workloads and workloadIDToSource with workloads from Kustomize Directory
for source, ws := range kustomizeSourceToWorkloads {
@@ -254,6 +254,6 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
return workloadIDToSource, workloads, nil
}

func (fileHandler *FileResourceHandler) GetClusterAPIServerInfo() *version.Info {
func (fileHandler *FileResourceHandler) GetClusterAPIServerInfo(_ context.Context) *version.Info {
return nil
}
@@ -13,6 +13,8 @@ import (
"github.com/kubescape/opa-utils/reporthandling/apis"

"github.com/kubescape/k8s-interface/cloudsupport"
cloudapis "github.com/kubescape/k8s-interface/cloudsupport/apis"
cloudv1 "github.com/kubescape/k8s-interface/cloudsupport/v1"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/k8s-interface/workloadinterface"

@@ -27,6 +29,14 @@ import (
"k8s.io/client-go/dynamic"
)

type cloudResourceGetter func(string, string) (workloadinterface.IMetadata, error)

var cloudResourceGetterMapping = map[string]cloudResourceGetter{
cloudapis.CloudProviderDescribeKind: cloudsupport.GetDescriptiveInfoFromCloudProvider,
cloudapis.CloudProviderDescribeRepositoriesKind: cloudsupport.GetDescribeRepositoriesFromCloudProvider,
cloudapis.CloudProviderListEntitiesForPoliciesKind: cloudsupport.GetListEntitiesForPoliciesFromCloudProvider,
}

type K8sResourceHandler struct {
k8s *k8sinterface.KubernetesApi
hostSensorHandler hostsensorutils.IHostSensor
@@ -45,7 +55,7 @@ func NewK8sResourceHandler(k8s *k8sinterface.KubernetesApi, fieldSelector IField
}
}

func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessionObj, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.KSResources, error) {
func (k8sHandler *K8sResourceHandler) GetResources(ctx context.Context, sessionObj *cautils.OPASessionObj, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.KSResources, error) {
allResources := map[string]workloadinterface.IMetadata{}

// get k8s resources
@@ -101,12 +111,12 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio
hostResources := cautils.MapHostResources(ksResourceMap)
// check that controls use host sensor resources
if len(hostResources) > 0 {
logger.L().Info("Requesting Host scanner data")
cautils.StartSpinner()
if sessionObj.Metadata.ScanMetadata.HostScanner {
infoMap, err := k8sHandler.collectHostResources(allResources, ksResourceMap)
logger.L().Info("Requesting Host scanner data")
cautils.StartSpinner()
infoMap, err := k8sHandler.collectHostResources(ctx, allResources, ksResourceMap)
if err != nil {
logger.L().Warning("failed to collect host scanner resources", helpers.Error(err))
logger.L().Ctx(ctx).Warning("failed to collect host scanner resources", helpers.Error(err))
cautils.SetInfoMapForResources(err.Error(), hostResources, sessionObj.InfoMap)
} else if k8sHandler.hostSensorHandler == nil {
// using hostSensor mock
@@ -124,36 +134,74 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio
}

if err := k8sHandler.collectRbacResources(allResources); err != nil {
logger.L().Warning("failed to collect rbac resources", helpers.Error(err))
logger.L().Ctx(ctx).Warning("failed to collect rbac resources", helpers.Error(err))
}

cloudResources := cautils.MapCloudResources(ksResourceMap)

setMapNamespaceToNumOfResources(allResources, sessionObj)
setMapNamespaceToNumOfResources(ctx, allResources, sessionObj)

// check that controls use cloud resources
if len(cloudResources) > 0 {
provider, err := getCloudProviderDescription(allResources, ksResourceMap)
err := k8sHandler.collectCloudResources(ctx, sessionObj, allResources, ksResourceMap, cloudResources)
if err != nil {
cautils.SetInfoMapForResources(err.Error(), cloudResources, sessionObj.InfoMap)
logger.L().Warning("failed to collect cloud data", helpers.Error(err))
}
if provider != "" {
if sessionObj.Metadata != nil && sessionObj.Metadata.ContextMetadata.ClusterContextMetadata != nil {
sessionObj.Metadata.ContextMetadata.ClusterContextMetadata.CloudProvider = provider
}
}

// api server info resource
err = k8sHandler.collectAPIServerInfoResource(allResources, ksResourceMap)
if err != nil {
logger.L().Warning("failed to collect api server info resource", helpers.Error(err))
logger.L().Debug("failed to collect cloud data", helpers.Error(err))
}
}

return k8sResourcesMap, allResources, ksResourceMap, nil
}
func (k8sHandler *K8sResourceHandler) collectCloudResources(ctx context.Context, sessionObj *cautils.OPASessionObj, allResources map[string]workloadinterface.IMetadata, ksResourceMap *cautils.KSResources, cloudResources []string) error {
var err error
clusterName := cautils.ClusterName
provider := cloudsupport.GetCloudProvider(clusterName)
if provider == "" {
return fmt.Errorf("failed to get cloud provider, cluster: %s", clusterName)
}
if sessionObj.Metadata != nil && sessionObj.Metadata.ContextMetadata.ClusterContextMetadata != nil {
sessionObj.Metadata.ContextMetadata.ClusterContextMetadata.CloudProvider = provider
}
logger.L().Debug("cloud", helpers.String("cluster", clusterName), helpers.String("clusterName", clusterName), helpers.String("provider", provider))

for resourceKind, resourceGetter := range cloudResourceGetterMapping {
if cloudResourceRequired(cloudResources, resourceKind) {
logger.L().Debug("Collecting cloud data ", helpers.String("resourceKind", resourceKind))
wl, err := resourceGetter(clusterName, provider)
if err != nil {
if !strings.Contains(err.Error(), cloudv1.NotSupportedMsg) {
// Return error with useful info on how to configure credentials for getting cloud provider info
logger.L().Debug("failed to get cloud data", helpers.String("resourceKind", resourceKind), helpers.Error(err))
err = fmt.Errorf("failed to get %s descriptive information. Read more: https://hub.armosec.io/docs/kubescape-integration-with-cloud-providers", strings.ToUpper(provider))
cautils.SetInfoMapForResources(err.Error(), cloudResources, sessionObj.InfoMap)
}
} else {
allResources[wl.GetID()] = wl
(*ksResourceMap)[fmt.Sprintf("%s/%s", wl.GetApiVersion(), wl.GetKind())] = []string{wl.GetID()}
}
}
}

// get api server info resource
if cloudResourceRequired(cloudResources, string(cloudsupport.TypeApiServerInfo)) {
err = k8sHandler.collectAPIServerInfoResource(allResources, ksResourceMap)
if err != nil {
logger.L().Ctx(ctx).Warning("failed to collect api server info resource", helpers.Error(err))
}
}
return err
}

func cloudResourceRequired(cloudResources []string, resource string) bool {
for _, cresource := range cloudResources {
if strings.Contains(cresource, resource) {
return true
}
}
return false
}

func (k8sHandler *K8sResourceHandler) collectAPIServerInfoResource(allResources map[string]workloadinterface.IMetadata, ksResourceMap *cautils.KSResources) error {
clusterAPIServerInfo, err := k8sHandler.k8s.DiscoveryClient.ServerVersion()
if err != nil {
@@ -166,17 +214,17 @@ func (k8sHandler *K8sResourceHandler) collectAPIServerInfoResource(allResources
return nil
}

func (k8sHandler *K8sResourceHandler) GetClusterAPIServerInfo() *version.Info {
func (k8sHandler *K8sResourceHandler) GetClusterAPIServerInfo(ctx context.Context) *version.Info {
clusterAPIServerInfo, err := k8sHandler.k8s.DiscoveryClient.ServerVersion()
if err != nil {
logger.L().Error("failed to discover API server information", helpers.Error(err))
logger.L().Ctx(ctx).Error("failed to discover API server information", helpers.Error(err))
return nil
}
return clusterAPIServerInfo
}

// set namespaceToNumOfResources map in report
func setMapNamespaceToNumOfResources(allResources map[string]workloadinterface.IMetadata, sessionObj *cautils.OPASessionObj) {
func setMapNamespaceToNumOfResources(ctx context.Context, allResources map[string]workloadinterface.IMetadata, sessionObj *cautils.OPASessionObj) {

mapNamespaceToNumberOfResources := make(map[string]int)
for _, resource := range allResources {
@@ -192,7 +240,7 @@ func setMapNamespaceToNumOfResources(allResources map[string]workloadinterface.I
}
}
} else {
logger.L().Warning(fmt.Sprintf("failed to get owner references. Resource %s will not be counted", obj.GetName()), helpers.Error(err))
logger.L().Ctx(ctx).Warning(fmt.Sprintf("failed to get owner references. Resource %s will not be counted", obj.GetName()), helpers.Error(err))
}
}
}
@@ -276,9 +324,9 @@ func ConvertMapListToMeta(resourceMap []map[string]interface{}) []workloadinterf
return workloads
}

func (k8sHandler *K8sResourceHandler) collectHostResources(allResources map[string]workloadinterface.IMetadata, ksResourceMap *cautils.KSResources) (map[string]apis.StatusInfo, error) {
func (k8sHandler *K8sResourceHandler) collectHostResources(ctx context.Context, allResources map[string]workloadinterface.IMetadata, ksResourceMap *cautils.KSResources) (map[string]apis.StatusInfo, error) {
logger.L().Debug("Collecting host scanner resources")
hostResources, infoMap, err := k8sHandler.hostSensorHandler.CollectResources()
hostResources, infoMap, err := k8sHandler.hostSensorHandler.CollectResources(ctx)
if err != nil {
return nil, err
}
@@ -313,29 +361,6 @@ func (k8sHandler *K8sResourceHandler) collectRbacResources(allResources map[stri
return nil
}

func getCloudProviderDescription(allResources map[string]workloadinterface.IMetadata, ksResourceMap *cautils.KSResources) (string, error) {
logger.L().Debug("Collecting cloud data")

clusterName := cautils.ClusterName

provider := cloudsupport.GetCloudProvider(clusterName)

if provider != "" {
logger.L().Debug("cloud", helpers.String("cluster", clusterName), helpers.String("clusterName", clusterName), helpers.String("provider", provider))

wl, err := cloudsupport.GetDescriptiveInfoFromCloudProvider(clusterName, provider)
if err != nil {
// Return error with useful info on how to configure credentials for getting cloud provider info
logger.L().Debug("failed to get descriptive information", helpers.Error(err))
return provider, fmt.Errorf("failed to get %s descriptive information. Read more: https://hub.armosec.io/docs/kubescape-integration-with-cloud-providers", strings.ToUpper(provider))
}
allResources[wl.GetID()] = wl
(*ksResourceMap)[fmt.Sprintf("%s/%s", wl.GetApiVersion(), wl.GetKind())] = []string{wl.GetID()}
}
return provider, nil

}

func (k8sHandler *K8sResourceHandler) pullWorkerNodesNumber() (int, error) {
nodesList, err := k8sHandler.k8s.KubernetesClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
scheduableNodes := v1.NodeList{}
@@ -1,6 +1,7 @@
package resourcehandler

import (
"context"
_ "embed"
"encoding/json"
"reflect"
@@ -555,7 +556,7 @@ func TestSetMapNamespaceToNumOfResources(t *testing.T) {
}

sessionObj := cautils.NewOPASessionObjMock()
setMapNamespaceToNumOfResources(allResources, sessionObj)
setMapNamespaceToNumOfResources(context.TODO(), allResources, sessionObj)
expected := map[string]int{
"kube-system": 1,
"armo-system": 3,
@@ -566,3 +567,13 @@ func TestSetMapNamespaceToNumOfResources(t *testing.T) {
assert.NotContains(t, sessionObj.Metadata.ContextMetadata.ClusterContextMetadata.MapNamespaceToNumberOfResources, "clusterrole")
assert.NotContains(t, sessionObj.Metadata.ContextMetadata.ClusterContextMetadata.MapNamespaceToNumberOfResources, "pod")
}

func TestCloudResourceRequired(t *testing.T) {
cloudResources := []string{"container.googleapis.com/v1/ClusterDescribe",
"eks.amazonaws.com/v1/DescribeRepositories",
"eks.amazonaws.com/v1/ListEntitiesForPolicies",
"eks.amazonaws.com/v1/ClusterDescribe"}

assert.True(t, cloudResourceRequired(cloudResources, ClusterDescribe))
assert.False(t, cloudResourceRequired(cloudResources, "ListRolePolicies"))
}
@@ -12,6 +12,8 @@ import (

var (
ClusterDescribe = "ClusterDescribe"
DescribeRepositories = "DescribeRepositories"
ListEntitiesForPolicies = "ListEntitiesForPolicies"
KubeletConfiguration = "KubeletConfiguration"
OsReleaseFile = "OsReleaseFile"
KernelVersion = "KernelVersion"
@@ -24,6 +26,7 @@ var (
KubeProxyInfo = "KubeProxyInfo"
ControlPlaneInfo = "ControlPlaneInfo"
CloudProviderInfo = "CloudProviderInfo"
CNIInfo = "CNIInfo"

MapResourceToApiGroup = map[string]string{
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
@@ -37,11 +40,15 @@ var (
KubeProxyInfo: "hostdata.kubescape.cloud/v1beta0",
ControlPlaneInfo: "hostdata.kubescape.cloud/v1beta0",
CloudProviderInfo: "hostdata.kubescape.cloud/v1beta0",
CNIInfo: "hostdata.kubescape.cloud/v1beta0",
}
MapResourceToApiGroupVuln = map[string][]string{
ImageVulnerabilities: {"armo.vuln.images/v1", "image.vulnscan.com/v1"}}
MapResourceToApiGroupCloud = map[string][]string{
ClusterDescribe: {"container.googleapis.com/v1", "eks.amazonaws.com/v1", "management.azure.com/v1"}}
ClusterDescribe: {"container.googleapis.com/v1", "eks.amazonaws.com/v1", "management.azure.com/v1"},
DescribeRepositories: {"eks.amazonaws.com/v1"}, //TODO - add google and azure when they are supported
ListEntitiesForPolicies: {"eks.amazonaws.com/v1"}, //TODO - add google and azure when they are supported
}
)

func isEmptyImgVulns(ksResourcesMap cautils.KSResources) bool {
Some files were not shown because too many files have changed in this diff.