Mirror of https://github.com/kubescape/kubescape.git (synced 2026-02-14 18:09:55 +00:00)

Compare commits: fix-comman ... v2.0.398 (223 commits)
Commits in this comparison, by SHA1:

a48d9be386 3c93c2c45c 77e0a04c99 b8762b924c 025e75213a 1a3a58a309 19438e6143 284c8c737b
3441a65290 773e43b1e1 ddc0b2daf2 596686602c 5bb0c97f8f 256db4abfb 3546961a5e e6dc7c2367
07fa3b4589 d6ed4b1aca 69846bb4c0 2e5ad85fe0 1025431d64 1a863473e7 28a44ac531 cf484c328b
668514e08d dc45efb6ef 6d3844f187 4d6e85d4c7 d336f4484c bf263d8d51 cc3cf1932c 6a4dc79689
8c189f6e3c b309cfca7a c4b3ef5b80 aba978e94a a49781e9a8 3ba19f55f1 40a9b9406d d6b8f5862f
09f13c05e1 b1c8872a29 22052f5869 afce43add6 4752364699 08e7108dc0 108a2d6dd8 2c28286bb1
79858b7ed7 bb2e83eb3b 282a29b971 60b9edc463 0f9a5e3127 7c79c14363 fe84225252 56da8d8d92
f135e95d2c db34183fc1 8f3af71c84 116aee0c9c e5d44f741d f005cb7f80 9ae9d35ccb cb38a4e8a1
eb6d39be42 3160d74c42 5076c38482 73c55fe253 f48f81c0b5 81c1c29b7c 874aa38f68 b9caaf5025
61c120de0e de3408bf57 8d32032ec1 42ed787f7b ccdba85b3c c59f7691dc cf87c2d30b b547814dec
b476a72e04 4f6f85710a 47c23de160 bc85844ec0 134d854722 e3522c19cc 967fc3fe81 896a0699ec
a53375204e b1392361f8 7b4fbffae2 34e7b9f2ad f0080bdeae 0eb27389da 2c5eed9ee2 2c1a5bd032
298f8346e9 1897c5a4ba 57e435271e 7e9b430347 ca5b3e626b 3a404f29fa 16073d6872 dce563d2f5
8d556a5b84 a61063e5b8 94973867db 214c2dcae8 72b36bf012 4335e6ceac b5f92a7d54 41ec75d264
6d6ad1f487 3ac33d21ac 04e4b37f6f 3e5903de6a 04ea0fe524 955d6751a9 30c43bff10 e009244566
3d3cd2c2d8 f5498371ec c3b95bed8c 8ce7d6c0f6 e875f429a9 b6beff0488 60c69ac3f0 1fb9320421
9a176f6667 96ea9a9e42 e39fca0c11 2ec035005d b734b3aef0 0f5635f42d 8557075b7c bc0f0e7087
8ce5f9aea3 050f9d3a4e a81bf0deb4 2059324c27 a09a0a1bca 83712bb9f5 728ae47b9a 2a9b272a14
8662deac43 e42644bbd8 07d30b6272 2a4f8543cc 186b293cce 2bfe72f39d f99f955223 ec56e69a3c
3942583b1d a10b15ba4b 5003cbd7a8 481a137c23 c3f7f0938d b1925fa38d d9f8a7a46f 846a072bf9
5dd7bbd8a7 e1773acf24 03a0f97669 917a3f41e8 3c8da1b299 c61c7edbd0 53402d9a1c de9278b388
4fef6200f8 81771b7bd7 2fee77c42c 968ecdb31d af7b36a88b 6ad58d38e2 681b4ce155 9d21ac1b16
2b3fcca7e8 af8e786ab5 c8df1b8f1f 4f921ddf6f 4f5839870b c0d7f51d6c a81d770360 f64d5eab50
d773397fe9 2e30995bfc 17a2547f18 87a5cd66c8 9436ace64f fde00f6bd8 04a72a069a e2dcb5bc15
c7040a257c 602dc00c65 0339691571 9e1f3ec131 b8589819dc a3e87f4c01 21ab5a602e 5d97d7b4b2
d8d7d0b372 b8323d41fc d0b5314201 547e36e73f e593a772cb 4da09529b6 de375992e8 0bc4a29881
9575c92713 cf277874eb 746e060402 dd3a7c816e 814bc3ab2c dbaf6761df 580e45827d f3b8de9d1f
6e9a2f55fd dd7a8fd0c1 3373b728b7 6ec974f996 ebf1486a7d 4d954b2ab0 4d155a6b4f
.github/workflows/01-golang-lint.yaml (vendored, new file, 54 lines)
@@ -0,0 +1,54 @@
name: golangci-lint
on:
  push:
    branches:
      - dev
  pull_request:
    types: [ edited, opened, synchronize, reopened ]
    branches: [ master, dev ]
    paths-ignore:
      - '**.yaml'
      - '**.md'
permissions:
  contents: read
  # Optional: allow read access to pull request. Use with `only-new-issues` option.
  pull-requests: read
jobs:
  golangci:
    name: lint
    runs-on: ubuntu-20.04
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: 1.18
      - uses: actions/checkout@v3
        with:
          submodules: recursive
      - name: Install libgit2
        run: make libgit2
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v3
        with:
          # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
          version: latest

          # Optional: working directory, useful for monorepos
          # working-directory: somedir

          # Optional: golangci-lint command line arguments.
          # args: --issues-exit-code=0
          args: --timeout 10m --build-tags=static
          #--new-from-rev dev

          # Optional: show only new issues if it's a pull request. The default value is `false`.
          only-new-issues: true

          # Optional: if set to true then the all caching functionality will be complete disabled,
          # takes precedence over all other caching options.
          # skip-cache: true

          # Optional: if set to true then the action don't cache or restore ~/go/pkg.
          # skip-pkg-cache: true

          # Optional: if set to true then the action don't cache or restore ~/.cache/go-build.
          # skip-build-cache: true
.github/workflows/build-image.yaml (vendored, 29 changed lines)
@@ -26,14 +26,24 @@ on:
        type: boolean
        description: 'support amd64/arm64'

    secrets:
      QUAYIO_REGISTRY_USERNAME:
        required: true
      QUAYIO_REGISTRY_PASSWORD:
        required: true

jobs:
  check-secret:
    name: check if QUAYIO_REGISTRY_USERNAME & QUAYIO_REGISTRY_PASSWORD is set in github secrets
    runs-on: ubuntu-latest
    outputs:
      is-secret-set: ${{ steps.check-secret-set.outputs.is-secret-set }}
    steps:
      - name: Check whether unity activation requests should be done
        id: check-secret-set
        env:
          QUAYIO_REGISTRY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
          QUAYIO_REGISTRY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
        run: |
          echo "is-secret-set=${{ env.QUAYIO_REGISTRY_USERNAME != '' && env.QUAYIO_REGISTRY_PASSWORD != '' }}" >> $GITHUB_OUTPUT

  build-image:
    needs: [check-secret]
    if: needs.check-secret.outputs.is-secret-set == 'true'
    name: Build image and upload to registry
    runs-on: ubuntu-latest
    permissions:
@@ -61,10 +71,10 @@ jobs:
      - name: Build and push image
        if: ${{ inputs.support_platforms }}
        run: docker buildx build . --file build/Dockerfile --tag ${{ inputs.image_name }}:${{ inputs.image_tag }} --tag ${{ inputs.image_name }}:latest --build-arg image_version=${{ inputs.image_tag }} --build-arg client=${{ inputs.client }} --push --platform linux/amd64,linux/arm64

      - name: Build and push image without amd64/arm64 support
        if: ${{ !inputs.support_platforms }}
        run: docker buildx build . --file build/Dockerfile --tag ${{ inputs.image_name }}:${{ inputs.image_tag }} --tag ${{ inputs.image_name }}:latest --build-arg image_version=${{ inputs.image_tag }} --build-arg client=${{ inputs.client }} --push
        run: docker buildx build . --file build/Dockerfile --tag ${{ inputs.image_name }}:${{ inputs.image_tag }} --tag ${{ inputs.image_name }}:latest --build-arg image_version=${{ inputs.image_tag }} --build-arg client=${{ inputs.client }} --push

      - name: Install cosign
        uses: sigstore/cosign-installer@main
@@ -75,6 +85,5 @@ jobs:
        env:
          COSIGN_EXPERIMENTAL: "true"
        run: |
          cosign sign --force ${{ inputs.image_name }}:latest
          cosign sign --force ${{ inputs.image_name }}:${{ inputs.image_tag }}
          cosign sign --force ${{ inputs.image_name }}
.github/workflows/build.yaml (vendored, 44 changed lines)
@@ -4,7 +4,6 @@ on:
  push:
    branches: [ master ]
    paths-ignore:
      # Do not run the pipeline if only Markdown files changed
      - '**.md'
jobs:
  test:
@@ -29,7 +28,7 @@ jobs:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
        os: [ubuntu-20.04, macos-latest, windows-latest]
    steps:
      - uses: actions/checkout@v3
        with:
@@ -38,7 +37,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: 1.18
          go-version: 1.19

      - name: Install MSYS2 & libgit2 (Windows)
        shell: cmd
@@ -56,8 +55,8 @@ jobs:
          CGO_ENABLED: 1
        run: python3 --version && python3 build.py

      - name: Upload release binaries
        id: upload-release-asset
      - name: Upload release binaries (Windows / MacOS)
        id: upload-release-asset-win-macos
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -66,9 +65,22 @@ jobs:
          asset_path: build/${{ matrix.os }}/kubescape
          asset_name: kubescape-${{ matrix.os }}
          asset_content_type: application/octet-stream
        if: matrix.os != 'ubuntu-20.04'

      - name: Upload release hash
        id: upload-release-hash
      - name: Upload release binaries (Linux)
        id: upload-release-asset-linux
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ needs.create-release.outputs.upload_url }}
          asset_path: build/ubuntu-latest/kubescape
          asset_name: kubescape-ubuntu-latest
          asset_content_type: application/octet-stream
        if: matrix.os == 'ubuntu-20.04'

      - name: Upload release hash (Windows / MacOS)
        id: upload-release-hash-win-macos
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -77,15 +89,27 @@ jobs:
          asset_path: build/${{ matrix.os }}/kubescape.sha256
          asset_name: kubescape-${{ matrix.os }}-sha256
          asset_content_type: application/octet-stream

        if: matrix.os != 'ubuntu-20.04'

      - name: Upload release hash (Linux)
        id: upload-release-hash-linux
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          upload_url: ${{ needs.create-release.outputs.upload_url }}
          asset_path: build/ubuntu-latest/kubescape.sha256
          asset_name: kubescape-ubuntu-latest-sha256
          asset_content_type: application/octet-stream
        if: matrix.os == 'ubuntu-20.04'

  publish-image:
    if: ${{ github.repository == 'kubescape/kubescape' }} # TODO
    uses: ./.github/workflows/build-image.yaml
    needs: create-release
    with:
      client: "image-release"
      image_name: "quay.io/${{ github.repository_owner }}/kubescape"
      image_tag: "v2.0.${{ github.run_number }}"
      support_platforms: false
      support_platforms: true
      cosign: true
    secrets: inherit
.github/workflows/build_dev.yaml (vendored, 21 changed lines)
@@ -13,14 +13,13 @@ jobs:
      release: "v2.0.${{ github.run_number }}"
      client: test

  publish-dev-image:
    if: ${{ github.repository == 'kubescape/kubescape' }} # TODO
    uses: ./.github/workflows/build-image.yaml
    needs: test
    with:
      client: "image-dev"
      image_name: "quay.io/${{ github.repository_owner }}/kubescape"
      image_tag: "dev-v2.0.${{ github.run_number }}"
      support_platforms: false
      cosign: true
    secrets: inherit
  # publish-dev-image:
  #   uses: ./.github/workflows/build-image.yaml
  #   needs: test
  #   with:
  #     client: "image-dev"
  #     image_name: "quay.io/${{ github.repository_owner }}/kubescape"
  #     image_tag: "dev-v2.0.${{ github.run_number }}"
  #     support_platforms: true
  #     cosign: true
  #   secrets: inherit
.github/workflows/release.yaml (vendored, 2 changed lines)
@@ -38,4 +38,4 @@ jobs:
          release_name: ${{ inputs.release_name }}
          draft: ${{ inputs.draft }}
          prerelease: false
.github/workflows/test.yaml (vendored, 21 changed lines)
@@ -19,14 +19,14 @@ jobs:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
        os: [ubuntu-20.04, macos-latest, windows-latest]
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: recursive

      - name: Cache Go modules (Linux)
        if: matrix.os == 'ubuntu-latest'
        if: matrix.os == 'ubuntu-20.04'
        uses: actions/cache@v3
        with:
          path: |
@@ -61,7 +61,7 @@ jobs:
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: 1.18
          go-version: 1.19

      - name: Install MSYS2 & libgit2 (Windows)
        shell: cmd
@@ -73,10 +73,10 @@ jobs:
        if: matrix.os != 'windows-latest'

      - name: Test core pkg
        run: go test -tags=static -v ./...
        run: go test "-tags=static,gitenabled" -v ./...

      - name: Test httphandler pkg
        run: cd httphandler && go test -tags=static -v ./...
        run: cd httphandler && go test "-tags=static,gitenabled" -v ./...

      - name: Build
        env:
@@ -85,9 +85,16 @@ jobs:
          CGO_ENABLED: 1
        run: python3 --version && python3 build.py

      - name: Smoke Testing
      - name: Smoke Testing (Windows / MacOS)
        env:
          RELEASE: ${{ inputs.release }}
          KUBESCAPE_SKIP_UPDATE_CHECK: "true"
        run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape

        if: matrix.os != 'ubuntu-20.04'

      - name: Smoke Testing (Linux)
        env:
          RELEASE: ${{ inputs.release }}
          KUBESCAPE_SKIP_UPDATE_CHECK: "true"
        run: python3 smoke_testing/init.py ${PWD}/build/ubuntu-latest/kubescape
        if: matrix.os == 'ubuntu-20.04'
.gitignore (vendored, 3 changed lines)
@@ -5,4 +5,5 @@
*.pyc*
.idea
.history
ca.srl
ca.srl
*.out
.golangci.yml (new file, 57 lines)
@@ -0,0 +1,57 @@
linters-settings:
  govet:
    check-shadowing: true
  dupl:
    threshold: 200
  goconst:
    min-len: 3
    min-occurrences: 2
  gocognit:
    min-complexity: 65

linters:
  enable:
    - gosec
    - staticcheck
    - nolintlint
    - gofmt
    - unused
    - govet
    - bodyclose
    - typecheck
    - goimports
    - ineffassign
    - gosimple
  disable:
    # temporarily disabled
    - varcheck
    - errcheck
    - dupl
    - gocritic
    - gocognit
    - nakedret
    - revive
    - stylecheck
    - unconvert
    - unparam
    #- forbidigo # <- see later
    # should remain disabled
    - deadcode # deprecated linter
    - maligned
    - lll
    - gochecknoinits
    - gochecknoglobals
issues:
  exclude-rules:
    - linters:
        - revive
      text: "var-naming"
    - linters:
        - revive
      text: "type name will be used as (.+?) by other packages, and that stutters"
    - linters:
        - stylecheck
      text: "ST1003"
run:
  skip-dirs:
    - git2go
CODE_OF_CONDUCT.md
@@ -1,127 +1,3 @@
# Contributor Covenant Code of Conduct
## Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement [here](mailto:ben@armosec.io). All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of actions.

**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations.
The Kubescape project follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
MAINTAINERS.md
@@ -1,10 +1,11 @@
# Maintainers

The following table lists Kubescape project maintainers
The following table lists the Kubescape project maintainers:

| Name | GitHub | Email | Organization | Role | Added/Renewed On |
| --- | --- | --- | --- | --- | --- |
| [Ben Hirschberg](https://www.linkedin.com/in/benyamin-ben-hirschberg-66141890) | [@slashben](https://github.com/slashben) | ben@armosec.io | [ARMO](https://www.armosec.io/) | VP R&D | 2021-09-01 |
| [Rotem Refael](https://www.linkedin.com/in/rotem-refael) | [@rotemamsa](https://github.com/rotemamsa) | rrefael@armosec.io | [ARMO](https://www.armosec.io/) | Team Leader | 2021-10-11 |
| [David Wertenteil](https://www.linkedin.com/in/david-wertenteil-0ba277b9) | [@dwertent](https://github.com/dwertent) | dwertent@armosec.io | [ARMO](https://www.armosec.io/) | Kubescape CLI Developer | 2021-09-01 |
| [Bezalel Brandwine](https://www.linkedin.com/in/bezalel-brandwine) | [@Bezbran](https://github.com/Bezbran) | bbrandwine@armosec.io | [ARMO](https://www.armosec.io/) | Kubescape SaaS Developer | 2021-09-01 |
| Name | GitHub | Organization | Added/Renewed On |
| --- | --- | --- | --- |
| [Ben Hirschberg](https://www.linkedin.com/in/benyamin-ben-hirschberg-66141890) | [@slashben](https://github.com/slashben) | [ARMO](https://www.armosec.io/) | 2021-09-01 |
| [Rotem Refael](https://www.linkedin.com/in/rotem-refael) | [@rotemamsa](https://github.com/rotemamsa) | [ARMO](https://www.armosec.io/) | 2021-10-11 |
| [David Wertenteil](https://www.linkedin.com/in/david-wertenteil-0ba277b9) | [@dwertent](https://github.com/dwertent) | [ARMO](https://www.armosec.io/) | 2021-09-01 |
| [Bezalel Brandwine](https://www.linkedin.com/in/bezalel-brandwine) | [@Bezbran](https://github.com/Bezbran) | [ARMO](https://www.armosec.io/) | 2021-09-01 |
| [Craig Box](https://www.linkedin.com/in/crbnz/) | [@craigbox](https://github.com/craigbox) | [ARMO](https://www.armosec.io/) | 2022-10-31 |
Makefile (2 changed lines)
@@ -11,7 +11,7 @@ libgit2:
	cd git2go; make install-static

# go build tags
TAGS = "static"
TAGS = "gitenabled,static"

build:
	go build -v -tags=$(TAGS) .
README.md (45 changed lines)
@@ -11,11 +11,11 @@
:sunglasses: [Want to contribute?](#being-a-part-of-the-team) :innocent:

Kubescape is a K8s open-source tool providing a Kubernetes single pane of glass, including risk analysis, security compliance, RBAC visualizer, and image vulnerability scanning.
Kubescape scans K8s clusters, YAML files, and HELM charts, detecting misconfigurations according to multiple frameworks (such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo/?utm_source=github&utm_medium=repository), [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/)), software vulnerabilities, and RBAC (role-based-access-control) violations at early stages of the CI/CD pipeline, calculates risk score instantly and shows risk trends over time.
Kubescape is an open-source Kubernetes security platform. A single pane of glass access to view risk analysis, security compliance, RBAC visualization, and image vulnerability scanning.
Kubescape scans Kubernetes clusters, YAML files, and Helm charts. It detects misconfigurations according to multiple frameworks (such as [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo/?utm_source=github&utm_medium=repository), [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) and [CIS Benchmark](https://www.armosec.io/blog/cis-kubernetes-benchmark-framework-scanning-tools-comparison/?utm_source=github&utm_medium=repository)). Kubescape also helps you find software vulnerabilities, and RBAC (role-based-access-control) violations at early stages of the CI/CD pipeline. It calculates your risk score instantly and shows risk trends over time.

It has become one of the fastest-growing Kubernetes tools among developers due to its easy-to-use CLI interface, flexible output formats, and automated scanning capabilities, saving Kubernetes users and admins precious time, effort, and resources.
Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI, Github workflows, Prometheus, and Slack, and supports multi-cloud K8s deployments like EKS, GKE, and AKS.
Kubescape is one of the fastest-growing Kubernetes security tools among developers. It saves Kubernetes users and admins precious time, effort, and resources with its easy-to-use CLI interface, flexible output formats, and automated scanning capabilities.
Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI, Github workflows, Prometheus, and Slack. It supports multi-cloud Kubernetes deployments like EKS, GKE, and AKS.

</br>

@@ -52,6 +52,9 @@ kubescape scan --enable-host-scan --verbose
</br>

## Architecture in short

[Component architecture](docs/architecture.drawio.svg)

### [CLI](#kubescape-cli)
<div align="center">
    <img src="docs/ks-cli-arch.png" width="300" alt="cli-diagram">
@@ -69,12 +72,14 @@ kubescape scan --enable-host-scan --verbose
# Being a part of the team

## Community
We invite you to our community! We are excited about this project and want to return the love we get.
You are invited to our community! We are excited about this project and want to return the love we get.

We hold community meetings in [Zoom](https://us02web.zoom.us/j/84020231442) on the first Tuesday of every month at 14:00 GMT! :sunglasses:
We hold community meetings on [Zoom](https://us02web.zoom.us/j/84020231442) on the first Tuesday of every month at 14:00 GMT! :sunglasses:

Please make sure that you follow our [Code Of Conduct](https://github.com/kubescape/kubescape/blob/master/CODE_OF_CONDUCT.md).

## Contributions
[Want to contribute?](https://github.com/kubescape/kubescape/blob/master/CONTRIBUTING.md) Want to discuss something? Have an issue? Please make sure that you follow our [Code Of Conduct](https://github.com/kubescape/kubescape/blob/master/CODE_OF_CONDUCT.md) .
Want to discuss something? Have an issue? [Want to contribute?](https://github.com/kubescape/kubescape/blob/master/CONTRIBUTING.md)

* Feel free to pick a task from the [issues](https://github.com/kubescape/kubescape/issues?q=is%3Aissue+is%3Aopen+label%3A%22open+for+contribution%22), [roadmap](docs/roadmap.md) or suggest a feature of your own. [Contact us](MAINTAINERS.md) directly for more information :)
* [Open an issue](https://github.com/kubescape/kubescape/issues/new/choose) , we are trying to respond within 48 hours
@@ -220,6 +225,8 @@ kubescape scan *.yaml
```

#### Scan Kubernetes manifest files from a git repository

```
kubescape scan https://github.com/kubescape/kubescape
```

@@ -259,7 +266,7 @@ kubescape scan --format prometheus
kubescape scan --format html --output results.html
```

#### Scan with exceptions, objects with exceptions will be presented as `exclude` and not `fail`
#### Scan with exceptions. Objects with exceptions will be presented as `exclude` and not `fail`
[Full documentation](examples/exceptions/README.md)
```
kubescape scan --exceptions examples/exceptions/exclude-kube-namespaces.json
@@ -271,13 +278,13 @@ kubescape scan </path/to/directory>
```
> Kubescape will load the default value file

#### Scan Kustomize Directory
#### Scan a Kustomize Directory
```
kubescape scan </path/to/directory>
```
> Kubescape will generate Kubernetes Yaml Objects using 'Kustomize' file and scans them for security.
> Kubescape will generate Kubernetes YAML objects using a 'Kustomize' file and scan them for security.

### Offline/Air-gaped Environment Support
### Offline/Air-gapped Environment Support

[Video tutorial](https://youtu.be/IGXL9s37smM)

@@ -321,7 +328,7 @@ kubescape scan framework nsa --use-from /path/nsa.json

 

Scan the YAML files while writing them using the [vs code extension](https://github.com/armosec/vscode-kubescape/blob/master/README.md)
Scan the YAML files while writing them using the [VS Code extension](https://github.com/armosec/vscode-kubescape/blob/master/README.md)

## Lens Extension

@@ -403,15 +410,15 @@ View Kubescape scan results directly in [Lens IDE](https://k8slens.dev/) using k
<details>
<summary>Instructions to use the playground</summary>

* Apply changes you wish to make to the kubescape directory using text editors like `Vim`.
* Apply changes you wish to make to the Kubescape directory using text editors like `Vim`.
* [Build on Linux](https://github.com/kubescape/kubescape#build-on-linuxmacos)
* Now, you can use Kubescape just like a normal user. Instead of using `kubescape`, use `./kubescape`. (Make sure you are inside kubescape directory because the command will execute the binary named `kubescape` in `kubescape directory`)
* Now, you can use Kubescape like a regular user. Instead of using `kubescape`, use `./kubescape`. Make sure you are in the Kubescape directory because the command will execute the binary named `kubescape` in `kubescape directory`)

</details>

## VS code configuration samples
## VS Code configuration samples

You can use the sample files below to setup your VS code environment for building and debugging purposes.
You can use the sample files below to setup your VS Code environment for building and debugging purposes.


<details><summary>.vscode/settings.json</summary>
@@ -458,11 +465,11 @@ You can use the sample files below to setup your VS code environment for buildin
## Technology
Kubescape is based on the [OPA engine](https://github.com/open-policy-agent/opa) and ARMO's posture controls.

The tools retrieve Kubernetes objects from the API server and run a set of [rego's snippets](https://www.openpolicyagent.org/docs/latest/policy-language/) developed by [ARMO](https://www.armosec.io?utm_source=github&utm_medium=repository).
The tools retrieve Kubernetes objects from the API server and runs a set of [Rego snippets](https://www.openpolicyagent.org/docs/latest/policy-language/) developed by [ARMO](https://www.armosec.io?utm_source=github&utm_medium=repository).

The results by default are printed in a pretty "console friendly" manner, but they can be retrieved in JSON format for further processing.
The results by default are printed in a "console friendly" manner, but they can be retrieved in JSON format for further processing.

Kubescape is an open source project, we welcome your feedback and ideas for improvement. We’re also aiming to collaborate with the Kubernetes community to help make the tests more robust and complete as Kubernetes develops.
Kubescape is an open source project, we welcome your feedback and ideas for improvement. We are part of the Kubernetes community and aim to make the tests more robust and complete as Kubernetes develops.

## Thanks to all the contributors ❤️
<a href = "https://github.com/kubescape/kubescape/graphs/contributors">
build.py (12 changed lines)
@@ -40,7 +40,7 @@ def main():

    client_var = "github.com/kubescape/kubescape/v2/core/cautils.Client"
    client_name = os.getenv("CLIENT")


    # Create build directory
    build_dir = get_build_dir()

@@ -56,15 +56,15 @@ def main():
        ldflags += " -X {}={}".format(build_url, release_version)
    if client_name:
        ldflags += " -X {}={}".format(client_var, client_name)

    build_command = ["go", "build", "-tags=static", "-o", ks_file, "-ldflags" ,ldflags]

    build_command = ["go", "build", "-buildmode=pie", "-tags=static,gitenabled", "-o", ks_file, "-ldflags" ,ldflags]

    print("Building kubescape and saving here: {}".format(ks_file))
    print("Build command: {}".format(" ".join(build_command)))

    status = subprocess.call(build_command)
    check_status(status, "Failed to build kubescape")

    sha256 = hashlib.sha256()
    with open(ks_file, "rb") as kube:
        sha256.update(kube.read())
@@ -74,7 +74,7 @@ def main():
        kube_sha.write(sha256.hexdigest())

    print("Build Done")



if __name__ == "__main__":
    main()
Dockerfile
@@ -1,4 +1,4 @@
FROM golang:1.18-alpine as builder
FROM golang:1.19-alpine as builder

ARG image_version
ARG client
@@ -12,7 +12,7 @@ ENV CGO_ENABLED=1

# Install required python/pip
ENV PYTHONUNBUFFERED=1
RUN apk add --update --no-cache python3 git openssl-dev musl-dev gcc make cmake pkgconfig && ln -sf python3 /usr/bin/python
RUN apk add --update --no-cache python3 gcc make git libc-dev binutils-gold cmake pkgconfig && ln -sf python3 /usr/bin/python
RUN python3 -m ensurepip
RUN pip3 install --no-cache --upgrade pip setuptools
@@ -9,11 +9,11 @@ import (

var completionCmdExamples = `

  # Enable BASH shell autocompletion
  $ source <(kubescape completion bash)
  # Enable BASH shell autocompletion
  $ source <(kubescape completion bash)
  $ echo 'source <(kubescape completion bash)' >> ~/.bashrc

  # Enable ZSH shell autocompletion
  # Enable ZSH shell autocompletion
  $ source <(kubectl completion zsh)
  $ echo 'source <(kubectl completion zsh)' >> "${fpath[1]}/_kubectl"

@@ -27,7 +27,7 @@ func GetCompletionCmd() *cobra.Command {
		Example:               completionCmdExamples,
		DisableFlagsInUseLine: true,
		ValidArgs:             []string{"bash", "zsh", "fish", "powershell"},
		Args:                  cobra.ExactValidArgs(1),
		Args:                  cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
		Run: func(cmd *cobra.Command, args []string) {
			switch strings.ToLower(args[0]) {
			case "bash":
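The argument validator in this hunk changes from `cobra.ExactValidArgs(1)` to `cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs)`, which composes two checks: exactly one positional argument, and only values listed in `ValidArgs`. The sketch below is a minimal, self-contained illustration of that composition; the `demo-completion` command, its output, and its error handling are hypothetical, only the validator wiring mirrors the diff.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical command that accepts exactly one of the listed shells.
	cmd := &cobra.Command{
		Use:       "demo-completion [shell]",
		ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
		// MatchAll runs every validator in order: ExactArgs(1) enforces the
		// argument count, OnlyValidArgs restricts values to ValidArgs.
		Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("generating completion for", args[0])
			return nil
		},
	}

	cmd.SetArgs([]string{"bash"}) // passes both validators
	if err := cmd.Execute(); err != nil {
		fmt.Println("error:", err)
	}
}
```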
@@ -24,8 +24,8 @@ var (
  # Download the NSA framework. Run 'kubescape list frameworks' for all frameworks names
  kubescape download framework nsa

  # Download the "Allowed hostPath" control. Run 'kubescape list controls' for all controls names
  kubescape download control "Allowed hostPath"
  # Download the "C-0001" control. Run 'kubescape list controls --id' for all controls ids
  kubescape download control "C-0001"

  # Download the "C-0001" control. Run 'kubescape list controls --id' for all controls ids
  kubescape download control C-0001
@@ -36,6 +36,8 @@ var (
  # Download the configured controls-inputs
  kubescape download controls-inputs

  # Download the attack tracks
  kubescape download attack-tracks
`
)
@@ -68,7 +70,9 @@ func GeDownloadCmd(ks meta.IKubescape) *cobra.Command {
			}
			downloadInfo.Target = args[0]
			if len(args) >= 2 {
				downloadInfo.Name = args[1]

				downloadInfo.Identifier = args[1]

			}
			if err := ks.Download(&downloadInfo); err != nil {
				logger.L().Fatal(err.Error())
@@ -1,45 +0,0 @@
package fix

import (
	"errors"

	"github.com/kubescape/kubescape/v2/core/meta"
	metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"

	"github.com/spf13/cobra"
)

var fixCmdExamples = `
  Fix command is for fixing kubernetes manifest files based on a scan command output.
  Use with caution, this command will change your files in-place.

  # Fix kubernetes YAML manifest files based on a scan command output (output.json)
  1) kubescape scan --format json --format-version v2 --output output.json
  2) kubescape fix output.json

`

func GetFixCmd(ks meta.IKubescape) *cobra.Command {
	var fixInfo metav1.FixInfo

	fixCmd := &cobra.Command{
		Use:     "fix <report output file>",
		Short:   "Fix misconfiguration in files",
		Long:    ``,
		Example: fixCmdExamples,
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) < 1 {
				return errors.New("report output file is required")
			}
			fixInfo.ReportFile = args[0]

			return ks.Fix(&fixInfo)
		},
	}

	fixCmd.PersistentFlags().BoolVar(&fixInfo.NoConfirm, "no-confirm", false, "No confirmation will be given to the user before applying the fix (default false)")
	fixCmd.PersistentFlags().BoolVar(&fixInfo.DryRun, "dry-run", false, "No changes will be applied (default false)")
	fixCmd.PersistentFlags().BoolVar(&fixInfo.SkipUserValues, "skip-user-values", true, "Changes which involve user-defined values will be skipped")

	return fixCmd
}
@@ -20,11 +20,8 @@ var (
  # List all supported frameworks names
  kubescape list frameworks --account <account id>

  # List all supported controls names
  # List all supported controls names with ids
  kubescape list controls

  # List all supported controls ids
  kubescape list controls --id

  Control documentation:
  https://hub.armosec.io/docs/controls
@@ -67,8 +64,8 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
	listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
	listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
	listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
	listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-printer'/'json'")
	listCmd.PersistentFlags().BoolVarP(&listPolicies.ListIDs, "id", "", false, "List control ID's instead of controls names")
	listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-print'/'json'")
	listCmd.PersistentFlags().MarkDeprecated("id", "Control ID's are included in list outpus")

	return listCmd
}
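`MarkDeprecated` comes from the pflag library behind Cobra flags: the flag keeps parsing, but it disappears from help output and using it prints the deprecation message. A minimal sketch of that behavior follows; the `demo-list` command and its flag wiring are illustrative, not taken from the Kubescape source.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var listIDs bool

	cmd := &cobra.Command{
		Use: "demo-list",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("ids requested:", listIDs)
		},
	}

	// Register the flag, then mark it deprecated: it still parses, but it
	// is hidden from --help and emits a warning when supplied.
	cmd.PersistentFlags().BoolVar(&listIDs, "id", false, "list control IDs instead of names")
	if err := cmd.PersistentFlags().MarkDeprecated("id", "IDs are now always included in the output"); err != nil {
		fmt.Println("error:", err)
	}

	cmd.SetArgs([]string{"--id"})
	_ = cmd.Execute() // warns that --id has been deprecated, then runs normally
}
```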
@@ -10,7 +10,6 @@ import (
	"github.com/kubescape/kubescape/v2/cmd/config"
	"github.com/kubescape/kubescape/v2/cmd/delete"
	"github.com/kubescape/kubescape/v2/cmd/download"
	"github.com/kubescape/kubescape/v2/cmd/fix"
	"github.com/kubescape/kubescape/v2/cmd/list"
	"github.com/kubescape/kubescape/v2/cmd/scan"
	"github.com/kubescape/kubescape/v2/cmd/submit"
@@ -28,7 +27,7 @@ var rootInfo cautils.RootInfo

var ksExamples = `
  # Scan command
  kubescape scan --submit
  kubescape scan

  # List supported frameworks
  kubescape list frameworks
@@ -79,7 +78,6 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
	rootCmd.AddCommand(version.GetVersionCmd())
	rootCmd.AddCommand(config.GetConfigCmd(ks))
	rootCmd.AddCommand(update.GetUpdateCmd())
	rootCmd.AddCommand(fix.GetFixCmd(ks))

	return rootCmd
}
@@ -23,7 +23,7 @@ var (
  kubescape scan control "privileged container"

  # Scan list of controls separated with a comma
  kubescape scan control "privileged container","allowed hostpath"
  kubescape scan control "privileged container","HostPath mount"

  # Scan list of controls using the control ID separated with a comma
  kubescape scan control C-0058,C-0057
@@ -61,7 +61,7 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Command {
			if err := validateFrameworkScanInfo(scanInfo); err != nil {
				return err
			}


			// flagValidationControl(scanInfo)
			scanInfo.PolicyIdentifier = []cautils.PolicyIdentifier{}

@@ -109,7 +109,7 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Command {
			if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
				logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))
			}
			enforceSeverityThresholds(&results.GetResults().SummaryDetails.SeverityCounters, scanInfo, terminateOnExceedingSeverity)
			enforceSeverityThresholds(results.GetResults().SummaryDetails.GetResourcesSeverityCounters(), scanInfo, terminateOnExceedingSeverity)

			return nil
		},
@@ -120,6 +120,10 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Command {
func validateControlScanInfo(scanInfo *cautils.ScanInfo) error {
	severity := scanInfo.FailThresholdSeverity

	if scanInfo.Submit && scanInfo.OmitRawResources {
		return fmt.Errorf("you can use `omit-raw-resources` or `submit`, but not both")
	}

	if err := validateSeverity(severity); severity != "" && err != nil {
		return err
	}
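The new check in `validateControlScanInfo` rejects `--submit` together with `--omit-raw-resources`. As a side note, recent Cobra versions can express the same constraint declaratively with `MarkFlagsMutuallyExclusive`; the sketch below shows that alternative technique under the assumption of a hypothetical `demo-scan` command, and is not how the Kubescape code in the diff does it.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var submit, omitRawResources bool

	cmd := &cobra.Command{
		Use: "demo-scan",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("scanning, submit =", submit)
			return nil
		},
	}

	cmd.Flags().BoolVar(&submit, "submit", false, "submit the scan results")
	cmd.Flags().BoolVar(&omitRawResources, "omit-raw-resources", false, "omit raw resources from the output")

	// Declarative alternative to the hand-written check in the diff above:
	// Cobra fails the invocation if both flags are set at the same time.
	cmd.MarkFlagsMutuallyExclusive("submit", "omit-raw-resources")

	cmd.SetArgs([]string{"--submit", "--omit-raw-resources"})
	if err := cmd.Execute(); err != nil {
		fmt.Println("error:", err) // reports that the two flags may not be used together
	}
}
```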
@@ -16,14 +16,13 @@ import (
	"github.com/kubescape/kubescape/v2/core/cautils"
	"github.com/kubescape/kubescape/v2/core/meta"

	"github.com/enescakir/emoji"
	"github.com/spf13/cobra"
)

var (
	frameworkExample = `
  # Scan all frameworks and submit the results
  kubescape scan framework all --submit
  # Scan all frameworks
  kubescape scan framework all

  # Scan the NSA framework
  kubescape scan framework nsa
@@ -35,7 +34,7 @@ var (
  kubescape scan framework all

  # Scan kubernetes YAML manifest files (single file or glob)
  kubescape scan framework nsa *.yaml
  kubescape scan framework nsa .

  Run 'kubescape list frameworks' for the list of supported frameworks
`
@@ -113,13 +112,13 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Command {
				logger.L().Fatal(err.Error())
			}
			if !scanInfo.VerboseMode {
				cautils.SimpleDisplay(os.Stderr, "%s Run with '--verbose'/'-v' flag for detailed resources view\n\n", emoji.Detective)
				cautils.SimpleDisplay(os.Stderr, "Run with '--verbose'/'-v' flag for detailed resources view\n\n")
			}
			if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
				logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))
			}

			enforceSeverityThresholds(&results.GetData().Report.SummaryDetails.SeverityCounters, scanInfo, terminateOnExceedingSeverity)
			enforceSeverityThresholds(results.GetData().Report.SummaryDetails.GetResourcesSeverityCounters(), scanInfo, terminateOnExceedingSeverity)
			return nil
		},
	}
@@ -136,10 +135,10 @@ func countersExceedSeverityThreshold(severityCounters reportsummary.ISeverityCounters,
		SeverityName       string
		GetFailedResources func() int
	}{
		{reporthandlingapis.SeverityLowString, severityCounters.NumberOfResourcesWithLowSeverity},
		{reporthandlingapis.SeverityMediumString, severityCounters.NumberOfResourcesWithMediumSeverity},
		{reporthandlingapis.SeverityHighString, severityCounters.NumberOfResourcesWithHighSeverity},
		{reporthandlingapis.SeverityCriticalString, severityCounters.NumberOfResourcesWithCriticalSeverity},
		{reporthandlingapis.SeverityLowString, severityCounters.NumberOfLowSeverity},
		{reporthandlingapis.SeverityMediumString, severityCounters.NumberOfMediumSeverity},
		{reporthandlingapis.SeverityHighString, severityCounters.NumberOfHighSeverity},
		{reporthandlingapis.SeverityCriticalString, severityCounters.NumberOfCriticalSeverity},
	}

	targetSeverityIdx := 0
@@ -201,7 +200,9 @@ func validateFrameworkScanInfo(scanInfo *cautils.ScanInfo) error {
	if 100 < scanInfo.FailThreshold || 0 > scanInfo.FailThreshold {
		return fmt.Errorf("bad argument: out of range threshold")
	}

	if scanInfo.Submit && scanInfo.OmitRawResources {
		return fmt.Errorf("you can use `omit-raw-resources` or `submit`, but not both")
	}
	severity := scanInfo.FailThresholdSeverity
	if err := validateSeverity(severity); severity != "" && err != nil {
		return err
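The renamed getters above feed a severity "ladder": `countersExceedSeverityThreshold` walks the severities from low to critical and reports a failure if any count at or above the requested threshold is non-zero, which is also what the test cases further down expect (a High failure exceeds a Medium threshold, a Medium failure does not exceed a High threshold, an unknown severity is an error). The sketch below restates that idea with simplified stand-in types; it does not use the real `reportsummary.ISeverityCounters` interface or the project's error values.

```go
package main

import (
	"errors"
	"fmt"
)

// severityCounters is a simplified stand-in for the counters read through
// the reportsummary getters in the diff above.
type severityCounters struct {
	low, medium, high, critical int
}

var errUnknownSeverity = errors.New("unknown severity")

// exceedsThreshold reports whether any failure at or above the given
// severity threshold occurred.
func exceedsThreshold(c severityCounters, threshold string) (bool, error) {
	ladder := []struct {
		name  string
		count int
	}{
		{"low", c.low},
		{"medium", c.medium},
		{"high", c.high},
		{"critical", c.critical},
	}

	start := -1
	for i, s := range ladder {
		if s.name == threshold {
			start = i
			break
		}
	}
	if start == -1 {
		return false, errUnknownSeverity
	}

	for _, s := range ladder[start:] {
		if s.count > 0 {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	c := severityCounters{high: 1}
	ok, _ := exceedsThreshold(c, "medium") // a High failure exceeds a Medium threshold
	fmt.Println(ok)                        // true
}
```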
@@ -17,10 +17,10 @@ var scanCmdExamples = `
  kubescape scan --enable-host-scan --verbose

  # Scan kubernetes YAML manifest files
  kubescape scan *.yaml
  kubescape scan .

  # Scan and save the results in the JSON format
  kubescape scan --format json --output results.json
  kubescape scan --format json --output results.json --format-version=v2

  # Display all resources
  kubescape scan --verbose
@@ -58,6 +58,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
		},
		PreRun: func(cmd *cobra.Command, args []string) {
			k8sinterface.SetClusterContextName(scanInfo.KubeContext)

		},
		PostRun: func(cmd *cobra.Command, args []string) {
			// TODO - revert context
@@ -65,6 +66,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
	}

	scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
	// scanCmd.PersistentFlags().BoolVar(&scanInfo.CreateAccount, "create-account", false, "Create a Kubescape SaaS account ID account ID is not found in cache. After creating the account, the account ID will be saved in cache. In addition, the scanning results will be uploaded to the Kubescape SaaS")
	scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
	scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
	scanCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "kube-context", "", "", "Kube context. Default will use the current-context")
@@ -76,7 +78,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
	scanCmd.PersistentFlags().Float32VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")

	scanCmd.PersistentFlags().StringVar(&scanInfo.FailThresholdSeverity, "severity-threshold", "", "Severity threshold is the severity of failed controls at which the command fails and returns exit code 1")
	scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer", "json", "junit", "prometheus", "pdf", "html", "sarif"`)
	scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "", `Output file format. Supported formats: "pretty-printer", "json", "junit", "prometheus", "pdf", "html", "sarif"`)
	scanCmd.PersistentFlags().StringVar(&scanInfo.IncludeNamespaces, "include-namespaces", "", "scan specific namespaces. e.g: --include-namespaces ns-a,ns-b")
	scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to configured backend.")
	scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
@@ -88,11 +90,15 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
	scanCmd.PersistentFlags().StringVar(&scanInfo.FormatVersion, "format-version", "v1", "Output object can be different between versions, this is for maintaining backward and forward compatibility. Supported:'v1'/'v2'")
	scanCmd.PersistentFlags().StringVar(&scanInfo.CustomClusterName, "cluster-name", "", "Set the custom name of the cluster. Not same as the kube-context flag")
	scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Submit the scan results to Kubescape SaaS where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
	scanCmd.PersistentFlags().BoolVarP(&scanInfo.OmitRawResources, "omit-raw-resources", "", false, "Omit raw resources from the output. By default the raw resources are included in the output")
	scanCmd.PersistentFlags().BoolVarP(&scanInfo.PrintAttackTree, "print-attack-tree", "", false, "Print attack tree")

	scanCmd.PersistentFlags().MarkDeprecated("silent", "use '--logger' flag instead. Flag will be removed at 1.May.2022")

	// hidden flags
	scanCmd.PersistentFlags().MarkHidden("host-scan-yaml") // this flag should be used very cautiously. We prefer users will not use it at all unless the DaemonSet can not run pods on the nodes
	scanCmd.PersistentFlags().MarkHidden("omit-raw-resources")
	scanCmd.PersistentFlags().MarkHidden("print-attack-tree")

	// Retrieve --kubeconfig flag from https://github.com/kubernetes/kubectl/blob/master/pkg/cmd/cmd.go
	scanCmd.PersistentFlags().AddGoFlag(flag.Lookup("kubeconfig"))
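`MarkHidden`, also from pflag, keeps a flag functional while dropping it from `--help`, which is how the hunk above tucks away `omit-raw-resources` and `print-attack-tree`. A small illustrative sketch follows; the `demo-scan` command is hypothetical and only the hide-a-flag pattern mirrors the diff.

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var printAttackTree bool

	cmd := &cobra.Command{
		Use: "demo-scan",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("print attack tree:", printAttackTree)
		},
	}

	// The flag still parses, but "--help" no longer lists it.
	cmd.PersistentFlags().BoolVar(&printAttackTree, "print-attack-tree", false, "print attack tree")
	if err := cmd.PersistentFlags().MarkHidden("print-attack-tree"); err != nil {
		fmt.Println("error:", err)
	}

	cmd.SetArgs([]string{"--print-attack-tree"})
	_ = cmd.Execute() // prints: print attack tree: true
}
```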
@@ -24,91 +24,91 @@ func TestExceedsSeverity(t *testing.T) {
|
||||
{
|
||||
Description: "Critical failed resource should exceed Critical threshold",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "critical"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithCriticalSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{CriticalSeverityCounter: 1},
|
||||
Want: true,
|
||||
},
|
||||
{
|
||||
Description: "Critical failed resource should exceed Critical threshold set as constant",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: apis.SeverityCriticalString},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithCriticalSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{CriticalSeverityCounter: 1},
|
||||
Want: true,
|
||||
},
|
||||
{
|
||||
Description: "High failed resource should not exceed Critical threshold",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "critical"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithHighSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{HighSeverityCounter: 1},
|
||||
Want: false,
|
||||
},
|
||||
{
|
||||
Description: "Critical failed resource exceeds High threshold",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "high"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithCriticalSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{CriticalSeverityCounter: 1},
|
||||
Want: true,
|
||||
},
|
||||
{
|
||||
Description: "High failed resource exceeds High threshold",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "high"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithHighSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{HighSeverityCounter: 1},
|
||||
Want: true,
|
||||
},
|
||||
{
|
||||
Description: "Medium failed resource does not exceed High threshold",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "high"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithMediumSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{MediumSeverityCounter: 1},
|
||||
Want: false,
|
||||
},
|
||||
{
|
||||
Description: "Critical failed resource exceeds Medium threshold",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "medium"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithCriticalSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{CriticalSeverityCounter: 1},
|
||||
Want: true,
|
||||
},
|
||||
{
|
||||
Description: "High failed resource exceeds Medium threshold",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "medium"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithHighSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{HighSeverityCounter: 1},
|
||||
Want: true,
|
||||
},
|
||||
{
|
||||
Description: "Medium failed resource exceeds Medium threshold",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "medium"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithMediumSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{MediumSeverityCounter: 1},
|
||||
Want: true,
|
||||
},
|
||||
{
|
||||
Description: "Low failed resource does not exceed Medium threshold",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "medium"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithLowSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{LowSeverityCounter: 1},
|
||||
Want: false,
|
||||
},
|
||||
{
|
||||
Description: "Critical failed resource exceeds Low threshold",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "low"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithCriticalSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{CriticalSeverityCounter: 1},
|
||||
Want: true,
|
||||
},
|
||||
{
|
||||
Description: "High failed resource exceeds Low threshold",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "low"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithHighSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{HighSeverityCounter: 1},
|
||||
Want: true,
|
||||
},
|
||||
{
|
||||
Description: "Medium failed resource exceeds Low threshold",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "low"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithMediumSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{MediumSeverityCounter: 1},
|
||||
Want: true,
|
||||
},
|
||||
{
|
||||
Description: "Low failed resource exceeds Low threshold",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "low"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithLowSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{LowSeverityCounter: 1},
|
||||
Want: true,
|
||||
},
|
||||
{
|
||||
Description: "Unknown severity returns an error",
|
||||
ScanInfo: &cautils.ScanInfo{FailThresholdSeverity: "unknown"},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{ResourcesWithLowSeverityCounter: 1},
|
||||
SeverityCounters: &reportsummary.SeverityCounters{LowSeverityCounter: 1},
|
||||
Want: false,
|
||||
Error: ErrUnknownSeverity,
|
||||
},
|
||||
@@ -139,7 +139,7 @@ func Test_enforceSeverityThresholds(t *testing.T) {
|
||||
}{
|
||||
{
|
||||
"Exceeding Critical severity counter should call the terminating function",
|
||||
&reportsummary.SeverityCounters{ResourcesWithCriticalSeverityCounter: 1},
|
||||
&reportsummary.SeverityCounters{CriticalSeverityCounter: 1},
|
||||
&cautils.ScanInfo{FailThresholdSeverity: apis.SeverityCriticalString},
|
||||
true,
|
||||
},
|
||||
|
||||
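The test cases above exercise a simple ordering rule: a scan run fails when a failed resource's severity is at or above the configured threshold. A minimal, illustrative sketch of that rule (my own reimplementation for clarity, not the repository's actual helper) could look like this:

package main

import (
	"errors"
	"fmt"
	"strings"
)

var errUnknownSeverity = errors.New("unknown severity")

// rank maps a severity name to a comparable weight; the names mirror the
// test cases above, but the numeric values are only illustrative.
func rank(severity string) (int, error) {
	order := map[string]int{"low": 1, "medium": 2, "high": 3, "critical": 4}
	r, ok := order[strings.ToLower(severity)]
	if !ok {
		return 0, errUnknownSeverity
	}
	return r, nil
}

// exceeds reports whether a failure of severity `found` meets or exceeds
// the configured fail-threshold severity.
func exceeds(found, threshold string) (bool, error) {
	f, err := rank(found)
	if err != nil {
		return false, err
	}
	t, err := rank(threshold)
	if err != nil {
		return false, err
	}
	return f >= t, nil
}

func main() {
	ok, _ := exceeds("high", "medium")
	fmt.Println(ok) // true: a High failure exceeds a Medium threshold
}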
@@ -1,8 +1,9 @@
package scan

import (
"github.com/kubescape/kubescape/v2/core/cautils"
"testing"

"github.com/kubescape/kubescape/v2/core/cautils"
)

// Test_validateControlScanInfo tests how scan info is validated for the `scan control` command

@@ -31,10 +31,11 @@ var (
// getRBACCmd represents the RBAC command
func getRBACCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
return &cobra.Command{
Use: "rbac",
Example: rbacExamples,
Short: "Submit cluster's Role-Based Access Control(RBAC)",
Long: ``,
Use: "rbac",
Deprecated: "This command is deprecated and will not be supported after 1/Jan/2023. Please use the 'scan' command instead.",
Example: rbacExamples,
Short: "Submit cluster's Role-Based Access Control(RBAC)",
Long: ``,
RunE: func(cmd *cobra.Command, args []string) error {

if err := flagValidationSubmit(submitInfo); err != nil {

@@ -7,16 +7,21 @@ import (
)

var submitCmdExamples = `
# Submit Kubescape scan results file
kubescape submit results

# Submit exceptions file to Kubescape SaaS
kubescape submit exceptions
`

func GetSubmitCmd(ks meta.IKubescape) *cobra.Command {
var submitInfo metav1.Submit

submitCmd := &cobra.Command{
Use: "submit <command>",
Short: "Submit an object to the Kubescape SaaS version",
Long: ``,
Use: "submit <command>",
Short: "Submit an object to the Kubescape SaaS version",
Long: ``,
Example: submitCmdExamples,
Run: func(cmd *cobra.Command, args []string) {
},
}

7  cmd/version/git_native_disabled.go  Normal file
@@ -0,0 +1,7 @@
//go:build !gitenabled

package version

func isGitEnabled() bool {
	return false
}
7  cmd/version/git_native_enabled.go  Normal file
@@ -0,0 +1,7 @@
//go:build gitenabled

package version

func isGitEnabled() bool {
	return true
}
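The two version files above rely on Go build constraints: the tag decides at compile time which isGitEnabled implementation is linked in, typically selected with `go build -tags gitenabled`. A generic sketch of the same pattern, using a hypothetical feature flag that is not part of this repository:

//go:build featureenabled

// feature_enabled.go — compiled only when the binary is built with the
// "featureenabled" tag; a sibling file guarded by //go:build !featureenabled
// would provide the fallback implementation.
package feature

func isFeatureEnabled() bool { return true }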
@@ -16,7 +16,11 @@ func GetVersionCmd() *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
v := cautils.NewIVersionCheckHandler()
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, "", "", "version"))
fmt.Fprintln(os.Stdout, "Your current version is: "+cautils.BuildNumber)
fmt.Fprintf(os.Stdout,
"Your current version is: %s [git enabled in build: %t]\n",
cautils.BuildNumber,
isGitEnabled(),
)
return nil
},
}

12  core/cautils/controllink.go  Normal file
@@ -0,0 +1,12 @@
package cautils

import (
	"fmt"
	"strings"
)

func GetControlLink(controlID string) string {
	// For CIS Controls, cis-1.1.3 will be transformed to cis-1-1-3 in documentation link.
	docLinkID := strings.ReplaceAll(controlID, ".", "-")
	return fmt.Sprintf("https://hub.armosec.io/docs/%s", strings.ToLower(docLinkID))
}
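For context, a minimal sketch of how GetControlLink could be called; the control IDs below are only sample inputs, and the expected URLs follow directly from the string replacement and lowercasing shown above.

package main

import (
	"fmt"

	"github.com/kubescape/kubescape/v2/core/cautils"
)

func main() {
	// CIS-style IDs get their dots replaced with dashes and are lowercased,
	// e.g. "cis-1.1.3" becomes "cis-1-1-3" in the documentation URL.
	fmt.Println(cautils.GetControlLink("cis-1.1.3")) // https://hub.armosec.io/docs/cis-1-1-3
	fmt.Println(cautils.GetControlLink("C-0053"))    // https://hub.armosec.io/docs/c-0053
}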
@@ -470,10 +470,7 @@ func (c *ClusterConfig) updateConfigMap() error {
}

func updateConfigFile(configObj *ConfigObj) error {
if err := os.WriteFile(ConfigFileFullPath(), configObj.Config(), 0664); err != nil {
return err
}
return nil
return os.WriteFile(ConfigFileFullPath(), configObj.Config(), 0664) //nolint:gosec
}

func (c *ClusterConfig) updateConfigData(configMap *corev1.ConfigMap) {

@@ -5,6 +5,7 @@ import (
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/opa-utils/reporthandling"
apis "github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
"github.com/kubescape/opa-utils/reporthandling/results/v1/prioritization"
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
@@ -18,18 +19,21 @@ type OPASessionObj struct {
K8SResources *K8SResources // input k8s objects
ArmoResource *KSResources // input ARMO objects
AllPolicies *Policies // list of all frameworks
Policies []reporthandling.Framework // list of frameworks to scan
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<resource ID>]<resource>
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<resource ID>]<resource result>
ResourceSource map[string]reporthandling.Source // resources sources, map[<resource ID>]<resource result>
ResourcesPrioritized map[string]prioritization.PrioritizedResource // resources prioritization information, map[<resource ID>]<prioritized resource>
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
RegoInputData RegoInputData // input passed to rego for scanning. map[<control name>][<input arguments>]
ResourceAttackTracks map[string]v1alpha1.IAttackTrack // resources attack tracks, map[<resource ID>]<attack track>
AttackTracks map[string]v1alpha1.IAttackTrack
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
RegoInputData RegoInputData // input passed to rego for scanning. map[<control name>][<input arguments>]
Metadata *reporthandlingv2.Metadata
InfoMap map[string]apis.StatusInfo // Map errors of resources to StatusInfo
ResourceToControlsMap map[string][]string // map[<apigroup/apiversion/resource>] = [<control_IDs>]
SessionID string // SessionID
InfoMap map[string]apis.StatusInfo // Map errors of resources to StatusInfo
ResourceToControlsMap map[string][]string // map[<apigroup/apiversion/resource>] = [<control_IDs>]
SessionID string // SessionID
Policies []reporthandling.Framework // list of frameworks to scan
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
OmitRawResources bool // omit raw resources from output
}

func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources, scanInfo *ScanInfo) *OPASessionObj {
@@ -45,6 +49,7 @@ func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SRe
ResourceSource: make(map[string]reporthandling.Source),
SessionID: scanInfo.ScanID,
Metadata: scanInfoToScanMetadata(scanInfo),
OmitRawResources: scanInfo.OmitRawResources,
}
}

@@ -94,6 +99,7 @@ type Exception struct {

type RegoInputData struct {
PostureControlInputs map[string][]string `json:"postureControlInputs"`
DataControlInputs map[string]string `json:"dataControlInputs"`
// ClusterName string `json:"clusterName"`
// K8sConfig RegoK8sConfig `json:"k8sconfig"`
}

@@ -1,6 +1,7 @@
package getter

import (
"fmt"
"strings"

"github.com/armosec/armoapi-go/armotypes"
@@ -24,11 +25,11 @@ func NewDownloadReleasedPolicy() *DownloadReleasedPolicy {
}
}

func (drp *DownloadReleasedPolicy) GetControl(policyName string) (*reporthandling.Control, error) {
func (drp *DownloadReleasedPolicy) GetControl(ID string) (*reporthandling.Control, error) {
var control *reporthandling.Control
var err error

control, err = drp.gs.GetOPAControl(policyName)
control, err = drp.gs.GetOPAControlByID(ID)
if err != nil {
return nil, err
}
@@ -55,13 +56,29 @@ func (drp *DownloadReleasedPolicy) ListFrameworks() ([]string, error) {
return drp.gs.GetOPAFrameworksNamesList()
}

func (drp *DownloadReleasedPolicy) ListControls(listType ListType) ([]string, error) {
switch listType {
case ListID:
return drp.gs.GetOPAControlsIDsList()
default:
return drp.gs.GetOPAControlsNamesList()
func (drp *DownloadReleasedPolicy) ListControls() ([]string, error) {
controlsIDsList, err := drp.gs.GetOPAControlsIDsList()
if err != nil {
return []string{}, err
}
controlsNamesList, err := drp.gs.GetOPAControlsNamesList()
if err != nil {
return []string{}, err
}
controls, err := drp.gs.GetOPAControls()
if err != nil {
return []string{}, err
}
var controlsFrameworksList [][]string
for _, control := range controls {
controlsFrameworksList = append(controlsFrameworksList, control.FrameworkNames)
}
controlsNamesWithIDsandFrameworksList := make([]string, len(controlsIDsList))
// by design all slices have the same length
for i := range controlsIDsList {
controlsNamesWithIDsandFrameworksList[i] = fmt.Sprintf("%v|%v|%v", controlsIDsList[i], controlsNamesList[i], strings.Join(controlsFrameworksList[i], ","))
}
return controlsNamesWithIDsandFrameworksList, nil
}

func (drp *DownloadReleasedPolicy) GetControlsInputs(clusterName string) (map[string][]string, error) {

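For reference, a short sketch of consuming the pipe-delimited entries produced by the rewritten ListControls above; the sample entry and its framework list are hypothetical.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical entry in the "<ID>|<name>|<frameworks>" format built by ListControls.
	entry := "C-0001|Forbidden Container Registries|NSA,ArmoBest"
	parts := strings.SplitN(entry, "|", 3)
	id, name, frameworks := parts[0], parts[1], strings.Split(parts[2], ",")
	fmt.Println(id, name, frameworks)
}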
@@ -6,19 +6,13 @@ import (
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
)

// supported listing
type ListType string

const ListID ListType = "id"
const ListName ListType = "name"

type IPolicyGetter interface {
GetFramework(name string) (*reporthandling.Framework, error)
GetFrameworks() ([]reporthandling.Framework, error)
GetControl(name string) (*reporthandling.Control, error)
GetControl(ID string) (*reporthandling.Control, error)

ListFrameworks() ([]string, error)
ListControls(ListType) ([]string, error)
ListControls() ([]string, error)
}

type IExceptionsGetter interface {

@@ -2,7 +2,6 @@ package getter

import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
@@ -21,18 +20,19 @@ func SaveInFile(policy interface{}, pathStr string) error {
if err != nil {
return err
}
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
err = os.WriteFile(pathStr, encodedData, 0644) //nolint:gosec
if err != nil {
if os.IsNotExist(err) {
pathDir := path.Dir(pathStr)
if err := os.Mkdir(pathDir, 0744); err != nil {
return err
// pathDir could contain subdirectories
if erm := os.MkdirAll(pathDir, 0755); erm != nil {
return erm
}
} else {
return err

}
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
err = os.WriteFile(pathStr, encodedData, 0644) //nolint:gosec
if err != nil {
return err
}
@@ -40,13 +40,6 @@ func SaveInFile(policy interface{}, pathStr string) error {
return nil
}

// JSONDecoder returns JSON decoder for given string
func JSONDecoder(origin string) *json.Decoder {
dec := json.NewDecoder(strings.NewReader(origin))
dec.UseNumber()
return dec
}

func HttpDelete(httpClient *http.Client, fullURL string, headers map[string]string) (string, error) {

req, err := http.NewRequest("DELETE", fullURL, nil)
@@ -65,6 +58,7 @@ func HttpDelete(httpClient *http.Client, fullURL string, headers map[string]stri
}
return respStr, nil
}

func HttpGetter(httpClient *http.Client, fullURL string, headers map[string]string) (string, error) {

req, err := http.NewRequest("GET", fullURL, nil)

26  core/cautils/getter/json.go  Normal file
@@ -0,0 +1,26 @@
package getter

import (
	"strings"

	stdjson "encoding/json"

	jsoniter "github.com/json-iterator/go"
)

var (
	json jsoniter.API
)

func init() {
	// NOTE(fredbi): attention, this configuration rounds floats down to 6 digits
	// For finer-grained config, see: https://pkg.go.dev/github.com/json-iterator/go#section-readme
	json = jsoniter.ConfigFastest
}

// JSONDecoder returns JSON decoder for given string
func JSONDecoder(origin string) *stdjson.Decoder {
	dec := stdjson.NewDecoder(strings.NewReader(origin))
	dec.UseNumber()
	return dec
}
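To illustrate the trade-off mentioned in the init comment above, a small sketch using jsoniter.ConfigFastest; the exact encoded float value is an assumption and may vary between json-iterator versions.

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	fast := jsoniter.ConfigFastest
	// ConfigFastest favors encoding speed over float precision
	// (roughly 6 significant digits), unlike encoding/json.
	out, err := fast.Marshal(map[string]float64{"score": 123.456789012})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // the float may be printed with reduced precision
}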
32  core/cautils/getter/json_test.go  Normal file
@@ -0,0 +1,32 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestJSONDecoder(t *testing.T) {
|
||||
t.Run("should decode json string", func(t *testing.T) {
|
||||
const input = `"xyz"`
|
||||
d := JSONDecoder(input)
|
||||
var receiver string
|
||||
require.NoError(t, d.Decode(&receiver))
|
||||
require.Equal(t, "xyz", receiver)
|
||||
})
|
||||
|
||||
t.Run("should decode json number", func(t *testing.T) {
|
||||
const input = `123.01`
|
||||
d := JSONDecoder(input)
|
||||
var receiver float64
|
||||
require.NoError(t, d.Decode(&receiver))
|
||||
require.Equal(t, 123.01, receiver)
|
||||
})
|
||||
|
||||
t.Run("requires json quotes", func(t *testing.T) {
|
||||
const input = `xyz`
|
||||
d := JSONDecoder(input)
|
||||
var receiver string
|
||||
require.Error(t, d.Decode(&receiver))
|
||||
})
|
||||
}
|
||||
@@ -2,9 +2,8 @@ package getter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -192,7 +191,7 @@ func (api *KSCloudAPI) GetFrameworks() ([]reporthandling.Framework, error) {
|
||||
return frameworks, err
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) GetControl(policyName string) (*reporthandling.Control, error) {
|
||||
func (api *KSCloudAPI) GetControl(ID string) (*reporthandling.Control, error) {
|
||||
return nil, fmt.Errorf("control api is not public")
|
||||
}
|
||||
|
||||
@@ -306,7 +305,7 @@ func (api *KSCloudAPI) ListFrameworks() ([]string, error) {
|
||||
return frameworkList, nil
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) ListControls(l ListType) ([]string, error) {
|
||||
func (api *KSCloudAPI) ListControls() ([]string, error) {
|
||||
return nil, fmt.Errorf("control api is not public")
|
||||
}
|
||||
|
||||
@@ -358,7 +357,7 @@ func (api *KSCloudAPI) Login() error {
|
||||
return fmt.Errorf("error authenticating: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
responseBody, err := ioutil.ReadAll(resp.Body)
|
||||
responseBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package getter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -9,12 +9,25 @@ import (
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
|
||||
)
|
||||
|
||||
// =======================================================================================================================
|
||||
// ============================================== LoadPolicy =============================================================
|
||||
// =======================================================================================================================
|
||||
var DefaultLocalStore = getCacheDir()
|
||||
var (
|
||||
DefaultLocalStore = getCacheDir()
|
||||
|
||||
ErrNotImplemented = errors.New("feature is currently not supported")
|
||||
ErrNotFound = errors.New("name not found")
|
||||
ErrNameRequired = errors.New("missing required input framework name")
|
||||
ErrIDRequired = errors.New("missing required input control ID")
|
||||
ErrFrameworkNotMatching = errors.New("framework from file not matching")
|
||||
ErrControlNotMatching = errors.New("control from file not matching")
|
||||
|
||||
_ IPolicyGetter = &LoadPolicy{}
|
||||
_ IExceptionsGetter = &LoadPolicy{}
|
||||
)
|
||||
|
||||
func getCacheDir() string {
|
||||
defaultDirPath := ".kubescape"
|
||||
@@ -24,125 +37,224 @@ func getCacheDir() string {
|
||||
return defaultDirPath
|
||||
}
|
||||
|
||||
// Load policies from a local repository
|
||||
// LoadPolicy loads policies from a local repository.
|
||||
type LoadPolicy struct {
|
||||
filePaths []string
|
||||
}
|
||||
|
||||
// NewLoadPolicy builds a LoadPolicy.
|
||||
func NewLoadPolicy(filePaths []string) *LoadPolicy {
|
||||
return &LoadPolicy{
|
||||
filePaths: filePaths,
|
||||
}
|
||||
}
|
||||
|
||||
// Return control from file
|
||||
func (lp *LoadPolicy) GetControl(controlName string) (*reporthandling.Control, error) {
|
||||
// GetControl returns a control from the policy file.
|
||||
func (lp *LoadPolicy) GetControl(controlID string) (*reporthandling.Control, error) {
|
||||
if controlID == "" {
|
||||
return nil, ErrIDRequired
|
||||
}
|
||||
|
||||
control := &reporthandling.Control{}
|
||||
// NOTE: this assumes that only the first path contains either a valid control descriptor or a framework descriptor
|
||||
filePath := lp.filePath()
|
||||
f, err := os.ReadFile(filePath)
|
||||
buf, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(f, control); err != nil {
|
||||
return control, err
|
||||
}
|
||||
if controlName != "" && !strings.EqualFold(controlName, control.Name) && !strings.EqualFold(controlName, control.ControlID) {
|
||||
framework, err := lp.GetFramework(control.Name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("control from file not matching")
|
||||
} else {
|
||||
for _, ctrl := range framework.Controls {
|
||||
if strings.EqualFold(ctrl.Name, controlName) || strings.EqualFold(ctrl.ControlID, controlName) {
|
||||
control = &ctrl
|
||||
break
|
||||
}
|
||||
}
|
||||
// check if the file is a control descriptor: a ControlID field is populated.
|
||||
var control reporthandling.Control
|
||||
if err = json.Unmarshal(buf, &control); err == nil && control.ControlID != "" {
|
||||
if strings.EqualFold(controlID, control.ControlID) {
|
||||
return &control, nil
|
||||
}
|
||||
}
|
||||
return control, err
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) GetFramework(frameworkName string) (*reporthandling.Framework, error) {
|
||||
return nil, fmt.Errorf("controlID: %s: %w", controlID, ErrControlNotMatching)
|
||||
}
|
||||
|
||||
// check if the file is a framework descriptor
|
||||
var framework reporthandling.Framework
|
||||
var err error
|
||||
if err = json.Unmarshal(buf, &framework); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, toPin := range framework.Controls {
|
||||
ctrl := toPin
|
||||
|
||||
if strings.EqualFold(ctrl.ControlID, controlID) {
|
||||
return &ctrl, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("controlID: %s: %w", controlID, ErrControlNotMatching)
|
||||
}
|
||||
|
||||
// GetFramework retrieves a framework configuration from the policy paths.
|
||||
func (lp *LoadPolicy) GetFramework(frameworkName string) (*reporthandling.Framework, error) {
|
||||
if frameworkName == "" {
|
||||
return nil, ErrNameRequired
|
||||
}
|
||||
|
||||
for _, filePath := range lp.filePaths {
|
||||
framework = reporthandling.Framework{}
|
||||
f, err := os.ReadFile(filePath)
|
||||
buf, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = json.Unmarshal(f, &framework); err != nil {
|
||||
|
||||
var framework reporthandling.Framework
|
||||
if err = json.Unmarshal(buf, &framework); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if strings.EqualFold(frameworkName, framework.Name) {
|
||||
break
|
||||
return &framework, nil
|
||||
}
|
||||
}
|
||||
if frameworkName != "" && !strings.EqualFold(frameworkName, framework.Name) {
|
||||
|
||||
return nil, fmt.Errorf("framework from file not matching")
|
||||
}
|
||||
return &framework, err
|
||||
return nil, fmt.Errorf("framework: %s: %w", frameworkName, ErrFrameworkNotMatching)
|
||||
}
|
||||
|
||||
// GetFrameworks returns all configured framework descriptors.
|
||||
func (lp *LoadPolicy) GetFrameworks() ([]reporthandling.Framework, error) {
|
||||
frameworks := []reporthandling.Framework{}
|
||||
var err error
|
||||
return frameworks, err
|
||||
}
|
||||
frameworks := make([]reporthandling.Framework, 0, 10)
|
||||
seenFws := make(map[string]struct{})
|
||||
|
||||
func (lp *LoadPolicy) ListFrameworks() ([]string, error) {
|
||||
fwNames := []string{}
|
||||
framework := &reporthandling.Framework{}
|
||||
for _, f := range lp.filePaths {
|
||||
file, err := os.ReadFile(f)
|
||||
if err == nil {
|
||||
if err := json.Unmarshal(file, framework); err == nil {
|
||||
if !contains(fwNames, framework.Name) {
|
||||
fwNames = append(fwNames, framework.Name)
|
||||
}
|
||||
}
|
||||
buf, err := os.ReadFile(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var framework reporthandling.Framework
|
||||
if err = json.Unmarshal(buf, &framework); err != nil {
|
||||
// ignore invalid framework files
|
||||
continue
|
||||
}
|
||||
|
||||
// dedupe
|
||||
_, alreadyLoaded := seenFws[framework.Name]
|
||||
if alreadyLoaded {
|
||||
continue
|
||||
}
|
||||
|
||||
seenFws[framework.Name] = struct{}{}
|
||||
frameworks = append(frameworks, framework)
|
||||
}
|
||||
return fwNames, nil
|
||||
|
||||
return frameworks, nil
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) ListControls(listType ListType) ([]string, error) {
|
||||
// TODO - Support
|
||||
return []string{}, fmt.Errorf("loading controls list from file is not supported")
|
||||
// ListFrameworks lists the names of all configured frameworks in this policy.
|
||||
func (lp *LoadPolicy) ListFrameworks() ([]string, error) {
|
||||
frameworkNames := make([]string, 0, 10)
|
||||
|
||||
for _, f := range lp.filePaths {
|
||||
buf, err := os.ReadFile(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var framework reporthandling.Framework
|
||||
if err := json.Unmarshal(buf, &framework); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if contains(frameworkNames, framework.Name) {
|
||||
continue
|
||||
}
|
||||
|
||||
frameworkNames = append(frameworkNames, framework.Name)
|
||||
}
|
||||
|
||||
return frameworkNames, nil
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
|
||||
// ListControls returns the list of controls for this framework.
|
||||
//
|
||||
// At this moment, controls are listed for one single configured framework.
|
||||
func (lp *LoadPolicy) ListControls() ([]string, error) {
|
||||
controlIDs := make([]string, 0, 100)
|
||||
filePath := lp.filePath()
|
||||
exception := []armotypes.PostureExceptionPolicy{}
|
||||
f, err := os.ReadFile(filePath)
|
||||
buf, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(f, &exception)
|
||||
var framework reporthandling.Framework
|
||||
if err = json.Unmarshal(buf, &framework); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, ctrl := range framework.Controls {
|
||||
controlIDs = append(controlIDs, ctrl.ControlID)
|
||||
}
|
||||
|
||||
return controlIDs, nil
|
||||
}
|
||||
|
||||
// GetExceptions retrieves configured exceptions.
|
||||
//
|
||||
// NOTE: the cluster parameter is not used at this moment.
|
||||
func (lp *LoadPolicy) GetExceptions(_ /* clusterName */ string) ([]armotypes.PostureExceptionPolicy, error) {
|
||||
// NOTE: this assumes that the first path contains a valid exceptions descriptor
|
||||
filePath := lp.filePath()
|
||||
|
||||
buf, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
exception := make([]armotypes.PostureExceptionPolicy, 0, 300)
|
||||
err = json.Unmarshal(buf, &exception)
|
||||
|
||||
return exception, err
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) GetControlsInputs(clusterName string) (map[string][]string, error) {
|
||||
// GetControlsInputs retrieves the map of control configs.
|
||||
//
|
||||
// NOTE: the cluster parameter is not used at this moment.
|
||||
func (lp *LoadPolicy) GetControlsInputs(_ /* clusterName */ string) (map[string][]string, error) {
|
||||
// NOTE: this assumes that only the first path contains a valid control inputs descriptor
|
||||
filePath := lp.filePath()
|
||||
accountConfig := &armotypes.CustomerConfig{}
|
||||
f, err := os.ReadFile(filePath)
|
||||
fileName := filepath.Base(filePath)
|
||||
|
||||
buf, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
formattedError := fmt.Errorf("Error opening %s file, \"controls-config\" will be downloaded from ARMO management portal", fileName)
|
||||
formattedError := fmt.Errorf(
|
||||
`Error opening %s file, "controls-config" will be downloaded from ARMO management portal`,
|
||||
fileName,
|
||||
)
|
||||
|
||||
return nil, formattedError
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(f, &accountConfig.Settings.PostureControlInputs); err == nil {
|
||||
return accountConfig.Settings.PostureControlInputs, nil
|
||||
controlInputs := make(map[string][]string, 100) // from armotypes.Settings.PostureControlInputs
|
||||
if err = json.Unmarshal(buf, &controlInputs); err != nil {
|
||||
formattedError := fmt.Errorf(
|
||||
`Error reading %s file, %v, "controls-config" will be downloaded from ARMO management portal`,
|
||||
fileName, err,
|
||||
)
|
||||
|
||||
return nil, formattedError
|
||||
}
|
||||
|
||||
formattedError := fmt.Errorf("Error reading %s file, %s, \"controls-config\" will be downloaded from ARMO management portal", fileName, err.Error())
|
||||
return controlInputs, nil
|
||||
}
|
||||
|
||||
return nil, formattedError
|
||||
// GetAttackTracks yields the attack tracks from a config file.
|
||||
func (lp *LoadPolicy) GetAttackTracks() ([]v1alpha1.AttackTrack, error) {
|
||||
attackTracks := make([]v1alpha1.AttackTrack, 0, 20)
|
||||
|
||||
buf, err := os.ReadFile(lp.filePath())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(buf, &attackTracks); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return attackTracks, nil
|
||||
}
|
||||
|
||||
// temporary support for a list of files
|
||||
|
||||
@@ -1,13 +1,386 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
"testing"
|
||||
|
||||
var mockFrameworkBasePath = filepath.Join("examples", "mocks", "frameworks")
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func MockNewLoadPolicy() *LoadPolicy {
|
||||
return &LoadPolicy{
|
||||
filePaths: []string{""},
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadPolicy(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const (
|
||||
testFramework = "MITRE"
|
||||
testControl = "C-0053"
|
||||
)
|
||||
|
||||
t.Run("with GetFramework", func(t *testing.T) {
|
||||
t.Run("should retrieve named framework", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
|
||||
fw, err := p.GetFramework(testFramework)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, fw)
|
||||
|
||||
require.Equal(t, testFramework, fw.Name)
|
||||
})
|
||||
|
||||
t.Run("should fail to retrieve framework", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
|
||||
fw, err := p.GetFramework("wrong")
|
||||
require.Error(t, err)
|
||||
require.Nil(t, fw)
|
||||
})
|
||||
|
||||
t.Run("edge case: should error on empty framework", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
|
||||
fw, err := p.GetFramework("")
|
||||
require.ErrorIs(t, err, ErrNameRequired)
|
||||
require.Nil(t, fw)
|
||||
})
|
||||
|
||||
t.Run("edge case: corrupted json", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const invalidFramework = "invalid-fw"
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(invalidFramework)})
|
||||
fw, err := p.GetFramework(invalidFramework)
|
||||
require.Error(t, err)
|
||||
require.Nil(t, fw)
|
||||
})
|
||||
|
||||
t.Run("edge case: missing json", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const invalidFramework = "nowheretobefound"
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(invalidFramework)})
|
||||
_, err := p.GetFramework(invalidFramework)
|
||||
require.Error(t, err)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("with GetControl", func(t *testing.T) {
|
||||
t.Run("should retrieve named control from framework", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const (
|
||||
expectedControlName = "Access container service account"
|
||||
)
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
|
||||
ctrl, err := p.GetControl(testControl)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, ctrl)
|
||||
|
||||
require.Equal(t, testControl, ctrl.ControlID)
|
||||
require.Equal(t, expectedControlName, ctrl.Name)
|
||||
})
|
||||
|
||||
t.Run("with single control descriptor", func(t *testing.T) {
|
||||
const (
|
||||
singleControl = "C-0001"
|
||||
expectedControlName = "Forbidden Container Registries"
|
||||
)
|
||||
|
||||
t.Run("should retrieve named control from control descriptor", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(singleControl)})
|
||||
ctrl, err := p.GetControl(singleControl)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, ctrl)
|
||||
|
||||
require.Equal(t, singleControl, ctrl.ControlID)
|
||||
require.Equal(t, expectedControlName, ctrl.Name)
|
||||
})
|
||||
|
||||
t.Run("should fail to retrieve named control from control descriptor", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(singleControl)})
|
||||
ctrl, err := p.GetControl("wrong")
|
||||
require.Error(t, err)
|
||||
require.Nil(t, ctrl)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("with framework descriptor", func(t *testing.T) {
|
||||
t.Run("should fail to retrieve named control", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const testControl = "wrong"
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
|
||||
ctrl, err := p.GetControl(testControl)
|
||||
require.ErrorIs(t, err, ErrControlNotMatching)
|
||||
require.Nil(t, ctrl)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("edge case: corrupted json", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const invalidControl = "invalid-fw"
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(invalidControl)})
|
||||
_, err := p.GetControl(invalidControl)
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("edge case: missing json", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const invalidControl = "nowheretobefound"
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(invalidControl)})
|
||||
_, err := p.GetControl(invalidControl)
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("edge case: should error on empty control", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
|
||||
ctrl, err := p.GetControl("")
|
||||
require.ErrorIs(t, err, ErrIDRequired)
|
||||
require.Nil(t, ctrl)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("with ListFrameworks", func(t *testing.T) {
|
||||
t.Run("should return all frameworks in the policy path", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const (
|
||||
extraFramework = "NSA"
|
||||
attackTracks = "attack-tracks"
|
||||
)
|
||||
p := NewLoadPolicy([]string{
|
||||
testFrameworkFile(testFramework),
|
||||
testFrameworkFile(extraFramework),
|
||||
testFrameworkFile(extraFramework), // should be deduped
|
||||
testFrameworkFile(attackTracks), // should be ignored
|
||||
})
|
||||
fws, err := p.ListFrameworks()
|
||||
require.NoError(t, err)
|
||||
require.Len(t, fws, 2)
|
||||
|
||||
require.Equal(t, testFramework, fws[0])
|
||||
require.Equal(t, extraFramework, fws[1])
|
||||
})
|
||||
|
||||
t.Run("should fail on file error", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const (
|
||||
extraFramework = "NSA"
|
||||
nowhere = "nowheretobeseen"
|
||||
)
|
||||
p := NewLoadPolicy([]string{
|
||||
testFrameworkFile(testFramework),
|
||||
testFrameworkFile(extraFramework),
|
||||
testFrameworkFile(nowhere), // should raise an error
|
||||
})
|
||||
fws, err := p.ListFrameworks()
|
||||
require.Error(t, err)
|
||||
require.Nil(t, fws)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("edge case: policy without path", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
p := NewLoadPolicy([]string{})
|
||||
require.Empty(t, p.filePath())
|
||||
})
|
||||
|
||||
t.Run("with GetFrameworks", func(t *testing.T) {
|
||||
const extraFramework = "NSA"
|
||||
|
||||
t.Run("should return all configured frameworks", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
p := NewLoadPolicy([]string{
|
||||
testFrameworkFile(testFramework),
|
||||
testFrameworkFile(extraFramework),
|
||||
})
|
||||
fws, err := p.GetFrameworks()
|
||||
require.NoError(t, err)
|
||||
require.Len(t, fws, 2)
|
||||
|
||||
require.Equal(t, testFramework, fws[0].Name)
|
||||
require.Equal(t, extraFramework, fws[1].Name)
|
||||
})
|
||||
|
||||
t.Run("should return dedupe configured frameworks", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const attackTracks = "attack-tracks"
|
||||
p := NewLoadPolicy([]string{
|
||||
testFrameworkFile(testFramework),
|
||||
testFrameworkFile(extraFramework),
|
||||
testFrameworkFile(extraFramework),
|
||||
testFrameworkFile(attackTracks), // should be ignored
|
||||
})
|
||||
fws, err := p.GetFrameworks()
|
||||
require.NoError(t, err)
|
||||
require.Len(t, fws, 2)
|
||||
|
||||
require.Equal(t, testFramework, fws[0].Name)
|
||||
require.Equal(t, extraFramework, fws[1].Name)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("with ListControls", func(t *testing.T) {
|
||||
t.Run("should return controls", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
|
||||
controlIDs, err := p.ListControls()
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, len(controlIDs), 0)
|
||||
require.Equal(t, testControl, controlIDs[0])
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("with GetAttackTracks", func(t *testing.T) {
|
||||
t.Run("should return attack tracks", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const attackTracks = "attack-tracks"
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(attackTracks)})
|
||||
tracks, err := p.GetAttackTracks()
|
||||
require.NoError(t, err)
|
||||
require.Greater(t, len(tracks), 0)
|
||||
|
||||
for _, track := range tracks {
|
||||
require.Equal(t, "AttackTrack", track.Kind)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("edge case: corrupted json", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const invalidTracks = "invalid-fw"
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(invalidTracks)})
|
||||
_, err := p.GetAttackTracks()
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("edge case: missing json", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const invalidTracks = "nowheretobefound"
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(invalidTracks)})
|
||||
_, err := p.GetAttackTracks()
|
||||
require.Error(t, err)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("with GetControlsInputs", func(t *testing.T) {
|
||||
const cluster = "dummy" // unused parameter at the moment
|
||||
|
||||
t.Run("should return control inputs for a cluster", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
fixture, expected := writeTempJSONControlInputs(t)
|
||||
t.Cleanup(func() {
|
||||
_ = os.Remove(fixture)
|
||||
})
|
||||
|
||||
p := NewLoadPolicy([]string{fixture})
|
||||
inputs, err := p.GetControlsInputs(cluster)
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, expected, inputs)
|
||||
})
|
||||
|
||||
t.Run("edge case: corrupted json", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const invalidInputs = "invalid-fw"
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(invalidInputs)})
|
||||
_, err := p.GetControlsInputs(cluster)
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("edge case: missing json", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const invalidInputs = "nowheretobefound"
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(invalidInputs)})
|
||||
_, err := p.GetControlsInputs(cluster)
|
||||
require.Error(t, err)
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("with GetExceptions", func(t *testing.T) {
|
||||
const cluster = "dummy" // unused parameter at the moment
|
||||
|
||||
t.Run("should return exceptions", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const exceptions = "exceptions"
|
||||
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(exceptions)})
|
||||
exceptionPolicies, err := p.GetExceptions(cluster)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Greater(t, len(exceptionPolicies), 0)
|
||||
t.Logf("len=%d", len(exceptionPolicies))
|
||||
for _, policy := range exceptionPolicies {
|
||||
require.NotEmpty(t, policy.Name)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("edge case: corrupted json", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const invalidInputs = "invalid-fw"
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(invalidInputs)})
|
||||
_, err := p.GetExceptions(cluster)
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("edge case: missing json", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
const invalidInputs = "nowheretobefound"
|
||||
p := NewLoadPolicy([]string{testFrameworkFile(invalidInputs)})
|
||||
_, err := p.GetExceptions(cluster)
|
||||
require.Error(t, err)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func testFrameworkFile(framework string) string {
|
||||
return filepath.Join(".", "testdata", fmt.Sprintf("%s.json", framework))
|
||||
}
|
||||
|
||||
func writeTempJSONControlInputs(t testing.TB) (string, map[string][]string) {
|
||||
fileName := testFrameworkFile("control-inputs")
|
||||
mock := map[string][]string{
|
||||
"key1": {
|
||||
"val1", "val2",
|
||||
},
|
||||
"key2": {
|
||||
"val3", "val4",
|
||||
},
|
||||
}
|
||||
|
||||
buf, err := json.Marshal(mock)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, os.WriteFile(fileName, buf, 0600))
|
||||
|
||||
return fileName, mock
|
||||
}
|
||||
|
||||
85  core/cautils/getter/testdata/C-0001.json  vendored  Normal file
@@ -0,0 +1,85 @@
|
||||
{
|
||||
"guid": "",
|
||||
"name": "Forbidden Container Registries",
|
||||
"attributes": {
|
||||
"armoBuiltin": true,
|
||||
"attackTracks": [
|
||||
{
|
||||
"attackTrack": "container",
|
||||
"categories": [
|
||||
"Initial access"
|
||||
]
|
||||
}
|
||||
],
|
||||
"controlTypeTags": [
|
||||
"security",
|
||||
"compliance"
|
||||
],
|
||||
"microsoftMitreColumns": [
|
||||
"Initial Access"
|
||||
]
|
||||
},
|
||||
"id": "C-0001",
|
||||
"controlID": "C-0001",
|
||||
"creationTime": "",
|
||||
"description": "In cases where the Kubernetes cluster is provided by a CSP (e.g., AKS in Azure, GKE in GCP, or EKS in AWS), compromised cloud credential can lead to the cluster takeover. Attackers may abuse cloud account credentials or IAM mechanism to the cluster’s management layer.",
|
||||
"remediation": "Limit the registries from which you pull container images from",
|
||||
"rules": [
|
||||
{
|
||||
"guid": "",
|
||||
"name": "rule-identify-blocklisted-image-registries",
|
||||
"attributes": {
|
||||
"armoBuiltin": true,
|
||||
"m$K8sThreatMatrix": "Initial Access::Compromised images in registry"
|
||||
},
|
||||
"creationTime": "",
|
||||
"rule": "package armo_builtins\nimport data\n# Check for images from blocklisted repos\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tpath := sprintf(\"spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrustedImageRepo[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\timage := container.image\n untrusted_or_public_registries(image)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' comes from untrusted registry\", [image, container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 2,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tuntrusted_registries := data.postureControlInputs.untrustedRegistries\n\trepo_prefix := untrusted_registries[_]\n\tstartswith(image, repo_prefix)\n}\n\nuntrusted_or_public_registries(image){\n\t# see default-config-inputs.json for list values\n\tpublic_registries := data.postureControlInputs.publicRegistries\n\trepo_prefix := public_registries[_]\n\tstartswith(image, repo_prefix)\n}",
|
||||
"resourceEnumerator": "",
|
||||
"ruleLanguage": "Rego",
|
||||
"match": [
|
||||
{
|
||||
"apiGroups": [
|
||||
"*"
|
||||
],
|
||||
"apiVersions": [
|
||||
"*"
|
||||
],
|
||||
"resources": [
|
||||
"Pod",
|
||||
"Deployment",
|
||||
"ReplicaSet",
|
||||
"DaemonSet",
|
||||
"StatefulSet",
|
||||
"Job",
|
||||
"CronJob"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ruleDependencies": [],
|
||||
"configInputs": [
|
||||
"settings.postureControlInputs.publicRegistries",
|
||||
"settings.postureControlInputs.untrustedRegistries"
|
||||
],
|
||||
"controlConfigInputs": [
|
||||
{
|
||||
"path": "settings.postureControlInputs.publicRegistries",
|
||||
"name": "Public registries",
|
||||
"description": "Kubescape checks none of these public registries are in use."
|
||||
},
|
||||
{
|
||||
"path": "settings.postureControlInputs.untrustedRegistries",
|
||||
"name": "Registries block list",
|
||||
"description": "Kubescape checks none of the following registries are in use."
|
||||
}
|
||||
],
|
||||
"description": "Identifying if pod container images are from unallowed registries",
|
||||
"remediation": "Use images from safe registry",
|
||||
"ruleQuery": "",
|
||||
"relevantCloudProviders": null
|
||||
}
|
||||
],
|
||||
"rulesIDs": [
|
||||
""
|
||||
],
|
||||
"baseScore": 7
|
||||
}
|
||||
2832  core/cautils/getter/testdata/MITRE.json  vendored  Normal file
File diff suppressed because one or more lines are too long
2249  core/cautils/getter/testdata/NSA.json  vendored  Normal file
File diff suppressed because one or more lines are too long
136  core/cautils/getter/testdata/attack-tracks.json  vendored  Normal file
@@ -0,0 +1,136 @@
|
||||
[
|
||||
{
|
||||
"apiVersion": "regolibrary.kubescape/v1alpha1",
|
||||
"kind": "AttackTrack",
|
||||
"metadata": {
|
||||
"name": "node"
|
||||
},
|
||||
"spec": {
|
||||
"data": {
|
||||
"name": "Initial access",
|
||||
"subSteps": [
|
||||
{
|
||||
"name": "Execution",
|
||||
"subSteps": [
|
||||
{
|
||||
"name": "Persistence"
|
||||
},
|
||||
{
|
||||
"name": "Credential access"
|
||||
},
|
||||
{
|
||||
"name": "Defense evasion"
|
||||
},
|
||||
{
|
||||
"name": "Discovery"
|
||||
},
|
||||
{
|
||||
"name": "Lateral movement"
|
||||
},
|
||||
{
|
||||
"name": "Impact - data theft"
|
||||
},
|
||||
{
|
||||
"name": "Impact - data destruction"
|
||||
},
|
||||
{
|
||||
"name": "Impact - service injection"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"apiVersion": "regolibrary.kubescape/v1alpha1",
|
||||
"kind": "AttackTrack",
|
||||
"metadata": {
|
||||
"name": "kubeapi"
|
||||
},
|
||||
"spec": {
|
||||
"data": {
|
||||
"name": "Initial access",
|
||||
"subSteps": [
|
||||
{
|
||||
"name": "Persistence"
|
||||
},
|
||||
{
|
||||
"name": "Privilege escalation"
|
||||
},
|
||||
{
|
||||
"name": "Credential access"
|
||||
},
|
||||
{
|
||||
"name": "Discovery"
|
||||
},
|
||||
{
|
||||
"name": "Lateral movement"
|
||||
},
|
||||
{
|
||||
"name": "Defense evasion"
|
||||
},
|
||||
{
|
||||
"name": "Impact - data destruction"
|
||||
},
|
||||
{
|
||||
"name": "Impact - service injection"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"apiVersion": "regolibrary.kubescape/v1alpha1",
|
||||
"kind": "AttackTrack",
|
||||
"metadata": {
|
||||
"name": "container"
|
||||
},
|
||||
"spec": {
|
||||
"data": {
|
||||
"name": "Initial access",
|
||||
"subSteps": [
|
||||
{
|
||||
"name": "Execution",
|
||||
"subSteps": [
|
||||
{
|
||||
"name": "Privilege escalation"
|
||||
},
|
||||
{
|
||||
"name": "Credential access",
|
||||
"subSteps": [
|
||||
{
|
||||
"name": "Impact - service access"
|
||||
},
|
||||
{
|
||||
"name": "Impact - K8s API access",
|
||||
"subSteps": [
|
||||
{
|
||||
"name": "Defense evasion - KubeAPI"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Discovery"
|
||||
},
|
||||
{
|
||||
"name": "Lateral movement"
|
||||
},
|
||||
{
|
||||
"name": "Impact - Data access in container"
|
||||
},
|
||||
{
|
||||
"name": "Persistence"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "Impact - service destruction"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
6407  core/cautils/getter/testdata/exceptions.json  vendored  Normal file
File diff suppressed because it is too large
3  core/cautils/getter/testdata/invalid-fw.json  vendored  Normal file
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"guid": "",
|
||||
}
|
||||
22  core/cautils/git_native_disabled.go  Normal file
@@ -0,0 +1,22 @@
|
||||
//go:build !gitenabled
|
||||
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/kubescape/go-git-url/apis"
|
||||
)
|
||||
|
||||
var ErrFatalNotSupportedByBuild = errors.New(`git scan not supported by this build. Build with tag "gitenabled" to enable the git scan feature`)
|
||||
|
||||
type gitRepository struct {
|
||||
}
|
||||
|
||||
func newGitRepository(root string) (*gitRepository, error) {
|
||||
return &gitRepository{}, ErrWarnNotSupportedByBuild
|
||||
}
|
||||
|
||||
func (g *gitRepository) GetFileLastCommit(filePath string) (*apis.Commit, error) {
|
||||
return nil, ErrFatalNotSupportedByBuild
|
||||
}
|
||||
11  core/cautils/git_native_disabled_test.go  Normal file
@@ -0,0 +1,11 @@
|
||||
//go:build !gitenabled
|
||||
|
||||
package cautils
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetLastCommit() {
|
||||
s.T().Log("warn: skipped testing native git functionality [GetLastCommit]")
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetFileLastCommit() {
|
||||
s.T().Log("warn: skipped testing native git functionality [GetFileLastCommit]")
|
||||
}
|
||||
141  core/cautils/git_native_enabled.go  Normal file
@@ -0,0 +1,141 @@
|
||||
//go:build gitenabled
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/kubescape/go-git-url/apis"
|
||||
git2go "github.com/libgit2/git2go/v33"
|
||||
)
|
||||
|
||||
type gitRepository struct {
|
||||
git2GoRepo *git2go.Repository
|
||||
fileToLastCommit map[string]*git2go.Commit
|
||||
}
|
||||
|
||||
func newGitRepository(root string) (*gitRepository, error) {
|
||||
git2GoRepo, err := git2go.OpenRepository(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &gitRepository{
|
||||
git2GoRepo: git2GoRepo,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (g *gitRepository) GetFileLastCommit(filePath string) (*apis.Commit, error) {
|
||||
if len(g.fileToLastCommit) == 0 {
|
||||
filePathToCommitTime := map[string]time.Time{}
|
||||
filePathToCommit := map[string]*git2go.Commit{}
|
||||
allCommits, _ := g.getAllCommits()
|
||||
|
||||
// builds a map of all files to their last commit
|
||||
for _, commit := range allCommits {
|
||||
// Ignore merge commits (2+ parents)
|
||||
if commit.ParentCount() <= 1 {
|
||||
tree, err := commit.Tree()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// ParentCount can be either 1 or 0 (initial commit)
|
||||
// In case it's the initial commit, prevTree is nil
|
||||
var prevTree *git2go.Tree
|
||||
if commit.ParentCount() == 1 {
|
||||
prevCommit := commit.Parent(0)
|
||||
prevTree, err = prevCommit.Tree()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
diff, err := g.git2GoRepo.DiffTreeToTree(prevTree, tree, nil)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
numDeltas, err := diff.NumDeltas()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for i := 0; i < numDeltas; i++ {
|
||||
delta, err := diff.Delta(i)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
deltaFilePath := delta.NewFile.Path
|
||||
commitTime := commit.Author().When
|
||||
|
||||
// In case we have the commit information for the file which is not the latest - we override it
|
||||
if currentCommitTime, exists := filePathToCommitTime[deltaFilePath]; exists {
|
||||
if currentCommitTime.Before(commitTime) {
|
||||
filePathToCommitTime[deltaFilePath] = commitTime
|
||||
filePathToCommit[deltaFilePath] = commit
|
||||
}
|
||||
} else {
|
||||
filePathToCommitTime[deltaFilePath] = commitTime
|
||||
filePathToCommit[deltaFilePath] = commit
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
g.fileToLastCommit = filePathToCommit
|
||||
}
|
||||
|
||||
if relevantCommit, exists := g.fileToLastCommit[filePath]; exists {
|
||||
return g.getCommit(relevantCommit), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to get commit information for file: %s", filePath)
|
||||
}
|
||||
|
||||
func (g *gitRepository) getAllCommits() ([]*git2go.Commit, error) {
|
||||
logItr, itrErr := g.git2GoRepo.Walk()
|
||||
if itrErr != nil {
|
||||
|
||||
return nil, itrErr
|
||||
}
|
||||
|
||||
pushErr := logItr.PushHead()
|
||||
if pushErr != nil {
|
||||
return nil, pushErr
|
||||
}
|
||||
|
||||
var allCommits []*git2go.Commit
|
||||
err := logItr.Iterate(func(commit *git2go.Commit) bool {
|
||||
if commit != nil {
|
||||
allCommits = append(allCommits, commit)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return allCommits, nil
|
||||
}
|
||||
|
||||
func (g *gitRepository) getCommit(commit *git2go.Commit) *apis.Commit {
|
||||
return &apis.Commit{
|
||||
SHA: commit.Id().String(),
|
||||
Author: apis.Committer{
|
||||
Name: commit.Author().Name,
|
||||
Email: commit.Author().Email,
|
||||
Date: commit.Author().When,
|
||||
},
|
||||
Message: commit.Message(),
|
||||
Committer: apis.Committer{},
|
||||
Files: []apis.Files{},
|
||||
}
|
||||
}
|
||||
44  core/cautils/git_native_enabled_test.go  Normal file
@@ -0,0 +1,44 @@
//go:build gitenabled

package cautils

func (s *LocalGitRepositoryTestSuite) TestGetLastCommit() {
	if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
		if commit, err := localRepo.GetLastCommit(); s.NoError(err) {
			s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
			s.Equal("Amir Malka", commit.Author.Name)
			s.Equal("amirm@armosec.io", commit.Author.Email)
			s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
			s.Equal("added file B\n", commit.Message)
		}
	}
}

func (s *LocalGitRepositoryTestSuite) TestGetFileLastCommit() {
	s.Run("fileA", func() {
		if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
			if commit, err := localRepo.GetFileLastCommit("fileA"); s.NoError(err) {
				s.Equal("9fae4be19624297947d2b605cefbff516628612d", commit.SHA)
				s.Equal("Amir Malka", commit.Author.Name)
				s.Equal("amirm@armosec.io", commit.Author.Email)
				s.Equal("2022-05-22 18:55:48 +0300 +0300", commit.Author.Date.String())
				s.Equal("added file A\n", commit.Message)
			}
		}
	})

	s.Run("fileB", func() {
		if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
			if commit, err := localRepo.GetFileLastCommit("dirA/fileB"); s.NoError(err) {
				s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
				s.Equal("Amir Malka", commit.Author.Name)
				s.Equal("amirm@armosec.io", commit.Author.Email)
				s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
				s.Equal("added file B\n", commit.Message)
			}
		}
	})
}
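The suite above sits behind the gitenabled build constraint, so a plain go test run skips it; it only compiles when the tag is passed explicitly, for example (package path taken from the file header above):

	go test -tags gitenabled ./core/cautils/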
16
core/cautils/gitparse_test.go
Normal file
@@ -0,0 +1,16 @@
package cautils

import (
	"testing"

	giturl "github.com/kubescape/go-git-url"
	"github.com/stretchr/testify/require"
)

func TestEnsureRemoteParsed(t *testing.T) {
	const remote = "git@gitlab.com:foobar/gitlab-tests/sample-project.git"

	require.NotPanics(t, func() {
		_, _ = giturl.NewGitURL(remote)
	})
}
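TestEnsureRemoteParsed only asserts that parsing an SSH-style GitLab remote does not panic. A hedged sketch of using the same constructor defensively in application code; only the constructor call is taken from this diff, the fallback behaviour and function name are illustrative:

// Sketch: keep the remote only when it parses, otherwise fall back to an empty value.
func remoteForMetadata(remote string) string {
	if _, err := giturl.NewGitURL(remote); err != nil {
		return "" // treat the scan target as a plain local path
	}
	return remote
}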
@@ -3,7 +3,6 @@ package cautils
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -39,7 +38,7 @@ func (s *HelmChartTestSuite) SetupSuite() {
|
||||
}
|
||||
|
||||
var obj interface{}
|
||||
file, _ := ioutil.ReadFile(filepath.Join("testdata", "helm_expected_default_values.json"))
|
||||
file, _ := os.ReadFile(filepath.Join("testdata", "helm_expected_default_values.json"))
|
||||
_ = json.Unmarshal([]byte(file), &obj)
|
||||
s.expectedDefaultValues = obj.(map[string]interface{})
|
||||
}
|
||||
|
||||
@@ -1,26 +1,26 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/go-git-url/apis"
|
||||
gitv5 "github.com/go-git/go-git/v5"
|
||||
configv5 "github.com/go-git/go-git/v5/config"
|
||||
plumbingv5 "github.com/go-git/go-git/v5/plumbing"
|
||||
git2go "github.com/libgit2/git2go/v33"
|
||||
"github.com/kubescape/go-git-url/apis"
|
||||
)
|
||||
|
||||
type LocalGitRepository struct {
|
||||
goGitRepo *gitv5.Repository
|
||||
git2GoRepo *git2go.Repository
|
||||
head *plumbingv5.Reference
|
||||
config *configv5.Config
|
||||
fileToLastCommit map[string]*git2go.Commit
|
||||
*gitRepository
|
||||
goGitRepo *gitv5.Repository
|
||||
head *plumbingv5.Reference
|
||||
config *configv5.Config
|
||||
}
|
||||
|
||||
var ErrWarnNotSupportedByBuild = errors.New(`git commits retrieval not supported by this build. Build with tag "gitenabled" to enable the full git scan feature`)
|
||||
|
||||
func NewLocalGitRepository(path string) (*LocalGitRepository, error) {
|
||||
goGitRepo, err := gitv5.PlainOpenWithOptions(path, &gitv5.PlainOpenOptions{DetectDotGit: true})
|
||||
if err != nil {
|
||||
@@ -52,11 +52,12 @@ func NewLocalGitRepository(path string) (*LocalGitRepository, error) {
|
||||
}
|
||||
|
||||
if repoRoot, err := l.GetRootDir(); err == nil {
|
||||
git2GoRepo, err := git2go.OpenRepository(repoRoot)
|
||||
if err != nil {
|
||||
gitRepository, err := newGitRepository(repoRoot)
|
||||
if err != nil && !errors.Is(err, ErrWarnNotSupportedByBuild) {
|
||||
return l, err
|
||||
}
|
||||
l.git2GoRepo = git2GoRepo
|
||||
|
||||
l.gitRepository = gitRepository
|
||||
}
|
||||
|
||||
return l, nil
|
||||
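NewLocalGitRepository now tolerates ErrWarnNotSupportedByBuild: when the binary is built without the gitenabled tag, newGitRepository is expected to return that sentinel and commit enrichment is simply skipped. A minimal sketch of the stub side of that contract; the file name, build tag placement, and exact stub body are assumptions for illustration, not the repository's actual files:

//go:build !gitenabled

package cautils

// newGitRepository stub: git2go is not compiled in, so commit retrieval is
// unavailable and callers fall back gracefully on ErrWarnNotSupportedByBuild.
func newGitRepository(root string) (*gitRepository, error) {
	return nil, ErrWarnNotSupportedByBuild
}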
@@ -72,6 +73,10 @@ func (g *LocalGitRepository) GetRemoteUrl() (string, error) {
|
||||
branchName := g.GetBranchName()
|
||||
if branchRef, branchFound := g.config.Branches[branchName]; branchFound {
|
||||
remoteName := branchRef.Remote
|
||||
// branchRef.Remote can be a reference to a config.Remotes entry or directly a gitUrl
|
||||
if _, found := g.config.Remotes[remoteName]; !found {
|
||||
return remoteName, nil
|
||||
}
|
||||
if len(g.config.Remotes[remoteName].URLs) == 0 {
|
||||
return "", fmt.Errorf("expected to find URLs for remote '%s', branch '%s'", remoteName, branchName)
|
||||
}
|
||||
@@ -79,10 +84,13 @@ func (g *LocalGitRepository) GetRemoteUrl() (string, error) {
|
||||
}
|
||||
|
||||
const defaultRemoteName string = "origin"
|
||||
if len(g.config.Remotes[defaultRemoteName].URLs) == 0 {
|
||||
defaultRemote, ok := g.config.Remotes[defaultRemoteName]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("did not find a default remote with name '%s'", defaultRemoteName)
|
||||
} else if len(defaultRemote.URLs) == 0 {
|
||||
return "", fmt.Errorf("expected to find URLs for remote '%s'", defaultRemoteName)
|
||||
}
|
||||
return g.config.Remotes[defaultRemoteName].URLs[0], nil
|
||||
return defaultRemote.URLs[0], nil
|
||||
}
|
||||
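With the change above, GetRemoteUrl resolves the remote in order: the current branch's configured remote (which may itself already be a URL), that remote's first URL, and finally the first URL of the default "origin" remote, returning an error instead of panicking when no such remote exists. A small usage sketch that tolerates repositories without remotes; the wrapper function name is illustrative:

// Sketch: resolve the repo's remote URL, tolerating freshly-initialized repos.
func remoteOrEmpty(repo *LocalGitRepository) string {
	url, err := repo.GetRemoteUrl()
	if err != nil {
		// e.g. no upstream configured and no 'origin' remote defined
		return ""
	}
	return url
}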
|
||||
// GetName get origin name without the .git suffix
|
||||
@@ -122,120 +130,6 @@ func (g *LocalGitRepository) GetLastCommit() (*apis.Commit, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (g *LocalGitRepository) getAllCommits() ([]*git2go.Commit, error) {
|
||||
logItr, itrErr := g.git2GoRepo.Walk()
|
||||
if itrErr != nil {
|
||||
|
||||
return nil, itrErr
|
||||
}
|
||||
|
||||
pushErr := logItr.PushHead()
|
||||
if pushErr != nil {
|
||||
return nil, pushErr
|
||||
}
|
||||
|
||||
var allCommits []*git2go.Commit
|
||||
err := logItr.Iterate(func(commit *git2go.Commit) bool {
|
||||
if commit != nil {
|
||||
allCommits = append(allCommits, commit)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return allCommits, nil
|
||||
}
|
||||
|
||||
func (g *LocalGitRepository) GetFileLastCommit(filePath string) (*apis.Commit, error) {
|
||||
if len(g.fileToLastCommit) == 0 {
|
||||
filePathToCommitTime := map[string]time.Time{}
|
||||
filePathToCommit := map[string]*git2go.Commit{}
|
||||
allCommits, _ := g.getAllCommits()
|
||||
|
||||
// builds a map of all files to their last commit
|
||||
for _, commit := range allCommits {
|
||||
// Ignore merge commits (2+ parents)
|
||||
if commit.ParentCount() <= 1 {
|
||||
tree, err := commit.Tree()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// ParentCount can be either 1 or 0 (initial commit)
|
||||
// In case it's the initial commit, prevTree is nil
|
||||
var prevTree *git2go.Tree
|
||||
if commit.ParentCount() == 1 {
|
||||
prevCommit := commit.Parent(0)
|
||||
prevTree, err = prevCommit.Tree()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
diff, err := g.git2GoRepo.DiffTreeToTree(prevTree, tree, nil)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
numDeltas, err := diff.NumDeltas()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for i := 0; i < numDeltas; i++ {
|
||||
delta, err := diff.Delta(i)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
deltaFilePath := delta.NewFile.Path
|
||||
commitTime := commit.Author().When
|
||||
|
||||
// In case we have the commit information for the file which is not the latest - we override it
|
||||
if currentCommitTime, exists := filePathToCommitTime[deltaFilePath]; exists {
|
||||
if currentCommitTime.Before(commitTime) {
|
||||
filePathToCommitTime[deltaFilePath] = commitTime
|
||||
filePathToCommit[deltaFilePath] = commit
|
||||
}
|
||||
} else {
|
||||
filePathToCommitTime[deltaFilePath] = commitTime
|
||||
filePathToCommit[deltaFilePath] = commit
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
g.fileToLastCommit = filePathToCommit
|
||||
}
|
||||
|
||||
if relevantCommit, exists := g.fileToLastCommit[filePath]; exists {
|
||||
return g.getCommit(relevantCommit), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to get commit information for file: %s", filePath)
|
||||
}
|
||||
|
||||
func (g *LocalGitRepository) getCommit(commit *git2go.Commit) *apis.Commit {
|
||||
return &apis.Commit{
|
||||
SHA: commit.Id().String(),
|
||||
Author: apis.Committer{
|
||||
Name: commit.Author().Name,
|
||||
Email: commit.Author().Email,
|
||||
Date: commit.Author().When,
|
||||
},
|
||||
Message: commit.Message(),
|
||||
Committer: apis.Committer{},
|
||||
Files: []apis.Files{},
|
||||
}
|
||||
}
|
||||
|
||||
func (g *LocalGitRepository) GetRootDir() (string, error) {
|
||||
wt, err := g.goGitRepo.Worktree()
|
||||
if err != nil {
|
||||
|
||||
@@ -9,6 +9,8 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
configv5 "github.com/go-git/go-git/v5/config"
|
||||
plumbingv5 "github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
@@ -26,40 +28,58 @@ func unzipFile(zipPath, destinationFolder string) (*zip.ReadCloser, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, f := range archive.File {
|
||||
filePath := filepath.Join(destinationFolder, f.Name)
|
||||
filePath := filepath.Join(destinationFolder, f.Name) //nolint:gosec
|
||||
if !strings.HasPrefix(filePath, filepath.Clean(destinationFolder)+string(os.PathSeparator)) {
|
||||
return nil, fmt.Errorf("invalid file path")
|
||||
}
|
||||
|
||||
if f.FileInfo().IsDir() {
|
||||
os.MkdirAll(filePath, os.ModePerm)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
|
||||
return nil, err
|
||||
if erc := copyFileInFolder(filePath, f); erc != nil {
|
||||
return nil, erc
|
||||
}
|
||||
|
||||
dstFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileInArchive, err := f.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err := io.Copy(dstFile, fileInArchive); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dstFile.Close()
|
||||
fileInArchive.Close()
|
||||
}
|
||||
|
||||
return archive, err
|
||||
}
|
||||
|
||||
func copyFileInFolder(filePath string, f *zip.File) (err error) {
|
||||
if err = os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dstFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = dstFile.Close()
|
||||
}()
|
||||
|
||||
fileInArchive, err := f.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = fileInArchive.Close()
|
||||
}()
|
||||
|
||||
_, err = io.Copy(dstFile, fileInArchive) //nolint:gosec
|
||||
|
||||
if err = dstFile.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = fileInArchive.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
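The HasPrefix check in unzipFile above is the usual guard against "zip-slip" path traversal, where an archive entry such as ../../etc/passwd would otherwise escape the destination folder. A self-contained sketch of just that guard, using only the standard library; names and paths are illustrative:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// safeJoin rejects archive entry names that would escape destDir.
func safeJoin(destDir, entryName string) (string, error) {
	p := filepath.Join(destDir, entryName)
	if !strings.HasPrefix(p, filepath.Clean(destDir)+string(os.PathSeparator)) {
		return "", fmt.Errorf("invalid file path: %s", entryName)
	}
	return p, nil
}

func main() {
	fmt.Println(safeJoin("/tmp/out", "dirA/fileB"))       // accepted
	fmt.Println(safeJoin("/tmp/out", "../../etc/passwd")) // rejected
}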
|
||||
func (s *LocalGitRepositoryTestSuite) SetupSuite() {
|
||||
@@ -132,44 +152,49 @@ func (s *LocalGitRepositoryTestSuite) TestGetOriginUrl() {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetLastCommit() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
|
||||
if commit, err := localRepo.GetLastCommit(); s.NoError(err) {
|
||||
s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
|
||||
s.Equal("Amir Malka", commit.Author.Name)
|
||||
s.Equal("amirm@armosec.io", commit.Author.Email)
|
||||
s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
|
||||
s.Equal("added file B\n", commit.Message)
|
||||
}
|
||||
func TestGetRemoteUrl(t *testing.T) {
|
||||
testCases := []struct {
|
||||
Name string
|
||||
LocalRepo LocalGitRepository
|
||||
Want string
|
||||
WantErr error
|
||||
}{
|
||||
{
|
||||
Name: "Branch with missing upstream and missing 'origin' fallback should return an error",
|
||||
LocalRepo: LocalGitRepository{
|
||||
config: &configv5.Config{
|
||||
Branches: make(map[string]*configv5.Branch),
|
||||
Remotes: make(map[string]*configv5.RemoteConfig),
|
||||
},
|
||||
head: plumbingv5.NewReferenceFromStrings("HEAD", "ref: refs/heads/v4"),
|
||||
},
|
||||
Want: "",
|
||||
WantErr: fmt.Errorf("did not find a default remote with name 'origin'"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
localRepo := LocalGitRepository{
|
||||
config: &configv5.Config{
|
||||
Branches: make(map[string]*configv5.Branch),
|
||||
Remotes: make(map[string]*configv5.RemoteConfig),
|
||||
},
|
||||
head: plumbingv5.NewReferenceFromStrings("HEAD", "ref: refs/heads/v4"),
|
||||
}
|
||||
|
||||
want := tc.Want
|
||||
wantErr := tc.WantErr
|
||||
got, gotErr := localRepo.GetRemoteUrl()
|
||||
|
||||
if got != want {
|
||||
t.Errorf("Remote URLs don’t match: got '%s', want '%s'", got, want)
|
||||
}
|
||||
|
||||
if gotErr.Error() != wantErr.Error() {
|
||||
t.Errorf("Errors don’t match: got '%v', want '%v'", gotErr, wantErr)
|
||||
}
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetFileLastCommit() {
|
||||
s.Run("fileA", func() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
|
||||
|
||||
if commit, err := localRepo.GetFileLastCommit("fileA"); s.NoError(err) {
|
||||
s.Equal("9fae4be19624297947d2b605cefbff516628612d", commit.SHA)
|
||||
s.Equal("Amir Malka", commit.Author.Name)
|
||||
s.Equal("amirm@armosec.io", commit.Author.Email)
|
||||
s.Equal("2022-05-22 18:55:48 +0300 +0300", commit.Author.Date.String())
|
||||
s.Equal("added file A\n", commit.Message)
|
||||
}
|
||||
|
||||
}
|
||||
})
|
||||
|
||||
s.Run("fileB", func() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
|
||||
|
||||
if commit, err := localRepo.GetFileLastCommit("dirA/fileB"); s.NoError(err) {
|
||||
s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
|
||||
s.Equal("Amir Malka", commit.Author.Name)
|
||||
s.Equal("amirm@armosec.io", commit.Author.Email)
|
||||
s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
|
||||
s.Equal("added file B\n", commit.Message)
|
||||
}
|
||||
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -3,7 +3,6 @@ package cautils
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -11,8 +10,8 @@ import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
|
||||
giturl "github.com/armosec/go-git-url"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
giturl "github.com/kubescape/go-git-url"
|
||||
"github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
@@ -40,7 +39,8 @@ const (
|
||||
// ScanCluster string = "cluster"
|
||||
// ScanLocalFiles string = "yaml"
|
||||
localControlInputsFilename string = "controls-inputs.json"
|
||||
localExceptionsFilename string = "exceptions.json"
|
||||
LocalExceptionsFilename string = "exceptions.json"
|
||||
LocalAttackTracksFilename string = "attack-tracks.json"
|
||||
)
|
||||
|
||||
type BoolPtrFlag struct {
|
||||
@@ -94,7 +94,7 @@ const (
|
||||
)
|
||||
|
||||
type PolicyIdentifier struct {
|
||||
Name string // policy name e.g. nsa,mitre,c-0012
|
||||
Identifier string // policy Identifier e.g. c-0012 for control, nsa,mitre for frameworks
|
||||
Kind apisv1.NotificationPolicyKind // policy kind e.g. Framework,Control,Rule
|
||||
Designators armotypes.PortalDesignator
|
||||
}
|
||||
@@ -104,6 +104,7 @@ type ScanInfo struct {
|
||||
PolicyIdentifier []PolicyIdentifier // TODO - remove from object
|
||||
UseExceptions string // Load file with exceptions configuration
|
||||
ControlsInputs string // Load file with inputs for controls
|
||||
AttackTracks string // Load file with attack tracks
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
|
||||
@@ -120,6 +121,7 @@ type ScanInfo struct {
|
||||
FailThreshold float32 // Failure score threshold
|
||||
FailThresholdSeverity string // Severity at and above which the command should fail
|
||||
Submit bool // Submit results to Kubescape Cloud BE
|
||||
CreateAccount bool // Create account in Kubescape Cloud BE if no account found in local cache
|
||||
ScanID string // Report id of the current scan
|
||||
HostSensorEnabled BoolPtrFlag // Deploy Kubescape K8s host scanner to collect data from certain controls
|
||||
HostSensorYamlPath string // Path to hostsensor file
|
||||
@@ -128,6 +130,8 @@ type ScanInfo struct {
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
OmitRawResources bool // true if omit raw resources from the output
|
||||
PrintAttackTree bool // true if print attack tree
|
||||
}
|
||||
|
||||
type Getters struct {
|
||||
@@ -139,7 +143,6 @@ type Getters struct {
|
||||
|
||||
func (scanInfo *ScanInfo) Init() {
|
||||
scanInfo.setUseFrom()
|
||||
scanInfo.setOutputFile()
|
||||
scanInfo.setUseArtifactsFrom()
|
||||
if scanInfo.ScanID == "" {
|
||||
scanInfo.ScanID = uuid.NewString()
|
||||
@@ -159,7 +162,7 @@ func (scanInfo *ScanInfo) setUseArtifactsFrom() {
|
||||
scanInfo.UseArtifactsFrom = dir
|
||||
}
|
||||
// set frameworks files
|
||||
files, err := ioutil.ReadDir(scanInfo.UseArtifactsFrom)
|
||||
files, err := os.ReadDir(scanInfo.UseArtifactsFrom)
|
||||
if err != nil {
|
||||
logger.L().Fatal("failed to read files from directory", helpers.String("dir", scanInfo.UseArtifactsFrom), helpers.Error(err))
|
||||
}
|
||||
@@ -176,35 +179,27 @@ func (scanInfo *ScanInfo) setUseArtifactsFrom() {
|
||||
// set config-inputs file
|
||||
scanInfo.ControlsInputs = filepath.Join(scanInfo.UseArtifactsFrom, localControlInputsFilename)
|
||||
// set exceptions
|
||||
scanInfo.UseExceptions = filepath.Join(scanInfo.UseArtifactsFrom, localExceptionsFilename)
|
||||
scanInfo.UseExceptions = filepath.Join(scanInfo.UseArtifactsFrom, LocalExceptionsFilename)
|
||||
|
||||
// set attack tracks
|
||||
scanInfo.AttackTracks = filepath.Join(scanInfo.UseArtifactsFrom, LocalAttackTracksFilename)
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) setUseFrom() {
|
||||
if scanInfo.UseDefault {
|
||||
for _, policy := range scanInfo.PolicyIdentifier {
|
||||
scanInfo.UseFrom = append(scanInfo.UseFrom, getter.GetDefaultPath(policy.Name+".json"))
|
||||
scanInfo.UseFrom = append(scanInfo.UseFrom, getter.GetDefaultPath(policy.Identifier+".json"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) setOutputFile() {
|
||||
if scanInfo.Output == "" {
|
||||
return
|
||||
}
|
||||
if scanInfo.Format == "json" {
|
||||
if filepath.Ext(scanInfo.Output) != ".json" {
|
||||
scanInfo.Output += ".json"
|
||||
}
|
||||
}
|
||||
if scanInfo.Format == "junit" {
|
||||
if filepath.Ext(scanInfo.Output) != ".xml" {
|
||||
scanInfo.Output += ".xml"
|
||||
}
|
||||
}
|
||||
if scanInfo.Format == "pdf" {
|
||||
if filepath.Ext(scanInfo.Output) != ".pdf" {
|
||||
scanInfo.Output += ".pdf"
|
||||
}
|
||||
// Formats returns a slice of output formats that have been requested for a given scan
|
||||
func (scanInfo *ScanInfo) Formats() []string {
|
||||
formatString := scanInfo.Format
|
||||
if formatString != "" {
|
||||
return strings.Split(scanInfo.Format, ",")
|
||||
} else {
|
||||
return []string{}
|
||||
}
|
||||
}
|
||||
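Formats splits the comma-separated format string into one entry per requested output, and deliberately returns an empty slice (rather than splitting the empty string, which would yield [""]) when no format was set; the scan code later builds one printer per entry. A quick usage sketch:

// Sketch: several output formats requested in a single scan.
scanInfo := &ScanInfo{Format: "json,sarif,pretty-printer"}
for _, f := range scanInfo.Formats() {
	fmt.Println("will render results as", f) // json, sarif, pretty-printer
}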
|
||||
@@ -213,7 +208,7 @@ func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind apisv1.No
|
||||
if !scanInfo.contains(policy) {
|
||||
newPolicy := PolicyIdentifier{}
|
||||
newPolicy.Kind = kind
|
||||
newPolicy.Name = policy
|
||||
newPolicy.Identifier = policy
|
||||
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
|
||||
}
|
||||
}
|
||||
@@ -221,7 +216,7 @@ func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind apisv1.No
|
||||
|
||||
func (scanInfo *ScanInfo) contains(policyName string) bool {
|
||||
for _, policy := range scanInfo.PolicyIdentifier {
|
||||
if policy.Name == policyName {
|
||||
if policy.Identifier == policyName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@@ -249,7 +244,7 @@ func scanInfoToScanMetadata(scanInfo *ScanInfo) *reporthandlingv2.Metadata {
|
||||
}
|
||||
// append frameworks
|
||||
for _, policy := range scanInfo.PolicyIdentifier {
|
||||
metadata.ScanMetadata.TargetNames = append(metadata.ScanMetadata.TargetNames, policy.Name)
|
||||
metadata.ScanMetadata.TargetNames = append(metadata.ScanMetadata.TargetNames, policy.Identifier)
|
||||
}
|
||||
|
||||
metadata.ScanMetadata.KubescapeVersion = BuildNumber
|
||||
|
||||
@@ -43,3 +43,30 @@ func TestGetScanningContext(t *testing.T) {
|
||||
assert.Equal(t, ContextCluster, GetScanningContext(""))
|
||||
assert.Equal(t, ContextGitURL, GetScanningContext("https://github.com/kubescape/kubescape"))
|
||||
}
|
||||
|
||||
func TestScanInfoFormats(t *testing.T) {
|
||||
testCases := []struct {
|
||||
Input string
|
||||
Want []string
|
||||
}{
|
||||
{"", []string{}},
|
||||
{"json", []string{"json"}},
|
||||
{"pdf", []string{"pdf"}},
|
||||
{"html", []string{"html"}},
|
||||
{"sarif", []string{"sarif"}},
|
||||
{"html,pdf,sarif", []string{"html", "pdf", "sarif"}},
|
||||
{"pretty-printer,pdf,sarif", []string{"pretty-printer", "pdf", "sarif"}},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.Input, func(t *testing.T) {
|
||||
input := tc.Input
|
||||
want := tc.Want
|
||||
scanInfo := &ScanInfo{Format: input}
|
||||
|
||||
got := scanInfo.Formats()
|
||||
|
||||
assert.Equal(t, want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,8 +14,9 @@ import (
|
||||
"golang.org/x/mod/semver"
|
||||
)
|
||||
|
||||
const SKIP_VERSION_CHECK_DEPRECATED = "KUBESCAPE_SKIP_UPDATE_CHECK"
|
||||
const SKIP_VERSION_CHECK = "KS_SKIP_UPDATE_CHECK"
|
||||
const SKIP_VERSION_CHECK_DEPRECATED_ENV = "KUBESCAPE_SKIP_UPDATE_CHECK"
|
||||
const SKIP_VERSION_CHECK_ENV = "KS_SKIP_UPDATE_CHECK"
|
||||
const CLIENT_ENV = "KS_CLIENT"
|
||||
|
||||
var BuildNumber string
|
||||
var Client string
|
||||
@@ -31,9 +32,14 @@ func NewIVersionCheckHandler() IVersionCheckHandler {
|
||||
if BuildNumber == "" {
|
||||
logger.L().Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
|
||||
}
|
||||
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && boolutils.StringToBool(v) {
|
||||
|
||||
if v, ok := os.LookupEnv(CLIENT_ENV); ok && v != "" {
|
||||
Client = v
|
||||
}
|
||||
|
||||
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_ENV); ok && boolutils.StringToBool(v) {
|
||||
return NewVersionCheckHandlerMock()
|
||||
} else if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_DEPRECATED); ok && boolutils.StringToBool(v) {
|
||||
} else if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_DEPRECATED_ENV); ok && boolutils.StringToBool(v) {
|
||||
return NewVersionCheckHandlerMock()
|
||||
}
|
||||
return NewVersionCheckHandler()
|
||||
|
||||
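After the rename above, both KS_SKIP_UPDATE_CHECK and the deprecated KUBESCAPE_SKIP_UPDATE_CHECK environment variables make NewIVersionCheckHandler return the mock handler, and KS_CLIENT overrides the reported client name. A minimal sketch of opting out of the update check in-process (for example in air-gapped CI); setting the variable from Go is illustrative, it would normally be exported by the environment:

// Sketch: disable the version check for this process.
_ = os.Setenv(SKIP_VERSION_CHECK_ENV, "true") // i.e. KS_SKIP_UPDATE_CHECK
handler := NewIVersionCheckHandler()          // returns the no-op mock handler, no network call
_ = handler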
@@ -19,6 +19,7 @@ var (
|
||||
"KubeletInfo",
|
||||
"KubeProxyInfo",
|
||||
"ControlPlaneInfo",
|
||||
"CloudProviderInfo",
|
||||
}
|
||||
CloudResources = []string{
|
||||
"ClusterDescribe",
|
||||
|
||||
@@ -6,19 +6,28 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
TargetControlsInputs = "controls-inputs"
|
||||
TargetExceptions = "exceptions"
|
||||
TargetControl = "control"
|
||||
TargetFramework = "framework"
|
||||
TargetArtifacts = "artifacts"
|
||||
TargetAttackTracks = "attack-tracks"
|
||||
)
|
||||
|
||||
var downloadFunc = map[string]func(*metav1.DownloadInfo) error{
|
||||
"controls-inputs": downloadConfigInputs,
|
||||
"exceptions": downloadExceptions,
|
||||
"control": downloadControl,
|
||||
"framework": downloadFramework,
|
||||
"artifacts": downloadArtifacts,
|
||||
TargetControlsInputs: downloadConfigInputs,
|
||||
TargetExceptions: downloadExceptions,
|
||||
TargetControl: downloadControl,
|
||||
TargetFramework: downloadFramework,
|
||||
TargetArtifacts: downloadArtifacts,
|
||||
TargetAttackTracks: downloadAttackTracks,
|
||||
}
|
||||
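The map above now keys the download dispatchers by the exported Target* constants instead of bare strings, and adds attack tracks. A hedged sketch of how a download request might flow through it; the surrounding command layer is not shown in this hunk, so the lookup and the path value are illustrative:

// Sketch: dispatch a download by target, mirroring the map above.
info := &metav1.DownloadInfo{
	Target:     TargetAttackTracks,
	Identifier: "",               // empty: download everything for this target
	Path:       "/tmp/artifacts", // illustrative destination
}
if handler, ok := downloadFunc[info.Target]; ok {
	if err := handler(info); err != nil {
		logger.L().Error(err.Error())
	}
} else {
	logger.L().Error("unsupported download target", helpers.String("target", info.Target))
}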
|
||||
func DownloadSupportCommands() []string {
|
||||
@@ -70,6 +79,7 @@ func downloadArtifacts(downloadInfo *metav1.DownloadInfo) error {
|
||||
"controls-inputs": downloadConfigInputs,
|
||||
"exceptions": downloadExceptions,
|
||||
"framework": downloadFramework,
|
||||
"attack-tracks": downloadAttackTracks,
|
||||
}
|
||||
for artifact := range artifacts {
|
||||
if err := downloadArtifact(&metav1.DownloadInfo{Target: artifact, Path: downloadInfo.Path, FileName: fmt.Sprintf("%s.json", artifact)}, artifacts); err != nil {
|
||||
@@ -82,7 +92,7 @@ func downloadArtifacts(downloadInfo *metav1.DownloadInfo) error {
|
||||
func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())
|
||||
|
||||
controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetAccountID(), nil)
|
||||
controlsInputsGetter := getConfigInputsGetter(downloadInfo.Identifier, tenant.GetAccountID(), nil)
|
||||
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetContextName())
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -103,17 +113,14 @@ func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
|
||||
}
|
||||
|
||||
func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
|
||||
var err error
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())
|
||||
|
||||
exceptionsGetter := getExceptionsGetter("", tenant.GetAccountID(), nil)
|
||||
exceptions := []armotypes.PostureExceptionPolicy{}
|
||||
if tenant.GetAccountID() != "" {
|
||||
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetContextName())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
exceptions, err := exceptionsGetter.GetExceptions(tenant.GetContextName())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if downloadInfo.FileName == "" {
|
||||
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
|
||||
}
|
||||
@@ -126,13 +133,37 @@ func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadAttackTracks(downloadInfo *metav1.DownloadInfo) error {
|
||||
var err error
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())
|
||||
|
||||
attackTracksGetter := getAttackTracksGetter("", tenant.GetAccountID(), nil)
|
||||
|
||||
attackTracks, err := attackTracksGetter.GetAttackTracks()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if downloadInfo.FileName == "" {
|
||||
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
|
||||
}
|
||||
// save in file
|
||||
err = getter.SaveInFile(attackTracks, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logger.L().Success("Downloaded", helpers.String("attack tracks", downloadInfo.Target), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
if downloadInfo.Identifier == "" {
|
||||
// if framework name not specified - download all frameworks
|
||||
frameworks, err := g.GetFrameworks()
|
||||
if err != nil {
|
||||
@@ -149,9 +180,9 @@ func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
|
||||
// return fmt.Errorf("missing framework name")
|
||||
} else {
|
||||
if downloadInfo.FileName == "" {
|
||||
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Name)
|
||||
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Identifier)
|
||||
}
|
||||
framework, err := g.GetFramework(downloadInfo.Name)
|
||||
framework, err := g.GetFramework(downloadInfo.Identifier)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -174,25 +205,25 @@ func downloadControl(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
if downloadInfo.Identifier == "" {
|
||||
// TODO - support
|
||||
return fmt.Errorf("missing control name")
|
||||
return fmt.Errorf("missing control ID")
|
||||
}
|
||||
if downloadInfo.FileName == "" {
|
||||
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Name)
|
||||
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Identifier)
|
||||
}
|
||||
controls, err := g.GetControl(downloadInfo.Name)
|
||||
controls, err := g.GetControl(downloadInfo.Identifier)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to download control id '%s', %s", downloadInfo.Identifier, err.Error())
|
||||
}
|
||||
if controls == nil {
|
||||
return fmt.Errorf("failed to download control - received an empty objects")
|
||||
return fmt.Errorf("failed to download control id '%s' - received an empty objects", downloadInfo.Identifier)
|
||||
}
|
||||
downloadTo := filepath.Join(downloadInfo.Path, downloadInfo.FileName)
|
||||
err = getter.SaveInFile(controls, downloadTo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("name", downloadInfo.Name), helpers.String("path", downloadTo))
|
||||
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("ID", downloadInfo.Identifier), helpers.String("path", downloadTo))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,72 +0,0 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/fixhandler"
|
||||
)
|
||||
|
||||
const NoChangesApplied = "No changes were applied."
|
||||
const NoResourcesToFix = "No issues to fix."
|
||||
const ConfirmationQuestion = "Would you like to apply the changes to the files above? [y|n]: "
|
||||
|
||||
func (ks *Kubescape) Fix(fixInfo *metav1.FixInfo) error {
|
||||
logger.L().Info("Reading report file...")
|
||||
handler, err := fixhandler.NewFixHandler(fixInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resourcesToFix := handler.PrepareResourcesToFix()
|
||||
|
||||
if len(resourcesToFix) == 0 {
|
||||
logger.L().Info(NoResourcesToFix)
|
||||
return nil
|
||||
}
|
||||
|
||||
handler.PrintExpectedChanges(resourcesToFix)
|
||||
|
||||
if fixInfo.DryRun {
|
||||
logger.L().Info(NoChangesApplied)
|
||||
return nil
|
||||
}
|
||||
|
||||
if !fixInfo.NoConfirm && !userConfirmed() {
|
||||
logger.L().Info(NoChangesApplied)
|
||||
return nil
|
||||
}
|
||||
|
||||
updatedFilesCount, errors := handler.ApplyChanges(resourcesToFix)
|
||||
logger.L().Info(fmt.Sprintf("Fixed resources in %d files.", updatedFilesCount))
|
||||
|
||||
if len(errors) > 0 {
|
||||
for _, err := range errors {
|
||||
logger.L().Error(err.Error())
|
||||
}
|
||||
return fmt.Errorf("Failed to fix some resources, check the logs for more details")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func userConfirmed() bool {
|
||||
var input string
|
||||
|
||||
for {
|
||||
fmt.Printf(ConfirmationQuestion)
|
||||
if _, err := fmt.Scanln(&input); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
input = strings.ToLower(input)
|
||||
if input == "y" || input == "yes" {
|
||||
return true
|
||||
} else if input == "n" || input == "no" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2,14 +2,17 @@ package core
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/hostsensorutils"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resourcehandler"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
printerv2 "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/reporter"
|
||||
reporterv2 "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/reporter/v2"
|
||||
|
||||
@@ -45,8 +48,9 @@ func getExceptionsGetter(useExceptions string, accountID string, downloadRelease
|
||||
if downloadReleasedPolicy == nil {
|
||||
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
|
||||
}
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil {
|
||||
logger.L().Warning("failed to get exceptions from github release, this may affect the scanning results", helpers.Error(err))
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull attack tracks, fallback to cache
|
||||
logger.L().Warning("failed to get exceptions from github release, loading attack tracks from cache", helpers.Error(err))
|
||||
return getter.NewLoadPolicy([]string{getter.GetDefaultPath(cautils.LocalExceptionsFilename)})
|
||||
}
|
||||
return downloadReleasedPolicy
|
||||
|
||||
@@ -98,7 +102,7 @@ func getHostSensorHandler(scanInfo *cautils.ScanInfo, k8s *k8sinterface.Kubernet
|
||||
// we need to determined which controls needs host scanner
|
||||
if scanInfo.HostSensorEnabled.Get() == nil && hasHostSensorControls {
|
||||
scanInfo.HostSensorEnabled.SetBool(false) // default - do not run host scanner
|
||||
logger.L().Warning("Kubernetes cluster nodes scanning is disabled. This is required to collect valuable data for certain controls. You can enable it using the --enable-host-scan flag")
|
||||
logger.L().Warning("Kubernetes cluster nodes scanning is disabled. This is required to collect valuable data for certain controls. You can enable it using the --enable-host-scan flag")
|
||||
}
|
||||
if hostSensorVal := scanInfo.HostSensorEnabled.Get(); hostSensorVal != nil && *hostSensorVal {
|
||||
hostSensorHandler, err := hostsensorutils.NewHostSensorHandler(k8s, scanInfo.HostSensorYamlPath)
|
||||
@@ -121,18 +125,18 @@ func getFieldSelector(scanInfo *cautils.ScanInfo) resourcehandler.IFieldSelector
|
||||
return &resourcehandler.EmptySelector{}
|
||||
}
|
||||
|
||||
func policyIdentifierNames(pi []cautils.PolicyIdentifier) string {
|
||||
policiesNames := ""
|
||||
func policyIdentifierIdentities(pi []cautils.PolicyIdentifier) string {
|
||||
policiesIdentities := ""
|
||||
for i := range pi {
|
||||
policiesNames += pi[i].Name
|
||||
policiesIdentities += pi[i].Identifier
|
||||
if i+1 < len(pi) {
|
||||
policiesNames += ","
|
||||
policiesIdentities += ","
|
||||
}
|
||||
}
|
||||
if policiesNames == "" {
|
||||
policiesNames = "all"
|
||||
if policiesIdentities == "" {
|
||||
policiesIdentities = "all"
|
||||
}
|
||||
return policiesNames
|
||||
return policiesIdentities
|
||||
}
|
||||
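policyIdentifierIdentities hand-rolls the comma separation while renaming Name to Identifier. An equivalent formulation with strings.Join, shown only as a possible simplification under the assumption that strings is imported in this file; it is not what the diff itself does:

// Sketch: same behaviour using a slice and strings.Join.
func policyIdentifierIdentitiesJoined(pi []cautils.PolicyIdentifier) string {
	ids := make([]string, 0, len(pi))
	for i := range pi {
		ids = append(ids, pi[i].Identifier)
	}
	joined := strings.Join(ids, ",")
	if joined == "" {
		return "all"
	}
	return joined
}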
|
||||
// setSubmitBehavior - Setup the desired cluster behavior regarding submitting to the Kubescape Cloud BE
|
||||
@@ -178,6 +182,10 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
|
||||
scanInfo.Submit = true
|
||||
}
|
||||
|
||||
if scanInfo.CreateAccount {
|
||||
scanInfo.Submit = true
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// setPolicyGetter set the policy getter - local file/github release/Kubescape Cloud API
|
||||
@@ -239,7 +247,10 @@ func listFrameworksNames(policyGetter getter.IPolicyGetter) []string {
|
||||
return getter.NativeFrameworks
|
||||
}
|
||||
|
||||
func getAttackTracksGetter(accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IAttackTracksGetter {
|
||||
func getAttackTracksGetter(attackTracks, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IAttackTracksGetter {
|
||||
if len(attackTracks) > 0 {
|
||||
return getter.NewLoadPolicy([]string{attackTracks})
|
||||
}
|
||||
if accountID != "" {
|
||||
g := getter.GetKSCloudAPIConnector() // download attack tracks from Kubescape Cloud backend
|
||||
return g
|
||||
@@ -247,8 +258,20 @@ func getAttackTracksGetter(accountID string, downloadReleasedPolicy *getter.Down
|
||||
if downloadReleasedPolicy == nil {
|
||||
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
|
||||
}
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil {
|
||||
logger.L().Warning("failed to get attack tracks from github release, this may affect the scanning results", helpers.Error(err))
|
||||
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull attack tracks, fallback to cache
|
||||
logger.L().Warning("failed to get attack tracks from github release, loading attack tracks from cache", helpers.Error(err))
|
||||
return getter.NewLoadPolicy([]string{getter.GetDefaultPath(cautils.LocalAttackTracksFilename)})
|
||||
}
|
||||
return downloadReleasedPolicy
|
||||
}
|
||||
|
||||
// getUIPrinter returns a printer that will be used to print to the program’s UI (terminal)
|
||||
func getUIPrinter(verboseMode bool, formatVersion string, attackTree bool, viewType cautils.ViewTypes) printer.IPrinter {
|
||||
p := printerv2.NewPrettyPrinter(verboseMode, formatVersion, attackTree, viewType)
|
||||
|
||||
// Since the UI of the program is a CLI (Stdout), it means that it should always print to Stdout
|
||||
p.SetWriter(os.Stdout.Name())
|
||||
|
||||
return p
|
||||
}
|
||||
|
||||
39
core/core/initutils_test.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
)
|
||||
|
||||
func Test_getUIPrinter(t *testing.T) {
|
||||
scanInfo := &cautils.ScanInfo{
|
||||
FormatVersion: "v2",
|
||||
VerboseMode: true,
|
||||
View: "control",
|
||||
}
|
||||
wantFormatVersion := scanInfo.FormatVersion
|
||||
wantVerboseMode := scanInfo.VerboseMode
|
||||
wantViewType := cautils.ViewTypes(scanInfo.View)
|
||||
|
||||
got := getUIPrinter(scanInfo.VerboseMode, scanInfo.FormatVersion, scanInfo.PrintAttackTree, cautils.ViewTypes(scanInfo.View))
|
||||
|
||||
gotValue := reflect.ValueOf(got).Elem()
|
||||
gotFormatVersion := gotValue.FieldByName("formatVersion").String()
|
||||
gotVerboseMode := gotValue.FieldByName("verboseMode").Bool()
|
||||
gotViewType := cautils.ViewTypes(gotValue.FieldByName("viewType").String())
|
||||
|
||||
if gotFormatVersion != wantFormatVersion {
|
||||
t.Errorf("Got: %s, want: %s", gotFormatVersion, wantFormatVersion)
|
||||
}
|
||||
|
||||
if gotVerboseMode != wantVerboseMode {
|
||||
t.Errorf("Got: %t, want: %t", gotVerboseMode, wantVerboseMode)
|
||||
}
|
||||
|
||||
if gotViewType != wantViewType {
|
||||
t.Errorf("Got: %v, want: %v", gotViewType, wantViewType)
|
||||
}
|
||||
|
||||
}
|
||||
@@ -6,8 +6,11 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
v2 "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
var listFunc = map[string]func(*metav1.ListPolicies) ([]string, error){
|
||||
@@ -16,7 +19,7 @@ var listFunc = map[string]func(*metav1.ListPolicies) ([]string, error){
|
||||
"exceptions": listExceptions,
|
||||
}
|
||||
|
||||
var listFormatFunc = map[string]func(*metav1.ListPolicies, []string){
|
||||
var listFormatFunc = map[string]func(string, []string){
|
||||
"pretty-print": prettyPrintListFormat,
|
||||
"json": jsonListFormat,
|
||||
}
|
||||
@@ -29,14 +32,18 @@ func ListSupportActions() []string {
|
||||
return commands
|
||||
}
|
||||
func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
|
||||
if f, ok := listFunc[listPolicies.Target]; ok {
|
||||
policies, err := f(listPolicies)
|
||||
if policyListerFunc, ok := listFunc[listPolicies.Target]; ok {
|
||||
policies, err := policyListerFunc(listPolicies)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sort.Strings(policies)
|
||||
|
||||
listFormatFunc[listPolicies.Format](listPolicies, policies)
|
||||
if listFormatFunction, ok := listFormatFunc[listPolicies.Format]; ok {
|
||||
listFormatFunction(listPolicies.Target, policies)
|
||||
} else {
|
||||
return fmt.Errorf("Invalid format \"%s\", Supported formats: 'pretty-print'/'json' ", listPolicies.Format)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -45,20 +52,16 @@ func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
|
||||
|
||||
func listFrameworks(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
tenant := getTenantConfig(&listPolicies.Credentials, "", "", getKubernetesApi()) // change k8sinterface
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
|
||||
policyGetter := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
|
||||
|
||||
return listFrameworksNames(g), nil
|
||||
return listFrameworksNames(policyGetter), nil
|
||||
}
|
||||
|
||||
func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
tenant := getTenantConfig(&listPolicies.Credentials, "", "", getKubernetesApi()) // change k8sinterface
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
|
||||
l := getter.ListName
|
||||
if listPolicies.ListIDs {
|
||||
l = getter.ListID
|
||||
}
|
||||
return g.ListControls(l)
|
||||
policyGetter := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
|
||||
return policyGetter.ListControls()
|
||||
}
|
||||
|
||||
func listExceptions(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
@@ -77,12 +80,73 @@ func listExceptions(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
return exceptionsNames, nil
|
||||
}
|
||||
|
||||
func prettyPrintListFormat(listPolicies *metav1.ListPolicies, policies []string) {
|
||||
sep := "\n * "
|
||||
fmt.Printf("Supported %s:%s%s\n", listPolicies.Target, sep, strings.Join(policies, sep))
|
||||
func prettyPrintListFormat(targetPolicy string, policies []string) {
|
||||
if targetPolicy == "controls" {
|
||||
prettyPrintControls(policies)
|
||||
return
|
||||
}
|
||||
|
||||
header := fmt.Sprintf("Supported %s", targetPolicy)
|
||||
|
||||
policyTable := tablewriter.NewWriter(printer.GetWriter(""))
|
||||
policyTable.SetAutoWrapText(true)
|
||||
policyTable.SetHeader([]string{header})
|
||||
policyTable.SetHeaderLine(true)
|
||||
policyTable.SetRowLine(true)
|
||||
data := v2.Matrix{}
|
||||
|
||||
controlRows := generatePolicyRows(policies)
|
||||
data = append(data, controlRows...)
|
||||
|
||||
policyTable.SetAlignment(tablewriter.ALIGN_CENTER)
|
||||
policyTable.AppendBulk(data)
|
||||
policyTable.Render()
|
||||
}
|
||||
|
||||
func jsonListFormat(listPolicies *metav1.ListPolicies, policies []string) {
|
||||
func jsonListFormat(targetPolicy string, policies []string) {
|
||||
j, _ := json.MarshalIndent(policies, "", " ")
|
||||
|
||||
fmt.Printf("%s\n", j)
|
||||
}
|
||||
|
||||
func prettyPrintControls(policies []string) {
|
||||
controlsTable := tablewriter.NewWriter(printer.GetWriter(""))
|
||||
controlsTable.SetAutoWrapText(true)
|
||||
controlsTable.SetHeader([]string{"Control ID", "Control Name", "Docs", "Frameworks"})
|
||||
controlsTable.SetHeaderLine(true)
|
||||
controlsTable.SetRowLine(true)
|
||||
data := v2.Matrix{}
|
||||
|
||||
controlRows := generateControlRows(policies)
|
||||
data = append(data, controlRows...)
|
||||
|
||||
controlsTable.AppendBulk(data)
|
||||
controlsTable.Render()
|
||||
}
|
||||
|
||||
func generateControlRows(policies []string) [][]string {
|
||||
rows := [][]string{}
|
||||
|
||||
for _, control := range policies {
|
||||
idAndControlAndFrameworks := strings.Split(control, "|")
|
||||
id, control, framework := idAndControlAndFrameworks[0], idAndControlAndFrameworks[1], idAndControlAndFrameworks[2]
|
||||
|
||||
docs := cautils.GetControlLink(id)
|
||||
|
||||
currentRow := []string{id, control, docs, framework}
|
||||
|
||||
rows = append(rows, currentRow)
|
||||
}
|
||||
|
||||
return rows
|
||||
}
|
||||
|
||||
func generatePolicyRows(policies []string) [][]string {
|
||||
rows := [][]string{}
|
||||
|
||||
for _, policy := range policies {
|
||||
currentRow := []string{policy}
|
||||
rows = append(rows, currentRow)
|
||||
}
|
||||
return rows
|
||||
}
|
||||
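prettyPrintListFormat and prettyPrintControls above render the lists with olekukonko/tablewriter. A self-contained sketch of the same rendering approach, using only calls that appear in this diff; the header and the single row are made up for illustration:

package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Control ID", "Control Name", "Docs", "Frameworks"})
	table.SetHeaderLine(true)
	table.SetRowLine(true)
	table.SetAutoWrapText(true)
	table.AppendBulk([][]string{
		{"C-0006", "Allowed hostPath", "docs-link (illustrative)", "NSA"},
	})
	table.Render()
}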
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
@@ -27,7 +27,8 @@ type componentInterfaces struct {
|
||||
tenantConfig cautils.ITenantConfig
|
||||
resourceHandler resourcehandler.IResourceHandler
|
||||
report reporter.IReport
|
||||
printerHandler printer.IPrinter
|
||||
outputPrinters []printer.IPrinter
|
||||
uiPrinter printer.IPrinter
|
||||
hostSensorHandler hostsensorutils.IHostSensor
|
||||
}
|
||||
|
||||
@@ -54,12 +55,16 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
if err := tenantConfig.SetTenant(); err != nil {
|
||||
logger.L().Error(err.Error())
|
||||
}
|
||||
|
||||
if scanInfo.OmitRawResources {
|
||||
logger.L().Warning("omit-raw-resources flag will be ignored in submit mode")
|
||||
}
|
||||
}
|
||||
|
||||
// ================== version testing ======================================
|
||||
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", cautils.ScanningContextToScanningScope(scanInfo.GetScanningContext())))
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierIdentities(scanInfo.PolicyIdentifier), "", cautils.ScanningContextToScanningScope(scanInfo.GetScanningContext())))
|
||||
|
||||
// ================== setup host scanner object ======================================
|
||||
|
||||
@@ -89,9 +94,17 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
// reporting behavior - setup reporter
|
||||
reportHandler := getReporter(tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan, scanInfo.GetScanningContext())
|
||||
|
||||
// setup printer
|
||||
printerHandler := resultshandling.NewPrinter(scanInfo.Format, scanInfo.FormatVersion, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
|
||||
printerHandler.SetWriter(scanInfo.Output)
|
||||
// setup printers
|
||||
formats := scanInfo.Formats()
|
||||
|
||||
outputPrinters := make([]printer.IPrinter, 0)
|
||||
for _, format := range formats {
|
||||
printerHandler := resultshandling.NewPrinter(format, scanInfo.FormatVersion, scanInfo.PrintAttackTree, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
|
||||
printerHandler.SetWriter(scanInfo.Output)
|
||||
outputPrinters = append(outputPrinters, printerHandler)
|
||||
}
|
||||
|
||||
uiPrinter := getUIPrinter(scanInfo.VerboseMode, scanInfo.FormatVersion, scanInfo.PrintAttackTree, cautils.ViewTypes(scanInfo.View))
|
||||
|
||||
// ================== return interface ======================================
|
||||
|
||||
@@ -99,7 +112,8 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
tenantConfig: tenantConfig,
|
||||
resourceHandler: resourceHandler,
|
||||
report: reportHandler,
|
||||
printerHandler: printerHandler,
|
||||
outputPrinters: outputPrinters,
|
||||
uiPrinter: uiPrinter,
|
||||
hostSensorHandler: hostSensorHandler,
|
||||
}
|
||||
}
|
||||
@@ -123,7 +137,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetTenantEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
|
||||
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
|
||||
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
|
||||
scanInfo.Getters.AttackTracksGetter = getAttackTracksGetter(interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
|
||||
scanInfo.Getters.AttackTracksGetter = getAttackTracksGetter(scanInfo.AttackTracks, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
|
||||
|
||||
// TODO - list supported frameworks/controls
|
||||
if scanInfo.ScanAll {
|
||||
@@ -137,7 +151,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
}
|
||||
}()
|
||||
|
||||
resultsHandling := resultshandling.NewResultsHandler(interfaces.report, interfaces.printerHandler)
|
||||
resultsHandling := resultshandling.NewResultsHandler(interfaces.report, interfaces.outputPrinters, interfaces.uiPrinter)
|
||||
|
||||
// ===================== policies & resources =====================
|
||||
policyHandler := policyhandler.NewPolicyHandler(interfaces.resourceHandler)
|
||||
@@ -156,7 +170,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
|
||||
// ======================== prioritization ===================
|
||||
|
||||
if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(scanInfo.Getters.AttackTracksGetter); err != nil {
|
||||
if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(scanInfo.Getters.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
|
||||
logger.L().Warning("failed to get attack tracks, this may affect the scanning results", helpers.Error(err))
|
||||
} else if err := priotizationHandler.PrioritizeResources(scanData); err != nil {
|
||||
return resultsHandling, fmt.Errorf("%w", err)
|
||||
|
||||
@@ -6,6 +6,6 @@ type DownloadInfo struct {
|
||||
Path string // directory to save artifact. Default is "~/.kubescape/"
|
||||
FileName string // can be empty
|
||||
Target string // type of artifact to download
|
||||
Name string // name of artifact to download
|
||||
Identifier string // identifier of artifact to download
|
||||
Credentials cautils.Credentials
|
||||
}
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
package v1
|
||||
|
||||
type FixInfo struct {
|
||||
ReportFile string // path to report file (mandatory)
|
||||
NoConfirm bool // if true, no confirmation will be given to the user before applying the fix
|
||||
SkipUserValues bool // if true, user values will not be changed
|
||||
DryRun bool // if true, no changes will be applied
|
||||
}
|
||||
@@ -4,7 +4,6 @@ import "github.com/kubescape/kubescape/v2/core/cautils"
|
||||
|
||||
type ListPolicies struct {
|
||||
Target string
|
||||
ListIDs bool
|
||||
Format string
|
||||
Credentials cautils.Credentials
|
||||
}
|
||||
|
||||
@@ -25,7 +25,4 @@ type IKubescape interface {
|
||||
|
||||
// delete
|
||||
DeleteExceptions(deleteexceptions *metav1.DeleteExceptions) error
|
||||
|
||||
// fix
|
||||
Fix(fixInfo *metav1.FixInfo) error
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
var mockControl_0006 = `{"guid":"","name":"Allowed hostPath","attributes":{"armoBuiltin":true},"id":"C-0006","controlID":"C-0006","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if 
any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6}`
var mockControl_0006 = `{"guid":"","name":"HostPath mount","attributes":{"armoBuiltin":true},"id":"C-0048","controlID":"C-0048","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if 
any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6}`
var mockControl_0044 = `{"guid":"","name":"Container hostPort","attributes":{"armoBuiltin":true},"id":"C-0044","controlID":"C-0044","creationTime":"","description":"Configuring hostPort limits you to a particular port, and if any two workloads that specify the same HostPort they cannot be deployed to the same node. Therefore, if the number of replica of such workload is higher than the number of nodes, the deployment will fail.","remediation":"Avoid usage of hostPort unless it is absolutely necessary. Use NodePort / ClusterIP instead.","rules":[{"guid":"","name":"container-hostPort","attributes":{"armoBuiltin":true},"creationTime":"","rule":"package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbegginingOfPath := \"spec.\"\n\tpath := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nisHostPort(container, i, begginingOfPath) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [begginingOfPath, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod","CronJob"]}],"ruleDependencies":[],"configInputs":null,"controlConfigInputs":null,"description":"fails if container has hostPort","remediation":"Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP","ruleQuery":"armo_builtins"}],"rulesIDs":[""],"baseScore":4}`
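The mock controls above are stored as raw JSON strings. Below is a minimal, hypothetical sketch of how such a string could be decoded into a control object for tests; it assumes the JSON shape matches reporthandling.Control from the opa-utils package already referenced in these mocks, and the package and helper names are illustrative only, not taken from this diff.

package mocks

import (
	"encoding/json"

	"github.com/kubescape/opa-utils/reporthandling"
)

// controlFromMock is a hypothetical test helper: it decodes one of the mock
// control JSON strings above (e.g. mockControl_0044) into a reporthandling.Control.
// Panicking on malformed input is acceptable here because the input is a
// hard-coded test fixture.
func controlFromMock(mockJSON string) *reporthandling.Control {
	control := &reporthandling.Control{}
	if err := json.Unmarshal([]byte(mockJSON), control); err != nil {
		panic(err)
	}
	return control
}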
@@ -31,7 +31,7 @@ func MockFramework_0013() *reporthandling.Framework {
	return fw
}

// MockFramework_0006_0013 mock control 0013 and control 0006 - "Non-root containers" and "Allowed hostPath"
// MockFramework_0006_0013 mock control 0013 and control 0006 - "Non-root containers" and "HostPath mount"
func MockFramework_0006_0013() *reporthandling.Framework {
	fw := &reporthandling.Framework{
		PortalBase: armotypes.PortalBase{

@@ -50,7 +50,7 @@ func randSeq(n int, bank []rune) string {

	b := make([]rune, n)
	for i := range b {
		b[i] = bank[rand.Intn(len(bank))]
		b[i] = bank[rand.Intn(len(bank))] //nolint:gosec
	}
	return string(b)
}
@@ -60,7 +60,7 @@ func GenerateContainerScanLayer(layer *ScanResultLayer) {
	layer.LayerHash = randSeq(32, hash)
	layer.Vulnerabilities = make(VulnerabilitiesList, 0)
	layer.Packages = make(LinuxPkgs, 0)
	vuls := rand.Intn(10) + 1
	vuls := rand.Intn(10) + 1 //nolint:gosec

	for i := 0; i < vuls; i++ {
		v := Vulnerability{}

@@ -64,7 +64,7 @@ func (pkgs *LinuxPkgs) UnmarshalJSONArray(dec *gojay.Decoder) error {
	return nil
}

//--------Vul fixed in----------------------------------
// --------Vul fixed in----------------------------------
func (fx *FixedIn) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {

	switch key {

@@ -71,19 +71,19 @@ type PackageFile struct {

// types to provide unmarshalling:

//VulnerabilitiesList -s.e
// VulnerabilitiesList -s.e
type LayersList []ScanResultLayer

//VulnerabilitiesList -s.e
// VulnerabilitiesList -s.e
type VulnerabilitiesList []Vulnerability

//LinuxPkgs - slice of linux pkgs
// LinuxPkgs - slice of linux pkgs
type LinuxPkgs []LinuxPackage

//VulFixes - information bout when/how this vul was fixed
// VulFixes - information bout when/how this vul was fixed
type VulFixes []FixedIn

//PkgFiles - slice of files belong to specific pkg
// PkgFiles - slice of files belong to specific pkg
type PkgFiles []PackageFile

func (v *ScanResultReport) AsFNVHash() string {

@@ -1,63 +0,0 @@
package fixhandler

import (
	"github.com/armosec/armoapi-go/armotypes"
	metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
	"github.com/kubescape/opa-utils/reporthandling"
	reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
	"gopkg.in/yaml.v3"
)

// FixHandler is a struct that holds the information of the report to be fixed
type FixHandler struct {
	fixInfo       *metav1.FixInfo
	reportObj     *reporthandlingv2.PostureReport
	localBasePath string
}

// ResourceFixInfo is a struct that holds the information about the resource that needs to be fixed
type ResourceFixInfo struct {
	YamlExpressions map[string]*armotypes.FixPath
	Resource        *reporthandling.Resource
	FilePath        string
	DocumentIndex   int
}

// NodeInfo holds extra information about the node
type nodeInfo struct {
	node   *yaml.Node
	parent *yaml.Node

	// position of the node among siblings
	index int
}

// FixInfoMetadata holds the arguments "getFixInfo" function needs to pass to the
// functions it uses
type fixInfoMetadata struct {
	originalList        *[]nodeInfo
	fixedList           *[]nodeInfo
	originalListTracker int
	fixedListTracker    int
	contentToAdd        *[]contentToAdd
	linesToRemove       *[]linesToRemove
}

// ContentToAdd holds the information about where to insert the new changes in the existing yaml file
type contentToAdd struct {
	// Line where the fix should be applied to
	line int
	// Content is a string representation of the YAML node that describes a suggested fix
	content string
}

// LinesToRemove holds the line numbers to remove from the existing yaml file
type linesToRemove struct {
	startLine int
	endLine   int
}

type fileFixInfo struct {
	contentsToAdd *[]contentToAdd
	linesToRemove *[]linesToRemove
}
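Before the handler implementation that follows, here is a brief, hypothetical usage sketch showing how the exported pieces defined in this package (NewFixHandler, PrepareResourcesToFix, PrintExpectedChanges, ApplyChanges) appear to fit together. The import path and the example report file name are assumptions and are not taken from this diff; the function signatures are those shown in the deleted file below.

package main

import (
	"fmt"

	metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
	// assumed import path for the fixhandler package shown in this diff
	"github.com/kubescape/kubescape/v2/core/pkg/fixhandler"
)

func main() {
	// ReportFile is read by NewFixHandler; the path here is only an example.
	fixInfo := &metav1.FixInfo{ReportFile: "results.json"}

	handler, err := fixhandler.NewFixHandler(fixInfo)
	if err != nil {
		fmt.Println("failed to create fix handler:", err)
		return
	}

	// Collect failed resources with fixable paths, show the planned edits,
	// then write the fixes back to the scanned YAML files.
	resourcesToFix := handler.PrepareResourcesToFix()
	handler.PrintExpectedChanges(resourcesToFix)

	updatedFiles, errs := handler.ApplyChanges(resourcesToFix)
	fmt.Printf("updated %d files, %d errors\n", updatedFiles, len(errs))
}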
@@ -1,346 +0,0 @@
|
||||
package fixhandler
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes/localworkload"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
"github.com/mikefarah/yq/v4/pkg/yqlib"
|
||||
"gopkg.in/op/go-logging.v1"
|
||||
)
|
||||
|
||||
const UserValuePrefix = "YOUR_"
|
||||
|
||||
func NewFixHandler(fixInfo *metav1.FixInfo) (*FixHandler, error) {
|
||||
jsonFile, err := os.Open(fixInfo.ReportFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer jsonFile.Close()
|
||||
byteValue, _ := ioutil.ReadAll(jsonFile)
|
||||
|
||||
var reportObj reporthandlingv2.PostureReport
|
||||
if err = json.Unmarshal(byteValue, &reportObj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = isSupportedScanningTarget(&reportObj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localPath := getLocalPath(&reportObj)
|
||||
if _, err = os.Stat(localPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
backendLoggerLeveled := logging.AddModuleLevel(logging.NewLogBackend(logger.L().GetWriter(), "", 0))
|
||||
backendLoggerLeveled.SetLevel(logging.ERROR, "")
|
||||
yqlib.GetLogger().SetBackend(backendLoggerLeveled)
|
||||
|
||||
return &FixHandler{
|
||||
fixInfo: fixInfo,
|
||||
reportObj: &reportObj,
|
||||
localBasePath: localPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func isSupportedScanningTarget(report *reporthandlingv2.PostureReport) error {
|
||||
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.GitLocal || report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.Directory {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unsupported scanning target. Only local git and directory scanning targets are supported")
|
||||
}
|
||||
|
||||
func getLocalPath(report *reporthandlingv2.PostureReport) string {
|
||||
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.GitLocal {
|
||||
return report.Metadata.ContextMetadata.RepoContextMetadata.LocalRootPath
|
||||
}
|
||||
|
||||
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.Directory {
|
||||
return report.Metadata.ContextMetadata.DirectoryContextMetadata.BasePath
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (h *FixHandler) buildResourcesMap() map[string]*reporthandling.Resource {
|
||||
resourceIdToRawResource := make(map[string]*reporthandling.Resource)
|
||||
for i := range h.reportObj.Resources {
|
||||
resourceIdToRawResource[h.reportObj.Resources[i].GetID()] = &h.reportObj.Resources[i]
|
||||
}
|
||||
for i := range h.reportObj.Results {
|
||||
if h.reportObj.Results[i].RawResource == nil {
|
||||
continue
|
||||
}
|
||||
resourceIdToRawResource[h.reportObj.Results[i].RawResource.GetID()] = h.reportObj.Results[i].RawResource
|
||||
}
|
||||
|
||||
return resourceIdToRawResource
|
||||
}
|
||||
|
||||
func (h *FixHandler) getPathFromRawResource(obj map[string]interface{}) string {
|
||||
if localworkload.IsTypeLocalWorkload(obj) {
|
||||
localwork := localworkload.NewLocalWorkload(obj)
|
||||
return localwork.GetPath()
|
||||
} else if objectsenvelopes.IsTypeRegoResponseVector(obj) {
|
||||
regoResponseVectorObject := objectsenvelopes.NewRegoResponseVectorObject(obj)
|
||||
relatedObjects := regoResponseVectorObject.GetRelatedObjects()
|
||||
for _, relatedObject := range relatedObjects {
|
||||
if localworkload.IsTypeLocalWorkload(relatedObject.GetObject()) {
|
||||
return relatedObject.(*localworkload.LocalWorkload).GetPath()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (h *FixHandler) PrepareResourcesToFix() []ResourceFixInfo {
|
||||
resourceIdToResource := h.buildResourcesMap()
|
||||
|
||||
resourcesToFix := make([]ResourceFixInfo, 0)
|
||||
for _, result := range h.reportObj.Results {
|
||||
if !result.GetStatus(nil).IsFailed() {
|
||||
continue
|
||||
}
|
||||
|
||||
resourceID := result.ResourceID
|
||||
resourceObj := resourceIdToResource[resourceID]
|
||||
resourcePath := h.getPathFromRawResource(resourceObj.GetObject())
|
||||
if resourcePath == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if resourceObj.Source == nil || resourceObj.Source.FileType != reporthandling.SourceTypeYaml {
|
||||
continue
|
||||
}
|
||||
|
||||
relativePath, documentIndex, err := h.getFilePathAndIndex(resourcePath)
|
||||
if err != nil {
|
||||
logger.L().Error("Skipping invalid resource path: " + resourcePath)
|
||||
continue
|
||||
}
|
||||
|
||||
absolutePath := path.Join(h.localBasePath, relativePath)
|
||||
if _, err := os.Stat(absolutePath); err != nil {
|
||||
logger.L().Error("Skipping missing file: " + absolutePath)
|
||||
continue
|
||||
}
|
||||
|
||||
rfi := ResourceFixInfo{
|
||||
FilePath: absolutePath,
|
||||
Resource: resourceObj,
|
||||
YamlExpressions: make(map[string]*armotypes.FixPath, 0),
|
||||
DocumentIndex: documentIndex,
|
||||
}
|
||||
|
||||
for i := range result.AssociatedControls {
|
||||
if result.AssociatedControls[i].GetStatus(nil).IsFailed() {
|
||||
rfi.addYamlExpressionsFromResourceAssociatedControl(documentIndex, &result.AssociatedControls[i], h.fixInfo.SkipUserValues)
|
||||
}
|
||||
}
|
||||
|
||||
if len(rfi.YamlExpressions) > 0 {
|
||||
resourcesToFix = append(resourcesToFix, rfi)
|
||||
}
|
||||
}
|
||||
|
||||
return resourcesToFix
|
||||
}
|
||||
|
||||
func (h *FixHandler) PrintExpectedChanges(resourcesToFix []ResourceFixInfo) {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("The following changes will be applied:\n")
|
||||
|
||||
for _, resourceFixInfo := range resourcesToFix {
|
||||
sb.WriteString(fmt.Sprintf("File: %s\n", resourceFixInfo.FilePath))
|
||||
sb.WriteString(fmt.Sprintf("Resource: %s\n", resourceFixInfo.Resource.GetName()))
|
||||
sb.WriteString(fmt.Sprintf("Kind: %s\n", resourceFixInfo.Resource.GetKind()))
|
||||
sb.WriteString("Changes:\n")
|
||||
|
||||
i := 1
|
||||
for _, fixPath := range resourceFixInfo.YamlExpressions {
|
||||
sb.WriteString(fmt.Sprintf("\t%d) %s = %s\n", i, (*fixPath).Path, (*fixPath).Value))
|
||||
i++
|
||||
}
|
||||
sb.WriteString("\n------\n")
|
||||
}
|
||||
|
||||
logger.L().Info(sb.String())
|
||||
}
|
||||
|
||||
func (h *FixHandler) ApplyChanges(resourcesToFix []ResourceFixInfo) (int, []error) {
|
||||
updatedFiles := make(map[string]bool)
|
||||
errors := make([]error, 0)
|
||||
|
||||
fileYamlExpressions := h.getFileYamlExpressions(resourcesToFix)
|
||||
|
||||
for filepath, yamlExpression := range fileYamlExpressions {
|
||||
fileAsString, err := getFileString(filepath)
|
||||
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
|
||||
fixedYamlString, err := h.ApplyFixToContent(fileAsString, yamlExpression)
|
||||
|
||||
if err != nil {
|
||||
errors = append(errors, fmt.Errorf("Failed to fix file %s: %w ", filepath, err))
|
||||
continue
|
||||
} else {
|
||||
updatedFiles[filepath] = true
|
||||
}
|
||||
|
||||
err = writeFixesToFile(filepath, fixedYamlString)
|
||||
|
||||
if err != nil {
|
||||
logger.L().Error(fmt.Sprintf("Failed to write fixes to file %s, %v", filepath, err.Error()))
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
return len(updatedFiles), errors
|
||||
}
|
||||
|
||||
func (h *FixHandler) getFilePathAndIndex(filePathWithIndex string) (filePath string, documentIndex int, err error) {
|
||||
splittedPath := strings.Split(filePathWithIndex, ":")
|
||||
if len(splittedPath) <= 1 {
|
||||
return "", 0, fmt.Errorf("expected to find ':' in file path")
|
||||
}
|
||||
|
||||
filePath = splittedPath[0]
|
||||
if documentIndex, err := strconv.Atoi(splittedPath[1]); err != nil {
|
||||
return "", 0, err
|
||||
} else {
|
||||
return filePath, documentIndex, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (h *FixHandler) ApplyFixToContent(yamlAsString, yamlExpression string) (fixedString string, err error) {
|
||||
yamlLines := strings.Split(yamlAsString, "\n")
|
||||
|
||||
originalRootNodes, err := decodeDocumentRoots(yamlAsString)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
fixedRootNodes, err := getFixedNodes(yamlAsString, yamlExpression)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
fileFixInfo := getFixInfo(originalRootNodes, fixedRootNodes)
|
||||
|
||||
fixedYamlLines := getFixedYamlLines(yamlLines, fileFixInfo)
|
||||
|
||||
fixedString = getStringFromSlice(fixedYamlLines)
|
||||
|
||||
return fixedString, nil
|
||||
}
|
||||
|
||||
func (h *FixHandler) getFileYamlExpressions(resourcesToFix []ResourceFixInfo) map[string]string {
|
||||
fileYamlExpressions := make(map[string]string, 0)
|
||||
for _, resourceToFix := range resourcesToFix {
|
||||
singleExpression := reduceYamlExpressions(&resourceToFix)
|
||||
resourceFilePath := resourceToFix.FilePath
|
||||
|
||||
if _, pathExistsInMap := fileYamlExpressions[resourceFilePath]; !pathExistsInMap {
|
||||
fileYamlExpressions[resourceFilePath] = singleExpression
|
||||
} else {
|
||||
fileYamlExpressions[resourceFilePath] = joinStrings(fileYamlExpressions[resourceFilePath], " | ", singleExpression)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return fileYamlExpressions
|
||||
}
|
||||
|
||||
func (rfi *ResourceFixInfo) addYamlExpressionsFromResourceAssociatedControl(documentIndex int, ac *resourcesresults.ResourceAssociatedControl, skipUserValues bool) {
|
||||
for _, rule := range ac.ResourceAssociatedRules {
|
||||
if !rule.GetStatus(nil).IsFailed() {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, rulePaths := range rule.Paths {
|
||||
if rulePaths.FixPath.Path == "" {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(rulePaths.FixPath.Value, UserValuePrefix) && skipUserValues {
|
||||
continue
|
||||
}
|
||||
|
||||
yamlExpression := fixPathToValidYamlExpression(rulePaths.FixPath.Path, rulePaths.FixPath.Value, documentIndex)
|
||||
rfi.YamlExpressions[yamlExpression] = &rulePaths.FixPath
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// reduceYamlExpressions reduces the number of yaml expressions to a single one
|
||||
func reduceYamlExpressions(resource *ResourceFixInfo) string {
|
||||
expressions := make([]string, 0, len(resource.YamlExpressions))
|
||||
for expr := range resource.YamlExpressions {
|
||||
expressions = append(expressions, expr)
|
||||
}
|
||||
|
||||
return strings.Join(expressions, " | ")
|
||||
}
|
||||
|
||||
func fixPathToValidYamlExpression(fixPath, value string, documentIndexInYaml int) string {
|
||||
isStringValue := true
|
||||
if _, err := strconv.ParseBool(value); err == nil {
|
||||
isStringValue = false
|
||||
} else if _, err := strconv.ParseFloat(value, 64); err == nil {
|
||||
isStringValue = false
|
||||
} else if _, err := strconv.Atoi(value); err == nil {
|
||||
isStringValue = false
|
||||
}
|
||||
|
||||
// Strings should be quoted
|
||||
if isStringValue {
|
||||
value = fmt.Sprintf("\"%s\"", value)
|
||||
}
|
||||
|
||||
// select document index and add a dot for the root node
|
||||
return fmt.Sprintf("select(di==%d).%s |= %s", documentIndexInYaml, fixPath, value)
|
||||
}
|
||||
|
||||
func joinStrings(inputStrings ...string) string {
|
||||
return strings.Join(inputStrings, "")
|
||||
}
|
||||
|
||||
func getFileString(filepath string) (string, error) {
|
||||
bytes, err := ioutil.ReadFile(filepath)
|
||||
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error reading file %s", filepath)
|
||||
}
|
||||
|
||||
return string(bytes), nil
|
||||
}
|
||||
|
||||
func writeFixesToFile(filepath, content string) error {
|
||||
err := ioutil.WriteFile(filepath, []byte(content), 0644)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error writing fixes to file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,248 +0,0 @@
|
||||
package fixhandler
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
"github.com/mikefarah/yq/v4/pkg/yqlib"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gopkg.in/op/go-logging.v1"
|
||||
)
|
||||
|
||||
type indentationTestCase struct {
|
||||
inputFile string
|
||||
yamlExpression string
|
||||
expectedFile string
|
||||
}
|
||||
|
||||
func NewFixHandlerMock() (*FixHandler, error) {
|
||||
backendLoggerLeveled := logging.AddModuleLevel(logging.NewLogBackend(logger.L().GetWriter(), "", 0))
|
||||
backendLoggerLeveled.SetLevel(logging.ERROR, "")
|
||||
yqlib.GetLogger().SetBackend(backendLoggerLeveled)
|
||||
|
||||
return &FixHandler{
|
||||
fixInfo: &metav1.FixInfo{},
|
||||
reportObj: &reporthandlingv2.PostureReport{},
|
||||
localBasePath: "",
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getTestdataPath() string {
|
||||
currentDir, _ := os.Getwd()
|
||||
return filepath.Join(currentDir, "testdata")
|
||||
}
|
||||
|
||||
func getTestCases() []indentationTestCase {
|
||||
indentationTestCases := []indentationTestCase{
|
||||
// Insertion Scenarios
|
||||
{
|
||||
"inserts/tc-01-00-input-mapping-insert-mapping.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false",
|
||||
"inserts/tc-01-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-02-00-input-mapping-insert-mapping-with-list.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"NET_RAW\"]",
|
||||
"inserts/tc-02-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-03-00-input-list-append-scalar.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"SYS_ADM\"]",
|
||||
"inserts/tc-03-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-04-00-input-multiple-inserts.yaml",
|
||||
|
||||
`select(di==0).spec.template.spec.securityContext.allowPrivilegeEscalation |= false |
|
||||
select(di==0).spec.template.spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"] |
|
||||
select(di==0).spec.template.spec.containers[0].securityContext.seccompProfile.type |= "RuntimeDefault" |
|
||||
select(di==0).spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation |= false |
|
||||
select(di==0).spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem |= true`,
|
||||
|
||||
"inserts/tc-04-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-05-00-input-comment-blank-line-single-insert.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false",
|
||||
"inserts/tc-05-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-06-00-input-list-append-scalar-oneline.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"SYS_ADM\"]",
|
||||
"inserts/tc-06-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-07-00-input-multiple-documents.yaml",
|
||||
|
||||
`select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false |
|
||||
select(di==1).spec.containers[0].securityContext.allowPrivilegeEscalation |= false`,
|
||||
|
||||
"inserts/tc-07-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-08-00-input-mapping-insert-mapping-indented.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"NET_RAW\"]",
|
||||
"inserts/tc-08-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-09-00-input-list-insert-new-mapping-indented.yaml",
|
||||
`select(di==0).spec.containers += {"name": "redis", "image": "redis"}`,
|
||||
"inserts/tc-09-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-10-00-input-list-insert-new-mapping.yaml",
|
||||
`select(di==0).spec.containers += {"name": "redis", "image": "redis"}`,
|
||||
"inserts/tc-10-01-expected.yaml",
|
||||
},
|
||||
|
||||
// Removal Scenarios
|
||||
{
|
||||
"removals/tc-01-00-input.yaml",
|
||||
"del(select(di==0).spec.containers[0].securityContext)",
|
||||
"removals/tc-01-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"removals/tc-02-00-input.yaml",
|
||||
"del(select(di==0).spec.containers[1])",
|
||||
"removals/tc-02-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"removals/tc-03-00-input.yaml",
|
||||
"del(select(di==0).spec.containers[0].securityContext.capabilities.drop[1])",
|
||||
"removals/tc-03-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"removes/tc-04-00-input.yaml",
|
||||
`del(select(di==0).spec.containers[0].securityContext) |
|
||||
del(select(di==1).spec.containers[1])`,
|
||||
"removes/tc-04-01-expected.yaml",
|
||||
},
|
||||
|
||||
// Replace Scenarios
|
||||
{
|
||||
"replaces/tc-01-00-input.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.runAsRoot |= false",
|
||||
"replaces/tc-01-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"replaces/tc-02-00-input.yaml",
|
||||
`select(di==0).spec.containers[0].securityContext.capabilities.drop[0] |= "SYS_ADM" |
|
||||
select(di==0).spec.containers[0].securityContext.capabilities.add[0] |= "NET_RAW"`,
|
||||
"replaces/tc-02-01-expected.yaml",
|
||||
},
|
||||
|
||||
// Hybrid Scenarios
|
||||
{
|
||||
"hybrids/tc-01-00-input.yaml",
|
||||
`del(select(di==0).spec.containers[0].securityContext) |
|
||||
select(di==0).spec.securityContext.runAsRoot |= false`,
|
||||
"hybrids/tc-01-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"hybrids/tc-02-00-input-indented-list.yaml",
|
||||
`del(select(di==0).spec.containers[0].securityContext) |
|
||||
select(di==0).spec.securityContext.runAsRoot |= false`,
|
||||
"hybrids/tc-02-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"hybrids/tc-03-00-input-comments.yaml",
|
||||
`del(select(di==0).spec.containers[0].securityContext) |
|
||||
select(di==0).spec.securityContext.runAsRoot |= false`,
|
||||
"hybrids/tc-03-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"hybrids/tc-04-00-input-separated-keys.yaml",
|
||||
`del(select(di==0).spec.containers[0].securityContext) |
|
||||
select(di==0).spec.securityContext.runAsRoot |= false`,
|
||||
"hybrids/tc-04-01-expected.yaml",
|
||||
},
|
||||
}
|
||||
|
||||
return indentationTestCases
|
||||
}
|
||||
|
||||
func TestApplyFixKeepsFormatting(t *testing.T) {
|
||||
testCases := getTestCases()
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.inputFile, func(t *testing.T) {
|
||||
getTestDataPath := func(filename string) string {
|
||||
currentDir, _ := os.Getwd()
|
||||
currentFile := "testdata/" + filename
|
||||
return filepath.Join(currentDir, currentFile)
|
||||
}
|
||||
|
||||
input, _ := os.ReadFile(getTestDataPath(tc.inputFile))
|
||||
wantRaw, _ := os.ReadFile(getTestDataPath(tc.expectedFile))
|
||||
want := string(wantRaw)
|
||||
expression := tc.yamlExpression
|
||||
|
||||
h, _ := NewFixHandlerMock()
|
||||
|
||||
got, _ := h.ApplyFixToContent(string(input), expression)
|
||||
|
||||
assert.Equalf(
|
||||
t, want, got,
|
||||
"Contents of the fixed file don't match the expectation.\n"+
|
||||
"Input file: %s\n\n"+
|
||||
"Got: <%s>\n\n"+
|
||||
"Want: <%s>",
|
||||
tc.inputFile, got, want,
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fixPathToValidYamlExpression(t *testing.T) {
|
||||
type args struct {
|
||||
fixPath string
|
||||
value string
|
||||
documentIndexInYaml int
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "fix path with boolean value",
|
||||
args: args{
|
||||
fixPath: "spec.template.spec.containers[0].securityContext.privileged",
|
||||
value: "true",
|
||||
documentIndexInYaml: 2,
|
||||
},
|
||||
want: "select(di==2).spec.template.spec.containers[0].securityContext.privileged |= true",
|
||||
},
|
||||
{
|
||||
name: "fix path with string value",
|
||||
args: args{
|
||||
fixPath: "metadata.namespace",
|
||||
value: "YOUR_NAMESPACE",
|
||||
documentIndexInYaml: 0,
|
||||
},
|
||||
want: "select(di==0).metadata.namespace |= \"YOUR_NAMESPACE\"",
|
||||
},
|
||||
{
|
||||
name: "fix path with number",
|
||||
args: args{
|
||||
fixPath: "xxx.yyy",
|
||||
value: "123",
|
||||
documentIndexInYaml: 0,
|
||||
},
|
||||
want: "select(di==0).xxx.yyy |= 123",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := fixPathToValidYamlExpression(tt.args.fixPath, tt.args.value, tt.args.documentIndexInYaml); got != tt.want {
|
||||
t.Errorf("fixPathToValidYamlExpression() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: true
|
||||
@@ -1,19 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
@@ -1,19 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: true
|
||||
@@ -1,19 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
@@ -1,21 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
# These are the container comments
|
||||
containers:
|
||||
# These are the first containers comments
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: true
|
||||
@@ -1,21 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
# These are the container comments
|
||||
containers:
|
||||
# These are the first containers comments
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
@@ -1,21 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
|
||||
image: nginx
|
||||
|
||||
securityContext:
|
||||
runAsRoot: true
|
||||
@@ -1,21 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
@@ -1,14 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -1,11 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
@@ -1,15 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- NET_RAW
|
||||
@@ -1,15 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- NET_RAW
|
||||
@@ -1,16 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- NET_RAW
|
||||
- SYS_ADM
|
||||
@@ -1,47 +0,0 @@
|
||||
# Fixes to Apply:
|
||||
# 1) select(di==0).spec.template.spec.securityContext.allowPrivilegeEscalation = false
|
||||
# 2) select(di==0).spec.template.spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
|
||||
# 3) select(di==0).spec.template.spec.containers[0].securityContext.seccompProfile.type = RuntimeDefault
|
||||
# 4) select(di==0).spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation |= false
|
||||
# 5) select(di==0).spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem |= true
|
||||
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: multiple_inserts
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: example_4
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: example_4
|
||||
spec:
|
||||
serviceAccountName: default
|
||||
terminationGracePeriodSeconds: 5
|
||||
containers:
|
||||
- name: example_4
|
||||
image: nginx
|
||||
ports:
|
||||
- containerPort: 3000
|
||||
env:
|
||||
- name: PORT
|
||||
value: "3000"
|
||||
resources:
|
||||
requests:
|
||||
cpu: 200m
|
||||
memory: 180Mi
|
||||
limits:
|
||||
cpu: 300m
|
||||
memory: 300Mi
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 20
|
||||
periodSeconds: 15
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:3000"]
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 20
|
||||
periodSeconds: 15
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:3000"]
|
||||
@@ -1,57 +0,0 @@
|
||||
# Fixes to Apply:
|
||||
# 1) select(di==0).spec.template.spec.securityContext.allowPrivilegeEscalation = false
|
||||
# 2) select(di==0).spec.template.spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
|
||||
# 3) select(di==0).spec.template.spec.containers[0].securityContext.seccompProfile.type = RuntimeDefault
|
||||
# 4) select(di==0).spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation |= false
|
||||
# 5) select(di==0).spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem |= true
|
||||
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: multiple_inserts
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: example_4
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: example_4
|
||||
spec:
|
||||
serviceAccountName: default
|
||||
terminationGracePeriodSeconds: 5
|
||||
containers:
|
||||
- name: example_4
|
||||
image: nginx
|
||||
ports:
|
||||
- containerPort: 3000
|
||||
env:
|
||||
- name: PORT
|
||||
value: "3000"
|
||||
resources:
|
||||
requests:
|
||||
cpu: 200m
|
||||
memory: 180Mi
|
||||
limits:
|
||||
cpu: 300m
|
||||
memory: 300Mi
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 20
|
||||
periodSeconds: 15
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:3000"]
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 20
|
||||
periodSeconds: 15
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:3000"]
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- NET_RAW
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -1,16 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
|
||||
# Testing if comments are retained as intended
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
@@ -1,18 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
|
||||
# Testing if comments are retained as intended
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
@@ -1,14 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx1
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop: [NET_RAW]
|
||||
@@ -1,14 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx1
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop: [NET_RAW, SYS_ADM]
|
||||
@@ -1,27 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
|
||||
---
|
||||
|
||||
# Fix to Apply:
|
||||
# "select(di==1).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
@@ -1,31 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
|
||||
---
|
||||
|
||||
# Fix to Apply:
|
||||
# "select(di==1).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
@@ -1,11 +0,0 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: indented-parent-list-insert-list-value
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
@@ -1,15 +0,0 @@
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]

apiVersion: v1
kind: Pod
metadata:
  name: indented-parent-list-insert-list-value
spec:
  containers:
    - name: nginx_container
      image: nginx
      securityContext:
        capabilities:
          drop:
            - NET_RAW
Some files were not shown because too many files have changed in this diff.