Mirror of https://github.com/kubescape/kubescape.git (synced 2026-02-20 04:49:56 +00:00)
Compare commits
275 Commits
Commit SHAs included in this comparison:

a95c2ec42b ad91178ef7 db179d7b67 f6e2651f88 6a52945e5a f8a66b0f9b bfc3b0cc43 2432378a57 2f1ae9418a 619eeb3f02
ed1862cf72 40d5b08f39 fcfccd18dc 0be1acfe72 7407cbb4fb 79158aa3e5 e7d212fb8c 4d6f75a65a 17445ee8b9 2de950cce7
326dd096fd 416b5e691e f7d91f1b48 46f823eb89 e54bfec4c1 e95a09dbff e1f01f963f a397f681b3 0b52c498da 5141d82e49
262c014cfe dcd4ac9703 566457308f c967dbeefc 0bcef1be3b 11037f1071 bec9fc9c80 7e1cf051e8 7ea8e89766 4fc20964b3
355be63b6f 29b431009c 4114730d91 d2a092d032 4ce8c63044 9c80f4d9cb 3c615f180b 13b37abcb4 46696d9eb3 9f450ef0cb
1a15ee757b eab0d6650e 2829c77cc8 25d3a9004c b0bdab3ef2 e69cf89fec 45007a6aa4 40e8dd3575 be92c0a3e1 46a1a4ce04
4fadb413c3 99436f1b4d 3f7a55c48e 2fffd25e05 e5d02419f7 30919d7e9e 96903ea77d f4572f6f53 55dbafb9b0 a6c19bc286
c7450adc77 f4f3adf576 0e9ccc955b 54b502629f 8a25d0d293 a4af46fcf9 e9d3b573b3 32922c6263 5abca6711e 875b98415b
d577b1a135 2cd52e43b0 69bdc358eb 2b2034f2da fb114a17a3 948681b82e 5bd532dd57 aef74d6480 7b38b5dc96 9f5d9fe36b
643d0620d7 8ecc1839a0 31aeae8bd1 26bbcae0bd 0feca50ebb 895f330e14 30f454de08 8347fa7874 6b5d335519 44f0473a09
a6bae01476 c356246f82 a6d9badc5f 5bf179810b fbb75d6dd1 ac9db6706c 9b489f1e5c ab788eaaa2 826106090b e6e9a74766
5fee3efb35 a3d77a76aa 3a958294f3 086a518a53 705fabb32b 3d37a6ac2f dd79e348d3 a913d3eb32 f32049bdb3 bbcc7a502d
1a94004de4 424f2cc403 bae960fd5b eab77a9e61 fc78d9143b 034dbca30c a41adc6c9e bd170938c5 e91a73a32e 099886e1bb
c05dc8d7ae 3cebfb3065 4e9f4a8010 94d99da821 ee1b596358 ed5abd5791 898b847211 889dd15772 81f0cecb79 d46d77411b
ee4f4d8af1 ea1426a24b 120677a91f bd78e4c4de e3f70b6cd6 616712cf79 a5007df1bc d6720b67ed 2261fd6adb 9334ad6991
7b5e4143c3 e63e5502cd 154794e774 4aa71725dd 9bd2e7fea4 e6d3e7d7da 35bb15b5df e54d61fd87 f28b2836c7 d196f1f327
995f90ca53 c1da380c9b 77f77b8c7d b3c1aec461 ef242b52bb af1d5694dc a5ac47ff6d b03a4974c4 8385cd0bd7 ca67aa7f5f
9b9ed514c8 11b7f6ab2f 021f2074b8 c1ba2d4b3c 141ad17ece 74c81e2270 7bb124b6fe 8a8ff10b19 1eef32dd8e d7b5dd416d
536a94de45 d8ef471eb2 ff07a80078 310d31a3b1 8a1ef7da87 c142779ee8 640f366c7e 9f36c1d6de b3c8c078a8 3ff2b0d6ff
35b2b350a0 046ea1d79f 3081508863 4a757c1bf1 dec4bcca00 5443039b8c 95e68f49f3 7e90956b50 0c84c8f1f3 56b3239e30
f8e85941da 15081aa9c3 ac03a2bda3 b7ffa22f3a ac5e7069da 5a83f38bca 3abd59e290 d08fdf2e9e bad2f54e72 fc9b713851
245200840d 3f87610e8c c285cb1bcc 63968b564b e237c48186 622b121535 20774d4a40 7bb6bb85ec da908a84bc b515e259c0
facd551518 0fc569d9d9 da27a27ad5 5d4a20f622 70b15a373b 01353f81b3 22f10b6581 785178ffb1 f9b5c58402 8ed6d63ce5
990a7c2052 09b0c09472 f83c38b58e 51e600797a 39d6d1fd26 2dff63b101 b928892f0a c0188ea51d fa376ed5a4 6382edeb6e
61d6c2dd1f 44194f0b4e 7103c7d32c b4e1663cd1 47412c89ca 20d65f2ed3 46a559fb1d 2769b22721 60ec6e8294 63520f9aff
333b55a9f2 c6d3fd1a82 8106133ed0 b36111f63e 3ad0284394 245ebf8c41 8309562da1 de807a65a6 92fe583421 b7ec05e88a
203e925888 fde5453bf3 4c6a65565b e60ecfb8f5 b72e2610ca 8d4bae06bc 847b597d0f db1743f617 7ac1b8aacf 55f8cb1f0e
93574736cd e43f4b1a37 ae00866005 21cb4dae29 7d3ac98998
.github/workflows/build.yaml (90 changed lines, vendored)

@@ -3,6 +3,9 @@ name: build
on:
push:
branches: [ master ]
paths-ignore:
# Do not run the pipeline if only Markdown files changed
- '**.md'
jobs:
once:
name: Create release
@@ -28,29 +31,76 @@ jobs:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1
- name: Set up Go
uses: actions/setup-go@v2
- uses: actions/checkout@v3
with:
go-version: 1.17
submodules: recursive

- name: Cache Go modules (Linux)
if: matrix.os == 'ubuntu-latest'
uses: actions/cache@v3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-

- name: Cache Go modules (macOS)
if: matrix.os == 'macos-latest'
uses: actions/cache@v3
with:
path: |
~/Library/Caches/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-

- name: Cache Go modules (Windows)
if: matrix.os == 'windows-latest'
uses: actions/cache@v3
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-

- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.18
# - name: Test cmd pkg
# run: cd cmd && go test -v ./...

- name: Install MSYS2 & libgit2 (Windows)
shell: cmd
run: .\build.bat all
if: matrix.os == 'windows-latest'

- name: Install libgit2 (Linux/macOS)
run: make libgit2
if: matrix.os != 'windows-latest'

- name: Test core pkg
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: go test -v ./...
run: go test -tags=static -v ./...

- name: Test httphandler pkg
run: cd httphandler && go test -v ./...
run: cd httphandler && go test -tags=static -v ./...

- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
CLIENT: release
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
CGO_ENABLED: 1
run: python3 --version && python3 build.py

- name: Smoke Testing
@@ -92,6 +142,8 @@ jobs:

steps:
- uses: actions/checkout@v2
with:
submodules: recursive

- name: Set image version
id: image-version
@@ -101,28 +153,28 @@ jobs:
id: image-name
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'

- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}

- name: Re-Tag Image to latest
run: docker tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} ${{ steps.image-name.outputs.IMAGE_NAME }}:latest

- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- name: Login to Quay.io
env: # Or as an environment variable
env:
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io

- name: Build the Docker image
run: docker buildx build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:latest --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg client=image-release --push --platform linux/amd64,linux/arm64

# - name: Login to GitHub Container Registry
# uses: docker/login-action@v1
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GITHUB_TOKEN }}
- name: Push Docker image
run: |
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:latest

# TODO - Wait for casign to support fixed tags -> https://github.com/sigstore/cosign/issues/1424
# - name: Install cosign
# uses: sigstore/cosign-installer@main
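The test steps in this workflow can be reproduced locally; a minimal sketch, assuming libgit2 has already been installed via `make libgit2` (or `build.bat all` on Windows), is:

```sh
# Same test invocations the CI jobs run, with the static build tag
go test -tags=static -v ./...
cd httphandler && go test -tags=static -v ./...
```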
.github/workflows/build_dev.yaml (84 changed lines, vendored)

@@ -3,6 +3,9 @@ name: build-dev
on:
push:
branches: [ dev ]
paths-ignore:
# Do not run the pipeline if only Markdown files changed
- '**.md'
jobs:
build:
name: Create cross-platform dev build
@@ -11,12 +14,47 @@ jobs:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v3
with:
submodules: recursive

- name: Cache Go modules (Linux)
if: matrix.os == 'ubuntu-latest'
uses: actions/cache@v3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-

- name: Cache Go modules (macOS)
if: matrix.os == 'macos-latest'
uses: actions/cache@v3
with:
path: |
~/Library/Caches/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-

- name: Cache Go modules (Windows)
if: matrix.os == 'windows-latest'
uses: actions/cache@v3
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-

- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: 1.17
go-version: 1.18

# - name: Test cmd pkg
# run: cd cmd && go test -v ./...
@@ -29,22 +67,32 @@ jobs:
# - name: Test cmd pkg
# run: cd cmd && go test -v ./...

- name: Install MSYS2 & libgit2 (Windows)
shell: cmd
run: .\build.bat all
if: matrix.os == 'windows-latest'

- name: Install libgit2 (Linux/macOS)
run: make libgit2
if: matrix.os != 'windows-latest'

- name: Test core pkg
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: go test -v ./...
run: go test -tags=static -v ./...

- name: Test httphandler pkg
run: cd httphandler && go test -v ./...
run: cd httphandler && go test -tags=static -v ./...

- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
CLIENT: release-dev
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
CGO_ENABLED: 1
run: python3 --version && python3 build.py

- name: Smoke Testing
@@ -71,6 +119,8 @@ jobs:

steps:
- uses: actions/checkout@v2
with:
submodules: recursive

- name: Set image version
id: image-version
@@ -80,21 +130,17 @@ jobs:
id: image-name
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'

- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}

- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- name: Login to Quay.io
env:
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
# - name: Login to GitHub Container Registry
# uses: docker/login-action@v1
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GITHUB_TOKEN }}
- name: Push Docker image
run: |
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}

- name: Build the Docker image
run: docker buildx build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg client=image-dev --push --platform linux/amd64,linux/arm64
.github/workflows/master_pr_checks.yaml (48 changed lines, vendored)

@@ -1,48 +0,0 @@
name: master-pr

on:
pull_request:
branches: [ master ]
types: [ edited, opened, synchronize, reopened ]
jobs:
build:
name: Create cross-platform build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1

- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.17

# - name: Test cmd pkg
# run: cd cmd && go test -v ./...

- name: Test core pkg
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: go test -v ./...

- name: Test httphandler pkg
run: cd httphandler && go test -v ./...

- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
run: python3 --version && python3 build.py

- name: Smoke Testing
env:
RELEASE: v2.0.${{ github.run_number }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
.github/workflows/pr_checks.yaml (96 changed lines, vendored, new file)

@@ -0,0 +1,96 @@
name: pr-checks

on:
pull_request:
branches: [ master, dev ]
types: [ edited, opened, synchronize, reopened ]
paths-ignore:
# Do not run the pipeline if only Markdown files changed
- '**.md'
jobs:
build:
name: Create cross-platform build
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v3
with:
submodules: recursive

- name: Cache Go modules (Linux)
if: matrix.os == 'ubuntu-latest'
uses: actions/cache@v3
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-

- name: Cache Go modules (macOS)
if: matrix.os == 'macos-latest'
uses: actions/cache@v3
with:
path: |
~/Library/Caches/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-

- name: Cache Go modules (Windows)
if: matrix.os == 'windows-latest'
uses: actions/cache@v3
with:
path: |
~\AppData\Local\go-build
~\go\pkg\mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-

- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.18

- name: Install MSYS2 & libgit2 (Windows)
shell: cmd
run: .\build.bat all
if: matrix.os == 'windows-latest'

- name: Install libgit2 (Linux/macOS)
run: make libgit2
if: matrix.os != 'windows-latest'

# - name: Test cmd pkg
# run: cd cmd && go test -v ./...

- name: Test core pkg
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: go test -tags=static -v ./...

- name: Test httphandler pkg
run: cd httphandler && go test -tags=static -v ./...

- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
CLIENT: test
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 1
run: python3 --version && python3 build.py

- name: Smoke Testing
env:
RELEASE: v2.0.${{ github.run_number }}
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
.gitignore (1 changed line, vendored)

@@ -4,4 +4,5 @@
*vender*
*.pyc*
.idea
.history
ca.srl
.gitmodules (3 changed lines, vendored, new file)

@@ -0,0 +1,3 @@
[submodule "git2go"]
path = git2go
url = https://github.com/libgit2/git2go.git
Makefile (20 changed lines, new file)

@@ -0,0 +1,20 @@
.PHONY: test all build libgit2

# default task invoked while running make
all: libgit2 build

export CGO_ENABLED=1

# build and install libgit2
libgit2:
git submodule update --init --recursive
cd git2go; make install-static

# go build tags
TAGS = "static"

build:
go build -v -tags=$(TAGS) .

test:
go test -v -tags=$(TAGS) ./...
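For a local build, the new Makefile reduces to two steps; a minimal sketch of the flow it automates, using only the targets and flags defined above:

```sh
# Fetch the git2go submodule and build/install a static libgit2 (first time only)
make libgit2

# Build kubescape with CGO enabled and the "static" build tag (same as `make build`)
CGO_ENABLED=1 go build -v -tags=static .

# Run the tests with the same tag (same as `make test`)
go test -v -tags=static ./...
```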
README.md (238 changed lines)

@@ -6,7 +6,7 @@

Kubescape is a K8s open-source tool providing a multi-cloud K8s single pane of glass, including risk analysis, security compliance, RBAC visualizer and image vulnerabilities scanning.
Kubescape scans K8s clusters, YAML files, and HELM charts, detecting misconfigurations according to multiple frameworks (such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo) , [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/)), software vulnerabilities, and RBAC (role-based-access-control) violations at early stages of the CI/CD pipeline, calculates risk score instantly and shows risk trends over time.
Kubescape scans K8s clusters, YAML files, and HELM charts, detecting misconfigurations according to multiple frameworks (such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo/?utm_source=github&utm_medium=repository), [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/)), software vulnerabilities, and RBAC (role-based-access-control) violations at early stages of the CI/CD pipeline, calculates risk score instantly and shows risk trends over time.
It became one of the fastest-growing Kubernetes tools among developers due to its easy-to-use CLI interface, flexible output formats, and automated scanning capabilities, saving Kubernetes users and admins’ precious time, effort, and resources.
Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI, Github workflows, Prometheus, and Slack, and supports multi-cloud K8s deployments like EKS, GKE, and AKS.

@@ -28,7 +28,7 @@ Kubescape integrates natively with other DevOps tools, including Jenkins, Circle

# TL;DR
## Install:
```
```sh
curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh | /bin/bash
```

@@ -36,9 +36,13 @@ curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh |

[Install on macOS](#install-on-macos)

[Install on NixOS or Linux/macOS via nix](#install-on-nixos-or-with-nix-community)

[Install using Go](#install-using-go)

## Run:
```
kubescape scan --submit --enable-host-scan --format-version v2 --verbose
```sh
kubescape scan --submit --enable-host-scan --verbose
```

<img src="docs/summary.png">
@@ -62,7 +66,7 @@ Want to contribute? Want to discuss something? Have an issue?

* Feel free to pick a task from the [roadmap](docs/roadmap.md) or suggest a feature of your own. [Contact us](MAINTAINERS.md) directly for more information :)
* Open a issue, we are trying to respond within 48 hours
* [Join us](https://armosec.github.io/kubescape/) in a discussion on our discord server!
* [Join us](https://armosec.github.io/kubescape) in a discussion on our discord server!

[<img src="docs/discord-banner.png" width="100" alt="logo" align="center">](https://armosec.github.io/kubescape/)

@@ -70,10 +74,10 @@ Want to contribute? Want to discuss something? Have an issue?

# Options and examples

[Kubescape docs](https://hub.armo.cloud/docs)
[Kubescape docs](https://hub.armosec.io/docs?utm_source=github&utm_medium=repository)

## Playground
* [Kubescape playground](https://www.katacoda.com/pathaksaiyam/scenarios/kubescape)
* [Kubescape playground](https://killercoda.com/saiyampathak/scenario/kubescape)

## Tutorials

@@ -85,6 +89,8 @@ Want to contribute? Want to discuss something? Have an issue?
* [Configure and run customized frameworks](https://youtu.be/12Sanq_rEhs)
* Customize controls configurations. [Kubescape CLI](https://youtu.be/955psg6TVu4), [Kubescape SaaS](https://youtu.be/lIMVSVhH33o)

<details><summary>Windows</summary>

## Install on Windows

**Requires powershell v5.0+**
@@ -98,41 +104,81 @@ Note: if you get an error you might need to change the execution policy (i.e. en
``` powershell
Set-ExecutionPolicy RemoteSigned -scope CurrentUser
```
</details>

<details><summary>MacOS</summary>

## Install on macOS

1. ```
brew tap armosec/kubescape
1. ```sh
brew tap kubescape/tap
```
2. ```
brew install kubescape
2. ```sh
brew install kubescape-cli
```
</details>

<details><summary>Nix/NixOS</summary>

## Install on NixOS or with nix (Community)

Direct issues installing `kubescape` via `nix` through the channels mentioned [here](https://nixos.wiki/wiki/Support)

You can use `nix` on Linux or macOS and on other platforms unofficially.

Try it out in an ephemeral shell: `nix-shell -p kubescape`

Install declarative as usual

NixOS:

```nix
# your other config ...
environment.systemPackages = with pkgs; [
# your other packages ...
kubescape
];
```

home-manager:

```nix
# your other config ...
home.packages = with pkgs; [
# your other packages ...
kubescape
];
```

Or to your profile (not preferred): `nix-env --install -A nixpkgs.kubescape`

</details>

## Usage & Examples

### Examples

#### Scan a running Kubernetes cluster and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
#### Scan a running Kubernetes cluster and submit results to the [Kubescape SaaS version](https://cloud.armosec.io?utm_source=github&utm_medium=repository)
```
kubescape scan --submit --enable-host-scan --verbose
```

> Read [here](https://hub.armo.cloud/docs/host-sensor) more about the `enable-host-scan` flag
> Read [here](https://hub.armosec.io/docs/host-sensor?utm_source=github&utm_medium=repository) more about the `enable-host-scan` flag

#### Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
#### Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework and submit results to the [Kubescape SaaS version](https://cloud.armosec.io?utm_source=github&utm_medium=repository)
```
kubescape scan framework nsa --submit
```

#### Scan a running Kubernetes cluster with [`MITRE ATT&CK®`](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) framework and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
#### Scan a running Kubernetes cluster with [`MITRE ATT&CK®`](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) framework and submit results to the [Kubescape SaaS version](https://cloud.armosec.io?utm_source=github&utm_medium=repository)
```
kubescape scan framework mitre --submit
```

#### Scan a running Kubernetes cluster with a specific control using the control name or control ID. [List of controls](https://hub.armo.cloud/docs/controls)
#### Scan a running Kubernetes cluster with a specific control using the control name or control ID. [List of controls](https://hub.armosec.io/docs/controls?utm_source=github&utm_medium=repository)
```
kubescape scan control "Privileged container"
```
@@ -147,14 +193,14 @@ kubescape scan --include-namespaces development,staging,production
kubescape scan --exclude-namespaces kube-system,kube-public
```

#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI)
#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI) Submit the results in case the directory is a git repo. [docs](https://hub.armosec.io/docs/repository-scanning?utm_source=github&utm_medium=repository)
```
kubescape scan *.yaml
kubescape scan *.yaml --submit
```

#### Scan kubernetes manifest files from a public github repository
#### Scan kubernetes manifest files from a git repository [and submit the results](https://hub.armosec.io/docs/repository-scanning?utm_source=github&utm_medium=repository)
```
kubescape scan https://github.com/armosec/kubescape
kubescape scan https://github.com/armosec/kubescape --submit
```

#### Display all scanned resources (including the resources who passed)
@@ -193,16 +239,11 @@ kubescape scan --format prometheus
kubescape scan --exceptions examples/exceptions/exclude-kube-namespaces.json
```

#### Scan Helm charts - Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout
#### Scan Helm charts
```
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan -
kubescape scan </path/to/directory> --submit
```

e.g.
```
helm template bitnami/mysql --generate-name --dry-run | kubescape scan -
```

> Kubescape will load the default values file

### Offline/Air-gaped Environment Support

@@ -238,36 +279,10 @@ kubescape scan framework nsa --use-from /path/nsa.json
```

## Scan Periodically using Helm - Contributed by [@yonahd](https://github.com/yonahd)
[Please follow the instructions here](https://hub.armo.cloud/docs/installation-of-armo-in-cluster)
## Scan Periodically using Helm
[Please follow the instructions here](https://hub.armosec.io/docs/installation-of-armo-in-cluster?utm_source=github&utm_medium=repository)
[helm chart repo](https://github.com/armosec/armo-helm)

## Scan using docker image

Official Docker image `quay.io/armosec/kubescape`

```
docker run -v "$(pwd)/example.yaml:/app/example.yaml quay.io/armosec/kubescape scan /app/example.yaml
```

If you wish, you can [build the docker image on your own](build/README.md)

# Submit data manually

Use the `submit` command if you wish to submit data manually

## Submit scan results manually

> Support forward compatibility by using the `--format-version v2` flag

First, scan your cluster using the `json` format flag: `kubescape scan framework <name> --format json --format-version v2 --output path/to/results.json`.

Now you can submit the results to the Kubescape SaaS version -
```
kubescape submit results path/to/results.json
```

# Integrations

## VS Code Extension
@@ -276,13 +291,120 @@ kubescape submit results path/to/results.json

Scan the YAML files while writing them using the [vs code extension](https://github.com/armosec/vscode-kubescape/blob/master/README.md)

## Lens Extension

View Kubescape scan results directly in [Lens IDE](https://k8slens.dev/) using kubescape [Lens extension](https://github.com/armosec/lens-kubescape/blob/master/README.md)

# Building Kubescape

<details><summary>Windows</summary>

## Windows

1. Install MSYS2 & build libgit _(needed only for the first time)_

```
build.bat all
```

> You can install MSYS2 separately by running `build.bat install` and build libgit2 separately by running `build.bat build`

2. Build kubescape

```
make build
```

OR

```
go build -tags=static .
```
</details>

<details><summary>Linux / MacOS</summary>

## Linux / MacOS

1. Install libgit2 dependency _(needed only for the first time)_

```
make libgit2
```

> `cmake` is required to build libgit2. You can install it by running `sudo apt-get install cmake` (Linux) or `brew install cmake` (macOS)

2. Build kubescape

```
make build
```

OR

```
go build -tags=static .
```

3. Test

```
make test
```

</details>

## VS code configuration samples

You can use the samples files below to setup your VS code environment for building and debugging purposes.

<details><summary>.vscode/settings.json</summary>

```json5
// .vscode/settings.json
{
"go.testTags": "static",
"go.buildTags": "static",
"go.toolsEnvVars": {
"CGO_ENABLED": "1"
}
}
```
</details>

<details><summary>.vscode/launch.json</summary>

```json5
// .vscode/launch.json
{
"version": "0.2.0",
"configurations": [
{
"name": "Launch Package",
"type": "go",
"request": "launch",
"mode": "auto",
"program": "${workspaceFolder}/main.go",
"args": [
"scan",
"--logger",
"debug"
],
"buildFlags": "-tags=static"
}
]
}
```
</details>

# Under the hood

## Technology
Kubescape based on OPA engine: https://github.com/open-policy-agent/opa and ARMO's posture controls.
Kubescape based on [OPA engine](https://github.com/open-policy-agent/opa) and ARMO's posture controls.

The tools retrieves Kubernetes objects from the API server and runs a set of [rego's snippets](https://www.openpolicyagent.org/docs/latest/policy-language/) developed by [ARMO](https://www.armosec.io/).
The tools retrieves Kubernetes objects from the API server and runs a set of [rego's snippets](https://www.openpolicyagent.org/docs/latest/policy-language/) developed by [ARMO](https://www.armosec.io?utm_source=github&utm_medium=repository).

The results by default printed in a pretty "console friendly" manner, but they can be retrieved in JSON format for further processing.
build.bat (51 changed lines, new file)

@@ -0,0 +1,51 @@
@ECHO OFF

IF "%1"=="install" goto Install
IF "%1"=="build" goto Build
IF "%1"=="all" goto All
IF "%1"=="" goto Error ELSE goto Error

:Install

if exist C:\MSYS64\ (
echo "MSYS2 already installed"
) else (
mkdir temp_install & cd temp_install

echo "Downloading MSYS2..."
curl -L https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-x86_64-20220603.exe > msys2-x86_64-20220603.exe

echo "Installing MSYS2..."
msys2-x86_64-20220603.exe install --root C:\MSYS64 --confirm-command

cd .. && rmdir /s /q temp_install
)

echo "Adding MSYS2 to path..."
SET "PATH=C:\MSYS64\mingw64\bin;C:\MSYS64\usr\bin;%PATH%"
echo %PATH%

echo "Installing MSYS2 packages..."
pacman -S --needed --noconfirm make
pacman -S --needed --noconfirm mingw-w64-x86_64-cmake
pacman -S --needed --noconfirm mingw-w64-x86_64-gcc
pacman -S --needed --noconfirm mingw-w64-x86_64-pkg-config
pacman -S --needed --noconfirm msys2-w32api-runtime

IF "%1"=="all" GOTO Build
GOTO End

:Build
SET "PATH=C:\MSYS2\mingw64\bin;C:\MSYS2\usr\bin;%PATH%"
make libgit2
GOTO End

:All
GOTO Install

:Error
echo "Error: Unknown option"
GOTO End

:End
build.py (72 changed lines)

@@ -5,79 +5,65 @@ import platform
import subprocess

BASE_GETTER_CONST = "github.com/armosec/kubescape/v2/core/cautils/getter"
BE_SERVER_CONST = BASE_GETTER_CONST + ".ArmoBEURL"
ER_SERVER_CONST = BASE_GETTER_CONST + ".ArmoERURL"
WEBSITE_CONST = BASE_GETTER_CONST + ".ArmoFEURL"
AUTH_SERVER_CONST = BASE_GETTER_CONST + ".armoAUTHURL"

def checkStatus(status, msg):
def check_status(status, msg):
if status != 0:
sys.stderr.write(msg)
exit(status)

def getBuildDir():
currentPlatform = platform.system()
buildDir = "./build/"
def get_build_dir():
current_platform = platform.system()
build_dir = "./build/"

if currentPlatform == "Windows": buildDir += "windows-latest"
elif currentPlatform == "Linux": buildDir += "ubuntu-latest"
elif currentPlatform == "Darwin": buildDir += "macos-latest"
else: raise OSError("Platform %s is not supported!" % (currentPlatform))
if current_platform == "Windows": build_dir += "windows-latest"
elif current_platform == "Linux": build_dir += "ubuntu-latest"
elif current_platform == "Darwin": build_dir += "macos-latest"
else: raise OSError("Platform %s is not supported!" % (current_platform))

return buildDir
return build_dir

def getPackageName():
packageName = "kubescape"
# if platform.system() == "Windows": packageName += ".exe"
def get_package_name():
package_name = "kubescape"
# if platform.system() == "Windows": package_name += ".exe"

return packageName
return package_name

def main():
print("Building Kubescape")

# print environment variables
# print(os.environ)

# Set some variables
packageName = getPackageName()
buildUrl = "github.com/armosec/kubescape/v2/core/cautils.BuildNumber"
releaseVersion = os.getenv("RELEASE")
ArmoBEServer = os.getenv("ArmoBEServer")
ArmoERServer = os.getenv("ArmoERServer")
ArmoWebsite = os.getenv("ArmoWebsite")
ArmoAuthServer = os.getenv("ArmoAuthServer")
package_name = get_package_name()
build_url = "github.com/armosec/kubescape/v2/core/cautils.BuildNumber"
release_version = os.getenv("RELEASE")

client_var = "github.com/armosec/kubescape/v2/core/cautils.Client"
client_name = os.getenv("CLIENT")

# Create build directory
buildDir = getBuildDir()
build_dir = get_build_dir()

ks_file = os.path.join(buildDir, packageName)
ks_file = os.path.join(build_dir, package_name)
hash_file = ks_file + ".sha256"

if not os.path.isdir(buildDir):
os.makedirs(buildDir)
if not os.path.isdir(build_dir):
os.makedirs(build_dir)

# Build kubescape
ldflags = "-w -s"
if releaseVersion:
ldflags += " -X {}={}".format(buildUrl, releaseVersion)
if ArmoBEServer:
ldflags += " -X {}={}".format(BE_SERVER_CONST, ArmoBEServer)
if ArmoERServer:
ldflags += " -X {}={}".format(ER_SERVER_CONST, ArmoERServer)
if ArmoWebsite:
ldflags += " -X {}={}".format(WEBSITE_CONST, ArmoWebsite)
if ArmoAuthServer:
ldflags += " -X {}={}".format(AUTH_SERVER_CONST, ArmoAuthServer)
if release_version:
ldflags += " -X {}={}".format(build_url, release_version)
if client_name:
ldflags += " -X {}={}".format(client_var, client_name)

build_command = ["go", "build", "-o", ks_file, "-ldflags" ,ldflags]
build_command = ["go", "build", "-tags=static", "-o", ks_file, "-ldflags" ,ldflags]

print("Building kubescape and saving here: {}".format(ks_file))
print("Build command: {}".format(" ".join(build_command)))

status = subprocess.call(build_command)
checkStatus(status, "Failed to build kubescape")
check_status(status, "Failed to build kubescape")

sha256 = hashlib.sha256()
with open(ks_file, "rb") as kube:
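After this change, build.py injects only the build number and the client name through -ldflags and always builds with the static tag. A rough sketch of how it is driven and of the command it assembles; the environment values shown are illustrative, not taken from a real release:

```sh
# Illustrative environment consumed by build.py (values are placeholders)
export RELEASE=v2.0.0
export CLIENT=release
export CGO_ENABLED=1

python3 build.py
# which assembles, on Linux, approximately:
#   go build -tags=static -o ./build/ubuntu-latest/kubescape -ldflags \
#     "-w -s -X github.com/armosec/kubescape/v2/core/cautils.BuildNumber=v2.0.0 \
#      -X github.com/armosec/kubescape/v2/core/cautils.Client=release"
```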
@@ -1,23 +1,28 @@
FROM golang:1.17-alpine as builder
#ENV GOPROXY=https://goproxy.io,direct
FROM golang:1.18-alpine as builder

ARG image_version
ARG client

ENV RELEASE=$image_version
ENV CLIENT=$client

ENV GO111MODULE=

ENV CGO_ENABLED=0
ENV CGO_ENABLED=1

# Install required python/pip
ENV PYTHONUNBUFFERED=1
RUN apk add --update --no-cache python3 && ln -sf python3 /usr/bin/python
RUN apk add --update --no-cache python3 git openssl-dev musl-dev gcc make cmake pkgconfig && ln -sf python3 /usr/bin/python
RUN python3 -m ensurepip
RUN pip3 install --no-cache --upgrade pip setuptools

WORKDIR /work
ADD . .

# install libgit2
WORKDIR /work
RUN rm -rf git2go && make libgit2

# build kubescape server
WORKDIR /work/httphandler
RUN python build.py
@@ -31,14 +36,17 @@ RUN /work/build/ubuntu-latest/kubescape download artifacts -o /work/artifacts

FROM alpine

RUN addgroup -S ks && adduser -S ks -G ks
USER ks
WORKDIR /home/ks/
RUN addgroup -S armo && adduser -S armo -G armo

RUN mkdir /home/armo/.kubescape
COPY --from=builder /work/artifacts/ /home/armo/.kubescape

RUN chown -R armo:armo /home/armo/.kubescape

USER armo
WORKDIR /home/armo

COPY --from=builder /work/httphandler/build/ubuntu-latest/kubescape /usr/bin/ksserver
COPY --from=builder /work/build/ubuntu-latest/kubescape /usr/bin/kubescape

RUN mkdir /home/ks/.kubescape && chmod 777 -R /home/ks/.kubescape
COPY --from=builder /work/artifacts/ /home/ks/.kubescape

ENTRYPOINT ["ksserver"]
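The Dockerfile now expects the image_version and client build arguments that the workflows pass to buildx. A single-platform local build, assuming the same arguments and an illustrative tag name, would look like:

```sh
# Local single-arch build (tag and argument values are examples only)
docker build . --file build/Dockerfile \
  --build-arg image_version=v2.0.0 \
  --build-arg client=image-dev \
  --tag kubescape:dev
```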
@@ -1,9 +1,9 @@
package config

import (
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/kubescape/go-logger"
"github.com/spf13/cobra"
)

@@ -4,9 +4,9 @@ import (
"fmt"
"strings"

"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/kubescape/go-logger"
"github.com/spf13/cobra"
)

@@ -3,9 +3,9 @@ package config
import (
"os"

"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/kubescape/go-logger"
"github.com/spf13/cobra"
)

@@ -24,7 +24,9 @@ func GetDeleteCmd(ks meta.IKubescape) *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
},
}
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")

deleteCmd.AddCommand(getExceptionsCmd(ks, &deleteInfo))

@@ -4,9 +4,9 @@ import (
"fmt"
"strings"

"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/kubescape/go-logger"
"github.com/spf13/cobra"
)

@@ -26,7 +26,7 @@ func getExceptionsCmd(ks meta.IKubescape, deleteInfo *v1.Delete) *cobra.Command
if len(exceptionsNames) == 0 {
logger.L().Fatal("missing exceptions names")
}
if err := ks.DeleteExceptions(&v1.DeleteExceptions{Account: deleteInfo.Account, Exceptions: exceptionsNames}); err != nil {
if err := ks.DeleteExceptions(&v1.DeleteExceptions{Credentials: deleteInfo.Credentials, Exceptions: exceptionsNames}); err != nil {
logger.L().Fatal(err.Error())
}
},

@@ -6,10 +6,10 @@ import (
"strings"

"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/core"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/kubescape/go-logger"
"github.com/spf13/cobra"
)

@@ -72,7 +72,10 @@ func GeDownloadCmd(ks meta.IKubescape) *cobra.Command {
return nil
},
}
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")

downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If not specified, will save in `~/.kubescape/<policy name>.json`")

return downloadCmd

@@ -5,10 +5,10 @@ import (
"strings"

"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/core"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
logger "github.com/kubescape/go-logger"
"github.com/spf13/cobra"
)

@@ -27,7 +27,7 @@ var (
kubescape list controls --id

Control documentation:
https://hub.armo.cloud/docs/controls
https://hub.armosec.io/docs/controls
`
)

@@ -59,7 +59,9 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
return nil
},
}
listCmd.PersistentFlags().StringVar(&listPolicies.Account, "account", "", "Armo portal account ID. Default will load account ID from configMap or config file")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-printer'/'json'")
listCmd.PersistentFlags().BoolVarP(&listPolicies.ListIDs, "id", "", false, "List control ID's instead of controls names")
12
cmd/root.go
12
cmd/root.go
@@ -14,10 +14,10 @@ import (
|
||||
"github.com/armosec/kubescape/v2/cmd/version"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/core"
|
||||
"github.com/armosec/kubescape/v2/core/meta"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -47,14 +47,12 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
|
||||
|
||||
rootCmd := &cobra.Command{
|
||||
Use: "kubescape",
|
||||
Version: cautils.BuildNumber,
|
||||
Short: "Kubescape is a tool for testing Kubernetes security posture",
|
||||
Long: `Based on NSA \ MITRE ATT&CK® and other frameworks specifications`,
|
||||
Short: "Kubescape is a tool for testing Kubernetes security posture. Docs: https://hub.armosec.io/docs",
|
||||
Example: ksExamples,
|
||||
}
|
||||
|
||||
rootCmd.PersistentFlags().StringVar(&rootInfo.ArmoBEURLsDep, "environment", "", envFlagUsage)
|
||||
rootCmd.PersistentFlags().StringVar(&rootInfo.ArmoBEURLs, "env", "", envFlagUsage)
|
||||
rootCmd.PersistentFlags().StringVar(&rootInfo.KSCloudBEURLsDep, "environment", "", envFlagUsage)
|
||||
rootCmd.PersistentFlags().StringVar(&rootInfo.KSCloudBEURLs, "env", "", envFlagUsage)
|
||||
rootCmd.PersistentFlags().MarkDeprecated("environment", "use 'env' instead")
|
||||
rootCmd.PersistentFlags().MarkHidden("environment")
|
||||
rootCmd.PersistentFlags().MarkHidden("env")
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
@@ -55,10 +55,10 @@ func initCacheDir() {
|
||||
logger.L().Debug("cache dir updated", helpers.String("path", getter.DefaultLocalStore))
|
||||
}
|
||||
func initEnvironment() {
|
||||
if rootInfo.ArmoBEURLs == "" {
|
||||
rootInfo.ArmoBEURLs = rootInfo.ArmoBEURLsDep
|
||||
if rootInfo.KSCloudBEURLs == "" {
|
||||
rootInfo.KSCloudBEURLs = rootInfo.KSCloudBEURLsDep
|
||||
}
|
||||
urlSlices := strings.Split(rootInfo.ArmoBEURLs, ",")
|
||||
urlSlices := strings.Split(rootInfo.KSCloudBEURLs, ",")
|
||||
if len(urlSlices) != 1 && len(urlSlices) < 3 {
|
||||
logger.L().Fatal("expected at least 3 URLs (report, api, frontend, auth)")
|
||||
}
|
||||
@@ -66,24 +66,24 @@ func initEnvironment() {
|
||||
case 1:
|
||||
switch urlSlices[0] {
|
||||
case "dev", "development":
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPIDev())
|
||||
getter.SetKSCloudAPIConnector(getter.NewKSCloudAPIDev())
|
||||
case "stage", "staging":
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPIStaging())
|
||||
getter.SetKSCloudAPIConnector(getter.NewKSCloudAPIStaging())
|
||||
case "":
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPIProd())
|
||||
getter.SetKSCloudAPIConnector(getter.NewKSCloudAPIProd())
|
||||
default:
|
||||
logger.L().Fatal("--environment flag usage: " + envFlagUsage)
|
||||
}
|
||||
case 2:
|
||||
logger.L().Fatal("--environment flag usage: " + envFlagUsage)
|
||||
case 3, 4:
|
||||
var armoAUTHURL string
|
||||
armoERURL := urlSlices[0] // mandatory
|
||||
armoBEURL := urlSlices[1] // mandatory
|
||||
armoFEURL := urlSlices[2] // mandatory
|
||||
var ksAuthURL string
|
||||
ksEventReceiverURL := urlSlices[0] // mandatory
|
||||
ksBackendURL := urlSlices[1] // mandatory
|
||||
ksFrontendURL := urlSlices[2] // mandatory
|
||||
if len(urlSlices) >= 4 {
|
||||
armoAUTHURL = urlSlices[3]
|
||||
ksAuthURL = urlSlices[3]
|
||||
}
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL, armoAUTHURL))
|
||||
getter.SetKSCloudAPIConnector(getter.NewKSCloudAPICustomized(ksEventReceiverURL, ksBackendURL, ksFrontendURL, ksAuthURL))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,11 +6,13 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/meta"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
|
||||
"github.com/enescakir/emoji"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -29,7 +31,7 @@ var (
|
||||
Run 'kubescape list controls' for the list of supported controls
|
||||
|
||||
Control documentation:
|
||||
https://hub.armo.cloud/docs/controls
|
||||
https://hub.armosec.io/docs/controls
|
||||
`
|
||||
)
|
||||
|
||||
@@ -57,14 +59,14 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// flagValidationControl(scanInfo)
|
||||
scanInfo.PolicyIdentifier = []reporthandling.PolicyIdentifier{}
|
||||
scanInfo.PolicyIdentifier = []cautils.PolicyIdentifier{}
|
||||
|
||||
if len(args) == 0 {
|
||||
scanInfo.ScanAll = true
|
||||
} else { // expected control or list of control sepparated by ","
|
||||
|
||||
// Read controls from input args
|
||||
scanInfo.SetPolicyIdentifiers(strings.Split(args[0], ","), reporthandling.KindControl)
|
||||
scanInfo.SetPolicyIdentifiers(strings.Split(args[0], ","), apisv1.KindControl)
|
||||
|
||||
if len(args) > 1 {
|
||||
if len(args[1:]) == 0 || args[1] != "-" {
|
||||
|
||||
@@ -6,11 +6,13 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/meta"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
|
||||
"github.com/enescakir/emoji"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -95,7 +97,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
|
||||
}
|
||||
scanInfo.FrameworkScan = true
|
||||
|
||||
scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
|
||||
scanInfo.SetPolicyIdentifiers(frameworks, apisv1.KindFramework)
|
||||
|
||||
results, err := ks.Scan(scanInfo)
|
||||
if err != nil {
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
package scan
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/meta"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -61,18 +63,21 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
},
|
||||
}
|
||||
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Account, "account", "", "", "ARMO portal account ID. Default will load account ID from configMap or config file")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "kube-context", "", "", "Kube context. Default will use the current-context")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to a controls-config obj. If not set will download controls-config from ARMO management portal")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.UseArtifactsFrom, "use-artifacts-from", "", "Load artifacts from local directory. If not used will download them")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system,kube-public")
|
||||
scanCmd.PersistentFlags().Float32VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer","json","junit","prometheus","pdf"`)
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer", "json", "junit", "prometheus", "pdf", "html"`)
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.IncludeNamespaces, "include-namespaces", "", "scan specific namespaces. e.g: --include-namespaces ns-a,ns-b")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to ARMO backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.VerboseMode, "verbose", "v", false, "Display all of the input resources and not only failed resources")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.View, "view", string(cautils.ResourceViewType), fmt.Sprintf("View results based on the %s/%s. default is --view=%s", cautils.ResourceViewType, cautils.ControlViewType, cautils.ResourceViewType))
|
||||
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to ARMO management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
|
||||
@@ -88,7 +93,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
|
||||
scanCmd.PersistentFlags().MarkHidden("silent") // this flag should be deprecated since we added the --logger support
|
||||
// scanCmd.PersistentFlags().MarkHidden("format-version") // meant for testing different output approaches and not for common use
|
||||
|
||||
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. Deleting it right after we collect the data. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://raw.githubusercontent.com/armosec/kubescape/master/hostsensorutils/hostsensor.yaml")
|
||||
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. Deleting it right after we collect the data. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://github.com/armosec/kubescape/blob/master/core/pkg/hostsensorutils/hostsensor.yaml")
|
||||
hostF.NoOptDefVal = "true"
|
||||
hostF.DefValue = "false, for no TTY in stdin"
|
||||
|
||||
|
||||
@@ -3,16 +3,16 @@ package submit
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/meta"
|
||||
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func getExceptionsCmd(ks meta.IKubescape, submitInfo *metav1.Submit) *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "exceptions <full path to exceptins file>",
|
||||
Use: "exceptions <full path to exceptions file>",
|
||||
Short: "Submit exceptions to the Kubescape SaaS version",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 1 {
|
||||
@@ -21,7 +21,7 @@ func getExceptionsCmd(ks meta.IKubescape, submitInfo *metav1.Submit) *cobra.Comm
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := ks.SubmitExceptions(submitInfo.Account, args[0]); err != nil {
|
||||
if err := ks.SubmitExceptions(&submitInfo.Credentials, args[0]); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
},
|
||||
|
||||
@@ -1,42 +1,59 @@
|
||||
package submit
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/meta"
|
||||
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
|
||||
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
|
||||
reporterv2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v2"
|
||||
"github.com/google/uuid"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
|
||||
reporterv1 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v1"
|
||||
|
||||
"github.com/armosec/rbac-utils/rbacscanner"
|
||||
"github.com/kubescape/rbac-utils/rbacscanner"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
rbacExamples = `
|
||||
# Submit cluster's Role-Based Access Control(RBAC)
|
||||
kubescape submit rbac
|
||||
|
||||
# Submit cluster's Role-Based Access Control(RBAC) with account ID
|
||||
kubescape submit rbac --account <account-id>
|
||||
`
|
||||
)
|
||||
|
||||
// getRBACCmd represents the RBAC command
|
||||
func getRBACCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
|
||||
return &cobra.Command{
|
||||
Use: "rbac \nExample:\n$ kubescape submit rbac",
|
||||
Short: "Submit cluster's Role-Based Access Control(RBAC)",
|
||||
Long: ``,
|
||||
Use: "rbac",
|
||||
Example: rbacExamples,
|
||||
Short: "Submit cluster's Role-Based Access Control(RBAC)",
|
||||
Long: ``,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
k8s := k8sinterface.NewKubernetesApi()
|
||||
|
||||
// get config
|
||||
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
|
||||
clusterConfig := getTenantConfig(&submitInfo.Credentials, "", k8s)
|
||||
if err := clusterConfig.SetTenant(); err != nil {
|
||||
logger.L().Error("failed setting account ID", helpers.Error(err))
|
||||
}
|
||||
|
||||
if clusterConfig.GetAccountID() == "" {
|
||||
return fmt.Errorf("account ID is not set, run 'kubescape submit rbac --account <account-id>'")
|
||||
}
|
||||
|
||||
// list RBAC
|
||||
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetAccountID(), clusterConfig.GetContextName()))
|
||||
|
||||
// submit resources
|
||||
r := reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
|
||||
r := reporterv2.NewReportEventReceiver(clusterConfig.GetConfigObj(), uuid.NewString(), reporterv2.SubmitContextRBAC)
|
||||
|
||||
submitInterfaces := cliinterfaces.SubmitInterfaces{
|
||||
ClusterConfig: clusterConfig,
|
||||
@@ -60,9 +77,9 @@ func getKubernetesApi() *k8sinterface.KubernetesApi {
|
||||
}
|
||||
return k8sinterface.NewKubernetesApi()
|
||||
}
|
||||
func getTenantConfig(Account, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
|
||||
func getTenantConfig(credentials *cautils.Credentials, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
|
||||
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
|
||||
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
return cautils.NewLocalConfig(getter.GetKSCloudAPIConnector(), credentials, clusterName)
|
||||
}
|
||||
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
return cautils.NewClusterConfig(k8s, getter.GetKSCloudAPIConnector(), credentials, clusterName)
|
||||
}
|
||||
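A minimal sketch (hypothetical, inside the submit package) of calling the new Credentials-based getTenantConfig instead of passing a bare account string; the field values are placeholders, not real identifiers:

creds := cautils.Credentials{
	Account:   "<account-id>", // placeholder
	ClientID:  "<client-id>",  // placeholder
	SecretKey: "<secret-key>", // placeholder
}
k8s := getKubernetesApi()
// returns a LocalConfig or ClusterConfig depending on cluster connectivity
tenantConfig := getTenantConfig(&creds, "", k8s)
if err := tenantConfig.SetTenant(); err != nil {
	logger.L().Error("failed setting account ID", helpers.Error(err))
}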
|
||||
@@ -4,20 +4,18 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/google/uuid"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/meta"
|
||||
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
|
||||
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
|
||||
reporterv1 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v1"
|
||||
reporterv2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v2"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/google/uuid"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -37,19 +35,13 @@ func NewResultsObject(customerGUID, clusterName, filePath string) *ResultsObject
|
||||
}
|
||||
}
|
||||
|
||||
func (resultsObject *ResultsObject) SetResourcesReport() (*reporthandling.PostureReport, error) {
|
||||
func (resultsObject *ResultsObject) SetResourcesReport() (*reporthandlingv2.PostureReport, error) {
|
||||
// load framework results from json file
|
||||
frameworkReports, err := loadResultsFromFile(resultsObject.filePath)
|
||||
report, err := loadResultsFromFile(resultsObject.filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &reporthandling.PostureReport{
|
||||
FrameworkReports: frameworkReports,
|
||||
ReportID: uuid.NewString(),
|
||||
ReportGenerationTime: time.Now().UTC(),
|
||||
CustomerGUID: resultsObject.customerGUID,
|
||||
ClusterName: resultsObject.clusterName,
|
||||
}, nil
|
||||
return report, nil
|
||||
}
|
||||
|
||||
func (resultsObject *ResultsObject) ListAllResources() (map[string]workloadinterface.IMetadata, error) {
|
||||
@@ -69,22 +61,14 @@ func getResultsCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
|
||||
k8s := getKubernetesApi()
|
||||
|
||||
// get config
|
||||
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
|
||||
clusterConfig := getTenantConfig(&submitInfo.Credentials, "", k8s)
|
||||
if err := clusterConfig.SetTenant(); err != nil {
|
||||
logger.L().Error("failed setting account ID", helpers.Error(err))
|
||||
}
|
||||
|
||||
resultsObjects := NewResultsObject(clusterConfig.GetAccountID(), clusterConfig.GetContextName(), args[0])
|
||||
|
||||
// submit resources
|
||||
var r reporter.IReport
|
||||
switch formatVersion {
|
||||
case "v2":
|
||||
r = reporterv2.NewReportEventReceiver(clusterConfig.GetConfigObj(), "")
|
||||
default:
|
||||
logger.L().Warning("Deprecated results version. run with '--format-version' flag", helpers.String("your version", formatVersion), helpers.String("latest version", "v2"))
|
||||
r = reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
|
||||
}
|
||||
r := reporterv2.NewReportEventReceiver(clusterConfig.GetConfigObj(), uuid.NewString(), reporterv2.SubmitContextScan)
|
||||
|
||||
submitInterfaces := cliinterfaces.SubmitInterfaces{
|
||||
ClusterConfig: clusterConfig,
|
||||
@@ -102,18 +86,14 @@ func getResultsCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
|
||||
|
||||
return resultsCmd
|
||||
}
|
||||
func loadResultsFromFile(filePath string) ([]reporthandling.FrameworkReport, error) {
|
||||
frameworkReports := []reporthandling.FrameworkReport{}
|
||||
func loadResultsFromFile(filePath string) (*reporthandlingv2.PostureReport, error) {
|
||||
report := &reporthandlingv2.PostureReport{}
|
||||
f, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = json.Unmarshal(f, &frameworkReports); err != nil {
|
||||
frameworkReport := reporthandling.FrameworkReport{}
|
||||
if err = json.Unmarshal(f, &frameworkReport); err != nil {
|
||||
return frameworkReports, err
|
||||
}
|
||||
frameworkReports = append(frameworkReports, frameworkReport)
|
||||
if err = json.Unmarshal(f, report); err != nil {
|
||||
return report, fmt.Errorf("failed to unmarshal results file: %s, make sure you run kubescape with '--format=json --format-version=v2'", err.Error())
|
||||
}
|
||||
return frameworkReports, nil
|
||||
return report, nil
|
||||
}
|
||||
|
||||
@@ -20,7 +20,9 @@ func GetSubmitCmd(ks meta.IKubescape) *cobra.Command {
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
},
|
||||
}
|
||||
submitCmd.PersistentFlags().StringVarP(&submitInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
submitCmd.PersistentFlags().StringVarP(&submitInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
|
||||
submitCmd.PersistentFlags().StringVarP(&submitInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
|
||||
submitCmd.PersistentFlags().StringVarP(&submitInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
|
||||
|
||||
submitCmd.AddCommand(getExceptionsCmd(ks, &submitInfo))
|
||||
submitCmd.AddCommand(getResultsCmd(ks, &submitInfo))
|
||||
|
||||
@@ -9,9 +9,9 @@ import (
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
@@ -69,7 +69,10 @@ type ITenantConfig interface {
|
||||
// getters
|
||||
GetContextName() string
|
||||
GetAccountID() string
|
||||
GetTennatEmail() string
|
||||
GetTenantEmail() string
|
||||
GetToken() string
|
||||
GetClientID() string
|
||||
GetSecretKey() string
|
||||
GetConfigObj() *ConfigObj
|
||||
// GetBackendAPI() getter.IBackend
|
||||
// GenerateURL()
|
||||
@@ -87,8 +90,7 @@ type LocalConfig struct {
|
||||
}
|
||||
|
||||
func NewLocalConfig(
|
||||
backendAPI getter.IBackend, customerGUID, clusterName string) *LocalConfig {
|
||||
var configObj *ConfigObj
|
||||
backendAPI getter.IBackend, credentials *Credentials, clusterName string) *LocalConfig {
|
||||
|
||||
lc := &LocalConfig{
|
||||
backendAPI: backendAPI,
|
||||
@@ -96,20 +98,14 @@ func NewLocalConfig(
|
||||
}
|
||||
// get from configMap
|
||||
if existsConfigFile() { // get from file
|
||||
configObj, _ = loadConfigFromFile()
|
||||
} else {
|
||||
configObj = &ConfigObj{}
|
||||
}
|
||||
if configObj != nil {
|
||||
lc.configObj = configObj
|
||||
}
|
||||
if customerGUID != "" {
|
||||
lc.configObj.AccountID = customerGUID // override config customerGUID
|
||||
loadConfigFromFile(lc.configObj)
|
||||
}
|
||||
|
||||
updateCredentials(lc.configObj, credentials)
|
||||
|
||||
if clusterName != "" {
|
||||
lc.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
|
||||
}
|
||||
getAccountFromEnv(lc.configObj)
|
||||
|
||||
lc.backendAPI.SetAccountID(lc.configObj.AccountID)
|
||||
lc.backendAPI.SetClientID(lc.configObj.ClientID)
|
||||
@@ -119,13 +115,16 @@ func NewLocalConfig(
|
||||
}
|
||||
|
||||
func (lc *LocalConfig) GetConfigObj() *ConfigObj { return lc.configObj }
|
||||
func (lc *LocalConfig) GetTennatEmail() string { return lc.configObj.CustomerAdminEMail }
|
||||
func (lc *LocalConfig) GetTenantEmail() string { return lc.configObj.CustomerAdminEMail }
|
||||
func (lc *LocalConfig) GetAccountID() string { return lc.configObj.AccountID }
|
||||
func (lc *LocalConfig) GetClientID() string { return lc.configObj.ClientID }
|
||||
func (lc *LocalConfig) GetSecretKey() string { return lc.configObj.SecretKey }
|
||||
func (lc *LocalConfig) GetContextName() string { return lc.configObj.ClusterName }
|
||||
func (lc *LocalConfig) GetToken() string { return lc.configObj.Token }
|
||||
func (lc *LocalConfig) IsConfigFound() bool { return existsConfigFile() }
|
||||
func (lc *LocalConfig) SetTenant() error {
|
||||
|
||||
// ARMO tenant GUID
|
||||
// Kubescape Cloud tenant GUID
|
||||
if err := getTenantConfigFromBE(lc.backendAPI, lc.configObj); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -146,7 +145,7 @@ func (lc *LocalConfig) DeleteCachedConfig() error {
|
||||
|
||||
func getTenantConfigFromBE(backendAPI getter.IBackend, configObj *ConfigObj) error {
|
||||
|
||||
// get from armoBE
|
||||
// get from Kubescape Cloud API
|
||||
tenantResponse, err := backendAPI.GetTenant()
|
||||
if err == nil && tenantResponse != nil {
|
||||
if tenantResponse.AdminMail != "" { // registered tenant
|
||||
@@ -190,8 +189,8 @@ type ClusterConfig struct {
|
||||
configObj *ConfigObj
|
||||
}
|
||||
|
||||
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, customerGUID, clusterName string) *ClusterConfig {
|
||||
var configObj *ConfigObj
|
||||
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, credentials *Credentials, clusterName string) *ClusterConfig {
|
||||
// var configObj *ConfigObj
|
||||
c := &ClusterConfig{
|
||||
k8s: k8s,
|
||||
backendAPI: backendAPI,
|
||||
@@ -200,23 +199,20 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
|
||||
configMapNamespace: getConfigMapNamespace(),
|
||||
}
|
||||
|
||||
// get from configMap
|
||||
// first, load from configMap
|
||||
if c.existsConfigMap() {
|
||||
configObj, _ = c.loadConfigFromConfigMap()
|
||||
c.loadConfigFromConfigMap()
|
||||
}
|
||||
if configObj == nil && existsConfigFile() { // get from file
|
||||
configObj, _ = loadConfigFromFile()
|
||||
}
|
||||
if configObj != nil {
|
||||
c.configObj = configObj
|
||||
}
|
||||
if customerGUID != "" {
|
||||
c.configObj.AccountID = customerGUID // override config customerGUID
|
||||
|
||||
// second, load from file
|
||||
if existsConfigFile() { // get from file
|
||||
loadConfigFromFile(c.configObj)
|
||||
}
|
||||
updateCredentials(c.configObj, credentials)
|
||||
|
||||
if clusterName != "" {
|
||||
c.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
|
||||
}
|
||||
getAccountFromEnv(c.configObj)
|
||||
|
||||
if c.configObj.ClusterName == "" {
|
||||
c.configObj.ClusterName = AdoptClusterName(k8sinterface.GetContextName())
|
||||
@@ -234,7 +230,10 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
|
||||
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
|
||||
func (c *ClusterConfig) GetDefaultNS() string { return c.configMapNamespace }
|
||||
func (c *ClusterConfig) GetAccountID() string { return c.configObj.AccountID }
|
||||
func (c *ClusterConfig) GetTennatEmail() string { return c.configObj.CustomerAdminEMail }
|
||||
func (c *ClusterConfig) GetClientID() string { return c.configObj.ClientID }
|
||||
func (c *ClusterConfig) GetSecretKey() string { return c.configObj.SecretKey }
|
||||
func (c *ClusterConfig) GetTenantEmail() string { return c.configObj.CustomerAdminEMail }
|
||||
func (c *ClusterConfig) GetToken() string { return c.configObj.Token }
|
||||
func (c *ClusterConfig) IsConfigFound() bool { return existsConfigFile() || c.existsConfigMap() }
|
||||
|
||||
func (c *ClusterConfig) SetTenant() error {
|
||||
@@ -282,18 +281,26 @@ func (c *ClusterConfig) ToMapString() map[string]interface{} {
|
||||
}
|
||||
return m
|
||||
}
|
||||
func (c *ClusterConfig) loadConfigFromConfigMap() (*ConfigObj, error) {
|
||||
func (c *ClusterConfig) loadConfigFromConfigMap() error {
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
if bData, err := json.Marshal(configMap.Data); err == nil {
|
||||
return readConfig(bData)
|
||||
}
|
||||
return nil, nil
|
||||
return loadConfigFromData(c.configObj, configMap.Data)
|
||||
}
|
||||
|
||||
func loadConfigFromData(co *ConfigObj, data map[string]string) error {
|
||||
var e error
|
||||
if jsonConf, ok := data["config.json"]; ok {
|
||||
e = readConfig([]byte(jsonConf), co)
|
||||
}
|
||||
if bData, err := json.Marshal(data); err == nil {
|
||||
e = readConfig(bData, co)
|
||||
}
|
||||
|
||||
return e
|
||||
}
|
||||
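A minimal sketch (hypothetical, inside package cautils) of the merge order implemented by loadConfigFromData above, assuming ConfigObj's JSON tags match the flat configMap keys as the new unit test exercises: the "config.json" entry is read first, then the flat keys are unmarshalled over it and win on conflict.

data := map[string]string{
	"config.json": `{"accountID":"from-json","clientID":"from-json"}`,
	"accountID":   "from-flat-key", // unmarshalled last, so it overrides config.json
}
co := &ConfigObj{}
if err := loadConfigFromData(co, data); err != nil {
	logger.L().Error("failed loading config", helpers.Error(err))
}
// co.AccountID == "from-flat-key", co.ClientID == "from-json"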
func (c *ClusterConfig) existsConfigMap() bool {
|
||||
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
|
||||
// TODO - check if has customerGUID
|
||||
@@ -411,28 +418,27 @@ func (c *ClusterConfig) updateConfigData(configMap *corev1.ConfigMap) {
|
||||
}
|
||||
}
|
||||
}
|
||||
func loadConfigFromFile() (*ConfigObj, error) {
|
||||
func loadConfigFromFile(configObj *ConfigObj) error {
|
||||
dat, err := os.ReadFile(ConfigFileFullPath())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
return readConfig(dat)
|
||||
return readConfig(dat, configObj)
|
||||
}
|
||||
func readConfig(dat []byte) (*ConfigObj, error) {
|
||||
func readConfig(dat []byte, configObj *ConfigObj) error {
|
||||
|
||||
if len(dat) == 0 {
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
configObj := &ConfigObj{}
|
||||
|
||||
if err := json.Unmarshal(dat, configObj); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if configObj.AccountID == "" {
|
||||
configObj.AccountID = configObj.CustomerGUID
|
||||
}
|
||||
configObj.CustomerGUID = ""
|
||||
return configObj, nil
|
||||
return nil
|
||||
}
|
||||
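A minimal sketch (hypothetical) of the reworked readConfig: the caller now owns the ConfigObj and the function only fills it in place.

co := &ConfigObj{}
if err := readConfig([]byte(`{"accountID":"aaa"}`), co); err != nil {
	logger.L().Error("invalid cached config", helpers.Error(err))
}
// co.AccountID == "aaa"; when accountID is missing, readConfig falls back to the
// legacy customerGUID field and then clears it. An empty input returns nil and
// leaves co untouched.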
|
||||
// Check if the customer is submitted
|
||||
@@ -479,15 +485,34 @@ func getConfigMapNamespace() string {
|
||||
return "default"
|
||||
}
|
||||
|
||||
func getAccountFromEnv(configObj *ConfigObj) {
|
||||
func getAccountFromEnv(credentials *Credentials) {
|
||||
// load from env
|
||||
if accountID := os.Getenv("KS_ACCOUNT_ID"); accountID != "" {
|
||||
configObj.AccountID = accountID
|
||||
if accountID := os.Getenv("KS_ACCOUNT_ID"); credentials.Account == "" && accountID != "" {
|
||||
credentials.Account = accountID
|
||||
}
|
||||
if clientID := os.Getenv("KS_CLIENT_ID"); clientID != "" {
|
||||
configObj.ClientID = clientID
|
||||
if clientID := os.Getenv("KS_CLIENT_ID"); credentials.ClientID == "" && clientID != "" {
|
||||
credentials.ClientID = clientID
|
||||
}
|
||||
if secretKey := os.Getenv("KS_SECRET_KEY"); secretKey != "" {
|
||||
configObj.SecretKey = secretKey
|
||||
if secretKey := os.Getenv("KS_SECRET_KEY"); credentials.SecretKey == "" && secretKey != "" {
|
||||
credentials.SecretKey = secretKey
|
||||
}
|
||||
}
|
||||
|
||||
func updateCredentials(configObj *ConfigObj, credentials *Credentials) {
|
||||
|
||||
if credentials == nil {
|
||||
credentials = &Credentials{}
|
||||
}
|
||||
getAccountFromEnv(credentials)
|
||||
|
||||
if credentials.Account != "" {
|
||||
configObj.AccountID = credentials.Account // override config Account
|
||||
}
|
||||
if credentials.ClientID != "" {
|
||||
configObj.ClientID = credentials.ClientID // override config ClientID
|
||||
}
|
||||
if credentials.SecretKey != "" {
|
||||
configObj.SecretKey = credentials.SecretKey // override config SecretKey
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
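A minimal sketch (hypothetical, inside package cautils) of the precedence encoded above: KS_* environment variables fill only empty Credentials fields, and any non-empty Credentials field then overrides the cached ConfigObj.

os.Setenv("KS_ACCOUNT_ID", "env-account")      // consulted only when creds.Account is empty
creds := &Credentials{Account: "flag-account"} // e.g. the value passed via --account
cfg := &ConfigObj{AccountID: "cached-account"} // value loaded from file or configMap

updateCredentials(cfg, creds)

// cfg.AccountID == "flag-account": explicit credentials beat env vars and cache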
193
core/cautils/customerloader_test.go
Normal file
@@ -0,0 +1,193 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func mockConfigObj() *ConfigObj {
|
||||
return &ConfigObj{
|
||||
AccountID: "aaa",
|
||||
ClientID: "bbb",
|
||||
SecretKey: "ccc",
|
||||
ClusterName: "ddd",
|
||||
CustomerAdminEMail: "ab@cd",
|
||||
Token: "eee",
|
||||
}
|
||||
}
|
||||
func mockLocalConfig() *LocalConfig {
|
||||
return &LocalConfig{
|
||||
backendAPI: nil,
|
||||
configObj: mockConfigObj(),
|
||||
}
|
||||
}
|
||||
|
||||
func mockClusterConfig() *ClusterConfig {
|
||||
return &ClusterConfig{
|
||||
backendAPI: nil,
|
||||
configObj: mockConfigObj(),
|
||||
}
|
||||
}
|
||||
func TestConfig(t *testing.T) {
|
||||
co := mockConfigObj()
|
||||
cop := ConfigObj{}
|
||||
|
||||
assert.NoError(t, json.Unmarshal(co.Config(), &cop))
|
||||
assert.Equal(t, co.AccountID, cop.AccountID)
|
||||
assert.Equal(t, co.ClientID, cop.ClientID)
|
||||
assert.Equal(t, co.SecretKey, cop.SecretKey)
|
||||
assert.Equal(t, "", cop.ClusterName) // Not copied to bytes
|
||||
assert.Equal(t, "", cop.CustomerAdminEMail) // Not copied to bytes
|
||||
assert.Equal(t, "", cop.Token) // Not copied to bytes
|
||||
|
||||
}
|
||||
|
||||
func TestITenantConfig(t *testing.T) {
|
||||
var lc ITenantConfig
|
||||
var c ITenantConfig
|
||||
lc = mockLocalConfig()
|
||||
c = mockClusterConfig()
|
||||
|
||||
co := mockConfigObj()
|
||||
|
||||
// test LocalConfig methods
|
||||
assert.Equal(t, co.AccountID, lc.GetAccountID())
|
||||
assert.Equal(t, co.ClientID, lc.GetClientID())
|
||||
assert.Equal(t, co.SecretKey, lc.GetSecretKey())
|
||||
assert.Equal(t, co.ClusterName, lc.GetContextName())
|
||||
assert.Equal(t, co.CustomerAdminEMail, lc.GetTenantEmail())
|
||||
assert.Equal(t, co.Token, lc.GetToken())
|
||||
|
||||
// test ClusterConfig methods
|
||||
assert.Equal(t, co.AccountID, c.GetAccountID())
|
||||
assert.Equal(t, co.ClientID, c.GetClientID())
|
||||
assert.Equal(t, co.SecretKey, c.GetSecretKey())
|
||||
assert.Equal(t, co.ClusterName, c.GetContextName())
|
||||
assert.Equal(t, co.CustomerAdminEMail, c.GetTenantEmail())
|
||||
assert.Equal(t, co.Token, c.GetToken())
|
||||
}
|
||||
|
||||
func TestUpdateConfigData(t *testing.T) {
|
||||
c := mockClusterConfig()
|
||||
|
||||
configMap := &corev1.ConfigMap{}
|
||||
|
||||
c.updateConfigData(configMap)
|
||||
|
||||
assert.Equal(t, c.GetAccountID(), configMap.Data["accountID"])
|
||||
assert.Equal(t, c.GetClientID(), configMap.Data["clientID"])
|
||||
assert.Equal(t, c.GetSecretKey(), configMap.Data["secretKey"])
|
||||
}
|
||||
|
||||
func TestReadConfig(t *testing.T) {
|
||||
com := mockConfigObj()
|
||||
co := &ConfigObj{}
|
||||
|
||||
b, e := json.Marshal(com)
|
||||
assert.NoError(t, e)
|
||||
|
||||
readConfig(b, co)
|
||||
|
||||
assert.Equal(t, com.AccountID, co.AccountID)
|
||||
assert.Equal(t, com.ClientID, co.ClientID)
|
||||
assert.Equal(t, com.SecretKey, co.SecretKey)
|
||||
assert.Equal(t, com.ClusterName, co.ClusterName)
|
||||
assert.Equal(t, com.CustomerAdminEMail, co.CustomerAdminEMail)
|
||||
assert.Equal(t, com.Token, co.Token)
|
||||
}
|
||||
|
||||
func TestLoadConfigFromData(t *testing.T) {
|
||||
|
||||
// use case: all data is in base config
|
||||
{
|
||||
c := mockClusterConfig()
|
||||
co := mockConfigObj()
|
||||
|
||||
configMap := &corev1.ConfigMap{}
|
||||
|
||||
c.updateConfigData(configMap)
|
||||
|
||||
c.configObj = &ConfigObj{}
|
||||
|
||||
loadConfigFromData(c.configObj, configMap.Data)
|
||||
|
||||
assert.Equal(t, c.GetAccountID(), co.AccountID)
|
||||
assert.Equal(t, c.GetClientID(), co.ClientID)
|
||||
assert.Equal(t, c.GetSecretKey(), co.SecretKey)
|
||||
assert.Equal(t, c.GetContextName(), co.ClusterName)
|
||||
assert.Equal(t, c.GetTenantEmail(), co.CustomerAdminEMail)
|
||||
assert.Equal(t, c.GetToken(), co.Token)
|
||||
}
|
||||
|
||||
// use case: all data is in config.json
|
||||
{
|
||||
c := mockClusterConfig()
|
||||
|
||||
co := mockConfigObj()
|
||||
configMap := &corev1.ConfigMap{
|
||||
Data: make(map[string]string),
|
||||
}
|
||||
|
||||
configMap.Data["config.json"] = string(c.GetConfigObj().Config())
|
||||
c.configObj = &ConfigObj{}
|
||||
|
||||
loadConfigFromData(c.configObj, configMap.Data)
|
||||
|
||||
assert.Equal(t, c.GetAccountID(), co.AccountID)
|
||||
assert.Equal(t, c.GetClientID(), co.ClientID)
|
||||
assert.Equal(t, c.GetSecretKey(), co.SecretKey)
|
||||
}
|
||||
|
||||
// use case: some data is in config.json
|
||||
{
|
||||
c := mockClusterConfig()
|
||||
configMap := &corev1.ConfigMap{
|
||||
Data: make(map[string]string),
|
||||
}
|
||||
|
||||
// add to map
|
||||
configMap.Data["clientID"] = c.configObj.ClientID
|
||||
configMap.Data["secretKey"] = c.configObj.SecretKey
|
||||
|
||||
// delete the content
|
||||
c.configObj.ClientID = ""
|
||||
c.configObj.SecretKey = ""
|
||||
|
||||
configMap.Data["config.json"] = string(c.GetConfigObj().Config())
|
||||
loadConfigFromData(c.configObj, configMap.Data)
|
||||
|
||||
assert.NotEmpty(t, c.GetAccountID())
|
||||
assert.NotEmpty(t, c.GetClientID())
|
||||
assert.NotEmpty(t, c.GetSecretKey())
|
||||
}
|
||||
|
||||
// use case: some data is in config.json
|
||||
{
|
||||
c := mockClusterConfig()
|
||||
configMap := &corev1.ConfigMap{
|
||||
Data: make(map[string]string),
|
||||
}
|
||||
|
||||
c.configObj.AccountID = "tttt"
|
||||
|
||||
// add to map
|
||||
configMap.Data["accountID"] = mockConfigObj().AccountID
|
||||
configMap.Data["clientID"] = c.configObj.ClientID
|
||||
configMap.Data["secretKey"] = c.configObj.SecretKey
|
||||
|
||||
// delete the content
|
||||
c.configObj.ClientID = ""
|
||||
c.configObj.SecretKey = ""
|
||||
|
||||
configMap.Data["config.json"] = string(c.GetConfigObj().Config())
|
||||
loadConfigFromData(c.configObj, configMap.Data)
|
||||
|
||||
assert.Equal(t, mockConfigObj().AccountID, c.GetAccountID())
|
||||
assert.NotEmpty(t, c.GetClientID())
|
||||
assert.NotEmpty(t, c.GetSecretKey())
|
||||
}
|
||||
|
||||
}
|
||||
@@ -2,25 +2,24 @@ package cautils
|
||||
|
||||
import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
apis "github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
apis "github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
)
|
||||
|
||||
// K8SResources map[<api group>/<api version>/<resource>][]<resourceID>
|
||||
type K8SResources map[string][]string
|
||||
type ArmoResources map[string][]string
|
||||
type KSResources map[string][]string
|
||||
|
||||
type OPASessionObj struct {
|
||||
K8SResources *K8SResources // input k8s objects
|
||||
ArmoResource *ArmoResources // input ARMO objects
|
||||
ArmoResource *KSResources // input ARMO objects
|
||||
Policies []reporthandling.Framework // list of frameworks to scan
|
||||
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<resource ID>]<resource>
|
||||
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<resource ID>]<resource result>
|
||||
ResourceSource map[string]string // resources sources, map[<resource ID>]<resource result>
|
||||
PostureReport *reporthandling.PostureReport // scan results v1 - Remove
|
||||
ResourceSource map[string]reporthandling.Source // resources sources, map[<resource ID>]<resource result>
|
||||
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
|
||||
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
|
||||
RegoInputData RegoInputData // input passed to rego for scanning. map[<control name>][<input arguments>]
|
||||
@@ -39,13 +38,9 @@ func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SRe
|
||||
ResourcesResult: make(map[string]resourcesresults.Result),
|
||||
InfoMap: make(map[string]apis.StatusInfo),
|
||||
ResourceToControlsMap: make(map[string][]string),
|
||||
ResourceSource: make(map[string]string),
|
||||
ResourceSource: make(map[string]reporthandling.Source),
|
||||
SessionID: scanInfo.ScanID,
|
||||
PostureReport: &reporthandling.PostureReport{
|
||||
ClusterName: ClusterName,
|
||||
CustomerGUID: CustomerGUID,
|
||||
},
|
||||
Metadata: scanInfoToScanMetadata(scanInfo),
|
||||
Metadata: scanInfoToScanMetadata(scanInfo),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -56,11 +51,10 @@ func NewOPASessionObjMock() *OPASessionObj {
|
||||
AllResources: make(map[string]workloadinterface.IMetadata),
|
||||
ResourcesResult: make(map[string]resourcesresults.Result),
|
||||
Report: &reporthandlingv2.PostureReport{},
|
||||
PostureReport: &reporthandling.PostureReport{
|
||||
ClusterName: "",
|
||||
CustomerGUID: "",
|
||||
ReportID: "",
|
||||
JobID: "",
|
||||
Metadata: &reporthandlingv2.Metadata{
|
||||
ScanMetadata: reporthandlingv2.ScanMetadata{
|
||||
ScanningTarget: 0,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
pkgcautils "github.com/armosec/utils-go/utils"
|
||||
"golang.org/x/mod/semver"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/utils-go/boolutils"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
func NewPolicies() *Policies {
|
||||
@@ -22,7 +22,7 @@ func (policies *Policies) Set(frameworks []reporthandling.Framework, version str
|
||||
for j := range frameworks[i].Controls {
|
||||
compatibleRules := []reporthandling.PolicyRule{}
|
||||
for r := range frameworks[i].Controls[j].Rules {
|
||||
if !ruleWithArmoOpaDependency(frameworks[i].Controls[j].Rules[r].Attributes) && isRuleKubescapeVersionCompatible(frameworks[i].Controls[j].Rules[r].Attributes, version) {
|
||||
if !ruleWithKSOpaDependency(frameworks[i].Controls[j].Rules[r].Attributes) && isRuleKubescapeVersionCompatible(frameworks[i].Controls[j].Rules[r].Attributes, version) {
|
||||
compatibleRules = append(compatibleRules, frameworks[i].Controls[j].Rules[r])
|
||||
}
|
||||
}
|
||||
@@ -35,12 +35,12 @@ func (policies *Policies) Set(frameworks []reporthandling.Framework, version str
|
||||
}
|
||||
}
|
||||
|
||||
func ruleWithArmoOpaDependency(attributes map[string]interface{}) bool {
|
||||
func ruleWithKSOpaDependency(attributes map[string]interface{}) bool {
|
||||
if attributes == nil {
|
||||
return false
|
||||
}
|
||||
if s, ok := attributes["armoOpa"]; ok { // TODO - make global
|
||||
return pkgcautils.StringToBool(s.(string))
|
||||
return boolutils.StringToBool(s.(string))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
package cautils
|
||||
|
||||
// CA environment vars
|
||||
// Kubescape Cloud environment vars
|
||||
var (
|
||||
CustomerGUID = ""
|
||||
ClusterName = ""
|
||||
|
||||
@@ -8,9 +8,13 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes/localworkload"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
@@ -26,23 +30,55 @@ const (
|
||||
JSON_FILE_FORMAT FileFormat = "json"
|
||||
)
|
||||
|
||||
func LoadResourcesFromFiles(inputPatterns []string) (map[string][]workloadinterface.IMetadata, error) {
|
||||
files, errs := listFiles(inputPatterns)
|
||||
// LoadResourcesFromHelmCharts scans a given path (recursively) for helm charts, renders the templates and returns a map of workloads and a map of chart names
|
||||
func LoadResourcesFromHelmCharts(basePath string) (map[string][]workloadinterface.IMetadata, map[string]string) {
|
||||
directories, _ := listDirs(basePath)
|
||||
helmDirectories := make([]string, 0)
|
||||
for _, dir := range directories {
|
||||
if ok, _ := IsHelmDirectory(dir); ok {
|
||||
helmDirectories = append(helmDirectories, dir)
|
||||
}
|
||||
}
|
||||
|
||||
sourceToWorkloads := map[string][]workloadinterface.IMetadata{}
|
||||
sourceToChartName := map[string]string{}
|
||||
for _, helmDir := range helmDirectories {
|
||||
chart, err := NewHelmChart(helmDir)
|
||||
if err == nil {
|
||||
wls, errs := chart.GetWorkloadsWithDefaultValues()
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("Rendering of Helm chart template failed: %v", errs))
|
||||
continue
|
||||
}
|
||||
|
||||
chartName := chart.GetName()
|
||||
for k, v := range wls {
|
||||
sourceToWorkloads[k] = v
|
||||
sourceToChartName[k] = chartName
|
||||
}
|
||||
}
|
||||
}
|
||||
return sourceToWorkloads, sourceToChartName
|
||||
}
|
||||
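A minimal sketch (hypothetical caller) of the new Helm loading entry point; "./chart" is a placeholder path:

sourceToWorkloads, sourceToChartName := cautils.LoadResourcesFromHelmCharts("./chart")
for source, workloads := range sourceToWorkloads {
	for _, w := range workloads {
		// every rendered template file maps back to the name of the chart it came from
		fmt.Printf("chart %q: %s (from %s)\n", sourceToChartName[source], w.GetID(), source)
	}
}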
|
||||
func LoadResourcesFromFiles(input, rootPath string) map[string][]workloadinterface.IMetadata {
|
||||
files, errs := listFiles(input)
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("%v", errs))
|
||||
}
|
||||
if len(files) == 0 {
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
workloads, errs := loadFiles(files)
|
||||
workloads, errs := loadFiles(rootPath, files)
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("%v", errs))
|
||||
}
|
||||
return workloads, nil
|
||||
|
||||
return workloads
|
||||
}
|
||||
|
||||
func loadFiles(filePaths []string) (map[string][]workloadinterface.IMetadata, []error) {
|
||||
func loadFiles(rootPath string, filePaths []string) (map[string][]workloadinterface.IMetadata, []error) {
|
||||
workloads := make(map[string][]workloadinterface.IMetadata, 0)
|
||||
errs := []error{}
|
||||
for i := range filePaths {
|
||||
@@ -51,15 +87,30 @@ func loadFiles(filePaths []string) (map[string][]workloadinterface.IMetadata, []
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
if len(f) == 0 {
|
||||
continue // empty file
|
||||
}
|
||||
|
||||
w, e := ReadFile(f, GetFileFormat(filePaths[i]))
|
||||
errs = append(errs, e...)
|
||||
if w != nil {
|
||||
if _, ok := workloads[filePaths[i]]; !ok {
|
||||
workloads[filePaths[i]] = []workloadinterface.IMetadata{}
|
||||
if e != nil {
|
||||
logger.L().Debug("failed to read file", helpers.String("file", filePaths[i]), helpers.Error(e))
|
||||
}
|
||||
if len(w) != 0 {
|
||||
path := filePaths[i]
|
||||
if _, ok := workloads[path]; !ok {
|
||||
workloads[path] = []workloadinterface.IMetadata{}
|
||||
}
|
||||
wSlice := workloads[filePaths[i]]
|
||||
wSlice = append(wSlice, w...)
|
||||
workloads[filePaths[i]] = wSlice
|
||||
wSlice := workloads[path]
|
||||
for j := range w {
|
||||
lw := localworkload.NewLocalWorkload(w[j].GetObject())
|
||||
if relPath, err := filepath.Rel(rootPath, path); err == nil {
|
||||
lw.SetPath(relPath)
|
||||
} else {
|
||||
lw.SetPath(path)
|
||||
}
|
||||
wSlice = append(wSlice, lw)
|
||||
}
|
||||
workloads[path] = wSlice
|
||||
}
|
||||
}
|
||||
return workloads, errs
|
||||
@@ -68,46 +119,65 @@ func loadFiles(filePaths []string) (map[string][]workloadinterface.IMetadata, []
|
||||
func loadFile(filePath string) ([]byte, error) {
|
||||
return os.ReadFile(filePath)
|
||||
}
|
||||
func ReadFile(fileContent []byte, fileFromat FileFormat) ([]workloadinterface.IMetadata, []error) {
|
||||
func ReadFile(fileContent []byte, fileFormat FileFormat) ([]workloadinterface.IMetadata, error) {
|
||||
|
||||
switch fileFromat {
|
||||
switch fileFormat {
|
||||
case YAML_FILE_FORMAT:
|
||||
return readYamlFile(fileContent)
|
||||
case JSON_FILE_FORMAT:
|
||||
return readJsonFile(fileContent)
|
||||
default:
|
||||
return nil, nil // []error{fmt.Errorf("file extension %s not supported", fileFromat)}
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
func listFiles(patterns []string) ([]string, []error) {
|
||||
files := []string{}
|
||||
errs := []error{}
|
||||
for i := range patterns {
|
||||
if strings.HasPrefix(patterns[i], "http") {
|
||||
continue
|
||||
}
|
||||
if !filepath.IsAbs(patterns[i]) {
|
||||
o, _ := os.Getwd()
|
||||
patterns[i] = filepath.Join(o, patterns[i])
|
||||
}
|
||||
if IsFile(patterns[i]) {
|
||||
files = append(files, patterns[i])
|
||||
} else {
|
||||
f, err := glob(filepath.Split(patterns[i])) //filepath.Glob(patterns[i])
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
files = append(files, f...)
|
||||
}
|
||||
}
|
||||
}
|
||||
return files, errs
|
||||
// listFiles returns the list of absolute file paths matching the given pattern and a list of errors
|
||||
func listFiles(pattern string) ([]string, []error) {
|
||||
return listFilesOrDirectories(pattern, false)
|
||||
}
|
||||
|
||||
func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, []error) {
|
||||
// listDirs returns the list of absolute directory paths matching the given pattern and a list of errors
|
||||
func listDirs(pattern string) ([]string, []error) {
|
||||
return listFilesOrDirectories(pattern, true)
|
||||
}
|
||||
|
||||
func listFilesOrDirectories(pattern string, onlyDirectories bool) ([]string, []error) {
|
||||
var paths []string
|
||||
errs := []error{}
|
||||
|
||||
if !filepath.IsAbs(pattern) {
|
||||
o, _ := os.Getwd()
|
||||
pattern = filepath.Join(o, pattern)
|
||||
}
|
||||
|
||||
if !onlyDirectories && IsFile(pattern) {
|
||||
paths = append(paths, pattern)
|
||||
return paths, errs
|
||||
}
|
||||
|
||||
root, shouldMatch := filepath.Split(pattern)
|
||||
|
||||
if IsDir(pattern) {
|
||||
root = pattern
|
||||
shouldMatch = "*"
|
||||
}
|
||||
if shouldMatch == "" {
|
||||
shouldMatch = "*"
|
||||
}
|
||||
|
||||
f, err := glob(root, shouldMatch, onlyDirectories)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
paths = append(paths, f...)
|
||||
}
|
||||
|
||||
return paths, errs
|
||||
}
|
||||
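A minimal sketch (hypothetical, inside package cautils) of how the single-pattern helpers above resolve their input; the paths are placeholders:

files, errs := listFiles("/tmp/manifests")        // a directory: all yaml/json files under it, walked recursively
single, _ := listFiles("/tmp/manifests/pod.yaml") // an existing file: returned as-is
matched, _ := listFiles("/tmp/manifests/*.yaml")  // a glob: base names matched against "*.yaml"
dirs, _ := listDirs("/tmp/manifests")             // the directory tree itself, used for Helm chart discovery
fmt.Println(len(files), len(single), len(matched), len(dirs), len(errs))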
|
||||
func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, error) {
|
||||
defer recover()
|
||||
|
||||
r := bytes.NewReader(yamlFile)
|
||||
dec := yaml.NewDecoder(r)
|
||||
yamlObjs := []workloadinterface.IMetadata{}
|
||||
@@ -126,19 +196,17 @@ func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, []error) {
|
||||
yamlObjs = append(yamlObjs, o)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
errs = append(errs, fmt.Errorf("failed to convert yaml file to map[string]interface, file content: %v", j))
|
||||
}
|
||||
}
|
||||
|
||||
return yamlObjs, errs
|
||||
return yamlObjs, nil
|
||||
}
|
||||
|
||||
func readJsonFile(jsonFile []byte) ([]workloadinterface.IMetadata, []error) {
|
||||
func readJsonFile(jsonFile []byte) ([]workloadinterface.IMetadata, error) {
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
var jsonObj interface{}
|
||||
if err := json.Unmarshal(jsonFile, &jsonObj); err != nil {
|
||||
return workloads, []error{err}
|
||||
return workloads, err
|
||||
}
|
||||
|
||||
convertJsonToWorkload(jsonObj, &workloads)
|
||||
@@ -184,21 +252,40 @@ func IsJson(filePath string) bool {
|
||||
return StringInSlice(JSON_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
|
||||
}
|
||||
|
||||
func glob(root, pattern string) ([]string, error) {
|
||||
func glob(root, pattern string, onlyDirectories bool) ([]string, error) {
|
||||
var matches []string
|
||||
|
||||
err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// listing only directories
|
||||
if onlyDirectories {
|
||||
if info.IsDir() {
|
||||
if matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {
|
||||
return err
|
||||
} else if matched {
|
||||
matches = append(matches, path)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// listing only files
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
fileFormat := GetFileFormat(path)
|
||||
if !(fileFormat == JSON_FILE_FORMAT || fileFormat == YAML_FILE_FORMAT) {
|
||||
return nil
|
||||
}
|
||||
if matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {
|
||||
return err
|
||||
} else if matched {
|
||||
matches = append(matches, path)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
@@ -206,6 +293,8 @@ func glob(root, pattern string) ([]string, error) {
|
||||
}
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// IsFile checks if a given path is a file
|
||||
func IsFile(name string) bool {
|
||||
if fi, err := os.Stat(name); err == nil {
|
||||
if fi.Mode().IsRegular() {
|
||||
@@ -215,6 +304,16 @@ func IsFile(name string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsDir checks if a given path is a directory
|
||||
func IsDir(name string) bool {
|
||||
if info, err := os.Stat(name); err == nil {
|
||||
if info.IsDir() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func GetFileFormat(filePath string) FileFormat {
|
||||
if IsYaml(filePath) {
|
||||
return YAML_FILE_FORMAT
|
||||
|
||||
@@ -6,59 +6,100 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes/localworkload"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func onlineBoutiquePath() string {
|
||||
o, _ := os.Getwd()
|
||||
return filepath.Join(filepath.Dir(o), "../examples/online-boutique/*")
|
||||
return filepath.Join(filepath.Dir(o), "..", "examples", "online-boutique")
|
||||
}
|
||||
|
||||
func helmChartPath() string {
|
||||
o, _ := os.Getwd()
|
||||
return filepath.Join(filepath.Dir(o), "..", "examples", "helm_chart")
|
||||
}
|
||||
|
||||
func TestListFiles(t *testing.T) {
|
||||
|
||||
filesPath := onlineBoutiquePath()
|
||||
|
||||
files, errs := listFiles([]string{filesPath})
|
||||
files, errs := listFiles(filesPath)
|
||||
assert.Equal(t, 0, len(errs))
|
||||
assert.Equal(t, 12, len(files))
|
||||
}
|
||||
|
||||
func TestLoadResourcesFromFiles(t *testing.T) {
|
||||
workloads, err := LoadResourcesFromFiles([]string{onlineBoutiquePath()})
|
||||
assert.NoError(t, err)
|
||||
workloads := LoadResourcesFromFiles(onlineBoutiquePath(), "")
|
||||
assert.Equal(t, 12, len(workloads))
|
||||
|
||||
for i, w := range workloads {
|
||||
switch filepath.Base(i) {
|
||||
case "adservice.yaml":
|
||||
assert.Equal(t, 2, len(w))
|
||||
assert.Equal(t, "apps/v1//Deployment/adservice", w[0].GetID())
|
||||
assert.Equal(t, "/v1//Service/adservice", w[1].GetID())
|
||||
assert.Equal(t, "apps/v1//Deployment/adservice", getRelativePath(w[0].GetID()))
|
||||
assert.Equal(t, "/v1//Service/adservice", getRelativePath(w[1].GetID()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadResourcesFromHelmCharts(t *testing.T) {
|
||||
sourceToWorkloads, sourceToChartName := LoadResourcesFromHelmCharts(helmChartPath())
|
||||
assert.Equal(t, 6, len(sourceToWorkloads))
|
||||
|
||||
for file, workloads := range sourceToWorkloads {
|
||||
assert.Equalf(t, 1, len(workloads), "expected 1 workload in file %s", file)
|
||||
|
||||
w := workloads[0]
|
||||
assert.True(t, localworkload.IsTypeLocalWorkload(w.GetObject()), "Expected localworkload as object type")
|
||||
assert.Equal(t, "kubescape", sourceToChartName[file])
|
||||
|
||||
switch filepath.Base(file) {
|
||||
case "serviceaccount.yaml":
|
||||
assert.Equal(t, "/v1//ServiceAccount/kubescape-discovery", getRelativePath(w.GetID()))
|
||||
case "clusterrole.yaml":
|
||||
assert.Equal(t, "rbac.authorization.k8s.io/v1//ClusterRole/-kubescape", getRelativePath(w.GetID()))
|
||||
case "cronjob.yaml":
|
||||
assert.Equal(t, "batch/v1//CronJob/-kubescape", getRelativePath(w.GetID()))
|
||||
case "role.yaml":
|
||||
assert.Equal(t, "rbac.authorization.k8s.io/v1//Role/-kubescape", getRelativePath(w.GetID()))
|
||||
case "rolebinding.yaml":
|
||||
assert.Equal(t, "rbac.authorization.k8s.io/v1//RoleBinding/-kubescape", getRelativePath(w.GetID()))
|
||||
case "clusterrolebinding.yaml":
|
||||
assert.Equal(t, "rbac.authorization.k8s.io/v1//ClusterRoleBinding/-kubescape", getRelativePath(w.GetID()))
|
||||
default:
|
||||
assert.Failf(t, "missing case for file: %s", filepath.Base(file))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadFiles(t *testing.T) {
|
||||
files, _ := listFiles([]string{onlineBoutiquePath()})
|
||||
_, err := loadFiles(files)
|
||||
files, _ := listFiles(onlineBoutiquePath())
|
||||
_, err := loadFiles("", files)
|
||||
assert.Equal(t, 0, len(err))
|
||||
}
|
||||
|
||||
func TestListDirs(t *testing.T) {
|
||||
dirs, _ := listDirs(filepath.Join(onlineBoutiquePath(), "adservice.yaml"))
|
||||
assert.Equal(t, 0, len(dirs))
|
||||
|
||||
expectedDirs := []string{filepath.Join("examples", "helm_chart"), filepath.Join("examples", "helm_chart", "templates")}
|
||||
dirs, _ = listDirs(helmChartPath())
|
||||
assert.Equal(t, len(expectedDirs), len(dirs))
|
||||
for i := range expectedDirs {
|
||||
assert.Contains(t, dirs[i], expectedDirs[i])
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadFile(t *testing.T) {
|
||||
files, _ := listFiles([]string{strings.Replace(onlineBoutiquePath(), "*", "adservice.yaml", 1)})
|
||||
files, _ := listFiles(filepath.Join(onlineBoutiquePath(), "adservice.yaml"))
|
||||
assert.Equal(t, 1, len(files))
|
||||
|
||||
_, err := loadFile(files[0])
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
func TestMapResources(t *testing.T) {
|
||||
// policyHandler := &PolicyHandler{}
|
||||
// k8sResources, err := policyHandler.loadResources(opaSessionObj.Frameworks, scanInfo)
|
||||
// files, _ := listFiles([]string{onlineBoutiquePath()})
|
||||
// bb, err := loadFile(files[0])
|
||||
// if len(err) > 0 {
|
||||
// t.Errorf("%v", err)
|
||||
// }
|
||||
// for i := range bb {
|
||||
// t.Errorf("%s", bb[i].ToString())
|
||||
// }
|
||||
|
||||
func getRelativePath(p string) string {
|
||||
pp := strings.SplitAfter(p, "api=")
|
||||
return pp[1]
|
||||
}
|
||||
|
||||
@@ -1,370 +0,0 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// =======================================================================================================================
|
||||
// =============================================== ArmoAPI ===============================================================
|
||||
// =======================================================================================================================
|
||||
|
||||
var (
|
||||
// ATTENTION!!!
|
||||
// Changes in this URLs variable names, or in the usage is affecting the build process! BE CAREFUL
|
||||
armoERURL = "report.armo.cloud"
|
||||
armoBEURL = "api.armo.cloud"
|
||||
armoFEURL = "portal.armo.cloud"
|
||||
armoAUTHURL = "auth.armo.cloud"
|
||||
|
||||
armoStageERURL = "report-ks.eustage2.cyberarmorsoft.com"
|
||||
armoStageBEURL = "api-stage.armo.cloud"
|
||||
armoStageFEURL = "armoui.eustage2.cyberarmorsoft.com"
|
||||
armoStageAUTHURL = "eggauth.eustage2.cyberarmorsoft.com"
|
||||
|
||||
armoDevERURL = "report.eudev3.cyberarmorsoft.com"
|
||||
armoDevBEURL = "api-dev.armo.cloud"
|
||||
armoDevFEURL = "armoui-dev.eudev3.cyberarmorsoft.com"
|
||||
armoDevAUTHURL = "eggauth.eudev3.cyberarmorsoft.com"
|
||||
)
|
||||
|
||||
// Armo API for downloading policies
|
||||
type ArmoAPI struct {
|
||||
httpClient *http.Client
|
||||
apiURL string
|
||||
authURL string
|
||||
erURL string
|
||||
feURL string
|
||||
accountID string
|
||||
clientID string
|
||||
secretKey string
|
||||
feToken FeLoginResponse
|
||||
authCookie string
|
||||
loggedIn bool
|
||||
}
|
||||
|
||||
var globalArmoAPIConnector *ArmoAPI
|
||||
|
||||
func SetARMOAPIConnector(armoAPI *ArmoAPI) {
|
||||
logger.L().Debug("Armo URLs", helpers.String("api", armoAPI.apiURL), helpers.String("auth", armoAPI.authURL), helpers.String("report", armoAPI.erURL), helpers.String("UI", armoAPI.feURL))
|
||||
globalArmoAPIConnector = armoAPI
|
||||
}
|
||||
|
||||
func GetArmoAPIConnector() *ArmoAPI {
|
||||
if globalArmoAPIConnector == nil {
|
||||
// logger.L().Error("returning nil API connector")
|
||||
SetARMOAPIConnector(NewARMOAPIProd())
|
||||
}
|
||||
return globalArmoAPIConnector
|
||||
}
|
||||
|
||||
func NewARMOAPIDev() *ArmoAPI {
|
||||
apiObj := newArmoAPI()
|
||||
|
||||
apiObj.apiURL = armoDevBEURL
|
||||
apiObj.authURL = armoDevAUTHURL
|
||||
apiObj.erURL = armoDevERURL
|
||||
apiObj.feURL = armoDevFEURL
|
||||
|
||||
return apiObj
|
||||
}
|
||||
|
||||
func NewARMOAPIProd() *ArmoAPI {
|
||||
apiObj := newArmoAPI()
|
||||
|
||||
apiObj.apiURL = armoBEURL
|
||||
apiObj.erURL = armoERURL
|
||||
apiObj.feURL = armoFEURL
|
||||
apiObj.authURL = armoAUTHURL
|
||||
|
||||
return apiObj
|
||||
}
|
||||
|
||||
func NewARMOAPIStaging() *ArmoAPI {
|
||||
apiObj := newArmoAPI()
|
||||
|
||||
apiObj.apiURL = armoStageBEURL
|
||||
apiObj.erURL = armoStageERURL
|
||||
apiObj.feURL = armoStageFEURL
|
||||
apiObj.authURL = armoStageAUTHURL
|
||||
|
||||
return apiObj
|
||||
}
|
||||
|
||||
func NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL, armoAUTHURL string) *ArmoAPI {
|
||||
apiObj := newArmoAPI()
|
||||
|
||||
apiObj.erURL = armoERURL
|
||||
apiObj.apiURL = armoBEURL
|
||||
apiObj.feURL = armoFEURL
|
||||
apiObj.authURL = armoAUTHURL
|
||||
|
||||
return apiObj
|
||||
}
|
||||
|
||||
func newArmoAPI() *ArmoAPI {
|
||||
return &ArmoAPI{
|
||||
httpClient: &http.Client{Timeout: time.Duration(61) * time.Second},
|
||||
loggedIn: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) Post(fullURL string, headers map[string]string, body []byte) (string, error) {
|
||||
if headers == nil {
|
||||
headers = make(map[string]string)
|
||||
}
|
||||
armoAPI.appendAuthHeaders(headers)
|
||||
return HttpPost(armoAPI.httpClient, fullURL, headers, body)
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) Delete(fullURL string, headers map[string]string) (string, error) {
|
||||
if headers == nil {
|
||||
headers = make(map[string]string)
|
||||
}
|
||||
armoAPI.appendAuthHeaders(headers)
|
||||
return HttpDelete(armoAPI.httpClient, fullURL, headers)
|
||||
}
|
||||
func (armoAPI *ArmoAPI) Get(fullURL string, headers map[string]string) (string, error) {
|
||||
if headers == nil {
|
||||
headers = make(map[string]string)
|
||||
}
|
||||
armoAPI.appendAuthHeaders(headers)
|
||||
return HttpGetter(armoAPI.httpClient, fullURL, headers)
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetAccountID() string { return armoAPI.accountID }
|
||||
func (armoAPI *ArmoAPI) IsLoggedIn() bool { return armoAPI.loggedIn }
|
||||
func (armoAPI *ArmoAPI) GetClientID() string { return armoAPI.clientID }
|
||||
func (armoAPI *ArmoAPI) GetSecretKey() string { return armoAPI.secretKey }
|
||||
func (armoAPI *ArmoAPI) GetFrontendURL() string { return armoAPI.feURL }
|
||||
func (armoAPI *ArmoAPI) GetAPIURL() string { return armoAPI.apiURL }
|
||||
func (armoAPI *ArmoAPI) GetReportReceiverURL() string { return armoAPI.erURL }
|
||||
func (armoAPI *ArmoAPI) SetAccountID(accountID string) { armoAPI.accountID = accountID }
|
||||
func (armoAPI *ArmoAPI) SetClientID(clientID string) { armoAPI.clientID = clientID }
|
||||
func (armoAPI *ArmoAPI) SetSecretKey(secretKey string) { armoAPI.secretKey = secretKey }
|
||||
|
||||
func (armoAPI *ArmoAPI) GetFramework(name string) (*reporthandling.Framework, error) {
|
||||
respStr, err := armoAPI.Get(armoAPI.getFrameworkURL(name), nil)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
framework := &reporthandling.Framework{}
|
||||
if err = JSONDecoder(respStr).Decode(framework); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return framework, err
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetFrameworks() ([]reporthandling.Framework, error) {
|
||||
respStr, err := armoAPI.Get(armoAPI.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
frameworks := []reporthandling.Framework{}
|
||||
if err = JSONDecoder(respStr).Decode(&frameworks); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// SaveInFile(framework, GetDefaultPath(name+".json"))
|
||||
|
||||
return frameworks, err
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetControl(policyName string) (*reporthandling.Control, error) {
|
||||
return nil, fmt.Errorf("control api is not public")
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
|
||||
exceptions := []armotypes.PostureExceptionPolicy{}
|
||||
|
||||
respStr, err := armoAPI.Get(armoAPI.getExceptionsURL(clusterName), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = JSONDecoder(respStr).Decode(&exceptions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return exceptions, nil
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetTenant() (*TenantResponse, error) {
|
||||
url := armoAPI.getAccountURL()
|
||||
if armoAPI.accountID != "" {
|
||||
url = fmt.Sprintf("%s?customerGUID=%s", url, armoAPI.accountID)
|
||||
}
|
||||
respStr, err := armoAPI.Get(url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tenant := &TenantResponse{}
|
||||
if err = JSONDecoder(respStr).Decode(tenant); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tenant.TenantID != "" {
|
||||
armoAPI.accountID = tenant.TenantID
|
||||
}
|
||||
return tenant, nil
|
||||
}
|
||||
|
||||
// GetAccountConfig returns the customer configuration for the given cluster, falling back to the account-wide (customer scope) configuration when needed
|
||||
func (armoAPI *ArmoAPI) GetAccountConfig(clusterName string) (*armotypes.CustomerConfig, error) {
|
||||
accountConfig := &armotypes.CustomerConfig{}
|
||||
if armoAPI.accountID == "" {
|
||||
return accountConfig, nil
|
||||
}
|
||||
respStr, err := armoAPI.Get(armoAPI.getAccountConfig(clusterName), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = JSONDecoder(respStr).Decode(&accountConfig); err != nil {
|
||||
// try with default scope
|
||||
respStr, err = armoAPI.Get(armoAPI.getAccountConfigDefault(clusterName), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = JSONDecoder(respStr).Decode(&accountConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return accountConfig, nil
|
||||
}
|
||||
|
||||
// ControlsInputs // map[<control name>][<input arguments>]
|
||||
func (armoAPI *ArmoAPI) GetControlsInputs(clusterName string) (map[string][]string, error) {
|
||||
accountConfig, err := armoAPI.GetAccountConfig(clusterName)
|
||||
if err == nil {
|
||||
return accountConfig.Settings.PostureControlInputs, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) ListCustomFrameworks() ([]string, error) {
|
||||
respStr, err := armoAPI.Get(armoAPI.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
frs := []reporthandling.Framework{}
|
||||
if err = json.Unmarshal([]byte(respStr), &frs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
frameworkList := []string{}
|
||||
for _, fr := range frs {
|
||||
if !isNativeFramework(fr.Name) {
|
||||
frameworkList = append(frameworkList, fr.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return frameworkList, nil
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) ListFrameworks() ([]string, error) {
|
||||
respStr, err := armoAPI.Get(armoAPI.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
frs := []reporthandling.Framework{}
|
||||
if err = json.Unmarshal([]byte(respStr), &frs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
frameworkList := []string{}
|
||||
for _, fr := range frs {
|
||||
if isNativeFramework(fr.Name) {
|
||||
frameworkList = append(frameworkList, strings.ToLower(fr.Name))
|
||||
} else {
|
||||
frameworkList = append(frameworkList, fr.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return frameworkList, nil
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) ListControls(l ListType) ([]string, error) {
|
||||
return nil, fmt.Errorf("control api is not public")
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) PostExceptions(exceptions []armotypes.PostureExceptionPolicy) error {
|
||||
|
||||
for i := range exceptions {
|
||||
ex, err := json.Marshal(exceptions[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = armoAPI.Post(armoAPI.exceptionsURL(""), map[string]string{"Content-Type": "application/json"}, ex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) DeleteException(exceptionName string) error {
|
||||
|
||||
_, err := armoAPI.Delete(armoAPI.exceptionsURL(exceptionName), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (armoAPI *ArmoAPI) Login() error {
|
||||
if armoAPI.accountID == "" {
|
||||
return fmt.Errorf("failed to login, missing accountID")
|
||||
}
|
||||
if armoAPI.clientID == "" {
|
||||
return fmt.Errorf("failed to login, missing clientID")
|
||||
}
|
||||
if armoAPI.secretKey == "" {
|
||||
return fmt.Errorf("failed to login, missing secretKey")
|
||||
}
|
||||
|
||||
// build the login payload
|
||||
feLoginData := FeLoginData{ClientId: armoAPI.clientID, Secret: armoAPI.secretKey}
|
||||
body, _ := json.Marshal(feLoginData)
|
||||
|
||||
resp, err := http.Post(armoAPI.getApiToken(), "application/json", bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("error authenticating: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
responseBody, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var feLoginResponse FeLoginResponse
|
||||
|
||||
if err = json.Unmarshal(responseBody, &feLoginResponse); err != nil {
|
||||
return err
|
||||
}
|
||||
armoAPI.feToken = feLoginResponse
|
||||
|
||||
/* Now we have JWT */
|
||||
|
||||
armoAPI.authCookie, err = armoAPI.getAuthCookie()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
armoAPI.loggedIn = true
|
||||
return nil
|
||||
}
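
A minimal, hypothetical caller-side sketch (not part of the diff) showing how the constructors above can be combined with the global connector; the function name `setupConnector` and the switch values are illustrative only.

```go
func setupConnector(env string, accountID string) *ArmoAPI {
	var api *ArmoAPI
	switch env {
	case "dev":
		api = NewARMOAPIDev()
	case "staging":
		api = NewARMOAPIStaging()
	default:
		api = NewARMOAPIProd()
	}
	api.SetAccountID(accountID)

	// Register the connector globally; later callers can share it via GetArmoAPIConnector().
	SetARMOAPIConnector(api)
	return GetArmoAPIConnector()
}
```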
|
||||
@@ -12,7 +12,7 @@ type FeLoginResponse struct {
|
||||
Expires string `json:"expires"`
|
||||
}
|
||||
|
||||
type ArmoSelectCustomer struct {
|
||||
type KSCloudSelectCustomer struct {
|
||||
SelectedCustomerGuid string `json:"selectedCustomer"`
|
||||
}
|
||||
|
||||
|
||||
@@ -3,8 +3,8 @@ package getter
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/opa-utils/gitregostore"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/gitregostore"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// =======================================================================================================================
|
||||
|
||||
@@ -2,7 +2,7 @@ package getter
|
||||
|
||||
import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// supported listing
|
||||
|
||||
363
core/cautils/getter/kscloudapi.go
Normal file
@@ -0,0 +1,363 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
var (
|
||||
ksCloudERURL = "report.armo.cloud"
|
||||
ksCloudBEURL = "api.armosec.io"
|
||||
ksCloudFEURL = "cloud.armosec.io"
|
||||
ksCloudAUTHURL = "auth.armosec.io"
|
||||
|
||||
ksCloudStageERURL = "report-ks.eustage2.cyberarmorsoft.com"
|
||||
ksCloudStageBEURL = "api-stage.armosec.io"
|
||||
ksCloudStageFEURL = "armoui-stage.armosec.io"
|
||||
ksCloudStageAUTHURL = "eggauth-stage.armosec.io"
|
||||
|
||||
ksCloudDevERURL = "report.eudev3.cyberarmorsoft.com"
|
||||
ksCloudDevBEURL = "api-dev.armosec.io"
|
||||
ksCloudDevFEURL = "cloud-dev.armosec.io"
|
||||
ksCloudDevAUTHURL = "eggauth-dev.armosec.io"
|
||||
)
|
||||
|
||||
// KSCloudAPI allows accessing the API of the Kubescape Cloud offering
|
||||
type KSCloudAPI struct {
|
||||
httpClient *http.Client
|
||||
apiURL string
|
||||
authURL string
|
||||
erURL string
|
||||
feURL string
|
||||
accountID string
|
||||
clientID string
|
||||
secretKey string
|
||||
feToken FeLoginResponse
|
||||
authCookie string
|
||||
loggedIn bool
|
||||
}
|
||||
|
||||
var globalKSCloudAPIConnector *KSCloudAPI
|
||||
|
||||
func SetKSCloudAPIConnector(ksCloudAPI *KSCloudAPI) {
|
||||
logger.L().Debug("Kubescape Cloud URLs", helpers.String("api", ksCloudAPI.apiURL), helpers.String("auth", ksCloudAPI.authURL), helpers.String("report", ksCloudAPI.erURL), helpers.String("UI", ksCloudAPI.feURL))
|
||||
globalKSCloudAPIConnector = ksCloudAPI
|
||||
}
|
||||
|
||||
func GetKSCloudAPIConnector() *KSCloudAPI {
|
||||
if globalKSCloudAPIConnector == nil {
|
||||
SetKSCloudAPIConnector(NewKSCloudAPIProd())
|
||||
}
|
||||
return globalKSCloudAPIConnector
|
||||
}
|
||||
|
||||
func NewKSCloudAPIDev() *KSCloudAPI {
|
||||
apiObj := newKSCloudAPI()
|
||||
|
||||
apiObj.apiURL = ksCloudDevBEURL
|
||||
apiObj.authURL = ksCloudDevAUTHURL
|
||||
apiObj.erURL = ksCloudDevERURL
|
||||
apiObj.feURL = ksCloudDevFEURL
|
||||
|
||||
return apiObj
|
||||
}
|
||||
|
||||
func NewKSCloudAPIProd() *KSCloudAPI {
|
||||
apiObj := newKSCloudAPI()
|
||||
|
||||
apiObj.apiURL = ksCloudBEURL
|
||||
apiObj.erURL = ksCloudERURL
|
||||
apiObj.feURL = ksCloudFEURL
|
||||
apiObj.authURL = ksCloudAUTHURL
|
||||
|
||||
return apiObj
|
||||
}
|
||||
|
||||
func NewKSCloudAPIStaging() *KSCloudAPI {
|
||||
apiObj := newKSCloudAPI()
|
||||
|
||||
apiObj.apiURL = ksCloudStageBEURL
|
||||
apiObj.erURL = ksCloudStageERURL
|
||||
apiObj.feURL = ksCloudStageFEURL
|
||||
apiObj.authURL = ksCloudStageAUTHURL
|
||||
|
||||
return apiObj
|
||||
}
|
||||
|
||||
func NewKSCloudAPICustomized(ksCloudERURL, ksCloudBEURL, ksCloudFEURL, ksCloudAUTHURL string) *KSCloudAPI {
|
||||
apiObj := newKSCloudAPI()
|
||||
|
||||
apiObj.erURL = ksCloudERURL
|
||||
apiObj.apiURL = ksCloudBEURL
|
||||
apiObj.feURL = ksCloudFEURL
|
||||
apiObj.authURL = ksCloudAUTHURL
|
||||
|
||||
return apiObj
|
||||
}
|
||||
|
||||
func newKSCloudAPI() *KSCloudAPI {
|
||||
return &KSCloudAPI{
|
||||
httpClient: &http.Client{Timeout: time.Duration(61) * time.Second},
|
||||
loggedIn: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) Post(fullURL string, headers map[string]string, body []byte) (string, error) {
|
||||
if headers == nil {
|
||||
headers = make(map[string]string)
|
||||
}
|
||||
api.appendAuthHeaders(headers)
|
||||
return HttpPost(api.httpClient, fullURL, headers, body)
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) Delete(fullURL string, headers map[string]string) (string, error) {
|
||||
if headers == nil {
|
||||
headers = make(map[string]string)
|
||||
}
|
||||
api.appendAuthHeaders(headers)
|
||||
return HttpDelete(api.httpClient, fullURL, headers)
|
||||
}
|
||||
func (api *KSCloudAPI) Get(fullURL string, headers map[string]string) (string, error) {
|
||||
if headers == nil {
|
||||
headers = make(map[string]string)
|
||||
}
|
||||
api.appendAuthHeaders(headers)
|
||||
return HttpGetter(api.httpClient, fullURL, headers)
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) GetAccountID() string { return api.accountID }
|
||||
func (api *KSCloudAPI) IsLoggedIn() bool { return api.loggedIn }
|
||||
func (api *KSCloudAPI) GetClientID() string { return api.clientID }
|
||||
func (api *KSCloudAPI) GetSecretKey() string { return api.secretKey }
|
||||
func (api *KSCloudAPI) GetFrontendURL() string { return api.feURL }
|
||||
func (api *KSCloudAPI) GetApiURL() string { return api.apiURL }
|
||||
func (api *KSCloudAPI) GetAuthURL() string { return api.authURL }
|
||||
func (api *KSCloudAPI) GetReportReceiverURL() string { return api.erURL }
|
||||
func (api *KSCloudAPI) SetAccountID(accountID string) { api.accountID = accountID }
|
||||
func (api *KSCloudAPI) SetClientID(clientID string) { api.clientID = clientID }
|
||||
func (api *KSCloudAPI) SetSecretKey(secretKey string) { api.secretKey = secretKey }
|
||||
|
||||
func (api *KSCloudAPI) GetFramework(name string) (*reporthandling.Framework, error) {
|
||||
respStr, err := api.Get(api.getFrameworkURL(name), nil)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
framework := &reporthandling.Framework{}
|
||||
if err = JSONDecoder(respStr).Decode(framework); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return framework, err
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) GetFrameworks() ([]reporthandling.Framework, error) {
|
||||
respStr, err := api.Get(api.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
frameworks := []reporthandling.Framework{}
|
||||
if err = JSONDecoder(respStr).Decode(&frameworks); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return frameworks, err
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) GetControl(policyName string) (*reporthandling.Control, error) {
|
||||
return nil, fmt.Errorf("control api is not public")
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
|
||||
exceptions := []armotypes.PostureExceptionPolicy{}
|
||||
|
||||
respStr, err := api.Get(api.getExceptionsURL(clusterName), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = JSONDecoder(respStr).Decode(&exceptions); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return exceptions, nil
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) GetTenant() (*TenantResponse, error) {
|
||||
url := api.getAccountURL()
|
||||
if api.accountID != "" {
|
||||
url = fmt.Sprintf("%s?customerGUID=%s", url, api.accountID)
|
||||
}
|
||||
respStr, err := api.Get(url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tenant := &TenantResponse{}
|
||||
if err = JSONDecoder(respStr).Decode(tenant); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tenant.TenantID != "" {
|
||||
api.accountID = tenant.TenantID
|
||||
}
|
||||
return tenant, nil
|
||||
}
|
||||
|
||||
// GetAccountConfig returns the customer configuration for the given cluster, falling back to the account-wide (customer scope) configuration when needed
|
||||
func (api *KSCloudAPI) GetAccountConfig(clusterName string) (*armotypes.CustomerConfig, error) {
|
||||
accountConfig := &armotypes.CustomerConfig{}
|
||||
if api.accountID == "" {
|
||||
return accountConfig, nil
|
||||
}
|
||||
respStr, err := api.Get(api.getAccountConfig(clusterName), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = JSONDecoder(respStr).Decode(&accountConfig); err != nil {
|
||||
// try with default scope
|
||||
respStr, err = api.Get(api.getAccountConfigDefault(clusterName), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = JSONDecoder(respStr).Decode(&accountConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return accountConfig, nil
|
||||
}
|
||||
|
||||
// ControlsInputs // map[<control name>][<input arguments>]
|
||||
func (api *KSCloudAPI) GetControlsInputs(clusterName string) (map[string][]string, error) {
|
||||
accountConfig, err := api.GetAccountConfig(clusterName)
|
||||
if err == nil {
|
||||
return accountConfig.Settings.PostureControlInputs, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) ListCustomFrameworks() ([]string, error) {
|
||||
respStr, err := api.Get(api.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
frs := []reporthandling.Framework{}
|
||||
if err = json.Unmarshal([]byte(respStr), &frs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
frameworkList := []string{}
|
||||
for _, fr := range frs {
|
||||
if !isNativeFramework(fr.Name) {
|
||||
frameworkList = append(frameworkList, fr.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return frameworkList, nil
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) ListFrameworks() ([]string, error) {
|
||||
respStr, err := api.Get(api.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
frs := []reporthandling.Framework{}
|
||||
if err = json.Unmarshal([]byte(respStr), &frs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
frameworkList := []string{}
|
||||
for _, fr := range frs {
|
||||
if isNativeFramework(fr.Name) {
|
||||
frameworkList = append(frameworkList, strings.ToLower(fr.Name))
|
||||
} else {
|
||||
frameworkList = append(frameworkList, fr.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return frameworkList, nil
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) ListControls(l ListType) ([]string, error) {
|
||||
return nil, fmt.Errorf("control api is not public")
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) PostExceptions(exceptions []armotypes.PostureExceptionPolicy) error {
|
||||
|
||||
for i := range exceptions {
|
||||
ex, err := json.Marshal(exceptions[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = api.Post(api.exceptionsURL(""), map[string]string{"Content-Type": "application/json"}, ex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (api *KSCloudAPI) DeleteException(exceptionName string) error {
|
||||
|
||||
_, err := api.Delete(api.exceptionsURL(exceptionName), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (api *KSCloudAPI) Login() error {
|
||||
if api.accountID == "" {
|
||||
return fmt.Errorf("failed to login, missing accountID")
|
||||
}
|
||||
if api.clientID == "" {
|
||||
return fmt.Errorf("failed to login, missing clientID")
|
||||
}
|
||||
if api.secretKey == "" {
|
||||
return fmt.Errorf("failed to login, missing secretKey")
|
||||
}
|
||||
|
||||
// build the login payload
|
||||
feLoginData := FeLoginData{ClientId: api.clientID, Secret: api.secretKey}
|
||||
body, _ := json.Marshal(feLoginData)
|
||||
|
||||
resp, err := http.Post(api.getApiToken(), "application/json", bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("error authenticating: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
responseBody, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var feLoginResponse FeLoginResponse
|
||||
|
||||
if err = json.Unmarshal(responseBody, &feLoginResponse); err != nil {
|
||||
return err
|
||||
}
|
||||
api.feToken = feLoginResponse
|
||||
|
||||
/* Now we have JWT */
|
||||
|
||||
api.authCookie, err = api.getAuthCookie()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
api.loggedIn = true
|
||||
return nil
|
||||
}
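
A hypothetical usage sketch (not part of the diff) of the new KSCloudAPI: the credential strings are placeholders, and `fetchFrameworks` is an illustrative name.

```go
func fetchFrameworks() ([]reporthandling.Framework, error) {
	api := GetKSCloudAPIConnector() // lazily initialized with the production endpoints
	api.SetAccountID("<account-guid>") // placeholder credentials
	api.SetClientID("<client-id>")
	api.SetSecretKey("<secret-key>")

	// Login obtains a JWT from the auth endpoint plus an auth cookie; both are
	// attached to subsequent requests by appendAuthHeaders.
	if err := api.Login(); err != nil {
		return nil, err
	}
	return api.GetFrameworks()
}
```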
|
||||
@@ -11,13 +11,12 @@ import (
|
||||
|
||||
var NativeFrameworks = []string{"nsa", "mitre", "armobest", "devopsbest"}
|
||||
|
||||
func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
|
||||
func (api *KSCloudAPI) getFrameworkURL(frameworkName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(api.GetApiURL())
|
||||
u.Path = "api/v1/armoFrameworks"
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
q.Add("customerGUID", api.getCustomerGUIDFallBack())
|
||||
if isNativeFramework(frameworkName) {
|
||||
q.Add("frameworkName", strings.ToUpper(frameworkName))
|
||||
} else {
|
||||
@@ -29,25 +28,23 @@ func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getListFrameworkURL() string {
|
||||
func (api *KSCloudAPI) getListFrameworkURL() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(api.GetApiURL())
|
||||
u.Path = "api/v1/armoFrameworks"
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
q.Add("customerGUID", api.getCustomerGUIDFallBack())
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
return u.String()
|
||||
}
|
||||
func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
|
||||
func (api *KSCloudAPI) getExceptionsURL(clusterName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(api.GetApiURL())
|
||||
u.Path = "api/v1/armoPostureExceptions"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
q.Add("customerGUID", api.getCustomerGUIDFallBack())
|
||||
// if clusterName != "" { // TODO - fix customer name support in Armo BE
|
||||
// q.Add("clusterName", clusterName)
|
||||
// }
|
||||
@@ -56,14 +53,13 @@ func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) exceptionsURL(exceptionsPolicyName string) string {
|
||||
func (api *KSCloudAPI) exceptionsURL(exceptionsPolicyName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(api.GetApiURL())
|
||||
u.Path = "api/v1/postureExceptionPolicy"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
q.Add("customerGUID", api.getCustomerGUIDFallBack())
|
||||
if exceptionsPolicyName != "" { // for delete
|
||||
q.Add("policyName", exceptionsPolicyName)
|
||||
}
|
||||
@@ -73,20 +69,19 @@ func (armoAPI *ArmoAPI) exceptionsURL(exceptionsPolicyName string) string {
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getAccountConfigDefault(clusterName string) string {
|
||||
config := armoAPI.getAccountConfig(clusterName)
|
||||
func (api *KSCloudAPI) getAccountConfigDefault(clusterName string) string {
|
||||
config := api.getAccountConfig(clusterName)
|
||||
url := config + "&scope=customer"
|
||||
return url
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
|
||||
func (api *KSCloudAPI) getAccountConfig(clusterName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(api.GetApiURL())
|
||||
u.Path = "api/v1/armoCustomerConfiguration"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
q.Add("customerGUID", api.getCustomerGUIDFallBack())
|
||||
if clusterName != "" { // TODO - fix customer name support in Armo BE
|
||||
q.Add("clusterName", clusterName)
|
||||
}
|
||||
@@ -95,52 +90,49 @@ func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getAccountURL() string {
|
||||
func (api *KSCloudAPI) getAccountURL() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(api.GetApiURL())
|
||||
u.Path = "api/v1/createTenant"
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getApiToken() string {
|
||||
func (api *KSCloudAPI) getApiToken() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.authURL
|
||||
u.Path = "frontegg/identity/resources/auth/v1/api-token"
|
||||
u.Scheme, u.Host = parseHost(api.GetAuthURL())
|
||||
u.Path = "identity/resources/auth/v1/api-token"
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getOpenidCustomers() string {
|
||||
func (api *KSCloudAPI) getOpenidCustomers() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(api.GetApiURL())
|
||||
u.Path = "api/v1/openid_customers"
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getAuthCookie() (string, error) {
|
||||
selectCustomer := ArmoSelectCustomer{SelectedCustomerGuid: armoAPI.accountID}
|
||||
func (api *KSCloudAPI) getAuthCookie() (string, error) {
|
||||
selectCustomer := KSCloudSelectCustomer{SelectedCustomerGuid: api.accountID}
|
||||
requestBody, _ := json.Marshal(selectCustomer)
|
||||
client := &http.Client{}
|
||||
httpRequest, err := http.NewRequest(http.MethodPost, armoAPI.getOpenidCustomers(), bytes.NewBuffer(requestBody))
|
||||
httpRequest, err := http.NewRequest(http.MethodPost, api.getOpenidCustomers(), bytes.NewBuffer(requestBody))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
httpRequest.Header.Set("Content-Type", "application/json")
|
||||
httpRequest.Header.Set("Authorization", fmt.Sprintf("Bearer %s", armoAPI.feToken.Token))
|
||||
httpRequest.Header.Set("Authorization", fmt.Sprintf("Bearer %s", api.feToken.Token))
|
||||
httpResponse, err := client.Do(httpRequest)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer httpResponse.Body.Close()
|
||||
if httpResponse.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("failed to get cookie from %s: status %d", armoAPI.getOpenidCustomers(), httpResponse.StatusCode)
|
||||
return "", fmt.Errorf("failed to get cookie from %s: status %d", api.getOpenidCustomers(), httpResponse.StatusCode)
|
||||
}
|
||||
|
||||
cookies := httpResponse.Header.Get("set-cookie")
|
||||
if len(cookies) == 0 {
|
||||
return "", fmt.Errorf("no cookie field in response from %s", armoAPI.getOpenidCustomers())
|
||||
return "", fmt.Errorf("no cookie field in response from %s", api.getOpenidCustomers())
|
||||
}
|
||||
|
||||
authCookie := ""
|
||||
@@ -152,24 +144,33 @@ func (armoAPI *ArmoAPI) getAuthCookie() (string, error) {
|
||||
}
|
||||
|
||||
if len(authCookie) == 0 {
|
||||
return "", fmt.Errorf("no auth cookie field in response from %s", armoAPI.getOpenidCustomers())
|
||||
return "", fmt.Errorf("no auth cookie field in response from %s", api.getOpenidCustomers())
|
||||
}
|
||||
|
||||
return authCookie, nil
|
||||
}
|
||||
func (armoAPI *ArmoAPI) appendAuthHeaders(headers map[string]string) {
|
||||
func (api *KSCloudAPI) appendAuthHeaders(headers map[string]string) {
|
||||
|
||||
if armoAPI.feToken.Token != "" {
|
||||
headers["Authorization"] = fmt.Sprintf("Bearer %s", armoAPI.feToken.Token)
|
||||
if api.feToken.Token != "" {
|
||||
headers["Authorization"] = fmt.Sprintf("Bearer %s", api.feToken.Token)
|
||||
}
|
||||
if armoAPI.authCookie != "" {
|
||||
headers["Cookie"] = fmt.Sprintf("auth=%s", armoAPI.authCookie)
|
||||
if api.authCookie != "" {
|
||||
headers["Cookie"] = fmt.Sprintf("auth=%s", api.authCookie)
|
||||
}
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getCustomerGUIDFallBack() string {
|
||||
if armoAPI.accountID != "" {
|
||||
return armoAPI.accountID
|
||||
func (api *KSCloudAPI) getCustomerGUIDFallBack() string {
|
||||
if api.accountID != "" {
|
||||
return api.accountID
|
||||
}
|
||||
return "11111111-1111-1111-1111-111111111111"
|
||||
}
|
||||
|
||||
func parseHost(host string) (string, string) {
|
||||
if strings.HasPrefix(host, "http://") {
|
||||
return "http", strings.Replace(host, "http://", "", 1)
|
||||
}
|
||||
|
||||
// default scheme
|
||||
return "https", strings.Replace(host, "https://", "", 1)
|
||||
}
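
A small illustrative sketch (not part of the diff) of what parseHost returns for hosts with and without an explicit scheme; the function name `exampleParseHost` is hypothetical.

```go
func exampleParseHost() {
	scheme, host := parseHost("http://localhost:7555")
	fmt.Println(scheme, host) // http localhost:7555

	// Without an explicit http:// prefix, the scheme defaults to https.
	scheme, host = parseHost("api.armosec.io")
	fmt.Println(scheme, host) // https api.armosec.io
}
```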
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// =======================================================================================================================
|
||||
|
||||
92
core/cautils/helmchart.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes/localworkload"
|
||||
|
||||
helmchart "helm.sh/helm/v3/pkg/chart"
|
||||
helmloader "helm.sh/helm/v3/pkg/chart/loader"
|
||||
helmchartutil "helm.sh/helm/v3/pkg/chartutil"
|
||||
helmengine "helm.sh/helm/v3/pkg/engine"
|
||||
)
|
||||
|
||||
type HelmChart struct {
|
||||
chart *helmchart.Chart
|
||||
path string
|
||||
}
|
||||
|
||||
func IsHelmDirectory(path string) (bool, error) {
|
||||
return helmchartutil.IsChartDir(path)
|
||||
}
|
||||
|
||||
func NewHelmChart(path string) (*HelmChart, error) {
|
||||
chart, err := helmloader.Load(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &HelmChart{
|
||||
chart: chart,
|
||||
path: path,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (hc *HelmChart) GetName() string {
|
||||
return hc.chart.Name()
|
||||
}
|
||||
|
||||
func (hc *HelmChart) GetDefaultValues() map[string]interface{} {
|
||||
return hc.chart.Values
|
||||
}
|
||||
|
||||
// GetWorkloadsWithDefaultValues renders the chart templates using the default values and returns a map of source file to its workloads
|
||||
func (hc *HelmChart) GetWorkloadsWithDefaultValues() (map[string][]workloadinterface.IMetadata, []error) {
|
||||
return hc.GetWorkloads(hc.GetDefaultValues())
|
||||
}
|
||||
|
||||
// GetWorkloads renders chart template using the provided values and returns a map of source (absolute) file path to its workloads
|
||||
func (hc *HelmChart) GetWorkloads(values map[string]interface{}) (map[string][]workloadinterface.IMetadata, []error) {
|
||||
vals, err := helmchartutil.ToRenderValues(hc.chart, values, helmchartutil.ReleaseOptions{}, nil)
|
||||
if err != nil {
|
||||
return nil, []error{err}
|
||||
}
|
||||
|
||||
sourceToFile, err := helmengine.Render(hc.chart, vals)
|
||||
if err != nil {
|
||||
return nil, []error{err}
|
||||
}
|
||||
|
||||
workloads := make(map[string][]workloadinterface.IMetadata, 0)
|
||||
errs := []error{}
|
||||
|
||||
for path, renderedYaml := range sourceToFile {
|
||||
if !IsYaml(strings.ToLower(path)) {
|
||||
continue
|
||||
}
|
||||
|
||||
wls, e := ReadFile([]byte(renderedYaml), YAML_FILE_FORMAT)
|
||||
if e != nil {
|
||||
logger.L().Debug("failed to read rendered yaml file", helpers.String("file", path), helpers.Error(e))
|
||||
}
|
||||
if len(wls) == 0 {
|
||||
continue
|
||||
}
|
||||
// Separate the base path from the file name. We do not use os.PathSeparator because the paths returned from the helm engine are not OS-specific (e.g. mychart/templates/myfile.yaml)
|
||||
if firstPathSeparatorIndex := strings.Index(path, string("/")); firstPathSeparatorIndex != -1 {
|
||||
absPath := filepath.Join(hc.path, path[firstPathSeparatorIndex:])
|
||||
|
||||
workloads[absPath] = []workloadinterface.IMetadata{}
|
||||
for i := range wls {
|
||||
lw := localworkload.NewLocalWorkload(wls[i].GetObject())
|
||||
lw.SetPath(absPath)
|
||||
workloads[absPath] = append(workloads[absPath], lw)
|
||||
}
|
||||
}
|
||||
}
|
||||
return workloads, errs
|
||||
}
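
A hypothetical end-to-end sketch (not part of the diff) that ties the helpers above together: validate the directory, load the chart, and render workloads with the default values. The function name `renderChart` is illustrative only.

```go
func renderChart(chartDir string) (map[string][]workloadinterface.IMetadata, error) {
	if _, err := IsHelmDirectory(chartDir); err != nil {
		return nil, err // e.g. "no Chart.yaml exists in directory ..."
	}

	chart, err := NewHelmChart(chartDir)
	if err != nil {
		return nil, err
	}

	fileToWorkloads, renderErrs := chart.GetWorkloadsWithDefaultValues()
	for _, e := range renderErrs {
		logger.L().Debug("template render issue", helpers.Error(e))
	}
	return fileToWorkloads, nil
}
```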
|
||||
133
core/cautils/helmchart_test.go
Normal file
@@ -0,0 +1,133 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes/localworkload"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
type HelmChartTestSuite struct {
|
||||
suite.Suite
|
||||
helmChartPath string
|
||||
expectedFiles []string
|
||||
expectedDefaultValues map[string]interface{}
|
||||
}
|
||||
|
||||
func TestHelmChartTestSuite(t *testing.T) {
|
||||
suite.Run(t, new(HelmChartTestSuite))
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) SetupSuite() {
|
||||
o, _ := os.Getwd()
|
||||
|
||||
s.helmChartPath = filepath.Join(filepath.Dir(o), "..", "examples", "helm_chart")
|
||||
|
||||
s.expectedFiles = []string{
|
||||
filepath.Join(s.helmChartPath, "templates", "clusterrolebinding.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "clusterrole.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "serviceaccount.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "rolebinding.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "role.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "cronjob.yaml"),
|
||||
}
|
||||
|
||||
var obj interface{}
|
||||
file, _ := ioutil.ReadFile(filepath.Join("testdata", "helm_expected_default_values.json"))
|
||||
_ = json.Unmarshal([]byte(file), &obj)
|
||||
s.expectedDefaultValues = obj.(map[string]interface{})
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestInvalidHelmDirectory() {
|
||||
_, err := NewHelmChart("/invalid_path")
|
||||
s.Error(err)
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestValidHelmDirectory() {
|
||||
chart, err := NewHelmChart(s.helmChartPath)
|
||||
s.NoError(err)
|
||||
s.NotNil(chart)
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestGetName() {
|
||||
chart, _ := NewHelmChart(s.helmChartPath)
|
||||
s.Equal("kubescape", chart.GetName())
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestGetDefaultValues() {
|
||||
chart, _ := NewHelmChart(s.helmChartPath)
|
||||
|
||||
values := chart.GetDefaultValues()
|
||||
|
||||
valuesJson, _ := json.Marshal(values)
|
||||
expectedValuesJson, _ := json.Marshal(s.expectedDefaultValues)
|
||||
|
||||
s.JSONEq(string(valuesJson), string(expectedValuesJson))
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestGetWorkloadsWithOverride() {
|
||||
chart, err := NewHelmChart(s.helmChartPath)
|
||||
s.NoError(err, "Expected a valid helm chart")
|
||||
|
||||
values := chart.GetDefaultValues()
|
||||
|
||||
// Default pullPolicy value = Always
|
||||
pullPolicyValue := values["image"].(map[string]interface{})["pullPolicy"].(string)
|
||||
s.Equal(pullPolicyValue, "Always")
|
||||
|
||||
// Override default value
|
||||
values["image"].(map[string]interface{})["pullPolicy"] = "Never"
|
||||
|
||||
fileToWorkloads, errs := chart.GetWorkloads(values)
|
||||
s.Len(errs, 0)
|
||||
|
||||
s.Lenf(fileToWorkloads, len(s.expectedFiles), "Expected %d files", len(s.expectedFiles))
|
||||
|
||||
for _, expectedFile := range s.expectedFiles {
|
||||
s.Contains(fileToWorkloads, expectedFile)
|
||||
s.FileExists(expectedFile)
|
||||
s.GreaterOrEqualf(len(fileToWorkloads[expectedFile]), 1, "Expected at least one workload in %q", expectedFile)
|
||||
|
||||
for i := range fileToWorkloads[expectedFile] {
|
||||
pathInWorkload := fileToWorkloads[expectedFile][i].(*localworkload.LocalWorkload).GetPath()
|
||||
s.Equal(pathInWorkload, expectedFile, "Expected GetPath() to return a valid path on workload")
|
||||
}
|
||||
|
||||
if strings.Contains(expectedFile, "cronjob.yaml") {
|
||||
jsonBytes, _ := json.Marshal(fileToWorkloads[expectedFile][0].GetObject())
|
||||
s.Contains(string(jsonBytes), "\"imagePullPolicy\":\"Never\"", "Expected the overridden value of imagePullPolicy to be 'Never'")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestGetWorkloadsMissingValue() {
|
||||
chart, _ := NewHelmChart(s.helmChartPath)
|
||||
|
||||
values := chart.GetDefaultValues()
|
||||
delete(values, "image")
|
||||
|
||||
fileToWorkloads, errs := chart.GetWorkloads(values)
|
||||
s.Nil(fileToWorkloads)
|
||||
s.Len(errs, 1, "Expected an error due to missing value")
|
||||
|
||||
expectedErrMsg := "<.Values.image.repository>: nil pointer"
|
||||
s.Containsf(errs[0].Error(), expectedErrMsg, "expected error containing %q, got %q", expectedErrMsg, errs[0])
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestIsHelmDirectory() {
|
||||
ok, err := IsHelmDirectory(s.helmChartPath)
|
||||
s.True(ok)
|
||||
s.NoError(err)
|
||||
|
||||
o, _ := os.Getwd()
|
||||
nonHelmDir := filepath.Join(filepath.Dir(o), "../examples/online-boutique")
|
||||
ok, err = IsHelmDirectory(nonHelmDir)
|
||||
s.False(ok)
|
||||
s.Contains(err.Error(), "no Chart.yaml exists in directory")
|
||||
}
|
||||
246
core/cautils/localgitrepository.go
Normal file
@@ -0,0 +1,246 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/go-git-url/apis"
|
||||
gitv5 "github.com/go-git/go-git/v5"
|
||||
configv5 "github.com/go-git/go-git/v5/config"
|
||||
plumbingv5 "github.com/go-git/go-git/v5/plumbing"
|
||||
git2go "github.com/libgit2/git2go/v33"
|
||||
)
|
||||
|
||||
type LocalGitRepository struct {
|
||||
goGitRepo *gitv5.Repository
|
||||
git2GoRepo *git2go.Repository
|
||||
head *plumbingv5.Reference
|
||||
config *configv5.Config
|
||||
fileToLastCommit map[string]*git2go.Commit
|
||||
}
|
||||
|
||||
func NewLocalGitRepository(path string) (*LocalGitRepository, error) {
|
||||
goGitRepo, err := gitv5.PlainOpenWithOptions(path, &gitv5.PlainOpenOptions{DetectDotGit: true})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
head, err := goGitRepo.Head()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !head.Name().IsBranch() {
|
||||
return nil, fmt.Errorf("current HEAD reference is not a branch")
|
||||
}
|
||||
|
||||
config, err := goGitRepo.Config()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(config.Remotes) == 0 {
|
||||
return nil, fmt.Errorf("no remotes found")
|
||||
}
|
||||
|
||||
l := &LocalGitRepository{
|
||||
goGitRepo: goGitRepo,
|
||||
head: head,
|
||||
config: config,
|
||||
}
|
||||
|
||||
if repoRoot, err := l.GetRootDir(); err == nil {
|
||||
git2GoRepo, err := git2go.OpenRepository(repoRoot)
|
||||
if err != nil {
|
||||
return l, err
|
||||
}
|
||||
l.git2GoRepo = git2GoRepo
|
||||
}
|
||||
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// GetBranchName returns the current branch name
|
||||
func (g *LocalGitRepository) GetBranchName() string {
|
||||
return g.head.Name().Short()
|
||||
}
|
||||
|
||||
// GetRemoteUrl returns the URL of the default remote
|
||||
func (g *LocalGitRepository) GetRemoteUrl() (string, error) {
|
||||
branchName := g.GetBranchName()
|
||||
if branchRef, branchFound := g.config.Branches[branchName]; branchFound {
|
||||
remoteName := branchRef.Remote
|
||||
if len(g.config.Remotes[remoteName].URLs) == 0 {
|
||||
return "", fmt.Errorf("expected to find URLs for remote '%s', branch '%s'", remoteName, branchName)
|
||||
}
|
||||
return g.config.Remotes[remoteName].URLs[0], nil
|
||||
}
|
||||
|
||||
const defaultRemoteName string = "origin"
|
||||
if len(g.config.Remotes[defaultRemoteName].URLs) == 0 {
|
||||
return "", fmt.Errorf("expected to find URLs for remote '%s'", defaultRemoteName)
|
||||
}
|
||||
return g.config.Remotes[defaultRemoteName].URLs[0], nil
|
||||
}
|
||||
|
||||
// GetName returns the origin repository name without the .git suffix
|
||||
func (g *LocalGitRepository) GetName() (string, error) {
|
||||
originUrl, err := g.GetRemoteUrl()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
baseName := path.Base(originUrl)
|
||||
// remove .git
|
||||
return strings.TrimSuffix(baseName, ".git"), nil
|
||||
}
|
||||
|
||||
// GetLastCommit returns the latest commit object
|
||||
func (g *LocalGitRepository) GetLastCommit() (*apis.Commit, error) {
|
||||
cIter, err := g.goGitRepo.Log(&gitv5.LogOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
commit, err := cIter.Next()
|
||||
defer cIter.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &apis.Commit{
|
||||
SHA: commit.Hash.String(),
|
||||
Author: apis.Committer{
|
||||
Name: commit.Author.Name,
|
||||
Email: commit.Author.Email,
|
||||
Date: commit.Author.When,
|
||||
},
|
||||
Message: commit.Message,
|
||||
Committer: apis.Committer{},
|
||||
Files: []apis.Files{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (g *LocalGitRepository) getAllCommits() ([]*git2go.Commit, error) {
|
||||
logItr, itrErr := g.git2GoRepo.Walk()
|
||||
if itrErr != nil {
|
||||
|
||||
return nil, itrErr
|
||||
}
|
||||
|
||||
pushErr := logItr.PushHead()
|
||||
if pushErr != nil {
|
||||
return nil, pushErr
|
||||
}
|
||||
|
||||
var allCommits []*git2go.Commit
|
||||
err := logItr.Iterate(func(commit *git2go.Commit) bool {
|
||||
if commit != nil {
|
||||
allCommits = append(allCommits, commit)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return allCommits, nil
|
||||
}
|
||||
|
||||
func (g *LocalGitRepository) GetFileLastCommit(filePath string) (*apis.Commit, error) {
|
||||
if len(g.fileToLastCommit) == 0 {
|
||||
filePathToCommitTime := map[string]time.Time{}
|
||||
filePathToCommit := map[string]*git2go.Commit{}
|
||||
allCommits, _ := g.getAllCommits()
|
||||
|
||||
// builds a map of all files to their last commit
|
||||
for _, commit := range allCommits {
|
||||
// Ignore merge commits (2+ parents)
|
||||
if commit.ParentCount() <= 1 {
|
||||
tree, err := commit.Tree()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// ParentCount can be either 1 or 0 (initial commit)
|
||||
// In case it's the initial commit, prevTree is nil
|
||||
var prevTree *git2go.Tree
|
||||
if commit.ParentCount() == 1 {
|
||||
prevCommit := commit.Parent(0)
|
||||
prevTree, err = prevCommit.Tree()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
diff, err := g.git2GoRepo.DiffTreeToTree(prevTree, tree, nil)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
numDeltas, err := diff.NumDeltas()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for i := 0; i < numDeltas; i++ {
|
||||
delta, err := diff.Delta(i)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
deltaFilePath := delta.NewFile.Path
|
||||
commitTime := commit.Author().When
|
||||
|
||||
// If we already stored commit information for this file but it is older, replace it with this newer commit
|
||||
if currentCommitTime, exists := filePathToCommitTime[deltaFilePath]; exists {
|
||||
if currentCommitTime.Before(commitTime) {
|
||||
filePathToCommitTime[deltaFilePath] = commitTime
|
||||
filePathToCommit[deltaFilePath] = commit
|
||||
}
|
||||
} else {
|
||||
filePathToCommitTime[deltaFilePath] = commitTime
|
||||
filePathToCommit[deltaFilePath] = commit
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
g.fileToLastCommit = filePathToCommit
|
||||
}
|
||||
|
||||
if relevantCommit, exists := g.fileToLastCommit[filePath]; exists {
|
||||
return g.getCommit(relevantCommit), nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("failed to get commit information for file: %s", filePath)
|
||||
}
|
||||
|
||||
func (g *LocalGitRepository) getCommit(commit *git2go.Commit) *apis.Commit {
|
||||
return &apis.Commit{
|
||||
SHA: commit.Id().String(),
|
||||
Author: apis.Committer{
|
||||
Name: commit.Author().Name,
|
||||
Email: commit.Author().Email,
|
||||
Date: commit.Author().When,
|
||||
},
|
||||
Message: commit.Message(),
|
||||
Committer: apis.Committer{},
|
||||
Files: []apis.Files{},
|
||||
}
|
||||
}
|
||||
|
||||
func (g *LocalGitRepository) GetRootDir() (string, error) {
|
||||
wt, err := g.goGitRepo.Worktree()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get repo root")
|
||||
}
|
||||
|
||||
return wt.Filesystem.Root(), nil
|
||||
}
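
A hypothetical usage sketch (not part of the diff) of LocalGitRepository: open a local clone, print repository metadata, and look up commit information. The function name `describeRepo` and the file name "README.md" are illustrative only.

```go
func describeRepo(repoPath string) (*apis.Commit, error) {
	repo, err := NewLocalGitRepository(repoPath)
	if err != nil {
		return nil, err
	}

	name, _ := repo.GetName()
	remote, _ := repo.GetRemoteUrl()
	fmt.Printf("repo %s (branch %s, remote %s)\n", name, repo.GetBranchName(), remote)

	// Last commit that touched a specific file; the file-to-commit map is built
	// once on first use and cached on the struct.
	if fileCommit, err := repo.GetFileLastCommit("README.md"); err == nil {
		fmt.Println("README.md last changed in commit", fileCommit.SHA)
	}

	return repo.GetLastCommit()
}
```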
|
||||
175
core/cautils/localgitrepository_test.go
Normal file
@@ -0,0 +1,175 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
var TEST_REPOS = [...]string{"localrepo", "withoutremotes"}
|
||||
|
||||
type LocalGitRepositoryTestSuite struct {
|
||||
suite.Suite
|
||||
archives map[string]*zip.ReadCloser
|
||||
gitRepositoryPaths map[string]string
|
||||
destinationPath string
|
||||
}
|
||||
|
||||
func unzipFile(zipPath, destinationFolder string) (*zip.ReadCloser, error) {
|
||||
archive, err := zip.OpenReader(zipPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, f := range archive.File {
|
||||
filePath := filepath.Join(destinationFolder, f.Name)
|
||||
if !strings.HasPrefix(filePath, filepath.Clean(destinationFolder)+string(os.PathSeparator)) {
|
||||
return nil, fmt.Errorf("invalid file path")
|
||||
}
|
||||
if f.FileInfo().IsDir() {
|
||||
os.MkdirAll(filePath, os.ModePerm)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dstFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileInArchive, err := f.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err := io.Copy(dstFile, fileInArchive); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dstFile.Close()
|
||||
fileInArchive.Close()
|
||||
}
|
||||
|
||||
return archive, err
|
||||
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) SetupSuite() {
|
||||
s.archives = make(map[string]*zip.ReadCloser)
|
||||
s.gitRepositoryPaths = make(map[string]string)
|
||||
|
||||
destinationPath := filepath.Join(".", "testdata", "temp")
|
||||
s.destinationPath = destinationPath
|
||||
os.RemoveAll(destinationPath)
|
||||
for _, repo := range TEST_REPOS {
|
||||
zippedFixturePath := filepath.Join(".", "testdata", repo+".git")
|
||||
gitRepositoryPath := filepath.Join(destinationPath, repo)
|
||||
archive, err := unzipFile(zippedFixturePath, destinationPath)
|
||||
|
||||
if err == nil {
|
||||
s.archives[repo] = archive
|
||||
s.gitRepositoryPaths[repo] = gitRepositoryPath
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLocalGitRepositoryTestSuite(t *testing.T) {
|
||||
suite.Run(t, new(LocalGitRepositoryTestSuite))
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TearDownSuite() {
|
||||
if s.archives != nil {
|
||||
for _, archive := range s.archives {
|
||||
if archive != nil {
|
||||
archive.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
os.RemoveAll(s.destinationPath)
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestInvalidRepositoryPath() {
|
||||
if _, err := NewLocalGitRepository("/invalidpath"); s.Error(err) {
|
||||
s.Equal("repository does not exist", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestRepositoryWithoutRemotes() {
|
||||
if _, err := NewLocalGitRepository(s.gitRepositoryPaths["withoutremotes"]); s.Error(err) {
|
||||
s.Equal("no remotes found", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetBranchName() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
|
||||
s.Equal("master", localRepo.GetBranchName())
|
||||
}
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetName() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
|
||||
if name, err := localRepo.GetName(); s.NoError(err) {
|
||||
s.Equal("localrepo", name)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetOriginUrl() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
|
||||
if url, err := localRepo.GetRemoteUrl(); s.NoError(err) {
|
||||
s.Equal("git@github.com:testuser/localrepo", url)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetLastCommit() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
|
||||
if commit, err := localRepo.GetLastCommit(); s.NoError(err) {
|
||||
s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
|
||||
s.Equal("Amir Malka", commit.Author.Name)
|
||||
s.Equal("amirm@armosec.io", commit.Author.Email)
|
||||
s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
|
||||
s.Equal("added file B\n", commit.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetFileLastCommit() {
|
||||
s.Run("fileA", func() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
|
||||
|
||||
if commit, err := localRepo.GetFileLastCommit("fileA"); s.NoError(err) {
|
||||
s.Equal("9fae4be19624297947d2b605cefbff516628612d", commit.SHA)
|
||||
s.Equal("Amir Malka", commit.Author.Name)
|
||||
s.Equal("amirm@armosec.io", commit.Author.Email)
|
||||
s.Equal("2022-05-22 18:55:48 +0300 +0300", commit.Author.Date.String())
|
||||
s.Equal("added file A\n", commit.Message)
|
||||
}
|
||||
|
||||
}
|
||||
})
|
||||
|
||||
s.Run("fileB", func() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
|
||||
|
||||
if commit, err := localRepo.GetFileLastCommit("dirA/fileB"); s.NoError(err) {
|
||||
s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
|
||||
s.Equal("Amir Malka", commit.Author.Name)
|
||||
s.Equal("amirm@armosec.io", commit.Author.Email)
|
||||
s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
|
||||
s.Equal("added file B\n", commit.Message)
|
||||
}
|
||||
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
package helpers
|
||||
|
||||
import "time"
|
||||
|
||||
type StringObj struct {
|
||||
key string
|
||||
value string
|
||||
}
|
||||
|
||||
type ErrorObj struct {
|
||||
key string
|
||||
value error
|
||||
}
|
||||
|
||||
type IntObj struct {
|
||||
key string
|
||||
value int
|
||||
}
|
||||
|
||||
type InterfaceObj struct {
|
||||
key string
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func Error(e error) *ErrorObj { return &ErrorObj{key: "error", value: e} }
|
||||
func Int(k string, v int) *IntObj { return &IntObj{key: k, value: v} }
|
||||
func String(k, v string) *StringObj { return &StringObj{key: k, value: v} }
|
||||
func Interface(k string, v interface{}) *InterfaceObj { return &InterfaceObj{key: k, value: v} }
|
||||
func Time() *StringObj {
|
||||
return &StringObj{key: "time", value: time.Now().Format("2006-01-02 15:04:05")}
|
||||
}
|
||||
@@ -1,69 +0,0 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Level int8
|
||||
|
||||
const (
|
||||
UnknownLevel Level = iota - 1
|
||||
DebugLevel
|
||||
InfoLevel //default
|
||||
SuccessLevel
|
||||
WarningLevel
|
||||
ErrorLevel
|
||||
FatalLevel
|
||||
|
||||
_defaultLevel = InfoLevel
|
||||
_minLevel = DebugLevel
|
||||
_maxLevel = FatalLevel
|
||||
)
|
||||
|
||||
func ToLevel(level string) Level {
|
||||
switch strings.ToLower(level) {
|
||||
case "debug":
|
||||
return DebugLevel
|
||||
case "info":
|
||||
return InfoLevel
|
||||
case "success":
|
||||
return SuccessLevel
|
||||
case "warning", "warn":
|
||||
return WarningLevel
|
||||
case "error":
|
||||
return ErrorLevel
|
||||
case "fatal":
|
||||
return FatalLevel
|
||||
default:
|
||||
return UnknownLevel
|
||||
}
|
||||
}
|
||||
func (l Level) String() string {
|
||||
switch l {
|
||||
case DebugLevel:
|
||||
return "debug"
|
||||
case InfoLevel:
|
||||
return "info"
|
||||
case SuccessLevel:
|
||||
return "success"
|
||||
case WarningLevel:
|
||||
return "warning"
|
||||
case ErrorLevel:
|
||||
return "error"
|
||||
case FatalLevel:
|
||||
return "fatal"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (l Level) Skip(l2 Level) bool {
|
||||
return l < l2
|
||||
}
|
||||
|
||||
func SupportedLevels() []string {
|
||||
levels := []string{}
|
||||
for i := _minLevel; i <= _maxLevel; i++ {
|
||||
levels = append(levels, i.String())
|
||||
}
|
||||
return levels
|
||||
}
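
An illustrative sketch (not part of the diff) of how the level helpers above combine for filtering; the function name `shouldPrint` is hypothetical.

```go
func shouldPrint(configuredLevel, messageLevel string) bool {
	cfg := ToLevel(configuredLevel) // e.g. "warning" -> WarningLevel
	msg := ToLevel(messageLevel)

	// Skip(l2) reports whether the receiver level is below l2, so a message is
	// printed only when its level is not below the configured level.
	return !msg.Skip(cfg)
}
```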
|
||||
@@ -1,62 +0,0 @@
|
||||
package helpers
|
||||
|
||||
type IDetails interface {
|
||||
Key() string
|
||||
Value() interface{}
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// ============================== String ================================================
|
||||
// ======================================================================================
|
||||
|
||||
// Key
|
||||
func (s *StringObj) Key() string {
|
||||
return s.key
|
||||
}
|
||||
|
||||
// Value
|
||||
func (s *StringObj) Value() interface{} {
|
||||
return s.value
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// =============================== Error ================================================
|
||||
// ======================================================================================
|
||||
|
||||
// Key
|
||||
func (s *ErrorObj) Key() string {
|
||||
return s.key
|
||||
}
|
||||
|
||||
// Value
|
||||
func (s *ErrorObj) Value() interface{} {
|
||||
return s.value
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// ================================= Int ================================================
|
||||
// ======================================================================================
|
||||
|
||||
// Key
|
||||
func (s *IntObj) Key() string {
|
||||
return s.key
|
||||
}
|
||||
|
||||
// Value
|
||||
func (s *IntObj) Value() interface{} {
|
||||
return s.value
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// =========================== Interface ================================================
|
||||
// ======================================================================================
|
||||
|
||||
// Key
|
||||
func (s *InterfaceObj) Key() string {
|
||||
return s.key
|
||||
}
|
||||
|
||||
// Value
|
||||
func (s *InterfaceObj) Value() interface{} {
|
||||
return s.value
|
||||
}
|
||||
@@ -1,81 +0,0 @@
package logger

import (
	"os"
	"strings"

	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
	"github.com/armosec/kubescape/v2/core/cautils/logger/nonelogger"
	"github.com/armosec/kubescape/v2/core/cautils/logger/prettylogger"
	"github.com/armosec/kubescape/v2/core/cautils/logger/zaplogger"
)

type ILogger interface {
	Fatal(msg string, details ...helpers.IDetails) // print log and exit 1
	Error(msg string, details ...helpers.IDetails)
	Success(msg string, details ...helpers.IDetails)
	Warning(msg string, details ...helpers.IDetails)
	Info(msg string, details ...helpers.IDetails)
	Debug(msg string, details ...helpers.IDetails)

	SetLevel(level string) error
	GetLevel() string

	SetWriter(w *os.File)
	GetWriter() *os.File

	LoggerName() string
}

var l ILogger

// L returns the initialized logger. If the logger has not been initialized yet, InitDefaultLogger() is called first.
func L() ILogger {
	if l == nil {
		InitDefaultLogger()
	}
	return l
}

/* InitLogger initializes the desired logger

Use:
  InitLogger("<logger name>")

Supported logger names (call ListLoggersNames() to list the supported loggers):
  - "zap": logger from the "go.uber.org/zap" package
  - "pretty", "colorful": human-friendly colorful logger
  - "none", "mock", "empty", "ignore": logger that prints nothing

Default:
  - "pretty"

e.g.
  InitLogger("none") -> initializes the mock logger
*/
func InitLogger(loggerName string) {

	switch strings.ToLower(loggerName) {
	case zaplogger.LoggerName:
		l = zaplogger.NewZapLogger()
	case prettylogger.LoggerName, "colorful":
		l = prettylogger.NewPrettyLogger()
	case nonelogger.LoggerName, "mock", "empty", "ignore":
		l = nonelogger.NewNoneLogger()
	default:
		InitDefaultLogger()
	}
}

func InitDefaultLogger() {
	l = prettylogger.NewPrettyLogger()
}

func DisableColor(flag bool) {
	prettylogger.DisableColor(flag)
}

func ListLoggersNames() []string {
	return []string{prettylogger.LoggerName, zaplogger.LoggerName, nonelogger.LoggerName}
}
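// Illustration: how the facade above is consumed. The import paths are the ones from the
// removed file; later hunks in this diff move callers to github.com/kubescape/go-logger,
// which keeps the same L()/helpers pattern. helpers.String and helpers.Error are the
// detail constructors used throughout this diff.
package main

import (
	"github.com/armosec/kubescape/v2/core/cautils/logger"
	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
)

func main() {
	// Pick a backend by name; unknown names fall back to the default "pretty" logger.
	logger.InitLogger("pretty")

	// SetLevel rejects level names outside SupportedLevels().
	if err := logger.L().SetLevel("debug"); err != nil {
		logger.L().Fatal("unsupported log level", helpers.Error(err))
	}

	logger.L().Info("scan started", helpers.String("target", "cluster"))
}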
@@ -1,28 +0,0 @@
package nonelogger

import (
	"os"

	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
)

const LoggerName string = "none"

type NoneLogger struct {
}

func NewNoneLogger() *NoneLogger {
	return &NoneLogger{}
}

func (nl *NoneLogger) GetLevel() string                                { return "" }
func (nl *NoneLogger) LoggerName() string                              { return LoggerName }
func (nl *NoneLogger) SetWriter(w *os.File)                            {}
func (nl *NoneLogger) GetWriter() *os.File                             { return nil }
func (nl *NoneLogger) SetLevel(level string) error                     { return nil }
func (nl *NoneLogger) Fatal(msg string, details ...helpers.IDetails)   {}
func (nl *NoneLogger) Error(msg string, details ...helpers.IDetails)   {}
func (nl *NoneLogger) Warning(msg string, details ...helpers.IDetails) {}
func (nl *NoneLogger) Success(msg string, details ...helpers.IDetails) {}
func (nl *NoneLogger) Info(msg string, details ...helpers.IDetails)    {}
func (nl *NoneLogger) Debug(msg string, details ...helpers.IDetails)   {}
@@ -1,37 +0,0 @@
package prettylogger

import (
	"io"

	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
	"github.com/fatih/color"
)

var prefixError = color.New(color.Bold, color.FgHiRed).FprintfFunc()
var prefixWarning = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
var prefixInfo = color.New(color.Bold, color.FgCyan).FprintfFunc()
var prefixSuccess = color.New(color.Bold, color.FgHiGreen).FprintfFunc()
var prefixDebug = color.New(color.Bold, color.FgWhite).FprintfFunc()
var message = color.New().FprintfFunc()

func prefix(l helpers.Level) func(w io.Writer, format string, a ...interface{}) {
	switch l {
	case helpers.DebugLevel:
		return prefixDebug
	case helpers.InfoLevel:
		return prefixInfo
	case helpers.SuccessLevel:
		return prefixSuccess
	case helpers.WarningLevel:
		return prefixWarning
	case helpers.ErrorLevel, helpers.FatalLevel:
		return prefixError
	}
	return message
}

func DisableColor(flag bool) {
	if flag {
		color.NoColor = true
	}
}
@@ -1,82 +0,0 @@
package prettylogger

import (
	"fmt"
	"os"
	"sync"

	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
)

const LoggerName string = "pretty"

type PrettyLogger struct {
	writer *os.File
	level  helpers.Level
	mutex  sync.Mutex
}

func NewPrettyLogger() *PrettyLogger {

	return &PrettyLogger{
		writer: os.Stderr, // default to stderr
		level:  helpers.InfoLevel,
		mutex:  sync.Mutex{},
	}
}

func (pl *PrettyLogger) GetLevel() string     { return pl.level.String() }
func (pl *PrettyLogger) SetWriter(w *os.File) { pl.writer = w }
func (pl *PrettyLogger) GetWriter() *os.File  { return pl.writer }
func (pl *PrettyLogger) LoggerName() string   { return LoggerName }

func (pl *PrettyLogger) SetLevel(level string) error {
	pl.level = helpers.ToLevel(level)
	if pl.level == helpers.UnknownLevel {
		return fmt.Errorf("level '%s' unknown", level)
	}
	return nil
}
func (pl *PrettyLogger) Fatal(msg string, details ...helpers.IDetails) {
	pl.print(helpers.FatalLevel, msg, details...)
	os.Exit(1)
}
func (pl *PrettyLogger) Error(msg string, details ...helpers.IDetails) {
	pl.print(helpers.ErrorLevel, msg, details...)
}
func (pl *PrettyLogger) Warning(msg string, details ...helpers.IDetails) {
	pl.print(helpers.WarningLevel, msg, details...)
}
func (pl *PrettyLogger) Info(msg string, details ...helpers.IDetails) {
	pl.print(helpers.InfoLevel, msg, details...)
}
func (pl *PrettyLogger) Debug(msg string, details ...helpers.IDetails) {
	pl.print(helpers.DebugLevel, msg, details...)
}
func (pl *PrettyLogger) Success(msg string, details ...helpers.IDetails) {
	pl.print(helpers.SuccessLevel, msg, details...)
}

func (pl *PrettyLogger) print(level helpers.Level, msg string, details ...helpers.IDetails) {
	if !level.Skip(pl.level) {
		pl.mutex.Lock()
		prefix(level)(pl.writer, "[%s] ", level.String())
		if d := detailsToString(details); d != "" {
			msg = fmt.Sprintf("%s. %s", msg, d)
		}
		message(pl.writer, fmt.Sprintf("%s\n", msg))
		pl.mutex.Unlock()
	}

}

func detailsToString(details []helpers.IDetails) string {
	s := ""
	for i := range details {
		s += fmt.Sprintf("%s: %v", details[i].Key(), details[i].Value())
		if i < len(details)-1 {
			s += "; "
		}
	}
	return s
}
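// Illustration: the single line that print and detailsToString above produce together, a
// colored "[level] " prefix followed by the message and the "; "-joined details. The
// sketch below reproduces that layout with plain fmt, colors omitted.
package main

import "fmt"

// detail mirrors the Key/Value pairs produced by the helpers package.
type detail struct {
	key   string
	value interface{}
}

func render(level, msg string, details ...detail) string {
	s := ""
	for i, d := range details {
		s += fmt.Sprintf("%s: %v", d.key, d.value)
		if i < len(details)-1 {
			s += "; "
		}
	}
	if s != "" {
		msg = fmt.Sprintf("%s. %s", msg, s)
	}
	return fmt.Sprintf("[%s] %s", level, msg)
}

func main() {
	fmt.Println(render("info", "scan finished", detail{"framework", "nsa"}, detail{"failed", 3}))
	// Output: [info] scan finished. framework: nsa; failed: 3
}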
@@ -1,79 +0,0 @@
package zaplogger

import (
	"os"

	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

const LoggerName string = "zap"

type ZapLogger struct {
	zapL *zap.Logger
	cfg  zap.Config
}

func NewZapLogger() *ZapLogger {
	ec := zap.NewProductionEncoderConfig()
	ec.EncodeTime = zapcore.RFC3339TimeEncoder
	cfg := zap.NewProductionConfig()
	cfg.DisableCaller = true
	cfg.DisableStacktrace = true
	cfg.Encoding = "json"
	cfg.EncoderConfig = ec

	zapLogger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	return &ZapLogger{
		zapL: zapLogger,
		cfg:  cfg,
	}
}

func (zl *ZapLogger) GetLevel() string     { return zl.cfg.Level.Level().String() }
func (zl *ZapLogger) SetWriter(w *os.File) {}
func (zl *ZapLogger) GetWriter() *os.File  { return nil }
func (zl *ZapLogger) LoggerName() string   { return LoggerName }
func (zl *ZapLogger) SetLevel(level string) error {
	l := zapcore.Level(1)
	err := l.Set(level)
	if err == nil {
		zl.cfg.Level.SetLevel(l)
	}
	return err
}
func (zl *ZapLogger) Fatal(msg string, details ...helpers.IDetails) {
	zl.zapL.Fatal(msg, detailsToZapFields(details)...)
}

func (zl *ZapLogger) Error(msg string, details ...helpers.IDetails) {
	zl.zapL.Error(msg, detailsToZapFields(details)...)
}

func (zl *ZapLogger) Warning(msg string, details ...helpers.IDetails) {
	zl.zapL.Warn(msg, detailsToZapFields(details)...)
}

func (zl *ZapLogger) Success(msg string, details ...helpers.IDetails) {
	zl.zapL.Info(msg, detailsToZapFields(details)...)
}

func (zl *ZapLogger) Info(msg string, details ...helpers.IDetails) {
	zl.zapL.Info(msg, detailsToZapFields(details)...)
}

func (zl *ZapLogger) Debug(msg string, details ...helpers.IDetails) {
	zl.zapL.Debug(msg, detailsToZapFields(details)...)
}

func detailsToZapFields(details []helpers.IDetails) []zapcore.Field {
	zapFields := []zapcore.Field{}
	for i := range details {
		zapFields = append(zapFields, zap.Any(details[i].Key(), details[i].Value()))
	}
	return zapFields
}
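// Illustration: SetLevel above relies on zapcore.Level implementing flag.Value, so Set
// parses the textual level name and returns an error for unknown names.
package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	var l zapcore.Level
	if err := l.Set("warn"); err != nil {
		fmt.Println("unknown level:", err)
		return
	}
	fmt.Println("parsed level:", l) // parsed level: warn
}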
@@ -4,11 +4,12 @@ import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/rbac-utils/rbacscanner"
|
||||
"github.com/armosec/rbac-utils/rbacutils"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/rbac-utils/rbacscanner"
|
||||
"github.com/kubescape/rbac-utils/rbacutils"
|
||||
)
|
||||
|
||||
type RBACObjects struct {
|
||||
@@ -19,12 +20,19 @@ func NewRBACObjects(scanner *rbacscanner.RbacScannerFromK8sAPI) *RBACObjects {
|
||||
return &RBACObjects{scanner: scanner}
|
||||
}
|
||||
|
||||
func (rbacObjects *RBACObjects) SetResourcesReport() (*reporthandling.PostureReport, error) {
|
||||
return &reporthandling.PostureReport{
|
||||
func (rbacObjects *RBACObjects) SetResourcesReport() (*reporthandlingv2.PostureReport, error) {
|
||||
return &reporthandlingv2.PostureReport{
|
||||
ReportID: uuid.NewString(),
|
||||
ReportGenerationTime: time.Now().UTC(),
|
||||
CustomerGUID: rbacObjects.scanner.CustomerGUID,
|
||||
ClusterName: rbacObjects.scanner.ClusterName,
|
||||
Metadata: reporthandlingv2.Metadata{
|
||||
ContextMetadata: reporthandlingv2.ContextMetadata{
|
||||
ClusterContextMetadata: &reporthandlingv2.ClusterMetadata{
|
||||
ContextName: rbacObjects.scanner.ClusterName,
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -46,26 +54,30 @@ func (rbacObjects *RBACObjects) rbacObjectsToResources(resources *rbacutils.Rbac
|
||||
/*
|
||||
************************************************************************************************************************
|
||||
This code adds an invalid ID ->
|
||||
(github.com/armosec/rbac-utils v0.0.11): "//SA2WLIDmap/SA2WLIDmap"
|
||||
(github.com/armosec/rbac-utils v0.0.12): "armo.rbac.com/v0beta1//SAID2WLIDmap/SAID2WLIDmap"
|
||||
(github.com/kubescape/opa-utils v0.0.11): "//SA2WLIDmap/SA2WLIDmap"
|
||||
(github.com/kubescape/opa-utils v0.0.12): "armo.rbac.com/v0beta1//SAID2WLIDmap/SAID2WLIDmap"
|
||||
|
||||
Should be investigated
|
||||
************************************************************************************************************************
|
||||
*/
|
||||
|
||||
// wrap rbac aggregated objects in IMetadata and add to allresources
|
||||
// wrap rbac aggregated objects in IMetadata and add to AllResources
|
||||
// TODO - DEPRECATE SA2WLIDmap
|
||||
SA2WLIDmapIMeta, err := rbacutils.SA2WLIDmapIMetadataWrapper(resources.SA2WLIDmap)
|
||||
m, err := rbacutils.SA2WLIDmapIMetadataWrapper(resources.SA2WLIDmap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allresources[SA2WLIDmapIMeta.GetID()] = SA2WLIDmapIMeta
|
||||
|
||||
SAID2WLIDmapIMeta, err := rbacutils.SAID2WLIDmapIMetadataWrapper(resources.SAID2WLIDmap)
|
||||
sa2WLIDmapIMeta := workloadinterface.NewBaseObject(m)
|
||||
allresources[sa2WLIDmapIMeta.GetID()] = sa2WLIDmapIMeta
|
||||
|
||||
m2, err := rbacutils.SAID2WLIDmapIMetadataWrapper(resources.SAID2WLIDmap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allresources[SAID2WLIDmapIMeta.GetID()] = SAID2WLIDmapIMeta
|
||||
|
||||
saID2WLIDmapIMeta := workloadinterface.NewBaseObject(m2)
|
||||
allresources[saID2WLIDmapIMeta.GetID()] = saID2WLIDmapIMeta
|
||||
|
||||
// convert rbac k8s resources to IMetadata and add to allresources
|
||||
for _, cr := range resources.ClusterRoles.Items {
|
||||
|
||||
@@ -1,17 +1,14 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
helpersv1 "github.com/armosec/opa-utils/reporthandling/helpers/v1"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
helpersv1 "github.com/kubescape/opa-utils/reporthandling/helpers/v1"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
func ReportV2ToV1(opaSessionObj *OPASessionObj) {
|
||||
if len(opaSessionObj.PostureReport.FrameworkReports) > 0 {
|
||||
return // report already converted
|
||||
}
|
||||
// opaSessionObj.PostureReport.ClusterCloudProvider = opaSessionObj.Report.ClusterCloudProvider
|
||||
func ReportV2ToV1(opaSessionObj *OPASessionObj) *reporthandling.PostureReport {
|
||||
report := &reporthandling.PostureReport{}
|
||||
|
||||
frameworks := []reporthandling.FrameworkReport{}
|
||||
|
||||
@@ -33,27 +30,14 @@ func ReportV2ToV1(opaSessionObj *OPASessionObj) {
|
||||
fwv1.Score = opaSessionObj.Report.SummaryDetails.Score
|
||||
}
|
||||
|
||||
// // remove unused data
|
||||
// opaSessionObj.Report = nil
|
||||
// opaSessionObj.ResourcesResult = nil
|
||||
|
||||
// setup counters and score
|
||||
for f := range frameworks {
|
||||
// // set exceptions
|
||||
// exceptions.SetFrameworkExceptions(frameworks, opap.Exceptions, cautils.ClusterName)
|
||||
|
||||
// set counters
|
||||
reporthandling.SetUniqueResourcesCounter(&frameworks[f])
|
||||
|
||||
// set default score
|
||||
// reporthandling.SetDefaultScore(&frameworks[f])
|
||||
}
|
||||
|
||||
// // update score
|
||||
// scoreutil := score.NewScore(opaSessionObj.AllResources)
|
||||
// scoreutil.Calculate(frameworks)
|
||||
|
||||
opaSessionObj.PostureReport.FrameworkReports = frameworks
|
||||
report.FrameworkReports = frameworks
|
||||
return report
|
||||
}
|
||||
|
||||
func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, controls map[string]reportsummary.ControlSummary) []reporthandling.ControlReport {
|
||||
@@ -65,7 +49,6 @@ func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, con
|
||||
crv1.Name = crv2.GetName()
|
||||
crv1.Score = crv2.GetScore()
|
||||
crv1.Control_ID = controlID
|
||||
// crv1.Attributes = crv2.
|
||||
|
||||
// TODO - add fields
|
||||
crv1.Description = crv2.Description
|
||||
|
||||
@@ -6,84 +6,13 @@ type RootInfo struct {
|
||||
CacheDir string // cached dir
|
||||
DisableColor bool // Disable Color
|
||||
|
||||
ArmoBEURLs string // armo url
|
||||
ArmoBEURLsDep string // armo url
|
||||
KSCloudBEURLs string // Kubescape Cloud URL
|
||||
KSCloudBEURLsDep string // Kubescape Cloud URL
|
||||
|
||||
}
|
||||
|
||||
// func (rootInfo *RootInfo) InitLogger() {
|
||||
// logger.DisableColor(rootInfo.DisableColor)
|
||||
|
||||
// if rootInfo.LoggerName == "" {
|
||||
// if l := os.Getenv("KS_LOGGER_NAME"); l != "" {
|
||||
// rootInfo.LoggerName = l
|
||||
// } else {
|
||||
// if isatty.IsTerminal(os.Stdout.Fd()) {
|
||||
// rootInfo.LoggerName = "pretty"
|
||||
// } else {
|
||||
// rootInfo.LoggerName = "zap"
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
// logger.InitLogger(rootInfo.LoggerName)
|
||||
|
||||
// }
|
||||
// func (rootInfo *RootInfo) InitLoggerLevel() error {
|
||||
// if rootInfo.Logger == helpers.InfoLevel.String() {
|
||||
// } else if l := os.Getenv("KS_LOGGER"); l != "" {
|
||||
// rootInfo.Logger = l
|
||||
// }
|
||||
|
||||
// if err := logger.L().SetLevel(rootInfo.Logger); err != nil {
|
||||
// return fmt.Errorf("supported levels: %s", strings.Join(helpers.SupportedLevels(), "/"))
|
||||
// }
|
||||
// return nil
|
||||
// }
|
||||
|
||||
// func (rootInfo *RootInfo) InitCacheDir() error {
|
||||
// if rootInfo.CacheDir == getter.DefaultLocalStore {
|
||||
// getter.DefaultLocalStore = rootInfo.CacheDir
|
||||
// } else if cacheDir := os.Getenv("KS_CACHE_DIR"); cacheDir != "" {
|
||||
// getter.DefaultLocalStore = cacheDir
|
||||
// } else {
|
||||
// return nil // using default cache dir location
|
||||
// }
|
||||
|
||||
// // TODO create dir if not found exist
|
||||
// // logger.L().Debug("cache dir updated", helpers.String("path", getter.DefaultLocalStore))
|
||||
// return nil
|
||||
// }
|
||||
// func (rootInfo *RootInfo) InitEnvironment() error {
|
||||
|
||||
// urlSlices := strings.Split(rootInfo.ArmoBEURLs, ",")
|
||||
// if len(urlSlices) != 1 && len(urlSlices) < 3 {
|
||||
// return fmt.Errorf("expected at least 2 URLs (report,api,frontend,auth)")
|
||||
// }
|
||||
// switch len(urlSlices) {
|
||||
// case 1:
|
||||
// switch urlSlices[0] {
|
||||
// case "dev", "development":
|
||||
// getter.SetARMOAPIConnector(getter.NewARMOAPIDev())
|
||||
// case "stage", "staging":
|
||||
// getter.SetARMOAPIConnector(getter.NewARMOAPIStaging())
|
||||
// case "":
|
||||
// getter.SetARMOAPIConnector(getter.NewARMOAPIProd())
|
||||
// default:
|
||||
// return fmt.Errorf("unknown environment")
|
||||
// }
|
||||
// case 2:
|
||||
// armoERURL := urlSlices[0] // mandatory
|
||||
// armoBEURL := urlSlices[1] // mandatory
|
||||
// getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(armoERURL, armoBEURL, "", ""))
|
||||
// case 3, 4:
|
||||
// var armoAUTHURL string
|
||||
// armoERURL := urlSlices[0] // mandatory
|
||||
// armoBEURL := urlSlices[1] // mandatory
|
||||
// armoFEURL := urlSlices[2] // mandatory
|
||||
// if len(urlSlices) <= 4 {
|
||||
// armoAUTHURL = urlSlices[3]
|
||||
// }
|
||||
// getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL, armoAUTHURL))
|
||||
// }
|
||||
// return nil
|
||||
// }
|
||||
type Credentials struct {
|
||||
Account string
|
||||
ClientID string
|
||||
SecretKey string
|
||||
}
|
||||
|
||||
@@ -8,19 +8,37 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
|
||||
giturl "github.com/armosec/go-git-url"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type ScanningContext string
|
||||
|
||||
const (
|
||||
ScanCluster string = "cluster"
|
||||
ScanLocalFiles string = "yaml"
|
||||
ContextCluster ScanningContext = "cluster"
|
||||
ContextFile ScanningContext = "single-file"
|
||||
ContextDir ScanningContext = "local-dir"
|
||||
ContextGitURL ScanningContext = "git-url"
|
||||
ContextGitLocal ScanningContext = "git-local"
|
||||
)
|
||||
|
||||
const ( // deprecated
|
||||
ScopeCluster = "cluster"
|
||||
ScopeYAML = "yaml"
|
||||
)
|
||||
const (
|
||||
// ScanCluster string = "cluster"
|
||||
// ScanLocalFiles string = "yaml"
|
||||
localControlInputsFilename string = "controls-inputs.json"
|
||||
localExceptionsFilename string = "exceptions.json"
|
||||
)
|
||||
@@ -29,6 +47,10 @@ type BoolPtrFlag struct {
|
||||
valPtr *bool
|
||||
}
|
||||
|
||||
func NewBoolPtr(b *bool) BoolPtrFlag {
|
||||
return BoolPtrFlag{valPtr: b}
|
||||
}
|
||||
|
||||
func (bpf *BoolPtrFlag) Type() string {
|
||||
return "bool"
|
||||
}
|
||||
@@ -64,32 +86,46 @@ func (bpf *BoolPtrFlag) Set(val string) error {
|
||||
}
|
||||
|
||||
// TODO - UPDATE
|
||||
type ViewTypes string
|
||||
|
||||
const (
|
||||
ResourceViewType ViewTypes = "resource"
|
||||
ControlViewType ViewTypes = "control"
|
||||
)
|
||||
|
||||
type PolicyIdentifier struct {
|
||||
Name string // policy name e.g. nsa,mitre,c-0012
|
||||
Kind apisv1.NotificationPolicyKind // policy kind e.g. Framework,Control,Rule
|
||||
Designators armotypes.PortalDesignator
|
||||
}
|
||||
|
||||
type ScanInfo struct {
|
||||
Getters // TODO - remove from object
|
||||
PolicyIdentifier []reporthandling.PolicyIdentifier // TODO - remove from object
|
||||
UseExceptions string // Load file with exceptions configuration
|
||||
ControlsInputs string // Load file with inputs for controls
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
|
||||
VerboseMode bool // Display all of the input resources and not only failed resources
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
FormatVersion string // Output object can be different between versions, this is for testing and backward compatibility
|
||||
ExcludedNamespaces string // used for host scanner namespace
|
||||
IncludeNamespaces string // DEPRECATED?
|
||||
InputPatterns []string // Yaml files input patterns
|
||||
Silent bool // Silent mode - Do not print progress logs
|
||||
FailThreshold float32 // Failure score threshold
|
||||
Submit bool // Submit results to Armo BE
|
||||
ScanID string // Report id of the current scan
|
||||
HostSensorEnabled BoolPtrFlag // Deploy ARMO K8s host scanner to collect data from certain controls
|
||||
HostSensorYamlPath string // Path to hostsensor file
|
||||
Local bool // Do not submit results
|
||||
Account string // account ID
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
Getters // TODO - remove from object
|
||||
PolicyIdentifier []PolicyIdentifier // TODO - remove from object
|
||||
UseExceptions string // Load file with exceptions configuration
|
||||
ControlsInputs string // Load file with inputs for controls
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
|
||||
VerboseMode bool // Display all of the input resources and not only failed resources
|
||||
View string // Display all of the input resources and not only failed resources
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
FormatVersion string // Output object can be different between versions, this is for testing and backward compatibility
|
||||
ExcludedNamespaces string // used for host scanner namespace
|
||||
IncludeNamespaces string //
|
||||
InputPatterns []string // Yaml files input patterns
|
||||
Silent bool // Silent mode - Do not print progress logs
|
||||
FailThreshold float32 // Failure score threshold
|
||||
Submit bool // Submit results to Kubescape Cloud BE
|
||||
ScanID string // Report id of the current scan
|
||||
HostSensorEnabled BoolPtrFlag // Deploy Kubescape K8s host scanner to collect data from certain controls
|
||||
HostSensorYamlPath string // Path to hostsensor file
|
||||
Local bool // Do not submit results
|
||||
Credentials Credentials // account ID
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
}
|
||||
|
||||
type Getters struct {
|
||||
@@ -169,18 +205,18 @@ func (scanInfo *ScanInfo) setOutputFile() {
|
||||
}
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) GetScanningEnvironment() string {
|
||||
if len(scanInfo.InputPatterns) != 0 {
|
||||
return ScanLocalFiles
|
||||
}
|
||||
return ScanCluster
|
||||
}
|
||||
// func (scanInfo *ScanInfo) GetScanningEnvironment() string {
|
||||
// if len(scanInfo.InputPatterns) != 0 {
|
||||
// return ScanLocalFiles
|
||||
// }
|
||||
// return ScanCluster
|
||||
// }
|
||||
|
||||
func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind reporthandling.NotificationPolicyKind) {
|
||||
func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind apisv1.NotificationPolicyKind) {
|
||||
for _, policy := range policies {
|
||||
if !scanInfo.contains(policy) {
|
||||
newPolicy := reporthandling.PolicyIdentifier{}
|
||||
newPolicy.Kind = kind // reporthandling.KindFramework
|
||||
newPolicy := PolicyIdentifier{}
|
||||
newPolicy.Kind = kind
|
||||
newPolicy.Name = policy
|
||||
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
|
||||
}
|
||||
@@ -220,77 +256,196 @@ func scanInfoToScanMetadata(scanInfo *ScanInfo) *reporthandlingv2.Metadata {
|
||||
metadata.ScanMetadata.TargetNames = append(metadata.ScanMetadata.TargetNames, policy.Name)
|
||||
}
|
||||
|
||||
metadata.ScanMetadata.KubescapeVersion = BuildNumber
|
||||
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
|
||||
metadata.ScanMetadata.FailThreshold = scanInfo.FailThreshold
|
||||
metadata.ScanMetadata.HostScanner = scanInfo.HostSensorEnabled.GetBool()
|
||||
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
|
||||
metadata.ScanMetadata.ControlsInputs = scanInfo.ControlsInputs
|
||||
|
||||
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Cluster
|
||||
if scanInfo.GetScanningEnvironment() == ScanLocalFiles {
|
||||
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.File
|
||||
}
|
||||
|
||||
inputFiles := ""
|
||||
if len(scanInfo.InputPatterns) > 0 {
|
||||
inputFiles = scanInfo.InputPatterns[0]
|
||||
}
|
||||
switch GetScanningContext(inputFiles) {
|
||||
case ContextCluster:
|
||||
// cluster
|
||||
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Cluster
|
||||
case ContextFile:
|
||||
// local file
|
||||
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.File
|
||||
case ContextGitURL:
|
||||
// url
|
||||
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Repo
|
||||
case ContextGitLocal:
|
||||
// local-git
|
||||
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.GitLocal
|
||||
case ContextDir:
|
||||
// directory
|
||||
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Directory
|
||||
|
||||
}
|
||||
|
||||
setContextMetadata(&metadata.ContextMetadata, inputFiles)
|
||||
|
||||
return metadata
|
||||
}
|
||||
|
||||
func setContextMetadata(contextMetadata *reporthandlingv2.ContextMetadata, input string) {
|
||||
func (scanInfo *ScanInfo) GetScanningContext() ScanningContext {
|
||||
input := ""
|
||||
if len(scanInfo.InputPatterns) > 0 {
|
||||
input = scanInfo.InputPatterns[0]
|
||||
}
|
||||
return GetScanningContext(input)
|
||||
}
|
||||
|
||||
// GetScanningContext get scanning context from the input param
|
||||
func GetScanningContext(input string) ScanningContext {
|
||||
// cluster
|
||||
if input == "" {
|
||||
contextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{
|
||||
ContextName: k8sinterface.GetContextName(),
|
||||
}
|
||||
return
|
||||
return ContextCluster
|
||||
}
|
||||
|
||||
// url
|
||||
if gitParser, err := giturl.NewGitURL(input); err == nil {
|
||||
if gitParser.GetBranch() == "" {
|
||||
gitParser.SetDefaultBranch()
|
||||
}
|
||||
contextMetadata.RepoContextMetadata = &reporthandlingv2.RepoContextMetadata{
|
||||
Repo: gitParser.GetRepo(),
|
||||
Owner: gitParser.GetOwner(),
|
||||
Branch: gitParser.GetBranch(),
|
||||
}
|
||||
return
|
||||
if _, err := giturl.NewGitURL(input); err == nil {
|
||||
return ContextGitURL
|
||||
}
|
||||
|
||||
if !filepath.IsAbs(input) {
|
||||
if !filepath.IsAbs(input) { // parse path
|
||||
if o, err := os.Getwd(); err == nil {
|
||||
input = filepath.Join(o, input)
|
||||
}
|
||||
}
|
||||
|
||||
// local git repo
|
||||
if _, err := NewLocalGitRepository(input); err == nil {
|
||||
return ContextGitLocal
|
||||
}
|
||||
|
||||
// single file
|
||||
if IsFile(input) {
|
||||
contextMetadata.FileContextMetadata = &reporthandlingv2.FileContextMetadata{
|
||||
FilePath: input,
|
||||
HostName: getHostname(),
|
||||
}
|
||||
return
|
||||
return ContextFile
|
||||
}
|
||||
|
||||
// dir/glob
|
||||
if !IsFile(input) {
|
||||
return ContextDir
|
||||
}
|
||||
func setContextMetadata(contextMetadata *reporthandlingv2.ContextMetadata, input string) {
|
||||
switch GetScanningContext(input) {
|
||||
case ContextCluster:
|
||||
contextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{
|
||||
ContextName: k8sinterface.GetContextName(),
|
||||
}
|
||||
case ContextGitURL:
|
||||
// url
|
||||
context, err := metadataGitURL(input)
|
||||
if err != nil {
|
||||
logger.L().Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
|
||||
}
|
||||
contextMetadata.RepoContextMetadata = context
|
||||
case ContextDir:
|
||||
contextMetadata.DirectoryContextMetadata = &reporthandlingv2.DirectoryContextMetadata{
|
||||
BasePath: input,
|
||||
BasePath: getAbsPath(input),
|
||||
HostName: getHostname(),
|
||||
}
|
||||
return
|
||||
case ContextFile:
|
||||
contextMetadata.FileContextMetadata = &reporthandlingv2.FileContextMetadata{
|
||||
FilePath: getAbsPath(input),
|
||||
HostName: getHostname(),
|
||||
}
|
||||
case ContextGitLocal:
|
||||
// local
|
||||
context, err := metadataGitLocal(input)
|
||||
if err != nil {
|
||||
logger.L().Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
|
||||
}
|
||||
contextMetadata.RepoContextMetadata = context
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func metadataGitURL(input string) (*reporthandlingv2.RepoContextMetadata, error) {
|
||||
context := &reporthandlingv2.RepoContextMetadata{}
|
||||
gitParser, err := giturl.NewGitAPI(input)
|
||||
if err != nil {
|
||||
return context, fmt.Errorf("%w", err)
|
||||
}
|
||||
if gitParser.GetBranchName() == "" {
|
||||
gitParser.SetDefaultBranchName()
|
||||
}
|
||||
context.Provider = gitParser.GetProvider()
|
||||
context.Repo = gitParser.GetRepoName()
|
||||
context.Owner = gitParser.GetOwnerName()
|
||||
context.Branch = gitParser.GetBranchName()
|
||||
context.RemoteURL = gitParser.GetURL().String()
|
||||
|
||||
commit, err := gitParser.GetLatestCommit()
|
||||
if err != nil {
|
||||
return context, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
context.LastCommit = reporthandling.LastCommit{
|
||||
Hash: commit.SHA,
|
||||
Date: commit.Committer.Date,
|
||||
CommitterName: commit.Committer.Name,
|
||||
}
|
||||
|
||||
return context, nil
|
||||
}
|
||||
|
||||
func metadataGitLocal(input string) (*reporthandlingv2.RepoContextMetadata, error) {
|
||||
gitParser, err := NewLocalGitRepository(input)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
remoteURL, err := gitParser.GetRemoteUrl()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
context := &reporthandlingv2.RepoContextMetadata{}
|
||||
gitParserURL, err := giturl.NewGitURL(remoteURL)
|
||||
if err != nil {
|
||||
return context, fmt.Errorf("%w", err)
|
||||
}
|
||||
gitParserURL.SetBranchName(gitParser.GetBranchName())
|
||||
|
||||
context.Provider = gitParserURL.GetProvider()
|
||||
context.Repo = gitParserURL.GetRepoName()
|
||||
context.Owner = gitParserURL.GetOwnerName()
|
||||
context.Branch = gitParserURL.GetBranchName()
|
||||
context.RemoteURL = gitParserURL.GetURL().String()
|
||||
|
||||
commit, err := gitParser.GetLastCommit()
|
||||
if err != nil {
|
||||
return context, fmt.Errorf("%w", err)
|
||||
}
|
||||
context.LastCommit = reporthandling.LastCommit{
|
||||
Hash: commit.SHA,
|
||||
Date: commit.Committer.Date,
|
||||
CommitterName: commit.Committer.Name,
|
||||
}
|
||||
|
||||
return context, nil
|
||||
}
|
||||
func getHostname() string {
|
||||
if h, e := os.Hostname(); e == nil {
|
||||
return h
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func getAbsPath(p string) string {
|
||||
if !filepath.IsAbs(p) { // parse path
|
||||
if o, err := os.Getwd(); err == nil {
|
||||
return filepath.Join(o, p)
|
||||
}
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// ScanningContextToScanningScope convert the context to the deprecated scope
|
||||
func ScanningContextToScanningScope(scanningContext ScanningContext) string {
|
||||
if scanningContext == ContextCluster {
|
||||
return ScopeCluster
|
||||
}
|
||||
return ScopeYAML
|
||||
}
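// Illustration: the decision order GetScanningContext implements above. Empty input means
// a cluster scan, a parsable git URL means git-url, a local git repository means git-local,
// an existing regular file means single-file, and anything else is treated as a directory
// or glob. The predicate arguments below are stand-ins for giturl.NewGitURL,
// NewLocalGitRepository and IsFile.
package main

import "fmt"

type ScanningContext string

const (
	ContextCluster  ScanningContext = "cluster"
	ContextFile     ScanningContext = "single-file"
	ContextDir      ScanningContext = "local-dir"
	ContextGitURL   ScanningContext = "git-url"
	ContextGitLocal ScanningContext = "git-local"
)

func getScanningContext(input string, isGitURL, isLocalGit, isFile func(string) bool) ScanningContext {
	switch {
	case input == "":
		return ContextCluster
	case isGitURL(input):
		return ContextGitURL
	case isLocalGit(input):
		return ContextGitLocal
	case isFile(input):
		return ContextFile
	default:
		return ContextDir
	}
}

func main() {
	no := func(string) bool { return false }
	yes := func(string) bool { return true }
	fmt.Println(getScanningContext("", no, no, no))                                        // cluster
	fmt.Println(getScanningContext("https://github.com/kubescape/kubescape", yes, no, no)) // git-url
	fmt.Println(getScanningContext("deploy.yaml", no, no, yes))                            // single-file
	fmt.Println(getScanningContext("./manifests", no, no, no))                             // local-dir
}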
|
||||
|
||||
@@ -3,7 +3,7 @@ package cautils
|
||||
import (
|
||||
"testing"
|
||||
|
||||
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -18,34 +18,6 @@ func TestSetContextMetadata(t *testing.T) {
|
||||
assert.Nil(t, ctx.HelmContextMetadata)
|
||||
assert.Nil(t, ctx.RepoContextMetadata)
|
||||
}
|
||||
{
|
||||
ctx := reporthandlingv2.ContextMetadata{}
|
||||
setContextMetadata(&ctx, "file")
|
||||
|
||||
assert.Nil(t, ctx.ClusterContextMetadata)
|
||||
assert.NotNil(t, ctx.DirectoryContextMetadata)
|
||||
assert.Nil(t, ctx.FileContextMetadata)
|
||||
assert.Nil(t, ctx.HelmContextMetadata)
|
||||
assert.Nil(t, ctx.RepoContextMetadata)
|
||||
|
||||
hostName := getHostname()
|
||||
assert.Contains(t, ctx.DirectoryContextMetadata.BasePath, "file")
|
||||
assert.Equal(t, hostName, ctx.DirectoryContextMetadata.HostName)
|
||||
}
|
||||
{
|
||||
ctx := reporthandlingv2.ContextMetadata{}
|
||||
setContextMetadata(&ctx, "scaninfo_test.go")
|
||||
|
||||
assert.Nil(t, ctx.ClusterContextMetadata)
|
||||
assert.Nil(t, ctx.DirectoryContextMetadata)
|
||||
assert.NotNil(t, ctx.FileContextMetadata)
|
||||
assert.Nil(t, ctx.HelmContextMetadata)
|
||||
assert.Nil(t, ctx.RepoContextMetadata)
|
||||
|
||||
hostName := getHostname()
|
||||
assert.Contains(t, ctx.FileContextMetadata.FilePath, "scaninfo_test.go")
|
||||
assert.Equal(t, hostName, ctx.FileContextMetadata.HostName)
|
||||
}
|
||||
{
|
||||
ctx := reporthandlingv2.ContextMetadata{}
|
||||
setContextMetadata(&ctx, "https://github.com/armosec/kubescape")
|
||||
@@ -65,3 +37,8 @@ func TestSetContextMetadata(t *testing.T) {
|
||||
func TestGetHostname(t *testing.T) {
|
||||
assert.NotEqual(t, "", getHostname())
|
||||
}
|
||||
|
||||
func TestGetScanningContext(t *testing.T) {
|
||||
assert.Equal(t, ContextCluster, GetScanningContext(""))
|
||||
assert.Equal(t, ContextGitURL, GetScanningContext("https://github.com/armosec/kubescape"))
|
||||
}
|
||||
|
||||
core/cautils/testdata/helm_expected_default_values.json (vendored, new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
{
|
||||
"affinity": {},
|
||||
"configMap": {
|
||||
"create": false,
|
||||
"params": {
|
||||
"clusterName": "<MyK8sClusterName>",
|
||||
"customerGUID": "<MyGUID>"
|
||||
}
|
||||
},
|
||||
"fullnameOverride": "",
|
||||
"image": {
|
||||
"imageName": "kubescape",
|
||||
"pullPolicy": "Always",
|
||||
"repository": "quay.io/armosec",
|
||||
"tag": "latest"
|
||||
},
|
||||
"imagePullSecrets": [],
|
||||
"nameOverride": "",
|
||||
"nodeSelector": {},
|
||||
"podAnnotations": {},
|
||||
"podSecurityContext": {},
|
||||
"resources": {
|
||||
"limits": {
|
||||
"cpu": "500m",
|
||||
"memory": "512Mi"
|
||||
},
|
||||
"requests": {
|
||||
"cpu": "200m",
|
||||
"memory": "256Mi"
|
||||
}
|
||||
},
|
||||
"schedule": "* * 1 * *",
|
||||
"securityContext": {},
|
||||
"serviceAccount": {
|
||||
"annotations": {},
|
||||
"create": true,
|
||||
"name": "kubescape-discovery"
|
||||
},
|
||||
"tolerations": []
|
||||
}
|
||||
BIN core/cautils/testdata/localrepo.git (vendored, new file; binary file not shown)
BIN core/cautils/testdata/withoutremotes.git (vendored, new file; binary file not shown)
@@ -7,9 +7,10 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
pkgutils "github.com/armosec/utils-go/utils"
|
||||
"github.com/armosec/utils-go/boolutils"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
|
||||
"golang.org/x/mod/semver"
|
||||
)
|
||||
|
||||
@@ -17,6 +18,7 @@ const SKIP_VERSION_CHECK_DEPRECATED = "KUBESCAPE_SKIP_UPDATE_CHECK"
|
||||
const SKIP_VERSION_CHECK = "KS_SKIP_UPDATE_CHECK"
|
||||
|
||||
var BuildNumber string
|
||||
var Client string
|
||||
|
||||
const UnknownBuildNumber = "unknown"
|
||||
|
||||
@@ -28,9 +30,9 @@ func NewIVersionCheckHandler() IVersionCheckHandler {
|
||||
if BuildNumber == "" {
|
||||
logger.L().Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
|
||||
}
|
||||
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && pkgutils.StringToBool(v) {
|
||||
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && boolutils.StringToBool(v) {
|
||||
return NewVersionCheckHandlerMock()
|
||||
} else if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_DEPRECATED); ok && pkgutils.StringToBool(v) {
|
||||
} else if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_DEPRECATED); ok && boolutils.StringToBool(v) {
|
||||
return NewVersionCheckHandlerMock()
|
||||
}
|
||||
return NewVersionCheckHandler()
|
||||
@@ -48,10 +50,12 @@ type VersionCheckHandler struct {
|
||||
}
|
||||
type VersionCheckRequest struct {
|
||||
Client string `json:"client"` // kubescape
|
||||
ClientBuild string `json:"clientBuild"` // client build environment
|
||||
ClientVersion string `json:"clientVersion"` // kubescape version
|
||||
Framework string `json:"framework"` // framework name
|
||||
FrameworkVersion string `json:"frameworkVersion"` // framework version
|
||||
ScanningTarget string `json:"target"` // scanning target- cluster/yaml
|
||||
ScanningTarget string `json:"target"` // Deprecated
|
||||
ScanningContext string `json:"context"` // scanning context- cluster/file/gitURL/localGit/dir
|
||||
}
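// Illustration: the JSON payload the struct above serializes to. Field names and json tags
// are copied from VersionCheckRequest; the values are made up for this sketch, except that
// "kubescape" and "local-build" are the defaults set in NewVersionCheckRequest below.
package main

import (
	"encoding/json"
	"fmt"
)

type VersionCheckRequest struct {
	Client           string `json:"client"`
	ClientBuild      string `json:"clientBuild"`
	ClientVersion    string `json:"clientVersion"`
	Framework        string `json:"framework"`
	FrameworkVersion string `json:"frameworkVersion"`
	ScanningTarget   string `json:"target"`  // deprecated, still sent for backward compatibility
	ScanningContext  string `json:"context"` // cluster / file / gitURL / localGit / dir
}

func main() {
	req := VersionCheckRequest{
		Client:          "kubescape",
		ClientBuild:     "local-build",
		ClientVersion:   "v2.0.0",
		Framework:       "nsa",
		ScanningTarget:  "cluster",
		ScanningContext: "cluster",
	}
	b, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(b))
}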
|
||||
|
||||
type VersionCheckResponse struct {
|
||||
@@ -74,8 +78,12 @@ func NewVersionCheckRequest(buildNumber, frameworkName, frameworkVersion, scanni
|
||||
if scanningTarget == "" {
|
||||
scanningTarget = "unknown"
|
||||
}
|
||||
if Client == "" {
|
||||
Client = "local-build"
|
||||
}
|
||||
return &VersionCheckRequest{
|
||||
Client: "kubescape",
|
||||
ClientBuild: Client,
|
||||
ClientVersion: buildNumber,
|
||||
Framework: frameworkName,
|
||||
FrameworkVersion: frameworkVersion,
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/mod/semver"
|
||||
)
|
||||
|
||||
@@ -3,7 +3,7 @@ package cautils
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -14,13 +14,16 @@ var (
|
||||
"KernelVersion",
|
||||
"LinuxSecurityHardeningStatus",
|
||||
"OpenPortsList",
|
||||
"LinuxKernelVariables"}
|
||||
"LinuxKernelVariables",
|
||||
"KubeletInfo",
|
||||
"KubeProxyInfo",
|
||||
}
|
||||
CloudResources = []string{"ClusterDescribe"}
|
||||
)
|
||||
|
||||
func MapArmoResource(armoResourceMap *ArmoResources, resources []string) []string {
|
||||
func MapKSResource(ksResourceMap *KSResources, resources []string) []string {
|
||||
var hostResources []string
|
||||
for k := range *armoResourceMap {
|
||||
for k := range *ksResourceMap {
|
||||
for _, resource := range resources {
|
||||
if strings.Contains(k, resource) {
|
||||
hostResources = append(hostResources, k)
|
||||
@@ -30,16 +33,16 @@ func MapArmoResource(armoResourceMap *ArmoResources, resources []string) []strin
|
||||
return hostResources
|
||||
}
|
||||
|
||||
func MapHostResources(armoResourceMap *ArmoResources) []string {
|
||||
return MapArmoResource(armoResourceMap, HostSensorResources)
|
||||
func MapHostResources(ksResourceMap *KSResources) []string {
|
||||
return MapKSResource(ksResourceMap, HostSensorResources)
|
||||
}
|
||||
|
||||
func MapImageVulnResources(armoResourceMap *ArmoResources) []string {
|
||||
return MapArmoResource(armoResourceMap, ImageVulnResources)
|
||||
func MapImageVulnResources(ksResourceMap *KSResources) []string {
|
||||
return MapKSResource(ksResourceMap, ImageVulnResources)
|
||||
}
|
||||
|
||||
func MapCloudResources(armoResourceMap *ArmoResources) []string {
|
||||
return MapArmoResource(armoResourceMap, CloudResources)
|
||||
func MapCloudResources(ksResourceMap *KSResources) []string {
|
||||
return MapKSResource(ksResourceMap, CloudResources)
|
||||
}
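// Illustration: MapHostResources, MapImageVulnResources and MapCloudResources above are all
// thin wrappers over the same substring match. The KSResources value type is not shown in
// this hunk, so a simplified map is assumed here, and the keys below are illustrative only.
package main

import (
	"fmt"
	"strings"
)

// Simplified stand-in for cautils.KSResources; only the keys matter for the matching.
type KSResources map[string]interface{}

// mapKSResource keeps every key that contains one of the requested resource names,
// mirroring the loop in MapKSResource above.
func mapKSResource(ksResourceMap KSResources, resources []string) []string {
	var matched []string
	for k := range ksResourceMap {
		for _, resource := range resources {
			if strings.Contains(k, resource) {
				matched = append(matched, k)
			}
		}
	}
	return matched
}

func main() {
	m := KSResources{
		"hostdata/v1beta0/KubeletInfo": nil, // illustrative key
		"apps/v1/Deployment":           nil, // illustrative key
	}
	fmt.Println(mapKSResource(m, []string{"KubeletInfo", "KubeProxyInfo"}))
	// [hostdata/v1beta0/KubeletInfo]
}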
|
||||
|
||||
func SetInfoMapForResources(info string, resources []string, errorMap map[string]apis.StatusInfo) {
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
|
||||
func (ks *Kubescape) SetCachedConfig(setConfig *metav1.SetConfig) error {
|
||||
|
||||
tenant := getTenantConfig("", "", getKubernetesApi())
|
||||
tenant := getTenantConfig(nil, "", getKubernetesApi())
|
||||
|
||||
if setConfig.Account != "" {
|
||||
tenant.GetConfigObj().AccountID = setConfig.Account
|
||||
@@ -25,13 +25,13 @@ func (ks *Kubescape) SetCachedConfig(setConfig *metav1.SetConfig) error {
|
||||
|
||||
// View cached configurations
|
||||
func (ks *Kubescape) ViewCachedConfig(viewConfig *metav1.ViewConfig) error {
|
||||
tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
|
||||
tenant := getTenantConfig(nil, "", getKubernetesApi()) // change k8sinterface
|
||||
fmt.Fprintf(viewConfig.Writer, "%s\n", tenant.GetConfigObj().Config())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ks *Kubescape) DeleteCachedConfig(deleteConfig *metav1.DeleteConfig) error {
|
||||
|
||||
tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
|
||||
tenant := getTenantConfig(nil, "", getKubernetesApi()) // change k8sinterface
|
||||
return tenant.DeleteCachedConfig()
|
||||
}
|
||||
|
||||
@@ -4,19 +4,19 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
)
|
||||
|
||||
func (ks *Kubescape) DeleteExceptions(delExceptions *v1.DeleteExceptions) error {
|
||||
|
||||
// load cached config
|
||||
getTenantConfig(delExceptions.Account, "", getKubernetesApi())
|
||||
getTenantConfig(&delExceptions.Credentials, "", getKubernetesApi())
|
||||
|
||||
// login kubescape SaaS
|
||||
armoAPI := getter.GetArmoAPIConnector()
|
||||
if err := armoAPI.Login(); err != nil {
|
||||
ksCloudAPI := getter.GetKSCloudAPIConnector()
|
||||
if err := ksCloudAPI.Login(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ func (ks *Kubescape) DeleteExceptions(delExceptions *v1.DeleteExceptions) error
|
||||
continue
|
||||
}
|
||||
logger.L().Info("Deleting exception", helpers.String("name", exceptionName))
|
||||
if err := armoAPI.DeleteException(exceptionName); err != nil {
|
||||
if err := ksCloudAPI.DeleteException(exceptionName); err != nil {
|
||||
return fmt.Errorf("failed to delete exception '%s', reason: %s", exceptionName, err.Error())
|
||||
}
|
||||
logger.L().Success("Exception deleted successfully")
|
||||
|
||||
@@ -8,9 +8,9 @@ import (
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
)
|
||||
|
||||
var downloadFunc = map[string]func(*metav1.DownloadInfo) error{
|
||||
@@ -80,7 +80,7 @@ func downloadArtifacts(downloadInfo *metav1.DownloadInfo) error {
|
||||
}
|
||||
|
||||
func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
|
||||
|
||||
controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetAccountID(), nil)
|
||||
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetContextName())
|
||||
@@ -104,7 +104,7 @@ func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
|
||||
var err error
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
|
||||
|
||||
exceptionsGetter := getExceptionsGetter("")
|
||||
exceptions := []armotypes.PostureExceptionPolicy{}
|
||||
@@ -128,9 +128,9 @@ func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetTennatEmail(), true, nil)
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
// if framework name not specified - download all frameworks
|
||||
@@ -170,9 +170,9 @@ func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
func downloadControl(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetTennatEmail(), false, nil)
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
// TODO - support
|
||||
|
||||
@@ -3,18 +3,19 @@ package core
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resourcehandler"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
|
||||
reporterv2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v2"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/rbac-utils/rbacscanner"
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/kubescape/rbac-utils/rbacscanner"
|
||||
)
|
||||
|
||||
// getKubernetesApi
|
||||
@@ -24,11 +25,11 @@ func getKubernetesApi() *k8sinterface.KubernetesApi {
|
||||
}
|
||||
return k8sinterface.NewKubernetesApi()
|
||||
}
|
||||
func getTenantConfig(Account, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
|
||||
func getTenantConfig(credentials *cautils.Credentials, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
|
||||
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
|
||||
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
return cautils.NewLocalConfig(getter.GetKSCloudAPIConnector(), credentials, clusterName)
|
||||
}
|
||||
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
return cautils.NewClusterConfig(k8s, getter.GetKSCloudAPIConnector(), credentials, clusterName)
|
||||
}
|
||||
|
||||
func getExceptionsGetter(useExceptions string) getter.IExceptionsGetter {
|
||||
@@ -36,7 +37,7 @@ func getExceptionsGetter(useExceptions string) getter.IExceptionsGetter {
|
||||
// load exceptions from file
|
||||
return getter.NewLoadPolicy([]string{useExceptions})
|
||||
} else {
|
||||
return getter.GetArmoAPIConnector()
|
||||
return getter.GetKSCloudAPIConnector()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -47,9 +48,13 @@ func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.Kubern
|
||||
return nil
|
||||
}
|
||||
|
||||
func getReporter(tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan bool) reporter.IReport {
|
||||
func getReporter(tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan bool, scanningContext cautils.ScanningContext) reporter.IReport {
|
||||
if submit {
|
||||
return reporterv2.NewReportEventReceiver(tenantConfig.GetConfigObj(), reportID)
|
||||
submitData := reporterv2.SubmitContextScan
|
||||
if scanningContext != cautils.ContextCluster {
|
||||
submitData = reporterv2.SubmitContextRepository
|
||||
}
|
||||
return reporterv2.NewReportEventReceiver(tenantConfig.GetConfigObj(), reportID, submitData)
|
||||
}
|
||||
if tenantConfig.GetAccountID() == "" {
|
||||
// Add link only when scanning a cluster using a framework
|
||||
@@ -68,7 +73,7 @@ func getResourceHandler(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenant
|
||||
// scanInfo.HostSensor.SetBool(false)
|
||||
return resourcehandler.NewFileResourceHandler(scanInfo.InputPatterns, registryAdaptors)
|
||||
}
|
||||
getter.GetArmoAPIConnector()
|
||||
getter.GetKSCloudAPIConnector()
|
||||
rbacObjects := getRBACHandler(tenantConfig, k8s, scanInfo.Submit)
|
||||
return resourcehandler.NewK8sResourceHandler(k8s, getFieldSelector(scanInfo), hostSensorHandler, rbacObjects, registryAdaptors)
|
||||
}
|
||||
@@ -105,7 +110,7 @@ func getFieldSelector(scanInfo *cautils.ScanInfo) resourcehandler.IFieldSelector
|
||||
return &resourcehandler.EmptySelector{}
|
||||
}
|
||||
|
||||
func policyIdentifierNames(pi []reporthandling.PolicyIdentifier) string {
|
||||
func policyIdentifierNames(pi []cautils.PolicyIdentifier) string {
|
||||
policiesNames := ""
|
||||
for i := range pi {
|
||||
policiesNames += pi[i].Name
|
||||
@@ -119,11 +124,10 @@ func policyIdentifierNames(pi []reporthandling.PolicyIdentifier) string {
|
||||
return policiesNames
|
||||
}
|
||||
|
||||
// setSubmitBehavior - Setup the desired cluster behavior regarding submitting to the Armo BE
|
||||
// setSubmitBehavior - Setup the desired cluster behavior regarding submitting to the Kubescape Cloud BE
|
||||
func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig) {
|
||||
|
||||
/*
|
||||
|
||||
If "First run (local config not found)" -
|
||||
Default/keep-local - Do not send report
|
||||
Submit - Create tenant & Submit report
|
||||
@@ -140,8 +144,20 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
|
||||
return
|
||||
}
|
||||
|
||||
scanningContext := scanInfo.GetScanningContext()
|
||||
if scanningContext == cautils.ContextFile || scanningContext == cautils.ContextDir {
|
||||
scanInfo.Submit = false
|
||||
return
|
||||
}
|
||||
|
||||
if tenantConfig.IsConfigFound() { // config found in cache (submitted)
|
||||
if !scanInfo.Local {
|
||||
if tenantConfig.GetAccountID() != "" {
|
||||
if _, err := uuid.Parse(tenantConfig.GetAccountID()); err != nil {
|
||||
scanInfo.Submit = false
|
||||
return
|
||||
}
|
||||
}
|
||||
// Submit report
|
||||
scanInfo.Submit = true
|
||||
}
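// Illustration: a condensed, self-contained version of the submit decision in the hunk
// above, covering only the branches visible there (the "first run" branch lies outside the
// shown context). uuid.Parse is the same github.com/google/uuid call the real code uses;
// the ScanningContext values are written as plain strings here.
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func shouldSubmit(scanningContext string, configFound, local bool, accountID string) bool {
	// File and directory scans are never submitted.
	if scanningContext == "single-file" || scanningContext == "local-dir" {
		return false
	}
	if configFound && !local {
		// A non-empty account ID must be a valid UUID, otherwise do not submit.
		if accountID != "" {
			if _, err := uuid.Parse(accountID); err != nil {
				return false
			}
		}
		return true
	}
	return false
}

func main() {
	fmt.Println(shouldSubmit("cluster", true, false, "11111111-2222-3333-4444-555555555555")) // true
	fmt.Println(shouldSubmit("local-dir", true, false, ""))                                   // false
}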
|
||||
@@ -149,13 +165,13 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
|
||||
|
||||
}
|
||||
|
||||
// setPolicyGetter set the policy getter - local file/github release/ArmoAPI
|
||||
// getPolicyGetter sets the policy getter - local file/github release/Kubescape Cloud API
|
||||
func getPolicyGetter(loadPoliciesFromFile []string, tennatEmail string, frameworkScope bool, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
|
||||
if len(loadPoliciesFromFile) > 0 {
|
||||
return getter.NewLoadPolicy(loadPoliciesFromFile)
|
||||
}
|
||||
if tennatEmail != "" && frameworkScope {
|
||||
g := getter.GetArmoAPIConnector() // download policy from ARMO backend
|
||||
g := getter.GetKSCloudAPIConnector() // download policy from Kubescape Cloud backend
|
||||
return g
|
||||
}
|
||||
if downloadReleasedPolicy == nil {
|
||||
@@ -165,27 +181,13 @@ func getPolicyGetter(loadPoliciesFromFile []string, tennatEmail string, framewor
|
||||
|
||||
}
|
||||
|
||||
// func setGetArmoAPIConnector(scanInfo *cautils.ScanInfo, customerGUID string) {
|
||||
// g := getter.GetArmoAPIConnector() // download policy from ARMO backend
|
||||
// g.SetCustomerGUID(customerGUID)
|
||||
// scanInfo.PolicyGetter = g
|
||||
// if scanInfo.ScanAll {
|
||||
// frameworks, err := g.ListCustomFrameworks(customerGUID)
|
||||
// if err != nil {
|
||||
// glog.Error("failed to get custom frameworks") // handle error
|
||||
// return
|
||||
// }
|
||||
// scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
|
||||
// }
|
||||
// }
|
||||
|
||||
// setConfigInputsGetter sets the config input getter - local file/github release/ArmoAPI
|
||||
// getConfigInputsGetter sets the config inputs getter - local file/github release/Kubescape Cloud API
|
||||
func getConfigInputsGetter(ControlsInputs string, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IControlsInputsGetter {
|
||||
if len(ControlsInputs) > 0 {
|
||||
return getter.NewLoadPolicy([]string{ControlsInputs})
|
||||
}
|
||||
if accountID != "" {
|
||||
g := getter.GetArmoAPIConnector() // download config from ARMO backend
|
||||
g := getter.GetKSCloudAPIConnector() // download config from Kubescape Cloud backend
|
||||
return g
|
||||
}
|
||||
if downloadReleasedPolicy == nil {
|
||||
|
||||
@@ -44,16 +44,16 @@ func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
 }

 func listFrameworks(listPolicies *metav1.ListPolicies) ([]string, error) {
-	tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
-	g := getPolicyGetter(nil, tenant.GetTennatEmail(), true, nil)
+	tenant := getTenantConfig(&listPolicies.Credentials, "", getKubernetesApi()) // change k8sinterface
+	g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)

 	return listFrameworksNames(g), nil
 }

 func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {
-	tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
+	tenant := getTenantConfig(&listPolicies.Credentials, "", getKubernetesApi()) // change k8sinterface

-	g := getPolicyGetter(nil, tenant.GetTennatEmail(), false, nil)
+	g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
 	l := getter.ListName
 	if listPolicies.ListIDs {
 		l = getter.ListID
@@ -63,11 +63,11 @@ func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {

 func listExceptions(listPolicies *metav1.ListPolicies) ([]string, error) {
 	// load tenant metav1
-	getTenantConfig(listPolicies.Account, "", getKubernetesApi())
+	getTenantConfig(&listPolicies.Credentials, "", getKubernetesApi())

 	var exceptionsNames []string
-	armoAPI := getExceptionsGetter("")
-	exceptions, err := armoAPI.GetExceptions("")
+	ksCloudAPI := getExceptionsGetter("")
+	exceptions, err := ksCloudAPI.GetExceptions("")
 	if err != nil {
 		return exceptionsNames, err
 	}
@@ -3,13 +3,12 @@ package core
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/opaprocessor"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/policyhandler"
|
||||
@@ -17,9 +16,10 @@ import (
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/resources"
|
||||
"github.com/kubescape/opa-utils/resources"
|
||||
)
|
||||
|
||||
type componentInterfaces struct {
|
||||
@@ -34,7 +34,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
|
||||
// ================== setup k8s interface object ======================================
|
||||
var k8s *k8sinterface.KubernetesApi
|
||||
if scanInfo.GetScanningEnvironment() == cautils.ScanCluster {
|
||||
if scanInfo.GetScanningContext() == cautils.ContextCluster {
|
||||
k8s = getKubernetesApi()
|
||||
if k8s == nil {
|
||||
logger.L().Fatal("failed connecting to Kubernetes cluster")
|
||||
@@ -43,16 +43,11 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
|
||||
// ================== setup tenant object ======================================
|
||||
|
||||
tenantConfig := getTenantConfig(scanInfo.Account, scanInfo.KubeContext, k8s)
|
||||
tenantConfig := getTenantConfig(&scanInfo.Credentials, scanInfo.KubeContext, k8s)
|
||||
|
||||
// Set submit behavior AFTER loading tenant config
|
||||
setSubmitBehavior(scanInfo, tenantConfig)
|
||||
|
||||
// Do not submit yaml scanning
|
||||
if len(scanInfo.InputPatterns) > 0 {
|
||||
scanInfo.Submit = false
|
||||
}
|
||||
|
||||
if scanInfo.Submit {
|
||||
// submit - Create tenant & Submit report
|
||||
if err := tenantConfig.SetTenant(); err != nil {
|
||||
@@ -63,7 +58,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
// ================== version testing ======================================
|
||||
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanInfo.GetScanningEnvironment()))
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", cautils.ScanningContextToScanningScope(scanInfo.GetScanningContext())))
|
||||
|
||||
// ================== setup host scanner object ======================================
|
||||
|
||||
@@ -91,10 +86,10 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
// ================== setup reporter & printer objects ======================================
|
||||
|
||||
// reporting behavior - setup reporter
|
||||
reportHandler := getReporter(tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan)
|
||||
reportHandler := getReporter(tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan, scanInfo.GetScanningContext())
|
||||
|
||||
// setup printer
|
||||
printerHandler := resultshandling.NewPrinter(scanInfo.Format, scanInfo.FormatVersion, scanInfo.VerboseMode)
|
||||
printerHandler := resultshandling.NewPrinter(scanInfo.Format, scanInfo.FormatVersion, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
|
||||
printerHandler.SetWriter(scanInfo.Output)
|
||||
|
||||
// ================== return interface ======================================
|
||||
@@ -124,13 +119,13 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
downloadReleasedPolicy := getter.NewDownloadReleasedPolicy() // download config inputs from github release
|
||||
|
||||
// set policy getter only after setting the customerGUID
|
||||
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetTennatEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
|
||||
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetTenantEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
|
||||
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
|
||||
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions)
|
||||
|
||||
// TODO - list supported frameworks/controls
|
||||
if scanInfo.ScanAll {
|
||||
scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), reporthandling.KindFramework)
|
||||
scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), apisv1.KindFramework)
|
||||
}
|
||||
|
||||
// remove host scanner components
|
||||
@@ -144,7 +139,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
|
||||
// ===================== policies & resources =====================
|
||||
policyHandler := policyhandler.NewPolicyHandler(interfaces.resourceHandler)
|
||||
scanData, err := collectResources(policyHandler, scanInfo)
|
||||
scanData, err := policyHandler.CollectResources(scanInfo.PolicyIdentifier, scanInfo)
|
||||
if err != nil {
|
||||
return resultsHandling, err
|
||||
}
|
||||
@@ -154,7 +149,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
reportResults := opaprocessor.NewOPAProcessor(scanData, deps)
|
||||
if err := reportResults.ProcessRulesListenner(); err != nil {
|
||||
// TODO - do something
|
||||
return resultsHandling, err
|
||||
return resultsHandling, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
// ========================= results handling =====================
|
||||
@@ -166,47 +161,3 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
|
||||
return resultsHandling, nil
|
||||
}
|
||||
|
||||
// TODO - remove function
|
||||
func collectResources(policyHandler *policyhandler.PolicyHandler, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
|
||||
policyNotification := &reporthandling.PolicyNotification{
|
||||
Rules: scanInfo.PolicyIdentifier,
|
||||
KubescapeNotification: reporthandling.KubescapeNotification{
|
||||
Designators: armotypes.PortalDesignator{},
|
||||
NotificationType: reporthandling.TypeExecPostureScan,
|
||||
},
|
||||
}
|
||||
switch policyNotification.KubescapeNotification.NotificationType {
|
||||
case reporthandling.TypeExecPostureScan:
|
||||
collectedResources, err := policyHandler.CollectResources(policyNotification, scanInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return collectedResources, nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("notification type '%s' Unknown", policyNotification.KubescapeNotification.NotificationType)
|
||||
}
|
||||
}
|
||||
|
||||
// func askUserForHostSensor() bool {
|
||||
// return false
|
||||
|
||||
// if !isatty.IsTerminal(os.Stdin.Fd()) {
|
||||
// return false
|
||||
// }
|
||||
// if ssss, err := os.Stdin.Stat(); err == nil {
|
||||
// // fmt.Printf("Found stdin type: %s\n", ssss.Mode().Type())
|
||||
// if ssss.Mode().Type()&(fs.ModeDevice|fs.ModeCharDevice) > 0 { //has TTY
|
||||
// fmt.Fprintf(os.Stderr, "Would you like to scan K8s nodes? [y/N]. This is required to collect valuable data for certain controls\n")
|
||||
// fmt.Fprintf(os.Stderr, "Use --enable-host-scan flag to suppress this message\n")
|
||||
// var b []byte = make([]byte, 1)
|
||||
// if n, err := os.Stdin.Read(b); err == nil {
|
||||
// if n > 0 && len(b) > 0 && (b[0] == 'y' || b[0] == 'Y') {
|
||||
// return true
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// return false
|
||||
// }
|
||||
|
||||
@@ -3,15 +3,16 @@ package core
|
||||
import (
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
)
|
||||
|
||||
func (ks *Kubescape) Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error {
|
||||
|
||||
// list resources
|
||||
postureReport, err := submitInterfaces.SubmitObjects.SetResourcesReport()
|
||||
report, err := submitInterfaces.SubmitObjects.SetResourcesReport()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -20,7 +21,12 @@ func (ks *Kubescape) Submit(submitInterfaces cliinterfaces.SubmitInterfaces) err
|
||||
return err
|
||||
}
|
||||
// report
|
||||
if err := submitInterfaces.Reporter.Submit(&cautils.OPASessionObj{PostureReport: postureReport, AllResources: allresources}); err != nil {
|
||||
o := &cautils.OPASessionObj{
|
||||
Report: report,
|
||||
AllResources: allresources,
|
||||
Metadata: &report.Metadata,
|
||||
}
|
||||
if err := submitInterfaces.Reporter.Submit(o); err != nil {
|
||||
return err
|
||||
}
|
||||
logger.L().Success("Data has been submitted successfully")
|
||||
@@ -29,11 +35,11 @@ func (ks *Kubescape) Submit(submitInterfaces cliinterfaces.SubmitInterfaces) err
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ks *Kubescape) SubmitExceptions(accountID, excPath string) error {
|
||||
func (ks *Kubescape) SubmitExceptions(credentials *cautils.Credentials, excPath string) error {
|
||||
logger.L().Info("submitting exceptions", helpers.String("path", excPath))
|
||||
|
||||
// load cached config
|
||||
tenantConfig := getTenantConfig(accountID, "", getKubernetesApi())
|
||||
tenantConfig := getTenantConfig(credentials, "", getKubernetesApi())
|
||||
if err := tenantConfig.SetTenant(); err != nil {
|
||||
logger.L().Error("failed setting account ID", helpers.Error(err))
|
||||
}
|
||||
@@ -46,12 +52,12 @@ func (ks *Kubescape) SubmitExceptions(accountID, excPath string) error {
|
||||
}
|
||||
|
||||
// login kubescape SaaS
|
||||
armoAPI := getter.GetArmoAPIConnector()
|
||||
if err := armoAPI.Login(); err != nil {
|
||||
ksCloudAPI := getter.GetKSCloudAPIConnector()
|
||||
if err := ksCloudAPI.Login(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := armoAPI.PostExceptions(exceptions); err != nil {
|
||||
if err := ksCloudAPI.PostExceptions(exceptions); err != nil {
|
||||
return err
|
||||
}
|
||||
logger.L().Success("Exceptions submitted successfully")
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
package cliinterfaces
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
)
|
||||
|
||||
type ISubmitObjects interface {
|
||||
SetResourcesReport() (*reporthandling.PostureReport, error)
|
||||
SetResourcesReport() (*reporthandlingv2.PostureReport, error)
|
||||
ListAllResources() (map[string]workloadinterface.IMetadata, error)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,8 @@
 package v1

+import "github.com/armosec/kubescape/v2/core/cautils"
+
 type DeleteExceptions struct {
-	Account    string
-	Exceptions []string
+	Credentials cautils.Credentials
+	Exceptions  []string
 }

@@ -1,9 +1,11 @@
 package v1

+import "github.com/armosec/kubescape/v2/core/cautils"
+
 type DownloadInfo struct {
-	Path     string // directory to save artifact. Default is "~/.kubescape/"
-	FileName string // can be empty
-	Target   string // type of artifact to download
-	Name     string // name of artifact to download
-	Account  string // AccountID
+	Path        string // directory to save artifact. Default is "~/.kubescape/"
+	FileName    string // can be empty
+	Target      string // type of artifact to download
+	Name        string // name of artifact to download
+	Credentials cautils.Credentials
 }

@@ -1,10 +1,12 @@
 package v1

+import "github.com/armosec/kubescape/v2/core/cautils"
+
 type ListPolicies struct {
-	Target  string
-	ListIDs bool
-	Account string
-	Format  string
+	Target      string
+	ListIDs     bool
+	Format      string
+	Credentials cautils.Credentials
 }

 type ListResponse struct {

@@ -1,9 +1,11 @@
 package v1

+import "github.com/armosec/kubescape/v2/core/cautils"
+
 type Submit struct {
-	Account string
+	Credentials cautils.Credentials
 }

 type Delete struct {
-	Account string
+	Credentials cautils.Credentials
 }
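Several of the metav1 request types above trade a bare Account string for a cautils.Credentials value. The sketch below shows what building such a request could look like; the credentials struct here is a local stand-in, and its fields (account, client ID, secret key) are an assumption for illustration, not the actual cautils.Credentials definition.

```go
package main

import "fmt"

// credentials is a local stand-in for cautils.Credentials; the real field
// set lives in core/cautils and may differ from this sketch.
type credentials struct {
	Account   string
	ClientID  string
	SecretKey string
}

// listPolicies mirrors the reworked metav1.ListPolicies request shape:
// identity material is carried as one Credentials value instead of a
// bare Account string.
type listPolicies struct {
	Target      string
	ListIDs     bool
	Format      string
	Credentials credentials
}

func main() {
	req := listPolicies{
		Target:      "frameworks",
		Format:      "json",
		Credentials: credentials{Account: "<account-guid>"},
	}
	fmt.Printf("listing %s for account %q\n", req.Target, req.Credentials.Account)
}
```

Grouping the identity material in one value lets every command in this refactor (list, download, submit, delete, submit exceptions) accept the same credentials without growing extra string parameters.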
@@ -15,8 +15,8 @@ type IKubescape interface {
|
||||
Download(downloadInfo *metav1.DownloadInfo) error // TODO - return downloaded policies
|
||||
|
||||
// submit
|
||||
Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error // TODO - func should receive object
|
||||
SubmitExceptions(accountID, excPath string) error // TODO - remove
|
||||
Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error // TODO - func should receive object
|
||||
SubmitExceptions(credentials *cautils.Credentials, excPath string) error // TODO - remove
|
||||
|
||||
// config
|
||||
SetCachedConfig(setConfig *metav1.SetConfig) error
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
var mockControl_0006 = `{"guid":"","name":"Allowed hostPath","attributes":{"armoBuiltin":true},"id":"C-0006","controlID":"C-0006","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if 
any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6}`
|
||||
|
||||
@@ -8,11 +8,12 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -35,6 +36,7 @@ type HostSensorHandler struct {
|
||||
DaemonSet *appsv1.DaemonSet
|
||||
podListLock sync.RWMutex
|
||||
gracePeriod int64
|
||||
workerPool workerPool
|
||||
}
|
||||
|
||||
func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile string) (*HostSensorHandler, error) {
|
||||
@@ -54,6 +56,7 @@ func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile
|
||||
HostSensorPodNames: map[string]string{},
|
||||
HostSensorUnscheduledPodNames: map[string]string{},
|
||||
gracePeriod: int64(15),
|
||||
workerPool: NewWorkerPool(),
|
||||
}
|
||||
// Don't deploy on cluster with no nodes. Some cloud providers prevents termination of K8s objects for cluster with no nodes!!!
|
||||
if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
|
||||
@@ -89,7 +92,7 @@ func (hsh *HostSensorHandler) Init() error {
|
||||
|
||||
func (hsh *HostSensorHandler) applyYAML() error {
|
||||
workloads, err := cautils.ReadFile([]byte(hostSensorYAML), cautils.YAML_FILE_FORMAT)
|
||||
if len(err) != 0 {
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read YAML files, reason: %v", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -5,11 +5,12 @@ import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
@@ -32,7 +33,23 @@ func (hsh *HostSensorHandler) HTTPGetToPod(podName, path string) ([]byte, error)
|
||||
|
||||
restProxy := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).ProxyGet("http", podName, fmt.Sprintf("%d", hsh.HostSensorPort), path, map[string]string{})
|
||||
return restProxy.DoRaw(hsh.k8sObj.Context)
|
||||
}
|
||||
|
||||
func (hsh *HostSensorHandler) getResourcesFromPod(podName, nodeName, resourceKind, path string) (hostsensor.HostSensorDataEnvelope, error) {
|
||||
// send the request and pack the response as an hostSensorDataEnvelope
|
||||
|
||||
resBytes, err := hsh.HTTPGetToPod(podName, path)
|
||||
if err != nil {
|
||||
return hostsensor.HostSensorDataEnvelope{}, err
|
||||
}
|
||||
|
||||
hostSensorDataEnvelope := hostsensor.HostSensorDataEnvelope{}
|
||||
hostSensorDataEnvelope.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
|
||||
hostSensorDataEnvelope.SetKind(resourceKind)
|
||||
hostSensorDataEnvelope.SetName(nodeName)
|
||||
hostSensorDataEnvelope.SetData(resBytes)
|
||||
|
||||
return hostSensorDataEnvelope, nil
|
||||
}
|
||||
|
||||
func (hsh *HostSensorHandler) ForwardToPod(podName, path string) ([]byte, error) {
|
||||
@@ -59,35 +76,26 @@ func (hsh *HostSensorHandler) ForwardToPod(podName, path string) ([]byte, error)
|
||||
|
||||
// sendAllPodsHTTPGETRequest fills the raw byte response in the envelope and the node name, but not the GroupVersionKind
|
||||
// so the caller is responsible to convert the raw data to some structured data and add the GroupVersionKind details
|
||||
//
|
||||
// The function produces a worker-pool with a fixed number of workers.
|
||||
// For each node the request is pushed to the jobs channel, the worker sends the request and pushes the result to the result channel.
|
||||
// When all workers have finished, the function returns a list of results
|
||||
func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(path, requestKind string) ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
podList, err := hsh.getPodList()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to sendAllPodsHTTPGETRequest: %v", err)
|
||||
}
|
||||
res := make([]hostsensor.HostSensorDataEnvelope, 0, len(podList))
|
||||
resLock := sync.Mutex{}
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(podList))
|
||||
for podName := range podList {
|
||||
go func(podName, path string) {
|
||||
defer wg.Done()
|
||||
resBytes, err := hsh.HTTPGetToPod(podName, path)
|
||||
if err != nil {
|
||||
logger.L().Error("failed to get data", helpers.String("path", path), helpers.String("podName", podName), helpers.Error(err))
|
||||
} else {
|
||||
resLock.Lock()
|
||||
defer resLock.Unlock()
|
||||
hostSensorDataEnvelope := hostsensor.HostSensorDataEnvelope{}
|
||||
hostSensorDataEnvelope.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
|
||||
hostSensorDataEnvelope.SetKind(requestKind)
|
||||
hostSensorDataEnvelope.SetName(podList[podName])
|
||||
hostSensorDataEnvelope.SetData(resBytes)
|
||||
res = append(res, hostSensorDataEnvelope)
|
||||
}
|
||||
|
||||
}(podName, path)
|
||||
}
|
||||
wg.Wait()
|
||||
res := make([]hostsensor.HostSensorDataEnvelope, 0, len(podList))
|
||||
var wg sync.WaitGroup
|
||||
// initialization of the channels
|
||||
hsh.workerPool.init(len(podList))
|
||||
|
||||
hsh.workerPool.hostSensorApplyJobs(podList, path, requestKind)
|
||||
hsh.workerPool.hostSensorGetResults(&res)
|
||||
hsh.workerPool.createWorkerPool(hsh, &wg)
|
||||
hsh.workerPool.waitForDone(&wg)
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
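The rewritten sendAllPodsHTTPGETRequest above hands the per-pod requests to the worker pool defined later in this diff (hostsensorworkerpool.go) instead of spawning one goroutine per pod. Below is a generic, self-contained sketch of that jobs/results/done pattern, using placeholder types rather than the kubescape ones.

```go
package main

import (
	"fmt"
	"sync"
)

// result stands in for hostsensor.HostSensorDataEnvelope in this sketch.
type result struct {
	pod  string
	data string
}

func main() {
	pods := []string{"pod-a", "pod-b", "pod-c"}

	jobs := make(chan string, len(pods))
	results := make(chan result, len(pods))
	done := make(chan struct{})

	// collector: drain results until the channel is closed, then signal done.
	var collected []result
	go func() {
		for r := range results {
			collected = append(collected, r)
		}
		close(done)
	}()

	// fixed-size worker pool: each worker pulls jobs until the channel closes.
	const workers = 2
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for pod := range jobs {
				// in kubescape this step is the HTTP GET against the host-sensor pod
				results <- result{pod: pod, data: "sensor payload"}
			}
		}()
	}

	// producer: queue one job per pod, then close the jobs channel.
	for _, p := range pods {
		jobs <- p
	}
	close(jobs)

	wg.Wait()      // all workers finished
	close(results) // lets the collector drain and exit
	<-done         // collector finished appending

	fmt.Println("collected", len(collected), "envelopes")
}
```

The fixed worker count bounds the number of concurrent connections to the host-sensor pods, which the earlier goroutine-per-pod version did not.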
@@ -109,6 +117,18 @@ func (hsh *HostSensorHandler) GetLinuxSecurityHardeningStatus() ([]hostsensor.Ho
|
||||
return hsh.sendAllPodsHTTPGETRequest("/linuxSecurityHardening", "LinuxSecurityHardeningStatus")
|
||||
}
|
||||
|
||||
// return list of KubeletInfo
|
||||
func (hsh *HostSensorHandler) GetKubeletInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
return hsh.sendAllPodsHTTPGETRequest("/kubeletInfo", "KubeletInfo")
|
||||
}
|
||||
|
||||
// return list of KubeProxyInfo
|
||||
func (hsh *HostSensorHandler) GetKubeProxyInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
return hsh.sendAllPodsHTTPGETRequest("/kubeProxyInfo", "KubeProxyInfo")
|
||||
}
|
||||
|
||||
// return list of KubeletCommandLine
|
||||
func (hsh *HostSensorHandler) GetKubeletCommandLine() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
@@ -228,6 +248,27 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
|
||||
if len(kcData) > 0 {
|
||||
res = append(res, kcData...)
|
||||
}
|
||||
|
||||
// GetKubeletInfo
|
||||
kcData, err = hsh.GetKubeletInfo()
|
||||
if err != nil {
|
||||
addInfoToMap(KubeletInfo, infoMap, err)
|
||||
logger.L().Warning(err.Error())
|
||||
}
|
||||
if len(kcData) > 0 {
|
||||
res = append(res, kcData...)
|
||||
}
|
||||
|
||||
// GetKubeProxyInfo
|
||||
kcData, err = hsh.GetKubeProxyInfo()
|
||||
if err != nil {
|
||||
addInfoToMap(KubeProxyInfo, infoMap, err)
|
||||
logger.L().Warning(err.Error())
|
||||
}
|
||||
if len(kcData) > 0 {
|
||||
res = append(res, kcData...)
|
||||
}
|
||||
|
||||
logger.L().Debug("Done reading information from host scanner")
|
||||
return res, infoMap, nil
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
package hostsensorutils
|
||||
|
||||
import (
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
)
|
||||
|
||||
type IHostSensor interface {
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
package hostsensorutils
|
||||
|
||||
import (
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
)
|
||||
|
||||
type HostSensorHandlerMock struct {
|
||||
|
||||
core/pkg/hostsensorutils/hostsensorworkerpool.go (new file, 96 lines)
@@ -0,0 +1,96 @@
|
||||
package hostsensorutils
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes/hostsensor"
|
||||
)
|
||||
|
||||
const noOfWorkers int = 10
|
||||
|
||||
type job struct {
|
||||
podName string
|
||||
nodeName string
|
||||
requestKind string
|
||||
path string
|
||||
}
|
||||
|
||||
type workerPool struct {
|
||||
jobs chan job
|
||||
results chan hostsensor.HostSensorDataEnvelope
|
||||
done chan bool
|
||||
noOfWorkers int
|
||||
}
|
||||
|
||||
func NewWorkerPool() workerPool {
|
||||
wp := workerPool{}
|
||||
wp.noOfWorkers = noOfWorkers
|
||||
wp.init()
|
||||
return wp
|
||||
}
|
||||
|
||||
func (wp *workerPool) init(noOfPods ...int) {
|
||||
if noOfPods != nil && len(noOfPods) > 0 && noOfPods[0] < noOfWorkers {
|
||||
wp.noOfWorkers = noOfPods[0]
|
||||
}
|
||||
// init the channels
|
||||
wp.jobs = make(chan job, noOfWorkers)
|
||||
wp.results = make(chan hostsensor.HostSensorDataEnvelope, noOfWorkers)
|
||||
wp.done = make(chan bool)
|
||||
}
|
||||
|
||||
// The worker takes a job out of the chan, executes the request, and pushes the result to the results chan
|
||||
func (wp *workerPool) hostSensorWorker(hsh *HostSensorHandler, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
for job := range wp.jobs {
|
||||
hostSensorDataEnvelope, err := hsh.getResourcesFromPod(job.podName, job.nodeName, job.requestKind, job.path)
|
||||
if err != nil {
|
||||
logger.L().Error("failed to get data", helpers.String("path", job.path), helpers.String("podName", job.podName), helpers.Error(err))
|
||||
} else {
|
||||
wp.results <- hostSensorDataEnvelope
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (wp *workerPool) createWorkerPool(hsh *HostSensorHandler, wg *sync.WaitGroup) {
|
||||
for i := 0; i < noOfWorkers; i++ {
|
||||
wg.Add(1)
|
||||
go wp.hostSensorWorker(hsh, wg)
|
||||
}
|
||||
}
|
||||
|
||||
func (wp *workerPool) waitForDone(wg *sync.WaitGroup) {
|
||||
// Waiting for workers to finish
|
||||
wg.Wait()
|
||||
close(wp.results)
|
||||
|
||||
// Waiting for the results to be processed
|
||||
<-wp.done
|
||||
}
|
||||
|
||||
func (wp *workerPool) hostSensorGetResults(result *[]hostsensor.HostSensorDataEnvelope) {
|
||||
go func() {
|
||||
for res := range wp.results {
|
||||
*result = append(*result, res)
|
||||
}
|
||||
wp.done <- true
|
||||
}()
|
||||
}
|
||||
|
||||
func (wp *workerPool) hostSensorApplyJobs(podList map[string]string, path, requestKind string) {
|
||||
go func() {
|
||||
for podName, nodeName := range podList {
|
||||
job := job{
|
||||
podName: podName,
|
||||
nodeName: nodeName,
|
||||
requestKind: requestKind,
|
||||
path: path,
|
||||
}
|
||||
wp.jobs <- job
|
||||
|
||||
}
|
||||
close(wp.jobs)
|
||||
}()
|
||||
}
|
||||
@@ -1,8 +1,8 @@
|
||||
package hostsensorutils
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -13,8 +13,10 @@ var (
|
||||
OpenPortsList = "OpenPortsList"
|
||||
LinuxKernelVariables = "LinuxKernelVariables"
|
||||
KubeletCommandLine = "KubeletCommandLine"
|
||||
KubeletInfo = "KubeletInfo"
|
||||
KubeProxyInfo = "KubeProxyInfo"
|
||||
|
||||
MapResourceToApiGroup = map[string]string{
|
||||
MapHostSensorResourceToApiGroup = map[string]string{
|
||||
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
|
||||
OsReleaseFile: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeletCommandLine: "hostdata.kubescape.cloud/v1beta0",
|
||||
@@ -22,11 +24,13 @@ var (
|
||||
LinuxSecurityHardeningStatus: "hostdata.kubescape.cloud/v1beta0",
|
||||
OpenPortsList: "hostdata.kubescape.cloud/v1beta0",
|
||||
LinuxKernelVariables: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeletInfo: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeProxyInfo: "hostdata.kubescape.cloud/v1beta0",
|
||||
}
|
||||
)
|
||||
|
||||
func addInfoToMap(resource string, infoMap map[string]apis.StatusInfo, err error) {
|
||||
group, version := k8sinterface.SplitApiVersion(MapResourceToApiGroup[resource])
|
||||
group, version := k8sinterface.SplitApiVersion(MapHostSensorResourceToApiGroup[resource])
|
||||
r := k8sinterface.JoinResourceTriplets(group, version, resource)
|
||||
infoMap[r] = apis.StatusInfo{
|
||||
InnerStatus: apis.StatusSkipped,
|
||||
|
||||
@@ -7,18 +7,20 @@ import (
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/score"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
|
||||
"github.com/open-policy-agent/opa/storage"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
|
||||
"github.com/armosec/opa-utils/resources"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
"github.com/kubescape/opa-utils/resources"
|
||||
"github.com/open-policy-agent/opa/ast"
|
||||
"github.com/open-policy-agent/opa/rego"
|
||||
)
|
||||
@@ -62,7 +64,7 @@ func (opap *OPAProcessor) ProcessRulesListenner() error {
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) Process(policies *cautils.Policies) error {
|
||||
logger.L().Info("Scanning", helpers.String("cluster", cautils.ClusterName))
|
||||
opap.loggerStartScanning()
|
||||
|
||||
cautils.StartSpinner()
|
||||
|
||||
@@ -89,11 +91,30 @@ func (opap *OPAProcessor) Process(policies *cautils.Policies) error {
|
||||
opap.Report.ReportGenerationTime = time.Now().UTC()
|
||||
|
||||
cautils.StopSpinner()
|
||||
logger.L().Success("Done scanning", helpers.String("cluster", cautils.ClusterName))
|
||||
|
||||
opap.loggerDoneScanning()
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) loggerStartScanning() {
|
||||
targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
|
||||
if reporthandlingv2.Cluster == targetScan {
|
||||
logger.L().Info("Scanning", helpers.String(targetScan.String(), cautils.ClusterName))
|
||||
} else {
|
||||
logger.L().Info("Scanning " + targetScan.String())
|
||||
}
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) loggerDoneScanning() {
|
||||
targetScan := opap.OPASessionObj.Metadata.ScanMetadata.ScanningTarget
|
||||
if reporthandlingv2.Cluster == targetScan {
|
||||
logger.L().Success("Done scanning", helpers.String(targetScan.String(), cautils.ClusterName))
|
||||
} else {
|
||||
logger.L().Success("Done scanning " + targetScan.String())
|
||||
}
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) processControl(control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, error) {
|
||||
var errs error
|
||||
|
||||
@@ -180,6 +201,9 @@ func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (map[stri
|
||||
for j := range ruleResponses[i].FixPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixPath: ruleResponses[i].FixPaths[j]})
|
||||
}
|
||||
if ruleResponses[i].FixCommand != "" {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixCommand: ruleResponses[i].FixCommand})
|
||||
}
|
||||
resources[failedResources[j].GetID()] = ruleResult
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,56 +6,17 @@ import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/mocks"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/resources"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
)
|
||||
|
||||
func NewOPAProcessorMock() *OPAProcessor {
|
||||
return &OPAProcessor{}
|
||||
}
|
||||
func TestProcess(t *testing.T) {
|
||||
|
||||
// set k8s
|
||||
k8sResources := make(cautils.K8SResources)
|
||||
allResources := make(map[string]workloadinterface.IMetadata)
|
||||
imetaObj := objectsenvelopes.ListMapToMeta(k8sinterface.ConvertUnstructuredSliceToMap(k8sinterface.V1KubeSystemNamespaceMock().Items))
|
||||
for i := range imetaObj {
|
||||
allResources[imetaObj[i].GetID()] = imetaObj[i]
|
||||
}
|
||||
k8sResources["/v1/pods"] = workloadinterface.ListMetaIDs(imetaObj)
|
||||
|
||||
// set opaSessionObj
|
||||
opaSessionObj := cautils.NewOPASessionObjMock()
|
||||
opaSessionObj.Policies = []reporthandling.Framework{*reporthandling.MockFrameworkA()}
|
||||
policies := ConvertFrameworksToPolicies(opaSessionObj.Policies, "")
|
||||
|
||||
opaSessionObj.K8SResources = &k8sResources
|
||||
opaSessionObj.AllResources = allResources
|
||||
|
||||
opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock()) // ,
|
||||
opap.Process(policies)
|
||||
opap.updateResults()
|
||||
for _, f := range opap.PostureReport.FrameworkReports {
|
||||
for _, c := range f.ControlReports {
|
||||
for _, r := range c.RuleReports {
|
||||
for _, rr := range r.RuleResponses {
|
||||
// t.Errorf("AlertMessage: %v", rr.AlertMessage)
|
||||
if rr.Exception != nil {
|
||||
t.Errorf("Exception: %v", rr.Exception)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestProcessResourcesResult(t *testing.T) {
|
||||
|
||||
// set k8s
|
||||
|
||||
@@ -2,13 +2,14 @@ package opaprocessor
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
resources "github.com/armosec/opa-utils/resources"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
resources "github.com/kubescape/opa-utils/resources"
|
||||
)
|
||||
|
||||
// updateResults update the results objects and report objects. This is a critical function - DO NOT CHANGE
|
||||
@@ -46,7 +47,7 @@ func (opap *OPAProcessor) updateResults() {
|
||||
|
||||
// set result summary
|
||||
// map control to error
|
||||
controlToInfoMap := mapControlToInfo(opap.ResourceToControlsMap, opap.InfoMap)
|
||||
controlToInfoMap := mapControlToInfo(opap.ResourceToControlsMap, opap.InfoMap, opap.Report.SummaryDetails.Controls)
|
||||
opap.Report.SummaryDetails.InitResourcesSummary(controlToInfoMap)
|
||||
// for f := range opap.PostureReport.FrameworkReports {
|
||||
// // set exceptions
|
||||
@@ -60,25 +61,37 @@ func (opap *OPAProcessor) updateResults() {
|
||||
// }
|
||||
}
|
||||
|
||||
func mapControlToInfo(mapResourceToControls map[string][]string, infoMap map[string]apis.StatusInfo) map[string]apis.StatusInfo {
|
||||
func mapControlToInfo(mapResourceToControls map[string][]string, infoMap map[string]apis.StatusInfo, controlSummary reportsummary.ControlSummaries) map[string]apis.StatusInfo {
|
||||
controlToInfoMap := make(map[string]apis.StatusInfo)
|
||||
for resource, statusInfo := range infoMap {
|
||||
controls := mapResourceToControls[resource]
|
||||
for _, control := range controls {
|
||||
controlToInfoMap[control] = statusInfo
|
||||
controlIDs := mapResourceToControls[resource]
|
||||
for _, controlID := range controlIDs {
|
||||
ctrl := controlSummary.GetControl(reportsummary.EControlCriteriaID, controlID)
|
||||
if ctrl != nil {
|
||||
resources := ctrl.NumberOfResources()
|
||||
// Check that there are no K8s resources too
|
||||
if isEmptyResources(resources) {
|
||||
controlToInfoMap[controlID] = statusInfo
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return controlToInfoMap
|
||||
}
|
||||
|
||||
func getAllSupportedObjects(k8sResources *cautils.K8SResources, armoResources *cautils.ArmoResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
|
||||
func isEmptyResources(counters reportsummary.ICounters) bool {
|
||||
return counters.Failed() == 0 && counters.Excluded() == 0 && counters.Passed() == 0
|
||||
}
|
||||
|
||||
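mapControlToInfo above now attaches a status reason only to controls whose resource counters are all zero, i.e. controls that were not evaluated against any Kubernetes resource. A small sketch of that filter follows, with a stand-in counters type instead of reportsummary.ICounters and an invented skip reason purely for illustration.

```go
package main

import "fmt"

// counters is a local stand-in for reportsummary.ICounters.
type counters struct{ passed, failed, excluded int }

func (c counters) Passed() int   { return c.passed }
func (c counters) Failed() int   { return c.failed }
func (c counters) Excluded() int { return c.excluded }

// isEmptyResources mirrors the helper added in the diff: a control counts as
// "resource-less" only when nothing passed, failed or was excluded.
func isEmptyResources(c counters) bool {
	return c.Failed() == 0 && c.Excluded() == 0 && c.Passed() == 0
}

func main() {
	statusInfo := map[string]string{} // controlID -> reason the control was skipped

	controls := map[string]counters{
		"C-0034": {},                     // nothing evaluated for this control
		"C-0006": {passed: 3, failed: 1}, // evaluated against cluster resources
	}
	for id, c := range controls {
		if isEmptyResources(c) {
			statusInfo[id] = "skipped: required scanner data unavailable"
		}
	}
	fmt.Println(statusInfo) // only C-0034 carries a skip reason
}
```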
func getAllSupportedObjects(k8sResources *cautils.K8SResources, ksResources *cautils.KSResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
|
||||
k8sObjects := []workloadinterface.IMetadata{}
|
||||
k8sObjects = append(k8sObjects, getKubernetesObjects(k8sResources, allResources, rule.Match)...)
|
||||
k8sObjects = append(k8sObjects, getArmoObjects(armoResources, allResources, rule.DynamicMatch)...)
|
||||
k8sObjects = append(k8sObjects, getKSObjects(ksResources, allResources, rule.DynamicMatch)...)
|
||||
return k8sObjects
|
||||
}
|
||||
|
||||
func getArmoObjects(k8sResources *cautils.ArmoResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
|
||||
func getKSObjects(k8sResources *cautils.KSResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
|
||||
k8sObjects := []workloadinterface.IMetadata{}
|
||||
|
||||
for m := range match {
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
)
|
||||
|
||||
func TestRemoveData(t *testing.T) {
|
||||
|
||||
@@ -2,8 +2,8 @@ package opaprocessor
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
// ConvertFrameworksToPolicies convert list of frameworks to list of policies
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/mocks"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
func TestConvertFrameworksToPolicies(t *testing.T) {
|
||||
|
||||
@@ -3,9 +3,9 @@ package policyhandler
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resourcehandler"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// PolicyHandler -
|
||||
@@ -22,7 +22,7 @@ func NewPolicyHandler(resourceHandler resourcehandler.IResourceHandler) *PolicyH
|
||||
}
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) CollectResources(notification *reporthandling.PolicyNotification, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
|
||||
func (policyHandler *PolicyHandler) CollectResources(policyIdentifier []cautils.PolicyIdentifier, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
|
||||
opaSessionObj := cautils.NewOPASessionObj(nil, nil, scanInfo)
|
||||
|
||||
// validate notification
|
||||
@@ -30,11 +30,11 @@ func (policyHandler *PolicyHandler) CollectResources(notification *reporthandlin
|
||||
policyHandler.getters = &scanInfo.Getters
|
||||
|
||||
// get policies
|
||||
if err := policyHandler.getPolicies(notification, opaSessionObj); err != nil {
|
||||
if err := policyHandler.getPolicies(policyIdentifier, opaSessionObj); err != nil {
|
||||
return opaSessionObj, err
|
||||
}
|
||||
|
||||
err := policyHandler.getResources(notification, opaSessionObj, scanInfo)
|
||||
err := policyHandler.getResources(policyIdentifier, opaSessionObj, scanInfo)
|
||||
if err != nil {
|
||||
return opaSessionObj, err
|
||||
}
|
||||
@@ -46,17 +46,24 @@ func (policyHandler *PolicyHandler) CollectResources(notification *reporthandlin
|
||||
return opaSessionObj, nil
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) getResources(notification *reporthandling.PolicyNotification, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
|
||||
func (policyHandler *PolicyHandler) getResources(policyIdentifier []cautils.PolicyIdentifier, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
|
||||
opaSessionObj.Report.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
|
||||
|
||||
resourcesMap, allResources, armoResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, ¬ification.Designators)
|
||||
resourcesMap, allResources, ksResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, &policyIdentifier[0].Designators)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opaSessionObj.K8SResources = resourcesMap
|
||||
opaSessionObj.AllResources = allResources
|
||||
opaSessionObj.ArmoResource = armoResources
|
||||
opaSessionObj.ArmoResource = ksResources
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDesignator(policyIdentifier []cautils.PolicyIdentifier) *armotypes.PortalDesignator {
|
||||
if len(policyIdentifier) > 0 {
|
||||
return &policyIdentifier[0].Designators
|
||||
}
|
||||
return &armotypes.PortalDesignator{}
|
||||
}
|
||||
|
||||
@@ -4,25 +4,27 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.PolicyNotification, policiesAndResources *cautils.OPASessionObj) error {
|
||||
func (policyHandler *PolicyHandler) getPolicies(policyIdentifier []cautils.PolicyIdentifier, policiesAndResources *cautils.OPASessionObj) error {
|
||||
logger.L().Info("Downloading/Loading policy definitions")
|
||||
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
|
||||
policies, err := policyHandler.getScanPolicies(notification)
|
||||
policies, err := policyHandler.getScanPolicies(policyIdentifier)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(policies) == 0 {
|
||||
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(notification.Rules), ", "))
|
||||
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(policyIdentifier), ", "))
|
||||
}
|
||||
|
||||
policiesAndResources.Policies = policies
|
||||
@@ -31,6 +33,8 @@ func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.Pol
|
||||
exceptionPolicies, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)
|
||||
if err == nil {
|
||||
policiesAndResources.Exceptions = exceptionPolicies
|
||||
} else {
|
||||
logger.L().Error("failed to load exceptions", helpers.Error(err))
|
||||
}
|
||||
|
||||
// get account configuration
|
||||
@@ -44,12 +48,12 @@ func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.Pol
|
||||
return nil
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling.PolicyNotification) ([]reporthandling.Framework, error) {
|
||||
func (policyHandler *PolicyHandler) getScanPolicies(policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
|
||||
frameworks := []reporthandling.Framework{}
|
||||
|
||||
switch getScanKind(notification) {
|
||||
case reporthandling.KindFramework: // Download frameworks
|
||||
for _, rule := range notification.Rules {
|
||||
switch getScanKind(policyIdentifier) {
|
||||
case apisv1.KindFramework: // Download frameworks
|
||||
for _, rule := range policyIdentifier {
|
||||
receivedFramework, err := policyHandler.getters.PolicyGetter.GetFramework(rule.Name)
|
||||
if err != nil {
|
||||
return frameworks, policyDownloadError(err)
|
||||
@@ -63,11 +67,11 @@ func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling
|
||||
}
|
||||
}
|
||||
}
|
||||
case reporthandling.KindControl: // Download controls
|
||||
case apisv1.KindControl: // Download controls
|
||||
f := reporthandling.Framework{}
|
||||
var receivedControl *reporthandling.Control
|
||||
var err error
|
||||
for _, rule := range notification.Rules {
|
||||
for _, rule := range policyIdentifier {
|
||||
receivedControl, err = policyHandler.getters.PolicyGetter.GetControl(rule.Name)
|
||||
if err != nil {
|
||||
return frameworks, policyDownloadError(err)
|
||||
@@ -89,7 +93,7 @@ func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling
|
||||
return frameworks, nil
|
||||
}
|
||||
|
||||
func policyIdentifierToSlice(rules []reporthandling.PolicyIdentifier) []string {
|
||||
func policyIdentifierToSlice(rules []cautils.PolicyIdentifier) []string {
|
||||
s := []string{}
|
||||
for i := range rules {
|
||||
s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Name))
|
||||
|
||||
@@ -4,12 +4,14 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
)
|
||||
|
||||
func getScanKind(notification *reporthandling.PolicyNotification) reporthandling.NotificationPolicyKind {
|
||||
if len(notification.Rules) > 0 {
|
||||
return notification.Rules[0].Kind
|
||||
func getScanKind(policyIdentifier []cautils.PolicyIdentifier) apisv1.NotificationPolicyKind {
|
||||
if len(policyIdentifier) > 0 {
|
||||
return policyIdentifier[0].Kind
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
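getScanKind now works on the CLI-level []cautils.PolicyIdentifier instead of the old reporthandling.PolicyNotification. A compact sketch of how the kind of the first identifier drives the download path is shown below, with simplified stand-in types; the real PolicyIdentifier also carries Designators and lives in cautils.

```go
package main

import "fmt"

// Local stand-ins for apisv1.NotificationPolicyKind and cautils.PolicyIdentifier.
type notificationPolicyKind string

const (
	kindFramework notificationPolicyKind = "Framework"
	kindControl   notificationPolicyKind = "Control"
)

type policyIdentifier struct {
	Kind notificationPolicyKind
	Name string
}

// getScanKind mirrors the refactored helper: the kind of the first
// identifier decides whether frameworks or controls are downloaded.
func getScanKind(identifiers []policyIdentifier) notificationPolicyKind {
	if len(identifiers) > 0 {
		return identifiers[0].Kind
	}
	return "unknown"
}

func main() {
	ids := []policyIdentifier{{Kind: kindFramework, Name: "nsa"}, {Kind: kindFramework, Name: "mitre"}}
	switch getScanKind(ids) {
	case kindFramework:
		fmt.Println("download frameworks:", ids[0].Name, ids[1].Name)
	case kindControl:
		fmt.Println("download individual controls")
	default:
		fmt.Println("unknown scan kind")
	}
}
```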
@@ -7,7 +7,7 @@ For these controls to work properly, it is necessary to

 # Integrate With Armosec Server

-1. Navigate to the [armosec.io](https://portal.armo.cloud/)
+1. Navigate to the [armosec.io](https://cloud.armosec.io?utm_source=github&utm_medium=repository)
 2. Click Profile(top right icon)->"User Management"->"API Tokens" and Generate a token
 3. Copy the clientID and secretKey and run:
 ```
@@ -5,28 +5,28 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/containerscan"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
)
|
||||
|
||||
func NewArmoAdaptor(armoAPI *getter.ArmoAPI) *ArmoCivAdaptor {
|
||||
return &ArmoCivAdaptor{
|
||||
armoAPI: armoAPI,
|
||||
func NewKSAdaptor(api *getter.KSCloudAPI) *KSCivAdaptor {
|
||||
return &KSCivAdaptor{
|
||||
ksCloudAPI: api,
|
||||
}
|
||||
}
|
||||
|
||||
func (armoCivAdaptor *ArmoCivAdaptor) Login() error {
|
||||
if armoCivAdaptor.armoAPI.IsLoggedIn() {
|
||||
func (ksCivAdaptor *KSCivAdaptor) Login() error {
|
||||
if ksCivAdaptor.ksCloudAPI.IsLoggedIn() {
|
||||
return nil
|
||||
}
|
||||
return armoCivAdaptor.armoAPI.Login()
|
||||
return ksCivAdaptor.ksCloudAPI.Login()
|
||||
}
|
||||
func (armoCivAdaptor *ArmoCivAdaptor) GetImagesVulnerabilities(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageVulnerabilityReport, error) {
|
||||
func (ksCivAdaptor *KSCivAdaptor) GetImagesVulnerabilities(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageVulnerabilityReport, error) {
|
||||
resultList := make([]registryvulnerabilities.ContainerImageVulnerabilityReport, 0)
|
||||
for _, imageID := range imageIDs {
|
||||
result, err := armoCivAdaptor.GetImageVulnerability(&imageID)
|
||||
result, err := ksCivAdaptor.GetImageVulnerability(&imageID)
|
||||
if err == nil {
|
||||
resultList = append(resultList, *result)
|
||||
} else {
|
||||
@@ -36,9 +36,9 @@ func (armoCivAdaptor *ArmoCivAdaptor) GetImagesVulnerabilities(imageIDs []regist
|
||||
return resultList, nil
|
||||
}
|
||||
|
||||
func (armoCivAdaptor *ArmoCivAdaptor) GetImageVulnerability(imageID *registryvulnerabilities.ContainerImageIdentifier) (*registryvulnerabilities.ContainerImageVulnerabilityReport, error) {
|
||||
func (ksCivAdaptor *KSCivAdaptor) GetImageVulnerability(imageID *registryvulnerabilities.ContainerImageIdentifier) (*registryvulnerabilities.ContainerImageVulnerabilityReport, error) {
|
||||
// First
|
||||
containerScanId, err := armoCivAdaptor.getImageLastScanId(imageID)
|
||||
containerScanId, err := ksCivAdaptor.getImageLastScanId(imageID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -51,9 +51,9 @@ func (armoCivAdaptor *ArmoCivAdaptor) GetImageVulnerability(imageID *registryvul
|
||||
pageNumber := 1
|
||||
request := V2ListRequest{PageSize: &pageSize, PageNum: &pageNumber, InnerFilters: filter, OrderBy: "timestamp:desc"}
|
||||
requestBody, _ := json.Marshal(request)
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsDetails?customerGUID=%s", armoCivAdaptor.armoAPI.GetAPIURL(), armoCivAdaptor.armoAPI.GetAccountID())
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsDetails?customerGUID=%s", ksCivAdaptor.ksCloudAPI.GetApiURL(), ksCivAdaptor.ksCloudAPI.GetAccountID())
|
||||
|
||||
resp, err := armoCivAdaptor.armoAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
|
||||
resp, err := ksCivAdaptor.ksCloudAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -82,17 +82,16 @@ func (armoCivAdaptor *ArmoCivAdaptor) GetImageVulnerability(imageID *registryvul
|
||||
return &resultImageVulnerabilityReport, nil
|
||||
}
|
||||
|
||||
func (armoCivAdaptor *ArmoCivAdaptor) DescribeAdaptor() string {
|
||||
// TODO
|
||||
return ""
|
||||
func (ksCivAdaptor *KSCivAdaptor) DescribeAdaptor() string {
|
||||
return "armo image vulnerabilities scanner, docs: https://hub.armosec.io/docs/configuration-of-image-vulnerabilities"
|
||||
}
|
||||
|
||||
func (armoCivAdaptor *ArmoCivAdaptor) GetImagesInformation(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageInformation, error) {
|
||||
func (ksCivAdaptor *KSCivAdaptor) GetImagesInformation(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageInformation, error) {
|
||||
// TODO
|
||||
return []registryvulnerabilities.ContainerImageInformation{}, nil
|
||||
}
|
||||
|
||||
func (armoCivAdaptor *ArmoCivAdaptor) GetImagesScanStatus(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageScanStatus, error) {
|
||||
func (ksCivAdaptor *KSCivAdaptor) GetImagesScanStatus(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageScanStatus, error) {
|
||||
// TODO
|
||||
return []registryvulnerabilities.ContainerImageScanStatus{}, nil
|
||||
}
|
||||
@@ -8,15 +8,15 @@ import (
|
||||
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
)
|
||||
|
||||
func (armoCivAdaptor *ArmoCivAdaptor) getImageLastScanId(imageID *registryvulnerabilities.ContainerImageIdentifier) (string, error) {
|
||||
func (armoCivAdaptor *KSCivAdaptor) getImageLastScanId(imageID *registryvulnerabilities.ContainerImageIdentifier) (string, error) {
|
||||
filter := []map[string]string{{"imageTag": imageID.Tag, "status": "Success"}}
|
||||
pageSize := 1
|
||||
pageNumber := 1
|
||||
request := V2ListRequest{PageSize: &pageSize, PageNum: &pageNumber, InnerFilters: filter, OrderBy: "timestamp:desc"}
|
||||
requestBody, _ := json.Marshal(request)
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsSumSummary?customerGUID=%s", armoCivAdaptor.armoAPI.GetAPIURL(), armoCivAdaptor.armoAPI.GetAccountID())
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsSumSummary?customerGUID=%s", armoCivAdaptor.ksCloudAPI.GetApiURL(), armoCivAdaptor.ksCloudAPI.GetAccountID())
|
||||
|
||||
resp, err := armoCivAdaptor.armoAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
|
||||
resp, err := armoCivAdaptor.ksCloudAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -30,6 +30,7 @@ type V2ListRequest struct {
|
||||
FieldsList []string `json:"includeFields,omitempty"`
|
||||
FieldsReverseKeywordMap map[string]string `json:"-,omitempty"`
|
||||
}
|
||||
type ArmoCivAdaptor struct {
|
||||
armoAPI *getter.ArmoAPI
|
||||
|
||||
type KSCivAdaptor struct {
|
||||
ksCloudAPI *getter.KSCloudAPI
|
||||
}
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/kubescape/k8s-interface/k8sinterface"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.