Mirror of https://github.com/kubescape/kubescape.git, synced 2026-02-14 18:09:55 +00:00.
Compare commits
187 Commits
| SHA1 |
|---|
| 2fb2ab02c4 |
| 1a94004de4 |
| 424f2cc403 |
| eab77a9e61 |
| fc78d9143b |
| 034dbca30c |
| a41adc6c9e |
| bd170938c5 |
| e91a73a32e |
| 099886e1bb |
| c05dc8d7ae |
| 3cebfb3065 |
| f7445d1777 |
| ed5abd5791 |
| 898b847211 |
| 889dd15772 |
| 2ce6c1840b |
| d46d77411b |
| ee4f4d8af1 |
| ea1426a24b |
| bd78e4c4de |
| e3f70b6cd6 |
| a5007df1bc |
| d6720b67ed |
| 2261fd6adb |
| 9334ad6991 |
| 7b5e4143c3 |
| e63e5502cd |
| 154794e774 |
| 4aa71725dd |
| 9bd2e7fea4 |
| e6d3e7d7da |
| 35bb15b5df |
| e54d61fd87 |
| f28b2836c7 |
| d196f1f327 |
| 995f90ca53 |
| c1da380c9b |
| 77f77b8c7d |
| b3c1aec461 |
| ef242b52bb |
| af1d5694dc |
| a5ac47ff6d |
| b03a4974c4 |
| 8385cd0bd7 |
| ca67aa7f5f |
| 9b9ed514c8 |
| 11b7f6ab2f |
| 021f2074b8 |
| c1ba2d4b3c |
| 141ad17ece |
| 74c81e2270 |
| 7bb124b6fe |
| 8a8ff10b19 |
| 1eef32dd8e |
| d7b5dd416d |
| 536a94de45 |
| d8ef471eb2 |
| ff07a80078 |
| 310d31a3b1 |
| 8a1ef7da87 |
| c142779ee8 |
| 54020d317e |
| 91d1ec6c2f |
| 1d3fd0dc9d |
| 8a7511cecb |
| 640f366c7e |
| 9f36c1d6de |
| b3c8c078a8 |
| 0af0c01ec0 |
| 3ff2b0d6ff |
| 35b2b350a0 |
| 046ea1d79f |
| 3081508863 |
| 4a757c1bf1 |
| 4f1971a63d |
| dec4bcca00 |
| 5443039b8c |
| 95e68f49f3 |
| 7e90956b50 |
| 0c84c8f1f3 |
| 56b3239e30 |
| f8e85941da |
| 15081aa9c3 |
| ac03a2bda3 |
| b7ffa22f3a |
| ac5e7069da |
| 5a83f38bca |
| 3abd59e290 |
| d08fdf2e9e |
| bad2f54e72 |
| fc9b713851 |
| 245200840d |
| 3f87610e8c |
| c285cb1bcc |
| 63968b564b |
| e237c48186 |
| 622b121535 |
| 20774d4a40 |
| 7bb6bb85ec |
| da908a84bc |
| b515e259c0 |
| facd551518 |
| 0fc569d9d9 |
| da27a27ad5 |
| 5d4a20f622 |
| 70b15a373b |
| 01353f81b3 |
| 22f10b6581 |
| 785178ffb1 |
| f9b5c58402 |
| 8ed6d63ce5 |
| 990a7c2052 |
| 09b0c09472 |
| f83c38b58e |
| 51e600797a |
| afb6ea1d9c |
| 39d6d1fd26 |
| 2dff63b101 |
| b928892f0a |
| c0188ea51d |
| fa376ed5a4 |
| 6382edeb6e |
| 61d6c2dd1f |
| 44194f0b4e |
| 7103c7d32c |
| b4e1663cd1 |
| b3d16875d6 |
| 47412c89ca |
| 20d65f2ed3 |
| 46a559fb1d |
| 2769b22721 |
| 60ec6e8294 |
| 63520f9aff |
| 333b55a9f2 |
| c6d3fd1a82 |
| 8106133ed0 |
| b36111f63e |
| 3ad0284394 |
| 245ebf8c41 |
| 8309562da1 |
| de807a65a6 |
| 92fe583421 |
| b7ec05e88a |
| 203e925888 |
| fde5453bf3 |
| 4c6a65565b |
| e60ecfb8f5 |
| b72e2610ca |
| 8d4bae06bc |
| 847b597d0f |
| db1743f617 |
| 7ac1b8aacf |
| 55f8cb1f0e |
| 93574736cd |
| e43f4b1a37 |
| 4ba33578ce |
| ae00866005 |
| 21cb4dae29 |
| cf086e6614 |
| 7d3ac98998 |
| 5e9d01aec2 |
| a27d2d41f2 |
| c09eabf347 |
| 38c2aed74a |
| cf70671dba |
| f90ce83a74 |
| fab594ee32 |
| d25cefe355 |
| 747eee1d29 |
| 0c43ee9ab8 |
| 466f3acd71 |
| 80add4ef12 |
| 959319c335 |
| 0e9ca547cb |
| 6e17e5ce7e |
| 858d7ac2ef |
| 4046321297 |
| 0cfdabd25a |
| 8b6cb6c5d8 |
| 3df3b7766c |
| 0d83654197 |
| bd9e44382e |
| ed45b09241 |
| 107903cc99 |
| 13fb586ded |
| 2e63982f5a |
**.github/workflows/build.yaml** (vendored, 44 changed lines)

@@ -28,19 +28,17 @@ jobs:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: 1.17

- name: Test cmd pkg
run: cd cmd && go test -v ./...

go-version: 1.18
# - name: Test cmd pkg
# run: cd cmd && go test -v ./...
- name: Test core pkg
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: cd core && go test -v ./...
run: go test -v ./...

- name: Test httphandler pkg
run: cd httphandler && go test -v ./...
@@ -48,12 +46,13 @@ jobs:
- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
CLIENT: release
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
run: cd cmd && python3 --version && python3 build.py
run: python3 --version && python3 build.py

- name: Smoke Testing
env:
@@ -82,9 +81,6 @@ jobs:
asset_path: build/${{ matrix.os }}/kubescape.sha256
asset_name: kubescape-${{ matrix.os }}-sha256
asset_content_type: application/octet-stream

build-docker:
name: Build docker container, tag and upload to registry
needs: build
@@ -106,28 +102,28 @@ jobs:
id: image-name
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'

- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}

- name: Re-Tag Image to latest
run: docker tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} ${{ steps.image-name.outputs.IMAGE_NAME }}:latest

- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- name: Login to Quay.io
env: # Or as an environment variable
env:
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io

- name: Build the Docker image
run: docker buildx build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:latest --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg client=image-release --push --platform linux/amd64,linux/arm64

# - name: Login to GitHub Container Registry
# uses: docker/login-action@v1
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GITHUB_TOKEN }}
- name: Push Docker image
run: |
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:latest

# TODO - Wait for casign to support fixed tags -> https://github.com/sigstore/cosign/issues/1424
# - name: Install cosign
# uses: sigstore/cosign-installer@main
**.github/workflows/build_dev.yaml** (vendored, 46 changed lines)

@@ -11,20 +11,27 @@ jobs:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1

- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: 1.17
go-version: 1.18

- name: Test cmd pkg
run: cd cmd && go test -v ./...
# - name: Test cmd pkg
# run: cd cmd && go test -v ./...

# - name: Test core pkg
# env:
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# run: cd core && go test -v ./...

# - name: Test cmd pkg
# run: cd cmd && go test -v ./...

- name: Test core pkg
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: cd core && go test -v ./...
run: go test -v ./...

- name: Test httphandler pkg
run: cd httphandler && go test -v ./...
@@ -32,12 +39,13 @@ jobs:
- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
CLIENT: release-dev
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
run: cd cmd && python3 --version && python3 build.py
run: python3 --version && python3 build.py

- name: Smoke Testing
env:
@@ -72,21 +80,17 @@ jobs:
id: image-name
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'

- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}

- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- name: Login to Quay.io
env:
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
# - name: Login to GitHub Container Registry
# uses: docker/login-action@v1
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GITHUB_TOKEN }}
- name: Push Docker image
run: |
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}

- name: Build the Docker image
run: docker buildx build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg client=image-dev --push --platform linux/amd64,linux/arm64
**.github/workflows/master_pr_checks.yaml** (vendored, 16 changed lines)

@@ -12,20 +12,19 @@ jobs:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1

- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: 1.17
go-version: 1.18

- name: Test cmd pkg
run: cd cmd && go test -v ./...
# - name: Test cmd pkg
# run: cd cmd && go test -v ./...

- name: Test core pkg
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: cd core && go test -v ./...
run: go test -v ./...

- name: Test httphandler pkg
run: cd httphandler && go test -v ./...
@@ -33,12 +32,13 @@ jobs:
- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
CLIENT: test
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
run: cd cmd && python3 --version && python3 build.py
run: python3 --version && python3 build.py

- name: Smoke Testing
env:
**.gitignore** (vendored, 1 changed line)

@@ -4,4 +4,5 @@
*vender*
*.pyc*
.idea
.history
ca.srl
**README.md** (72 changed lines)

@@ -28,7 +28,7 @@ Kubescape integrates natively with other DevOps tools, including Jenkins, Circle

# TL;DR
## Install:
```
```sh
curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh | /bin/bash
```

@@ -36,9 +36,13 @@ curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh |

[Install on macOS](#install-on-macos)

[Install on NixOS or Linux/macOS via nix](#install-on-nixos-or-with-nix-community)

[Install using Go](#install-using-go)

## Run:
```
kubescape scan --submit --enable-host-scan
```sh
kubescape scan --submit --enable-host-scan --verbose
```

<img src="docs/summary.png">
@@ -101,13 +105,49 @@ Set-ExecutionPolicy RemoteSigned -scope CurrentUser

## Install on macOS

1. ```
1. ```sh
brew tap armosec/kubescape
```
2. ```
2. ```sh
brew install kubescape
```

## Install on NixOS or with nix (Community)

Direct issues installing `kubescape` via `nix` through the channels mentioned [here](https://nixos.wiki/wiki/Support)

You can use `nix` on Linux or macOS and on other platforms unofficially.

Try it out in an ephemeral shell: `nix-shell -p kubescape`

Install declarative as usual

NixOS:

```nix
# your other config ...
environment.systemPackages = with pkgs; [
# your other packages ...
kubescape
];
```

home-manager:

```nix
# your other config ...
home.packages = with pkgs; [
# your other packages ...
kubescape
];
```

Or to your profile (not preferred): `nix-env --install -A nixpkgs.kubescape`

## Install using Go

With a sufficient version of `go` you can install and build with `go install github.com/armosec/kubescape/v2@latest`

## Usage & Examples

### Examples
@@ -147,14 +187,14 @@ kubescape scan --include-namespaces development,staging,production
kubescape scan --exclude-namespaces kube-system,kube-public
```

#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI)
#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI) Submit the results in case the directory is a git repo. [docs](https://hub.armo.cloud/docs/repository-scanning)
```
kubescape scan *.yaml
kubescape scan *.yaml --submit
```

#### Scan kubernetes manifest files from a public github repository
#### Scan kubernetes manifest files from a git repository [and submit the results](https://hub.armo.cloud/docs/repository-scanning)
```
kubescape scan https://github.com/armosec/kubescape
kubescape scan https://github.com/armosec/kubescape --submit
```

#### Display all scanned resources (including the resources who passed)
@@ -193,16 +233,11 @@ kubescape scan --format prometheus
kubescape scan --exceptions examples/exceptions/exclude-kube-namespaces.json
```

#### Scan Helm charts - Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout
#### Scan Helm charts
```
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan -
kubescape scan </path/to/directory> --submit
```

e.g.
```
helm template bitnami/mysql --generate-name --dry-run | kubescape scan -
```

> Kubescape will load the default values file

### Offline/Air-gaped Environment Support

@@ -276,6 +311,9 @@ kubescape submit results path/to/results.json

Scan the YAML files while writing them using the [vs code extension](https://github.com/armosec/vscode-kubescape/blob/master/README.md)

## Lens Extension

View Kubescape scan results directly in [Lens IDE](https://k8slens.dev/) using kubescape [Lens extension](https://github.com/armosec/lens-kubescape/blob/master/README.md)

# Under the hood
**build.py** (new file, 96 lines)

@@ -0,0 +1,96 @@
import os
import sys
import hashlib
import platform
import subprocess

BASE_GETTER_CONST = "github.com/armosec/kubescape/v2/core/cautils/getter"
BE_SERVER_CONST = BASE_GETTER_CONST + ".ArmoBEURL"
ER_SERVER_CONST = BASE_GETTER_CONST + ".ArmoERURL"
WEBSITE_CONST = BASE_GETTER_CONST + ".ArmoFEURL"
AUTH_SERVER_CONST = BASE_GETTER_CONST + ".armoAUTHURL"

def check_status(status, msg):
if status != 0:
sys.stderr.write(msg)
exit(status)

def get_build_dir():
current_platform = platform.system()
build_dir = "./build/"

if current_platform == "Windows": build_dir += "windows-latest"
elif current_platform == "Linux": build_dir += "ubuntu-latest"
elif current_platform == "Darwin": build_dir += "macos-latest"
else: raise OSError("Platform %s is not supported!" % (current_platform))

return build_dir

def get_package_name():
package_name = "kubescape"
# if platform.system() == "Windows": package_name += ".exe"

return package_name

def main():
print("Building Kubescape")

# Set some variables
package_name = get_package_name()
build_url = "github.com/armosec/kubescape/v2/core/cautils.BuildNumber"
release_version = os.getenv("RELEASE")
armo_be_server = os.getenv("ArmoBEServer")
armo_er_server = os.getenv("ArmoERServer")
armo_website = os.getenv("ArmoWebsite")
armo_auth_server = os.getenv("ArmoAuthServer")

client_var = "github.com/armosec/kubescape/v2/core/cautils.Client"
client_name = os.getenv("CLIENT")

# Create build directory
build_dir = get_build_dir()

ks_file = os.path.join(build_dir, package_name)
hash_file = ks_file + ".sha256"

if not os.path.isdir(build_dir):
os.makedirs(build_dir)

# Build kubescape
ldflags = "-w -s"
if release_version:
ldflags += " -X {}={}".format(build_url, release_version)
if client_name:
ldflags += " -X {}={}".format(client_var, client_name)
if armo_be_server:
ldflags += " -X {}={}".format(BE_SERVER_CONST, armo_be_server)
if armo_er_server:
ldflags += " -X {}={}".format(ER_SERVER_CONST, armo_er_server)
if armo_website:
ldflags += " -X {}={}".format(WEBSITE_CONST, armo_website)
if armo_auth_server:
ldflags += " -X {}={}".format(AUTH_SERVER_CONST, armo_auth_server)

build_command = ["go", "build", "-o", ks_file, "-ldflags" ,ldflags]

print("Building kubescape and saving here: {}".format(ks_file))
print("Build command: {}".format(" ".join(build_command)))

status = subprocess.call(build_command)
check_status(status, "Failed to build kubescape")

sha256 = hashlib.sha256()
with open(ks_file, "rb") as kube:
sha256.update(kube.read())
with open(hash_file, "w") as kube_sha:
hash = sha256.hexdigest()
print("kubescape hash: {}, file: {}".format(hash, hash_file))
kube_sha.write(sha256.hexdigest())

print("Build Done")

if __name__ == "__main__":
main()
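The new top-level build.py injects the release number, client name, and backend URLs into the binary with `-ldflags "-X <import path>.<Var>=<value>"`. That mechanism only works on package-level string variables. The sketch below shows the Go side of it; the variable names and package path are taken from the script above, but the exact declarations in the repository are an assumption.

```go
// Sketch only: illustrates the kind of declarations that go build's
// -ldflags "-X .../core/cautils.BuildNumber=v2.0.123" can override.
package cautils

var (
	// BuildNumber is empty in source and filled in at build time from the RELEASE env var.
	BuildNumber string
	// Client is filled in from the CLIENT env var ("release", "release-dev" or "test" in the workflows).
	Client string
)
```

Because `-X` cannot touch constants or non-string values, the script assembles the flag list conditionally from whichever environment variables are set.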
@@ -1,9 +1,10 @@
FROM golang:1.17-alpine as builder
#ENV GOPROXY=https://goproxy.io,direct
FROM golang:1.18-alpine as builder

ARG image_version
ARG client

ENV RELEASE=$image_version
ENV CLIENT=$client

ENV GO111MODULE=

@@ -24,21 +25,24 @@ RUN python build.py
RUN ls -ltr build/ubuntu-latest

# build kubescape cmd
WORKDIR /work/cmd
WORKDIR /work
RUN python build.py

RUN /work/build/ubuntu-latest/kubescape download artifacts -o /work/artifacts

FROM alpine

RUN addgroup -S ks && adduser -S ks -G ks
USER ks
WORKDIR /home/ks/
RUN addgroup -S armo && adduser -S armo -G armo

RUN mkdir /home/armo/.kubescape
COPY --from=builder /work/artifacts/ /home/armo/.kubescape

RUN chown -R armo:armo /home/armo/.kubescape

USER armo
WORKDIR /home/armo

COPY --from=builder /work/httphandler/build/ubuntu-latest/kubescape /usr/bin/ksserver
COPY --from=builder /work/build/ubuntu-latest/kubescape /usr/bin/kubescape

RUN mkdir /home/ks/.kubescape && chmod 777 -R /home/ks/.kubescape
COPY --from=builder /work/artifacts/ /home/ks/.kubescape

ENTRYPOINT ["ksserver"]

@@ -1,19 +0,0 @@
# Kubescape CLI Package

## Commands
* [Completion](#completion): Generate autocompletion script
* [Config](#config): Handle cached configurations
* [Delete](#delete): Delete configurations in Kubescape SaaS version
* [Download](#download): Download controls-inputs,exceptions,control,framework,artifacts
* [Help](#help): Help about any command
* [List](#list): List frameworks/controls will list the supported frameworks and controls
* [Scan](#scan): Scan the current running cluster or yaml files
* [Submit](#submit): Submit an object to the Kubescape SaaS version
* [Version](#version): Get kubescape version

## Global Flags

--cache-dir string Cache directory [$KS_CACHE_DIR] (default "/home/david/.kubescape")
-l, --logger string Logger level. Supported: debug/info/success/warning/error/fatal [$KS_LOGGER] (default "info")

### Completion
**cmd/build.py** (deleted, 94 lines)

@@ -1,94 +0,0 @@
import os
import sys
import hashlib
import platform
import subprocess

BASE_GETTER_CONST = "github.com/armosec/kubescape/core/cautils/getter"
BE_SERVER_CONST = BASE_GETTER_CONST + ".ArmoBEURL"
ER_SERVER_CONST = BASE_GETTER_CONST + ".ArmoERURL"
WEBSITE_CONST = BASE_GETTER_CONST + ".ArmoFEURL"
AUTH_SERVER_CONST = BASE_GETTER_CONST + ".armoAUTHURL"

def checkStatus(status, msg):
if status != 0:
sys.stderr.write(msg)
exit(status)

def getBuildDir():
currentPlatform = platform.system()
buildDir = "../build/"

if currentPlatform == "Windows": buildDir += "windows-latest"
elif currentPlatform == "Linux": buildDir += "ubuntu-latest"
elif currentPlatform == "Darwin": buildDir += "macos-latest"
else: raise OSError("Platform %s is not supported!" % (currentPlatform))

return buildDir

def getPackageName():
packageName = "kubescape"
# if platform.system() == "Windows": packageName += ".exe"

return packageName

def main():
print("Building Kubescape")

# print environment variables
# print(os.environ)

# Set some variables
packageName = getPackageName()
buildUrl = "github.com/armosec/kubescape/core/cautils.BuildNumber"
releaseVersion = os.getenv("RELEASE")
ArmoBEServer = os.getenv("ArmoBEServer")
ArmoERServer = os.getenv("ArmoERServer")
ArmoWebsite = os.getenv("ArmoWebsite")
ArmoAuthServer = os.getenv("ArmoAuthServer")

# Create build directory
buildDir = getBuildDir()

ks_file = os.path.join(buildDir, packageName)
hash_file = ks_file + ".sha256"

if not os.path.isdir(buildDir):
os.makedirs(buildDir)

# Build kubescape
ldflags = "-w -s"
if releaseVersion:
ldflags += " -X {}={}".format(buildUrl, releaseVersion)
if ArmoBEServer:
ldflags += " -X {}={}".format(BE_SERVER_CONST, ArmoBEServer)
if ArmoERServer:
ldflags += " -X {}={}".format(ER_SERVER_CONST, ArmoERServer)
if ArmoWebsite:
ldflags += " -X {}={}".format(WEBSITE_CONST, ArmoWebsite)
if ArmoAuthServer:
ldflags += " -X {}={}".format(AUTH_SERVER_CONST, ArmoAuthServer)

build_command = ["go", "build", "-o", ks_file, "-ldflags" ,ldflags]

print("Building kubescape and saving here: {}".format(ks_file))
print("Build command: {}".format(" ".join(build_command)))

status = subprocess.call(build_command)
checkStatus(status, "Failed to build kubescape")

sha256 = hashlib.sha256()
with open(ks_file, "rb") as kube:
sha256.update(kube.read())
with open(hash_file, "w") as kube_sha:
hash = sha256.hexdigest()
print("kubescape hash: {}, file: {}".format(hash, hash_file))
kube_sha.write(sha256.hexdigest())

print("Build Done")

if __name__ == "__main__":
main()
@@ -1,7 +1,7 @@
package config

import (
"github.com/armosec/kubescape/core/meta"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/spf13/cobra"
)

@@ -1,9 +1,9 @@
package config

import (
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/meta"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -4,9 +4,9 @@ import (
"fmt"
"strings"

"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/meta"
metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -3,9 +3,9 @@ package config
import (
"os"

"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/meta"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -1,8 +1,8 @@
package delete

import (
"github.com/armosec/kubescape/core/meta"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -24,7 +24,9 @@ func GetDeleteCmd(ks meta.IKubescape) *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
},
}
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armo.cloud/docs/authentication")
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armo.cloud/docs/authentication")

deleteCmd.AddCommand(getExceptionsCmd(ks, &deleteInfo))

@@ -4,9 +4,9 @@ import (
"fmt"
"strings"

"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/meta"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -26,7 +26,7 @@ func getExceptionsCmd(ks meta.IKubescape, deleteInfo *v1.Delete) *cobra.Command
if len(exceptionsNames) == 0 {
logger.L().Fatal("missing exceptions names")
}
if err := ks.DeleteExceptions(&v1.DeleteExceptions{Account: deleteInfo.Account, Exceptions: exceptionsNames}); err != nil {
if err := ks.DeleteExceptions(&v1.DeleteExceptions{Credentials: deleteInfo.Credentials, Exceptions: exceptionsNames}); err != nil {
logger.L().Fatal(err.Error())
}
},

@@ -5,11 +5,11 @@ import (
"path/filepath"
"strings"

"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/core"
"github.com/armosec/kubescape/core/meta"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/core"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -72,7 +72,10 @@ func GeDownloadCmd(ks meta.IKubescape) *cobra.Command {
return nil
},
}
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")

downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armo.cloud/docs/authentication")
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armo.cloud/docs/authentication")
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If not specified, will save in `~/.kubescape/<policy name>.json`")

return downloadCmd
**cmd/go.mod** (deleted, 127 lines)

@@ -1,127 +0,0 @@
module github.com/armosec/kubescape/cmd

go 1.17

replace github.com/armosec/kubescape/core => ../core

require (
github.com/armosec/k8s-interface v0.0.68
github.com/armosec/kubescape/core v0.0.0-00010101000000-000000000000
github.com/armosec/opa-utils v0.0.130
github.com/armosec/rbac-utils v0.0.14
github.com/enescakir/emoji v1.0.0
github.com/google/uuid v1.3.0
github.com/mattn/go-isatty v0.0.14
github.com/spf13/cobra v1.4.0
)

require (
cloud.google.com/go v0.99.0 // indirect
cloud.google.com/go/container v1.0.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/OneOfOne/xxhash v1.2.8 // indirect
github.com/armosec/armoapi-go v0.0.58 // indirect
github.com/armosec/utils-go v0.0.3 // indirect
github.com/armosec/utils-k8s-go v0.0.3 // indirect
github.com/aws/aws-sdk-go v1.41.11 // indirect
github.com/aws/aws-sdk-go-v2 v1.12.0 // indirect
github.com/aws/aws-sdk-go-v2/config v1.12.0 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.7.0 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.9.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.1.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.3 // indirect
github.com/aws/aws-sdk-go-v2/service/eks v1.17.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.6.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.13.0 // indirect
github.com/aws/smithy-go v1.9.1 // indirect
github.com/boombuler/barcode v1.0.0 // indirect
github.com/briandowns/spinner v1.18.1 // indirect
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect
github.com/coreos/go-oidc v2.2.1+incompatible // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/docker v20.10.9+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/envoyproxy/go-control-plane v0.10.1 // indirect
github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-gota/gota v0.12.0 // indirect
github.com/go-logr/logr v1.2.2 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/johnfercher/maroto v0.34.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/jung-kurt/gofpdf v1.4.2 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/open-policy-agent/opa v0.38.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/whilp/git-urls v1.0.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b // indirect
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/mod v0.5.1 // indirect
golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
gonum.org/v1/gonum v0.9.1 // indirect
google.golang.org/api v0.62.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
google.golang.org/grpc v1.44.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/api v0.23.4 // indirect
k8s.io/apimachinery v0.23.4 // indirect
k8s.io/client-go v0.23.4 // indirect
k8s.io/klog/v2 v2.30.0 // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
sigs.k8s.io/controller-runtime v0.11.1 // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
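Deleting cmd/go.mod folds the CLI into a single module, and the import rewrites throughout this compare add the /v2 suffix that Go's semantic import versioning requires for a v2+ module. A minimal sketch of what consuming the new paths looks like; the root go.mod shown in the comment is assumed, since it is not part of this compare.

```go
// Assumed root go.mod for a v2 module (not shown in this diff):
//
//   module github.com/armosec/kubescape/v2
//
//   go 1.18
//
// Every import of the module's own packages must then carry the /v2 suffix.
package main

import (
	"fmt"

	"github.com/armosec/kubescape/v2/core/cautils"
)

func main() {
	// cautils.BuildNumber is the same variable that build.py targets with -ldflags -X.
	fmt.Println("kubescape build:", cautils.BuildNumber)
}
```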
@@ -4,11 +4,11 @@ import (
"fmt"
"strings"

"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/core"
"github.com/armosec/kubescape/core/meta"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/core"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -59,7 +59,9 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
return nil
},
}
listCmd.PersistentFlags().StringVar(&listPolicies.Account, "account", "", "Armo portal account ID. Default will load account ID from configMap or config file")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armo.cloud/docs/authentication")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armo.cloud/docs/authentication")
listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-printer'/'json'")
listCmd.PersistentFlags().BoolVarP(&listPolicies.ListIDs, "id", "", false, "List control ID's instead of controls names")
**cmd/root.go** (41 changed lines)

@@ -1,23 +1,23 @@
package main
package cmd

import (
"fmt"
"strings"

"github.com/armosec/kubescape/cmd/completion"
"github.com/armosec/kubescape/cmd/config"
"github.com/armosec/kubescape/cmd/delete"
"github.com/armosec/kubescape/cmd/download"
"github.com/armosec/kubescape/cmd/list"
"github.com/armosec/kubescape/cmd/scan"
"github.com/armosec/kubescape/cmd/submit"
"github.com/armosec/kubescape/cmd/version"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/core"
"github.com/armosec/kubescape/core/meta"
"github.com/armosec/kubescape/v2/cmd/completion"
"github.com/armosec/kubescape/v2/cmd/config"
"github.com/armosec/kubescape/v2/cmd/delete"
"github.com/armosec/kubescape/v2/cmd/download"
"github.com/armosec/kubescape/v2/cmd/list"
"github.com/armosec/kubescape/v2/cmd/scan"
"github.com/armosec/kubescape/v2/cmd/submit"
"github.com/armosec/kubescape/v2/cmd/version"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/core"
"github.com/armosec/kubescape/v2/core/meta"

"github.com/spf13/cobra"
)
@@ -40,7 +40,6 @@ var ksExamples = `

func NewDefaultKubescapeCommand() *cobra.Command {
ks := core.NewKubescape()

return getRootCmd(ks)
}

@@ -49,8 +48,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
rootCmd := &cobra.Command{
Use: "kubescape",
Version: cautils.BuildNumber,
Short: "Kubescape is a tool for testing Kubernetes security posture",
Long: `Based on NSA \ MITRE ATT&CK® and other frameworks specifications`,
Short: "Kubescape is a tool for testing Kubernetes security posture. Docs: https://hub.armo.cloud/docs",
Example: ksExamples,
}

@@ -82,10 +80,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
return rootCmd
}

func main() {
func Execute() error {
ks := NewDefaultKubescapeCommand()
err := ks.Execute()
if err != nil {
logger.L().Fatal(err.Error())
}
return ks.Execute()
}
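root.go moves from package main into package cmd and swaps main() for an Execute() error function, which is the usual cobra layout when the CLI becomes an importable library. The actual entry-point file is not part of this compare, so the wrapper below is a hypothetical sketch of how such an Execute() would typically be wired from a thin main package.

```go
// Hypothetical entry point, assuming the cmd package exported by this refactor.
package main

import (
	"os"

	"github.com/armosec/kubescape/v2/cmd"
)

func main() {
	// Execute builds the root cobra command and runs it; the main package
	// only decides the process exit code.
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```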
@@ -1,13 +1,13 @@
package main
package cmd

import (
"fmt"
"os"
"strings"

"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"

"github.com/mattn/go-isatty"
)

@@ -6,11 +6,12 @@ import (
"os"
"strings"

"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/meta"
"github.com/armosec/opa-utils/reporthandling"
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"

"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/enescakir/emoji"
"github.com/spf13/cobra"
)
@@ -57,14 +58,14 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
RunE: func(cmd *cobra.Command, args []string) error {

// flagValidationControl(scanInfo)
scanInfo.PolicyIdentifier = []reporthandling.PolicyIdentifier{}
scanInfo.PolicyIdentifier = []cautils.PolicyIdentifier{}

if len(args) == 0 {
scanInfo.ScanAll = true
} else { // expected control or list of control sepparated by ","

// Read controls from input args
scanInfo.SetPolicyIdentifiers(strings.Split(args[0], ","), reporthandling.KindControl)
scanInfo.SetPolicyIdentifiers(strings.Split(args[0], ","), apisv1.KindControl)

if len(args) > 1 {
if len(args[1:]) == 0 || args[1] != "-" {
@@ -94,7 +95,7 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
logger.L().Fatal(err.Error())
}
if !scanInfo.VerboseMode {
cautils.SimpleDisplay(os.Stderr, "%s Run with '--verbose' flag for full scan details\n\n", emoji.Detective)
cautils.SimpleDisplay(os.Stderr, "%s Run with '--verbose'/'-v' flag for detailed resources view\n\n", emoji.Detective)
}
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))

@@ -6,11 +6,12 @@ import (
"os"
"strings"

"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/meta"
"github.com/armosec/opa-utils/reporthandling"
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"

"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/enescakir/emoji"
"github.com/spf13/cobra"
)
@@ -95,7 +96,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
}
scanInfo.FrameworkScan = true

scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
scanInfo.SetPolicyIdentifiers(frameworks, apisv1.KindFramework)

results, err := ks.Scan(scanInfo)
if err != nil {
@@ -106,7 +107,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
logger.L().Fatal(err.Error())
}
if !scanInfo.VerboseMode {
cautils.SimpleDisplay(os.Stderr, "%s Run with '--verbose' flag for full scan details\n\n", emoji.Detective)
cautils.SimpleDisplay(os.Stderr, "%s Run with '--verbose'/'-v' flag for detailed resources view\n\n", emoji.Detective)
}
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))

@@ -1,9 +1,11 @@
package scan

import (
"fmt"

"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/meta"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/spf13/cobra"
)

@@ -61,18 +63,21 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
},
}

scanCmd.PersistentFlags().StringVarP(&scanInfo.Account, "account", "", "", "ARMO portal account ID. Default will load account ID from configMap or config file")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armo.cloud/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armo.cloud/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "kube-context", "", "", "Kube context. Default will use the current-context")
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to an controls-config obj. If not set will download controls-config from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseArtifactsFrom, "use-artifacts-from", "", "Load artifacts from local directory. If not used will download them")
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system,kube-public")
scanCmd.PersistentFlags().Float32VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer","json","junit","prometheus","pdf"`)
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer", "json", "junit", "prometheus", "pdf", "html"`)
scanCmd.PersistentFlags().StringVar(&scanInfo.IncludeNamespaces, "include-namespaces", "", "scan specific namespaces. e.g: --include-namespaces ns-a,ns-b")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to ARMO backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
scanCmd.PersistentFlags().BoolVar(&scanInfo.VerboseMode, "verbose", false, "Display all of the input resources and not only failed resources")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.VerboseMode, "verbose", "v", false, "Display all of the input resources and not only failed resources")
scanCmd.PersistentFlags().StringVar(&scanInfo.View, "view", string(cautils.ResourceViewType), fmt.Sprintf("View results based on the %s/%s. default is --view=%s", cautils.ResourceViewType, cautils.ControlViewType, cautils.ResourceViewType))
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to ARMO management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
@@ -88,7 +93,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
scanCmd.PersistentFlags().MarkHidden("silent") // this flag should be deprecated since we added the --logger support
// scanCmd.PersistentFlags().MarkHidden("format-version") // meant for testing different output approaches and not for common use

hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://raw.githubusercontent.com/armosec/kubescape/master/hostsensorutils/hostsensor.yaml")
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://github.com/armosec/kubescape/blob/master/core/pkg/hostsensorutils/hostsensor.yaml")
hostF.NoOptDefVal = "true"
hostF.DefValue = "false, for no TTY in stdin"
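Across the scan, list, download, delete and submit commands, the single --account flag is replaced by bindings into a cautils.Credentials value with account, client-id and secret-key fields, and getTenantConfig now accepts a *cautils.Credentials. The struct definition itself is not part of this compare; the sketch below is only an assumption consistent with those flag bindings.

```go
// Sketch only: the real definition lives in core/cautils and may differ.
package cautils

// Credentials groups the Kubescape SaaS identifiers that the CLI previously
// passed around as a bare account string.
type Credentials struct {
	Account   string // bound to --account
	ClientID  string // bound to --client-id
	SecretKey string // bound to --secret-key
}
```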
@@ -3,9 +3,9 @@ package submit
import (
"fmt"

"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/meta"
metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"

"github.com/spf13/cobra"
)
@@ -21,7 +21,7 @@ func getExceptionsCmd(ks meta.IKubescape, submitInfo *metav1.Submit) *cobra.Comm
return nil
},
Run: func(cmd *cobra.Command, args []string) {
if err := ks.SubmitExceptions(submitInfo.Account, args[0]); err != nil {
if err := ks.SubmitExceptions(&submitInfo.Credentials, args[0]); err != nil {
logger.L().Fatal(err.Error())
}
},

@@ -2,15 +2,15 @@ package submit

import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/meta"
"github.com/armosec/kubescape/core/meta/cliinterfaces"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"

reporterv1 "github.com/armosec/kubescape/core/pkg/resultshandling/reporter/v1"
reporterv1 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v1"

"github.com/armosec/rbac-utils/rbacscanner"
"github.com/spf13/cobra"
@@ -27,13 +27,13 @@ func getRBACCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
k8s := k8sinterface.NewKubernetesApi()

// get config
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
clusterConfig := getTenantConfig(&submitInfo.Credentials, "", k8s)
if err := clusterConfig.SetTenant(); err != nil {
logger.L().Error("failed setting account ID", helpers.Error(err))
}

// list RBAC
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetAccountID(), clusterConfig.GetClusterName()))
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetAccountID(), clusterConfig.GetContextName()))

// submit resources
r := reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
@@ -60,9 +60,9 @@ func getKubernetesApi() *k8sinterface.KubernetesApi {
}
return k8sinterface.NewKubernetesApi()
}
func getTenantConfig(Account, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
func getTenantConfig(credentials *cautils.Credentials, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), Account, clusterName)
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), credentials, clusterName)
}
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), Account, clusterName)
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), credentials, clusterName)
}

@@ -7,14 +7,14 @@ import (
"time"

"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/meta"
"github.com/armosec/kubescape/core/meta/cliinterfaces"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/core/pkg/resultshandling/reporter"
reporterv1 "github.com/armosec/kubescape/core/pkg/resultshandling/reporter/v1"
reporterv2 "github.com/armosec/kubescape/core/pkg/resultshandling/reporter/v2"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
reporterv1 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v1"
reporterv2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v2"

"github.com/armosec/opa-utils/reporthandling"
"github.com/google/uuid"
@@ -69,12 +69,12 @@ func getResultsCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
k8s := getKubernetesApi()

// get config
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
clusterConfig := getTenantConfig(&submitInfo.Credentials, "", k8s)
if err := clusterConfig.SetTenant(); err != nil {
logger.L().Error("failed setting account ID", helpers.Error(err))
}

resultsObjects := NewResultsObject(clusterConfig.GetAccountID(), clusterConfig.GetClusterName(), args[0])
resultsObjects := NewResultsObject(clusterConfig.GetAccountID(), clusterConfig.GetContextName(), args[0])

// submit resources
var r reporter.IReport

@@ -1,8 +1,8 @@
package submit

import (
"github.com/armosec/kubescape/core/meta"
metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/meta"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -20,7 +20,9 @@ func GetSubmitCmd(ks meta.IKubescape) *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
},
}
submitCmd.PersistentFlags().StringVarP(&submitInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
submitCmd.PersistentFlags().StringVarP(&submitInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
|
||||
submitCmd.PersistentFlags().StringVarP(&submitInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armo.cloud/docs/authentication")
|
||||
submitCmd.PersistentFlags().StringVarP(&submitInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armo.cloud/docs/authentication")
|
||||
|
||||
submitCmd.AddCommand(getExceptionsCmd(ks, &submitInfo))
|
||||
submitCmd.AddCommand(getResultsCmd(ks, &submitInfo))
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
|
||||
@@ -10,8 +10,8 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
@@ -67,9 +67,12 @@ type ITenantConfig interface {
|
||||
DeleteCachedConfig() error
|
||||
|
||||
// getters
|
||||
GetClusterName() string
|
||||
GetContextName() string
|
||||
GetAccountID() string
|
||||
GetTennatEmail() string
|
||||
GetTenantEmail() string
|
||||
GetToken() string
|
||||
GetClientID() string
|
||||
GetSecretKey() string
|
||||
GetConfigObj() *ConfigObj
|
||||
// GetBackendAPI() getter.IBackend
|
||||
// GenerateURL()
|
||||
@@ -87,8 +90,7 @@ type LocalConfig struct {
|
||||
}
|
||||
|
||||
func NewLocalConfig(
|
||||
backendAPI getter.IBackend, customerGUID, clusterName string) *LocalConfig {
|
||||
var configObj *ConfigObj
|
||||
backendAPI getter.IBackend, credentials *Credentials, clusterName string) *LocalConfig {
|
||||
|
||||
lc := &LocalConfig{
|
||||
backendAPI: backendAPI,
|
||||
@@ -96,20 +98,14 @@ func NewLocalConfig(
|
||||
}
|
||||
// get from configMap
|
||||
if existsConfigFile() { // get from file
|
||||
configObj, _ = loadConfigFromFile()
|
||||
} else {
|
||||
configObj = &ConfigObj{}
|
||||
}
|
||||
if configObj != nil {
|
||||
lc.configObj = configObj
|
||||
}
|
||||
if customerGUID != "" {
|
||||
lc.configObj.AccountID = customerGUID // override config customerGUID
|
||||
loadConfigFromFile(lc.configObj)
|
||||
}
|
||||
|
||||
updateCredentials(lc.configObj, credentials)
|
||||
|
||||
if clusterName != "" {
|
||||
lc.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
|
||||
}
|
||||
getAccountFromEnv(lc.configObj)
|
||||
|
||||
lc.backendAPI.SetAccountID(lc.configObj.AccountID)
|
||||
lc.backendAPI.SetClientID(lc.configObj.ClientID)
|
||||
@@ -119,9 +115,12 @@ func NewLocalConfig(
|
||||
}
|
||||
|
||||
func (lc *LocalConfig) GetConfigObj() *ConfigObj { return lc.configObj }
|
||||
func (lc *LocalConfig) GetTennatEmail() string { return lc.configObj.CustomerAdminEMail }
|
||||
func (lc *LocalConfig) GetTenantEmail() string { return lc.configObj.CustomerAdminEMail }
|
||||
func (lc *LocalConfig) GetAccountID() string { return lc.configObj.AccountID }
|
||||
func (lc *LocalConfig) GetClusterName() string { return lc.configObj.ClusterName }
|
||||
func (lc *LocalConfig) GetClientID() string { return lc.configObj.ClientID }
|
||||
func (lc *LocalConfig) GetSecretKey() string { return lc.configObj.SecretKey }
|
||||
func (lc *LocalConfig) GetContextName() string { return lc.configObj.ClusterName }
|
||||
func (lc *LocalConfig) GetToken() string { return lc.configObj.Token }
|
||||
func (lc *LocalConfig) IsConfigFound() bool { return existsConfigFile() }
|
||||
func (lc *LocalConfig) SetTenant() error {
|
||||
|
||||
@@ -190,8 +189,8 @@ type ClusterConfig struct {
|
||||
configObj *ConfigObj
|
||||
}
|
||||
|
||||
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, customerGUID, clusterName string) *ClusterConfig {
|
||||
var configObj *ConfigObj
|
||||
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, credentials *Credentials, clusterName string) *ClusterConfig {
|
||||
// var configObj *ConfigObj
|
||||
c := &ClusterConfig{
|
||||
k8s: k8s,
|
||||
backendAPI: backendAPI,
|
||||
@@ -200,26 +199,23 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
|
||||
configMapNamespace: getConfigMapNamespace(),
|
||||
}
|
||||
|
||||
// get from configMap
|
||||
// first, load from configMap
|
||||
if c.existsConfigMap() {
|
||||
configObj, _ = c.loadConfigFromConfigMap()
|
||||
c.loadConfigFromConfigMap()
|
||||
}
|
||||
if configObj == nil && existsConfigFile() { // get from file
|
||||
configObj, _ = loadConfigFromFile()
|
||||
}
|
||||
if configObj != nil {
|
||||
c.configObj = configObj
|
||||
}
|
||||
if customerGUID != "" {
|
||||
c.configObj.AccountID = customerGUID // override config customerGUID
|
||||
|
||||
// second, load from file
|
||||
if existsConfigFile() { // get from file
|
||||
loadConfigFromFile(c.configObj)
|
||||
}
|
||||
updateCredentials(c.configObj, credentials)
|
||||
|
||||
if clusterName != "" {
|
||||
c.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
|
||||
}
|
||||
getAccountFromEnv(c.configObj)
|
||||
|
||||
if c.configObj.ClusterName == "" {
|
||||
c.configObj.ClusterName = AdoptClusterName(k8sinterface.GetClusterName())
|
||||
c.configObj.ClusterName = AdoptClusterName(k8sinterface.GetContextName())
|
||||
} else { // override the cluster name if it has unwanted characters
|
||||
c.configObj.ClusterName = AdoptClusterName(c.configObj.ClusterName)
|
||||
}
|
||||
@@ -234,7 +230,10 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
|
||||
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
|
||||
func (c *ClusterConfig) GetDefaultNS() string { return c.configMapNamespace }
|
||||
func (c *ClusterConfig) GetAccountID() string { return c.configObj.AccountID }
|
||||
func (c *ClusterConfig) GetTennatEmail() string { return c.configObj.CustomerAdminEMail }
|
||||
func (c *ClusterConfig) GetClientID() string { return c.configObj.ClientID }
|
||||
func (c *ClusterConfig) GetSecretKey() string { return c.configObj.SecretKey }
|
||||
func (c *ClusterConfig) GetTenantEmail() string { return c.configObj.CustomerAdminEMail }
|
||||
func (c *ClusterConfig) GetToken() string { return c.configObj.Token }
|
||||
func (c *ClusterConfig) IsConfigFound() bool { return existsConfigFile() || c.existsConfigMap() }
|
||||
|
||||
func (c *ClusterConfig) SetTenant() error {
|
||||
@@ -271,7 +270,7 @@ func (c *ClusterConfig) DeleteCachedConfig() error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (c *ClusterConfig) GetClusterName() string {
|
||||
func (c *ClusterConfig) GetContextName() string {
|
||||
return c.configObj.ClusterName
|
||||
}
|
||||
|
||||
@@ -282,18 +281,26 @@ func (c *ClusterConfig) ToMapString() map[string]interface{} {
|
||||
}
|
||||
return m
|
||||
}
|
||||
func (c *ClusterConfig) loadConfigFromConfigMap() (*ConfigObj, error) {
|
||||
func (c *ClusterConfig) loadConfigFromConfigMap() error {
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
if bData, err := json.Marshal(configMap.Data); err == nil {
|
||||
return readConfig(bData)
|
||||
}
|
||||
return nil, nil
|
||||
return loadConfigFromData(c.configObj, configMap.Data)
|
||||
}
|
||||
|
||||
func loadConfigFromData(co *ConfigObj, data map[string]string) error {
|
||||
var e error
|
||||
if jsonConf, ok := data["config.json"]; ok {
|
||||
e = readConfig([]byte(jsonConf), co)
|
||||
}
|
||||
if bData, err := json.Marshal(data); err == nil {
|
||||
e = readConfig(bData, co)
|
||||
}
|
||||
|
||||
return e
|
||||
}
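A standalone sketch of the merge order that loadConfigFromData implements, using a local hypothetical configObj type rather than the real ConfigObj: the "config.json" entry is unmarshaled first and the flat ConfigMap keys are unmarshaled afterwards, so a flat key such as accountID wins when both are present.

package main

import (
	"encoding/json"
	"fmt"
)

type configObj struct {
	AccountID string `json:"accountID"`
	ClientID  string `json:"clientID"`
}

func main() {
	data := map[string]string{
		"config.json": `{"accountID":"from-file","clientID":"from-file"}`,
		"accountID":   "from-flat-key",
	}

	co := &configObj{}
	_ = json.Unmarshal([]byte(data["config.json"]), co) // first pass: config.json
	if b, err := json.Marshal(data); err == nil {
		_ = json.Unmarshal(b, co) // second pass: flat keys override matching fields
	}

	fmt.Println(co.AccountID, co.ClientID) // from-flat-key from-file
}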
|
||||
func (c *ClusterConfig) existsConfigMap() bool {
|
||||
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
|
||||
// TODO - check if has customerGUID
|
||||
@@ -411,28 +418,27 @@ func (c *ClusterConfig) updateConfigData(configMap *corev1.ConfigMap) {
|
||||
}
|
||||
}
|
||||
}
|
||||
func loadConfigFromFile() (*ConfigObj, error) {
|
||||
func loadConfigFromFile(configObj *ConfigObj) error {
|
||||
dat, err := os.ReadFile(ConfigFileFullPath())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
return readConfig(dat)
|
||||
return readConfig(dat, configObj)
|
||||
}
|
||||
func readConfig(dat []byte) (*ConfigObj, error) {
|
||||
func readConfig(dat []byte, configObj *ConfigObj) error {
|
||||
|
||||
if len(dat) == 0 {
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
configObj := &ConfigObj{}
|
||||
|
||||
if err := json.Unmarshal(dat, configObj); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
if configObj.AccountID == "" {
|
||||
configObj.AccountID = configObj.CustomerGUID
|
||||
}
|
||||
configObj.CustomerGUID = ""
|
||||
return configObj, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Check if the customer is submitted
|
||||
@@ -479,15 +485,34 @@ func getConfigMapNamespace() string {
|
||||
return "default"
|
||||
}
|
||||
|
||||
func getAccountFromEnv(configObj *ConfigObj) {
|
||||
func getAccountFromEnv(credentials *Credentials) {
|
||||
// load from env
|
||||
if accountID := os.Getenv("KS_ACCOUNT_ID"); accountID != "" {
|
||||
configObj.AccountID = accountID
|
||||
if accountID := os.Getenv("KS_ACCOUNT_ID"); credentials.Account != "" && accountID != "" {
|
||||
credentials.Account = accountID
|
||||
}
|
||||
if clientID := os.Getenv("KS_CLIENT_ID"); clientID != "" {
|
||||
configObj.ClientID = clientID
|
||||
if clientID := os.Getenv("KS_CLIENT_ID"); credentials.ClientID != "" && clientID != "" {
|
||||
credentials.ClientID = clientID
|
||||
}
|
||||
if secretKey := os.Getenv("KS_SECRET_KEY"); secretKey != "" {
|
||||
configObj.SecretKey = secretKey
|
||||
if secretKey := os.Getenv("KS_SECRET_KEY"); credentials.SecretKey != "" && secretKey != "" {
|
||||
credentials.SecretKey = secretKey
|
||||
}
|
||||
}
|
||||
|
||||
func updateCredentials(configObj *ConfigObj, credentials *Credentials) {
|
||||
|
||||
if credentials == nil {
|
||||
credentials = &Credentials{}
|
||||
}
|
||||
getAccountFromEnv(credentials)
|
||||
|
||||
if credentials.Account != "" {
|
||||
configObj.AccountID = credentials.Account // override config Account
|
||||
}
|
||||
if credentials.ClientID != "" {
|
||||
configObj.ClientID = credentials.ClientID // override config ClientID
|
||||
}
|
||||
if credentials.SecretKey != "" {
|
||||
configObj.SecretKey = credentials.SecretKey // override config SecretKey
|
||||
}
|
||||
|
||||
}
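A minimal sketch of the override step performed by updateCredentials, with local stand-in types and the environment-variable handling omitted: only non-empty credential fields replace what is already cached in the config object.

package main

import "fmt"

type configObj struct{ AccountID, ClientID, SecretKey string }
type credentials struct{ Account, ClientID, SecretKey string }

func updateCredentials(co *configObj, c *credentials) {
	if c == nil {
		c = &credentials{}
	}
	if c.Account != "" {
		co.AccountID = c.Account // override cached account
	}
	if c.ClientID != "" {
		co.ClientID = c.ClientID // override cached client ID
	}
	if c.SecretKey != "" {
		co.SecretKey = c.SecretKey // override cached secret key
	}
}

func main() {
	co := &configObj{AccountID: "cached-account", ClientID: "cached-client"}
	updateCredentials(co, &credentials{Account: "flag-account"})
	fmt.Println(co.AccountID, co.ClientID) // flag-account cached-client
}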
|
||||
|
||||
core/cautils/customerloader_test.go (new file, 193 lines)
@@ -0,0 +1,193 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func mockConfigObj() *ConfigObj {
|
||||
return &ConfigObj{
|
||||
AccountID: "aaa",
|
||||
ClientID: "bbb",
|
||||
SecretKey: "ccc",
|
||||
ClusterName: "ddd",
|
||||
CustomerAdminEMail: "ab@cd",
|
||||
Token: "eee",
|
||||
}
|
||||
}
|
||||
func mockLocalConfig() *LocalConfig {
|
||||
return &LocalConfig{
|
||||
backendAPI: nil,
|
||||
configObj: mockConfigObj(),
|
||||
}
|
||||
}
|
||||
|
||||
func mockClusterConfig() *ClusterConfig {
|
||||
return &ClusterConfig{
|
||||
backendAPI: nil,
|
||||
configObj: mockConfigObj(),
|
||||
}
|
||||
}
|
||||
func TestConfig(t *testing.T) {
|
||||
co := mockConfigObj()
|
||||
cop := ConfigObj{}
|
||||
|
||||
assert.NoError(t, json.Unmarshal(co.Config(), &cop))
|
||||
assert.Equal(t, co.AccountID, cop.AccountID)
|
||||
assert.Equal(t, co.ClientID, cop.ClientID)
|
||||
assert.Equal(t, co.SecretKey, cop.SecretKey)
|
||||
assert.Equal(t, "", cop.ClusterName) // Not copied to bytes
|
||||
assert.Equal(t, "", cop.CustomerAdminEMail) // Not copied to bytes
|
||||
assert.Equal(t, "", cop.Token) // Not copied to bytes
|
||||
|
||||
}
|
||||
|
||||
func TestITenantConfig(t *testing.T) {
|
||||
var lc ITenantConfig
|
||||
var c ITenantConfig
|
||||
lc = mockLocalConfig()
|
||||
c = mockClusterConfig()
|
||||
|
||||
co := mockConfigObj()
|
||||
|
||||
// test LocalConfig methods
|
||||
assert.Equal(t, co.AccountID, lc.GetAccountID())
|
||||
assert.Equal(t, co.ClientID, lc.GetClientID())
|
||||
assert.Equal(t, co.SecretKey, lc.GetSecretKey())
|
||||
assert.Equal(t, co.ClusterName, lc.GetContextName())
|
||||
assert.Equal(t, co.CustomerAdminEMail, lc.GetTenantEmail())
|
||||
assert.Equal(t, co.Token, lc.GetToken())
|
||||
|
||||
// test ClusterConfig methods
|
||||
assert.Equal(t, co.AccountID, c.GetAccountID())
|
||||
assert.Equal(t, co.ClientID, c.GetClientID())
|
||||
assert.Equal(t, co.SecretKey, c.GetSecretKey())
|
||||
assert.Equal(t, co.ClusterName, c.GetContextName())
|
||||
assert.Equal(t, co.CustomerAdminEMail, c.GetTenantEmail())
|
||||
assert.Equal(t, co.Token, c.GetToken())
|
||||
}
|
||||
|
||||
func TestUpdateConfigData(t *testing.T) {
|
||||
c := mockClusterConfig()
|
||||
|
||||
configMap := &corev1.ConfigMap{}
|
||||
|
||||
c.updateConfigData(configMap)
|
||||
|
||||
assert.Equal(t, c.GetAccountID(), configMap.Data["accountID"])
|
||||
assert.Equal(t, c.GetClientID(), configMap.Data["clientID"])
|
||||
assert.Equal(t, c.GetSecretKey(), configMap.Data["secretKey"])
|
||||
}
|
||||
|
||||
func TestReadConfig(t *testing.T) {
|
||||
com := mockConfigObj()
|
||||
co := &ConfigObj{}
|
||||
|
||||
b, e := json.Marshal(com)
|
||||
assert.NoError(t, e)
|
||||
|
||||
readConfig(b, co)
|
||||
|
||||
assert.Equal(t, com.AccountID, co.AccountID)
|
||||
assert.Equal(t, com.ClientID, co.ClientID)
|
||||
assert.Equal(t, com.SecretKey, co.SecretKey)
|
||||
assert.Equal(t, com.ClusterName, co.ClusterName)
|
||||
assert.Equal(t, com.CustomerAdminEMail, co.CustomerAdminEMail)
|
||||
assert.Equal(t, com.Token, co.Token)
|
||||
}
|
||||
|
||||
func TestLoadConfigFromData(t *testing.T) {
|
||||
|
||||
// use case: all data is in base config
|
||||
{
|
||||
c := mockClusterConfig()
|
||||
co := mockConfigObj()
|
||||
|
||||
configMap := &corev1.ConfigMap{}
|
||||
|
||||
c.updateConfigData(configMap)
|
||||
|
||||
c.configObj = &ConfigObj{}
|
||||
|
||||
loadConfigFromData(c.configObj, configMap.Data)
|
||||
|
||||
assert.Equal(t, c.GetAccountID(), co.AccountID)
|
||||
assert.Equal(t, c.GetClientID(), co.ClientID)
|
||||
assert.Equal(t, c.GetSecretKey(), co.SecretKey)
|
||||
assert.Equal(t, c.GetContextName(), co.ClusterName)
|
||||
assert.Equal(t, c.GetTenantEmail(), co.CustomerAdminEMail)
|
||||
assert.Equal(t, c.GetToken(), co.Token)
|
||||
}
|
||||
|
||||
// use case: all data is in config.json
|
||||
{
|
||||
c := mockClusterConfig()
|
||||
|
||||
co := mockConfigObj()
|
||||
configMap := &corev1.ConfigMap{
|
||||
Data: make(map[string]string),
|
||||
}
|
||||
|
||||
configMap.Data["config.json"] = string(c.GetConfigObj().Config())
|
||||
c.configObj = &ConfigObj{}
|
||||
|
||||
loadConfigFromData(c.configObj, configMap.Data)
|
||||
|
||||
assert.Equal(t, c.GetAccountID(), co.AccountID)
|
||||
assert.Equal(t, c.GetClientID(), co.ClientID)
|
||||
assert.Equal(t, c.GetSecretKey(), co.SecretKey)
|
||||
}
|
||||
|
||||
// use case: some data is in config.json
|
||||
{
|
||||
c := mockClusterConfig()
|
||||
configMap := &corev1.ConfigMap{
|
||||
Data: make(map[string]string),
|
||||
}
|
||||
|
||||
// add to map
|
||||
configMap.Data["clientID"] = c.configObj.ClientID
|
||||
configMap.Data["secretKey"] = c.configObj.SecretKey
|
||||
|
||||
// delete the content
|
||||
c.configObj.ClientID = ""
|
||||
c.configObj.SecretKey = ""
|
||||
|
||||
configMap.Data["config.json"] = string(c.GetConfigObj().Config())
|
||||
loadConfigFromData(c.configObj, configMap.Data)
|
||||
|
||||
assert.NotEmpty(t, c.GetAccountID())
|
||||
assert.NotEmpty(t, c.GetClientID())
|
||||
assert.NotEmpty(t, c.GetSecretKey())
|
||||
}
|
||||
|
||||
// use case: some data is in config.json
|
||||
{
|
||||
c := mockClusterConfig()
|
||||
configMap := &corev1.ConfigMap{
|
||||
Data: make(map[string]string),
|
||||
}
|
||||
|
||||
c.configObj.AccountID = "tttt"
|
||||
|
||||
// add to map
|
||||
configMap.Data["accountID"] = mockConfigObj().AccountID
|
||||
configMap.Data["clientID"] = c.configObj.ClientID
|
||||
configMap.Data["secretKey"] = c.configObj.SecretKey
|
||||
|
||||
// delete the content
|
||||
c.configObj.ClientID = ""
|
||||
c.configObj.SecretKey = ""
|
||||
|
||||
configMap.Data["config.json"] = string(c.GetConfigObj().Config())
|
||||
loadConfigFromData(c.configObj, configMap.Data)
|
||||
|
||||
assert.Equal(t, mockConfigObj().AccountID, c.GetAccountID())
|
||||
assert.NotEmpty(t, c.GetClientID())
|
||||
assert.NotEmpty(t, c.GetSecretKey())
|
||||
}
|
||||
|
||||
}
|
||||
@@ -19,7 +19,7 @@ type OPASessionObj struct {
|
||||
Policies []reporthandling.Framework // list of frameworks to scan
|
||||
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<resource ID>]<resource>
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<resource ID>]<resource result>
ResourceSource map[string]string // resources sources, map[<resource ID>]<resource source>
ResourceSource map[string]reporthandling.Source // resources sources, map[<resource ID>]<resource source>
|
||||
PostureReport *reporthandling.PostureReport // scan results v1 - Remove
|
||||
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
|
||||
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
|
||||
@@ -39,7 +39,7 @@ func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SRe
|
||||
ResourcesResult: make(map[string]resourcesresults.Result),
|
||||
InfoMap: make(map[string]apis.StatusInfo),
|
||||
ResourceToControlsMap: make(map[string][]string),
|
||||
ResourceSource: make(map[string]string),
|
||||
ResourceSource: make(map[string]reporthandling.Source),
|
||||
SessionID: scanInfo.ScanID,
|
||||
PostureReport: &reporthandling.PostureReport{
|
||||
ClusterName: ClusterName,
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
pkgcautils "github.com/armosec/utils-go/utils"
|
||||
"golang.org/x/mod/semver"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/utils-go/boolutils"
|
||||
)
|
||||
|
||||
func NewPolicies() *Policies {
|
||||
@@ -40,7 +40,7 @@ func ruleWithArmoOpaDependency(attributes map[string]interface{}) bool {
|
||||
return false
|
||||
}
|
||||
if s, ok := attributes["armoOpa"]; ok { // TODO - make global
|
||||
return pkgcautils.StringToBool(s.(string))
|
||||
return boolutils.StringToBool(s.(string))
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -51,18 +51,16 @@ func ruleWithArmoOpaDependency(attributes map[string]interface{}) bool {
|
||||
func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version string) bool {
|
||||
if from, ok := attributes["useFromKubescapeVersion"]; ok && from != nil {
|
||||
if version != "" {
|
||||
|
||||
if semver.Compare(from.(string), BuildNumber) > 0 {
|
||||
if semver.Compare(version, from.(string)) == -1 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
if until, ok := attributes["useUntilKubescapeVersion"]; ok && until != nil {
|
||||
if version != "" {
|
||||
if semver.Compare(BuildNumber, until.(string)) >= 0 {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
if version == "" {
|
||||
return false
|
||||
}
|
||||
if semver.Compare(version, until.(string)) >= 0 {
|
||||
return false
|
||||
}
|
||||
}
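A condensed restatement of the version gate above, assuming the useFromKubescapeVersion and useUntilKubescapeVersion attributes have already been extracted as strings; note that golang.org/x/mod/semver expects the "v" prefix on every version it compares.

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// isCompatible mirrors the checks above: a rule is skipped when the running
// version is below "from", or when "until" is set and the version is unknown
// or at/above "until".
func isCompatible(version, from, until string) bool {
	if from != "" && version != "" && semver.Compare(version, from) < 0 {
		return false
	}
	if until != "" {
		if version == "" || semver.Compare(version, until) >= 0 {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isCompatible("v2.0.150", "v2.0.100", "v2.1.0")) // true
	fmt.Println(isCompatible("v2.1.0", "v2.0.100", "v2.1.0"))   // false: at the upper bound
	fmt.Println(isCompatible("", "v2.0.100", "v2.1.0"))         // false: unknown version with an upper bound
}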
|
||||
|
||||
@@ -9,14 +9,17 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/localworkload"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
var (
|
||||
YAML_PREFIX = []string{".yaml", ".yml"}
|
||||
JSON_PREFIX = []string{".json"}
|
||||
YAML_PREFIX = []string{"yaml", "yml"}
|
||||
JSON_PREFIX = []string{"json"}
|
||||
)
|
||||
|
||||
type FileFormat string
|
||||
@@ -26,23 +29,52 @@ const (
|
||||
JSON_FILE_FORMAT FileFormat = "json"
|
||||
)
|
||||
|
||||
func LoadResourcesFromFiles(inputPatterns []string) (map[string][]workloadinterface.IMetadata, error) {
|
||||
files, errs := listFiles(inputPatterns)
|
||||
// LoadResourcesFromHelmCharts scans a given path (recuresively) for helm charts, renders the templates and returns a list of workloads
|
||||
func LoadResourcesFromHelmCharts(basePath string) map[string][]workloadinterface.IMetadata {
|
||||
directories, _ := listDirs(basePath)
|
||||
helmDirectories := make([]string, 0)
|
||||
for _, dir := range directories {
|
||||
if ok, _ := IsHelmDirectory(dir); ok {
|
||||
helmDirectories = append(helmDirectories, dir)
|
||||
}
|
||||
}
|
||||
|
||||
result := map[string][]workloadinterface.IMetadata{}
|
||||
for _, helmDir := range helmDirectories {
|
||||
chart, err := NewHelmChart(helmDir)
|
||||
if err == nil {
|
||||
wls, errs := chart.GetWorkloadsWithDefaultValues()
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("Rendering of Helm chart template failed: %v", errs))
|
||||
continue
|
||||
}
|
||||
|
||||
for k, v := range wls {
|
||||
result[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
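A hypothetical caller of LoadResourcesFromHelmCharts, with the import path taken from this diff and the chart path assumed for illustration: it prints every rendered workload grouped by its source file.

package main

import (
	"fmt"

	"github.com/armosec/kubescape/v2/core/cautils"
)

func main() {
	// Render every Helm chart found under the given directory with its default values.
	sourceToWorkloads := cautils.LoadResourcesFromHelmCharts("./examples/helm_chart")
	for file, workloads := range sourceToWorkloads {
		for _, w := range workloads {
			fmt.Printf("%s -> %s\n", file, w.GetID())
		}
	}
}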
|
||||
|
||||
func LoadResourcesFromFiles(input, rootPath string) map[string][]workloadinterface.IMetadata {
|
||||
files, errs := listFiles(input)
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("%v", errs))
|
||||
}
|
||||
if len(files) == 0 {
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
workloads, errs := loadFiles(files)
|
||||
workloads, errs := loadFiles(rootPath, files)
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("%v", errs))
|
||||
}
|
||||
return workloads, nil
|
||||
|
||||
return workloads
|
||||
}
|
||||
|
||||
func loadFiles(filePaths []string) (map[string][]workloadinterface.IMetadata, []error) {
|
||||
func loadFiles(rootPath string, filePaths []string) (map[string][]workloadinterface.IMetadata, []error) {
|
||||
workloads := make(map[string][]workloadinterface.IMetadata, 0)
|
||||
errs := []error{}
|
||||
for i := range filePaths {
|
||||
@@ -51,15 +83,30 @@ func loadFiles(filePaths []string) (map[string][]workloadinterface.IMetadata, []
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
if len(f) == 0 {
|
||||
continue // empty file
|
||||
}
|
||||
|
||||
w, e := ReadFile(f, GetFileFormat(filePaths[i]))
|
||||
errs = append(errs, e...)
|
||||
if w != nil {
|
||||
if _, ok := workloads[filePaths[i]]; !ok {
|
||||
workloads[filePaths[i]] = []workloadinterface.IMetadata{}
|
||||
if e != nil {
|
||||
logger.L().Debug("failed to read file", helpers.String("file", filePaths[i]), helpers.Error(e))
|
||||
}
|
||||
if len(w) != 0 {
|
||||
path := filePaths[i]
|
||||
if _, ok := workloads[path]; !ok {
|
||||
workloads[path] = []workloadinterface.IMetadata{}
|
||||
}
|
||||
wSlice := workloads[filePaths[i]]
|
||||
wSlice = append(wSlice, w...)
|
||||
workloads[filePaths[i]] = wSlice
|
||||
wSlice := workloads[path]
|
||||
for j := range w {
|
||||
lw := localworkload.NewLocalWorkload(w[j].GetObject())
|
||||
if relPath, err := filepath.Rel(rootPath, path); err == nil {
|
||||
lw.SetPath(relPath)
|
||||
} else {
|
||||
lw.SetPath(path)
|
||||
}
|
||||
wSlice = append(wSlice, lw)
|
||||
}
|
||||
workloads[path] = wSlice
|
||||
}
|
||||
}
|
||||
return workloads, errs
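A quick illustration of the filepath.Rel fallback used in the loop above: when the file path can be made relative to the scan root, the relative form is stored on the workload; otherwise the original path is kept.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Relative form is available: used as the workload path.
	rel, err := filepath.Rel("/repo", "/repo/examples/online-boutique/adservice.yaml")
	fmt.Println(rel, err) // examples/online-boutique/adservice.yaml <nil>

	// Mixing an absolute root with a relative file path fails, so the caller falls back to the raw path.
	rel, err = filepath.Rel("/repo", "examples/online-boutique/adservice.yaml")
	fmt.Println(rel, err) // empty result with an error explaining the paths cannot be related
}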
|
||||
@@ -68,47 +115,65 @@ func loadFiles(filePaths []string) (map[string][]workloadinterface.IMetadata, []
|
||||
func loadFile(filePath string) ([]byte, error) {
|
||||
return os.ReadFile(filePath)
|
||||
}
|
||||
func ReadFile(fileContent []byte, fileFromat FileFormat) ([]workloadinterface.IMetadata, []error) {
|
||||
func ReadFile(fileContent []byte, fileFormat FileFormat) ([]workloadinterface.IMetadata, error) {
|
||||
|
||||
switch fileFromat {
|
||||
switch fileFormat {
|
||||
case YAML_FILE_FORMAT:
|
||||
return readYamlFile(fileContent)
|
||||
case JSON_FILE_FORMAT:
|
||||
return readJsonFile(fileContent)
|
||||
default:
|
||||
return nil, nil // []error{fmt.Errorf("file extension %s not supported", fileFromat)}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func listFiles(patterns []string) ([]string, []error) {
|
||||
files := []string{}
|
||||
errs := []error{}
|
||||
for i := range patterns {
|
||||
if strings.HasPrefix(patterns[i], "http") {
|
||||
continue
|
||||
}
|
||||
if !filepath.IsAbs(patterns[i]) {
|
||||
o, _ := os.Getwd()
|
||||
patterns[i] = filepath.Join(o, patterns[i])
|
||||
}
|
||||
if IsFile(patterns[i]) {
|
||||
files = append(files, patterns[i])
|
||||
} else {
|
||||
f, err := glob(filepath.Split(patterns[i])) //filepath.Glob(patterns[i])
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
files = append(files, f...)
|
||||
}
|
||||
}
|
||||
}
|
||||
return files, errs
|
||||
// listFiles returns the list of absolute paths, full file path and list of errors. The list of abs paths and full path have the same length
|
||||
func listFiles(pattern string) ([]string, []error) {
|
||||
return listFilesOrDirectories(pattern, false)
|
||||
}
|
||||
|
||||
func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, []error) {
|
||||
// listDirs returns the list of absolute paths, full directories path and list of errors. The list of abs paths and full path have the same length
|
||||
func listDirs(pattern string) ([]string, []error) {
|
||||
return listFilesOrDirectories(pattern, true)
|
||||
}
|
||||
|
||||
func listFilesOrDirectories(pattern string, onlyDirectories bool) ([]string, []error) {
|
||||
var paths []string
|
||||
errs := []error{}
|
||||
|
||||
if !filepath.IsAbs(pattern) {
|
||||
o, _ := os.Getwd()
|
||||
pattern = filepath.Join(o, pattern)
|
||||
}
|
||||
|
||||
if !onlyDirectories && IsFile(pattern) {
|
||||
paths = append(paths, pattern)
|
||||
return paths, errs
|
||||
}
|
||||
|
||||
root, shouldMatch := filepath.Split(pattern)
|
||||
|
||||
if IsDir(pattern) {
|
||||
root = pattern
|
||||
shouldMatch = "*"
|
||||
}
|
||||
if shouldMatch == "" {
|
||||
shouldMatch = "*"
|
||||
}
|
||||
|
||||
f, err := glob(root, shouldMatch, onlyDirectories)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
paths = append(paths, f...)
|
||||
}
|
||||
|
||||
return paths, errs
|
||||
}
|
||||
|
||||
func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, error) {
|
||||
defer recover()
|
||||
|
||||
r := bytes.NewReader(yamlFile)
|
||||
dec := yaml.NewDecoder(r)
|
||||
yamlObjs := []workloadinterface.IMetadata{}
|
||||
@@ -127,19 +192,17 @@ func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, []error) {
|
||||
yamlObjs = append(yamlObjs, o)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
errs = append(errs, fmt.Errorf("failed to convert yaml file to map[string]interface, file content: %v", j))
|
||||
}
|
||||
}
|
||||
|
||||
return yamlObjs, errs
|
||||
return yamlObjs, nil
|
||||
}
|
||||
|
||||
func readJsonFile(jsonFile []byte) ([]workloadinterface.IMetadata, []error) {
|
||||
func readJsonFile(jsonFile []byte) ([]workloadinterface.IMetadata, error) {
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
var jsonObj interface{}
|
||||
if err := json.Unmarshal(jsonFile, &jsonObj); err != nil {
|
||||
return workloads, []error{err}
|
||||
return workloads, err
|
||||
}
|
||||
|
||||
convertJsonToWorkload(jsonObj, &workloads)
|
||||
@@ -178,28 +241,47 @@ func convertYamlToJson(i interface{}) interface{} {
|
||||
}
|
||||
|
||||
func IsYaml(filePath string) bool {
|
||||
return StringInSlice(YAML_PREFIX, filepath.Ext(filePath)) != ValueNotFound
|
||||
return StringInSlice(YAML_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
|
||||
}
|
||||
|
||||
func IsJson(filePath string) bool {
|
||||
return StringInSlice(JSON_PREFIX, filepath.Ext(filePath)) != ValueNotFound
|
||||
return StringInSlice(JSON_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
|
||||
}
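A small check of why the prefix lists above dropped the leading dot: filepath.Ext returns the extension including the dot, so the dot is stripped before the lookup against YAML_PREFIX and JSON_PREFIX.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	for _, p := range []string{"deployment.yaml", "values.yml", "config.json", "README.md"} {
		ext := strings.ReplaceAll(filepath.Ext(p), ".", "")
		fmt.Printf("%s -> %q\n", p, ext) // yields "yaml", "yml", "json", "md"
	}
}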
|
||||
|
||||
func glob(root, pattern string) ([]string, error) {
|
||||
func glob(root, pattern string, onlyDirectories bool) ([]string, error) {
|
||||
var matches []string
|
||||
|
||||
err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// listing only directories
|
||||
if onlyDirectories {
|
||||
if info.IsDir() {
|
||||
if matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {
|
||||
return err
|
||||
} else if matched {
|
||||
matches = append(matches, path)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// listing only files
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
fileFormat := GetFileFormat(path)
|
||||
if !(fileFormat == JSON_FILE_FORMAT || fileFormat == YAML_FILE_FORMAT) {
|
||||
return nil
|
||||
}
|
||||
if matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {
|
||||
return err
|
||||
} else if matched {
|
||||
matches = append(matches, path)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
@@ -207,6 +289,8 @@ func glob(root, pattern string) ([]string, error) {
|
||||
}
|
||||
return matches, nil
|
||||
}
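A standalone sketch of the walk-and-match approach the glob helper uses; the matchFiles name and the example path are hypothetical. It walks the tree and keeps only YAML/JSON files whose base name matches the pattern.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func matchFiles(root, pattern string) ([]string, error) {
	var matches []string
	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil // only files are collected
		}
		ext := strings.TrimPrefix(filepath.Ext(path), ".")
		if ext != "yaml" && ext != "yml" && ext != "json" {
			return nil // skip non-manifest files
		}
		ok, err := filepath.Match(pattern, filepath.Base(path))
		if err != nil {
			return err
		}
		if ok {
			matches = append(matches, path)
		}
		return nil
	})
	return matches, err
}

func main() {
	files, err := matchFiles("./examples/online-boutique", "*")
	fmt.Println(files, err)
}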
|
||||
|
||||
// IsFile checks if a given path is a file
|
||||
func IsFile(name string) bool {
|
||||
if fi, err := os.Stat(name); err == nil {
|
||||
if fi.Mode().IsRegular() {
|
||||
@@ -216,6 +300,16 @@ func IsFile(name string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsDir checks if a given path is a directory
|
||||
func IsDir(name string) bool {
|
||||
if info, err := os.Stat(name); err == nil {
|
||||
if info.IsDir() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func GetFileFormat(filePath string) FileFormat {
|
||||
if IsYaml(filePath) {
|
||||
return YAML_FILE_FORMAT
|
||||
|
||||
@@ -6,59 +6,99 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/localworkload"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func onlineBoutiquePath() string {
|
||||
o, _ := os.Getwd()
|
||||
return filepath.Join(filepath.Dir(o), "../examples/online-boutique/*")
|
||||
return filepath.Join(filepath.Dir(o), "..", "examples", "online-boutique")
|
||||
}
|
||||
|
||||
func helmChartPath() string {
|
||||
o, _ := os.Getwd()
|
||||
return filepath.Join(filepath.Dir(o), "..", "examples", "helm_chart")
|
||||
}
|
||||
|
||||
func TestListFiles(t *testing.T) {
|
||||
|
||||
filesPath := onlineBoutiquePath()
|
||||
|
||||
files, errs := listFiles([]string{filesPath})
|
||||
files, errs := listFiles(filesPath)
|
||||
assert.Equal(t, 0, len(errs))
|
||||
assert.Equal(t, 12, len(files))
|
||||
}
|
||||
|
||||
func TestLoadResourcesFromFiles(t *testing.T) {
|
||||
workloads, err := LoadResourcesFromFiles([]string{onlineBoutiquePath()})
|
||||
assert.NoError(t, err)
|
||||
workloads := LoadResourcesFromFiles(onlineBoutiquePath(), "")
|
||||
assert.Equal(t, 12, len(workloads))
|
||||
|
||||
for i, w := range workloads {
|
||||
switch filepath.Base(i) {
|
||||
case "adservice.yaml":
|
||||
assert.Equal(t, 2, len(w))
|
||||
assert.Equal(t, "apps/v1//Deployment/adservice", w[0].GetID())
|
||||
assert.Equal(t, "/v1//Service/adservice", w[1].GetID())
|
||||
assert.Equal(t, "apps/v1//Deployment/adservice", getRelativePath(w[0].GetID()))
|
||||
assert.Equal(t, "/v1//Service/adservice", getRelativePath(w[1].GetID()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadResourcesFromHelmCharts(t *testing.T) {
|
||||
sourceToWorkloads := LoadResourcesFromHelmCharts(helmChartPath())
|
||||
assert.Equal(t, 6, len(sourceToWorkloads))
|
||||
|
||||
for file, workloads := range sourceToWorkloads {
|
||||
assert.Equalf(t, 1, len(workloads), "expected 1 workload in file %s", file)
|
||||
|
||||
w := workloads[0]
|
||||
assert.True(t, localworkload.IsTypeLocalWorkload(w.GetObject()), "Expected localworkload as object type")
|
||||
|
||||
switch filepath.Base(file) {
|
||||
case "serviceaccount.yaml":
|
||||
assert.Equal(t, "/v1//ServiceAccount/kubescape-discovery", getRelativePath(w.GetID()))
|
||||
case "clusterrole.yaml":
|
||||
assert.Equal(t, "rbac.authorization.k8s.io/v1//ClusterRole/-kubescape", getRelativePath(w.GetID()))
|
||||
case "cronjob.yaml":
|
||||
assert.Equal(t, "batch/v1//CronJob/-kubescape", getRelativePath(w.GetID()))
|
||||
case "role.yaml":
|
||||
assert.Equal(t, "rbac.authorization.k8s.io/v1//Role/-kubescape", getRelativePath(w.GetID()))
|
||||
case "rolebinding.yaml":
|
||||
assert.Equal(t, "rbac.authorization.k8s.io/v1//RoleBinding/-kubescape", getRelativePath(w.GetID()))
|
||||
case "clusterrolebinding.yaml":
|
||||
assert.Equal(t, "rbac.authorization.k8s.io/v1//ClusterRoleBinding/-kubescape", getRelativePath(w.GetID()))
|
||||
default:
|
||||
assert.Failf(t, "missing case for file: %s", filepath.Base(file))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadFiles(t *testing.T) {
|
||||
files, _ := listFiles([]string{onlineBoutiquePath()})
|
||||
_, err := loadFiles(files)
|
||||
files, _ := listFiles(onlineBoutiquePath())
|
||||
_, err := loadFiles("", files)
|
||||
assert.Equal(t, 0, len(err))
|
||||
}
|
||||
|
||||
func TestListDirs(t *testing.T) {
|
||||
dirs, _ := listDirs(filepath.Join(onlineBoutiquePath(), "adservice.yaml"))
|
||||
assert.Equal(t, 0, len(dirs))
|
||||
|
||||
expectedDirs := []string{filepath.Join("examples", "helm_chart"), filepath.Join("examples", "helm_chart", "templates")}
|
||||
dirs, _ = listDirs(helmChartPath())
|
||||
assert.Equal(t, len(expectedDirs), len(dirs))
|
||||
for i := range expectedDirs {
|
||||
assert.Contains(t, dirs[i], expectedDirs[i])
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadFile(t *testing.T) {
|
||||
files, _ := listFiles([]string{strings.Replace(onlineBoutiquePath(), "*", "adservice.yaml", 1)})
|
||||
files, _ := listFiles(filepath.Join(onlineBoutiquePath(), "adservice.yaml"))
|
||||
assert.Equal(t, 1, len(files))
|
||||
|
||||
_, err := loadFile(files[0])
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
func TestMapResources(t *testing.T) {
|
||||
// policyHandler := &PolicyHandler{}
|
||||
// k8sResources, err := policyHandler.loadResources(opaSessionObj.Frameworks, scanInfo)
|
||||
// files, _ := listFiles([]string{onlineBoutiquePath()})
|
||||
// bb, err := loadFile(files[0])
|
||||
// if len(err) > 0 {
|
||||
// t.Errorf("%v", err)
|
||||
// }
|
||||
// for i := range bb {
|
||||
// t.Errorf("%s", bb[i].ToString())
|
||||
// }
|
||||
|
||||
func getRelativePath(p string) string {
|
||||
pp := strings.SplitAfter(p, "api=")
|
||||
return pp[1]
|
||||
}
|
||||
|
||||
@@ -10,8 +10,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
@@ -34,7 +34,7 @@ var (
|
||||
|
||||
armoDevERURL = "report.eudev3.cyberarmorsoft.com"
|
||||
armoDevBEURL = "api-dev.armo.cloud"
|
||||
armoDevFEURL = "armoui-dev.eudev3.cyberarmorsoft.com"
|
||||
armoDevFEURL = "cloud-dev.armosec.io"
|
||||
armoDevAUTHURL = "eggauth.eudev3.cyberarmorsoft.com"
|
||||
)
|
||||
|
||||
@@ -147,7 +147,8 @@ func (armoAPI *ArmoAPI) IsLoggedIn() bool { return armoAPI.loggedIn
|
||||
func (armoAPI *ArmoAPI) GetClientID() string { return armoAPI.clientID }
|
||||
func (armoAPI *ArmoAPI) GetSecretKey() string { return armoAPI.secretKey }
|
||||
func (armoAPI *ArmoAPI) GetFrontendURL() string { return armoAPI.feURL }
|
||||
func (armoAPI *ArmoAPI) GetAPIURL() string { return armoAPI.apiURL }
|
||||
func (armoAPI *ArmoAPI) GetApiURL() string { return armoAPI.apiURL }
|
||||
func (armoAPI *ArmoAPI) GetAuthURL() string { return armoAPI.authURL }
|
||||
func (armoAPI *ArmoAPI) GetReportReceiverURL() string { return armoAPI.erURL }
|
||||
func (armoAPI *ArmoAPI) SetAccountID(accountID string) { armoAPI.accountID = accountID }
|
||||
func (armoAPI *ArmoAPI) SetClientID(clientID string) { armoAPI.clientID = clientID }
|
||||
@@ -163,7 +164,6 @@ func (armoAPI *ArmoAPI) GetFramework(name string) (*reporthandling.Framework, er
|
||||
if err = JSONDecoder(respStr).Decode(framework); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
SaveInFile(framework, GetDefaultPath(name+".json"))
|
||||
|
||||
return framework, err
|
||||
}
|
||||
|
||||
@@ -13,8 +13,7 @@ var NativeFrameworks = []string{"nsa", "mitre", "armobest", "devopsbest"}
|
||||
|
||||
func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/armoFrameworks"
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
@@ -31,8 +30,7 @@ func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
|
||||
|
||||
func (armoAPI *ArmoAPI) getListFrameworkURL() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/armoFrameworks"
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
@@ -42,8 +40,7 @@ func (armoAPI *ArmoAPI) getListFrameworkURL() string {
|
||||
}
|
||||
func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/armoPostureExceptions"
|
||||
|
||||
q := u.Query()
|
||||
@@ -58,8 +55,7 @@ func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
|
||||
|
||||
func (armoAPI *ArmoAPI) exceptionsURL(exceptionsPolicyName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/postureExceptionPolicy"
|
||||
|
||||
q := u.Query()
|
||||
@@ -75,14 +71,13 @@ func (armoAPI *ArmoAPI) exceptionsURL(exceptionsPolicyName string) string {
|
||||
|
||||
func (armoAPI *ArmoAPI) getAccountConfigDefault(clusterName string) string {
|
||||
config := armoAPI.getAccountConfig(clusterName)
|
||||
url := config + "&scope=default"
|
||||
url := config + "&scope=customer"
|
||||
return url
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/armoCustomerConfiguration"
|
||||
|
||||
q := u.Query()
|
||||
@@ -97,24 +92,21 @@ func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
|
||||
|
||||
func (armoAPI *ArmoAPI) getAccountURL() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/createTenant"
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getApiToken() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.authURL
|
||||
u.Path = "frontegg/identity/resources/auth/v1/api-token"
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetAuthURL())
|
||||
u.Path = "identity/resources/auth/v1/api-token"
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getOpenidCustomers() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/openid_customers"
|
||||
return u.String()
|
||||
}
|
||||
@@ -173,3 +165,12 @@ func (armoAPI *ArmoAPI) getCustomerGUIDFallBack() string {
|
||||
}
|
||||
return "11111111-1111-1111-1111-111111111111"
|
||||
}
|
||||
|
||||
func parseHost(host string) (string, string) {
|
||||
if strings.HasPrefix(host, "http://") {
|
||||
return "http", strings.Replace(host, "http://", "", 1)
|
||||
}
|
||||
|
||||
// default scheme
|
||||
return "https", strings.Replace(host, "https://", "", 1)
|
||||
}
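A standalone restatement of parseHost with a couple of example inputs (the localhost URL is only illustrative): a host without a scheme defaults to https, and an explicit http:// or https:// prefix is stripped off the host part.

package main

import (
	"fmt"
	"strings"
)

func parseHost(host string) (string, string) {
	if strings.HasPrefix(host, "http://") {
		return "http", strings.Replace(host, "http://", "", 1)
	}
	// default scheme
	return "https", strings.Replace(host, "https://", "", 1)
}

func main() {
	fmt.Println(parseHost("api-dev.armo.cloud"))         // https api-dev.armo.cloud
	fmt.Println(parseHost("https://api-dev.armo.cloud")) // https api-dev.armo.cloud
	fmt.Println(parseHost("http://localhost:8080"))      // http localhost:8080
}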
|
||||
|
||||
core/cautils/helmchart.go (new file, 91 lines)
@@ -0,0 +1,91 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/localworkload"
|
||||
helmchart "helm.sh/helm/v3/pkg/chart"
|
||||
helmloader "helm.sh/helm/v3/pkg/chart/loader"
|
||||
helmchartutil "helm.sh/helm/v3/pkg/chartutil"
|
||||
helmengine "helm.sh/helm/v3/pkg/engine"
|
||||
)
|
||||
|
||||
type HelmChart struct {
|
||||
chart *helmchart.Chart
|
||||
path string
|
||||
}
|
||||
|
||||
func IsHelmDirectory(path string) (bool, error) {
|
||||
return helmchartutil.IsChartDir(path)
|
||||
}
|
||||
|
||||
func NewHelmChart(path string) (*HelmChart, error) {
|
||||
chart, err := helmloader.Load(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &HelmChart{
|
||||
chart: chart,
|
||||
path: path,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (hc *HelmChart) GetName() string {
|
||||
return hc.chart.Name()
|
||||
}
|
||||
|
||||
func (hc *HelmChart) GetDefaultValues() map[string]interface{} {
|
||||
return hc.chart.Values
|
||||
}
|
||||
|
||||
// GetWorkloads renders chart template using the default values and returns a map of source file to its workloads
|
||||
func (hc *HelmChart) GetWorkloadsWithDefaultValues() (map[string][]workloadinterface.IMetadata, []error) {
|
||||
return hc.GetWorkloads(hc.GetDefaultValues())
|
||||
}
|
||||
|
||||
// GetWorkloads renders chart template using the provided values and returns a map of source (absolute) file path to its workloads
|
||||
func (hc *HelmChart) GetWorkloads(values map[string]interface{}) (map[string][]workloadinterface.IMetadata, []error) {
|
||||
vals, err := helmchartutil.ToRenderValues(hc.chart, values, helmchartutil.ReleaseOptions{}, nil)
|
||||
if err != nil {
|
||||
return nil, []error{err}
|
||||
}
|
||||
|
||||
sourceToFile, err := helmengine.Render(hc.chart, vals)
|
||||
if err != nil {
|
||||
return nil, []error{err}
|
||||
}
|
||||
|
||||
workloads := make(map[string][]workloadinterface.IMetadata, 0)
|
||||
errs := []error{}
|
||||
|
||||
for path, renderedYaml := range sourceToFile {
|
||||
if !IsYaml(strings.ToLower(path)) {
|
||||
continue
|
||||
}
|
||||
|
||||
wls, e := ReadFile([]byte(renderedYaml), YAML_FILE_FORMAT)
|
||||
if e != nil {
|
||||
logger.L().Debug("failed to read rendered yaml file", helpers.String("file", path), helpers.Error(e))
|
||||
}
|
||||
if len(wls) == 0 {
|
||||
continue
|
||||
}
|
||||
// separate base path and file name. We do not use the os.Separator because the paths returned from the helm engine are not OS specific (e.g. mychart/templates/myfile.yaml)
|
||||
if firstPathSeparatorIndex := strings.Index(path, string("/")); firstPathSeparatorIndex != -1 {
|
||||
absPath := filepath.Join(hc.path, path[firstPathSeparatorIndex:])
|
||||
|
||||
workloads[absPath] = []workloadinterface.IMetadata{}
|
||||
for i := range wls {
|
||||
lw := localworkload.NewLocalWorkload(wls[i].GetObject())
|
||||
lw.SetPath(absPath)
|
||||
workloads[absPath] = append(workloads[absPath], lw)
|
||||
}
|
||||
}
|
||||
}
|
||||
return workloads, errs
|
||||
}
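Hypothetical usage of the HelmChart helper defined above, with the import path taken from this diff and the chart path assumed: load a chart, render it with its default values, and list the rendered workloads.

package main

import (
	"fmt"
	"log"

	"github.com/armosec/kubescape/v2/core/cautils"
)

func main() {
	chart, err := cautils.NewHelmChart("./examples/helm_chart")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("chart:", chart.GetName())

	workloads, errs := chart.GetWorkloadsWithDefaultValues()
	if len(errs) > 0 {
		log.Printf("template rendering errors: %v", errs)
	}
	for file, wls := range workloads {
		for _, w := range wls {
			fmt.Printf("%s -> %s\n", file, w.GetID())
		}
	}
}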
|
||||
core/cautils/helmchart_test.go (new file, 133 lines)
@@ -0,0 +1,133 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/localworkload"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
type HelmChartTestSuite struct {
|
||||
suite.Suite
|
||||
helmChartPath string
|
||||
expectedFiles []string
|
||||
expectedDefaultValues map[string]interface{}
|
||||
}
|
||||
|
||||
func TestHelmChartTestSuite(t *testing.T) {
|
||||
suite.Run(t, new(HelmChartTestSuite))
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) SetupSuite() {
|
||||
o, _ := os.Getwd()
|
||||
|
||||
s.helmChartPath = filepath.Join(filepath.Dir(o), "..", "examples", "helm_chart")
|
||||
|
||||
s.expectedFiles = []string{
|
||||
filepath.Join(s.helmChartPath, "templates", "clusterrolebinding.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "clusterrole.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "serviceaccount.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "rolebinding.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "role.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "cronjob.yaml"),
|
||||
}
|
||||
|
||||
var obj interface{}
|
||||
file, _ := ioutil.ReadFile(filepath.Join("testdata", "helm_expected_default_values.json"))
|
||||
_ = json.Unmarshal([]byte(file), &obj)
|
||||
s.expectedDefaultValues = obj.(map[string]interface{})
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestInvalidHelmDirectory() {
|
||||
_, err := NewHelmChart("/invalid_path")
|
||||
s.Error(err)
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestValidHelmDirectory() {
|
||||
chart, err := NewHelmChart(s.helmChartPath)
|
||||
s.NoError(err)
|
||||
s.NotNil(chart)
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestGetName() {
|
||||
chart, _ := NewHelmChart(s.helmChartPath)
|
||||
s.Equal("kubescape", chart.GetName())
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestGetDefaultValues() {
|
||||
chart, _ := NewHelmChart(s.helmChartPath)
|
||||
|
||||
values := chart.GetDefaultValues()
|
||||
|
||||
valuesJson, _ := json.Marshal(values)
|
||||
expectedValuesJson, _ := json.Marshal(s.expectedDefaultValues)
|
||||
|
||||
s.JSONEq(string(valuesJson), string(expectedValuesJson))
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestGetWorkloadsWithOverride() {
|
||||
chart, err := NewHelmChart(s.helmChartPath)
|
||||
s.NoError(err, "Expected a valid helm chart")
|
||||
|
||||
values := chart.GetDefaultValues()
|
||||
|
||||
// Default pullPolicy value = Always
|
||||
pullPolicyValue := values["image"].(map[string]interface{})["pullPolicy"].(string)
|
||||
s.Equal(pullPolicyValue, "Always")
|
||||
|
||||
// Override default value
|
||||
values["image"].(map[string]interface{})["pullPolicy"] = "Never"
|
||||
|
||||
fileToWorkloads, errs := chart.GetWorkloads(values)
|
||||
s.Len(errs, 0)
|
||||
|
||||
s.Lenf(fileToWorkloads, len(s.expectedFiles), "Expected %d files", len(s.expectedFiles))
|
||||
|
||||
for _, expectedFile := range s.expectedFiles {
|
||||
s.Contains(fileToWorkloads, expectedFile)
|
||||
s.FileExists(expectedFile)
|
||||
s.GreaterOrEqualf(len(fileToWorkloads[expectedFile]), 1, "Expected at least one workload in %q", expectedFile)
|
||||
|
||||
for i := range fileToWorkloads[expectedFile] {
|
||||
pathInWorkload := fileToWorkloads[expectedFile][i].(*localworkload.LocalWorkload).GetPath()
|
||||
s.Equal(pathInWorkload, expectedFile, "Expected GetPath() to return a valid path on workload")
|
||||
}
|
||||
|
||||
if strings.Contains(expectedFile, "cronjob.yaml") {
|
||||
jsonBytes, _ := json.Marshal(fileToWorkloads[expectedFile][0].GetObject())
|
||||
s.Contains(string(jsonBytes), "\"imagePullPolicy\":\"Never\"", "Expected the overridden value of imagePullPolicy to be 'Never'")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestGetWorkloadsMissingValue() {
|
||||
chart, _ := NewHelmChart(s.helmChartPath)
|
||||
|
||||
values := chart.GetDefaultValues()
|
||||
delete(values, "image")
|
||||
|
||||
fileToWorkloads, errs := chart.GetWorkloads(values)
|
||||
s.Nil(fileToWorkloads)
|
||||
s.Len(errs, 1, "Expected an error due to missing value")
|
||||
|
||||
expectedErrMsg := "<.Values.image.repository>: nil pointer"
|
||||
s.Containsf(errs[0].Error(), expectedErrMsg, "expected error containing %q, got %q", expectedErrMsg, errs[0])
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestIsHelmDirectory() {
|
||||
ok, err := IsHelmDirectory(s.helmChartPath)
|
||||
s.True(ok)
|
||||
s.NoError(err)
|
||||
|
||||
o, _ := os.Getwd()
|
||||
nonHelmDir := filepath.Join(filepath.Dir(o), "../examples/online-boutique")
|
||||
ok, err = IsHelmDirectory(nonHelmDir)
|
||||
s.False(ok)
|
||||
s.Contains(err.Error(), "no Chart.yaml exists in directory")
|
||||
}
|
||||
core/cautils/localgitrepository.go (new file, 127 lines)
@@ -0,0 +1,127 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/go-git-url/apis"
|
||||
gitv5 "github.com/go-git/go-git/v5"
|
||||
configv5 "github.com/go-git/go-git/v5/config"
|
||||
plumbingv5 "github.com/go-git/go-git/v5/plumbing"
|
||||
)
|
||||
|
||||
type LocalGitRepository struct {
|
||||
repo *gitv5.Repository
|
||||
head *plumbingv5.Reference
|
||||
config *configv5.Config
|
||||
}
|
||||
|
||||
func NewLocalGitRepository(path string) (*LocalGitRepository, error) {
|
||||
gitRepo, err := gitv5.PlainOpenWithOptions(path, &gitv5.PlainOpenOptions{DetectDotGit: true})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
head, err := gitRepo.Head()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !head.Name().IsBranch() {
|
||||
return nil, fmt.Errorf("current HEAD reference is not a branch")
|
||||
}
|
||||
|
||||
config, err := gitRepo.Config()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &LocalGitRepository{
|
||||
repo: gitRepo,
|
||||
head: head,
|
||||
config: config,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetBranchName get current branch name
|
||||
func (g *LocalGitRepository) GetBranchName() string {
|
||||
return g.head.Name().Short()
|
||||
}
|
||||
|
||||
// GetRemoteUrl get default remote URL
|
||||
func (g *LocalGitRepository) GetRemoteUrl() (string, error) {
|
||||
branchName := g.GetBranchName()
|
||||
if branchRef, branchFound := g.config.Branches[branchName]; branchFound {
|
||||
remoteName := branchRef.Remote
|
||||
if len(g.config.Remotes[remoteName].URLs) == 0 {
|
||||
return "", fmt.Errorf("expected to find URLs for remote '%s', branch '%s'", remoteName, branchName)
|
||||
}
|
||||
return g.config.Remotes[remoteName].URLs[0], nil
|
||||
}
|
||||
|
||||
const defaultRemoteName string = "origin"
|
||||
if len(g.config.Remotes[defaultRemoteName].URLs) == 0 {
|
||||
return "", fmt.Errorf("expected to find URLs for remote '%s'", defaultRemoteName)
|
||||
}
|
||||
return g.config.Remotes[defaultRemoteName].URLs[0], nil
|
||||
}
|
||||
|
||||
// GetName get origin name without the .git suffix
|
||||
func (g *LocalGitRepository) GetName() (string, error) {
|
||||
originUrl, err := g.GetRemoteUrl()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
baseName := path.Base(originUrl)
|
||||
// remove .git
|
||||
return strings.TrimSuffix(baseName, ".git"), nil
|
||||
}
|
||||
|
||||
// GetLastCommit get latest commit object
|
||||
func (g *LocalGitRepository) GetLastCommit() (*apis.Commit, error) {
|
||||
return g.GetFileLastCommit("")
|
||||
}
|
||||
|
||||
// GetFileLastCommit get file latest commit object, if empty will return latest commit
|
||||
func (g *LocalGitRepository) GetFileLastCommit(filePath string) (*apis.Commit, error) {
|
||||
// By default, returns commit information from current HEAD
|
||||
logOptions := &gitv5.LogOptions{}
|
||||
|
||||
if filePath != "" {
|
||||
logOptions.FileName = &filePath
|
||||
logOptions.Order = gitv5.LogOrderCommitterTime // faster -> LogOrderDFSPost
|
||||
}
|
||||
|
||||
cIter, err := g.repo.Log(logOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
commit, err := cIter.Next()
|
||||
defer cIter.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &apis.Commit{
|
||||
SHA: commit.Hash.String(),
|
||||
Author: apis.Committer{
|
||||
Name: commit.Author.Name,
|
||||
Email: commit.Author.Email,
|
||||
Date: commit.Author.When,
|
||||
},
|
||||
Message: commit.Message,
|
||||
Committer: apis.Committer{},
|
||||
Files: []apis.Files{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (g *LocalGitRepository) GetRootDir() (string, error) {
|
||||
wt, err := g.repo.Worktree()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get repo root")
|
||||
}
|
||||
|
||||
return wt.Filesystem.Root(), nil
|
||||
}
|
||||
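The new helper is self-contained, so a minimal usage sketch (not part of this change) is enough to show the intended call pattern. The repository path and the v2 import path are placeholders/assumptions based on the module rename elsewhere in this diff:

package main

import (
    "fmt"
    "log"

    "github.com/armosec/kubescape/v2/core/cautils"
)

func main() {
    // DetectDotGit walks up parent directories, so any path inside a working tree works.
    repo, err := cautils.NewLocalGitRepository("/path/to/repo") // placeholder path
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println("branch:", repo.GetBranchName())

    if url, err := repo.GetRemoteUrl(); err == nil {
        fmt.Println("remote:", url)
    }

    if commit, err := repo.GetLastCommit(); err == nil {
        fmt.Println("last commit:", commit.SHA, "by", commit.Author.Name)
    }
}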
core/cautils/localgitrepository_test.go (Normal file, 154 lines)
@@ -0,0 +1,154 @@
package cautils

import (
    "archive/zip"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "strings"
    "testing"

    "github.com/stretchr/testify/suite"
)

type LocalGitRepositoryTestSuite struct {
    suite.Suite
    archive           *zip.ReadCloser
    gitRepositoryPath string
    destinationPath   string
}

func unzipFile(zipPath, destinationFolder string) (*zip.ReadCloser, error) {
    archive, err := zip.OpenReader(zipPath)
    if err != nil {
        return nil, err
    }
    for _, f := range archive.File {
        filePath := filepath.Join(destinationFolder, f.Name)
        if !strings.HasPrefix(filePath, filepath.Clean(destinationFolder)+string(os.PathSeparator)) {
            return nil, fmt.Errorf("invalid file path")
        }
        if f.FileInfo().IsDir() {
            os.MkdirAll(filePath, os.ModePerm)
            continue
        }

        if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
            return nil, err
        }

        dstFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
        if err != nil {
            return nil, err
        }

        fileInArchive, err := f.Open()
        if err != nil {
            return nil, err
        }

        if _, err := io.Copy(dstFile, fileInArchive); err != nil {
            return nil, err
        }

        dstFile.Close()
        fileInArchive.Close()
    }

    return archive, err
}

func (s *LocalGitRepositoryTestSuite) SetupSuite() {
    zippedFixturePath := filepath.Join(".", "testdata", "localrepo.git")
    destinationPath := filepath.Join(".", "testdata", "temp")
    gitRepositoryPath := filepath.Join(destinationPath, "localrepo")

    os.RemoveAll(destinationPath)
    archive, err := unzipFile(zippedFixturePath, destinationPath)

    if err == nil {
        s.archive = archive
        s.gitRepositoryPath = gitRepositoryPath
        s.destinationPath = destinationPath
    }
}

func TestLocalGitRepositoryTestSuite(t *testing.T) {
    suite.Run(t, new(LocalGitRepositoryTestSuite))
}

func (s *LocalGitRepositoryTestSuite) TearDownSuite() {
    if s.archive != nil {
        s.archive.Close()
    }
    os.RemoveAll(s.destinationPath)
}

func (s *LocalGitRepositoryTestSuite) TestInvalidRepositoryPath() {
    if _, err := NewLocalGitRepository("/invalidpath"); s.Error(err) {
        s.Equal("repository does not exist", err.Error())
    }
}

func (s *LocalGitRepositoryTestSuite) TestGetBranchName() {
    if localRepo, err := NewLocalGitRepository(s.gitRepositoryPath); s.NoError(err) {
        s.Equal("master", localRepo.GetBranchName())
    }
}

func (s *LocalGitRepositoryTestSuite) TestGetName() {
    if localRepo, err := NewLocalGitRepository(s.gitRepositoryPath); s.NoError(err) {
        if name, err := localRepo.GetName(); s.NoError(err) {
            s.Equal("localrepo", name)
        }
    }
}

func (s *LocalGitRepositoryTestSuite) TestGetOriginUrl() {
    if localRepo, err := NewLocalGitRepository(s.gitRepositoryPath); s.NoError(err) {
        if url, err := localRepo.GetRemoteUrl(); s.NoError(err) {
            s.Equal("git@github.com:testuser/localrepo", url)
        }
    }
}

func (s *LocalGitRepositoryTestSuite) TestGetLastCommit() {
    if localRepo, err := NewLocalGitRepository(s.gitRepositoryPath); s.NoError(err) {
        if commit, err := localRepo.GetLastCommit(); s.NoError(err) {
            s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
            s.Equal("Amir Malka", commit.Author.Name)
            s.Equal("amirm@armosec.io", commit.Author.Email)
            s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
            s.Equal("added file B\n", commit.Message)
        }
    }
}

func (s *LocalGitRepositoryTestSuite) TestGetFileLastCommit() {
    s.Run("fileA", func() {
        if localRepo, err := NewLocalGitRepository(s.gitRepositoryPath); s.NoError(err) {
            if commit, err := localRepo.GetFileLastCommit("fileA"); s.NoError(err) {
                s.Equal("9fae4be19624297947d2b605cefbff516628612d", commit.SHA)
                s.Equal("Amir Malka", commit.Author.Name)
                s.Equal("amirm@armosec.io", commit.Author.Email)
                s.Equal("2022-05-22 18:55:48 +0300 +0300", commit.Author.Date.String())
                s.Equal("added file A\n", commit.Message)
            }
        }
    })

    s.Run("fileB", func() {
        if localRepo, err := NewLocalGitRepository(s.gitRepositoryPath); s.NoError(err) {
            if commit, err := localRepo.GetFileLastCommit("dirA/fileB"); s.NoError(err) {
                s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
                s.Equal("Amir Malka", commit.Author.Name)
                s.Equal("amirm@armosec.io", commit.Author.Email)
                s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
                s.Equal("added file B\n", commit.Message)
            }
        }
    })
}
@@ -4,10 +4,10 @@ import (
 	"os"
 	"strings"

-	"github.com/armosec/kubescape/core/cautils/logger/helpers"
-	"github.com/armosec/kubescape/core/cautils/logger/nonelogger"
-	"github.com/armosec/kubescape/core/cautils/logger/prettylogger"
-	"github.com/armosec/kubescape/core/cautils/logger/zaplogger"
+	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
+	"github.com/armosec/kubescape/v2/core/cautils/logger/nonelogger"
+	"github.com/armosec/kubescape/v2/core/cautils/logger/prettylogger"
+	"github.com/armosec/kubescape/v2/core/cautils/logger/zaplogger"
 )

 type ILogger interface {

@@ -3,7 +3,7 @@ package nonelogger
 import (
 	"os"

-	"github.com/armosec/kubescape/core/cautils/logger/helpers"
+	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
 )

 const LoggerName string = "none"

@@ -3,7 +3,7 @@ package prettylogger
 import (
 	"io"

-	"github.com/armosec/kubescape/core/cautils/logger/helpers"
+	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
 	"github.com/fatih/color"
 )

@@ -5,7 +5,7 @@ import (
 	"os"
 	"sync"

-	"github.com/armosec/kubescape/core/cautils/logger/helpers"
+	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
 )

 const LoggerName string = "pretty"

@@ -3,7 +3,7 @@ package zaplogger
 import (
 	"os"

-	"github.com/armosec/kubescape/core/cautils/logger/helpers"
+	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
 	"go.uber.org/zap"
 	"go.uber.org/zap/zapcore"
 )
@@ -8,6 +8,13 @@ type RootInfo struct {

 	ArmoBEURLs    string // armo url
 	ArmoBEURLsDep string // armo url

 }

+type Credentials struct {
+	Account   string
+	ClientID  string
+	SecretKey string
+}
+
 // func (rootInfo *RootInfo) InitLogger() {
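A short sketch of how a caller might populate the new Credentials struct instead of the old bare account string. This is not part of the diff; the values are placeholders, and the ScanInfo.Credentials field it sets is the one introduced in the scaninfo.go hunk below:

package main

import "github.com/armosec/kubescape/v2/core/cautils"

func main() {
    // Placeholder values; the field names mirror the Credentials type added above.
    creds := cautils.Credentials{
        Account:   "<account-guid>",
        ClientID:  "<client-id>",
        SecretKey: "<secret-key>",
    }

    scanInfo := cautils.ScanInfo{
        Credentials: creds, // replaces the old string Account field
        Submit:      true,
    }
    _ = scanInfo
}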
@@ -8,18 +8,36 @@ import (
 	"path/filepath"
 	"strings"

 	"github.com/armosec/armoapi-go/armotypes"
+	apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
+
+	giturl "github.com/armosec/go-git-url"
 	"github.com/armosec/k8s-interface/k8sinterface"
-	"github.com/armosec/kubescape/core/cautils/getter"
-	"github.com/armosec/kubescape/core/cautils/logger"
-	"github.com/armosec/kubescape/core/cautils/logger/helpers"
+	"github.com/armosec/kubescape/v2/core/cautils/getter"
+	"github.com/armosec/kubescape/v2/core/cautils/logger"
+	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
 	"github.com/armosec/opa-utils/reporthandling"
 	reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
 	"github.com/google/uuid"
 )

+type ScanningContext string
+
 const (
-	ScanCluster    string = "cluster"
-	ScanLocalFiles string = "yaml"
+	ContextCluster  ScanningContext = "cluster"
+	ContextFile     ScanningContext = "single-file"
+	ContextDir      ScanningContext = "local-dir"
+	ContextGitURL   ScanningContext = "git-url"
+	ContextGitLocal ScanningContext = "git-local"
+)
+
+const ( // deprecated
+	ScopeCluster = "cluster"
+	ScopeYAML    = "yaml"
+)
+const (
+	// ScanCluster    string = "cluster"
+	// ScanLocalFiles string = "yaml"
 	localControlInputsFilename string = "controls-inputs.json"
 	localExceptionsFilename    string = "exceptions.json"
 )

@@ -28,6 +46,10 @@ type BoolPtrFlag struct {
 	valPtr *bool
 }

+func NewBoolPtr(b *bool) BoolPtrFlag {
+	return BoolPtrFlag{valPtr: b}
+}
+
 func (bpf *BoolPtrFlag) Type() string {
 	return "bool"
 }

@@ -63,32 +85,46 @@ func (bpf *BoolPtrFlag) Set(val string) error {
 }

+// TODO - UPDATE
+type ViewTypes string
+
+const (
+	ResourceViewType ViewTypes = "resource"
+	ControlViewType  ViewTypes = "control"
+)
+
+type PolicyIdentifier struct {
+	Name        string                        // policy name e.g. nsa,mitre,c-0012
+	Kind        apisv1.NotificationPolicyKind // policy kind e.g. Framework,Control,Rule
+	Designators armotypes.PortalDesignator
+}
+
 type ScanInfo struct {
-	Getters                                              // TODO - remove from object
-	PolicyIdentifier   []reporthandling.PolicyIdentifier // TODO - remove from object
-	UseExceptions      string      // Load file with exceptions configuration
-	ControlsInputs     string      // Load file with inputs for controls
-	UseFrom            []string    // Load framework from local file (instead of download). Use when running offline
-	UseDefault         bool        // Load framework from cached file (instead of download). Use when running offline
-	UseArtifactsFrom   string      // Load artifacts from local path. Use when running offline
-	VerboseMode        bool        // Display all of the input resources and not only failed resources
-	Format             string      // Format results (table, json, junit ...)
-	Output             string      // Store results in an output file, Output file name
-	FormatVersion      string      // Output object can be differnet between versions, this is for testing and backward compatibility
-	ExcludedNamespaces string      // used for host scanner namespace
-	IncludeNamespaces  string      // DEPRECATED?
-	InputPatterns      []string    // Yaml files input patterns
-	Silent             bool        // Silent mode - Do not print progress logs
-	FailThreshold      float32     // Failure score threshold
-	Submit             bool        // Submit results to Armo BE
-	ScanID             string      // Report id of the current scan
-	HostSensorEnabled  BoolPtrFlag // Deploy ARMO K8s host scanner to collect data from certain controls
-	HostSensorYamlPath string      // Path to hostsensor file
-	Local              bool        // Do not submit results
-	Account            string      // account ID
-	KubeContext        string      // context name
-	FrameworkScan      bool        // false if scanning control
-	ScanAll            bool        // true if scan all frameworks
+	Getters                              // TODO - remove from object
+	PolicyIdentifier   []PolicyIdentifier // TODO - remove from object
+	UseExceptions      string      // Load file with exceptions configuration
+	ControlsInputs     string      // Load file with inputs for controls
+	UseFrom            []string    // Load framework from local file (instead of download). Use when running offline
+	UseDefault         bool        // Load framework from cached file (instead of download). Use when running offline
+	UseArtifactsFrom   string      // Load artifacts from local path. Use when running offline
+	VerboseMode        bool        // Display all of the input resources and not only failed resources
+	View               string      // Display all of the input resources and not only failed resources
+	Format             string      // Format results (table, json, junit ...)
+	Output             string      // Store results in an output file, Output file name
+	FormatVersion      string      // Output object can be differnet between versions, this is for testing and backward compatibility
+	ExcludedNamespaces string      // used for host scanner namespace
+	IncludeNamespaces  string      //
+	InputPatterns      []string    // Yaml files input patterns
+	Silent             bool        // Silent mode - Do not print progress logs
+	FailThreshold      float32     // Failure score threshold
+	Submit             bool        // Submit results to Armo BE
+	ScanID             string      // Report id of the current scan
+	HostSensorEnabled  BoolPtrFlag // Deploy ARMO K8s host scanner to collect data from certain controls
+	HostSensorYamlPath string      // Path to hostsensor file
+	Local              bool        // Do not submit results
+	Credentials        Credentials // account ID
+	KubeContext        string      // context name
+	FrameworkScan      bool        // false if scanning control
+	ScanAll            bool        // true if scan all frameworks
 }

 type Getters struct {

@@ -168,18 +204,18 @@ func (scanInfo *ScanInfo) setOutputFile() {
 	}
 }

-func (scanInfo *ScanInfo) GetScanningEnvironment() string {
-	if len(scanInfo.InputPatterns) != 0 {
-		return ScanLocalFiles
-	}
-	return ScanCluster
-}
+// func (scanInfo *ScanInfo) GetScanningEnvironment() string {
+// 	if len(scanInfo.InputPatterns) != 0 {
+// 		return ScanLocalFiles
+// 	}
+// 	return ScanCluster
+// }

-func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind reporthandling.NotificationPolicyKind) {
+func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind apisv1.NotificationPolicyKind) {
 	for _, policy := range policies {
 		if !scanInfo.contains(policy) {
-			newPolicy := reporthandling.PolicyIdentifier{}
-			newPolicy.Kind = kind // reporthandling.KindFramework
+			newPolicy := PolicyIdentifier{}
+			newPolicy.Kind = kind
 			newPolicy.Name = policy
 			scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
 		}

@@ -219,69 +255,183 @@ func scanInfoToScanMetadata(scanInfo *ScanInfo) *reporthandlingv2.Metadata {
 		metadata.ScanMetadata.TargetNames = append(metadata.ScanMetadata.TargetNames, policy.Name)
 	}

 	metadata.ScanMetadata.KubescapeVersion = BuildNumber
-	metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
 	metadata.ScanMetadata.FailThreshold = scanInfo.FailThreshold
 	metadata.ScanMetadata.HostScanner = scanInfo.HostSensorEnabled.GetBool()
+	metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
 	metadata.ScanMetadata.ControlsInputs = scanInfo.ControlsInputs

-	metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Cluster
-	if scanInfo.GetScanningEnvironment() == ScanLocalFiles {
-		metadata.ScanMetadata.ScanningTarget = reporthandlingv2.File
-	}
-
 	inputFiles := ""
 	if len(scanInfo.InputPatterns) > 0 {
 		inputFiles = scanInfo.InputPatterns[0]
 	}

+	metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Cluster
+	if GetScanningContext(inputFiles) != ContextCluster {
+		metadata.ScanMetadata.ScanningTarget = reporthandlingv2.File
+	}
+
 	setContextMetadata(&metadata.ContextMetadata, inputFiles)

 	return metadata
 }

-func setContextMetadata(contextMetadata *reporthandlingv2.ContextMetadata, input string) {
-	// if cluster
+func (scanInfo *ScanInfo) GetScanningContext() ScanningContext {
+	input := ""
+	if len(scanInfo.InputPatterns) > 0 {
+		input = scanInfo.InputPatterns[0]
+	}
+	return GetScanningContext(input)
+}
+
+// GetScanningContext get scanning context from the input param
+func GetScanningContext(input string) ScanningContext {
+	// cluster
 	if input == "" {
-		contextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{
-			ContextName: k8sinterface.GetClusterName(),
-		}
-		return
+		return ContextCluster
 	}

-	// if url
-	if strings.HasPrefix(input, "http") { // TODO - check if can parse
-		return
+	// url
+	if _, err := giturl.NewGitURL(input); err == nil {
+		return ContextGitURL
 	}

-	if !filepath.IsAbs(input) {
+	if !filepath.IsAbs(input) { // parse path
 		if o, err := os.Getwd(); err == nil {
 			input = filepath.Join(o, input)
 		}
 	}

-	// if single file
+	// local git repo
+	if _, err := NewLocalGitRepository(input); err == nil {
+		return ContextGitLocal
+	}
+
+	// single file
 	if IsFile(input) {
-		contextMetadata.FileContextMetadata = &reporthandlingv2.FileContextMetadata{
-			FilePath: input,
-			HostName: getHostname(),
-		}
-		return
+		return ContextFile
 	}

-	// if dir/glob
-	if !IsFile(input) {
+	// dir/glob
+	return ContextDir
 }
+
+func setContextMetadata(contextMetadata *reporthandlingv2.ContextMetadata, input string) {
+	switch GetScanningContext(input) {
+	case ContextCluster:
+		contextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{
+			ContextName: k8sinterface.GetContextName(),
+		}
+	case ContextGitURL:
+		// url
+		context, err := metadataGitURL(input)
+		if err != nil {
+			logger.L().Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
+		}
+		contextMetadata.RepoContextMetadata = context
+	case ContextDir:
 		contextMetadata.DirectoryContextMetadata = &reporthandlingv2.DirectoryContextMetadata{
-			BasePath: input,
+			BasePath: getAbsPath(input),
 			HostName: getHostname(),
 		}
-		return
+	case ContextFile:
+		contextMetadata.FileContextMetadata = &reporthandlingv2.FileContextMetadata{
+			FilePath: getAbsPath(input),
+			HostName: getHostname(),
+		}
+	case ContextGitLocal:
+		// local
+		context, err := metadataGitLocal(input)
+		if err != nil {
+			logger.L().Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
+		}
+		contextMetadata.RepoContextMetadata = context
+	}

 }

+func metadataGitURL(input string) (*reporthandlingv2.RepoContextMetadata, error) {
+	context := &reporthandlingv2.RepoContextMetadata{}
+	gitParser, err := giturl.NewGitAPI(input)
+	if err != nil {
+		return context, fmt.Errorf("%w", err)
+	}
+	if gitParser.GetBranchName() == "" {
+		gitParser.SetDefaultBranchName()
+	}
+	context.Provider = gitParser.GetProvider()
+	context.Repo = gitParser.GetRepoName()
+	context.Owner = gitParser.GetOwnerName()
+	context.Branch = gitParser.GetBranchName()
+	context.RemoteURL = gitParser.GetURL().String()
+
+	commit, err := gitParser.GetLatestCommit()
+	if err != nil {
+		return context, fmt.Errorf("%w", err)
+	}
+
+	context.LastCommit = reporthandling.LastCommit{
+		Hash:          commit.SHA,
+		Date:          commit.Committer.Date,
+		CommitterName: commit.Committer.Name,
+	}
+
+	return context, nil
+}
+
+func metadataGitLocal(input string) (*reporthandlingv2.RepoContextMetadata, error) {
+	gitParser, err := NewLocalGitRepository(input)
+	if err != nil {
+		return nil, fmt.Errorf("%w", err)
+	}
+	remoteURL, err := gitParser.GetRemoteUrl()
+	if err != nil {
+		return nil, fmt.Errorf("%w", err)
+	}
+	context := &reporthandlingv2.RepoContextMetadata{}
+	gitParserURL, err := giturl.NewGitURL(remoteURL)
+	if err != nil {
+		return context, fmt.Errorf("%w", err)
+	}
+	gitParserURL.SetBranchName(gitParser.GetBranchName())
+
+	context.Provider = gitParserURL.GetProvider()
+	context.Repo = gitParserURL.GetRepoName()
+	context.Owner = gitParserURL.GetOwnerName()
+	context.Branch = gitParserURL.GetBranchName()
+	context.RemoteURL = gitParserURL.GetURL().String()
+
+	commit, err := gitParser.GetLastCommit()
+	if err != nil {
+		return context, fmt.Errorf("%w", err)
+	}
+	context.LastCommit = reporthandling.LastCommit{
+		Hash:          commit.SHA,
+		Date:          commit.Committer.Date,
+		CommitterName: commit.Committer.Name,
+	}
+
+	return context, nil
+}
 func getHostname() string {
 	if h, e := os.Hostname(); e == nil {
 		return h
 	}
 	return ""
 }

+func getAbsPath(p string) string {
+	if !filepath.IsAbs(p) { // parse path
+		if o, err := os.Getwd(); err == nil {
+			return filepath.Join(o, p)
+		}
+	}
+	return p
+}
+
+// ScanningContextToScanningScope convert the context to the deprecated scope
+func ScanningContextToScanningScope(scanningContext ScanningContext) string {
+	if scanningContext == ContextCluster {
+		return ScopeCluster
+	}
+	return ScopeYAML
+}
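A sketch (not part of the diff) of how the new scanning-context helpers behave for a few representative inputs. The import path follows the v2 module rename in this change; the paths are placeholders, and the result for a directory depends on whether it sits inside a git working tree:

package main

import (
    "fmt"

    "github.com/armosec/kubescape/v2/core/cautils"
)

func main() {
    // An empty input means no input patterns were given, i.e. a cluster scan.
    fmt.Println(cautils.GetScanningContext(""))                                     // cluster
    fmt.Println(cautils.GetScanningContext("https://github.com/armosec/kubescape")) // git-url
    fmt.Println(cautils.GetScanningContext("./deploy/manifests"))                   // git-local if inside a repo, otherwise local-dir
    fmt.Println(cautils.GetScanningContext("./deployment.yaml"))                    // single-file, if the file exists

    // The deprecated scope string still sent to the version-check endpoint:
    fmt.Println(cautils.ScanningContextToScanningScope(cautils.ContextGitURL)) // "yaml"
}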
@@ -7,16 +7,6 @@ import (
 	"github.com/stretchr/testify/assert"
 )

-// func TestSetInputPatterns(t *testing.T) { //Unitest
-// 	{
-// 		scanInfo := ScanInfo{
-// 			InputPatterns: []string{"file"},
-// 		}
-// 		scanInfo.setInputPatterns()
-// 		assert.Equal(t, "file", scanInfo.InputPatterns[0])
-// 	}
-// }
-
 func TestSetContextMetadata(t *testing.T) {
 	{
 		ctx := reporthandlingv2.ContextMetadata{}

@@ -28,26 +18,6 @@ func TestSetContextMetadata(t *testing.T) {
 		assert.Nil(t, ctx.HelmContextMetadata)
 		assert.Nil(t, ctx.RepoContextMetadata)
 	}
-	{
-		ctx := reporthandlingv2.ContextMetadata{}
-		setContextMetadata(&ctx, "file")
-
-		assert.Nil(t, ctx.ClusterContextMetadata)
-		assert.NotNil(t, ctx.DirectoryContextMetadata)
-		assert.Nil(t, ctx.FileContextMetadata)
-		assert.Nil(t, ctx.HelmContextMetadata)
-		assert.Nil(t, ctx.RepoContextMetadata)
-	}
-	{
-		ctx := reporthandlingv2.ContextMetadata{}
-		setContextMetadata(&ctx, "scaninfo_test.go")
-
-		assert.Nil(t, ctx.ClusterContextMetadata)
-		assert.Nil(t, ctx.DirectoryContextMetadata)
-		assert.NotNil(t, ctx.FileContextMetadata)
-		assert.Nil(t, ctx.HelmContextMetadata)
-		assert.Nil(t, ctx.RepoContextMetadata)
-	}
 	{
 		ctx := reporthandlingv2.ContextMetadata{}
 		setContextMetadata(&ctx, "https://github.com/armosec/kubescape")

@@ -56,10 +26,22 @@ func TestSetContextMetadata(t *testing.T) {
 		assert.Nil(t, ctx.DirectoryContextMetadata)
 		assert.Nil(t, ctx.FileContextMetadata)
 		assert.Nil(t, ctx.HelmContextMetadata)
-		assert.Nil(t, ctx.RepoContextMetadata) // TODO
+		assert.NotNil(t, ctx.RepoContextMetadata)
+
+		assert.Equal(t, "kubescape", ctx.RepoContextMetadata.Repo)
+		assert.Equal(t, "armosec", ctx.RepoContextMetadata.Owner)
+		assert.Equal(t, "master", ctx.RepoContextMetadata.Branch)
 	}
 }

 func TestGetHostname(t *testing.T) {
 	assert.NotEqual(t, "", getHostname())
 }
+
+func TestGetScanningContext(t *testing.T) {
+	assert.Equal(t, ContextCluster, GetScanningContext(""))
+	// assert.Equal(t, ContextDir, GetScanningContext("/"))
+	assert.Equal(t, ContextGitURL, GetScanningContext("https://github.com/armosec/kubescape"))
+	// assert.Equal(t, ContextFile, GetScanningContext(path.Join(".", "testdata", "localrepo.git")))
+	// assert.Equal(t, ContextGitLocal, GetScanningContext(path.Join(".", "testdata")))
+}
core/cautils/testdata/helm_expected_default_values.json (vendored, Normal file, 40 lines)
@@ -0,0 +1,40 @@
{
  "affinity": {},
  "configMap": {
    "create": false,
    "params": {
      "clusterName": "<MyK8sClusterName>",
      "customerGUID": "<MyGUID>"
    }
  },
  "fullnameOverride": "",
  "image": {
    "imageName": "kubescape",
    "pullPolicy": "Always",
    "repository": "quay.io/armosec",
    "tag": "latest"
  },
  "imagePullSecrets": [],
  "nameOverride": "",
  "nodeSelector": {},
  "podAnnotations": {},
  "podSecurityContext": {},
  "resources": {
    "limits": {
      "cpu": "500m",
      "memory": "512Mi"
    },
    "requests": {
      "cpu": "200m",
      "memory": "256Mi"
    }
  },
  "schedule": "* * 1 * *",
  "securityContext": {},
  "serviceAccount": {
    "annotations": {},
    "create": true,
    "name": "kubescape-discovery"
  },
  "tolerations": []
}
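A sketch of how a test might consume this fixture. Only the fixture path and the JSON shape come from the diff; the helper name and the way the actual chart values are obtained are assumptions for illustration:

package main

import (
    "encoding/json"
    "fmt"
    "os"
    "reflect"
)

// defaultValuesMatch compares chart values obtained elsewhere against the expected fixture.
func defaultValuesMatch(actual map[string]interface{}) (bool, error) {
    raw, err := os.ReadFile("core/cautils/testdata/helm_expected_default_values.json")
    if err != nil {
        return false, err
    }
    var expected map[string]interface{}
    if err := json.Unmarshal(raw, &expected); err != nil {
        return false, err
    }
    return reflect.DeepEqual(expected, actual), nil
}

func main() {
    ok, err := defaultValuesMatch(map[string]interface{}{})
    fmt.Println(ok, err)
}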
BIN	core/cautils/testdata/localrepo.git (vendored, Normal file)
Binary file not shown.
@@ -6,10 +6,10 @@ import (
 	"net/http"
 	"os"

-	"github.com/armosec/kubescape/core/cautils/getter"
-	"github.com/armosec/kubescape/core/cautils/logger"
-	"github.com/armosec/kubescape/core/cautils/logger/helpers"
-	pkgutils "github.com/armosec/utils-go/utils"
+	"github.com/armosec/kubescape/v2/core/cautils/getter"
+	"github.com/armosec/kubescape/v2/core/cautils/logger"
+	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
+	"github.com/armosec/utils-go/boolutils"
 	"golang.org/x/mod/semver"
 )

@@ -17,6 +17,7 @@ const SKIP_VERSION_CHECK_DEPRECATED = "KUBESCAPE_SKIP_UPDATE_CHECK"
 const SKIP_VERSION_CHECK = "KS_SKIP_UPDATE_CHECK"

 var BuildNumber string
+var Client string

 const UnknownBuildNumber = "unknown"

@@ -28,9 +29,9 @@ func NewIVersionCheckHandler() IVersionCheckHandler {
 	if BuildNumber == "" {
 		logger.L().Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
 	}
-	if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && pkgutils.StringToBool(v) {
+	if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && boolutils.StringToBool(v) {
 		return NewVersionCheckHandlerMock()
-	} else if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_DEPRECATED); ok && pkgutils.StringToBool(v) {
+	} else if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_DEPRECATED); ok && boolutils.StringToBool(v) {
 		return NewVersionCheckHandlerMock()
 	}
 	return NewVersionCheckHandler()

@@ -48,10 +49,12 @@ type VersionCheckHandler struct {
 }
 type VersionCheckRequest struct {
 	Client           string `json:"client"`           // kubescape
+	ClientBuild      string `json:"clientBuild"`      // client build environment
 	ClientVersion    string `json:"clientVersion"`    // kubescape version
 	Framework        string `json:"framework"`        // framework name
 	FrameworkVersion string `json:"frameworkVersion"` // framework version
-	ScanningTarget   string `json:"target"`           // scanning target- cluster/yaml
+	ScanningTarget   string `json:"target"`           // Deprecated
+	ScanningContext  string `json:"context"`          // scanning context- cluster/file/gitURL/localGit/dir
 }

 type VersionCheckResponse struct {

@@ -74,8 +77,12 @@ func NewVersionCheckRequest(buildNumber, frameworkName, frameworkVersion, scanni
 	if scanningTarget == "" {
 		scanningTarget = "unknown"
 	}
+	if Client == "" {
+		Client = "local-build"
+	}
 	return &VersionCheckRequest{
 		Client:           "kubescape",
+		ClientBuild:      Client,
 		ClientVersion:    buildNumber,
 		Framework:        frameworkName,
 		FrameworkVersion: frameworkVersion,

@@ -101,7 +108,7 @@ func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckReques
 	}

 	if latestVersion.ClientUpdate != "" {
-		if BuildNumber != "" && semver.Compare(BuildNumber, latestVersion.ClientUpdate) >= 0 {
+		if BuildNumber != "" && semver.Compare(BuildNumber, latestVersion.ClientUpdate) == -1 {
 			logger.L().Warning(warningMessage(latestVersion.ClientUpdate))
 		}
 	}
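The comparison flip above is the functional fix here: the outdated-version warning should fire only when the running build is strictly older than the latest published client, not when it is equal or newer. A standalone sketch of that check (not part of the diff, message text is illustrative):

package main

import (
    "fmt"

    "golang.org/x/mod/semver"
)

// warnIfOutdated mirrors the corrected condition: warn only when the current
// build compares strictly lower than the latest published client version.
func warnIfOutdated(buildNumber, latest string) {
    if buildNumber != "" && semver.Compare(buildNumber, latest) == -1 {
        fmt.Printf("Warning: version %s is outdated, latest is %s\n", buildNumber, latest)
    }
}

func main() {
    warnIfOutdated("v2.0.150", "v2.0.151") // warns
    warnIfOutdated("v2.0.151", "v2.0.151") // silent (the old >= 0 check warned here)
    warnIfOutdated("v2.0.152", "v2.0.151") // silent
}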
@@ -6,6 +6,7 @@ import (
 	"github.com/armosec/armoapi-go/armotypes"
 	"github.com/armosec/opa-utils/reporthandling"
 	"github.com/stretchr/testify/assert"
+	"golang.org/x/mod/semver"
 )

 func TestGetKubernetesObjects(t *testing.T) {

@@ -30,9 +31,38 @@ func TestIsRuleKubescapeVersionCompatible(t *testing.T) {
 	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))

 	// should only use rules that version is in range of use
-	buildNumberMock = "v1.0.133"
+	buildNumberMock = "v1.0.130"
+	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
+	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
+	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
+	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
+
+	// should only use rules that version is in range of use
+	buildNumberMock = "v1.0.132"
+	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
+	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
+	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
+	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
+
+	// should only use rules that version is in range of use
+	buildNumberMock = "v1.0.133"
 	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
 	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
 	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
 	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
+
+	// should only use rules that version is in range of use
+	buildNumberMock = "v1.0.135"
+	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
+	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
+	assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
+	assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
 }
+
+func TestCheckLatestVersion(t *testing.T) {
+	assert.Equal(t, -1, semver.Compare("v2.0.150", "v2.0.151"))
+	assert.Equal(t, 0, semver.Compare("v2.0.150", "v2.0.150"))
+	assert.Equal(t, 1, semver.Compare("v2.0.150", "v2.0.149"))
+	assert.Equal(t, -1, semver.Compare("v2.0.150", "v3.0.150"))
+}
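A sketch of the range semantics these assertions imply: a rule is usable from one Kubescape version up to, but not including, the version at which a newer variant of the rule takes over. The attribute keys below are assumptions for illustration only; the real attribute names live in the rule definitions, not in this diff:

package main

import (
    "fmt"

    "golang.org/x/mod/semver"
)

// isCompatible is a hypothetical stand-in for the compatibility check exercised above:
// the build must be at or above the rule's "from" version and below its "until" version.
func isCompatible(attributes map[string]interface{}, buildNumber string) bool {
    if from, ok := attributes["useFromKubescapeVersion"].(string); ok { // assumed key
        if semver.Compare(buildNumber, from) < 0 {
            return false
        }
    }
    if until, ok := attributes["useUntilKubescapeVersion"].(string); ok { // assumed key
        if semver.Compare(buildNumber, until) >= 0 {
            return false
        }
    }
    return true
}

func main() {
    attrs := map[string]interface{}{
        "useFromKubescapeVersion":  "v1.0.132",
        "useUntilKubescapeVersion": "v1.0.133",
    }
    fmt.Println(isCompatible(attrs, "v1.0.132")) // true
    fmt.Println(isCompatible(attrs, "v1.0.133")) // false
}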
@@ -14,7 +14,10 @@ var (
 		"KernelVersion",
 		"LinuxSecurityHardeningStatus",
 		"OpenPortsList",
-		"LinuxKernelVariables"}
+		"LinuxKernelVariables",
+		"KubeletInfo",
+		"KubeProxyInfo",
+	}
 	CloudResources = []string{"ClusterDescribe"}
 )
@@ -3,12 +3,12 @@ package core
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
|
||||
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
|
||||
)
|
||||
|
||||
func (ks *Kubescape) SetCachedConfig(setConfig *metav1.SetConfig) error {
|
||||
|
||||
tenant := getTenantConfig("", "", getKubernetesApi())
|
||||
tenant := getTenantConfig(nil, "", getKubernetesApi())
|
||||
|
||||
if setConfig.Account != "" {
|
||||
tenant.GetConfigObj().AccountID = setConfig.Account
|
||||
@@ -25,13 +25,13 @@ func (ks *Kubescape) SetCachedConfig(setConfig *metav1.SetConfig) error {
|
||||
|
||||
// View cached configurations
|
||||
func (ks *Kubescape) ViewCachedConfig(viewConfig *metav1.ViewConfig) error {
|
||||
tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
|
||||
tenant := getTenantConfig(nil, "", getKubernetesApi()) // change k8sinterface
|
||||
fmt.Fprintf(viewConfig.Writer, "%s\n", tenant.GetConfigObj().Config())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ks *Kubescape) DeleteCachedConfig(deleteConfig *metav1.DeleteConfig) error {
|
||||
|
||||
tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
|
||||
tenant := getTenantConfig(nil, "", getKubernetesApi()) // change k8sinterface
|
||||
return tenant.DeleteCachedConfig()
|
||||
}
|
||||
|
||||
@@ -3,16 +3,16 @@ package core
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
|
||||
)
|
||||
|
||||
func (ks *Kubescape) DeleteExceptions(delExceptions *v1.DeleteExceptions) error {
|
||||
|
||||
// load cached config
|
||||
getTenantConfig(delExceptions.Account, "", getKubernetesApi())
|
||||
getTenantConfig(&delExceptions.Credentials, "", getKubernetesApi())
|
||||
|
||||
// login kubescape SaaS
|
||||
armoAPI := getter.GetArmoAPIConnector()
|
||||
|
||||
@@ -7,10 +7,10 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
|
||||
)
|
||||
|
||||
var downloadFunc = map[string]func(*metav1.DownloadInfo) error{
|
||||
@@ -80,10 +80,10 @@ func downloadArtifacts(downloadInfo *metav1.DownloadInfo) error {
|
||||
}
|
||||
|
||||
func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
|
||||
|
||||
controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetAccountID(), nil)
|
||||
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetClusterName())
|
||||
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetContextName())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -104,12 +104,12 @@ func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
|
||||
var err error
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
|
||||
|
||||
exceptionsGetter := getExceptionsGetter("")
|
||||
exceptions := []armotypes.PostureExceptionPolicy{}
|
||||
if tenant.GetAccountID() != "" {
|
||||
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetClusterName())
|
||||
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetContextName())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -128,9 +128,9 @@ func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetTennatEmail(), true, nil)
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
// if framework name not specified - download all frameworks
|
||||
@@ -170,9 +170,9 @@ func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
func downloadControl(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetTennatEmail(), false, nil)
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
// TODO - support
|
||||
|
||||
@@ -2,19 +2,18 @@ package core
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/core/pkg/hostsensorutils"
|
||||
"github.com/armosec/kubescape/core/pkg/resourcehandler"
|
||||
"github.com/armosec/kubescape/core/pkg/resultshandling/reporter"
|
||||
reporterv2 "github.com/armosec/kubescape/core/pkg/resultshandling/reporter/v2"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resourcehandler"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
|
||||
reporterv2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v2"
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/rbac-utils/rbacscanner"
|
||||
)
|
||||
|
||||
@@ -25,11 +24,11 @@ func getKubernetesApi() *k8sinterface.KubernetesApi {
|
||||
}
|
||||
return k8sinterface.NewKubernetesApi()
|
||||
}
|
||||
func getTenantConfig(Account, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
|
||||
func getTenantConfig(credentials *cautils.Credentials, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
|
||||
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
|
||||
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), credentials, clusterName)
|
||||
}
|
||||
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), credentials, clusterName)
|
||||
}
|
||||
|
||||
func getExceptionsGetter(useExceptions string) getter.IExceptionsGetter {
|
||||
@@ -43,7 +42,7 @@ func getExceptionsGetter(useExceptions string) getter.IExceptionsGetter {
|
||||
|
||||
func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, submit bool) *cautils.RBACObjects {
|
||||
if submit {
|
||||
return cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, tenantConfig.GetAccountID(), tenantConfig.GetClusterName()))
|
||||
return cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, tenantConfig.GetAccountID(), tenantConfig.GetContextName()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -106,7 +105,7 @@ func getFieldSelector(scanInfo *cautils.ScanInfo) resourcehandler.IFieldSelector
|
||||
return &resourcehandler.EmptySelector{}
|
||||
}
|
||||
|
||||
func policyIdentifierNames(pi []reporthandling.PolicyIdentifier) string {
|
||||
func policyIdentifierNames(pi []cautils.PolicyIdentifier) string {
|
||||
policiesNames := ""
|
||||
for i := range pi {
|
||||
policiesNames += pi[i].Name
|
||||
@@ -124,7 +123,6 @@ func policyIdentifierNames(pi []reporthandling.PolicyIdentifier) string {
|
||||
func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig) {
|
||||
|
||||
/*
|
||||
|
||||
If "First run (local config not found)" -
|
||||
Default/keep-local - Do not send report
|
||||
Submit - Create tenant & Submit report
|
||||
@@ -141,8 +139,20 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
|
||||
return
|
||||
}
|
||||
|
||||
scanningContext := scanInfo.GetScanningContext()
|
||||
if scanningContext == cautils.ContextFile || scanningContext == cautils.ContextDir {
|
||||
scanInfo.Submit = false
|
||||
return
|
||||
}
|
||||
|
||||
if tenantConfig.IsConfigFound() { // config found in cache (submitted)
|
||||
if !scanInfo.Local {
|
||||
if tenantConfig.GetAccountID() != "" {
|
||||
if _, err := uuid.Parse(tenantConfig.GetAccountID()); err != nil {
|
||||
scanInfo.Submit = false
|
||||
return
|
||||
}
|
||||
}
|
||||
// Submit report
|
||||
scanInfo.Submit = true
|
||||
}
|
||||
@@ -166,20 +176,6 @@ func getPolicyGetter(loadPoliciesFromFile []string, tennatEmail string, framewor
|
||||
|
||||
}
|
||||
|
||||
// func setGetArmoAPIConnector(scanInfo *cautils.ScanInfo, customerGUID string) {
|
||||
// g := getter.GetArmoAPIConnector() // download policy from ARMO backend
|
||||
// g.SetCustomerGUID(customerGUID)
|
||||
// scanInfo.PolicyGetter = g
|
||||
// if scanInfo.ScanAll {
|
||||
// frameworks, err := g.ListCustomFrameworks(customerGUID)
|
||||
// if err != nil {
|
||||
// glog.Error("failed to get custom frameworks") // handle error
|
||||
// return
|
||||
// }
|
||||
// scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
|
||||
// }
|
||||
// }
|
||||
|
||||
// setConfigInputsGetter sets the config input getter - local file/github release/ArmoAPI
|
||||
func getConfigInputsGetter(ControlsInputs string, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IControlsInputsGetter {
|
||||
if len(ControlsInputs) > 0 {
|
||||
@@ -193,7 +189,7 @@ func getConfigInputsGetter(ControlsInputs string, accountID string, downloadRele
|
||||
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
|
||||
}
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull config inputs, fallback to BE
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: failed to get config inputs from github release, this may affect the scanning results\n")
|
||||
logger.L().Warning("failed to get config inputs from github release, this may affect the scanning results", helpers.Error(err))
|
||||
}
|
||||
return downloadReleasedPolicy
|
||||
}
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils/getter"
|
||||
metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
|
||||
)
|
||||
|
||||
var listFunc = map[string]func(*metav1.ListPolicies) ([]string, error){
|
||||
@@ -44,16 +44,16 @@ func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
|
||||
}
|
||||
|
||||
func listFrameworks(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
|
||||
g := getPolicyGetter(nil, tenant.GetTennatEmail(), true, nil)
|
||||
tenant := getTenantConfig(&listPolicies.Credentials, "", getKubernetesApi()) // change k8sinterface
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
|
||||
|
||||
return listFrameworksNames(g), nil
|
||||
}
|
||||
|
||||
func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
|
||||
tenant := getTenantConfig(&listPolicies.Credentials, "", getKubernetesApi()) // change k8sinterface
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetTennatEmail(), false, nil)
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
|
||||
l := getter.ListName
|
||||
if listPolicies.ListIDs {
|
||||
l = getter.ListID
|
||||
@@ -63,7 +63,7 @@ func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
|
||||
func listExceptions(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
// load tenant metav1
|
||||
getTenantConfig(listPolicies.Account, "", getKubernetesApi())
|
||||
getTenantConfig(&listPolicies.Credentials, "", getKubernetesApi())
|
||||
|
||||
var exceptionsNames []string
|
||||
armoAPI := getExceptionsGetter("")
|
||||
|
||||
@@ -3,22 +3,22 @@ package core
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/core/pkg/hostsensorutils"
|
||||
"github.com/armosec/kubescape/core/pkg/opaprocessor"
|
||||
"github.com/armosec/kubescape/core/pkg/policyhandler"
|
||||
"github.com/armosec/kubescape/core/pkg/resourcehandler"
|
||||
"github.com/armosec/kubescape/core/pkg/resultshandling"
|
||||
"github.com/armosec/kubescape/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/core/pkg/resultshandling/reporter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/opaprocessor"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/policyhandler"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resourcehandler"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/resources"
|
||||
)
|
||||
|
||||
@@ -34,7 +34,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
|
||||
// ================== setup k8s interface object ======================================
|
||||
var k8s *k8sinterface.KubernetesApi
|
||||
if scanInfo.GetScanningEnvironment() == cautils.ScanCluster {
|
||||
if scanInfo.GetScanningContext() == cautils.ContextCluster {
|
||||
k8s = getKubernetesApi()
|
||||
if k8s == nil {
|
||||
logger.L().Fatal("failed connecting to Kubernetes cluster")
|
||||
@@ -43,7 +43,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
|
||||
// ================== setup tenant object ======================================
|
||||
|
||||
tenantConfig := getTenantConfig(scanInfo.Account, scanInfo.KubeContext, k8s)
|
||||
tenantConfig := getTenantConfig(&scanInfo.Credentials, scanInfo.KubeContext, k8s)
|
||||
|
||||
// Set submit behavior AFTER loading tenant config
|
||||
setSubmitBehavior(scanInfo, tenantConfig)
|
||||
@@ -58,7 +58,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
// ================== version testing ======================================
|
||||
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanInfo.GetScanningEnvironment()))
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", cautils.ScanningContextToScanningScope(scanInfo.GetScanningContext())))
|
||||
|
||||
// ================== setup host scanner object ======================================
|
||||
|
||||
@@ -89,7 +89,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
reportHandler := getReporter(tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan)
|
||||
|
||||
// setup printer
|
||||
printerHandler := resultshandling.NewPrinter(scanInfo.Format, scanInfo.FormatVersion, scanInfo.VerboseMode)
|
||||
printerHandler := resultshandling.NewPrinter(scanInfo.Format, scanInfo.FormatVersion, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
|
||||
printerHandler.SetWriter(scanInfo.Output)
|
||||
|
||||
// ================== return interface ======================================
|
||||
@@ -111,21 +111,21 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
|
||||
interfaces := getInterfaces(scanInfo)
|
||||
|
||||
cautils.ClusterName = interfaces.tenantConfig.GetClusterName() // TODO - Deprecated
|
||||
cautils.ClusterName = interfaces.tenantConfig.GetContextName() // TODO - Deprecated
|
||||
cautils.CustomerGUID = interfaces.tenantConfig.GetAccountID() // TODO - Deprecated
|
||||
interfaces.report.SetClusterName(interfaces.tenantConfig.GetClusterName())
|
||||
interfaces.report.SetClusterName(interfaces.tenantConfig.GetContextName())
|
||||
interfaces.report.SetCustomerGUID(interfaces.tenantConfig.GetAccountID())
|
||||
|
||||
downloadReleasedPolicy := getter.NewDownloadReleasedPolicy() // download config inputs from github release
|
||||
|
||||
// set policy getter only after setting the customerGUID
|
||||
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetTennatEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
|
||||
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetTenantEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
|
||||
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
|
||||
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions)
|
||||
|
||||
// TODO - list supported frameworks/controls
|
||||
if scanInfo.ScanAll {
|
||||
scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), reporthandling.KindFramework)
|
||||
scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), apisv1.KindFramework)
|
||||
}
|
||||
|
||||
// remove host scanner components
|
||||
@@ -139,17 +139,17 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
|
||||
// ===================== policies & resources =====================
|
||||
policyHandler := policyhandler.NewPolicyHandler(interfaces.resourceHandler)
|
||||
scanData, err := collectResources(policyHandler, scanInfo)
|
||||
scanData, err := policyHandler.CollectResources(scanInfo.PolicyIdentifier, scanInfo)
|
||||
if err != nil {
|
||||
return resultsHandling, err
|
||||
}
|
||||
|
||||
// ========================= opa testing =====================
|
||||
deps := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), interfaces.tenantConfig.GetClusterName())
|
||||
deps := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), interfaces.tenantConfig.GetContextName())
|
||||
reportResults := opaprocessor.NewOPAProcessor(scanData, deps)
|
||||
if err := reportResults.ProcessRulesListenner(); err != nil {
|
||||
// TODO - do something
|
||||
return resultsHandling, err
|
||||
return resultsHandling, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
// ========================= results handling =====================
|
||||
@@ -161,47 +161,3 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
|
||||
return resultsHandling, nil
|
||||
}
|
||||
|
||||
// TODO - remove function
|
||||
func collectResources(policyHandler *policyhandler.PolicyHandler, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
|
||||
policyNotification := &reporthandling.PolicyNotification{
|
||||
Rules: scanInfo.PolicyIdentifier,
|
||||
KubescapeNotification: reporthandling.KubescapeNotification{
|
||||
Designators: armotypes.PortalDesignator{},
|
||||
NotificationType: reporthandling.TypeExecPostureScan,
|
||||
},
|
||||
}
|
||||
switch policyNotification.KubescapeNotification.NotificationType {
|
||||
case reporthandling.TypeExecPostureScan:
|
||||
collectedResources, err := policyHandler.CollectResources(policyNotification, scanInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return collectedResources, nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("notification type '%s' Unknown", policyNotification.KubescapeNotification.NotificationType)
|
||||
}
|
||||
}
|
||||
|
||||
// func askUserForHostSensor() bool {
|
||||
// return false
|
||||
|
||||
// if !isatty.IsTerminal(os.Stdin.Fd()) {
|
||||
// return false
|
||||
// }
|
||||
// if ssss, err := os.Stdin.Stat(); err == nil {
|
||||
// // fmt.Printf("Found stdin type: %s\n", ssss.Mode().Type())
|
||||
// if ssss.Mode().Type()&(fs.ModeDevice|fs.ModeCharDevice) > 0 { //has TTY
|
||||
// fmt.Fprintf(os.Stderr, "Would you like to scan K8s nodes? [y/N]. This is required to collect valuable data for certain controls\n")
|
||||
// fmt.Fprintf(os.Stderr, "Use --enable-host-scan flag to suppress this message\n")
|
||||
// var b []byte = make([]byte, 1)
|
||||
// if n, err := os.Stdin.Read(b); err == nil {
|
||||
// if n > 0 && len(b) > 0 && (b[0] == 'y' || b[0] == 'Y') {
|
||||
// return true
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// return false
|
||||
// }
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/core/meta/cliinterfaces"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
|
||||
)
|
||||
|
||||
func (ks *Kubescape) Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error {
|
||||
@@ -29,11 +29,11 @@ func (ks *Kubescape) Submit(submitInterfaces cliinterfaces.SubmitInterfaces) err
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ks *Kubescape) SubmitExceptions(accountID, excPath string) error {
|
||||
func (ks *Kubescape) SubmitExceptions(credentials *cautils.Credentials, excPath string) error {
|
||||
logger.L().Info("submitting exceptions", helpers.String("path", excPath))
|
||||
|
||||
// load cached config
|
||||
tenantConfig := getTenantConfig(accountID, "", getKubernetesApi())
|
||||
tenantConfig := getTenantConfig(credentials, "", getKubernetesApi())
|
||||
if err := tenantConfig.SetTenant(); err != nil {
|
||||
logger.L().Error("failed setting account ID", helpers.Error(err))
|
||||
}
|
||||
|
||||
124
core/go.mod
124
core/go.mod
@@ -1,124 +0,0 @@
|
||||
module github.com/armosec/kubescape/core
|
||||
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/armosec/armoapi-go v0.0.58
|
||||
github.com/armosec/k8s-interface v0.0.68
|
||||
github.com/armosec/opa-utils v0.0.130
|
||||
github.com/armosec/rbac-utils v0.0.14
|
||||
github.com/armosec/utils-go v0.0.3
|
||||
github.com/armosec/utils-k8s-go v0.0.3
|
||||
github.com/briandowns/spinner v1.18.1
|
||||
github.com/enescakir/emoji v1.0.0
|
||||
github.com/fatih/color v1.13.0
|
||||
github.com/francoispqt/gojay v1.2.13
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/johnfercher/maroto v0.34.0
|
||||
github.com/mattn/go-isatty v0.0.14
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/open-policy-agent/opa v0.38.0
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/whilp/git-urls v1.0.0
|
||||
go.uber.org/zap v1.21.0
|
||||
golang.org/x/mod v0.5.1
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
k8s.io/api v0.23.4
|
||||
k8s.io/apimachinery v0.23.4
|
||||
k8s.io/client-go v0.23.4
|
||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.99.0 // indirect
|
||||
cloud.google.com/go/container v1.0.0 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/OneOfOne/xxhash v1.2.8 // indirect
|
||||
github.com/aws/aws-sdk-go v1.41.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.7.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.9.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.1.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/eks v1.17.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.6.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.8.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.13.0 // indirect
|
||||
github.com/aws/smithy-go v1.9.1 // indirect
|
||||
github.com/boombuler/barcode v1.0.0 // indirect
|
||||
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect
|
||||
github.com/coreos/go-oidc v2.2.1+incompatible // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/docker v20.10.9+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane v0.10.1 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect
|
||||
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/go-gota/gota v0.12.0 // indirect
|
||||
github.com/go-logr/logr v1.2.2 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/glog v1.0.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-cmp v0.5.7 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/jung-kurt/gofpdf v1.4.2 // indirect
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/pquerna/cachecontrol v0.1.0 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect
|
||||
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||
gonum.org/v1/gonum v0.9.1 // indirect
|
||||
google.golang.org/api v0.62.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
|
||||
google.golang.org/grpc v1.44.0 // indirect
|
||||
google.golang.org/protobuf v1.27.1 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
k8s.io/klog/v2 v2.30.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
|
||||
sigs.k8s.io/controller-runtime v0.11.1 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||
)
|
||||
core/go.sum (1481 changed lines): file diff suppressed because it is too large
@@ -2,8 +2,8 @@ package cliinterfaces
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/pkg/resultshandling/reporter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package v1
|
||||
|
||||
import "github.com/armosec/kubescape/v2/core/cautils"
|
||||
|
||||
type DeleteExceptions struct {
|
||||
Account string
|
||||
Exceptions []string
|
||||
Credentials cautils.Credentials
|
||||
Exceptions []string
|
||||
}
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
package v1
|
||||
|
||||
import "github.com/armosec/kubescape/v2/core/cautils"
|
||||
|
||||
type DownloadInfo struct {
|
||||
Path string // directory to save artifact. Default is "~/.kubescape/"
|
||||
FileName string // can be empty
|
||||
Target string // type of artifact to download
|
||||
Name string // name of artifact to download
|
||||
Account string // AccountID
|
||||
Path string // directory to save artifact. Default is "~/.kubescape/"
|
||||
FileName string // can be empty
|
||||
Target string // type of artifact to download
|
||||
Name string // name of artifact to download
|
||||
Credentials cautils.Credentials
|
||||
}
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
package v1
|
||||
|
||||
import "github.com/armosec/kubescape/v2/core/cautils"
|
||||
|
||||
type ListPolicies struct {
|
||||
Target string
|
||||
ListIDs bool
|
||||
Account string
|
||||
Format string
|
||||
Target string
|
||||
ListIDs bool
|
||||
Format string
|
||||
Credentials cautils.Credentials
|
||||
}
|
||||
|
||||
type ListResponse struct {
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
package v1
|
||||
|
||||
import "github.com/armosec/kubescape/v2/core/cautils"
|
||||
|
||||
type Submit struct {
|
||||
Account string
|
||||
Credentials cautils.Credentials
|
||||
}
|
||||
|
||||
type Delete struct {
|
||||
Account string
|
||||
Credentials cautils.Credentials
|
||||
}
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
package meta
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/meta/cliinterfaces"
|
||||
metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
|
||||
"github.com/armosec/kubescape/core/pkg/resultshandling"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
|
||||
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling"
|
||||
)
|
||||
|
||||
type IKubescape interface {
|
||||
@@ -15,8 +15,8 @@ type IKubescape interface {
|
||||
Download(downloadInfo *metav1.DownloadInfo) error // TODO - return downloaded policies
|
||||
|
||||
// submit
|
||||
Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error // TODO - func should receive object
|
||||
SubmitExceptions(accountID, excPath string) error // TODO - remove
|
||||
Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error // TODO - func should receive object
|
||||
SubmitExceptions(credentials *cautils.Credentials, excPath string) error // TODO - remove
|
||||
|
||||
// config
|
||||
SetCachedConfig(setConfig *metav1.SetConfig) error
|
||||
|
||||
@@ -10,9 +10,9 @@ import (
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -89,7 +89,7 @@ func (hsh *HostSensorHandler) Init() error {
|
||||
|
||||
func (hsh *HostSensorHandler) applyYAML() error {
|
||||
workloads, err := cautils.ReadFile([]byte(hostSensorYAML), cautils.YAML_FILE_FORMAT)
|
||||
if len(err) != 0 {
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read YAML files, reason: %v", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"sigs.k8s.io/yaml"
|
||||
@@ -109,6 +109,18 @@ func (hsh *HostSensorHandler) GetLinuxSecurityHardeningStatus() ([]hostsensor.Ho
|
||||
return hsh.sendAllPodsHTTPGETRequest("/linuxSecurityHardening", "LinuxSecurityHardeningStatus")
|
||||
}
|
||||
|
||||
// return list of KubeletInfo
|
||||
func (hsh *HostSensorHandler) GetKubeletInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
return hsh.sendAllPodsHTTPGETRequest("/kubeletInfo", "KubeletInfo")
|
||||
}
|
||||
|
||||
// return list of KubeProxyInfo
|
||||
func (hsh *HostSensorHandler) GetKubeProxyInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
return hsh.sendAllPodsHTTPGETRequest("/kubeProxyInfo", "KubeProxyInfo")
|
||||
}
|
||||
|
||||
// return list of KubeletCommandLine
|
||||
func (hsh *HostSensorHandler) GetKubeletCommandLine() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
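GetKubeletInfo and GetKubeProxyInfo follow the existing getter pattern: each host-scanner endpoint is a thin wrapper around sendAllPodsHTTPGETRequest, and the following hunks also wire it into CollectResources and MapHostSensorResourceToApiGroup. A sketch of what one more endpoint would look like; the endpoint path and resource kind below are made up for illustration:

// Hypothetical additional getter, mirroring GetKubeletInfo / GetKubeProxyInfo.
func (hsh *HostSensorHandler) GetCloudProviderInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and port-forward it to each of them
	return hsh.sendAllPodsHTTPGETRequest("/cloudProviderInfo", "CloudProviderInfo") // illustrative names
}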
@@ -228,6 +240,27 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
|
||||
if len(kcData) > 0 {
|
||||
res = append(res, kcData...)
|
||||
}
|
||||
|
||||
// GetKubeletInfo
|
||||
kcData, err = hsh.GetKubeletInfo()
|
||||
if err != nil {
|
||||
addInfoToMap(KubeletInfo, infoMap, err)
|
||||
logger.L().Warning(err.Error())
|
||||
}
|
||||
if len(kcData) > 0 {
|
||||
res = append(res, kcData...)
|
||||
}
|
||||
|
||||
// GetKubeProxyInfo
|
||||
kcData, err = hsh.GetKubeProxyInfo()
|
||||
if err != nil {
|
||||
addInfoToMap(KubeProxyInfo, infoMap, err)
|
||||
logger.L().Warning(err.Error())
|
||||
}
|
||||
if len(kcData) > 0 {
|
||||
res = append(res, kcData...)
|
||||
}
|
||||
|
||||
logger.L().Debug("Done reading information from host scanner")
|
||||
return res, infoMap, nil
|
||||
}
|
||||
|
||||
@@ -13,8 +13,10 @@ var (
|
||||
OpenPortsList = "OpenPortsList"
|
||||
LinuxKernelVariables = "LinuxKernelVariables"
|
||||
KubeletCommandLine = "KubeletCommandLine"
|
||||
KubeletInfo = "KubeletInfo"
|
||||
KubeProxyInfo = "KubeProxyInfo"
|
||||
|
||||
MapResourceToApiGroup = map[string]string{
|
||||
MapHostSensorResourceToApiGroup = map[string]string{
|
||||
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
|
||||
OsReleaseFile: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeletCommandLine: "hostdata.kubescape.cloud/v1beta0",
|
||||
@@ -22,11 +24,13 @@ var (
|
||||
LinuxSecurityHardeningStatus: "hostdata.kubescape.cloud/v1beta0",
|
||||
OpenPortsList: "hostdata.kubescape.cloud/v1beta0",
|
||||
LinuxKernelVariables: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeletInfo: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeProxyInfo: "hostdata.kubescape.cloud/v1beta0",
|
||||
}
|
||||
)
|
||||
|
||||
func addInfoToMap(resource string, infoMap map[string]apis.StatusInfo, err error) {
|
||||
group, version := k8sinterface.SplitApiVersion(MapResourceToApiGroup[resource])
|
||||
group, version := k8sinterface.SplitApiVersion(MapHostSensorResourceToApiGroup[resource])
|
||||
r := k8sinterface.JoinResourceTriplets(group, version, resource)
|
||||
infoMap[r] = apis.StatusInfo{
|
||||
InnerStatus: apis.StatusSkipped,
|
||||
|
||||
@@ -6,10 +6,10 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/core/pkg/score"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/score"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
@@ -180,6 +180,9 @@ func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (map[stri
|
||||
for j := range ruleResponses[i].FixPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixPath: ruleResponses[i].FixPaths[j]})
|
||||
}
|
||||
if ruleResponses[i].FixCommand != "" {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixCommand: ruleResponses[i].FixCommand})
|
||||
}
|
||||
resources[failedResources[j].GetID()] = ruleResult
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/mocks"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/mocks"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/resources"
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
resources "github.com/armosec/opa-utils/resources"
|
||||
)
|
||||
|
||||
@@ -47,7 +47,7 @@ func (opap *OPAProcessor) updateResults() {
|
||||
|
||||
// set result summary
|
||||
// map control to error
|
||||
controlToInfoMap := mapControlToInfo(opap.ResourceToControlsMap, opap.InfoMap)
|
||||
controlToInfoMap := mapControlToInfo(opap.ResourceToControlsMap, opap.InfoMap, opap.Report.SummaryDetails.Controls)
|
||||
opap.Report.SummaryDetails.InitResourcesSummary(controlToInfoMap)
|
||||
// for f := range opap.PostureReport.FrameworkReports {
|
||||
// // set exceptions
|
||||
@@ -61,17 +61,29 @@ func (opap *OPAProcessor) updateResults() {
|
||||
// }
|
||||
}
|
||||
|
||||
func mapControlToInfo(mapResourceToControls map[string][]string, infoMap map[string]apis.StatusInfo) map[string]apis.StatusInfo {
|
||||
func mapControlToInfo(mapResourceToControls map[string][]string, infoMap map[string]apis.StatusInfo, controlSummary reportsummary.ControlSummaries) map[string]apis.StatusInfo {
|
||||
controlToInfoMap := make(map[string]apis.StatusInfo)
|
||||
for resource, statusInfo := range infoMap {
|
||||
controls := mapResourceToControls[resource]
|
||||
for _, control := range controls {
|
||||
controlToInfoMap[control] = statusInfo
|
||||
controlIDs := mapResourceToControls[resource]
|
||||
for _, controlID := range controlIDs {
|
||||
ctrl := controlSummary.GetControl(reportsummary.EControlCriteriaID, controlID)
|
||||
if ctrl != nil {
|
||||
resources := ctrl.NumberOfResources()
|
||||
// Check that there are no K8s resources too
|
||||
if isEmptyResources(resources) {
|
||||
controlToInfoMap[controlID] = statusInfo
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return controlToInfoMap
|
||||
}
|
||||
|
||||
func isEmptyResources(counters reportsummary.ICounters) bool {
|
||||
return counters.Failed() == 0 && counters.Excluded() == 0 && counters.Passed() == 0
|
||||
}
|
||||
|
||||
func getAllSupportedObjects(k8sResources *cautils.K8SResources, armoResources *cautils.ArmoResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
|
||||
k8sObjects := []workloadinterface.IMetadata{}
|
||||
k8sObjects = append(k8sObjects, getKubernetesObjects(k8sResources, allResources, rule.Match)...)
|
||||
@@ -116,7 +128,7 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[s
|
||||
for _, groupResource := range groupResources {
|
||||
if k8sObj, ok := (*k8sResources)[groupResource]; ok {
|
||||
if k8sObj == nil {
|
||||
logger.L().Debug("skipping", helpers.String("resource", groupResource))
|
||||
// logger.L().Debug("skipping", helpers.String("resource", groupResource))
|
||||
}
|
||||
for i := range k8sObj {
|
||||
k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/armosec/kubescape/core/mocks"
|
||||
"github.com/armosec/kubescape/v2/core/mocks"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
@@ -3,9 +3,9 @@ package policyhandler
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/pkg/resourcehandler"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resourcehandler"
|
||||
)
|
||||
|
||||
// PolicyHandler -
|
||||
@@ -22,7 +22,7 @@ func NewPolicyHandler(resourceHandler resourcehandler.IResourceHandler) *PolicyH
|
||||
}
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) CollectResources(notification *reporthandling.PolicyNotification, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
|
||||
func (policyHandler *PolicyHandler) CollectResources(policyIdentifier []cautils.PolicyIdentifier, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
|
||||
opaSessionObj := cautils.NewOPASessionObj(nil, nil, scanInfo)
|
||||
|
||||
// validate notification
|
||||
@@ -30,15 +30,15 @@ func (policyHandler *PolicyHandler) CollectResources(notification *reporthandlin
|
||||
policyHandler.getters = &scanInfo.Getters
|
||||
|
||||
// get policies
|
||||
if err := policyHandler.getPolicies(notification, opaSessionObj); err != nil {
|
||||
if err := policyHandler.getPolicies(policyIdentifier, opaSessionObj); err != nil {
|
||||
return opaSessionObj, err
|
||||
}
|
||||
|
||||
err := policyHandler.getResources(notification, opaSessionObj, scanInfo)
|
||||
err := policyHandler.getResources(policyIdentifier, opaSessionObj, scanInfo)
|
||||
if err != nil {
|
||||
return opaSessionObj, err
|
||||
}
|
||||
if opaSessionObj.K8SResources == nil || len(*opaSessionObj.K8SResources) == 0 {
|
||||
if (opaSessionObj.K8SResources == nil || len(*opaSessionObj.K8SResources) == 0) && (opaSessionObj.ArmoResource == nil || len(*opaSessionObj.ArmoResource) == 0) {
|
||||
return opaSessionObj, fmt.Errorf("empty list of resources")
|
||||
}
|
||||
|
||||
@@ -46,10 +46,10 @@ func (policyHandler *PolicyHandler) CollectResources(notification *reporthandlin
|
||||
return opaSessionObj, nil
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) getResources(notification *reporthandling.PolicyNotification, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
|
||||
func (policyHandler *PolicyHandler) getResources(policyIdentifier []cautils.PolicyIdentifier, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
|
||||
opaSessionObj.Report.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
|
||||
|
||||
resourcesMap, allResources, armoResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, ¬ification.Designators)
|
||||
resourcesMap, allResources, armoResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, &policyIdentifier[0].Designators)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -60,3 +60,10 @@ func (policyHandler *PolicyHandler) getResources(notification *reporthandling.Po
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDesignator(policyIdentifier []cautils.PolicyIdentifier) *armotypes.PortalDesignator {
|
||||
if len(policyIdentifier) > 0 {
|
||||
return &policyIdentifier[0].Designators
|
||||
}
|
||||
return &armotypes.PortalDesignator{}
|
||||
}
|
||||
|
||||
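getDesignator is a nil-safe accessor for the first identifier's designators, falling back to an empty PortalDesignator when the slice is empty; getResources above still indexes policyIdentifier[0] directly, which panics on an empty slice, so the helper is presumably meant to replace that access. A minimal sketch of the safer call:

// Hedged sketch: using the helper instead of indexing the slice directly.
resourcesMap, allResources, armoResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, getDesignator(policyIdentifier))
if err != nil {
	return err
}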
@@ -4,23 +4,27 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.PolicyNotification, policiesAndResources *cautils.OPASessionObj) error {
|
||||
func (policyHandler *PolicyHandler) getPolicies(policyIdentifier []cautils.PolicyIdentifier, policiesAndResources *cautils.OPASessionObj) error {
|
||||
logger.L().Info("Downloading/Loading policy definitions")
|
||||
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
|
||||
policies, err := policyHandler.getScanPolicies(notification)
|
||||
policies, err := policyHandler.getScanPolicies(policyIdentifier)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(policies) == 0 {
|
||||
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(notification.Rules), ", "))
|
||||
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(policyIdentifier), ", "))
|
||||
}
|
||||
|
||||
policiesAndResources.Policies = policies
|
||||
@@ -42,31 +46,41 @@ func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.Pol
|
||||
return nil
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling.PolicyNotification) ([]reporthandling.Framework, error) {
|
||||
func (policyHandler *PolicyHandler) getScanPolicies(policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
|
||||
frameworks := []reporthandling.Framework{}
|
||||
|
||||
switch getScanKind(notification) {
|
||||
case reporthandling.KindFramework: // Download frameworks
|
||||
for _, rule := range notification.Rules {
|
||||
switch getScanKind(policyIdentifier) {
|
||||
case apisv1.KindFramework: // Download frameworks
|
||||
for _, rule := range policyIdentifier {
|
||||
receivedFramework, err := policyHandler.getters.PolicyGetter.GetFramework(rule.Name)
|
||||
if err != nil {
|
||||
return frameworks, policyDownloadError(err)
|
||||
}
|
||||
if receivedFramework != nil {
|
||||
frameworks = append(frameworks, *receivedFramework)
|
||||
|
||||
cache := getter.GetDefaultPath(rule.Name + ".json")
|
||||
if err := getter.SaveInFile(receivedFramework, cache); err != nil {
|
||||
logger.L().Warning("failed to cache file", helpers.String("file", cache), helpers.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
case reporthandling.KindControl: // Download controls
|
||||
case apisv1.KindControl: // Download controls
|
||||
f := reporthandling.Framework{}
|
||||
var receivedControl *reporthandling.Control
|
||||
var err error
|
||||
for _, rule := range notification.Rules {
|
||||
for _, rule := range policyIdentifier {
|
||||
receivedControl, err = policyHandler.getters.PolicyGetter.GetControl(rule.Name)
|
||||
if err != nil {
|
||||
return frameworks, policyDownloadError(err)
|
||||
}
|
||||
if receivedControl != nil {
|
||||
f.Controls = append(f.Controls, *receivedControl)
|
||||
|
||||
cache := getter.GetDefaultPath(rule.Name + ".json")
|
||||
if err := getter.SaveInFile(receivedControl, cache); err != nil {
|
||||
logger.L().Warning("failed to cache file", helpers.String("file", cache), helpers.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
frameworks = append(frameworks, f)
|
||||
@@ -77,7 +91,7 @@ func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling
|
||||
return frameworks, nil
|
||||
}
|
||||
|
||||
func policyIdentifierToSlice(rules []reporthandling.PolicyIdentifier) []string {
|
||||
func policyIdentifierToSlice(rules []cautils.PolicyIdentifier) []string {
|
||||
s := []string{}
|
||||
for i := range rules {
|
||||
s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Name))
|
||||
|
||||
@@ -4,12 +4,14 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
)
|
||||
|
||||
func getScanKind(notification *reporthandling.PolicyNotification) reporthandling.NotificationPolicyKind {
|
||||
if len(notification.Rules) > 0 {
|
||||
return notification.Rules[0].Kind
|
||||
func getScanKind(policyIdentifier []cautils.PolicyIdentifier) apisv1.NotificationPolicyKind {
|
||||
if len(policyIdentifier) > 0 {
|
||||
return policyIdentifier[0].Kind
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
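getScanKind simply reports the kind of the first policy identifier (framework, control, and so on, now typed as apisv1.NotificationPolicyKind) and falls back to "unknown" for an empty list. A small hedged usage example:

// Illustrative usage, assuming cautils.PolicyIdentifier exposes Kind and Name fields
// as they are used elsewhere in this diff.
ids := []cautils.PolicyIdentifier{{Kind: apisv1.KindFramework, Name: "nsa"}}
if getScanKind(ids) == apisv1.KindFramework {
	// download the framework definitions
}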
@@ -4,11 +4,11 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/core/pkg/containerscan"
|
||||
"github.com/armosec/kubescape/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/containerscan"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
)
|
||||
|
||||
func NewArmoAdaptor(armoAPI *getter.ArmoAPI) *ArmoCivAdaptor {
|
||||
@@ -51,7 +51,7 @@ func (armoCivAdaptor *ArmoCivAdaptor) GetImageVulnerability(imageID *registryvul
|
||||
pageNumber := 1
|
||||
request := V2ListRequest{PageSize: &pageSize, PageNum: &pageNumber, InnerFilters: filter, OrderBy: "timestamp:desc"}
|
||||
requestBody, _ := json.Marshal(request)
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsDetails?customerGUID=%s", armoCivAdaptor.armoAPI.GetAPIURL(), armoCivAdaptor.armoAPI.GetAccountID())
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsDetails?customerGUID=%s", armoCivAdaptor.armoAPI.GetApiURL(), armoCivAdaptor.armoAPI.GetAccountID())
|
||||
|
||||
resp, err := armoCivAdaptor.armoAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
|
||||
if err != nil {
|
||||
@@ -83,8 +83,7 @@ func (armoCivAdaptor *ArmoCivAdaptor) GetImageVulnerability(imageID *registryvul
|
||||
}
|
||||
|
||||
func (armoCivAdaptor *ArmoCivAdaptor) DescribeAdaptor() string {
|
||||
// TODO
|
||||
return ""
|
||||
return "armo image vulnerabilities scanner, docs: https://hub.armo.cloud/docs/cluster-vulnerability-scanning"
|
||||
}
|
||||
|
||||
func (armoCivAdaptor *ArmoCivAdaptor) GetImagesInformation(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageInformation, error) {
|
||||
|
||||
@@ -3,7 +3,7 @@ package v1
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/kubescape/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
|
||||
@@ -3,8 +3,8 @@ package v1
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/armosec/kubescape/core/pkg/containerscan"
|
||||
"github.com/armosec/kubescape/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/containerscan"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
)
|
||||
|
||||
type ArmoCivAdaptorMock struct {
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/core/pkg/containerscan"
|
||||
"github.com/armosec/kubescape/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/containerscan"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
)
|
||||
|
||||
func (armoCivAdaptor *ArmoCivAdaptor) getImageLastScanId(imageID *registryvulnerabilities.ContainerImageIdentifier) (string, error) {
|
||||
@@ -14,7 +14,7 @@ func (armoCivAdaptor *ArmoCivAdaptor) getImageLastScanId(imageID *registryvulner
|
||||
pageNumber := 1
|
||||
request := V2ListRequest{PageSize: &pageSize, PageNum: &pageNumber, InnerFilters: filter, OrderBy: "timestamp:desc"}
|
||||
requestBody, _ := json.Marshal(request)
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsSumSummary?customerGUID=%s", armoCivAdaptor.armoAPI.GetAPIURL(), armoCivAdaptor.armoAPI.GetAccountID())
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsSumSummary?customerGUID=%s", armoCivAdaptor.armoAPI.GetApiURL(), armoCivAdaptor.armoAPI.GetAccountID())
|
||||
|
||||
resp, err := armoCivAdaptor.armoAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
|
||||
if err != nil {
|
||||
|
||||
@@ -3,7 +3,7 @@ package v1
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
)
|
||||
|
||||
type V2ListRequest struct {
|
||||
|
||||
@@ -1,110 +0,0 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/cloudsupport"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
)
|
||||
|
||||
var (
|
||||
KS_KUBE_CLUSTER_ENV_VAR = "KS_KUBE_CLUSTER"
|
||||
KS_CLOUD_PROVIDER_ENV_VAR = "KS_CLOUD_PROVIDER"
|
||||
KS_CLOUD_REGION_ENV_VAR = "KS_CLOUD_REGION"
|
||||
KS_GKE_PROJECT_ENV_VAR = "KS_GKE_PROJECT"
|
||||
)
|
||||
|
||||
type ICloudProvider interface {
|
||||
getKubeCluster() string
|
||||
getRegion(cluster string, provider string) (string, error)
|
||||
getProject(cluster string, provider string) (string, error)
|
||||
getKubeClusterName() string
|
||||
}
|
||||
|
||||
func initCloudProvider() ICloudProvider {
|
||||
|
||||
switch getCloudProvider() {
|
||||
case "gke", "gcp":
|
||||
if isEnvVars() {
|
||||
return NewGKEProviderEnvVar()
|
||||
}
|
||||
return NewGKEProviderContext()
|
||||
case "eks", "aws":
|
||||
if isEnvVars() {
|
||||
return NewEKSProviderEnvVar()
|
||||
}
|
||||
return NewEKSProviderContext()
|
||||
}
|
||||
return NewEmptyCloudProvider()
|
||||
}
|
||||
|
||||
func getCloudProvider() string {
|
||||
var provider string
|
||||
if isEnvVars() {
|
||||
provider = getCloudProviderFromEnvVar()
|
||||
} else {
|
||||
provider = getCloudProviderFromContext()
|
||||
}
|
||||
return strings.ToLower(provider)
|
||||
}
|
||||
|
||||
func getCloudProviderFromContext() string {
|
||||
return cloudsupport.GetCloudProvider(getClusterFromContext())
|
||||
}
|
||||
|
||||
func getClusterFromContext() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
cluster := context.Cluster
|
||||
if cluster != "" {
|
||||
return cluster
|
||||
}
|
||||
return k8sinterface.GetClusterName()
|
||||
}
|
||||
|
||||
func getCloudProviderFromEnvVar() string {
|
||||
val, present := os.LookupEnv(KS_CLOUD_PROVIDER_ENV_VAR)
|
||||
if present {
|
||||
return val
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func isEnvVars() bool {
|
||||
_, present := os.LookupEnv(KS_KUBE_CLUSTER_ENV_VAR)
|
||||
if !present {
|
||||
return false
|
||||
}
|
||||
_, present = os.LookupEnv(KS_CLOUD_PROVIDER_ENV_VAR)
|
||||
if !present {
|
||||
return false
|
||||
}
|
||||
_, present = os.LookupEnv(KS_CLOUD_REGION_ENV_VAR)
|
||||
return present
|
||||
}
|
||||
|
||||
type EmptyCloudProvider struct {
|
||||
}
|
||||
|
||||
func NewEmptyCloudProvider() *EmptyCloudProvider {
|
||||
return &EmptyCloudProvider{}
|
||||
}
|
||||
|
||||
func (emptyCloudProvider *EmptyCloudProvider) getKubeCluster() string {
|
||||
return getClusterFromContext()
|
||||
}
|
||||
|
||||
func (emptyCloudProvider *EmptyCloudProvider) getKubeClusterName() string {
|
||||
return emptyCloudProvider.getKubeCluster()
|
||||
}
|
||||
|
||||
func (emptyCloudProvider *EmptyCloudProvider) getRegion(cluster string, provider string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (emptyCloudProvider *EmptyCloudProvider) getProject(cluster string, provider string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
@@ -1,101 +0,0 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
)
|
||||
|
||||
type EKSProviderEnvVar struct {
|
||||
}
|
||||
|
||||
func NewEKSProviderEnvVar() *EKSProviderEnvVar {
|
||||
return &EKSProviderEnvVar{}
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getKubeClusterName() string {
|
||||
return eksProviderEnvVar.getKubeCluster()
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getKubeCluster() string {
|
||||
val, present := os.LookupEnv(KS_KUBE_CLUSTER_ENV_VAR)
|
||||
if present {
|
||||
return val
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getRegion(cluster string, provider string) (string, error) {
|
||||
return eksProviderEnvVar.getRegionForEKS(cluster)
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getProject(cluster string, provider string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getRegionForEKS(cluster string) (string, error) {
|
||||
region, present := os.LookupEnv(KS_CLOUD_REGION_ENV_VAR)
|
||||
if present {
|
||||
return region, nil
|
||||
}
|
||||
splittedClusterContext := strings.Split(cluster, ".")
|
||||
if len(splittedClusterContext) < 2 {
|
||||
return "", fmt.Errorf("failed to get region")
|
||||
}
|
||||
region = splittedClusterContext[1]
|
||||
return region, nil
|
||||
}
|
||||
|
||||
// ------------------------------------- EKSProviderContext -------------------------
|
||||
|
||||
type EKSProviderContext struct {
|
||||
}
|
||||
|
||||
func NewEKSProviderContext() *EKSProviderContext {
|
||||
return &EKSProviderContext{}
|
||||
}
|
||||
|
||||
func (eksProviderContext *EKSProviderContext) getKubeClusterName() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
cluster := context.Cluster
|
||||
if cluster != "" {
|
||||
splittedCluster := strings.Split(cluster, ".")
|
||||
if len(splittedCluster) > 1 {
|
||||
return splittedCluster[0]
|
||||
}
|
||||
}
|
||||
splittedCluster := strings.Split(k8sinterface.GetClusterName(), ".")
|
||||
if len(splittedCluster) > 1 {
|
||||
return splittedCluster[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (eksProviderContext *EKSProviderContext) getKubeCluster() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
if context.Cluster != "" {
|
||||
return context.Cluster
|
||||
}
|
||||
return k8sinterface.GetClusterName()
|
||||
}
|
||||
|
||||
func (eksProviderContext *EKSProviderContext) getRegion(cluster string, provider string) (string, error) {
|
||||
splittedClusterContext := strings.Split(cluster, ".")
|
||||
if len(splittedClusterContext) < 2 {
|
||||
return "", fmt.Errorf("failed to get region")
|
||||
}
|
||||
region := splittedClusterContext[1]
|
||||
return region, nil
|
||||
}
|
||||
|
||||
func (eksProviderContext *EKSProviderContext) getProject(cluster string, provider string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
@@ -3,13 +3,17 @@ package resourcehandler
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"k8s.io/apimachinery/pkg/version"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
)
|
||||
|
||||
// FileResourceHandler handle resources from files and URLs
|
||||
@@ -28,45 +32,99 @@ func NewFileResourceHandler(inputPatterns []string, registryAdaptors *RegistryAd
|
||||
|
||||
func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASessionObj, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.ArmoResources, error) {
|
||||
|
||||
//
|
||||
// build resources map
|
||||
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
|
||||
k8sResources := setK8sResourceMap(sessionObj.Policies)
|
||||
allResources := map[string]workloadinterface.IMetadata{}
|
||||
workloadIDToSource := make(map[string]string, 0)
|
||||
workloadIDToSource := make(map[string]reporthandling.Source, 0)
|
||||
armoResources := &cautils.ArmoResources{}
|
||||
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
|
||||
// load resource from local file system
|
||||
w, err := cautils.LoadResourcesFromFiles(fileHandler.inputPatterns)
|
||||
if len(fileHandler.inputPatterns) == 0 {
|
||||
return nil, nil, nil, fmt.Errorf("missing input")
|
||||
}
|
||||
path := fileHandler.inputPatterns[0]
|
||||
|
||||
clonedRepo, err := cloneGitRepo(&path)
|
||||
if err != nil {
|
||||
return nil, allResources, nil, err
|
||||
}
|
||||
for source, ws := range w {
|
||||
workloads = append(workloads, ws...)
|
||||
for i := range ws {
|
||||
workloadIDToSource[ws[i].GetID()] = source
|
||||
}
|
||||
if clonedRepo != "" {
|
||||
defer os.RemoveAll(clonedRepo)
|
||||
}
|
||||
|
||||
// load resources from url
|
||||
w, err = loadResourcesFromUrl(fileHandler.inputPatterns)
|
||||
if err != nil {
|
||||
return nil, allResources, nil, err
|
||||
// Get repo root
|
||||
repoRoot := ""
|
||||
giRepo, err := cautils.NewLocalGitRepository(path)
|
||||
if err == nil {
|
||||
repoRoot, _ = giRepo.GetRootDir()
|
||||
}
|
||||
for source, ws := range w {
|
||||
|
||||
// load resource from local file system
|
||||
logger.L().Info("Accessing local objects")
|
||||
|
||||
sourceToWorkloads := cautils.LoadResourcesFromFiles(path, repoRoot)
|
||||
|
||||
// update workloads and workloadIDToSource
|
||||
for source, ws := range sourceToWorkloads {
|
||||
workloads = append(workloads, ws...)
|
||||
|
||||
relSource, err := filepath.Rel(repoRoot, source)
|
||||
if err == nil {
|
||||
source = relSource
|
||||
}
|
||||
for i := range ws {
|
||||
workloadIDToSource[ws[i].GetID()] = source
|
||||
var filetype string
|
||||
if cautils.IsYaml(source) {
|
||||
filetype = reporthandling.SourceTypeYaml
|
||||
} else if cautils.IsJson(source) {
|
||||
filetype = reporthandling.SourceTypeJson
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
workloadIDToSource[ws[i].GetID()] = reporthandling.Source{
|
||||
RelativePath: source,
|
||||
FileType: filetype,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(workloads) == 0 {
|
||||
logger.L().Debug("files found in local storage", helpers.Int("files", len(sourceToWorkloads)), helpers.Int("workloads", len(workloads)))
|
||||
}
|
||||
|
||||
// load resources from helm charts
|
||||
helmSourceToWorkloads := cautils.LoadResourcesFromHelmCharts(path)
|
||||
for source, ws := range helmSourceToWorkloads {
|
||||
workloads = append(workloads, ws...)
|
||||
|
||||
relSource, err := filepath.Rel(repoRoot, source)
|
||||
if err == nil {
|
||||
source = relSource
|
||||
}
|
||||
for i := range ws {
|
||||
workloadIDToSource[ws[i].GetID()] = reporthandling.Source{
|
||||
RelativePath: source,
|
||||
FileType: reporthandling.SourceTypeHelmChart,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(helmSourceToWorkloads) > 0 {
|
||||
logger.L().Debug("helm templates found in local storage", helpers.Int("helmTemplates", len(helmSourceToWorkloads)), helpers.Int("workloads", len(workloads)))
|
||||
}
|
||||
|
||||
// addCommitData(fileHandler.inputPatterns[0], workloadIDToSource)
|
||||
|
||||
if len(workloads) == 0 {
|
||||
return nil, allResources, nil, fmt.Errorf("empty list of workloads - no workloads found")
|
||||
}
|
||||
|
||||
sessionObj.ResourceSource = workloadIDToSource
|
||||
|
||||
// map all resources: map["/group/version/kind"][]<k8s workloads>
|
||||
// map all resources: map["/apiVersion/version/kind"][]<k8s workloads>
|
||||
mappedResources := mapResources(workloads)
|
||||
|
||||
// save only relevant resources
|
||||
@@ -82,42 +140,14 @@ func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASess
|
||||
}
|
||||
|
||||
if err := fileHandler.registryAdaptors.collectImagesVulnerabilities(k8sResources, allResources, armoResources); err != nil {
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: failed to collect images vulnerabilities: %s\n", err.Error())
|
||||
logger.L().Warning("failed to collect images vulnerabilities", helpers.Error(err))
|
||||
}
|
||||
|
||||
return k8sResources, allResources, armoResources, nil
|
||||
logger.L().Success("Accessed to local objects")
|
||||
|
||||
return k8sResources, allResources, armoResources, nil
|
||||
}
|
||||
|
||||
func (fileHandler *FileResourceHandler) GetClusterAPIServerInfo() *version.Info {
|
||||
return nil
|
||||
}
|
||||
|
||||
// build resources map
|
||||
func mapResources(workloads []workloadinterface.IMetadata) map[string][]workloadinterface.IMetadata {
|
||||
|
||||
allResources := map[string][]workloadinterface.IMetadata{}
|
||||
for i := range workloads {
|
||||
groupVersionResource, err := k8sinterface.GetGroupVersionResource(workloads[i].GetKind())
|
||||
if err != nil {
|
||||
// TODO - print warning
|
||||
continue
|
||||
}
|
||||
|
||||
if k8sinterface.IsTypeWorkload(workloads[i].GetObject()) {
|
||||
w := workloadinterface.NewWorkloadObj(workloads[i].GetObject())
|
||||
if groupVersionResource.Group != w.GetGroup() || groupVersionResource.Version != w.GetVersion() {
|
||||
// TODO - print warning
|
||||
continue
|
||||
}
|
||||
}
|
||||
resourceTriplets := k8sinterface.JoinResourceTriplets(groupVersionResource.Group, groupVersionResource.Version, groupVersionResource.Resource)
|
||||
if r, ok := allResources[resourceTriplets]; ok {
|
||||
allResources[resourceTriplets] = append(r, workloads[i])
|
||||
} else {
|
||||
allResources[resourceTriplets] = []workloadinterface.IMetadata{workloads[i]}
|
||||
}
|
||||
}
|
||||
return allResources
|
||||
|
||||
}
|
||||
|
||||
core/pkg/resourcehandler/filesloaderutils.go (86 lines, new file)
@@ -0,0 +1,86 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
giturl "github.com/armosec/go-git-url"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// Clone git repository
|
||||
func cloneGitRepo(path *string) (string, error) {
|
||||
var clonedDir string
|
||||
|
||||
// Clone git repository if needed
|
||||
gitURL, err := giturl.NewGitURL(*path)
|
||||
if err == nil {
|
||||
logger.L().Info("cloning", helpers.String("repository url", gitURL.GetURL().String()))
|
||||
cautils.StartSpinner()
|
||||
clonedDir, err = cloneRepo(gitURL)
|
||||
cautils.StopSpinner()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to clone git repo '%s', %w", gitURL.GetURL().String(), err)
|
||||
}
|
||||
|
||||
*path = filepath.Join(clonedDir, gitURL.GetPath())
|
||||
|
||||
}
|
||||
return clonedDir, nil
|
||||
}
|
||||
|
||||
// build resources map
|
||||
func mapResources(workloads []workloadinterface.IMetadata) map[string][]workloadinterface.IMetadata {
|
||||
|
||||
allResources := map[string][]workloadinterface.IMetadata{}
|
||||
for i := range workloads {
|
||||
groupVersionResource, err := k8sinterface.GetGroupVersionResource(workloads[i].GetKind())
|
||||
if err != nil {
|
||||
// TODO - print warning
|
||||
continue
|
||||
}
|
||||
|
||||
if k8sinterface.IsTypeWorkload(workloads[i].GetObject()) {
|
||||
w := workloadinterface.NewWorkloadObj(workloads[i].GetObject())
|
||||
if groupVersionResource.Group != w.GetGroup() || groupVersionResource.Version != w.GetVersion() {
|
||||
// TODO - print warning
|
||||
continue
|
||||
}
|
||||
}
|
||||
resourceTriplets := k8sinterface.JoinResourceTriplets(groupVersionResource.Group, groupVersionResource.Version, groupVersionResource.Resource)
|
||||
if r, ok := allResources[resourceTriplets]; ok {
|
||||
allResources[resourceTriplets] = append(r, workloads[i])
|
||||
} else {
|
||||
allResources[resourceTriplets] = []workloadinterface.IMetadata{workloads[i]}
|
||||
}
|
||||
}
|
||||
return allResources
|
||||
|
||||
}
|
||||
|
||||
func addCommitData(input string, workloadIDToSource map[string]reporthandling.Source) {
|
||||
giRepo, err := cautils.NewLocalGitRepository(input)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for k := range workloadIDToSource {
|
||||
sourceObj := workloadIDToSource[k]
|
||||
lastCommit, err := giRepo.GetFileLastCommit(sourceObj.RelativePath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
sourceObj.LastCommit = reporthandling.LastCommit{
|
||||
Hash: lastCommit.SHA,
|
||||
Date: lastCommit.Author.Date,
|
||||
CommitterName: lastCommit.Author.Name,
|
||||
CommitterEmail: lastCommit.Author.Email,
|
||||
Message: lastCommit.Message,
|
||||
}
|
||||
workloadIDToSource[k] = sourceObj
|
||||
}
|
||||
}
|
||||
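Taken together with the GetResources rewrite above, the local-file flow now is: clone the target when the input is a git URL (cloneGitRepo), locate the repository root, load plain YAML/JSON files and Helm charts, and record a reporthandling.Source (relative path plus file type) per workload; addCommitData can then enrich each source with its last commit, though its call site is still commented out. A condensed, hedged sketch of that sequence:

// Hedged outline of the new file-scanning flow (error handling trimmed).
path := inputPatterns[0]
clonedDir, _ := cloneGitRepo(&path) // effectively a no-op for plain local paths
if clonedDir != "" {
	defer os.RemoveAll(clonedDir)
}
repoRoot := ""
if repo, err := cautils.NewLocalGitRepository(path); err == nil {
	repoRoot, _ = repo.GetRootDir()
}
sourceToWorkloads := cautils.LoadResourcesFromFiles(path, repoRoot)
helmSourceToWorkloads := cautils.LoadResourcesFromHelmCharts(path)
_ = sourceToWorkloads
_ = helmSourceToWorkloads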
@@ -1,131 +0,0 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
)
|
||||
|
||||
type GKEProviderEnvVar struct {
|
||||
}
|
||||
|
||||
func NewGKEProviderEnvVar() *GKEProviderEnvVar {
|
||||
return &GKEProviderEnvVar{}
|
||||
}
|
||||
func (gkeProvider *GKEProviderEnvVar) getKubeClusterName() string {
|
||||
return gkeProvider.getKubeCluster()
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getKubeCluster() string {
|
||||
val, present := os.LookupEnv(KS_KUBE_CLUSTER_ENV_VAR)
|
||||
if present {
|
||||
return val
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getRegion(cluster string, provider string) (string, error) {
|
||||
return gkeProvider.getRegionForGKE(cluster)
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getProject(cluster string, provider string) (string, error) {
|
||||
return gkeProvider.getProjectForGKE(cluster)
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getProjectForGKE(cluster string) (string, error) {
|
||||
project, present := os.LookupEnv(KS_GKE_PROJECT_ENV_VAR)
|
||||
if present {
|
||||
return project, nil
|
||||
}
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return "", fmt.Errorf("failed to parse project name from cluster name: '%s'", cluster)
|
||||
}
|
||||
project = parsedName[1]
|
||||
return project, nil
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getRegionForGKE(cluster string) (string, error) {
|
||||
region, present := os.LookupEnv(KS_CLOUD_REGION_ENV_VAR)
|
||||
if present {
|
||||
return region, nil
|
||||
}
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return "", fmt.Errorf("failed to parse region name from cluster name: '%s'", cluster)
|
||||
}
|
||||
region = parsedName[2]
|
||||
return region, nil
|
||||
|
||||
}
|
||||
|
||||
// ------------------------------ GKEProviderContext --------------------------------------------------------
|
||||
|
||||
type GKEProviderContext struct {
|
||||
}
|
||||
|
||||
func NewGKEProviderContext() *GKEProviderContext {
|
||||
return &GKEProviderContext{}
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getKubeClusterName() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
cluster := context.Cluster
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return ""
|
||||
}
|
||||
clusterName := parsedName[3]
|
||||
if clusterName != "" {
|
||||
return clusterName
|
||||
}
|
||||
cluster = k8sinterface.GetClusterName()
|
||||
parsedName = strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return ""
|
||||
}
|
||||
return parsedName[3]
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getKubeCluster() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
if context.Cluster != "" {
|
||||
return context.Cluster
|
||||
}
|
||||
return k8sinterface.GetClusterName()
|
||||
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getRegion(cluster string, provider string) (string, error) {
|
||||
return gkeProviderContext.getRegionForGKE(cluster)
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getProject(cluster string, provider string) (string, error) {
|
||||
return gkeProviderContext.getProjectForGKE(cluster)
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getProjectForGKE(cluster string) (string, error) {
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return "", fmt.Errorf("failed to parse project name from cluster name: '%s'", cluster)
|
||||
}
|
||||
project := parsedName[1]
|
||||
return project, nil
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getRegionForGKE(cluster string) (string, error) {
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return "", fmt.Errorf("failed to parse region name from cluster name: '%s'", cluster)
|
||||
}
|
||||
region := parsedName[2]
|
||||
return region, nil
|
||||
}
|
||||
@@ -5,10 +5,10 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/core/pkg/hostsensorutils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
k8slabels "k8s.io/apimachinery/pkg/labels"
|
||||
@@ -85,6 +86,11 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio
|
||||
if len(imgVulnResources) > 0 {
|
||||
if err := k8sHandler.registryAdaptors.collectImagesVulnerabilities(k8sResourcesMap, allResources, armoResourceMap); err != nil {
|
||||
logger.L().Warning("failed to collect image vulnerabilities", helpers.Error(err))
|
||||
cautils.SetInfoMapForResources(fmt.Sprintf("failed to pull image scanning data: %s", err.Error()), imgVulnResources, sessionObj.InfoMap)
|
||||
} else {
|
||||
if isEmptyImgVulns(*armoResourceMap) {
|
||||
cautils.SetInfoMapForResources("image scanning is not configured. for more information: https://hub.armo.cloud/docs/cluster-vulnerability-scanning", imgVulnResources, sessionObj.InfoMap)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -103,7 +109,7 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio
 			sessionObj.InfoMap = infoMap
 		}
 	} else {
-		cautils.SetInfoMapForResources("enable-host-scan flag not used", hostResources, sessionObj.InfoMap)
+		cautils.SetInfoMapForResources("enable-host-scan flag not used. For more information: https://hub.armo.cloud/docs/host-sensor", hostResources, sessionObj.InfoMap)
 	}
 }
@@ -265,23 +271,15 @@ func (k8sHandler *K8sResourceHandler) collectRbacResources(allResources map[stri

 func getCloudProviderDescription(allResources map[string]workloadinterface.IMetadata, armoResourceMap *cautils.ArmoResources) (string, error) {
 	logger.L().Debug("Collecting cloud data")
-	cloudProvider := initCloudProvider()
-	cluster := cloudProvider.getKubeCluster()
-	clusterName := cloudProvider.getKubeClusterName()
-	provider := getCloudProvider()
-	region, err := cloudProvider.getRegion(cluster, provider)
-	if err != nil {
-		return provider, err
-	}
-	project, err := cloudProvider.getProject(cluster, provider)
-	if err != nil {
-		return provider, err
-	}
+	clusterName := cautils.ClusterName
+
+	provider := cloudsupport.GetCloudProvider(clusterName)

 	if provider != "" {
-		logger.L().Debug("cloud", helpers.String("cluster", cluster), helpers.String("clusterName", clusterName), helpers.String("provider", provider), helpers.String("region", region), helpers.String("project", project))
+		logger.L().Debug("cloud", helpers.String("cluster", clusterName), helpers.String("clusterName", clusterName), helpers.String("provider", provider))

-		wl, err := cloudsupport.GetDescriptiveInfoFromCloudProvider(clusterName, provider, region, project)
+		wl, err := cloudsupport.GetDescriptiveInfoFromCloudProvider(clusterName, provider)
 		if err != nil {
 			// Return error with useful info on how to configure credentials for getting cloud provider info
 			logger.L().Debug("failed to get descriptive information", helpers.Error(err))
@@ -295,17 +293,31 @@ func getCloudProviderDescription(allResources map[string]workloadinterface.IMeta
 }

 func (k8sHandler *K8sResourceHandler) pullWorkerNodesNumber() (int, error) {
-	// labels used for control plane
-	listOptions := metav1.ListOptions{
-		LabelSelector: "!node-role.kubernetes.io/control-plane,!node-role.kubernetes.io/master",
-	}
-	nodesList, err := k8sHandler.k8s.KubernetesClient.CoreV1().Nodes().List(context.TODO(), listOptions)
+	nodesList, err := k8sHandler.k8s.KubernetesClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+	scheduableNodes := v1.NodeList{}
+	if nodesList != nil {
+		for _, node := range nodesList.Items {
+			if len(node.Spec.Taints) == 0 {
+				scheduableNodes.Items = append(scheduableNodes.Items, node)
+			} else {
+				if !isMasterNodeTaints(node.Spec.Taints) {
+					scheduableNodes.Items = append(scheduableNodes.Items, node)
+				}
+			}
+		}
+	}
 	if err != nil {
 		return 0, err
 	}
-	nodesNumber := 0
-	if nodesList != nil {
-		nodesNumber = len(nodesList.Items)
-	}
-	return nodesNumber, nil
+	return len(scheduableNodes.Items), nil
 }
+
+// NoSchedule taint with empty value is usually applied to controlplane
+func isMasterNodeTaints(taints []v1.Taint) bool {
+	for _, taint := range taints {
+		if taint.Effect == v1.TaintEffectNoSchedule && taint.Value == "" {
+			return true
+		}
+	}
+	return false
+}
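The rewritten pullWorkerNodesNumber counts only schedulable nodes: a node is skipped when it carries a NoSchedule taint with an empty value, which is how control-plane nodes are usually tainted. A small, self-contained sketch of that check against a hand-built taint (not data from a live cluster):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// A typical control-plane taint: NoSchedule effect with an empty value.
	taint := v1.Taint{
		Key:    "node-role.kubernetes.io/control-plane",
		Effect: v1.TaintEffectNoSchedule,
		Value:  "",
	}
	// Same condition used by isMasterNodeTaints above.
	isControlPlane := taint.Effect == v1.TaintEffectNoSchedule && taint.Value == ""
	fmt.Println(isControlPlane) // true
}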
core/pkg/resourcehandler/k8sresources_test.go (new file, 532 lines)
@@ -0,0 +1,532 @@
package resourcehandler

import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
)

func TestIsMasterNodeTaints(t *testing.T) {
	noTaintNode := `
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Node",
|
||||
"metadata": {
|
||||
"annotations": {
|
||||
"kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock",
|
||||
"node.alpha.kubernetes.io/ttl": "0",
|
||||
"volumes.kubernetes.io/controller-managed-attach-detach": "true"
|
||||
},
|
||||
"creationTimestamp": "2022-05-16T10:52:32Z",
|
||||
"labels": {
|
||||
"beta.kubernetes.io/arch": "amd64",
|
||||
"beta.kubernetes.io/os": "linux",
|
||||
"kubernetes.io/arch": "amd64",
|
||||
"kubernetes.io/hostname": "danielg-minikube",
|
||||
"kubernetes.io/os": "linux",
|
||||
"minikube.k8s.io/commit": "3e64b11ed75e56e4898ea85f96b2e4af0301f43d",
|
||||
"minikube.k8s.io/name": "danielg-minikube",
|
||||
"minikube.k8s.io/updated_at": "2022_05_16T13_52_35_0700",
|
||||
"minikube.k8s.io/version": "v1.25.1",
|
||||
"node-role.kubernetes.io/control-plane": "",
|
||||
"node-role.kubernetes.io/master": "",
|
||||
"node.kubernetes.io/exclude-from-external-load-balancers": ""
|
||||
},
|
||||
"name": "danielg-minikube",
|
||||
"resourceVersion": "9432",
|
||||
"uid": "fc4afcb6-4ca4-4038-ba54-5e16065a614a"
|
||||
},
|
||||
"spec": {
|
||||
"podCIDR": "10.244.0.0/24",
|
||||
"podCIDRs": [
|
||||
"10.244.0.0/24"
|
||||
]
|
||||
},
|
||||
"status": {
|
||||
"addresses": [
|
||||
{
|
||||
"address": "192.168.49.2",
|
||||
"type": "InternalIP"
|
||||
},
|
||||
{
|
||||
"address": "danielg-minikube",
|
||||
"type": "Hostname"
|
||||
}
|
||||
],
|
||||
"allocatable": {
|
||||
"cpu": "4",
|
||||
"ephemeral-storage": "94850516Ki",
|
||||
"hugepages-2Mi": "0",
|
||||
"memory": "10432976Ki",
|
||||
"pods": "110"
|
||||
},
|
||||
"capacity": {
|
||||
"cpu": "4",
|
||||
"ephemeral-storage": "94850516Ki",
|
||||
"hugepages-2Mi": "0",
|
||||
"memory": "10432976Ki",
|
||||
"pods": "110"
|
||||
},
|
||||
"conditions": [
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:14:31Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:29Z",
|
||||
"message": "kubelet has sufficient memory available",
|
||||
"reason": "KubeletHasSufficientMemory",
|
||||
"status": "False",
|
||||
"type": "MemoryPressure"
|
||||
},
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:14:31Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:29Z",
|
||||
"message": "kubelet has no disk pressure",
|
||||
"reason": "KubeletHasNoDiskPressure",
|
||||
"status": "False",
|
||||
"type": "DiskPressure"
|
||||
},
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:14:31Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:29Z",
|
||||
"message": "kubelet has sufficient PID available",
|
||||
"reason": "KubeletHasSufficientPID",
|
||||
"status": "False",
|
||||
"type": "PIDPressure"
|
||||
},
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:14:31Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:45Z",
|
||||
"message": "kubelet is posting ready status",
|
||||
"reason": "KubeletReady",
|
||||
"status": "True",
|
||||
"type": "Ready"
|
||||
}
|
||||
],
|
||||
"daemonEndpoints": {
|
||||
"kubeletEndpoint": {
|
||||
"Port": 10250
|
||||
}
|
||||
},
|
||||
"images": [
|
||||
{
|
||||
"names": [
|
||||
"requarks/wiki@sha256:dd83fff15e77843ff934b25c28c865ac000edf7653e5d11adad1dd51df87439d"
|
||||
],
|
||||
"sizeBytes": 441083858
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"mariadb@sha256:821d0411208eaa88f9e1f0daccd1d534f88d19baf724eb9a2777cbedb10b6c66"
|
||||
],
|
||||
"sizeBytes": 400782682
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/etcd@sha256:64b9ea357325d5db9f8a723dcf503b5a449177b17ac87d69481e126bb724c263",
|
||||
"k8s.gcr.io/etcd:3.5.1-0"
|
||||
],
|
||||
"sizeBytes": 292558922
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"kubernetesui/dashboard@sha256:ec27f462cf1946220f5a9ace416a84a57c18f98c777876a8054405d1428cc92e",
|
||||
"kubernetesui/dashboard:v2.3.1"
|
||||
],
|
||||
"sizeBytes": 220033604
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-apiserver@sha256:f54681a71cce62cbc1b13ebb3dbf1d880f849112789811f98b6aebd2caa2f255",
|
||||
"k8s.gcr.io/kube-apiserver:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 135162256
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-controller-manager@sha256:a7ed87380108a2d811f0d392a3fe87546c85bc366e0d1e024dfa74eb14468604",
|
||||
"k8s.gcr.io/kube-controller-manager:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 124971684
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-proxy@sha256:e40f3a28721588affcf187f3f246d1e078157dabe274003eaa2957a83f7170c8",
|
||||
"k8s.gcr.io/kube-proxy:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 112327826
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/kubescape@sha256:6196f766be50d94b45d903a911f5ee95ac99bc392a1324c3e063bec41efd98ba",
|
||||
"quay.io/armosec/kubescape:v2.0.153"
|
||||
],
|
||||
"sizeBytes": 110345054
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d"
|
||||
],
|
||||
"sizeBytes": 109129446
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/action-trigger@sha256:b93707d10ff86aac8dfa42ad37192d6bcf9aceeb4321b21756e438389c26e07c",
|
||||
"quay.io/armosec/action-trigger:v0.0.5"
|
||||
],
|
||||
"sizeBytes": 65127067
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/images-vulnerabilities-scan@sha256:a5f9ddc04a7fdce6d52ef85a21f0de567d8e04d418c2bc5bf5d72b151c997625",
|
||||
"quay.io/armosec/images-vulnerabilities-scan:v0.0.7"
|
||||
],
|
||||
"sizeBytes": 61446712
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/images-vulnerabilities-scan@sha256:2f879858da89f6542e3223fb18d6d793810cc2ad6e398b66776475e4218b6af5",
|
||||
"quay.io/armosec/images-vulnerabilities-scan:v0.0.8"
|
||||
],
|
||||
"sizeBytes": 61446528
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/cluster-collector@sha256:2c4f733d09f7f4090ace04585230bdfacbbc29a3ade38a2e1233d2c0f730d9b6",
|
||||
"quay.io/armosec/cluster-collector:v0.0.9"
|
||||
],
|
||||
"sizeBytes": 53699576
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-scheduler@sha256:8be4eb1593cf9ff2d91b44596633b7815a3753696031a1eb4273d1b39427fa8c",
|
||||
"k8s.gcr.io/kube-scheduler:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 53488305
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/coredns/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e",
|
||||
"k8s.gcr.io/coredns/coredns:v1.8.6"
|
||||
],
|
||||
"sizeBytes": 46829283
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"kubernetesui/metrics-scraper@sha256:36d5b3f60e1a144cc5ada820910535074bdf5cf73fb70d1ff1681537eef4e172",
|
||||
"kubernetesui/metrics-scraper:v1.0.7"
|
||||
],
|
||||
"sizeBytes": 34446077
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944",
|
||||
"gcr.io/k8s-minikube/storage-provisioner:v5"
|
||||
],
|
||||
"sizeBytes": 31465472
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/notification-server@sha256:b6e9b296cd53bd3b2b42c516d8ab43db998acff1124a57aff8d66b3dd7881979",
|
||||
"quay.io/armosec/notification-server:v0.0.3"
|
||||
],
|
||||
"sizeBytes": 20209940
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/kube-host-sensor@sha256:82139d2561039726be060df2878ef023c59df7c536fbd7f6d766af5a99569fee",
|
||||
"quay.io/armosec/kube-host-sensor:latest"
|
||||
],
|
||||
"sizeBytes": 11796788
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db",
|
||||
"k8s.gcr.io/pause:3.6"
|
||||
],
|
||||
"sizeBytes": 682696
|
||||
}
|
||||
],
|
||||
"nodeInfo": {
|
||||
"architecture": "amd64",
|
||||
"bootID": "828cbe73-120b-43cf-aae0-9e2d15b8c873",
|
||||
"containerRuntimeVersion": "docker://20.10.12",
|
||||
"kernelVersion": "5.13.0-40-generic",
|
||||
"kubeProxyVersion": "v1.23.1",
|
||||
"kubeletVersion": "v1.23.1",
|
||||
"machineID": "8de776e053e140d6a14c2d2def3d6bb8",
|
||||
"operatingSystem": "linux",
|
||||
"osImage": "Ubuntu 20.04.2 LTS",
|
||||
"systemUUID": "da12dc19-10bf-4033-a440-2d9aa33d6fe3"
|
||||
}
|
||||
}
|
||||
}
|
||||
`
	var l v1.Node
	_ = json.Unmarshal([]byte(noTaintNode), &l)
	assert.False(t, isMasterNodeTaints(l.Spec.Taints))

	taintNode := `
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Node",
|
||||
"metadata": {
|
||||
"annotations": {
|
||||
"kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock",
|
||||
"node.alpha.kubernetes.io/ttl": "0",
|
||||
"volumes.kubernetes.io/controller-managed-attach-detach": "true"
|
||||
},
|
||||
"creationTimestamp": "2022-05-16T10:52:32Z",
|
||||
"labels": {
|
||||
"beta.kubernetes.io/arch": "amd64",
|
||||
"beta.kubernetes.io/os": "linux",
|
||||
"kubernetes.io/arch": "amd64",
|
||||
"kubernetes.io/hostname": "danielg-minikube",
|
||||
"kubernetes.io/os": "linux",
|
||||
"minikube.k8s.io/commit": "3e64b11ed75e56e4898ea85f96b2e4af0301f43d",
|
||||
"minikube.k8s.io/name": "danielg-minikube",
|
||||
"minikube.k8s.io/updated_at": "2022_05_16T13_52_35_0700",
|
||||
"minikube.k8s.io/version": "v1.25.1",
|
||||
"node-role.kubernetes.io/control-plane": "",
|
||||
"node-role.kubernetes.io/master": "",
|
||||
"node.kubernetes.io/exclude-from-external-load-balancers": ""
|
||||
},
|
||||
"name": "danielg-minikube",
|
||||
"resourceVersion": "9871",
|
||||
"uid": "fc4afcb6-4ca4-4038-ba54-5e16065a614a"
|
||||
},
|
||||
"spec": {
|
||||
"podCIDR": "10.244.0.0/24",
|
||||
"podCIDRs": [
|
||||
"10.244.0.0/24"
|
||||
],
|
||||
"taints": [
|
||||
{
|
||||
"effect": "NoSchedule",
|
||||
"key": "key1",
|
||||
"value": ""
|
||||
}
|
||||
]
|
||||
},
|
||||
"status": {
|
||||
"addresses": [
|
||||
{
|
||||
"address": "192.168.49.2",
|
||||
"type": "InternalIP"
|
||||
},
|
||||
{
|
||||
"address": "danielg-minikube",
|
||||
"type": "Hostname"
|
||||
}
|
||||
],
|
||||
"allocatable": {
|
||||
"cpu": "4",
|
||||
"ephemeral-storage": "94850516Ki",
|
||||
"hugepages-2Mi": "0",
|
||||
"memory": "10432976Ki",
|
||||
"pods": "110"
|
||||
},
|
||||
"capacity": {
|
||||
"cpu": "4",
|
||||
"ephemeral-storage": "94850516Ki",
|
||||
"hugepages-2Mi": "0",
|
||||
"memory": "10432976Ki",
|
||||
"pods": "110"
|
||||
},
|
||||
"conditions": [
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:24:45Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:29Z",
|
||||
"message": "kubelet has sufficient memory available",
|
||||
"reason": "KubeletHasSufficientMemory",
|
||||
"status": "False",
|
||||
"type": "MemoryPressure"
|
||||
},
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:24:45Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:29Z",
|
||||
"message": "kubelet has no disk pressure",
|
||||
"reason": "KubeletHasNoDiskPressure",
|
||||
"status": "False",
|
||||
"type": "DiskPressure"
|
||||
},
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:24:45Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:29Z",
|
||||
"message": "kubelet has sufficient PID available",
|
||||
"reason": "KubeletHasSufficientPID",
|
||||
"status": "False",
|
||||
"type": "PIDPressure"
|
||||
},
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:24:45Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:45Z",
|
||||
"message": "kubelet is posting ready status",
|
||||
"reason": "KubeletReady",
|
||||
"status": "True",
|
||||
"type": "Ready"
|
||||
}
|
||||
],
|
||||
"daemonEndpoints": {
|
||||
"kubeletEndpoint": {
|
||||
"Port": 10250
|
||||
}
|
||||
},
|
||||
"images": [
|
||||
{
|
||||
"names": [
|
||||
"requarks/wiki@sha256:dd83fff15e77843ff934b25c28c865ac000edf7653e5d11adad1dd51df87439d"
|
||||
],
|
||||
"sizeBytes": 441083858
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"mariadb@sha256:821d0411208eaa88f9e1f0daccd1d534f88d19baf724eb9a2777cbedb10b6c66"
|
||||
],
|
||||
"sizeBytes": 400782682
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/etcd@sha256:64b9ea357325d5db9f8a723dcf503b5a449177b17ac87d69481e126bb724c263",
|
||||
"k8s.gcr.io/etcd:3.5.1-0"
|
||||
],
|
||||
"sizeBytes": 292558922
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"kubernetesui/dashboard@sha256:ec27f462cf1946220f5a9ace416a84a57c18f98c777876a8054405d1428cc92e",
|
||||
"kubernetesui/dashboard:v2.3.1"
|
||||
],
|
||||
"sizeBytes": 220033604
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-apiserver@sha256:f54681a71cce62cbc1b13ebb3dbf1d880f849112789811f98b6aebd2caa2f255",
|
||||
"k8s.gcr.io/kube-apiserver:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 135162256
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-controller-manager@sha256:a7ed87380108a2d811f0d392a3fe87546c85bc366e0d1e024dfa74eb14468604",
|
||||
"k8s.gcr.io/kube-controller-manager:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 124971684
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-proxy@sha256:e40f3a28721588affcf187f3f246d1e078157dabe274003eaa2957a83f7170c8",
|
||||
"k8s.gcr.io/kube-proxy:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 112327826
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/kubescape@sha256:6196f766be50d94b45d903a911f5ee95ac99bc392a1324c3e063bec41efd98ba",
|
||||
"quay.io/armosec/kubescape:v2.0.153"
|
||||
],
|
||||
"sizeBytes": 110345054
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d"
|
||||
],
|
||||
"sizeBytes": 109129446
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/action-trigger@sha256:b93707d10ff86aac8dfa42ad37192d6bcf9aceeb4321b21756e438389c26e07c",
|
||||
"quay.io/armosec/action-trigger:v0.0.5"
|
||||
],
|
||||
"sizeBytes": 65127067
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/images-vulnerabilities-scan@sha256:a5f9ddc04a7fdce6d52ef85a21f0de567d8e04d418c2bc5bf5d72b151c997625",
|
||||
"quay.io/armosec/images-vulnerabilities-scan:v0.0.7"
|
||||
],
|
||||
"sizeBytes": 61446712
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/images-vulnerabilities-scan@sha256:2f879858da89f6542e3223fb18d6d793810cc2ad6e398b66776475e4218b6af5",
|
||||
"quay.io/armosec/images-vulnerabilities-scan:v0.0.8"
|
||||
],
|
||||
"sizeBytes": 61446528
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/cluster-collector@sha256:2c4f733d09f7f4090ace04585230bdfacbbc29a3ade38a2e1233d2c0f730d9b6",
|
||||
"quay.io/armosec/cluster-collector:v0.0.9"
|
||||
],
|
||||
"sizeBytes": 53699576
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-scheduler@sha256:8be4eb1593cf9ff2d91b44596633b7815a3753696031a1eb4273d1b39427fa8c",
|
||||
"k8s.gcr.io/kube-scheduler:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 53488305
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/coredns/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e",
|
||||
"k8s.gcr.io/coredns/coredns:v1.8.6"
|
||||
],
|
||||
"sizeBytes": 46829283
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"kubernetesui/metrics-scraper@sha256:36d5b3f60e1a144cc5ada820910535074bdf5cf73fb70d1ff1681537eef4e172",
|
||||
"kubernetesui/metrics-scraper:v1.0.7"
|
||||
],
|
||||
"sizeBytes": 34446077
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944",
|
||||
"gcr.io/k8s-minikube/storage-provisioner:v5"
|
||||
],
|
||||
"sizeBytes": 31465472
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/notification-server@sha256:b6e9b296cd53bd3b2b42c516d8ab43db998acff1124a57aff8d66b3dd7881979",
|
||||
"quay.io/armosec/notification-server:v0.0.3"
|
||||
],
|
||||
"sizeBytes": 20209940
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/kube-host-sensor@sha256:82139d2561039726be060df2878ef023c59df7c536fbd7f6d766af5a99569fee",
|
||||
"quay.io/armosec/kube-host-sensor:latest"
|
||||
],
|
||||
"sizeBytes": 11796788
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db",
|
||||
"k8s.gcr.io/pause:3.6"
|
||||
],
|
||||
"sizeBytes": 682696
|
||||
}
|
||||
],
|
||||
"nodeInfo": {
|
||||
"architecture": "amd64",
|
||||
"bootID": "828cbe73-120b-43cf-aae0-9e2d15b8c873",
|
||||
"containerRuntimeVersion": "docker://20.10.12",
|
||||
"kernelVersion": "5.13.0-40-generic",
|
||||
"kubeProxyVersion": "v1.23.1",
|
||||
"kubeletVersion": "v1.23.1",
|
||||
"machineID": "8de776e053e140d6a14c2d2def3d6bb8",
|
||||
"operatingSystem": "linux",
|
||||
"osImage": "Ubuntu 20.04.2 LTS",
|
||||
"systemUUID": "da12dc19-10bf-4033-a440-2d9aa33d6fe3"
|
||||
}
|
||||
}
|
||||
}
|
||||
`
	_ = json.Unmarshal([]byte(taintNode), &l)
	assert.True(t, isMasterNodeTaints(l.Spec.Taints))
}
@@ -3,8 +3,7 @@ package resourcehandler
 import (
 	"strings"

-	"github.com/armosec/kubescape/core/cautils"
-	"github.com/armosec/kubescape/core/pkg/hostsensorutils"
+	"github.com/armosec/kubescape/v2/core/cautils"
 	"github.com/armosec/opa-utils/reporthandling"
 	"k8s.io/utils/strings/slices"
@@ -12,12 +11,47 @@
 )

 var (
-	ClusterDescribe = "ClusterDescribe"
+	ClusterDescribe              = "ClusterDescribe"
+	KubeletConfiguration         = "KubeletConfiguration"
+	OsReleaseFile                = "OsReleaseFile"
+	KernelVersion                = "KernelVersion"
+	LinuxSecurityHardeningStatus = "LinuxSecurityHardeningStatus"
+	OpenPortsList                = "OpenPortsList"
+	LinuxKernelVariables         = "LinuxKernelVariables"
+	KubeletCommandLine           = "KubeletCommandLine"
+	ImageVulnerabilities         = "ImageVulnerabilities"
+	KubeletInfo                  = "KubeletInfo"
+	KubeProxyInfo                = "KubeProxyInfo"
+
+	MapResourceToApiGroup = map[string]string{
+		KubeletConfiguration:         "hostdata.kubescape.cloud/v1beta0",
+		OsReleaseFile:                "hostdata.kubescape.cloud/v1beta0",
+		KubeletCommandLine:           "hostdata.kubescape.cloud/v1beta0",
+		KernelVersion:                "hostdata.kubescape.cloud/v1beta0",
+		LinuxSecurityHardeningStatus: "hostdata.kubescape.cloud/v1beta0",
+		OpenPortsList:                "hostdata.kubescape.cloud/v1beta0",
+		LinuxKernelVariables:         "hostdata.kubescape.cloud/v1beta0",
+		KubeletInfo:                  "hostdata.kubescape.cloud/v1beta0",
+		KubeProxyInfo:                "hostdata.kubescape.cloud/v1beta0",
+	}
 	MapResourceToApiGroupVuln = map[string][]string{
 		ImageVulnerabilities: {"armo.vuln.images/v1", "image.vulnscan.com/v1"}}
 	MapResourceToApiGroupCloud = map[string][]string{
-		ClusterDescribe: {"container.googleapis.com/v1", "eks.amazonaws.com/v1"}}
+		ClusterDescribe: {"container.googleapis.com/v1", "eks.amazonaws.com/v1", "management.azure.com/v1"}}
 )

+func isEmptyImgVulns(armoResourcesMap cautils.ArmoResources) bool {
+	imgVulnResources := cautils.MapImageVulnResources(&armoResourcesMap)
+	for _, resource := range imgVulnResources {
+		if val, ok := armoResourcesMap[resource]; ok {
+			if len(val) > 0 {
+				return false
+			}
+		}
+	}
+	return true
+}
+
 func setK8sResourceMap(frameworks []reporthandling.Framework) *cautils.K8SResources {
 	k8sResources := make(cautils.K8SResources)
 	complexMap := setComplexK8sResourceMap(frameworks)
@@ -50,19 +84,6 @@ func setArmoResourceMap(frameworks []reporthandling.Framework, resourceToControl
 	return &armoResources
 }

-func convertComplexResourceMap(frameworks []reporthandling.Framework) map[string]map[string]map[string]interface{} {
-	k8sResources := make(map[string]map[string]map[string]interface{})
-	for _, framework := range frameworks {
-		for _, control := range framework.Controls {
-			for _, rule := range control.Rules {
-				for _, match := range rule.Match {
-					insertResources(k8sResources, match)
-				}
-			}
-		}
-	}
-	return k8sResources
-}
 func setComplexK8sResourceMap(frameworks []reporthandling.Framework) map[string]map[string]map[string]interface{} {
 	k8sResources := make(map[string]map[string]map[string]interface{})
 	for _, framework := range frameworks {
@@ -93,10 +114,16 @@ func setComplexArmoResourceMap(frameworks []reporthandling.Framework, resourceTo
 }

 func mapArmoResourceToApiGroup(resource string) []string {
-	if val, ok := hostsensorutils.MapResourceToApiGroup[resource]; ok {
+	if val, ok := MapResourceToApiGroup[resource]; ok {
 		return []string{val}
 	}
-	return MapResourceToApiGroupCloud[resource]
+	if val, ok := MapResourceToApiGroupCloud[resource]; ok {
+		return val
+	}
+	if val, ok := MapResourceToApiGroupVuln[resource]; ok {
+		return val
+	}
+	return []string{}
 }

 func insertControls(resource string, resourceToControl map[string][]string, control reporthandling.Control) {
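mapArmoResourceToApiGroup now resolves a resource name by checking the host-sensor map first, then the cloud map, then the image-vulnerability map, and falls back to an empty slice. A hypothetical table-style test sketching that lookup order; it is not part of this diff and assumes it sits in the same package as the existing tests below, which already import testing and testify's assert:

func TestMapArmoResourceToApiGroup_LookupOrder(t *testing.T) {
	assert.Equal(t, []string{"hostdata.kubescape.cloud/v1beta0"}, mapArmoResourceToApiGroup(KubeletConfiguration))
	assert.Equal(t, []string{"armo.vuln.images/v1", "image.vulnscan.com/v1"}, mapArmoResourceToApiGroup(ImageVulnerabilities))
	assert.Equal(t, []string{"container.googleapis.com/v1", "eks.amazonaws.com/v1", "management.azure.com/v1"}, mapArmoResourceToApiGroup(ClusterDescribe))
	assert.Equal(t, []string{}, mapArmoResourceToApiGroup("SomeUnknownResource"))
}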
@@ -2,7 +2,9 @@ package resourcehandler

 import (
 	"github.com/armosec/k8s-interface/k8sinterface"
+	"github.com/armosec/kubescape/v2/core/cautils"
 	"github.com/armosec/opa-utils/reporthandling"
 	"github.com/stretchr/testify/assert"

 	"testing"
 )
@@ -24,6 +26,18 @@ func TestSetResourceMap(t *testing.T) {
 	}

 }
+func TestSsEmptyImgVulns(t *testing.T) {
+	armoResourcesMap := make(cautils.ArmoResources, 0)
+	armoResourcesMap["container.googleapis.com/v1"] = []string{"fsdfds"}
+	assert.Equal(t, true, isEmptyImgVulns(armoResourcesMap))
+
+	armoResourcesMap["armo.vuln.images/v1/ImageVulnerabilities"] = []string{"dada"}
+	assert.Equal(t, false, isEmptyImgVulns(armoResourcesMap))
+
+	armoResourcesMap["armo.vuln.images/v1/ImageVulnerabilities"] = []string{}
+	armoResourcesMap["bla"] = []string{"blu"}
+	assert.Equal(t, true, isEmptyImgVulns(armoResourcesMap))
+}

 func TestInsertK8sResources(t *testing.T) {
 	// insertK8sResources
@@ -1,14 +1,15 @@
 package resourcehandler

 import (
 	"fmt"

 	"github.com/armosec/k8s-interface/k8sinterface"
 	"github.com/armosec/k8s-interface/workloadinterface"
-	"github.com/armosec/kubescape/core/cautils"
-	"github.com/armosec/kubescape/core/cautils/getter"
-	"github.com/armosec/kubescape/core/cautils/logger"
-	"github.com/armosec/kubescape/core/cautils/logger/helpers"
-	armosecadaptorv1 "github.com/armosec/kubescape/core/pkg/registryadaptors/armosec/v1"
-	"github.com/armosec/kubescape/core/pkg/registryadaptors/registryvulnerabilities"
+	"github.com/armosec/kubescape/v2/core/cautils"
+	"github.com/armosec/kubescape/v2/core/cautils/getter"
+	"github.com/armosec/kubescape/v2/core/cautils/logger"
+	armosecadaptorv1 "github.com/armosec/kubescape/v2/core/pkg/registryadaptors/armosec/v1"
+	"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"

 	"github.com/armosec/opa-utils/shared"
 )
@@ -45,8 +46,9 @@ func (registryAdaptors *RegistryAdaptors) collectImagesVulnerabilities(k8sResour
 	for i := range registryAdaptors.adaptors { // login and and get vulnerabilities

-		if err := registryAdaptors.adaptors[i].Login(); err != nil {
-			logger.L().Error("failed to login", helpers.Error(err))
-			continue
+		if err != nil {
+			return fmt.Errorf("failed to login, adaptor: '%s', reason: '%s'", registryAdaptors.adaptors[i].DescribeAdaptor(), err.Error())
 		}
 	}
 	vulnerabilities, err := registryAdaptors.adaptors[i].GetImagesVulnerabilities(imagesIdentifiers)
 	if err != nil {
core/pkg/resourcehandler/remotegitutils.go (new file, 36 lines)
@@ -0,0 +1,36 @@
package resourcehandler

import (
	"fmt"
	"os"

	giturl "github.com/armosec/go-git-url"
	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

// cloneRepo clones a repository to a local temporary directory and returns the directory
func cloneRepo(gitURL giturl.IGitURL) (string, error) {

	// Create temp directory
	tmpDir, err := os.MkdirTemp("", "")
	if err != nil {
		return "", fmt.Errorf("failed to create temporary directory: %w", err)
	}

	// Clone option
	cloneURL := gitURL.GetHttpCloneURL()
	cloneOpts := git.CloneOptions{URL: cloneURL}
	if gitURL.GetBranchName() != "" {
		cloneOpts.ReferenceName = plumbing.NewBranchReferenceName(gitURL.GetBranchName())
		cloneOpts.SingleBranch = true
	}

	// Actual clone
	_, err = git.PlainClone(tmpDir, false, &cloneOpts)
	if err != nil {
		return "", fmt.Errorf("failed to clone %s. %w", gitURL.GetRepoName(), err)
	}

	return tmpDir, nil
}
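Callers of cloneRepo own the returned temporary directory and are expected to delete it once the scan is finished. A minimal, self-contained sketch of the same clone-into-temp-dir pattern using go-git directly (the repository URL below is only an example):

package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-git/v5"
)

func main() {
	// Create a temporary working directory for the clone.
	tmpDir, err := os.MkdirTemp("", "kubescape-clone-")
	if err != nil {
		panic(err)
	}
	// The caller owns the directory and should clean it up afterwards.
	defer os.RemoveAll(tmpDir)

	// Clone the default branch of an example repository over HTTPS.
	_, err = git.PlainClone(tmpDir, false, &git.CloneOptions{URL: "https://github.com/armosec/kubescape"})
	if err != nil {
		panic(err)
	}
	fmt.Println("cloned into", tmpDir)
}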
@@ -8,7 +8,7 @@ import (
 	"path/filepath"
 	"strings"

-	"github.com/armosec/kubescape/core/cautils/getter"
+	"github.com/armosec/kubescape/v2/core/cautils/getter"
 	giturls "github.com/whilp/git-urls"
 	"k8s.io/utils/strings/slices"
 )
@@ -3,7 +3,7 @@ package resourcehandler
 import (
 	"github.com/armosec/armoapi-go/armotypes"
 	"github.com/armosec/k8s-interface/workloadinterface"
-	"github.com/armosec/kubescape/core/cautils"
+	"github.com/armosec/kubescape/v2/core/cautils"
 	"k8s.io/apimachinery/pkg/version"
 )
@@ -1,82 +1,48 @@
 package resourcehandler

 import (
-	"bytes"
-	"fmt"
-	"io"
-	"net/http"
-	"strings"
-
+	giturl "github.com/armosec/go-git-url"
 	"github.com/armosec/k8s-interface/workloadinterface"
-	"github.com/armosec/kubescape/core/cautils"
-	"github.com/armosec/kubescape/core/cautils/logger"
+	"github.com/armosec/kubescape/v2/core/cautils"
+	"github.com/armosec/kubescape/v2/core/cautils/logger"
+	"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
 )

 func loadResourcesFromUrl(inputPatterns []string) (map[string][]workloadinterface.IMetadata, error) {
-	urls := listUrls(inputPatterns)
-	if len(urls) == 0 {
+	if len(inputPatterns) == 0 {
+		return nil, nil
+	}
+	g, err := giturl.NewGitAPI(inputPatterns[0])
+	if err != nil {
 		return nil, nil
 	}

-	workloads, errs := downloadFiles(urls)
+	files, errs := g.DownloadFilesWithExtension(append(cautils.YAML_PREFIX, cautils.JSON_PREFIX...))
 	if len(errs) > 0 {
-		logger.L().Error(fmt.Sprintf("%v", errs))
-	}
-	return workloads, nil
-}
-
-func listUrls(patterns []string) []string {
-	urls := []string{}
-	for i := range patterns {
-		if strings.HasPrefix(patterns[i], "http") {
-			if yamls, err := ScanRepository(patterns[i], ""); err == nil { // TODO - support branch
-				urls = append(urls, yamls...)
-			} else {
-				logger.L().Error(err.Error())
-			}
+		for i, j := range errs {
+			logger.L().Error(i, helpers.Error(j))
 		}
 	}
-
-	return urls
-}
+	if len(files) == 0 {
+		return nil, nil
+	}

-func downloadFiles(urls []string) (map[string][]workloadinterface.IMetadata, []error) {
+	// convert files to IMetadata
 	workloads := make(map[string][]workloadinterface.IMetadata, 0)
-	errs := []error{}
-	for i := range urls {
-		f, err := downloadFile(urls[i])
-		if err != nil {
-			errs = append(errs, err)
+
+	for i, j := range files {
+		w, e := cautils.ReadFile(j, cautils.GetFileFormat(i))
+		if e != nil || len(w) == 0 {
 			continue
 		}
-		w, e := cautils.ReadFile(f, cautils.GetFileFormat(urls[i]))
-		errs = append(errs, e...)
-		if w != nil {
-			if _, ok := workloads[urls[i]]; !ok {
-				workloads[urls[i]] = make([]workloadinterface.IMetadata, 0)
-			}
-			wSlice := workloads[urls[i]]
-			wSlice = append(wSlice, w...)
-			workloads[urls[i]] = wSlice
+		if _, ok := workloads[i]; !ok {
+			workloads[i] = make([]workloadinterface.IMetadata, 0)
 		}
+		wSlice := workloads[i]
+		wSlice = append(wSlice, w...)
+		workloads[i] = wSlice
 	}
-	return workloads, errs
-}
-
-func downloadFile(url string) ([]byte, error) {
-	resp, err := http.Get(url)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-	if resp.StatusCode < 200 || 301 < resp.StatusCode {
-		return nil, fmt.Errorf("failed to download file, url: '%s', status code: %s", url, resp.Status)
-	}
-	return streamToByte(resp.Body), nil
-}
-
-func streamToByte(stream io.Reader) []byte {
-	buf := new(bytes.Buffer)
-	buf.ReadFrom(stream)
-	return buf.Bytes()
+	return workloads, nil
 }
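loadResourcesFromUrl now delegates listing and downloading of repository files to the go-git-url package instead of crawling raw URLs over HTTP. A rough usage sketch of that flow, assuming, as the loop above suggests, that DownloadFilesWithExtension takes a list of file extensions and returns a map of file path to raw contents together with a map of per-file errors; the extension list and repository URL here are illustrative only:

package main

import (
	"fmt"

	giturl "github.com/armosec/go-git-url"
)

func main() {
	// Example repository URL; NewGitAPI and DownloadFilesWithExtension are used as in the diff above.
	g, err := giturl.NewGitAPI("https://github.com/armosec/kubescape")
	if err != nil {
		panic(err)
	}
	// Download only YAML/JSON files; the assumed return shape is (map[path]content, map[path]error).
	files, errs := g.DownloadFilesWithExtension([]string{"yaml", "yml", "json"})
	for path, downloadErr := range errs {
		fmt.Println("failed to download:", path, downloadErr)
	}
	fmt.Println("downloaded", len(files), "files")
}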
@@ -5,8 +5,8 @@ import (
 	"os"
 	"path/filepath"

-	"github.com/armosec/kubescape/core/cautils"
-	"github.com/armosec/kubescape/core/cautils/logger"
+	"github.com/armosec/kubescape/v2/core/cautils"
+	"github.com/armosec/kubescape/v2/core/cautils/logger"
 )

 var INDENT = " "
@@ -17,6 +17,7 @@ const (
 	JunitResultFormat string = "junit"
 	PrometheusFormat  string = "prometheus"
 	PdfFormat         string = "pdf"
+	HtmlFormat        string = "html"
 )

 type IPrinter interface {
@@ -5,9 +5,9 @@ import (
 	"fmt"
 	"os"

-	"github.com/armosec/kubescape/core/cautils"
-	"github.com/armosec/kubescape/core/cautils/logger"
-	"github.com/armosec/kubescape/core/pkg/resultshandling/printer"
+	"github.com/armosec/kubescape/v2/core/cautils"
+	"github.com/armosec/kubescape/v2/core/cautils/logger"
+	"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
 )

 type JsonPrinter struct {
Some files were not shown because too many files have changed in this diff.