Mirror of https://github.com/kubescape/kubescape.git (synced 2026-02-14 18:09:55 +00:00)

Compare commits (165 commits)
Commit SHAs:

fc78d9143b 034dbca30c a41adc6c9e bd170938c5 e91a73a32e 099886e1bb c05dc8d7ae 3cebfb3065
ed5abd5791 898b847211 889dd15772 d46d77411b ee4f4d8af1 ea1426a24b bd78e4c4de e3f70b6cd6
a5007df1bc d6720b67ed 2261fd6adb 9334ad6991 7b5e4143c3 e63e5502cd 154794e774 4aa71725dd
9bd2e7fea4 e6d3e7d7da 35bb15b5df e54d61fd87 f28b2836c7 d196f1f327 995f90ca53 c1da380c9b
77f77b8c7d b3c1aec461 ef242b52bb af1d5694dc a5ac47ff6d b03a4974c4 8385cd0bd7 ca67aa7f5f
9b9ed514c8 11b7f6ab2f 021f2074b8 c1ba2d4b3c 141ad17ece 74c81e2270 7bb124b6fe 8a8ff10b19
1eef32dd8e d7b5dd416d 536a94de45 d8ef471eb2 ff07a80078 310d31a3b1 8a1ef7da87 c142779ee8
640f366c7e 9f36c1d6de b3c8c078a8 3ff2b0d6ff 35b2b350a0 046ea1d79f 3081508863 4a757c1bf1
dec4bcca00 5443039b8c 95e68f49f3 7e90956b50 0c84c8f1f3 56b3239e30 f8e85941da 15081aa9c3
ac03a2bda3 b7ffa22f3a ac5e7069da 5a83f38bca 3abd59e290 d08fdf2e9e bad2f54e72 fc9b713851
245200840d 3f87610e8c c285cb1bcc 63968b564b e237c48186 622b121535 20774d4a40 7bb6bb85ec
da908a84bc b515e259c0 facd551518 0fc569d9d9 da27a27ad5 5d4a20f622 70b15a373b 01353f81b3
22f10b6581 785178ffb1 f9b5c58402 8ed6d63ce5 990a7c2052 09b0c09472 f83c38b58e 51e600797a
39d6d1fd26 2dff63b101 b928892f0a c0188ea51d fa376ed5a4 6382edeb6e 61d6c2dd1f 44194f0b4e
7103c7d32c b4e1663cd1 47412c89ca 20d65f2ed3 46a559fb1d 2769b22721 60ec6e8294 63520f9aff
333b55a9f2 c6d3fd1a82 8106133ed0 b36111f63e 3ad0284394 245ebf8c41 8309562da1 de807a65a6
92fe583421 b7ec05e88a 203e925888 fde5453bf3 4c6a65565b e60ecfb8f5 b72e2610ca 8d4bae06bc
847b597d0f db1743f617 7ac1b8aacf 55f8cb1f0e 93574736cd e43f4b1a37 4ba33578ce ae00866005
21cb4dae29 7d3ac98998 5e9d01aec2 c09eabf347 38c2aed74a cf70671dba f90ce83a74 fab594ee32
d25cefe355 747eee1d29 0c43ee9ab8 466f3acd71 80add4ef12 959319c335 0e9ca547cb 6e17e5ce7e
4046321297 ed45b09241 107903cc99 13fb586ded 2e63982f5a
31 .github/workflows/build.yaml (vendored)

@@ -28,11 +28,11 @@ jobs:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1
- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: 1.17
go-version: 1.18
# - name: Test cmd pkg
# run: cd cmd && go test -v ./...
- name: Test core pkg
@@ -46,6 +46,7 @@ jobs:
- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
CLIENT: release
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
@@ -101,28 +102,28 @@ jobs:
id: image-name
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'

- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}

- name: Re-Tag Image to latest
run: docker tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} ${{ steps.image-name.outputs.IMAGE_NAME }}:latest

- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- name: Login to Quay.io
env: # Or as an environment variable
env:
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io

- name: Build the Docker image
run: docker buildx build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:latest --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg client=image-release --push --platform linux/amd64,linux/arm64

# - name: Login to GitHub Container Registry
# uses: docker/login-action@v1
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GITHUB_TOKEN }}
- name: Push Docker image
run: |
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:latest

# TODO - Wait for casign to support fixed tags -> https://github.com/sigstore/cosign/issues/1424
# - name: Install cosign
# uses: sigstore/cosign-installer@main

30 .github/workflows/build_dev.yaml (vendored)

@@ -11,12 +11,11 @@ jobs:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1

- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: 1.17
go-version: 1.18

# - name: Test cmd pkg
# run: cd cmd && go test -v ./...
@@ -40,6 +39,7 @@ jobs:
- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
CLIENT: release-dev
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud
@@ -80,21 +80,17 @@ jobs:
id: image-name
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'

- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}

- name: Set up QEMU
uses: docker/setup-qemu-action@v2

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2

- name: Login to Quay.io
env:
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
# - name: Login to GitHub Container Registry
# uses: docker/login-action@v1
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GITHUB_TOKEN }}
- name: Push Docker image
run: |
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}

- name: Build the Docker image
run: docker buildx build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg client=image-dev --push --platform linux/amd64,linux/arm64

8 .github/workflows/master_pr_checks.yaml (vendored)

@@ -12,12 +12,11 @@ jobs:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
steps:
- uses: actions/checkout@v1

- uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v2
uses: actions/setup-go@v3
with:
go-version: 1.17
go-version: 1.18

# - name: Test cmd pkg
# run: cd cmd && go test -v ./...
@@ -33,6 +32,7 @@ jobs:
- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
CLIENT: test
ArmoBEServer: api.armo.cloud
ArmoAuthServer: auth.armo.cloud
ArmoERServer: report.armo.cloud

1 .gitignore (vendored)

@@ -4,4 +4,5 @@
*vender*
*.pyc*
.idea
.history
ca.srl

72 README.md

@@ -28,7 +28,7 @@ Kubescape integrates natively with other DevOps tools, including Jenkins, Circle

# TL;DR
## Install:
```
```sh
curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh | /bin/bash
```

@@ -36,9 +36,13 @@ curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh |

[Install on macOS](#install-on-macos)

[Install on NixOS or Linux/macOS via nix](#install-on-nixos-or-with-nix-community)

[Install using Go](#install-using-go)

## Run:
```
kubescape scan --submit --enable-host-scan --format-version v2 --verbose
```sh
kubescape scan --submit --enable-host-scan --verbose
```

<img src="docs/summary.png">
@@ -101,13 +105,49 @@ Set-ExecutionPolicy RemoteSigned -scope CurrentUser

## Install on macOS

1. ```
1. ```sh
brew tap armosec/kubescape
```
2. ```
2. ```sh
brew install kubescape
```

## Install on NixOS or with nix (Community)

Direct issues installing `kubescape` via `nix` through the channels mentioned [here](https://nixos.wiki/wiki/Support)

You can use `nix` on Linux or macOS and on other platforms unofficially.

Try it out in an ephemeral shell: `nix-shell -p kubescape`

Install declarative as usual

NixOS:

```nix
# your other config ...
environment.systemPackages = with pkgs; [
# your other packages ...
kubescape
];
```

home-manager:

```nix
# your other config ...
home.packages = with pkgs; [
# your other packages ...
kubescape
];
```

Or to your profile (not preferred): `nix-env --install -A nixpkgs.kubescape`

## Install using Go

With a sufficient version of `go` you can install and build with `go install github.com/armosec/kubescape/v2@latest`

## Usage & Examples

### Examples
@@ -147,14 +187,14 @@ kubescape scan --include-namespaces development,staging,production
kubescape scan --exclude-namespaces kube-system,kube-public
```

#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI)
#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI) Submit the results in case the directory is a git repo. [docs](https://hub.armo.cloud/docs/repository-scanning)
```
kubescape scan *.yaml
kubescape scan *.yaml --submit
```

#### Scan kubernetes manifest files from a public github repository
#### Scan kubernetes manifest files from a git repository [and submit the results](https://hub.armo.cloud/docs/repository-scanning)
```
kubescape scan https://github.com/armosec/kubescape
kubescape scan https://github.com/armosec/kubescape --submit
```

#### Display all scanned resources (including the resources who passed)
@@ -193,16 +233,11 @@ kubescape scan --format prometheus
kubescape scan --exceptions examples/exceptions/exclude-kube-namespaces.json
```

#### Scan Helm charts - Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout
#### Scan Helm charts
```
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan -
kubescape scan </path/to/directory> --submit
```

e.g.
```
helm template bitnami/mysql --generate-name --dry-run | kubescape scan -
```

> Kubescape will load the default values file

### Offline/Air-gaped Environment Support

@@ -276,6 +311,9 @@ kubescape submit results path/to/results.json

Scan the YAML files while writing them using the [vs code extension](https://github.com/armosec/vscode-kubescape/blob/master/README.md)

## Lens Extension

View Kubescape scan results directly in [Lens IDE](https://k8slens.dev/) using kubescape [Lens extension](https://github.com/armosec/lens-kubescape/blob/master/README.md)

# Under the hood

78 build.py

@@ -10,66 +10,68 @@ ER_SERVER_CONST = BASE_GETTER_CONST + ".ArmoERURL"
WEBSITE_CONST = BASE_GETTER_CONST + ".ArmoFEURL"
AUTH_SERVER_CONST = BASE_GETTER_CONST + ".armoAUTHURL"

def checkStatus(status, msg):
def check_status(status, msg):
    if status != 0:
        sys.stderr.write(msg)
        exit(status)


def getBuildDir():
    currentPlatform = platform.system()
    buildDir = "./build/"
def get_build_dir():
    current_platform = platform.system()
    build_dir = "./build/"

    if currentPlatform == "Windows": buildDir += "windows-latest"
    elif currentPlatform == "Linux": buildDir += "ubuntu-latest"
    elif currentPlatform == "Darwin": buildDir += "macos-latest"
    else: raise OSError("Platform %s is not supported!" % (currentPlatform))
    if current_platform == "Windows": build_dir += "windows-latest"
    elif current_platform == "Linux": build_dir += "ubuntu-latest"
    elif current_platform == "Darwin": build_dir += "macos-latest"
    else: raise OSError("Platform %s is not supported!" % (current_platform))

    return buildDir
    return build_dir

def getPackageName():
    packageName = "kubescape"
    # if platform.system() == "Windows": packageName += ".exe"
def get_package_name():
    package_name = "kubescape"
    # if platform.system() == "Windows": package_name += ".exe"

    return packageName
    return package_name


def main():
    print("Building Kubescape")

    # print environment variables
    # print(os.environ)

    # Set some variables
    packageName = getPackageName()
    buildUrl = "github.com/armosec/kubescape/v2/core/cautils.BuildNumber"
    releaseVersion = os.getenv("RELEASE")
    ArmoBEServer = os.getenv("ArmoBEServer")
    ArmoERServer = os.getenv("ArmoERServer")
    ArmoWebsite = os.getenv("ArmoWebsite")
    ArmoAuthServer = os.getenv("ArmoAuthServer")
    package_name = get_package_name()
    build_url = "github.com/armosec/kubescape/v2/core/cautils.BuildNumber"
    release_version = os.getenv("RELEASE")
    armo_be_server = os.getenv("ArmoBEServer")
    armo_er_server = os.getenv("ArmoERServer")
    armo_website = os.getenv("ArmoWebsite")
    armo_auth_server = os.getenv("ArmoAuthServer")

    client_var = "github.com/armosec/kubescape/v2/core/cautils.Client"
    client_name = os.getenv("CLIENT")

    # Create build directory
    buildDir = getBuildDir()
    build_dir = get_build_dir()

    ks_file = os.path.join(buildDir, packageName)
    ks_file = os.path.join(build_dir, package_name)
    hash_file = ks_file + ".sha256"

    if not os.path.isdir(buildDir):
        os.makedirs(buildDir)
    if not os.path.isdir(build_dir):
        os.makedirs(build_dir)

    # Build kubescape
    ldflags = "-w -s"
    if releaseVersion:
        ldflags += " -X {}={}".format(buildUrl, releaseVersion)
    if ArmoBEServer:
        ldflags += " -X {}={}".format(BE_SERVER_CONST, ArmoBEServer)
    if ArmoERServer:
        ldflags += " -X {}={}".format(ER_SERVER_CONST, ArmoERServer)
    if ArmoWebsite:
        ldflags += " -X {}={}".format(WEBSITE_CONST, ArmoWebsite)
    if ArmoAuthServer:
        ldflags += " -X {}={}".format(AUTH_SERVER_CONST, ArmoAuthServer)
    if release_version:
        ldflags += " -X {}={}".format(build_url, release_version)
    if client_name:
        ldflags += " -X {}={}".format(client_var, client_name)
    if armo_be_server:
        ldflags += " -X {}={}".format(BE_SERVER_CONST, armo_be_server)
    if armo_er_server:
        ldflags += " -X {}={}".format(ER_SERVER_CONST, armo_er_server)
    if armo_website:
        ldflags += " -X {}={}".format(WEBSITE_CONST, armo_website)
    if armo_auth_server:
        ldflags += " -X {}={}".format(AUTH_SERVER_CONST, armo_auth_server)

    build_command = ["go", "build", "-o", ks_file, "-ldflags" ,ldflags]

@@ -77,7 +79,7 @@ def main():
    print("Build command: {}".format(" ".join(build_command)))

    status = subprocess.call(build_command)
    checkStatus(status, "Failed to build kubescape")
    check_status(status, "Failed to build kubescape")

    sha256 = hashlib.sha256()
    with open(ks_file, "rb") as kube:

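A note on the mechanism build.py relies on: each `-X importpath.Name=value` pair passed through `-ldflags` overwrites a package-level string variable at link time, which is how the release number, client name, and backend URLs get baked into the binary. A minimal, self-contained sketch of that behavior (the variable name below is illustrative, not kubescape's actual one):

```go
// Illustrative only: shows how `go build -ldflags "-X main.BuildNumber=v2.0.123"`
// replaces the default value of a package-level string variable.
package main

import "fmt"

// BuildNumber keeps its "dev" default unless the linker overrides it.
var BuildNumber = "dev"

func main() {
	fmt.Println("build:", BuildNumber) // prints "v2.0.123" when built with the -X flag above
}
```
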
@@ -1,9 +1,10 @@
FROM golang:1.17-alpine as builder
#ENV GOPROXY=https://goproxy.io,direct
FROM golang:1.18-alpine as builder

ARG image_version
ARG client

ENV RELEASE=$image_version
ENV CLIENT=$client

ENV GO111MODULE=

@@ -31,14 +32,17 @@ RUN /work/build/ubuntu-latest/kubescape download artifacts -o /work/artifacts

FROM alpine

RUN addgroup -S ks && adduser -S ks -G ks
USER ks
WORKDIR /home/ks/
RUN addgroup -S armo && adduser -S armo -G armo

RUN mkdir /home/armo/.kubescape
COPY --from=builder /work/artifacts/ /home/armo/.kubescape

RUN chown -R armo:armo /home/armo/.kubescape

USER armo
WORKDIR /home/armo

COPY --from=builder /work/httphandler/build/ubuntu-latest/kubescape /usr/bin/ksserver
COPY --from=builder /work/build/ubuntu-latest/kubescape /usr/bin/kubescape

RUN mkdir /home/ks/.kubescape && chmod 777 -R /home/ks/.kubescape
COPY --from=builder /work/artifacts/ /home/ks/.kubescape

ENTRYPOINT ["ksserver"]

@@ -24,7 +24,9 @@ func GetDeleteCmd(ks meta.IKubescape) *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
},
}
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armo.cloud/docs/authentication")
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armo.cloud/docs/authentication")

deleteCmd.AddCommand(getExceptionsCmd(ks, &deleteInfo))

@@ -26,7 +26,7 @@ func getExceptionsCmd(ks meta.IKubescape, deleteInfo *v1.Delete) *cobra.Command
if len(exceptionsNames) == 0 {
logger.L().Fatal("missing exceptions names")
}
if err := ks.DeleteExceptions(&v1.DeleteExceptions{Account: deleteInfo.Account, Exceptions: exceptionsNames}); err != nil {
if err := ks.DeleteExceptions(&v1.DeleteExceptions{Credentials: deleteInfo.Credentials, Exceptions: exceptionsNames}); err != nil {
logger.L().Fatal(err.Error())
}
},

@@ -72,7 +72,10 @@ func GeDownloadCmd(ks meta.IKubescape) *cobra.Command {
return nil
},
}
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")

downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armo.cloud/docs/authentication")
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armo.cloud/docs/authentication")
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If not specified, will save in `~/.kubescape/<policy name>.json`")

return downloadCmd

@@ -59,7 +59,9 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
return nil
},
}
listCmd.PersistentFlags().StringVar(&listPolicies.Account, "account", "", "Armo portal account ID. Default will load account ID from configMap or config file")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armo.cloud/docs/authentication")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armo.cloud/docs/authentication")
listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-printer'/'json'")
listCmd.PersistentFlags().BoolVarP(&listPolicies.ListIDs, "id", "", false, "List control ID's instead of controls names")

15 cmd/root.go

@@ -40,7 +40,6 @@ var ksExamples = `

func NewDefaultKubescapeCommand() *cobra.Command {
ks := core.NewKubescape()

return getRootCmd(ks)
}

@@ -49,8 +48,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
rootCmd := &cobra.Command{
Use: "kubescape",
Version: cautils.BuildNumber,
Short: "Kubescape is a tool for testing Kubernetes security posture",
Long: `Based on NSA \ MITRE ATT&CK® and other frameworks specifications`,
Short: "Kubescape is a tool for testing Kubernetes security posture. Docs: https://hub.armo.cloud/docs",
Example: ksExamples,
}

@@ -82,10 +80,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
return rootCmd
}

// func main() {
// ks := NewDefaultKubescapeCommand()
// err := ks.Execute()
// if err != nil {
// logger.L().Fatal(err.Error())
// }
// }
func Execute() error {
ks := NewDefaultKubescapeCommand()
return ks.Execute()
}

@@ -6,11 +6,12 @@ import (
"os"
"strings"

apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"

"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/armosec/opa-utils/reporthandling"
"github.com/enescakir/emoji"
"github.com/spf13/cobra"
)
@@ -57,14 +58,14 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
RunE: func(cmd *cobra.Command, args []string) error {

// flagValidationControl(scanInfo)
scanInfo.PolicyIdentifier = []reporthandling.PolicyIdentifier{}
scanInfo.PolicyIdentifier = []cautils.PolicyIdentifier{}

if len(args) == 0 {
scanInfo.ScanAll = true
} else { // expected control or list of control sepparated by ","

// Read controls from input args
scanInfo.SetPolicyIdentifiers(strings.Split(args[0], ","), reporthandling.KindControl)
scanInfo.SetPolicyIdentifiers(strings.Split(args[0], ","), apisv1.KindControl)

if len(args) > 1 {
if len(args[1:]) == 0 || args[1] != "-" {

@@ -6,11 +6,12 @@ import (
"os"
"strings"

apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"

"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/armosec/opa-utils/reporthandling"
"github.com/enescakir/emoji"
"github.com/spf13/cobra"
)
@@ -95,7 +96,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
}
scanInfo.FrameworkScan = true

scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
scanInfo.SetPolicyIdentifiers(frameworks, apisv1.KindFramework)

results, err := ks.Scan(scanInfo)
if err != nil {

@@ -1,6 +1,8 @@
package scan

import (
"fmt"

"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/meta"
@@ -61,18 +63,21 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
},
}

scanCmd.PersistentFlags().StringVarP(&scanInfo.Account, "account", "", "", "ARMO portal account ID. Default will load account ID from configMap or config file")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armo.cloud/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armo.cloud/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "kube-context", "", "", "Kube context. Default will use the current-context")
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to an controls-config obj. If not set will download controls-config from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseArtifactsFrom, "use-artifacts-from", "", "Load artifacts from local directory. If not used will download them")
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system,kube-public")
scanCmd.PersistentFlags().Float32VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer","json","junit","prometheus","pdf"`)
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer", "json", "junit", "prometheus", "pdf", "html"`)
scanCmd.PersistentFlags().StringVar(&scanInfo.IncludeNamespaces, "include-namespaces", "", "scan specific namespaces. e.g: --include-namespaces ns-a,ns-b")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to ARMO backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.VerboseMode, "verbose", "v", false, "Display all of the input resources and not only failed resources")
scanCmd.PersistentFlags().StringVar(&scanInfo.View, "view", string(cautils.ResourceViewType), fmt.Sprintf("View results based on the %s/%s. default is --view=%s", cautils.ResourceViewType, cautils.ControlViewType, cautils.ResourceViewType))
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to ARMO management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
@@ -88,7 +93,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
scanCmd.PersistentFlags().MarkHidden("silent") // this flag should be deprecated since we added the --logger support
// scanCmd.PersistentFlags().MarkHidden("format-version") // meant for testing different output approaches and not for common use

hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://raw.githubusercontent.com/armosec/kubescape/master/hostsensorutils/hostsensor.yaml")
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://github.com/armosec/kubescape/blob/master/core/pkg/hostsensorutils/hostsensor.yaml")
hostF.NoOptDefVal = "true"
hostF.DefValue = "false, for no TTY in stdin"

@@ -21,7 +21,7 @@ func getExceptionsCmd(ks meta.IKubescape, submitInfo *metav1.Submit) *cobra.Comm
return nil
},
Run: func(cmd *cobra.Command, args []string) {
if err := ks.SubmitExceptions(submitInfo.Account, args[0]); err != nil {
if err := ks.SubmitExceptions(&submitInfo.Credentials, args[0]); err != nil {
logger.L().Fatal(err.Error())
}
},

@@ -27,13 +27,13 @@ func getRBACCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
k8s := k8sinterface.NewKubernetesApi()

// get config
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
clusterConfig := getTenantConfig(&submitInfo.Credentials, "", k8s)
if err := clusterConfig.SetTenant(); err != nil {
logger.L().Error("failed setting account ID", helpers.Error(err))
}

// list RBAC
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetAccountID(), clusterConfig.GetClusterName()))
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetAccountID(), clusterConfig.GetContextName()))

// submit resources
r := reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
@@ -60,9 +60,9 @@ func getKubernetesApi() *k8sinterface.KubernetesApi {
}
return k8sinterface.NewKubernetesApi()
}
func getTenantConfig(Account, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
func getTenantConfig(credentials *cautils.Credentials, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), Account, clusterName)
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), credentials, clusterName)
}
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), Account, clusterName)
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), credentials, clusterName)
}

@@ -69,12 +69,12 @@ func getResultsCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
k8s := getKubernetesApi()

// get config
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
clusterConfig := getTenantConfig(&submitInfo.Credentials, "", k8s)
if err := clusterConfig.SetTenant(); err != nil {
logger.L().Error("failed setting account ID", helpers.Error(err))
}

resultsObjects := NewResultsObject(clusterConfig.GetAccountID(), clusterConfig.GetClusterName(), args[0])
resultsObjects := NewResultsObject(clusterConfig.GetAccountID(), clusterConfig.GetContextName(), args[0])

// submit resources
var r reporter.IReport

@@ -20,7 +20,9 @@ func GetSubmitCmd(ks meta.IKubescape) *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
},
}
submitCmd.PersistentFlags().StringVarP(&submitInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
submitCmd.PersistentFlags().StringVarP(&submitInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
submitCmd.PersistentFlags().StringVarP(&submitInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armo.cloud/docs/authentication")
submitCmd.PersistentFlags().StringVarP(&submitInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armo.cloud/docs/authentication")

submitCmd.AddCommand(getExceptionsCmd(ks, &submitInfo))
submitCmd.AddCommand(getResultsCmd(ks, &submitInfo))

@@ -67,9 +67,12 @@ type ITenantConfig interface {
DeleteCachedConfig() error

// getters
GetClusterName() string
GetContextName() string
GetAccountID() string
GetTennatEmail() string
GetTenantEmail() string
GetToken() string
GetClientID() string
GetSecretKey() string
GetConfigObj() *ConfigObj
// GetBackendAPI() getter.IBackend
// GenerateURL()
@@ -87,8 +90,7 @@ type LocalConfig struct {
}

func NewLocalConfig(
backendAPI getter.IBackend, customerGUID, clusterName string) *LocalConfig {
var configObj *ConfigObj
backendAPI getter.IBackend, credentials *Credentials, clusterName string) *LocalConfig {

lc := &LocalConfig{
backendAPI: backendAPI,
@@ -96,20 +98,14 @@ func NewLocalConfig(
}
// get from configMap
if existsConfigFile() { // get from file
configObj, _ = loadConfigFromFile()
} else {
configObj = &ConfigObj{}
}
if configObj != nil {
lc.configObj = configObj
}
if customerGUID != "" {
lc.configObj.AccountID = customerGUID // override config customerGUID
loadConfigFromFile(lc.configObj)
}

updateCredentials(lc.configObj, credentials)

if clusterName != "" {
lc.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
}
getAccountFromEnv(lc.configObj)

lc.backendAPI.SetAccountID(lc.configObj.AccountID)
lc.backendAPI.SetClientID(lc.configObj.ClientID)
@@ -119,9 +115,12 @@ func NewLocalConfig(
}

func (lc *LocalConfig) GetConfigObj() *ConfigObj { return lc.configObj }
func (lc *LocalConfig) GetTennatEmail() string { return lc.configObj.CustomerAdminEMail }
func (lc *LocalConfig) GetTenantEmail() string { return lc.configObj.CustomerAdminEMail }
func (lc *LocalConfig) GetAccountID() string { return lc.configObj.AccountID }
func (lc *LocalConfig) GetClusterName() string { return lc.configObj.ClusterName }
func (lc *LocalConfig) GetClientID() string { return lc.configObj.ClientID }
func (lc *LocalConfig) GetSecretKey() string { return lc.configObj.SecretKey }
func (lc *LocalConfig) GetContextName() string { return lc.configObj.ClusterName }
func (lc *LocalConfig) GetToken() string { return lc.configObj.Token }
func (lc *LocalConfig) IsConfigFound() bool { return existsConfigFile() }
func (lc *LocalConfig) SetTenant() error {

@@ -190,8 +189,8 @@ type ClusterConfig struct {
configObj *ConfigObj
}

func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, customerGUID, clusterName string) *ClusterConfig {
var configObj *ConfigObj
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, credentials *Credentials, clusterName string) *ClusterConfig {
// var configObj *ConfigObj
c := &ClusterConfig{
k8s: k8s,
backendAPI: backendAPI,
@@ -200,26 +199,23 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
configMapNamespace: getConfigMapNamespace(),
}

// get from configMap
// first, load from configMap
if c.existsConfigMap() {
configObj, _ = c.loadConfigFromConfigMap()
c.loadConfigFromConfigMap()
}
if configObj == nil && existsConfigFile() { // get from file
configObj, _ = loadConfigFromFile()
}
if configObj != nil {
c.configObj = configObj
}
if customerGUID != "" {
c.configObj.AccountID = customerGUID // override config customerGUID

// second, load from file
if existsConfigFile() { // get from file
loadConfigFromFile(c.configObj)
}
updateCredentials(c.configObj, credentials)

if clusterName != "" {
c.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
}
getAccountFromEnv(c.configObj)

if c.configObj.ClusterName == "" {
c.configObj.ClusterName = AdoptClusterName(k8sinterface.GetClusterName())
c.configObj.ClusterName = AdoptClusterName(k8sinterface.GetContextName())
} else { // override the cluster name if it has unwanted characters
c.configObj.ClusterName = AdoptClusterName(c.configObj.ClusterName)
}
@@ -234,7 +230,10 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
func (c *ClusterConfig) GetDefaultNS() string { return c.configMapNamespace }
func (c *ClusterConfig) GetAccountID() string { return c.configObj.AccountID }
func (c *ClusterConfig) GetTennatEmail() string { return c.configObj.CustomerAdminEMail }
func (c *ClusterConfig) GetClientID() string { return c.configObj.ClientID }
func (c *ClusterConfig) GetSecretKey() string { return c.configObj.SecretKey }
func (c *ClusterConfig) GetTenantEmail() string { return c.configObj.CustomerAdminEMail }
func (c *ClusterConfig) GetToken() string { return c.configObj.Token }
func (c *ClusterConfig) IsConfigFound() bool { return existsConfigFile() || c.existsConfigMap() }

func (c *ClusterConfig) SetTenant() error {
@@ -271,7 +270,7 @@ func (c *ClusterConfig) DeleteCachedConfig() error {
}
return nil
}
func (c *ClusterConfig) GetClusterName() string {
func (c *ClusterConfig) GetContextName() string {
return c.configObj.ClusterName
}

@@ -282,18 +281,26 @@ func (c *ClusterConfig) ToMapString() map[string]interface{} {
}
return m
}
func (c *ClusterConfig) loadConfigFromConfigMap() (*ConfigObj, error) {
func (c *ClusterConfig) loadConfigFromConfigMap() error {
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
if err != nil {
return nil, err
return err
}

if bData, err := json.Marshal(configMap.Data); err == nil {
return readConfig(bData)
}
return nil, nil
return loadConfigFromData(c.configObj, configMap.Data)
}

func loadConfigFromData(co *ConfigObj, data map[string]string) error {
var e error
if jsonConf, ok := data["config.json"]; ok {
e = readConfig([]byte(jsonConf), co)
}
if bData, err := json.Marshal(data); err == nil {
e = readConfig(bData, co)
}

return e
}
func (c *ClusterConfig) existsConfigMap() bool {
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
// TODO - check if has customerGUID
@@ -411,28 +418,27 @@ func (c *ClusterConfig) updateConfigData(configMap *corev1.ConfigMap) {
}
}
}
func loadConfigFromFile() (*ConfigObj, error) {
func loadConfigFromFile(configObj *ConfigObj) error {
dat, err := os.ReadFile(ConfigFileFullPath())
if err != nil {
return nil, err
return err
}

return readConfig(dat)
return readConfig(dat, configObj)
}
func readConfig(dat []byte) (*ConfigObj, error) {
func readConfig(dat []byte, configObj *ConfigObj) error {

if len(dat) == 0 {
return nil, nil
return nil
}
configObj := &ConfigObj{}

if err := json.Unmarshal(dat, configObj); err != nil {
return nil, err
return err
}
if configObj.AccountID == "" {
configObj.AccountID = configObj.CustomerGUID
}
configObj.CustomerGUID = ""
return configObj, nil
return nil
}

// Check if the customer is submitted
@@ -479,15 +485,34 @@ func getConfigMapNamespace() string {
return "default"
}

func getAccountFromEnv(configObj *ConfigObj) {
func getAccountFromEnv(credentials *Credentials) {
// load from env
if accountID := os.Getenv("KS_ACCOUNT_ID"); accountID != "" {
configObj.AccountID = accountID
if accountID := os.Getenv("KS_ACCOUNT_ID"); credentials.Account != "" && accountID != "" {
credentials.Account = accountID
}
if clientID := os.Getenv("KS_CLIENT_ID"); clientID != "" {
configObj.ClientID = clientID
if clientID := os.Getenv("KS_CLIENT_ID"); credentials.ClientID != "" && clientID != "" {
credentials.ClientID = clientID
}
if secretKey := os.Getenv("KS_SECRET_KEY"); secretKey != "" {
configObj.SecretKey = secretKey
if secretKey := os.Getenv("KS_SECRET_KEY"); credentials.SecretKey != "" && secretKey != "" {
credentials.SecretKey = secretKey
}
}

func updateCredentials(configObj *ConfigObj, credentials *Credentials) {

if credentials == nil {
credentials = &Credentials{}
}
getAccountFromEnv(credentials)

if credentials.Account != "" {
configObj.AccountID = credentials.Account // override config Account
}
if credentials.ClientID != "" {
configObj.ClientID = credentials.ClientID // override config ClientID
}
if credentials.SecretKey != "" {
configObj.SecretKey = credentials.SecretKey // override config SecretKey
}

}

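For orientation, the reworked loader reads the in-cluster configMap first, then the local config file, and finally lets explicitly supplied credentials (for example the --account/--client-id/--secret-key flags) override any field they set. A simplified sketch of that "non-empty fields win" merge, using stand-in types rather than the real cautils structs:

```go
// Illustrative sketch of the override order; types and field names are simplified stand-ins.
package main

import "fmt"

type creds struct{ Account, ClientID, SecretKey string }

// merge copies only the non-empty fields of src over dst, mirroring how
// explicitly supplied credentials override configMap- and file-loaded values.
func merge(dst, src *creds) {
	if src.Account != "" {
		dst.Account = src.Account
	}
	if src.ClientID != "" {
		dst.ClientID = src.ClientID
	}
	if src.SecretKey != "" {
		dst.SecretKey = src.SecretKey
	}
}

func main() {
	fromConfigMap := &creds{Account: "cm-account", ClientID: "cm-client"}
	fromFlags := &creds{Account: "flag-account"} // only --account was set
	merge(fromConfigMap, fromFlags)
	fmt.Println(fromConfigMap.Account, fromConfigMap.ClientID) // flag-account cm-client
}
```
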
193 core/cautils/customerloader_test.go (new file)

@@ -0,0 +1,193 @@
package cautils

import (
"encoding/json"
"testing"

"github.com/stretchr/testify/assert"
corev1 "k8s.io/api/core/v1"
)

func mockConfigObj() *ConfigObj {
return &ConfigObj{
AccountID: "aaa",
ClientID: "bbb",
SecretKey: "ccc",
ClusterName: "ddd",
CustomerAdminEMail: "ab@cd",
Token: "eee",
}
}
func mockLocalConfig() *LocalConfig {
return &LocalConfig{
backendAPI: nil,
configObj: mockConfigObj(),
}
}

func mockClusterConfig() *ClusterConfig {
return &ClusterConfig{
backendAPI: nil,
configObj: mockConfigObj(),
}
}
func TestConfig(t *testing.T) {
co := mockConfigObj()
cop := ConfigObj{}

assert.NoError(t, json.Unmarshal(co.Config(), &cop))
assert.Equal(t, co.AccountID, cop.AccountID)
assert.Equal(t, co.ClientID, cop.ClientID)
assert.Equal(t, co.SecretKey, cop.SecretKey)
assert.Equal(t, "", cop.ClusterName) // Not copied to bytes
assert.Equal(t, "", cop.CustomerAdminEMail) // Not copied to bytes
assert.Equal(t, "", cop.Token) // Not copied to bytes

}

func TestITenantConfig(t *testing.T) {
var lc ITenantConfig
var c ITenantConfig
lc = mockLocalConfig()
c = mockClusterConfig()

co := mockConfigObj()

// test LocalConfig methods
assert.Equal(t, co.AccountID, lc.GetAccountID())
assert.Equal(t, co.ClientID, lc.GetClientID())
assert.Equal(t, co.SecretKey, lc.GetSecretKey())
assert.Equal(t, co.ClusterName, lc.GetContextName())
assert.Equal(t, co.CustomerAdminEMail, lc.GetTenantEmail())
assert.Equal(t, co.Token, lc.GetToken())

// test ClusterConfig methods
assert.Equal(t, co.AccountID, c.GetAccountID())
assert.Equal(t, co.ClientID, c.GetClientID())
assert.Equal(t, co.SecretKey, c.GetSecretKey())
assert.Equal(t, co.ClusterName, c.GetContextName())
assert.Equal(t, co.CustomerAdminEMail, c.GetTenantEmail())
assert.Equal(t, co.Token, c.GetToken())
}

func TestUpdateConfigData(t *testing.T) {
c := mockClusterConfig()

configMap := &corev1.ConfigMap{}

c.updateConfigData(configMap)

assert.Equal(t, c.GetAccountID(), configMap.Data["accountID"])
assert.Equal(t, c.GetClientID(), configMap.Data["clientID"])
assert.Equal(t, c.GetSecretKey(), configMap.Data["secretKey"])
}

func TestReadConfig(t *testing.T) {
com := mockConfigObj()
co := &ConfigObj{}

b, e := json.Marshal(com)
assert.NoError(t, e)

readConfig(b, co)

assert.Equal(t, com.AccountID, co.AccountID)
assert.Equal(t, com.ClientID, co.ClientID)
assert.Equal(t, com.SecretKey, co.SecretKey)
assert.Equal(t, com.ClusterName, co.ClusterName)
assert.Equal(t, com.CustomerAdminEMail, co.CustomerAdminEMail)
assert.Equal(t, com.Token, co.Token)
}

func TestLoadConfigFromData(t *testing.T) {

// use case: all data is in base config
{
c := mockClusterConfig()
co := mockConfigObj()

configMap := &corev1.ConfigMap{}

c.updateConfigData(configMap)

c.configObj = &ConfigObj{}

loadConfigFromData(c.configObj, configMap.Data)

assert.Equal(t, c.GetAccountID(), co.AccountID)
assert.Equal(t, c.GetClientID(), co.ClientID)
assert.Equal(t, c.GetSecretKey(), co.SecretKey)
assert.Equal(t, c.GetContextName(), co.ClusterName)
assert.Equal(t, c.GetTenantEmail(), co.CustomerAdminEMail)
assert.Equal(t, c.GetToken(), co.Token)
}

// use case: all data is in config.json
{
c := mockClusterConfig()

co := mockConfigObj()
configMap := &corev1.ConfigMap{
Data: make(map[string]string),
}

configMap.Data["config.json"] = string(c.GetConfigObj().Config())
c.configObj = &ConfigObj{}

loadConfigFromData(c.configObj, configMap.Data)

assert.Equal(t, c.GetAccountID(), co.AccountID)
assert.Equal(t, c.GetClientID(), co.ClientID)
assert.Equal(t, c.GetSecretKey(), co.SecretKey)
}

// use case: some data is in config.json
{
c := mockClusterConfig()
configMap := &corev1.ConfigMap{
Data: make(map[string]string),
}

// add to map
configMap.Data["clientID"] = c.configObj.ClientID
configMap.Data["secretKey"] = c.configObj.SecretKey

// delete the content
c.configObj.ClientID = ""
c.configObj.SecretKey = ""

configMap.Data["config.json"] = string(c.GetConfigObj().Config())
loadConfigFromData(c.configObj, configMap.Data)

assert.NotEmpty(t, c.GetAccountID())
assert.NotEmpty(t, c.GetClientID())
assert.NotEmpty(t, c.GetSecretKey())
}

// use case: some data is in config.json
{
c := mockClusterConfig()
configMap := &corev1.ConfigMap{
Data: make(map[string]string),
}

c.configObj.AccountID = "tttt"

// add to map
configMap.Data["accountID"] = mockConfigObj().AccountID
configMap.Data["clientID"] = c.configObj.ClientID
configMap.Data["secretKey"] = c.configObj.SecretKey

// delete the content
c.configObj.ClientID = ""
c.configObj.SecretKey = ""

configMap.Data["config.json"] = string(c.GetConfigObj().Config())
loadConfigFromData(c.configObj, configMap.Data)

assert.Equal(t, mockConfigObj().AccountID, c.GetAccountID())
assert.NotEmpty(t, c.GetClientID())
assert.NotEmpty(t, c.GetSecretKey())
}

}

@@ -19,7 +19,7 @@ type OPASessionObj struct {
Policies []reporthandling.Framework // list of frameworks to scan
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<rtesource ID>]<resource>
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<rtesource ID>]<resource result>
ResourceSource map[string]string // resources sources, map[<rtesource ID>]<resource result>
ResourceSource map[string]reporthandling.Source // resources sources, map[<rtesource ID>]<resource result>
PostureReport *reporthandling.PostureReport // scan results v1 - Remove
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
@@ -39,7 +39,7 @@ func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SRe
ResourcesResult: make(map[string]resourcesresults.Result),
InfoMap: make(map[string]apis.StatusInfo),
ResourceToControlsMap: make(map[string][]string),
ResourceSource: make(map[string]string),
ResourceSource: make(map[string]reporthandling.Source),
SessionID: scanInfo.ScanID,
PostureReport: &reporthandling.PostureReport{
ClusterName: ClusterName,

@@ -1,10 +1,10 @@
package cautils

import (
pkgcautils "github.com/armosec/utils-go/utils"
"golang.org/x/mod/semver"

"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/utils-go/boolutils"
)

func NewPolicies() *Policies {
@@ -40,7 +40,7 @@ func ruleWithArmoOpaDependency(attributes map[string]interface{}) bool {
return false
}
if s, ok := attributes["armoOpa"]; ok { // TODO - make global
return pkgcautils.StringToBool(s.(string))
return boolutils.StringToBool(s.(string))
}
return false
}
@@ -51,18 +51,16 @@ func ruleWithArmoOpaDependency(attributes map[string]interface{}) bool {
func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version string) bool {
if from, ok := attributes["useFromKubescapeVersion"]; ok && from != nil {
if version != "" {

if semver.Compare(from.(string), BuildNumber) > 0 {
if semver.Compare(version, from.(string)) == -1 {
return false
}
}
}
if until, ok := attributes["useUntilKubescapeVersion"]; ok && until != nil {
if version != "" {
if semver.Compare(BuildNumber, until.(string)) >= 0 {
return false
}
} else {
if version == "" {
return false
}
if semver.Compare(version, until.(string)) >= 0 {
return false
}
}

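The rewritten isRuleKubescapeVersionCompatible gates a rule on the running version with golang.org/x/mod/semver, where semver.Compare(a, b) returns -1, 0, or 1 and version strings carry a leading "v". A small stand-alone sketch of the same version-window idea (a simplified illustration, not the exact kubescape function):

```go
// Illustrative version-window check using golang.org/x/mod/semver.
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// inWindow reports whether version is >= from (when from is set)
// and < until (when until is set). Versions need the "v" prefix.
func inWindow(version, from, until string) bool {
	if from != "" && semver.Compare(version, from) < 0 {
		return false
	}
	if until != "" && semver.Compare(version, until) >= 0 {
		return false
	}
	return true
}

func main() {
	fmt.Println(inWindow("v2.0.150", "v2.0.100", "v2.1.0")) // true: inside the window
	fmt.Println(inWindow("v2.1.0", "v2.0.100", "v2.1.0"))   // false: at or past "until"
}
```
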
@@ -9,14 +9,17 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/localworkload"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
var (
|
||||
YAML_PREFIX = []string{".yaml", ".yml"}
|
||||
JSON_PREFIX = []string{".json"}
|
||||
YAML_PREFIX = []string{"yaml", "yml"}
|
||||
JSON_PREFIX = []string{"json"}
|
||||
)
|
||||
|
||||
type FileFormat string
|
||||
@@ -26,23 +29,52 @@ const (
|
||||
JSON_FILE_FORMAT FileFormat = "json"
|
||||
)
|
||||
|
||||
func LoadResourcesFromFiles(inputPatterns []string) (map[string][]workloadinterface.IMetadata, error) {
|
||||
files, errs := listFiles(inputPatterns)
|
||||
// LoadResourcesFromHelmCharts scans a given path (recuresively) for helm charts, renders the templates and returns a list of workloads
|
||||
func LoadResourcesFromHelmCharts(basePath string) map[string][]workloadinterface.IMetadata {
|
||||
directories, _ := listDirs(basePath)
|
||||
helmDirectories := make([]string, 0)
|
||||
for _, dir := range directories {
|
||||
if ok, _ := IsHelmDirectory(dir); ok {
|
||||
helmDirectories = append(helmDirectories, dir)
|
||||
}
|
||||
}
|
||||
|
||||
result := map[string][]workloadinterface.IMetadata{}
|
||||
for _, helmDir := range helmDirectories {
|
||||
chart, err := NewHelmChart(helmDir)
|
||||
if err == nil {
|
||||
wls, errs := chart.GetWorkloadsWithDefaultValues()
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("Rendering of Helm chart template failed: %v", errs))
|
||||
continue
|
||||
}
|
||||
|
||||
for k, v := range wls {
|
||||
result[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
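A hedged usage sketch of LoadResourcesFromHelmCharts as added above; the import path follows this diff and the chart directory is only an example:

package main

import (
	"fmt"

	"github.com/armosec/kubescape/v2/core/cautils"
)

func main() {
	// The chart directory is an assumption for illustration; any path containing
	// Helm charts (directly or in subdirectories) works.
	sourceToWorkloads := cautils.LoadResourcesFromHelmCharts("./examples/helm_chart")
	for file, workloads := range sourceToWorkloads {
		for _, w := range workloads {
			fmt.Printf("%s -> %s\n", file, w.GetID())
		}
	}
}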
|
||||
|
||||
func LoadResourcesFromFiles(input, rootPath string) map[string][]workloadinterface.IMetadata {
|
||||
files, errs := listFiles(input)
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("%v", errs))
|
||||
}
|
||||
if len(files) == 0 {
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
workloads, errs := loadFiles(files)
|
||||
workloads, errs := loadFiles(rootPath, files)
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("%v", errs))
|
||||
}
|
||||
return workloads, nil
|
||||
|
||||
return workloads
|
||||
}
|
||||
|
||||
func loadFiles(filePaths []string) (map[string][]workloadinterface.IMetadata, []error) {
|
||||
func loadFiles(rootPath string, filePaths []string) (map[string][]workloadinterface.IMetadata, []error) {
|
||||
workloads := make(map[string][]workloadinterface.IMetadata, 0)
|
||||
errs := []error{}
|
||||
for i := range filePaths {
|
||||
@@ -51,15 +83,30 @@ func loadFiles(filePaths []string) (map[string][]workloadinterface.IMetadata, []
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
if len(f) == 0 {
|
||||
continue // empty file
|
||||
}
|
||||
|
||||
w, e := ReadFile(f, GetFileFormat(filePaths[i]))
|
||||
errs = append(errs, e...)
|
||||
if w != nil {
|
||||
if _, ok := workloads[filePaths[i]]; !ok {
|
||||
workloads[filePaths[i]] = []workloadinterface.IMetadata{}
|
||||
if e != nil {
|
||||
logger.L().Debug("failed to read file", helpers.String("file", filePaths[i]), helpers.Error(e))
|
||||
}
|
||||
if len(w) != 0 {
|
||||
path := filePaths[i]
|
||||
if _, ok := workloads[path]; !ok {
|
||||
workloads[path] = []workloadinterface.IMetadata{}
|
||||
}
|
||||
wSlice := workloads[filePaths[i]]
|
||||
wSlice = append(wSlice, w...)
|
||||
workloads[filePaths[i]] = wSlice
|
||||
wSlice := workloads[path]
|
||||
for j := range w {
|
||||
lw := localworkload.NewLocalWorkload(w[j].GetObject())
|
||||
if relPath, err := filepath.Rel(rootPath, path); err == nil {
|
||||
lw.SetPath(relPath)
|
||||
} else {
|
||||
lw.SetPath(path)
|
||||
}
|
||||
wSlice = append(wSlice, lw)
|
||||
}
|
||||
workloads[path] = wSlice
|
||||
}
|
||||
}
|
||||
return workloads, errs
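The loop above wraps every parsed object in a LocalWorkload and stores a path relative to rootPath when filepath.Rel succeeds, keeping the original path otherwise. A small self-contained sketch of just that fallback (the helper name is illustrative):

package main

import (
	"fmt"
	"path/filepath"
)

// pathForWorkload mirrors the fallback above: prefer a path relative to rootPath,
// keep the original path when filepath.Rel cannot relate the two.
func pathForWorkload(rootPath, path string) string {
	if relPath, err := filepath.Rel(rootPath, path); err == nil {
		return relPath
	}
	return path
}

func main() {
	fmt.Println(pathForWorkload("/repo", "/repo/examples/online-boutique/adservice.yaml")) // examples/online-boutique/adservice.yaml
	fmt.Println(pathForWorkload("", "/repo/examples/online-boutique/adservice.yaml"))      // unchanged: Rel fails for an empty root
}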
|
||||
@@ -68,47 +115,65 @@ func loadFiles(filePaths []string) (map[string][]workloadinterface.IMetadata, []
|
||||
func loadFile(filePath string) ([]byte, error) {
|
||||
return os.ReadFile(filePath)
|
||||
}
|
||||
func ReadFile(fileContent []byte, fileFromat FileFormat) ([]workloadinterface.IMetadata, []error) {
|
||||
func ReadFile(fileContent []byte, fileFormat FileFormat) ([]workloadinterface.IMetadata, error) {
|
||||
|
||||
switch fileFromat {
|
||||
switch fileFormat {
|
||||
case YAML_FILE_FORMAT:
|
||||
return readYamlFile(fileContent)
|
||||
case JSON_FILE_FORMAT:
|
||||
return readJsonFile(fileContent)
|
||||
default:
|
||||
return nil, nil // []error{fmt.Errorf("file extension %s not supported", fileFromat)}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func listFiles(patterns []string) ([]string, []error) {
|
||||
files := []string{}
|
||||
errs := []error{}
|
||||
for i := range patterns {
|
||||
if strings.HasPrefix(patterns[i], "http") {
|
||||
continue
|
||||
}
|
||||
if !filepath.IsAbs(patterns[i]) {
|
||||
o, _ := os.Getwd()
|
||||
patterns[i] = filepath.Join(o, patterns[i])
|
||||
}
|
||||
if IsFile(patterns[i]) {
|
||||
files = append(files, patterns[i])
|
||||
} else {
|
||||
f, err := glob(filepath.Split(patterns[i])) //filepath.Glob(patterns[i])
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
files = append(files, f...)
|
||||
}
|
||||
}
|
||||
}
|
||||
return files, errs
|
||||
// listFiles returns the list of absolute paths, full file path and list of errors. The list of abs paths and full path have the same length
|
||||
func listFiles(pattern string) ([]string, []error) {
|
||||
return listFilesOrDirectories(pattern, false)
|
||||
}
|
||||
|
||||
func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, []error) {
|
||||
// listDirs returns the list of absolute paths, full directories path and list of errors. The list of abs paths and full path have the same length
|
||||
func listDirs(pattern string) ([]string, []error) {
|
||||
return listFilesOrDirectories(pattern, true)
|
||||
}
|
||||
|
||||
func listFilesOrDirectories(pattern string, onlyDirectories bool) ([]string, []error) {
|
||||
var paths []string
|
||||
errs := []error{}
|
||||
|
||||
if !filepath.IsAbs(pattern) {
|
||||
o, _ := os.Getwd()
|
||||
pattern = filepath.Join(o, pattern)
|
||||
}
|
||||
|
||||
if !onlyDirectories && IsFile(pattern) {
|
||||
paths = append(paths, pattern)
|
||||
return paths, errs
|
||||
}
|
||||
|
||||
root, shouldMatch := filepath.Split(pattern)
|
||||
|
||||
if IsDir(pattern) {
|
||||
root = pattern
|
||||
shouldMatch = "*"
|
||||
}
|
||||
if shouldMatch == "" {
|
||||
shouldMatch = "*"
|
||||
}
|
||||
|
||||
f, err := glob(root, shouldMatch, onlyDirectories)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else {
|
||||
paths = append(paths, f...)
|
||||
}
|
||||
|
||||
return paths, errs
|
||||
}
|
||||
|
||||
func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, error) {
|
||||
defer recover()
|
||||
|
||||
r := bytes.NewReader(yamlFile)
|
||||
dec := yaml.NewDecoder(r)
|
||||
yamlObjs := []workloadinterface.IMetadata{}
|
||||
@@ -127,19 +192,17 @@ func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, []error) {
|
||||
yamlObjs = append(yamlObjs, o)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
errs = append(errs, fmt.Errorf("failed to convert yaml file to map[string]interface, file content: %v", j))
|
||||
}
|
||||
}
|
||||
|
||||
return yamlObjs, errs
|
||||
return yamlObjs, nil
|
||||
}
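readYamlFile iterates a yaml.Decoder over the file contents so that multi-document YAML (documents separated by "---") yields one object per document. A minimal standalone sketch of that decode loop with gopkg.in/yaml.v2, using an inline document instead of a real manifest:

package main

import (
	"bytes"
	"fmt"
	"io"

	"gopkg.in/yaml.v2"
)

func main() {
	// Two YAML documents separated by "---", standing in for a multi-document manifest.
	doc := []byte("kind: A\n---\nkind: B\n")

	dec := yaml.NewDecoder(bytes.NewReader(doc))
	for {
		var obj map[interface{}]interface{} // yaml.v2 decodes mappings with interface{} keys
		if err := dec.Decode(&obj); err != nil {
			if err == io.EOF {
				break // no more documents
			}
			fmt.Println("decode error:", err)
			break
		}
		fmt.Println(obj)
	}
}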
|
||||
|
||||
func readJsonFile(jsonFile []byte) ([]workloadinterface.IMetadata, []error) {
|
||||
func readJsonFile(jsonFile []byte) ([]workloadinterface.IMetadata, error) {
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
var jsonObj interface{}
|
||||
if err := json.Unmarshal(jsonFile, &jsonObj); err != nil {
|
||||
return workloads, []error{err}
|
||||
return workloads, err
|
||||
}
|
||||
|
||||
convertJsonToWorkload(jsonObj, &workloads)
|
||||
@@ -178,28 +241,47 @@ func convertYamlToJson(i interface{}) interface{} {
|
||||
}
|
||||
|
||||
func IsYaml(filePath string) bool {
|
||||
return StringInSlice(YAML_PREFIX, filepath.Ext(filePath)) != ValueNotFound
|
||||
return StringInSlice(YAML_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
|
||||
}
|
||||
|
||||
func IsJson(filePath string) bool {
|
||||
return StringInSlice(JSON_PREFIX, filepath.Ext(filePath)) != ValueNotFound
|
||||
return StringInSlice(JSON_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
|
||||
}
|
||||
|
||||
func glob(root, pattern string) ([]string, error) {
|
||||
func glob(root, pattern string, onlyDirectories bool) ([]string, error) {
|
||||
var matches []string
|
||||
|
||||
err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// listing only directories
|
||||
if onlyDirectories {
|
||||
if info.IsDir() {
|
||||
if matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {
|
||||
return err
|
||||
} else if matched {
|
||||
matches = append(matches, path)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// listing only files
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
fileFormat := GetFileFormat(path)
|
||||
if !(fileFormat == JSON_FILE_FORMAT || fileFormat == YAML_FILE_FORMAT) {
|
||||
return nil
|
||||
}
|
||||
if matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {
|
||||
return err
|
||||
} else if matched {
|
||||
matches = append(matches, path)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
@@ -207,6 +289,8 @@ func glob(root, pattern string) ([]string, error) {
|
||||
}
|
||||
return matches, nil
|
||||
}
|
||||
|
||||
// IsFile checks if a given path is a file
|
||||
func IsFile(name string) bool {
|
||||
if fi, err := os.Stat(name); err == nil {
|
||||
if fi.Mode().IsRegular() {
|
||||
@@ -216,6 +300,16 @@ func IsFile(name string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// IsDir checks if a given path is a directory
|
||||
func IsDir(name string) bool {
|
||||
if info, err := os.Stat(name); err == nil {
|
||||
if info.IsDir() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func GetFileFormat(filePath string) FileFormat {
|
||||
if IsYaml(filePath) {
|
||||
return YAML_FILE_FORMAT
|
||||
|
||||
@@ -6,59 +6,99 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/localworkload"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func onlineBoutiquePath() string {
|
||||
o, _ := os.Getwd()
|
||||
return filepath.Join(filepath.Dir(o), "../examples/online-boutique/*")
|
||||
return filepath.Join(filepath.Dir(o), "..", "examples", "online-boutique")
|
||||
}
|
||||
|
||||
func helmChartPath() string {
|
||||
o, _ := os.Getwd()
|
||||
return filepath.Join(filepath.Dir(o), "..", "examples", "helm_chart")
|
||||
}
|
||||
|
||||
func TestListFiles(t *testing.T) {
|
||||
|
||||
filesPath := onlineBoutiquePath()
|
||||
|
||||
files, errs := listFiles([]string{filesPath})
|
||||
files, errs := listFiles(filesPath)
|
||||
assert.Equal(t, 0, len(errs))
|
||||
assert.Equal(t, 12, len(files))
|
||||
}
|
||||
|
||||
func TestLoadResourcesFromFiles(t *testing.T) {
|
||||
workloads, err := LoadResourcesFromFiles([]string{onlineBoutiquePath()})
|
||||
assert.NoError(t, err)
|
||||
workloads := LoadResourcesFromFiles(onlineBoutiquePath(), "")
|
||||
assert.Equal(t, 12, len(workloads))
|
||||
|
||||
for i, w := range workloads {
|
||||
switch filepath.Base(i) {
|
||||
case "adservice.yaml":
|
||||
assert.Equal(t, 2, len(w))
|
||||
assert.Equal(t, "apps/v1//Deployment/adservice", w[0].GetID())
|
||||
assert.Equal(t, "/v1//Service/adservice", w[1].GetID())
|
||||
assert.Equal(t, "apps/v1//Deployment/adservice", getRelativePath(w[0].GetID()))
|
||||
assert.Equal(t, "/v1//Service/adservice", getRelativePath(w[1].GetID()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadResourcesFromHelmCharts(t *testing.T) {
|
||||
sourceToWorkloads := LoadResourcesFromHelmCharts(helmChartPath())
|
||||
assert.Equal(t, 6, len(sourceToWorkloads))
|
||||
|
||||
for file, workloads := range sourceToWorkloads {
|
||||
assert.Equalf(t, 1, len(workloads), "expected 1 workload in file %s", file)
|
||||
|
||||
w := workloads[0]
|
||||
assert.True(t, localworkload.IsTypeLocalWorkload(w.GetObject()), "Expected localworkload as object type")
|
||||
|
||||
switch filepath.Base(file) {
|
||||
case "serviceaccount.yaml":
|
||||
assert.Equal(t, "/v1//ServiceAccount/kubescape-discovery", getRelativePath(w.GetID()))
|
||||
case "clusterrole.yaml":
|
||||
assert.Equal(t, "rbac.authorization.k8s.io/v1//ClusterRole/-kubescape", getRelativePath(w.GetID()))
|
||||
case "cronjob.yaml":
|
||||
assert.Equal(t, "batch/v1//CronJob/-kubescape", getRelativePath(w.GetID()))
|
||||
case "role.yaml":
|
||||
assert.Equal(t, "rbac.authorization.k8s.io/v1//Role/-kubescape", getRelativePath(w.GetID()))
|
||||
case "rolebinding.yaml":
|
||||
assert.Equal(t, "rbac.authorization.k8s.io/v1//RoleBinding/-kubescape", getRelativePath(w.GetID()))
|
||||
case "clusterrolebinding.yaml":
|
||||
assert.Equal(t, "rbac.authorization.k8s.io/v1//ClusterRoleBinding/-kubescape", getRelativePath(w.GetID()))
|
||||
default:
|
||||
assert.Failf(t, "missing case for file: %s", filepath.Base(file))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadFiles(t *testing.T) {
|
||||
files, _ := listFiles([]string{onlineBoutiquePath()})
|
||||
_, err := loadFiles(files)
|
||||
files, _ := listFiles(onlineBoutiquePath())
|
||||
_, err := loadFiles("", files)
|
||||
assert.Equal(t, 0, len(err))
|
||||
}
|
||||
|
||||
func TestListDirs(t *testing.T) {
|
||||
dirs, _ := listDirs(filepath.Join(onlineBoutiquePath(), "adservice.yaml"))
|
||||
assert.Equal(t, 0, len(dirs))
|
||||
|
||||
expectedDirs := []string{filepath.Join("examples", "helm_chart"), filepath.Join("examples", "helm_chart", "templates")}
|
||||
dirs, _ = listDirs(helmChartPath())
|
||||
assert.Equal(t, len(expectedDirs), len(dirs))
|
||||
for i := range expectedDirs {
|
||||
assert.Contains(t, dirs[i], expectedDirs[i])
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadFile(t *testing.T) {
|
||||
files, _ := listFiles([]string{strings.Replace(onlineBoutiquePath(), "*", "adservice.yaml", 1)})
|
||||
files, _ := listFiles(filepath.Join(onlineBoutiquePath(), "adservice.yaml"))
|
||||
assert.Equal(t, 1, len(files))
|
||||
|
||||
_, err := loadFile(files[0])
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
func TestMapResources(t *testing.T) {
|
||||
// policyHandler := &PolicyHandler{}
|
||||
// k8sResources, err := policyHandler.loadResources(opaSessionObj.Frameworks, scanInfo)
|
||||
// files, _ := listFiles([]string{onlineBoutiquePath()})
|
||||
// bb, err := loadFile(files[0])
|
||||
// if len(err) > 0 {
|
||||
// t.Errorf("%v", err)
|
||||
// }
|
||||
// for i := range bb {
|
||||
// t.Errorf("%s", bb[i].ToString())
|
||||
// }
|
||||
|
||||
func getRelativePath(p string) string {
|
||||
pp := strings.SplitAfter(p, "api=")
|
||||
return pp[1]
|
||||
}
|
||||
|
||||
@@ -34,7 +34,7 @@ var (
|
||||
|
||||
armoDevERURL = "report.eudev3.cyberarmorsoft.com"
|
||||
armoDevBEURL = "api-dev.armo.cloud"
|
||||
armoDevFEURL = "armoui-dev.eudev3.cyberarmorsoft.com"
|
||||
armoDevFEURL = "cloud-dev.armosec.io"
|
||||
armoDevAUTHURL = "eggauth.eudev3.cyberarmorsoft.com"
|
||||
)
|
||||
|
||||
@@ -147,7 +147,8 @@ func (armoAPI *ArmoAPI) IsLoggedIn() bool { return armoAPI.loggedIn
|
||||
func (armoAPI *ArmoAPI) GetClientID() string { return armoAPI.clientID }
|
||||
func (armoAPI *ArmoAPI) GetSecretKey() string { return armoAPI.secretKey }
|
||||
func (armoAPI *ArmoAPI) GetFrontendURL() string { return armoAPI.feURL }
|
||||
func (armoAPI *ArmoAPI) GetAPIURL() string { return armoAPI.apiURL }
|
||||
func (armoAPI *ArmoAPI) GetApiURL() string { return armoAPI.apiURL }
|
||||
func (armoAPI *ArmoAPI) GetAuthURL() string { return armoAPI.authURL }
|
||||
func (armoAPI *ArmoAPI) GetReportReceiverURL() string { return armoAPI.erURL }
|
||||
func (armoAPI *ArmoAPI) SetAccountID(accountID string) { armoAPI.accountID = accountID }
|
||||
func (armoAPI *ArmoAPI) SetClientID(clientID string) { armoAPI.clientID = clientID }
|
||||
|
||||
@@ -13,8 +13,7 @@ var NativeFrameworks = []string{"nsa", "mitre", "armobest", "devopsbest"}
|
||||
|
||||
func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/armoFrameworks"
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
@@ -31,8 +30,7 @@ func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
|
||||
|
||||
func (armoAPI *ArmoAPI) getListFrameworkURL() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/armoFrameworks"
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
@@ -42,8 +40,7 @@ func (armoAPI *ArmoAPI) getListFrameworkURL() string {
|
||||
}
|
||||
func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/armoPostureExceptions"
|
||||
|
||||
q := u.Query()
|
||||
@@ -58,8 +55,7 @@ func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
|
||||
|
||||
func (armoAPI *ArmoAPI) exceptionsURL(exceptionsPolicyName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/postureExceptionPolicy"
|
||||
|
||||
q := u.Query()
|
||||
@@ -75,14 +71,13 @@ func (armoAPI *ArmoAPI) exceptionsURL(exceptionsPolicyName string) string {
|
||||
|
||||
func (armoAPI *ArmoAPI) getAccountConfigDefault(clusterName string) string {
|
||||
config := armoAPI.getAccountConfig(clusterName)
|
||||
url := config + "&scope=default"
|
||||
url := config + "&scope=customer"
|
||||
return url
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/armoCustomerConfiguration"
|
||||
|
||||
q := u.Query()
|
||||
@@ -97,24 +92,21 @@ func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
|
||||
|
||||
func (armoAPI *ArmoAPI) getAccountURL() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/createTenant"
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getApiToken() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.authURL
|
||||
u.Path = "frontegg/identity/resources/auth/v1/api-token"
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetAuthURL())
|
||||
u.Path = "identity/resources/auth/v1/api-token"
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getOpenidCustomers() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Scheme, u.Host = parseHost(armoAPI.GetApiURL())
|
||||
u.Path = "api/v1/openid_customers"
|
||||
return u.String()
|
||||
}
|
||||
@@ -173,3 +165,12 @@ func (armoAPI *ArmoAPI) getCustomerGUIDFallBack() string {
|
||||
}
|
||||
return "11111111-1111-1111-1111-111111111111"
|
||||
}
|
||||
|
||||
func parseHost(host string) (string, string) {
|
||||
if strings.HasPrefix(host, "http://") {
|
||||
return "http", strings.Replace(host, "http://", "", 1)
|
||||
}
|
||||
|
||||
// default scheme
|
||||
return "https", strings.Replace(host, "https://", "", 1)
|
||||
}
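parseHost defaults to https and only downgrades to http when the value carries an explicit http:// prefix. A standalone sketch of the same behavior (the helper name and sample hosts are illustrative):

package main

import (
	"fmt"
	"strings"
)

// splitScheme mirrors parseHost above: strip an explicit scheme prefix if present,
// otherwise assume https.
func splitScheme(host string) (scheme, hostname string) {
	if strings.HasPrefix(host, "http://") {
		return "http", strings.TrimPrefix(host, "http://")
	}
	return "https", strings.TrimPrefix(host, "https://")
}

func main() {
	fmt.Println(splitScheme("api-dev.armo.cloud"))           // https api-dev.armo.cloud
	fmt.Println(splitScheme("http://localhost:7555"))        // http localhost:7555
	fmt.Println(splitScheme("https://cloud-dev.armosec.io")) // https cloud-dev.armosec.io
}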
|
||||
|
||||
91  core/cautils/helmchart.go  Normal file
@@ -0,0 +1,91 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/localworkload"
|
||||
helmchart "helm.sh/helm/v3/pkg/chart"
|
||||
helmloader "helm.sh/helm/v3/pkg/chart/loader"
|
||||
helmchartutil "helm.sh/helm/v3/pkg/chartutil"
|
||||
helmengine "helm.sh/helm/v3/pkg/engine"
|
||||
)
|
||||
|
||||
type HelmChart struct {
|
||||
chart *helmchart.Chart
|
||||
path string
|
||||
}
|
||||
|
||||
func IsHelmDirectory(path string) (bool, error) {
|
||||
return helmchartutil.IsChartDir(path)
|
||||
}
|
||||
|
||||
func NewHelmChart(path string) (*HelmChart, error) {
|
||||
chart, err := helmloader.Load(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &HelmChart{
|
||||
chart: chart,
|
||||
path: path,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (hc *HelmChart) GetName() string {
|
||||
return hc.chart.Name()
|
||||
}
|
||||
|
||||
func (hc *HelmChart) GetDefaultValues() map[string]interface{} {
|
||||
return hc.chart.Values
|
||||
}
|
||||
|
||||
// GetWorkloads renders chart template using the default values and returns a map of source file to its workloads
|
||||
func (hc *HelmChart) GetWorkloadsWithDefaultValues() (map[string][]workloadinterface.IMetadata, []error) {
|
||||
return hc.GetWorkloads(hc.GetDefaultValues())
|
||||
}
|
||||
|
||||
// GetWorkloads renders chart template using the provided values and returns a map of source (absolute) file path to its workloads
|
||||
func (hc *HelmChart) GetWorkloads(values map[string]interface{}) (map[string][]workloadinterface.IMetadata, []error) {
|
||||
vals, err := helmchartutil.ToRenderValues(hc.chart, values, helmchartutil.ReleaseOptions{}, nil)
|
||||
if err != nil {
|
||||
return nil, []error{err}
|
||||
}
|
||||
|
||||
sourceToFile, err := helmengine.Render(hc.chart, vals)
|
||||
if err != nil {
|
||||
return nil, []error{err}
|
||||
}
|
||||
|
||||
workloads := make(map[string][]workloadinterface.IMetadata, 0)
|
||||
errs := []error{}
|
||||
|
||||
for path, renderedYaml := range sourceToFile {
|
||||
if !IsYaml(strings.ToLower(path)) {
|
||||
continue
|
||||
}
|
||||
|
||||
wls, e := ReadFile([]byte(renderedYaml), YAML_FILE_FORMAT)
|
||||
if e != nil {
|
||||
logger.L().Debug("failed to read rendered yaml file", helpers.String("file", path), helpers.Error(e))
|
||||
}
|
||||
if len(wls) == 0 {
|
||||
continue
|
||||
}
|
||||
if firstPathSeparatorIndex := strings.Index(path, string(os.PathSeparator)); firstPathSeparatorIndex != -1 {
|
||||
absPath := filepath.Join(hc.path, path[firstPathSeparatorIndex:])
|
||||
|
||||
workloads[absPath] = []workloadinterface.IMetadata{}
|
||||
for i := range wls {
|
||||
lw := localworkload.NewLocalWorkload(wls[i].GetObject())
|
||||
lw.SetPath(absPath)
|
||||
workloads[absPath] = append(workloads[absPath], lw)
|
||||
}
|
||||
}
|
||||
}
|
||||
return workloads, errs
|
||||
}
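A hedged usage sketch of the HelmChart API introduced in this file; the import path follows this diff and the chart directory is only an example:

package main

import (
	"fmt"
	"log"

	"github.com/armosec/kubescape/v2/core/cautils"
)

func main() {
	chart, err := cautils.NewHelmChart("./examples/helm_chart") // path is illustrative
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("chart:", chart.GetName())

	fileToWorkloads, errs := chart.GetWorkloadsWithDefaultValues()
	for _, e := range errs {
		log.Println("render error:", e)
	}
	for file, wls := range fileToWorkloads {
		fmt.Printf("%s: %d workload(s)\n", file, len(wls))
	}
}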
|
||||
133  core/cautils/helmchart_test.go  Normal file
@@ -0,0 +1,133 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/localworkload"
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
type HelmChartTestSuite struct {
|
||||
suite.Suite
|
||||
helmChartPath string
|
||||
expectedFiles []string
|
||||
expectedDefaultValues map[string]interface{}
|
||||
}
|
||||
|
||||
func TestHelmChartTestSuite(t *testing.T) {
|
||||
suite.Run(t, new(HelmChartTestSuite))
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) SetupSuite() {
|
||||
o, _ := os.Getwd()
|
||||
|
||||
s.helmChartPath = filepath.Join(filepath.Dir(o), "..", "examples", "helm_chart")
|
||||
|
||||
s.expectedFiles = []string{
|
||||
filepath.Join(s.helmChartPath, "templates", "clusterrolebinding.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "clusterrole.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "serviceaccount.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "rolebinding.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "role.yaml"),
|
||||
filepath.Join(s.helmChartPath, "templates", "cronjob.yaml"),
|
||||
}
|
||||
|
||||
var obj interface{}
|
||||
file, _ := ioutil.ReadFile(filepath.Join("testdata", "helm_expected_default_values.json"))
|
||||
_ = json.Unmarshal([]byte(file), &obj)
|
||||
s.expectedDefaultValues = obj.(map[string]interface{})
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestInvalidHelmDirectory() {
|
||||
_, err := NewHelmChart("/invalid_path")
|
||||
s.Error(err)
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestValidHelmDirectory() {
|
||||
chart, err := NewHelmChart(s.helmChartPath)
|
||||
s.NoError(err)
|
||||
s.NotNil(chart)
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestGetName() {
|
||||
chart, _ := NewHelmChart(s.helmChartPath)
|
||||
s.Equal("kubescape", chart.GetName())
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestGetDefaultValues() {
|
||||
chart, _ := NewHelmChart(s.helmChartPath)
|
||||
|
||||
values := chart.GetDefaultValues()
|
||||
|
||||
valuesJson, _ := json.Marshal(values)
|
||||
expectedValuesJson, _ := json.Marshal(s.expectedDefaultValues)
|
||||
|
||||
s.JSONEq(string(valuesJson), string(expectedValuesJson))
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestGetWorkloadsWithOverride() {
|
||||
chart, err := NewHelmChart(s.helmChartPath)
|
||||
s.NoError(err, "Expected a valid helm chart")
|
||||
|
||||
values := chart.GetDefaultValues()
|
||||
|
||||
// Default pullPolicy value = Always
|
||||
pullPolicyValue := values["image"].(map[string]interface{})["pullPolicy"].(string)
|
||||
s.Equal(pullPolicyValue, "Always")
|
||||
|
||||
// Override default value
|
||||
values["image"].(map[string]interface{})["pullPolicy"] = "Never"
|
||||
|
||||
fileToWorkloads, errs := chart.GetWorkloads(values)
|
||||
s.Len(errs, 0)
|
||||
|
||||
s.Lenf(fileToWorkloads, len(s.expectedFiles), "Expected %d files", len(s.expectedFiles))
|
||||
|
||||
for _, expectedFile := range s.expectedFiles {
|
||||
s.Contains(fileToWorkloads, expectedFile)
|
||||
s.FileExists(expectedFile)
|
||||
s.GreaterOrEqualf(len(fileToWorkloads[expectedFile]), 1, "Expected at least one workload in %q", expectedFile)
|
||||
|
||||
for i := range fileToWorkloads[expectedFile] {
|
||||
pathInWorkload := fileToWorkloads[expectedFile][i].(*localworkload.LocalWorkload).GetPath()
|
||||
s.Equal(pathInWorkload, expectedFile, "Expected GetPath() to return a valid path on workload")
|
||||
}
|
||||
|
||||
if strings.Contains(expectedFile, "cronjob.yaml") {
|
||||
jsonBytes, _ := json.Marshal(fileToWorkloads[expectedFile][0].GetObject())
|
||||
s.Contains(string(jsonBytes), "\"imagePullPolicy\":\"Never\"", "Expected the overridden value of imagePullPolicy to be 'Never'")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestGetWorkloadsMissingValue() {
|
||||
chart, _ := NewHelmChart(s.helmChartPath)
|
||||
|
||||
values := chart.GetDefaultValues()
|
||||
delete(values, "image")
|
||||
|
||||
fileToWorkloads, errs := chart.GetWorkloads(values)
|
||||
s.Nil(fileToWorkloads)
|
||||
s.Len(errs, 1, "Expected an error due to missing value")
|
||||
|
||||
expectedErrMsg := "<.Values.image.repository>: nil pointer"
|
||||
s.Containsf(errs[0].Error(), expectedErrMsg, "expected error containing %q, got %q", expectedErrMsg, errs[0])
|
||||
}
|
||||
|
||||
func (s *HelmChartTestSuite) TestIsHelmDirectory() {
|
||||
ok, err := IsHelmDirectory(s.helmChartPath)
|
||||
s.True(ok)
|
||||
s.NoError(err)
|
||||
|
||||
o, _ := os.Getwd()
|
||||
nonHelmDir := filepath.Join(filepath.Dir(o), "../examples/online-boutique")
|
||||
ok, err = IsHelmDirectory(nonHelmDir)
|
||||
s.False(ok)
|
||||
s.Contains(err.Error(), "no Chart.yaml exists in directory")
|
||||
}
|
||||
127  core/cautils/localgitrepository.go  Normal file
@@ -0,0 +1,127 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/go-git-url/apis"
|
||||
gitv5 "github.com/go-git/go-git/v5"
|
||||
configv5 "github.com/go-git/go-git/v5/config"
|
||||
plumbingv5 "github.com/go-git/go-git/v5/plumbing"
|
||||
)
|
||||
|
||||
type LocalGitRepository struct {
|
||||
repo *gitv5.Repository
|
||||
head *plumbingv5.Reference
|
||||
config *configv5.Config
|
||||
}
|
||||
|
||||
func NewLocalGitRepository(path string) (*LocalGitRepository, error) {
|
||||
gitRepo, err := gitv5.PlainOpenWithOptions(path, &gitv5.PlainOpenOptions{DetectDotGit: true})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
head, err := gitRepo.Head()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !head.Name().IsBranch() {
|
||||
return nil, fmt.Errorf("current HEAD reference is not a branch")
|
||||
}
|
||||
|
||||
config, err := gitRepo.Config()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &LocalGitRepository{
|
||||
repo: gitRepo,
|
||||
head: head,
|
||||
config: config,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetBranchName get current branch name
|
||||
func (g *LocalGitRepository) GetBranchName() string {
|
||||
return g.head.Name().Short()
|
||||
}
|
||||
|
||||
// GetRemoteUrl get default remote URL
|
||||
func (g *LocalGitRepository) GetRemoteUrl() (string, error) {
|
||||
branchName := g.GetBranchName()
|
||||
if branchRef, branchFound := g.config.Branches[branchName]; branchFound {
|
||||
remoteName := branchRef.Remote
|
||||
if len(g.config.Remotes[remoteName].URLs) == 0 {
|
||||
return "", fmt.Errorf("expected to find URLs for remote '%s', branch '%s'", remoteName, branchName)
|
||||
}
|
||||
return g.config.Remotes[remoteName].URLs[0], nil
|
||||
}
|
||||
|
||||
const defaultRemoteName string = "origin"
|
||||
if len(g.config.Remotes[defaultRemoteName].URLs) == 0 {
|
||||
return "", fmt.Errorf("expected to find URLs for remote '%s'", defaultRemoteName)
|
||||
}
|
||||
return g.config.Remotes[defaultRemoteName].URLs[0], nil
|
||||
}
|
||||
|
||||
// GetName get origin name without the .git suffix
|
||||
func (g *LocalGitRepository) GetName() (string, error) {
|
||||
originUrl, err := g.GetRemoteUrl()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
baseName := path.Base(originUrl)
|
||||
// remove .git
|
||||
return strings.TrimSuffix(baseName, ".git"), nil
|
||||
}
|
||||
|
||||
// GetLastCommit get latest commit object
|
||||
func (g *LocalGitRepository) GetLastCommit() (*apis.Commit, error) {
|
||||
return g.GetFileLastCommit("")
|
||||
}
|
||||
|
||||
// GetFileLastCommit get file latest commit object, if empty will return latest commit
|
||||
func (g *LocalGitRepository) GetFileLastCommit(filePath string) (*apis.Commit, error) {
|
||||
// By default, returns commit information from current HEAD
|
||||
logOptions := &gitv5.LogOptions{}
|
||||
|
||||
if filePath != "" {
|
||||
logOptions.FileName = &filePath
|
||||
logOptions.Order = gitv5.LogOrderCommitterTime // faster -> LogOrderDFSPost
|
||||
}
|
||||
|
||||
cIter, err := g.repo.Log(logOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
commit, err := cIter.Next()
|
||||
defer cIter.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &apis.Commit{
|
||||
SHA: commit.Hash.String(),
|
||||
Author: apis.Committer{
|
||||
Name: commit.Author.Name,
|
||||
Email: commit.Author.Email,
|
||||
Date: commit.Author.When,
|
||||
},
|
||||
Message: commit.Message,
|
||||
Committer: apis.Committer{},
|
||||
Files: []apis.Files{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (g *LocalGitRepository) GetRootDir() (string, error) {
|
||||
wt, err := g.repo.Worktree()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get repo root")
|
||||
}
|
||||
|
||||
return wt.Filesystem.Root(), nil
|
||||
}
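A hedged usage sketch of the new LocalGitRepository wrapper; the import path follows this diff, and the example assumes it is run somewhere inside a git work tree:

package main

import (
	"fmt"
	"log"

	"github.com/armosec/kubescape/v2/core/cautils"
)

func main() {
	// Any path inside a checked-out repository works thanks to DetectDotGit.
	repo, err := cautils.NewLocalGitRepository(".")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("branch:", repo.GetBranchName())

	if url, err := repo.GetRemoteUrl(); err == nil {
		fmt.Println("remote:", url)
	}
	if commit, err := repo.GetLastCommit(); err == nil {
		fmt.Printf("last commit %s by %s\n", commit.SHA, commit.Author.Name)
	}
}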
|
||||
154  core/cautils/localgitrepository_test.go  Normal file
@@ -0,0 +1,154 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/suite"
|
||||
)
|
||||
|
||||
type LocalGitRepositoryTestSuite struct {
|
||||
suite.Suite
|
||||
archive *zip.ReadCloser
|
||||
gitRepositoryPath string
|
||||
destinationPath string
|
||||
}
|
||||
|
||||
func unzipFile(zipPath, destinationFolder string) (*zip.ReadCloser, error) {
|
||||
archive, err := zip.OpenReader(zipPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, f := range archive.File {
|
||||
filePath := filepath.Join(destinationFolder, f.Name)
|
||||
if !strings.HasPrefix(filePath, filepath.Clean(destinationFolder)+string(os.PathSeparator)) {
|
||||
return nil, fmt.Errorf("invalid file path")
|
||||
}
|
||||
if f.FileInfo().IsDir() {
|
||||
os.MkdirAll(filePath, os.ModePerm)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dstFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fileInArchive, err := f.Open()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err := io.Copy(dstFile, fileInArchive); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dstFile.Close()
|
||||
fileInArchive.Close()
|
||||
}
|
||||
|
||||
return archive, err
|
||||
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) SetupSuite() {
|
||||
zippedFixturePath := filepath.Join(".", "testdata", "localrepo.git")
|
||||
destinationPath := filepath.Join(".", "testdata", "temp")
|
||||
gitRepositoryPath := filepath.Join(destinationPath, "localrepo")
|
||||
|
||||
os.RemoveAll(destinationPath)
|
||||
archive, err := unzipFile(zippedFixturePath, destinationPath)
|
||||
|
||||
if err == nil {
|
||||
s.archive = archive
|
||||
s.gitRepositoryPath = gitRepositoryPath
|
||||
s.destinationPath = destinationPath
|
||||
}
|
||||
}
|
||||
|
||||
func TestLocalGitRepositoryTestSuite(t *testing.T) {
|
||||
suite.Run(t, new(LocalGitRepositoryTestSuite))
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TearDownSuite() {
|
||||
if s.archive != nil {
|
||||
s.archive.Close()
|
||||
}
|
||||
os.RemoveAll(s.destinationPath)
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestInvalidRepositoryPath() {
|
||||
if _, err := NewLocalGitRepository("/invalidpath"); s.Error(err) {
|
||||
s.Equal("repository does not exist", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetBranchName() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPath); s.NoError(err) {
|
||||
s.Equal("master", localRepo.GetBranchName())
|
||||
}
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetName() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPath); s.NoError(err) {
|
||||
if name, err := localRepo.GetName(); s.NoError(err) {
|
||||
s.Equal("localrepo", name)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetOriginUrl() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPath); s.NoError(err) {
|
||||
if url, err := localRepo.GetRemoteUrl(); s.NoError(err) {
|
||||
s.Equal("git@github.com:testuser/localrepo", url)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetLastCommit() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPath); s.NoError(err) {
|
||||
if commit, err := localRepo.GetLastCommit(); s.NoError(err) {
|
||||
s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
|
||||
s.Equal("Amir Malka", commit.Author.Name)
|
||||
s.Equal("amirm@armosec.io", commit.Author.Email)
|
||||
s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
|
||||
s.Equal("added file B\n", commit.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *LocalGitRepositoryTestSuite) TestGetFileLastCommit() {
|
||||
s.Run("fileA", func() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPath); s.NoError(err) {
|
||||
if commit, err := localRepo.GetFileLastCommit("fileA"); s.NoError(err) {
|
||||
s.Equal("9fae4be19624297947d2b605cefbff516628612d", commit.SHA)
|
||||
s.Equal("Amir Malka", commit.Author.Name)
|
||||
s.Equal("amirm@armosec.io", commit.Author.Email)
|
||||
s.Equal("2022-05-22 18:55:48 +0300 +0300", commit.Author.Date.String())
|
||||
s.Equal("added file A\n", commit.Message)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
s.Run("fileB", func() {
|
||||
if localRepo, err := NewLocalGitRepository(s.gitRepositoryPath); s.NoError(err) {
|
||||
if commit, err := localRepo.GetFileLastCommit("dirA/fileB"); s.NoError(err) {
|
||||
s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
|
||||
s.Equal("Amir Malka", commit.Author.Name)
|
||||
s.Equal("amirm@armosec.io", commit.Author.Email)
|
||||
s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
|
||||
s.Equal("added file B\n", commit.Message)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -8,6 +8,13 @@ type RootInfo struct {
|
||||
|
||||
ArmoBEURLs string // armo url
|
||||
ArmoBEURLsDep string // armo url
|
||||
|
||||
}
|
||||
|
||||
type Credentials struct {
|
||||
Account string
|
||||
ClientID string
|
||||
SecretKey string
|
||||
}
|
||||
|
||||
// func (rootInfo *RootInfo) InitLogger() {
|
||||
|
||||
@@ -8,6 +8,10 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
|
||||
|
||||
giturl "github.com/armosec/go-git-url"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
@@ -17,9 +21,23 @@ import (
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type ScanningContext string
|
||||
|
||||
const (
|
||||
ScanCluster string = "cluster"
|
||||
ScanLocalFiles string = "yaml"
|
||||
ContextCluster ScanningContext = "cluster"
|
||||
ContextFile ScanningContext = "single-file"
|
||||
ContextDir ScanningContext = "local-dir"
|
||||
ContextGitURL ScanningContext = "git-url"
|
||||
ContextGitLocal ScanningContext = "git-local"
|
||||
)
|
||||
|
||||
const ( // deprecated
|
||||
ScopeCluster = "cluster"
|
||||
ScopeYAML = "yaml"
|
||||
)
|
||||
const (
|
||||
// ScanCluster string = "cluster"
|
||||
// ScanLocalFiles string = "yaml"
|
||||
localControlInputsFilename string = "controls-inputs.json"
|
||||
localExceptionsFilename string = "exceptions.json"
|
||||
)
|
||||
@@ -28,6 +46,10 @@ type BoolPtrFlag struct {
|
||||
valPtr *bool
|
||||
}
|
||||
|
||||
func NewBoolPtr(b *bool) BoolPtrFlag {
|
||||
return BoolPtrFlag{valPtr: b}
|
||||
}
|
||||
|
||||
func (bpf *BoolPtrFlag) Type() string {
|
||||
return "bool"
|
||||
}
|
||||
@@ -63,32 +85,46 @@ func (bpf *BoolPtrFlag) Set(val string) error {
|
||||
}
|
||||
|
||||
// TODO - UPDATE
|
||||
type ViewTypes string
|
||||
|
||||
const (
|
||||
ResourceViewType ViewTypes = "resource"
|
||||
ControlViewType ViewTypes = "control"
|
||||
)
|
||||
|
||||
type PolicyIdentifier struct {
|
||||
Name string // policy name e.g. nsa,mitre,c-0012
|
||||
Kind apisv1.NotificationPolicyKind // policy kind e.g. Framework,Control,Rule
|
||||
Designators armotypes.PortalDesignator
|
||||
}
|
||||
|
||||
type ScanInfo struct {
|
||||
Getters // TODO - remove from object
|
||||
PolicyIdentifier []reporthandling.PolicyIdentifier // TODO - remove from object
|
||||
UseExceptions string // Load file with exceptions configuration
|
||||
ControlsInputs string // Load file with inputs for controls
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
|
||||
VerboseMode bool // Display all of the input resources and not only failed resources
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
FormatVersion string // Output object can be differnet between versions, this is for testing and backward compatibility
|
||||
ExcludedNamespaces string // used for host scanner namespace
|
||||
IncludeNamespaces string // DEPRECATED?
|
||||
InputPatterns []string // Yaml files input patterns
|
||||
Silent bool // Silent mode - Do not print progress logs
|
||||
FailThreshold float32 // Failure score threshold
|
||||
Submit bool // Submit results to Armo BE
|
||||
ScanID string // Report id of the current scan
|
||||
HostSensorEnabled BoolPtrFlag // Deploy ARMO K8s host scanner to collect data from certain controls
|
||||
HostSensorYamlPath string // Path to hostsensor file
|
||||
Local bool // Do not submit results
|
||||
Account string // account ID
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
Getters // TODO - remove from object
|
||||
PolicyIdentifier []PolicyIdentifier // TODO - remove from object
|
||||
UseExceptions string // Load file with exceptions configuration
|
||||
ControlsInputs string // Load file with inputs for controls
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
|
||||
VerboseMode bool // Display all of the input resources and not only failed resources
|
||||
View string // Display all of the input resources and not only failed resources
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
FormatVersion string // Output object can be differnet between versions, this is for testing and backward compatibility
|
||||
ExcludedNamespaces string // used for host scanner namespace
|
||||
IncludeNamespaces string //
|
||||
InputPatterns []string // Yaml files input patterns
|
||||
Silent bool // Silent mode - Do not print progress logs
|
||||
FailThreshold float32 // Failure score threshold
|
||||
Submit bool // Submit results to Armo BE
|
||||
ScanID string // Report id of the current scan
|
||||
HostSensorEnabled BoolPtrFlag // Deploy ARMO K8s host scanner to collect data from certain controls
|
||||
HostSensorYamlPath string // Path to hostsensor file
|
||||
Local bool // Do not submit results
|
||||
Credentials Credentials // account ID
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
}
|
||||
|
||||
type Getters struct {
|
||||
@@ -168,18 +204,18 @@ func (scanInfo *ScanInfo) setOutputFile() {
|
||||
}
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) GetScanningEnvironment() string {
|
||||
if len(scanInfo.InputPatterns) != 0 {
|
||||
return ScanLocalFiles
|
||||
}
|
||||
return ScanCluster
|
||||
}
|
||||
// func (scanInfo *ScanInfo) GetScanningEnvironment() string {
|
||||
// if len(scanInfo.InputPatterns) != 0 {
|
||||
// return ScanLocalFiles
|
||||
// }
|
||||
// return ScanCluster
|
||||
// }
|
||||
|
||||
func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind reporthandling.NotificationPolicyKind) {
|
||||
func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind apisv1.NotificationPolicyKind) {
|
||||
for _, policy := range policies {
|
||||
if !scanInfo.contains(policy) {
|
||||
newPolicy := reporthandling.PolicyIdentifier{}
|
||||
newPolicy.Kind = kind // reporthandling.KindFramework
|
||||
newPolicy := PolicyIdentifier{}
|
||||
newPolicy.Kind = kind
|
||||
newPolicy.Name = policy
|
||||
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
|
||||
}
|
||||
@@ -219,69 +255,183 @@ func scanInfoToScanMetadata(scanInfo *ScanInfo) *reporthandlingv2.Metadata {
|
||||
metadata.ScanMetadata.TargetNames = append(metadata.ScanMetadata.TargetNames, policy.Name)
|
||||
}
|
||||
|
||||
metadata.ScanMetadata.KubescapeVersion = BuildNumber
|
||||
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
|
||||
metadata.ScanMetadata.FailThreshold = scanInfo.FailThreshold
|
||||
metadata.ScanMetadata.HostScanner = scanInfo.HostSensorEnabled.GetBool()
|
||||
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
|
||||
metadata.ScanMetadata.ControlsInputs = scanInfo.ControlsInputs
|
||||
|
||||
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Cluster
|
||||
if scanInfo.GetScanningEnvironment() == ScanLocalFiles {
|
||||
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.File
|
||||
}
|
||||
|
||||
inputFiles := ""
|
||||
if len(scanInfo.InputPatterns) > 0 {
|
||||
inputFiles = scanInfo.InputPatterns[0]
|
||||
}
|
||||
|
||||
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Cluster
|
||||
if GetScanningContext(inputFiles) != ContextCluster {
|
||||
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.File
|
||||
}
|
||||
|
||||
setContextMetadata(&metadata.ContextMetadata, inputFiles)
|
||||
|
||||
return metadata
|
||||
}
|
||||
|
||||
func setContextMetadata(contextMetadata *reporthandlingv2.ContextMetadata, input string) {
|
||||
// if cluster
|
||||
func (scanInfo *ScanInfo) GetScanningContext() ScanningContext {
|
||||
input := ""
|
||||
if len(scanInfo.InputPatterns) > 0 {
|
||||
input = scanInfo.InputPatterns[0]
|
||||
}
|
||||
return GetScanningContext(input)
|
||||
}
|
||||
|
||||
// GetScanningContext get scanning context from the input param
|
||||
func GetScanningContext(input string) ScanningContext {
|
||||
// cluster
|
||||
if input == "" {
|
||||
contextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{
|
||||
ContextName: k8sinterface.GetClusterName(),
|
||||
}
|
||||
return
|
||||
return ContextCluster
|
||||
}
|
||||
|
||||
// if url
|
||||
if strings.HasPrefix(input, "http") { // TODO - check if can parse
|
||||
return
|
||||
// url
|
||||
if _, err := giturl.NewGitURL(input); err == nil {
|
||||
return ContextGitURL
|
||||
}
|
||||
|
||||
if !filepath.IsAbs(input) {
|
||||
if !filepath.IsAbs(input) { // parse path
|
||||
if o, err := os.Getwd(); err == nil {
|
||||
input = filepath.Join(o, input)
|
||||
}
|
||||
}
|
||||
|
||||
// if single file
|
||||
// local git repo
|
||||
if _, err := NewLocalGitRepository(input); err == nil {
|
||||
return ContextGitLocal
|
||||
}
|
||||
|
||||
// single file
|
||||
if IsFile(input) {
|
||||
contextMetadata.FileContextMetadata = &reporthandlingv2.FileContextMetadata{
|
||||
FilePath: input,
|
||||
HostName: getHostname(),
|
||||
}
|
||||
return
|
||||
return ContextFile
|
||||
}
|
||||
|
||||
// if dir/glob
|
||||
if !IsFile(input) {
|
||||
// dir/glob
|
||||
return ContextDir
|
||||
}
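GetScanningContext resolves the input in a fixed order: empty input means a cluster scan, then a parseable git URL, then a local git work tree, then a single file, and finally a directory or glob. A hedged sketch of calling it (import path per this diff; file and directory results depend on what exists on disk):

package main

import (
	"fmt"

	"github.com/armosec/kubescape/v2/core/cautils"
)

func main() {
	// The first two results are deterministic (see TestGetScanningContext in this diff);
	// file and directory inputs depend on what actually exists on disk.
	fmt.Println(cautils.GetScanningContext(""))                                     // cluster
	fmt.Println(cautils.GetScanningContext("https://github.com/armosec/kubescape")) // git-url
	fmt.Println(cautils.GetScanningContext("./manifests"))                          // git-local, single-file or local-dir, depending on the path
}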
|
||||
func setContextMetadata(contextMetadata *reporthandlingv2.ContextMetadata, input string) {
|
||||
switch GetScanningContext(input) {
|
||||
case ContextCluster:
|
||||
contextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{
|
||||
ContextName: k8sinterface.GetContextName(),
|
||||
}
|
||||
case ContextGitURL:
|
||||
// url
|
||||
context, err := metadataGitURL(input)
|
||||
if err != nil {
|
||||
logger.L().Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
|
||||
}
|
||||
contextMetadata.RepoContextMetadata = context
|
||||
case ContextDir:
|
||||
contextMetadata.DirectoryContextMetadata = &reporthandlingv2.DirectoryContextMetadata{
|
||||
BasePath: input,
|
||||
BasePath: getAbsPath(input),
|
||||
HostName: getHostname(),
|
||||
}
|
||||
return
|
||||
case ContextFile:
|
||||
contextMetadata.FileContextMetadata = &reporthandlingv2.FileContextMetadata{
|
||||
FilePath: getAbsPath(input),
|
||||
HostName: getHostname(),
|
||||
}
|
||||
case ContextGitLocal:
|
||||
// local
|
||||
context, err := metadataGitLocal(input)
|
||||
if err != nil {
|
||||
logger.L().Warning("in setContextMetadata", helpers.Interface("case", ContextGitURL), helpers.Error(err))
|
||||
}
|
||||
contextMetadata.RepoContextMetadata = context
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func metadataGitURL(input string) (*reporthandlingv2.RepoContextMetadata, error) {
|
||||
context := &reporthandlingv2.RepoContextMetadata{}
|
||||
gitParser, err := giturl.NewGitAPI(input)
|
||||
if err != nil {
|
||||
return context, fmt.Errorf("%w", err)
|
||||
}
|
||||
if gitParser.GetBranchName() == "" {
|
||||
gitParser.SetDefaultBranchName()
|
||||
}
|
||||
context.Provider = gitParser.GetProvider()
|
||||
context.Repo = gitParser.GetRepoName()
|
||||
context.Owner = gitParser.GetOwnerName()
|
||||
context.Branch = gitParser.GetBranchName()
|
||||
context.RemoteURL = gitParser.GetURL().String()
|
||||
|
||||
commit, err := gitParser.GetLatestCommit()
|
||||
if err != nil {
|
||||
return context, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
context.LastCommit = reporthandling.LastCommit{
|
||||
Hash: commit.SHA,
|
||||
Date: commit.Committer.Date,
|
||||
CommitterName: commit.Committer.Name,
|
||||
}
|
||||
|
||||
return context, nil
|
||||
}
|
||||
|
||||
func metadataGitLocal(input string) (*reporthandlingv2.RepoContextMetadata, error) {
|
||||
gitParser, err := NewLocalGitRepository(input)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
remoteURL, err := gitParser.GetRemoteUrl()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
context := &reporthandlingv2.RepoContextMetadata{}
|
||||
gitParserURL, err := giturl.NewGitURL(remoteURL)
|
||||
if err != nil {
|
||||
return context, fmt.Errorf("%w", err)
|
||||
}
|
||||
gitParserURL.SetBranchName(gitParser.GetBranchName())
|
||||
|
||||
context.Provider = gitParserURL.GetProvider()
|
||||
context.Repo = gitParserURL.GetRepoName()
|
||||
context.Owner = gitParserURL.GetOwnerName()
|
||||
context.Branch = gitParserURL.GetBranchName()
|
||||
context.RemoteURL = gitParserURL.GetURL().String()
|
||||
|
||||
commit, err := gitParser.GetLastCommit()
|
||||
if err != nil {
|
||||
return context, fmt.Errorf("%w", err)
|
||||
}
|
||||
context.LastCommit = reporthandling.LastCommit{
|
||||
Hash: commit.SHA,
|
||||
Date: commit.Committer.Date,
|
||||
CommitterName: commit.Committer.Name,
|
||||
}
|
||||
|
||||
return context, nil
|
||||
}
|
||||
func getHostname() string {
|
||||
if h, e := os.Hostname(); e == nil {
|
||||
return h
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func getAbsPath(p string) string {
|
||||
if !filepath.IsAbs(p) { // parse path
|
||||
if o, err := os.Getwd(); err == nil {
|
||||
return filepath.Join(o, p)
|
||||
}
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// ScanningContextToScanningScope convert the context to the deprecated scope
|
||||
func ScanningContextToScanningScope(scanningContext ScanningContext) string {
|
||||
if scanningContext == ContextCluster {
|
||||
return ScopeCluster
|
||||
}
|
||||
return ScopeYAML
|
||||
}
|
||||
|
||||
@@ -7,16 +7,6 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// func TestSetInputPatterns(t *testing.T) { //Unitest
|
||||
// {
|
||||
// scanInfo := ScanInfo{
|
||||
// InputPatterns: []string{"file"},
|
||||
// }
|
||||
// scanInfo.setInputPatterns()
|
||||
// assert.Equal(t, "file", scanInfo.InputPatterns[0])
|
||||
// }
|
||||
// }
|
||||
|
||||
func TestSetContextMetadata(t *testing.T) {
|
||||
{
|
||||
ctx := reporthandlingv2.ContextMetadata{}
|
||||
@@ -28,26 +18,6 @@ func TestSetContextMetadata(t *testing.T) {
|
||||
assert.Nil(t, ctx.HelmContextMetadata)
|
||||
assert.Nil(t, ctx.RepoContextMetadata)
|
||||
}
|
||||
{
|
||||
ctx := reporthandlingv2.ContextMetadata{}
|
||||
setContextMetadata(&ctx, "file")
|
||||
|
||||
assert.Nil(t, ctx.ClusterContextMetadata)
|
||||
assert.NotNil(t, ctx.DirectoryContextMetadata)
|
||||
assert.Nil(t, ctx.FileContextMetadata)
|
||||
assert.Nil(t, ctx.HelmContextMetadata)
|
||||
assert.Nil(t, ctx.RepoContextMetadata)
|
||||
}
|
||||
{
|
||||
ctx := reporthandlingv2.ContextMetadata{}
|
||||
setContextMetadata(&ctx, "scaninfo_test.go")
|
||||
|
||||
assert.Nil(t, ctx.ClusterContextMetadata)
|
||||
assert.Nil(t, ctx.DirectoryContextMetadata)
|
||||
assert.NotNil(t, ctx.FileContextMetadata)
|
||||
assert.Nil(t, ctx.HelmContextMetadata)
|
||||
assert.Nil(t, ctx.RepoContextMetadata)
|
||||
}
|
||||
{
|
||||
ctx := reporthandlingv2.ContextMetadata{}
|
||||
setContextMetadata(&ctx, "https://github.com/armosec/kubescape")
|
||||
@@ -56,10 +26,22 @@ func TestSetContextMetadata(t *testing.T) {
|
||||
assert.Nil(t, ctx.DirectoryContextMetadata)
|
||||
assert.Nil(t, ctx.FileContextMetadata)
|
||||
assert.Nil(t, ctx.HelmContextMetadata)
|
||||
assert.Nil(t, ctx.RepoContextMetadata) // TODO
|
||||
assert.NotNil(t, ctx.RepoContextMetadata)
|
||||
|
||||
assert.Equal(t, "kubescape", ctx.RepoContextMetadata.Repo)
|
||||
assert.Equal(t, "armosec", ctx.RepoContextMetadata.Owner)
|
||||
assert.Equal(t, "master", ctx.RepoContextMetadata.Branch)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetHostname(t *testing.T) {
|
||||
assert.NotEqual(t, "", getHostname())
|
||||
}
|
||||
|
||||
func TestGetScanningContext(t *testing.T) {
|
||||
assert.Equal(t, ContextCluster, GetScanningContext(""))
|
||||
// assert.Equal(t, ContextDir, GetScanningContext("/"))
|
||||
assert.Equal(t, ContextGitURL, GetScanningContext("https://github.com/armosec/kubescape"))
|
||||
// assert.Equal(t, ContextFile, GetScanningContext(path.Join(".", "testdata", "localrepo.git")))
|
||||
// assert.Equal(t, ContextGitLocal, GetScanningContext(path.Join(".", "testdata")))
|
||||
}
|
||||
|
||||
40  core/cautils/testdata/helm_expected_default_values.json  vendored  Normal file
@@ -0,0 +1,40 @@
|
||||
{
|
||||
"affinity": {},
|
||||
"configMap": {
|
||||
"create": false,
|
||||
"params": {
|
||||
"clusterName": "<MyK8sClusterName>",
|
||||
"customerGUID": "<MyGUID>"
|
||||
}
|
||||
},
|
||||
"fullnameOverride": "",
|
||||
"image": {
|
||||
"imageName": "kubescape",
|
||||
"pullPolicy": "Always",
|
||||
"repository": "quay.io/armosec",
|
||||
"tag": "latest"
|
||||
},
|
||||
"imagePullSecrets": [],
|
||||
"nameOverride": "",
|
||||
"nodeSelector": {},
|
||||
"podAnnotations": {},
|
||||
"podSecurityContext": {},
|
||||
"resources": {
|
||||
"limits": {
|
||||
"cpu": "500m",
|
||||
"memory": "512Mi"
|
||||
},
|
||||
"requests": {
|
||||
"cpu": "200m",
|
||||
"memory": "256Mi"
|
||||
}
|
||||
},
|
||||
"schedule": "* * 1 * *",
|
||||
"securityContext": {},
|
||||
"serviceAccount": {
|
||||
"annotations": {},
|
||||
"create": true,
|
||||
"name": "kubescape-discovery"
|
||||
},
|
||||
"tolerations": []
|
||||
}
|
||||
BIN  core/cautils/testdata/localrepo.git  vendored  Normal file
Binary file not shown.
@@ -9,7 +9,7 @@ import (
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
pkgutils "github.com/armosec/utils-go/utils"
|
||||
"github.com/armosec/utils-go/boolutils"
|
||||
"golang.org/x/mod/semver"
|
||||
)
|
||||
|
||||
@@ -17,6 +17,7 @@ const SKIP_VERSION_CHECK_DEPRECATED = "KUBESCAPE_SKIP_UPDATE_CHECK"
|
||||
const SKIP_VERSION_CHECK = "KS_SKIP_UPDATE_CHECK"
|
||||
|
||||
var BuildNumber string
|
||||
var Client string
|
||||
|
||||
const UnknownBuildNumber = "unknown"
|
||||
|
||||
@@ -28,9 +29,9 @@ func NewIVersionCheckHandler() IVersionCheckHandler {
|
||||
if BuildNumber == "" {
|
||||
logger.L().Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
|
||||
}
|
||||
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && pkgutils.StringToBool(v) {
|
||||
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && boolutils.StringToBool(v) {
|
||||
return NewVersionCheckHandlerMock()
|
||||
} else if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_DEPRECATED); ok && pkgutils.StringToBool(v) {
|
||||
} else if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_DEPRECATED); ok && boolutils.StringToBool(v) {
|
||||
return NewVersionCheckHandlerMock()
|
||||
}
|
||||
return NewVersionCheckHandler()
|
||||
@@ -48,10 +49,12 @@ type VersionCheckHandler struct {
|
||||
}
|
||||
type VersionCheckRequest struct {
|
||||
Client string `json:"client"` // kubescape
|
||||
ClientBuild string `json:"clientBuild"` // client build environment
|
||||
ClientVersion string `json:"clientVersion"` // kubescape version
|
||||
Framework string `json:"framework"` // framework name
|
||||
FrameworkVersion string `json:"frameworkVersion"` // framework version
|
||||
ScanningTarget string `json:"target"` // scanning target- cluster/yaml
|
||||
ScanningTarget string `json:"target"` // Deprecated
|
||||
ScanningContext string `json:"context"` // scanning context- cluster/file/gitURL/localGit/dir
|
||||
}
|
||||
|
||||
type VersionCheckResponse struct {
|
||||
@@ -74,8 +77,12 @@ func NewVersionCheckRequest(buildNumber, frameworkName, frameworkVersion, scanni
|
||||
if scanningTarget == "" {
|
||||
scanningTarget = "unknown"
|
||||
}
|
||||
if Client == "" {
|
||||
Client = "local-build"
|
||||
}
|
||||
return &VersionCheckRequest{
|
||||
Client: "kubescape",
|
||||
ClientBuild: Client,
|
||||
ClientVersion: buildNumber,
|
||||
Framework: frameworkName,
|
||||
FrameworkVersion: frameworkVersion,
|
||||
@@ -101,7 +108,7 @@ func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckReques
}

if latestVersion.ClientUpdate != "" {
if BuildNumber != "" && semver.Compare(BuildNumber, latestVersion.ClientUpdate) >= 0 {
if BuildNumber != "" && semver.Compare(BuildNumber, latestVersion.ClientUpdate) == -1 {
logger.L().Warning(warningMessage(latestVersion.ClientUpdate))
}
}

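The hunk above inverts the update check: the warning is now emitted only when the running build is strictly older than the latest published client (`semver.Compare(...) == -1`), instead of when it is equal or newer. A minimal sketch of the corrected comparison using golang.org/x/mod/semver, with made-up version strings:

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// warnIfOutdated mirrors the corrected condition: warn only when the local
// build is strictly older than the latest released client version.
func warnIfOutdated(buildNumber, latestClient string) {
	if buildNumber == "" || latestClient == "" {
		return // unknown build or no update info - nothing to compare
	}
	if semver.Compare(buildNumber, latestClient) == -1 { // -1 means buildNumber < latestClient
		fmt.Printf("outdated client: %s < %s\n", buildNumber, latestClient)
	}
}

func main() {
	warnIfOutdated("v2.0.150", "v2.0.151") // older build -> prints a warning
	warnIfOutdated("v2.0.151", "v2.0.151") // same version -> silent (this is what the fix changes)
}
```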
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/mod/semver"
|
||||
)
|
||||
|
||||
func TestGetKubernetesObjects(t *testing.T) {
|
||||
@@ -30,9 +31,38 @@ func TestIsRuleKubescapeVersionCompatible(t *testing.T) {
|
||||
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
|
||||
|
||||
// should only use rules that version is in range of use
|
||||
buildNumberMock = "v1.0.133"
|
||||
buildNumberMock = "v1.0.130"
|
||||
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
|
||||
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
|
||||
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
|
||||
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
|
||||
|
||||
// should only use rules that version is in range of use
|
||||
buildNumberMock = "v1.0.132"
|
||||
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
|
||||
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
|
||||
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
|
||||
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
|
||||
|
||||
// should only use rules that version is in range of use
|
||||
buildNumberMock = "v1.0.133"
|
||||
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
|
||||
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
|
||||
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
|
||||
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
|
||||
|
||||
// should only use rules that version is in range of use
|
||||
buildNumberMock = "v1.0.135"
|
||||
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
|
||||
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
|
||||
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
|
||||
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
|
||||
}
|
||||
|
||||
func TestCheckLatestVersion(t *testing.T) {
|
||||
assert.Equal(t, -1, semver.Compare("v2.0.150", "v2.0.151"))
|
||||
assert.Equal(t, 0, semver.Compare("v2.0.150", "v2.0.150"))
|
||||
assert.Equal(t, 1, semver.Compare("v2.0.150", "v2.0.149"))
|
||||
assert.Equal(t, -1, semver.Compare("v2.0.150", "v3.0.150"))
|
||||
|
||||
}
|
||||
|
||||
@@ -14,7 +14,10 @@ var (
|
||||
"KernelVersion",
|
||||
"LinuxSecurityHardeningStatus",
|
||||
"OpenPortsList",
|
||||
"LinuxKernelVariables"}
|
||||
"LinuxKernelVariables",
|
||||
"KubeletInfo",
|
||||
"KubeProxyInfo",
|
||||
}
|
||||
CloudResources = []string{"ClusterDescribe"}
|
||||
)
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
|
||||
func (ks *Kubescape) SetCachedConfig(setConfig *metav1.SetConfig) error {
|
||||
|
||||
tenant := getTenantConfig("", "", getKubernetesApi())
|
||||
tenant := getTenantConfig(nil, "", getKubernetesApi())
|
||||
|
||||
if setConfig.Account != "" {
|
||||
tenant.GetConfigObj().AccountID = setConfig.Account
|
||||
@@ -25,13 +25,13 @@ func (ks *Kubescape) SetCachedConfig(setConfig *metav1.SetConfig) error {
|
||||
|
||||
// View cached configurations
|
||||
func (ks *Kubescape) ViewCachedConfig(viewConfig *metav1.ViewConfig) error {
|
||||
tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
|
||||
tenant := getTenantConfig(nil, "", getKubernetesApi()) // change k8sinterface
|
||||
fmt.Fprintf(viewConfig.Writer, "%s\n", tenant.GetConfigObj().Config())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ks *Kubescape) DeleteCachedConfig(deleteConfig *metav1.DeleteConfig) error {
|
||||
|
||||
tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
|
||||
tenant := getTenantConfig(nil, "", getKubernetesApi()) // change k8sinterface
|
||||
return tenant.DeleteCachedConfig()
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
func (ks *Kubescape) DeleteExceptions(delExceptions *v1.DeleteExceptions) error {
|
||||
|
||||
// load cached config
|
||||
getTenantConfig(delExceptions.Account, "", getKubernetesApi())
|
||||
getTenantConfig(&delExceptions.Credentials, "", getKubernetesApi())
|
||||
|
||||
// login kubescape SaaS
|
||||
armoAPI := getter.GetArmoAPIConnector()
|
||||
|
||||
@@ -80,10 +80,10 @@ func downloadArtifacts(downloadInfo *metav1.DownloadInfo) error {
|
||||
}
|
||||
|
||||
func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
|
||||
|
||||
controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetAccountID(), nil)
|
||||
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetClusterName())
|
||||
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetContextName())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -104,12 +104,12 @@ func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
|
||||
var err error
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
|
||||
|
||||
exceptionsGetter := getExceptionsGetter("")
|
||||
exceptions := []armotypes.PostureExceptionPolicy{}
|
||||
if tenant.GetAccountID() != "" {
|
||||
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetClusterName())
|
||||
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetContextName())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -128,9 +128,9 @@ func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetTennatEmail(), true, nil)
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
// if framework name not specified - download all frameworks
|
||||
@@ -170,9 +170,9 @@ func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
func downloadControl(downloadInfo *metav1.DownloadInfo) error {
|
||||
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", getKubernetesApi())
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetTennatEmail(), false, nil)
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
// TODO - support
|
||||
|
||||
@@ -12,8 +12,8 @@ import (
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resourcehandler"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
|
||||
reporterv2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v2"
|
||||
"github.com/google/uuid"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/rbac-utils/rbacscanner"
|
||||
)
|
||||
|
||||
@@ -24,11 +24,11 @@ func getKubernetesApi() *k8sinterface.KubernetesApi {
}
return k8sinterface.NewKubernetesApi()
}
func getTenantConfig(Account, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
func getTenantConfig(credentials *cautils.Credentials, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), Account, clusterName)
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), credentials, clusterName)
}
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), Account, clusterName)
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), credentials, clusterName)
}

func getExceptionsGetter(useExceptions string) getter.IExceptionsGetter {
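The hunk above replaces the bare `Account` string with a `*cautils.Credentials` pointer threaded through `getTenantConfig`, so the local and in-cluster configurations receive the same credentials object. A minimal sketch of that pattern; the field names here are illustrative, not the actual `cautils.Credentials` definition.

```go
package main

import "fmt"

// Credentials bundles the identity fields that were previously passed around
// as separate strings. Field names are illustrative only.
type Credentials struct {
	Account  string
	ClientID string
	Secret   string
}

// tenantConfig is a stand-in for cautils.ITenantConfig implementations.
type tenantConfig struct {
	credentials *Credentials
	clusterName string
	inCluster   bool
}

// newTenantConfig mirrors the shape of the refactored getTenantConfig: a single
// credentials pointer (nil meaning "no credentials supplied") drives the same
// construction path for both local and in-cluster configurations.
func newTenantConfig(credentials *Credentials, clusterName string, connectedToCluster bool) *tenantConfig {
	if credentials == nil {
		credentials = &Credentials{}
	}
	return &tenantConfig{credentials: credentials, clusterName: clusterName, inCluster: connectedToCluster}
}

func main() {
	cfg := newTenantConfig(&Credentials{Account: "<MyGUID>"}, "my-cluster", true)
	fmt.Println(cfg.credentials.Account, cfg.clusterName, cfg.inCluster)
}
```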
@@ -42,7 +42,7 @@ func getExceptionsGetter(useExceptions string) getter.IExceptionsGetter {
|
||||
|
||||
func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, submit bool) *cautils.RBACObjects {
|
||||
if submit {
|
||||
return cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, tenantConfig.GetAccountID(), tenantConfig.GetClusterName()))
|
||||
return cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, tenantConfig.GetAccountID(), tenantConfig.GetContextName()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -105,7 +105,7 @@ func getFieldSelector(scanInfo *cautils.ScanInfo) resourcehandler.IFieldSelector
|
||||
return &resourcehandler.EmptySelector{}
|
||||
}
|
||||
|
||||
func policyIdentifierNames(pi []reporthandling.PolicyIdentifier) string {
|
||||
func policyIdentifierNames(pi []cautils.PolicyIdentifier) string {
|
||||
policiesNames := ""
|
||||
for i := range pi {
|
||||
policiesNames += pi[i].Name
|
||||
@@ -123,7 +123,6 @@ func policyIdentifierNames(pi []reporthandling.PolicyIdentifier) string {
|
||||
func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig) {
|
||||
|
||||
/*
|
||||
|
||||
If "First run (local config not found)" -
|
||||
Default/keep-local - Do not send report
|
||||
Submit - Create tenant & Submit report
|
||||
@@ -140,8 +139,20 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
return
}

scanningContext := scanInfo.GetScanningContext()
if scanningContext == cautils.ContextFile || scanningContext == cautils.ContextDir {
scanInfo.Submit = false
return
}

if tenantConfig.IsConfigFound() { // config found in cache (submitted)
if !scanInfo.Local {
if tenantConfig.GetAccountID() != "" {
if _, err := uuid.Parse(tenantConfig.GetAccountID()); err != nil {
scanInfo.Submit = false
return
}
}
// Submit report
scanInfo.Submit = true
}
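The added lines disable submission for file and directory scans and, when a cached account ID is present, allow submission only if it parses as a valid UUID. A simplified, self-contained reduction of that decision (it omits the IsConfigFound and Local checks of the real function):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

// shouldSubmit is an illustrative reduction of the logic above: never submit
// results for file or directory scans, and only submit for a cached account
// whose ID is a well-formed UUID.
func shouldSubmit(scanningContext string, accountID string) bool {
	if scanningContext == "file" || scanningContext == "dir" {
		return false
	}
	if accountID != "" {
		if _, err := uuid.Parse(accountID); err != nil {
			return false // malformed account ID - do not submit
		}
	}
	return true
}

func main() {
	fmt.Println(shouldSubmit("cluster", "b7f2a2a0-8f6e-4c3e-9a3b-1c2d3e4f5a6b")) // true
	fmt.Println(shouldSubmit("dir", ""))                                         // false: local files are never submitted
	fmt.Println(shouldSubmit("cluster", "not-a-uuid"))                           // false: invalid account ID
}
```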
@@ -165,20 +176,6 @@ func getPolicyGetter(loadPoliciesFromFile []string, tennatEmail string, framewor
|
||||
|
||||
}
|
||||
|
||||
// func setGetArmoAPIConnector(scanInfo *cautils.ScanInfo, customerGUID string) {
|
||||
// g := getter.GetArmoAPIConnector() // download policy from ARMO backend
|
||||
// g.SetCustomerGUID(customerGUID)
|
||||
// scanInfo.PolicyGetter = g
|
||||
// if scanInfo.ScanAll {
|
||||
// frameworks, err := g.ListCustomFrameworks(customerGUID)
|
||||
// if err != nil {
|
||||
// glog.Error("failed to get custom frameworks") // handle error
|
||||
// return
|
||||
// }
|
||||
// scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
|
||||
// }
|
||||
// }
|
||||
|
||||
// setConfigInputsGetter sets the config input getter - local file/github release/ArmoAPI
|
||||
func getConfigInputsGetter(ControlsInputs string, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IControlsInputsGetter {
|
||||
if len(ControlsInputs) > 0 {
|
||||
|
||||
@@ -44,16 +44,16 @@ func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
|
||||
}
|
||||
|
||||
func listFrameworks(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
|
||||
g := getPolicyGetter(nil, tenant.GetTennatEmail(), true, nil)
|
||||
tenant := getTenantConfig(&listPolicies.Credentials, "", getKubernetesApi()) // change k8sinterface
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
|
||||
|
||||
return listFrameworksNames(g), nil
|
||||
}
|
||||
|
||||
func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
|
||||
tenant := getTenantConfig(&listPolicies.Credentials, "", getKubernetesApi()) // change k8sinterface
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetTennatEmail(), false, nil)
|
||||
g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
|
||||
l := getter.ListName
|
||||
if listPolicies.ListIDs {
|
||||
l = getter.ListID
|
||||
@@ -63,7 +63,7 @@ func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
|
||||
func listExceptions(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
// load tenant metav1
|
||||
getTenantConfig(listPolicies.Account, "", getKubernetesApi())
|
||||
getTenantConfig(&listPolicies.Credentials, "", getKubernetesApi())
|
||||
|
||||
var exceptionsNames []string
|
||||
armoAPI := getExceptionsGetter("")
|
||||
|
||||
@@ -3,7 +3,8 @@ package core
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
@@ -18,7 +19,6 @@ import (
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/resources"
|
||||
)
|
||||
|
||||
@@ -34,7 +34,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
|
||||
// ================== setup k8s interface object ======================================
|
||||
var k8s *k8sinterface.KubernetesApi
|
||||
if scanInfo.GetScanningEnvironment() == cautils.ScanCluster {
|
||||
if scanInfo.GetScanningContext() == cautils.ContextCluster {
|
||||
k8s = getKubernetesApi()
|
||||
if k8s == nil {
|
||||
logger.L().Fatal("failed connecting to Kubernetes cluster")
|
||||
@@ -43,16 +43,11 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
|
||||
// ================== setup tenant object ======================================
|
||||
|
||||
tenantConfig := getTenantConfig(scanInfo.Account, scanInfo.KubeContext, k8s)
|
||||
tenantConfig := getTenantConfig(&scanInfo.Credentials, scanInfo.KubeContext, k8s)
|
||||
|
||||
// Set submit behavior AFTER loading tenant config
|
||||
setSubmitBehavior(scanInfo, tenantConfig)
|
||||
|
||||
// Do not submit yaml scanning
|
||||
if len(scanInfo.InputPatterns) > 0 {
|
||||
scanInfo.Submit = false
|
||||
}
|
||||
|
||||
if scanInfo.Submit {
|
||||
// submit - Create tenant & Submit report
|
||||
if err := tenantConfig.SetTenant(); err != nil {
|
||||
@@ -63,7 +58,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
// ================== version testing ======================================
|
||||
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanInfo.GetScanningEnvironment()))
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", cautils.ScanningContextToScanningScope(scanInfo.GetScanningContext())))
|
||||
|
||||
// ================== setup host scanner object ======================================
|
||||
|
||||
@@ -94,7 +89,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
reportHandler := getReporter(tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan)
|
||||
|
||||
// setup printer
|
||||
printerHandler := resultshandling.NewPrinter(scanInfo.Format, scanInfo.FormatVersion, scanInfo.VerboseMode)
|
||||
printerHandler := resultshandling.NewPrinter(scanInfo.Format, scanInfo.FormatVersion, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
|
||||
printerHandler.SetWriter(scanInfo.Output)
|
||||
|
||||
// ================== return interface ======================================
|
||||
@@ -116,21 +111,21 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
|
||||
interfaces := getInterfaces(scanInfo)
|
||||
|
||||
cautils.ClusterName = interfaces.tenantConfig.GetClusterName() // TODO - Deprecated
|
||||
cautils.ClusterName = interfaces.tenantConfig.GetContextName() // TODO - Deprecated
|
||||
cautils.CustomerGUID = interfaces.tenantConfig.GetAccountID() // TODO - Deprecated
|
||||
interfaces.report.SetClusterName(interfaces.tenantConfig.GetClusterName())
|
||||
interfaces.report.SetClusterName(interfaces.tenantConfig.GetContextName())
|
||||
interfaces.report.SetCustomerGUID(interfaces.tenantConfig.GetAccountID())
|
||||
|
||||
downloadReleasedPolicy := getter.NewDownloadReleasedPolicy() // download config inputs from github release
|
||||
|
||||
// set policy getter only after setting the customerGUID
|
||||
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetTennatEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
|
||||
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetTenantEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
|
||||
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
|
||||
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions)
|
||||
|
||||
// TODO - list supported frameworks/controls
|
||||
if scanInfo.ScanAll {
|
||||
scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), reporthandling.KindFramework)
|
||||
scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), apisv1.KindFramework)
|
||||
}
|
||||
|
||||
// remove host scanner components
|
||||
@@ -144,17 +139,17 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
|
||||
// ===================== policies & resources =====================
|
||||
policyHandler := policyhandler.NewPolicyHandler(interfaces.resourceHandler)
|
||||
scanData, err := collectResources(policyHandler, scanInfo)
|
||||
scanData, err := policyHandler.CollectResources(scanInfo.PolicyIdentifier, scanInfo)
|
||||
if err != nil {
|
||||
return resultsHandling, err
|
||||
}
|
||||
|
||||
// ========================= opa testing =====================
|
||||
deps := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), interfaces.tenantConfig.GetClusterName())
|
||||
deps := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), interfaces.tenantConfig.GetContextName())
|
||||
reportResults := opaprocessor.NewOPAProcessor(scanData, deps)
|
||||
if err := reportResults.ProcessRulesListenner(); err != nil {
|
||||
// TODO - do something
|
||||
return resultsHandling, err
|
||||
return resultsHandling, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
// ========================= results handling =====================
|
||||
@@ -166,47 +161,3 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
|
||||
return resultsHandling, nil
|
||||
}
|
||||
|
||||
// TODO - remove function
|
||||
func collectResources(policyHandler *policyhandler.PolicyHandler, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
|
||||
policyNotification := &reporthandling.PolicyNotification{
|
||||
Rules: scanInfo.PolicyIdentifier,
|
||||
KubescapeNotification: reporthandling.KubescapeNotification{
|
||||
Designators: armotypes.PortalDesignator{},
|
||||
NotificationType: reporthandling.TypeExecPostureScan,
|
||||
},
|
||||
}
|
||||
switch policyNotification.KubescapeNotification.NotificationType {
|
||||
case reporthandling.TypeExecPostureScan:
|
||||
collectedResources, err := policyHandler.CollectResources(policyNotification, scanInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return collectedResources, nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("notification type '%s' Unknown", policyNotification.KubescapeNotification.NotificationType)
|
||||
}
|
||||
}
|
||||
|
||||
// func askUserForHostSensor() bool {
|
||||
// return false
|
||||
|
||||
// if !isatty.IsTerminal(os.Stdin.Fd()) {
|
||||
// return false
|
||||
// }
|
||||
// if ssss, err := os.Stdin.Stat(); err == nil {
|
||||
// // fmt.Printf("Found stdin type: %s\n", ssss.Mode().Type())
|
||||
// if ssss.Mode().Type()&(fs.ModeDevice|fs.ModeCharDevice) > 0 { //has TTY
|
||||
// fmt.Fprintf(os.Stderr, "Would you like to scan K8s nodes? [y/N]. This is required to collect valuable data for certain controls\n")
|
||||
// fmt.Fprintf(os.Stderr, "Use --enable-host-scan flag to suppress this message\n")
|
||||
// var b []byte = make([]byte, 1)
|
||||
// if n, err := os.Stdin.Read(b); err == nil {
|
||||
// if n > 0 && len(b) > 0 && (b[0] == 'y' || b[0] == 'Y') {
|
||||
// return true
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
// return false
|
||||
// }
|
||||
|
||||
@@ -29,11 +29,11 @@ func (ks *Kubescape) Submit(submitInterfaces cliinterfaces.SubmitInterfaces) err
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ks *Kubescape) SubmitExceptions(accountID, excPath string) error {
|
||||
func (ks *Kubescape) SubmitExceptions(credentials *cautils.Credentials, excPath string) error {
|
||||
logger.L().Info("submitting exceptions", helpers.String("path", excPath))
|
||||
|
||||
// load cached config
|
||||
tenantConfig := getTenantConfig(accountID, "", getKubernetesApi())
|
||||
tenantConfig := getTenantConfig(credentials, "", getKubernetesApi())
|
||||
if err := tenantConfig.SetTenant(); err != nil {
|
||||
logger.L().Error("failed setting account ID", helpers.Error(err))
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package v1
|
||||
|
||||
import "github.com/armosec/kubescape/v2/core/cautils"
|
||||
|
||||
type DeleteExceptions struct {
|
||||
Account string
|
||||
Exceptions []string
|
||||
Credentials cautils.Credentials
|
||||
Exceptions []string
|
||||
}
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
package v1
|
||||
|
||||
import "github.com/armosec/kubescape/v2/core/cautils"
|
||||
|
||||
type DownloadInfo struct {
|
||||
Path string // directory to save artifact. Default is "~/.kubescape/"
|
||||
FileName string // can be empty
|
||||
Target string // type of artifact to download
|
||||
Name string // name of artifact to download
|
||||
Account string // AccountID
|
||||
Path string // directory to save artifact. Default is "~/.kubescape/"
|
||||
FileName string // can be empty
|
||||
Target string // type of artifact to download
|
||||
Name string // name of artifact to download
|
||||
Credentials cautils.Credentials
|
||||
}
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
package v1
|
||||
|
||||
import "github.com/armosec/kubescape/v2/core/cautils"
|
||||
|
||||
type ListPolicies struct {
|
||||
Target string
|
||||
ListIDs bool
|
||||
Account string
|
||||
Format string
|
||||
Target string
|
||||
ListIDs bool
|
||||
Format string
|
||||
Credentials cautils.Credentials
|
||||
}
|
||||
|
||||
type ListResponse struct {
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
package v1
|
||||
|
||||
import "github.com/armosec/kubescape/v2/core/cautils"
|
||||
|
||||
type Submit struct {
|
||||
Account string
|
||||
Credentials cautils.Credentials
|
||||
}
|
||||
|
||||
type Delete struct {
|
||||
Account string
|
||||
Credentials cautils.Credentials
|
||||
}
|
||||
|
||||
@@ -15,8 +15,8 @@ type IKubescape interface {
|
||||
Download(downloadInfo *metav1.DownloadInfo) error // TODO - return downloaded policies
|
||||
|
||||
// submit
|
||||
Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error // TODO - func should receive object
|
||||
SubmitExceptions(accountID, excPath string) error // TODO - remove
|
||||
Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error // TODO - func should receive object
|
||||
SubmitExceptions(credentials *cautils.Credentials, excPath string) error // TODO - remove
|
||||
|
||||
// config
|
||||
SetCachedConfig(setConfig *metav1.SetConfig) error
|
||||
|
||||
@@ -89,7 +89,7 @@ func (hsh *HostSensorHandler) Init() error {
|
||||
|
||||
func (hsh *HostSensorHandler) applyYAML() error {
|
||||
workloads, err := cautils.ReadFile([]byte(hostSensorYAML), cautils.YAML_FILE_FORMAT)
|
||||
if len(err) != 0 {
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read YAML files, reason: %v", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -109,6 +109,18 @@ func (hsh *HostSensorHandler) GetLinuxSecurityHardeningStatus() ([]hostsensor.Ho
|
||||
return hsh.sendAllPodsHTTPGETRequest("/linuxSecurityHardening", "LinuxSecurityHardeningStatus")
|
||||
}
|
||||
|
||||
// return list of KubeletInfo
|
||||
func (hsh *HostSensorHandler) GetKubeletInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
return hsh.sendAllPodsHTTPGETRequest("/kubeletInfo", "KubeletInfo")
|
||||
}
|
||||
|
||||
// return list of KubeProxyInfo
|
||||
func (hsh *HostSensorHandler) GetKubeProxyInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
return hsh.sendAllPodsHTTPGETRequest("/kubeProxyInfo", "KubeProxyInfo")
|
||||
}
|
||||
|
||||
// return list of KubeletCommandLine
|
||||
func (hsh *HostSensorHandler) GetKubeletCommandLine() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
@@ -228,6 +240,27 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
if len(kcData) > 0 {
res = append(res, kcData...)
}

// GetKubeletInfo
kcData, err = hsh.GetKubeletInfo()
if err != nil {
addInfoToMap(KubeletInfo, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}

// GetKubeProxyInfo
kcData, err = hsh.GetKubeProxyInfo()
if err != nil {
addInfoToMap(KubeProxyInfo, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}

logger.L().Debug("Done reading information from host scanner")
return res, infoMap, nil
}
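The extended CollectResources above follows one pattern per data source: call the getter, record a skip entry and a warning on failure, and append whatever data came back. A condensed sketch of that aggregation loop with stand-in types (the real code uses hostsensor.HostSensorDataEnvelope and addInfoToMap):

```go
package main

import (
	"fmt"
	"log"
)

// dataEnvelope and getter are simplified stand-ins for
// hostsensor.HostSensorDataEnvelope and the Get* methods above.
type dataEnvelope struct {
	Kind string
	Data string
}

type getter struct {
	kind string
	get  func() ([]dataEnvelope, error)
}

// collect mirrors the aggregation pattern in CollectResources: a failing
// getter only records a "skipped" note and logs a warning, it never aborts
// the whole collection.
func collect(getters []getter) ([]dataEnvelope, map[string]error) {
	var res []dataEnvelope
	skipped := map[string]error{}
	for _, g := range getters {
		data, err := g.get()
		if err != nil {
			skipped[g.kind] = err
			log.Printf("warning: %s: %v", g.kind, err)
		}
		if len(data) > 0 {
			res = append(res, data...)
		}
	}
	return res, skipped
}

func main() {
	getters := []getter{
		{kind: "KubeletInfo", get: func() ([]dataEnvelope, error) {
			return []dataEnvelope{{Kind: "KubeletInfo", Data: "{}"}}, nil
		}},
		{kind: "KubeProxyInfo", get: func() ([]dataEnvelope, error) {
			return nil, fmt.Errorf("port-forward failed")
		}},
	}
	res, skipped := collect(getters)
	fmt.Println(len(res), "envelopes,", len(skipped), "skipped kinds")
}
```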
@@ -13,8 +13,10 @@ var (
|
||||
OpenPortsList = "OpenPortsList"
|
||||
LinuxKernelVariables = "LinuxKernelVariables"
|
||||
KubeletCommandLine = "KubeletCommandLine"
|
||||
KubeletInfo = "KubeletInfo"
|
||||
KubeProxyInfo = "KubeProxyInfo"
|
||||
|
||||
MapResourceToApiGroup = map[string]string{
|
||||
MapHostSensorResourceToApiGroup = map[string]string{
|
||||
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
|
||||
OsReleaseFile: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeletCommandLine: "hostdata.kubescape.cloud/v1beta0",
|
||||
@@ -22,11 +24,13 @@ var (
|
||||
LinuxSecurityHardeningStatus: "hostdata.kubescape.cloud/v1beta0",
|
||||
OpenPortsList: "hostdata.kubescape.cloud/v1beta0",
|
||||
LinuxKernelVariables: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeletInfo: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeProxyInfo: "hostdata.kubescape.cloud/v1beta0",
|
||||
}
|
||||
)
|
||||
|
||||
func addInfoToMap(resource string, infoMap map[string]apis.StatusInfo, err error) {
|
||||
group, version := k8sinterface.SplitApiVersion(MapResourceToApiGroup[resource])
|
||||
group, version := k8sinterface.SplitApiVersion(MapHostSensorResourceToApiGroup[resource])
|
||||
r := k8sinterface.JoinResourceTriplets(group, version, resource)
|
||||
infoMap[r] = apis.StatusInfo{
|
||||
InnerStatus: apis.StatusSkipped,
|
||||
|
||||
@@ -180,6 +180,9 @@ func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (map[stri
|
||||
for j := range ruleResponses[i].FixPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixPath: ruleResponses[i].FixPaths[j]})
|
||||
}
|
||||
if ruleResponses[i].FixCommand != "" {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixCommand: ruleResponses[i].FixCommand})
|
||||
}
|
||||
resources[failedResources[j].GetID()] = ruleResult
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
resources "github.com/armosec/opa-utils/resources"
|
||||
)
|
||||
|
||||
@@ -46,7 +47,7 @@ func (opap *OPAProcessor) updateResults() {
|
||||
|
||||
// set result summary
|
||||
// map control to error
|
||||
controlToInfoMap := mapControlToInfo(opap.ResourceToControlsMap, opap.InfoMap)
|
||||
controlToInfoMap := mapControlToInfo(opap.ResourceToControlsMap, opap.InfoMap, opap.Report.SummaryDetails.Controls)
|
||||
opap.Report.SummaryDetails.InitResourcesSummary(controlToInfoMap)
|
||||
// for f := range opap.PostureReport.FrameworkReports {
|
||||
// // set exceptions
|
||||
@@ -60,17 +61,29 @@ func (opap *OPAProcessor) updateResults() {
|
||||
// }
|
||||
}
|
||||
|
||||
func mapControlToInfo(mapResourceToControls map[string][]string, infoMap map[string]apis.StatusInfo) map[string]apis.StatusInfo {
|
||||
func mapControlToInfo(mapResourceToControls map[string][]string, infoMap map[string]apis.StatusInfo, controlSummary reportsummary.ControlSummaries) map[string]apis.StatusInfo {
|
||||
controlToInfoMap := make(map[string]apis.StatusInfo)
|
||||
for resource, statusInfo := range infoMap {
|
||||
controls := mapResourceToControls[resource]
|
||||
for _, control := range controls {
|
||||
controlToInfoMap[control] = statusInfo
|
||||
controlIDs := mapResourceToControls[resource]
|
||||
for _, controlID := range controlIDs {
|
||||
ctrl := controlSummary.GetControl(reportsummary.EControlCriteriaID, controlID)
|
||||
if ctrl != nil {
|
||||
resources := ctrl.NumberOfResources()
|
||||
// Check that there are no K8s resources too
|
||||
if isEmptyResources(resources) {
|
||||
controlToInfoMap[controlID] = statusInfo
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return controlToInfoMap
|
||||
}
|
||||
|
||||
func isEmptyResources(counters reportsummary.ICounters) bool {
|
||||
return counters.Failed() == 0 && counters.Excluded() == 0 && counters.Passed() == 0
|
||||
}
|
||||
|
||||
func getAllSupportedObjects(k8sResources *cautils.K8SResources, armoResources *cautils.ArmoResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
|
||||
k8sObjects := []workloadinterface.IMetadata{}
|
||||
k8sObjects = append(k8sObjects, getKubernetesObjects(k8sResources, allResources, rule.Match)...)
|
||||
|
||||
@@ -3,9 +3,9 @@ package policyhandler
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resourcehandler"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// PolicyHandler -
|
||||
@@ -22,7 +22,7 @@ func NewPolicyHandler(resourceHandler resourcehandler.IResourceHandler) *PolicyH
|
||||
}
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) CollectResources(notification *reporthandling.PolicyNotification, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
|
||||
func (policyHandler *PolicyHandler) CollectResources(policyIdentifier []cautils.PolicyIdentifier, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
|
||||
opaSessionObj := cautils.NewOPASessionObj(nil, nil, scanInfo)
|
||||
|
||||
// validate notification
|
||||
@@ -30,15 +30,15 @@ func (policyHandler *PolicyHandler) CollectResources(notification *reporthandlin
|
||||
policyHandler.getters = &scanInfo.Getters
|
||||
|
||||
// get policies
|
||||
if err := policyHandler.getPolicies(notification, opaSessionObj); err != nil {
|
||||
if err := policyHandler.getPolicies(policyIdentifier, opaSessionObj); err != nil {
|
||||
return opaSessionObj, err
|
||||
}
|
||||
|
||||
err := policyHandler.getResources(notification, opaSessionObj, scanInfo)
|
||||
err := policyHandler.getResources(policyIdentifier, opaSessionObj, scanInfo)
|
||||
if err != nil {
|
||||
return opaSessionObj, err
|
||||
}
|
||||
if opaSessionObj.K8SResources == nil || len(*opaSessionObj.K8SResources) == 0 {
|
||||
if (opaSessionObj.K8SResources == nil || len(*opaSessionObj.K8SResources) == 0) && (opaSessionObj.ArmoResource == nil || len(*opaSessionObj.ArmoResource) == 0) {
|
||||
return opaSessionObj, fmt.Errorf("empty list of resources")
|
||||
}
|
||||
|
||||
@@ -46,10 +46,10 @@ func (policyHandler *PolicyHandler) CollectResources(notification *reporthandlin
|
||||
return opaSessionObj, nil
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) getResources(notification *reporthandling.PolicyNotification, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
|
||||
func (policyHandler *PolicyHandler) getResources(policyIdentifier []cautils.PolicyIdentifier, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
|
||||
opaSessionObj.Report.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
|
||||
|
||||
resourcesMap, allResources, armoResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, ¬ification.Designators)
|
||||
resourcesMap, allResources, armoResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, &policyIdentifier[0].Designators)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -60,3 +60,10 @@ func (policyHandler *PolicyHandler) getResources(notification *reporthandling.Po
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getDesignator(policyIdentifier []cautils.PolicyIdentifier) *armotypes.PortalDesignator {
|
||||
if len(policyIdentifier) > 0 {
|
||||
return &policyIdentifier[0].Designators
|
||||
}
|
||||
return &armotypes.PortalDesignator{}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,8 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
@@ -11,18 +13,18 @@ import (
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.PolicyNotification, policiesAndResources *cautils.OPASessionObj) error {
|
||||
func (policyHandler *PolicyHandler) getPolicies(policyIdentifier []cautils.PolicyIdentifier, policiesAndResources *cautils.OPASessionObj) error {
|
||||
logger.L().Info("Downloading/Loading policy definitions")
|
||||
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
|
||||
policies, err := policyHandler.getScanPolicies(notification)
|
||||
policies, err := policyHandler.getScanPolicies(policyIdentifier)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(policies) == 0 {
|
||||
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(notification.Rules), ", "))
|
||||
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(policyIdentifier), ", "))
|
||||
}
|
||||
|
||||
policiesAndResources.Policies = policies
|
||||
@@ -44,12 +46,12 @@ func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.Pol
|
||||
return nil
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling.PolicyNotification) ([]reporthandling.Framework, error) {
|
||||
func (policyHandler *PolicyHandler) getScanPolicies(policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
|
||||
frameworks := []reporthandling.Framework{}
|
||||
|
||||
switch getScanKind(notification) {
|
||||
case reporthandling.KindFramework: // Download frameworks
|
||||
for _, rule := range notification.Rules {
|
||||
switch getScanKind(policyIdentifier) {
|
||||
case apisv1.KindFramework: // Download frameworks
|
||||
for _, rule := range policyIdentifier {
|
||||
receivedFramework, err := policyHandler.getters.PolicyGetter.GetFramework(rule.Name)
|
||||
if err != nil {
|
||||
return frameworks, policyDownloadError(err)
|
||||
@@ -63,11 +65,11 @@ func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling
|
||||
}
|
||||
}
|
||||
}
|
||||
case reporthandling.KindControl: // Download controls
|
||||
case apisv1.KindControl: // Download controls
|
||||
f := reporthandling.Framework{}
|
||||
var receivedControl *reporthandling.Control
|
||||
var err error
|
||||
for _, rule := range notification.Rules {
|
||||
for _, rule := range policyIdentifier {
|
||||
receivedControl, err = policyHandler.getters.PolicyGetter.GetControl(rule.Name)
|
||||
if err != nil {
|
||||
return frameworks, policyDownloadError(err)
|
||||
@@ -89,7 +91,7 @@ func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling
|
||||
return frameworks, nil
|
||||
}
|
||||
|
||||
func policyIdentifierToSlice(rules []reporthandling.PolicyIdentifier) []string {
|
||||
func policyIdentifierToSlice(rules []cautils.PolicyIdentifier) []string {
|
||||
s := []string{}
|
||||
for i := range rules {
|
||||
s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Name))
|
||||
|
||||
@@ -4,12 +4,14 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
)
|
||||
|
||||
func getScanKind(notification *reporthandling.PolicyNotification) reporthandling.NotificationPolicyKind {
|
||||
if len(notification.Rules) > 0 {
|
||||
return notification.Rules[0].Kind
|
||||
func getScanKind(policyIdentifier []cautils.PolicyIdentifier) apisv1.NotificationPolicyKind {
|
||||
if len(policyIdentifier) > 0 {
|
||||
return policyIdentifier[0].Kind
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
@@ -51,7 +51,7 @@ func (armoCivAdaptor *ArmoCivAdaptor) GetImageVulnerability(imageID *registryvul
|
||||
pageNumber := 1
|
||||
request := V2ListRequest{PageSize: &pageSize, PageNum: &pageNumber, InnerFilters: filter, OrderBy: "timestamp:desc"}
|
||||
requestBody, _ := json.Marshal(request)
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsDetails?customerGUID=%s", armoCivAdaptor.armoAPI.GetAPIURL(), armoCivAdaptor.armoAPI.GetAccountID())
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsDetails?customerGUID=%s", armoCivAdaptor.armoAPI.GetApiURL(), armoCivAdaptor.armoAPI.GetAccountID())
|
||||
|
||||
resp, err := armoCivAdaptor.armoAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
|
||||
if err != nil {
|
||||
@@ -83,8 +83,7 @@ func (armoCivAdaptor *ArmoCivAdaptor) GetImageVulnerability(imageID *registryvul
|
||||
}
|
||||
|
||||
func (armoCivAdaptor *ArmoCivAdaptor) DescribeAdaptor() string {
|
||||
// TODO
|
||||
return ""
|
||||
return "armo image vulnerabilities scanner, docs: https://hub.armo.cloud/docs/cluster-vulnerability-scanning"
|
||||
}
|
||||
|
||||
func (armoCivAdaptor *ArmoCivAdaptor) GetImagesInformation(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageInformation, error) {
|
||||
|
||||
@@ -14,7 +14,7 @@ func (armoCivAdaptor *ArmoCivAdaptor) getImageLastScanId(imageID *registryvulner
|
||||
pageNumber := 1
|
||||
request := V2ListRequest{PageSize: &pageSize, PageNum: &pageNumber, InnerFilters: filter, OrderBy: "timestamp:desc"}
|
||||
requestBody, _ := json.Marshal(request)
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsSumSummary?customerGUID=%s", armoCivAdaptor.armoAPI.GetAPIURL(), armoCivAdaptor.armoAPI.GetAccountID())
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsSumSummary?customerGUID=%s", armoCivAdaptor.armoAPI.GetApiURL(), armoCivAdaptor.armoAPI.GetAccountID())
|
||||
|
||||
resp, err := armoCivAdaptor.armoAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
|
||||
if err != nil {
|
||||
|
||||
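getImageLastScanId above builds a one-result V2ListRequest ordered by `timestamp:desc`, marshals it to JSON and POSTs it to the vulnerability endpoint through the armoAPI client. A rough sketch of that request shape using net/http; the URL, field names and filter keys here are placeholders rather than the real ARMO API contract.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// listRequest approximates the shape of a paginated list request: page size,
// page number, filters and an ordering expression. Field names are illustrative.
type listRequest struct {
	PageSize     *int                `json:"pageSize,omitempty"`
	PageNum      *int                `json:"pageNum,omitempty"`
	InnerFilters []map[string]string `json:"innerFilters,omitempty"`
	OrderBy      string              `json:"orderBy,omitempty"`
}

// postJSON marshals the body and sends it as an application/json POST request.
func postJSON(url string, body interface{}) (*http.Response, error) {
	payload, err := json.Marshal(body)
	if err != nil {
		return nil, err
	}
	return http.Post(url, "application/json", bytes.NewReader(payload))
}

func main() {
	pageSize, pageNum := 1, 1
	req := listRequest{
		PageSize:     &pageSize,
		PageNum:      &pageNum,
		InnerFilters: []map[string]string{{"imageID": "docker.io/library/nginx:latest"}},
		OrderBy:      "timestamp:desc",
	}
	// Placeholder URL - the real endpoint and authentication are handled by the armoAPI client.
	resp, err := postJSON("https://example.invalid/api/v1/vulnerability/scanResultsSumSummary?customerGUID=<MyGUID>", req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```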
@@ -1,110 +0,0 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/cloudsupport"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
)
|
||||
|
||||
var (
|
||||
KS_KUBE_CLUSTER_ENV_VAR = "KS_KUBE_CLUSTER"
|
||||
KS_CLOUD_PROVIDER_ENV_VAR = "KS_CLOUD_PROVIDER"
|
||||
KS_CLOUD_REGION_ENV_VAR = "KS_CLOUD_REGION"
|
||||
KS_GKE_PROJECT_ENV_VAR = "KS_GKE_PROJECT"
|
||||
)
|
||||
|
||||
type ICloudProvider interface {
|
||||
getKubeCluster() string
|
||||
getRegion(cluster string, provider string) (string, error)
|
||||
getProject(cluster string, provider string) (string, error)
|
||||
getKubeClusterName() string
|
||||
}
|
||||
|
||||
func initCloudProvider() ICloudProvider {
|
||||
|
||||
switch getCloudProvider() {
|
||||
case "gke", "gcp":
|
||||
if isEnvVars() {
|
||||
return NewGKEProviderEnvVar()
|
||||
}
|
||||
return NewGKEProviderContext()
|
||||
case "eks", "aws":
|
||||
if isEnvVars() {
|
||||
return NewEKSProviderEnvVar()
|
||||
}
|
||||
return NewEKSProviderContext()
|
||||
}
|
||||
return NewEmptyCloudProvider()
|
||||
}
|
||||
|
||||
func getCloudProvider() string {
|
||||
var provider string
|
||||
if isEnvVars() {
|
||||
provider = getCloudProviderFromEnvVar()
|
||||
} else {
|
||||
provider = getCloudProviderFromContext()
|
||||
}
|
||||
return strings.ToLower(provider)
|
||||
}
|
||||
|
||||
func getCloudProviderFromContext() string {
|
||||
return cloudsupport.GetCloudProvider(getClusterFromContext())
|
||||
}
|
||||
|
||||
func getClusterFromContext() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
cluster := context.Cluster
|
||||
if cluster != "" {
|
||||
return cluster
|
||||
}
|
||||
return k8sinterface.GetClusterName()
|
||||
}
|
||||
|
||||
func getCloudProviderFromEnvVar() string {
|
||||
val, present := os.LookupEnv(KS_CLOUD_PROVIDER_ENV_VAR)
|
||||
if present {
|
||||
return val
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func isEnvVars() bool {
|
||||
_, present := os.LookupEnv(KS_KUBE_CLUSTER_ENV_VAR)
|
||||
if !present {
|
||||
return false
|
||||
}
|
||||
_, present = os.LookupEnv(KS_CLOUD_PROVIDER_ENV_VAR)
|
||||
if !present {
|
||||
return false
|
||||
}
|
||||
_, present = os.LookupEnv(KS_CLOUD_REGION_ENV_VAR)
|
||||
return present
|
||||
}
|
||||
|
||||
type EmptyCloudProvider struct {
|
||||
}
|
||||
|
||||
func NewEmptyCloudProvider() *EmptyCloudProvider {
|
||||
return &EmptyCloudProvider{}
|
||||
}
|
||||
|
||||
func (emptyCloudProvider *EmptyCloudProvider) getKubeCluster() string {
|
||||
return getClusterFromContext()
|
||||
}
|
||||
|
||||
func (emptyCloudProvider *EmptyCloudProvider) getKubeClusterName() string {
|
||||
return emptyCloudProvider.getKubeCluster()
|
||||
}
|
||||
|
||||
func (emptyCloudProvider *EmptyCloudProvider) getRegion(cluster string, provider string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (emptyCloudProvider *EmptyCloudProvider) getProject(cluster string, provider string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
@@ -1,101 +0,0 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
)
|
||||
|
||||
type EKSProviderEnvVar struct {
|
||||
}
|
||||
|
||||
func NewEKSProviderEnvVar() *EKSProviderEnvVar {
|
||||
return &EKSProviderEnvVar{}
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getKubeClusterName() string {
|
||||
return eksProviderEnvVar.getKubeCluster()
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getKubeCluster() string {
|
||||
val, present := os.LookupEnv(KS_KUBE_CLUSTER_ENV_VAR)
|
||||
if present {
|
||||
return val
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getRegion(cluster string, provider string) (string, error) {
|
||||
return eksProviderEnvVar.getRegionForEKS(cluster)
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getProject(cluster string, provider string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getRegionForEKS(cluster string) (string, error) {
|
||||
region, present := os.LookupEnv(KS_CLOUD_REGION_ENV_VAR)
|
||||
if present {
|
||||
return region, nil
|
||||
}
|
||||
splittedClusterContext := strings.Split(cluster, ".")
|
||||
if len(splittedClusterContext) < 2 {
|
||||
return "", fmt.Errorf("failed to get region")
|
||||
}
|
||||
region = splittedClusterContext[1]
|
||||
return region, nil
|
||||
}
|
||||
|
||||
// ------------------------------------- EKSProviderContext -------------------------
|
||||
|
||||
type EKSProviderContext struct {
|
||||
}
|
||||
|
||||
func NewEKSProviderContext() *EKSProviderContext {
|
||||
return &EKSProviderContext{}
|
||||
}
|
||||
|
||||
func (eksProviderContext *EKSProviderContext) getKubeClusterName() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
cluster := context.Cluster
|
||||
if cluster != "" {
|
||||
splittedCluster := strings.Split(cluster, ".")
|
||||
if len(splittedCluster) > 1 {
|
||||
return splittedCluster[0]
|
||||
}
|
||||
}
|
||||
splittedCluster := strings.Split(k8sinterface.GetClusterName(), ".")
|
||||
if len(splittedCluster) > 1 {
|
||||
return splittedCluster[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (eksProviderContext *EKSProviderContext) getKubeCluster() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
if context.Cluster != "" {
|
||||
return context.Cluster
|
||||
}
|
||||
return k8sinterface.GetClusterName()
|
||||
}
|
||||
|
||||
func (eksProviderContext *EKSProviderContext) getRegion(cluster string, provider string) (string, error) {
|
||||
splittedClusterContext := strings.Split(cluster, ".")
|
||||
if len(splittedClusterContext) < 2 {
|
||||
return "", fmt.Errorf("failed to get region")
|
||||
}
|
||||
region := splittedClusterContext[1]
|
||||
return region, nil
|
||||
}
|
||||
|
||||
func (eksProviderContext *EKSProviderContext) getProject(cluster string, provider string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
@@ -2,9 +2,12 @@ package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"k8s.io/apimachinery/pkg/version"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
@@ -29,48 +32,99 @@ func NewFileResourceHandler(inputPatterns []string, registryAdaptors *RegistryAd
|
||||
|
||||
func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASessionObj, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.ArmoResources, error) {
|
||||
|
||||
//
|
||||
// build resources map
|
||||
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
|
||||
k8sResources := setK8sResourceMap(sessionObj.Policies)
|
||||
allResources := map[string]workloadinterface.IMetadata{}
|
||||
workloadIDToSource := make(map[string]string, 0)
|
||||
workloadIDToSource := make(map[string]reporthandling.Source, 0)
|
||||
armoResources := &cautils.ArmoResources{}
|
||||
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
|
||||
// load resource from local file system
|
||||
sourceToWorkloads, err := cautils.LoadResourcesFromFiles(fileHandler.inputPatterns)
|
||||
if err != nil {
|
||||
return nil, allResources, nil, err
|
||||
if len(fileHandler.inputPatterns) == 0 {
|
||||
return nil, nil, nil, fmt.Errorf("missing input")
|
||||
}
|
||||
for source, ws := range sourceToWorkloads {
|
||||
workloads = append(workloads, ws...)
|
||||
for i := range ws {
|
||||
workloadIDToSource[ws[i].GetID()] = source
|
||||
}
|
||||
}
|
||||
logger.L().Debug("files found in local storage", helpers.Int("files", len(sourceToWorkloads)), helpers.Int("workloads", len(workloads)))
|
||||
path := fileHandler.inputPatterns[0]
|
||||
|
||||
// load resources from url
|
||||
sourceToWorkloads, err = loadResourcesFromUrl(fileHandler.inputPatterns)
|
||||
clonedRepo, err := cloneGitRepo(&path)
|
||||
if err != nil {
|
||||
return nil, allResources, nil, err
|
||||
}
|
||||
if clonedRepo != "" {
|
||||
defer os.RemoveAll(clonedRepo)
|
||||
}
|
||||
|
||||
// Get repo root
|
||||
repoRoot := ""
|
||||
giRepo, err := cautils.NewLocalGitRepository(path)
|
||||
if err == nil {
|
||||
repoRoot, _ = giRepo.GetRootDir()
|
||||
}
|
||||
|
||||
// load resource from local file system
|
||||
logger.L().Info("Accessing local objects")
|
||||
|
||||
sourceToWorkloads := cautils.LoadResourcesFromFiles(path, repoRoot)
|
||||
|
||||
// update workloads and workloadIDToSource
|
||||
for source, ws := range sourceToWorkloads {
|
||||
workloads = append(workloads, ws...)
|
||||
|
||||
relSource, err := filepath.Rel(repoRoot, source)
|
||||
if err == nil {
|
||||
source = relSource
|
||||
}
|
||||
for i := range ws {
|
||||
workloadIDToSource[ws[i].GetID()] = source
|
||||
var filetype string
|
||||
if cautils.IsYaml(source) {
|
||||
filetype = reporthandling.SourceTypeYaml
|
||||
} else if cautils.IsJson(source) {
|
||||
filetype = reporthandling.SourceTypeJson
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
workloadIDToSource[ws[i].GetID()] = reporthandling.Source{
|
||||
RelativePath: source,
|
||||
FileType: filetype,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(workloads) == 0 {
|
||||
logger.L().Debug("files found in local storage", helpers.Int("files", len(sourceToWorkloads)), helpers.Int("workloads", len(workloads)))
|
||||
}
|
||||
|
||||
// load resources from helm charts
|
||||
helmSourceToWorkloads := cautils.LoadResourcesFromHelmCharts(path)
|
||||
for source, ws := range helmSourceToWorkloads {
|
||||
workloads = append(workloads, ws...)
|
||||
|
||||
relSource, err := filepath.Rel(repoRoot, source)
|
||||
if err == nil {
|
||||
source = relSource
|
||||
}
|
||||
for i := range ws {
|
||||
workloadIDToSource[ws[i].GetID()] = reporthandling.Source{
|
||||
RelativePath: source,
|
||||
FileType: reporthandling.SourceTypeHelmChart,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(helmSourceToWorkloads) > 0 {
|
||||
logger.L().Debug("helm templates found in local storage", helpers.Int("helmTemplates", len(helmSourceToWorkloads)), helpers.Int("workloads", len(workloads)))
|
||||
}
|
||||
|
||||
// addCommitData(fileHandler.inputPatterns[0], workloadIDToSource)
|
||||
|
||||
if len(workloads) == 0 {
|
||||
return nil, allResources, nil, fmt.Errorf("empty list of workloads - no workloads found")
|
||||
}
|
||||
logger.L().Debug("files found in git repo", helpers.Int("files", len(sourceToWorkloads)), helpers.Int("workloads", len(workloads)))
|
||||
|
||||
sessionObj.ResourceSource = workloadIDToSource
|
||||
|
||||
// map all resources: map["/group/version/kind"][]<k8s workloads>
|
||||
// map all resources: map["/apiVersion/version/kind"][]<k8s workloads>
|
||||
mappedResources := mapResources(workloads)
|
||||
|
||||
// save only relevant resources
|
||||
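The rewritten GetResources above tags every loaded workload with a reporthandling.Source holding its repo-relative path and file type (YAML, JSON or Helm chart). A minimal sketch of that classification step, assuming simplified type names and plain string file types instead of the actual reporthandling constants:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// source is a simplified stand-in for reporthandling.Source: where a workload
// came from and what kind of file it was parsed from.
type source struct {
	RelativePath string
	FileType     string
}

// classifySource mirrors the logic above: make the path relative to the repo
// root when possible and tag it as YAML or JSON, skipping anything else.
func classifySource(repoRoot, path string) (source, bool) {
	if rel, err := filepath.Rel(repoRoot, path); err == nil {
		path = rel
	}
	switch strings.ToLower(filepath.Ext(path)) {
	case ".yaml", ".yml":
		return source{RelativePath: path, FileType: "Yaml"}, true
	case ".json":
		return source{RelativePath: path, FileType: "Json"}, true
	default:
		return source{}, false // not a supported manifest file
	}
}

func main() {
	s, ok := classifySource("/repo", "/repo/deploy/app.yaml")
	fmt.Println(s, ok) // {deploy/app.yaml Yaml} true
	_, ok = classifySource("/repo", "/repo/README.md")
	fmt.Println(ok) // false
}
```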
@@ -89,39 +143,11 @@ func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASess
|
||||
logger.L().Warning("failed to collect images vulnerabilities", helpers.Error(err))
|
||||
}
|
||||
|
||||
return k8sResources, allResources, armoResources, nil
|
||||
logger.L().Success("Accessed to local objects")
|
||||
|
||||
return k8sResources, allResources, armoResources, nil
|
||||
}
|
||||
|
||||
func (fileHandler *FileResourceHandler) GetClusterAPIServerInfo() *version.Info {
|
||||
return nil
|
||||
}
|
||||
|
||||
// build resources map
|
||||
func mapResources(workloads []workloadinterface.IMetadata) map[string][]workloadinterface.IMetadata {
|
||||
|
||||
allResources := map[string][]workloadinterface.IMetadata{}
|
||||
for i := range workloads {
|
||||
groupVersionResource, err := k8sinterface.GetGroupVersionResource(workloads[i].GetKind())
|
||||
if err != nil {
|
||||
// TODO - print warning
|
||||
continue
|
||||
}
|
||||
|
||||
if k8sinterface.IsTypeWorkload(workloads[i].GetObject()) {
|
||||
w := workloadinterface.NewWorkloadObj(workloads[i].GetObject())
|
||||
if groupVersionResource.Group != w.GetGroup() || groupVersionResource.Version != w.GetVersion() {
|
||||
// TODO - print warning
|
||||
continue
|
||||
}
|
||||
}
|
||||
resourceTriplets := k8sinterface.JoinResourceTriplets(groupVersionResource.Group, groupVersionResource.Version, groupVersionResource.Resource)
|
||||
if r, ok := allResources[resourceTriplets]; ok {
|
||||
allResources[resourceTriplets] = append(r, workloads[i])
|
||||
} else {
|
||||
allResources[resourceTriplets] = []workloadinterface.IMetadata{workloads[i]}
|
||||
}
|
||||
}
|
||||
return allResources
|
||||
|
||||
}
|
||||
|
||||
86
core/pkg/resourcehandler/filesloaderutils.go
Normal file
@@ -0,0 +1,86 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
giturl "github.com/armosec/go-git-url"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// Clone git repository
|
||||
func cloneGitRepo(path *string) (string, error) {
|
||||
var clonedDir string
|
||||
|
||||
// Clone git repository if needed
|
||||
gitURL, err := giturl.NewGitURL(*path)
|
||||
if err == nil {
|
||||
logger.L().Info("cloning", helpers.String("repository url", gitURL.GetURL().String()))
|
||||
cautils.StartSpinner()
|
||||
clonedDir, err = cloneRepo(gitURL)
|
||||
cautils.StopSpinner()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to clone git repo '%s', %w", gitURL.GetURL().String(), err)
|
||||
}
|
||||
|
||||
*path = filepath.Join(clonedDir, gitURL.GetPath())
|
||||
|
||||
}
|
||||
return clonedDir, nil
|
||||
}
|
||||
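cloneGitRepo above rewrites *path to point inside a temporary checkout when the input is a git URL, and returns the cloned directory so the caller can delete it afterwards. A self-contained sketch of the call-site pattern, with a stub clone function standing in for the real giturl-based implementation:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// fakeCloneGitRepo is a stand-in for the real cloneGitRepo above: when the
// input looks like a git URL it "clones" into a temp dir and rewrites *path to
// point inside the checkout; for local paths it is a no-op.
func fakeCloneGitRepo(path *string) (string, error) {
	if !strings.HasPrefix(*path, "https://") && !strings.HasPrefix(*path, "git@") {
		return "", nil // local path - nothing to clone
	}
	clonedDir, err := os.MkdirTemp("", "kubescape-clone-")
	if err != nil {
		return "", err
	}
	*path = clonedDir // the real code joins clonedDir with the URL's in-repo path
	return clonedDir, nil
}

// The call-site pattern used by the file resource handler: clone if needed,
// then remove the temporary checkout when scanning is done.
func main() {
	path := "https://github.com/armosec/kubescape"
	clonedRepo, err := fakeCloneGitRepo(&path)
	if err != nil {
		fmt.Println("clone failed:", err)
		return
	}
	if clonedRepo != "" {
		defer os.RemoveAll(clonedRepo)
	}
	fmt.Println("scanning local path:", path)
}
```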
|
||||
// build resources map
|
||||
func mapResources(workloads []workloadinterface.IMetadata) map[string][]workloadinterface.IMetadata {
|
||||
|
||||
allResources := map[string][]workloadinterface.IMetadata{}
|
||||
for i := range workloads {
|
||||
groupVersionResource, err := k8sinterface.GetGroupVersionResource(workloads[i].GetKind())
|
||||
if err != nil {
|
||||
// TODO - print warning
|
||||
continue
|
||||
}
|
||||
|
||||
if k8sinterface.IsTypeWorkload(workloads[i].GetObject()) {
|
||||
w := workloadinterface.NewWorkloadObj(workloads[i].GetObject())
|
||||
if groupVersionResource.Group != w.GetGroup() || groupVersionResource.Version != w.GetVersion() {
|
||||
// TODO - print warning
|
||||
continue
|
||||
}
|
||||
}
|
||||
resourceTriplets := k8sinterface.JoinResourceTriplets(groupVersionResource.Group, groupVersionResource.Version, groupVersionResource.Resource)
|
||||
if r, ok := allResources[resourceTriplets]; ok {
|
||||
allResources[resourceTriplets] = append(r, workloads[i])
|
||||
} else {
|
||||
allResources[resourceTriplets] = []workloadinterface.IMetadata{workloads[i]}
|
||||
}
|
||||
}
|
||||
return allResources
|
||||
|
||||
}
|
||||
|
||||
func addCommitData(input string, workloadIDToSource map[string]reporthandling.Source) {
|
||||
giRepo, err := cautils.NewLocalGitRepository(input)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for k := range workloadIDToSource {
|
||||
sourceObj := workloadIDToSource[k]
|
||||
lastCommit, err := giRepo.GetFileLastCommit(sourceObj.RelativePath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
sourceObj.LastCommit = reporthandling.LastCommit{
|
||||
Hash: lastCommit.SHA,
|
||||
Date: lastCommit.Author.Date,
|
||||
CommitterName: lastCommit.Author.Name,
|
||||
CommitterEmail: lastCommit.Author.Email,
|
||||
Message: lastCommit.Message,
|
||||
}
|
||||
workloadIDToSource[k] = sourceObj
|
||||
}
|
||||
}
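A short sketch (not in the diff) of how addCommitData is expected to be fed; the workload ID, relative path, and repo path are made-up illustrative values.

// Hypothetical input: one workload mapped to its source file inside the scanned repo.
workloadIDToSource := map[string]reporthandling.Source{
	"apps/v1/default/Deployment/demo": {RelativePath: "deployments/demo.yaml"}, // illustrative values
}
addCommitData("/path/to/local/repo", workloadIDToSource) // illustrative repo path
// On success each Source now carries LastCommit data (hash, date, committer, message).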
|
||||
@@ -1,131 +0,0 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
)
|
||||
|
||||
type GKEProviderEnvVar struct {
|
||||
}
|
||||
|
||||
func NewGKEProviderEnvVar() *GKEProviderEnvVar {
|
||||
return &GKEProviderEnvVar{}
|
||||
}
|
||||
func (gkeProvider *GKEProviderEnvVar) getKubeClusterName() string {
|
||||
return gkeProvider.getKubeCluster()
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getKubeCluster() string {
|
||||
val, present := os.LookupEnv(KS_KUBE_CLUSTER_ENV_VAR)
|
||||
if present {
|
||||
return val
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getRegion(cluster string, provider string) (string, error) {
|
||||
return gkeProvider.getRegionForGKE(cluster)
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getProject(cluster string, provider string) (string, error) {
|
||||
return gkeProvider.getProjectForGKE(cluster)
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getProjectForGKE(cluster string) (string, error) {
|
||||
project, present := os.LookupEnv(KS_GKE_PROJECT_ENV_VAR)
|
||||
if present {
|
||||
return project, nil
|
||||
}
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return "", fmt.Errorf("failed to parse project name from cluster name: '%s'", cluster)
|
||||
}
|
||||
project = parsedName[1]
|
||||
return project, nil
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getRegionForGKE(cluster string) (string, error) {
|
||||
region, present := os.LookupEnv(KS_CLOUD_REGION_ENV_VAR)
|
||||
if present {
|
||||
return region, nil
|
||||
}
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return "", fmt.Errorf("failed to parse region name from cluster name: '%s'", cluster)
|
||||
}
|
||||
region = parsedName[2]
|
||||
return region, nil
|
||||
|
||||
}
|
||||
|
||||
// ------------------------------ GKEProviderContext --------------------------------------------------------
|
||||
|
||||
type GKEProviderContext struct {
|
||||
}
|
||||
|
||||
func NewGKEProviderContext() *GKEProviderContext {
|
||||
return &GKEProviderContext{}
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getKubeClusterName() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
cluster := context.Cluster
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return ""
|
||||
}
|
||||
clusterName := parsedName[3]
|
||||
if clusterName != "" {
|
||||
return clusterName
|
||||
}
|
||||
cluster = k8sinterface.GetClusterName()
|
||||
parsedName = strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return ""
|
||||
}
|
||||
return parsedName[3]
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getKubeCluster() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
if context.Cluster != "" {
|
||||
return context.Cluster
|
||||
}
|
||||
return k8sinterface.GetClusterName()
|
||||
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getRegion(cluster string, provider string) (string, error) {
|
||||
return gkeProviderContext.getRegionForGKE(cluster)
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getProject(cluster string, provider string) (string, error) {
|
||||
return gkeProviderContext.getProjectForGKE(cluster)
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getProjectForGKE(cluster string) (string, error) {
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return "", fmt.Errorf("failed to parse project name from cluster name: '%s'", cluster)
|
||||
}
|
||||
project := parsedName[1]
|
||||
return project, nil
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getRegionForGKE(cluster string) (string, error) {
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return "", fmt.Errorf("failed to parse region name from cluster name: '%s'", cluster)
|
||||
}
|
||||
region := parsedName[2]
|
||||
return region, nil
|
||||
}
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
k8slabels "k8s.io/apimachinery/pkg/labels"
|
||||
@@ -85,6 +86,11 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio
|
||||
if len(imgVulnResources) > 0 {
|
||||
if err := k8sHandler.registryAdaptors.collectImagesVulnerabilities(k8sResourcesMap, allResources, armoResourceMap); err != nil {
|
||||
logger.L().Warning("failed to collect image vulnerabilities", helpers.Error(err))
|
||||
cautils.SetInfoMapForResources(fmt.Sprintf("failed to pull image scanning data: %s", err.Error()), imgVulnResources, sessionObj.InfoMap)
|
||||
} else {
|
||||
if isEmptyImgVulns(*armoResourceMap) {
|
||||
cautils.SetInfoMapForResources("image scanning is not configured. for more information: https://hub.armo.cloud/docs/cluster-vulnerability-scanning", imgVulnResources, sessionObj.InfoMap)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -103,7 +109,7 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio
|
||||
sessionObj.InfoMap = infoMap
|
||||
}
|
||||
} else {
|
||||
cautils.SetInfoMapForResources("enable-host-scan flag not used", hostResources, sessionObj.InfoMap)
|
||||
cautils.SetInfoMapForResources("enable-host-scan flag not used. For more information: https://hub.armo.cloud/docs/host-sensor", hostResources, sessionObj.InfoMap)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -265,23 +271,15 @@ func (k8sHandler *K8sResourceHandler) collectRbacResources(allResources map[stri
|
||||
|
||||
func getCloudProviderDescription(allResources map[string]workloadinterface.IMetadata, armoResourceMap *cautils.ArmoResources) (string, error) {
|
||||
logger.L().Debug("Collecting cloud data")
|
||||
cloudProvider := initCloudProvider()
|
||||
cluster := cloudProvider.getKubeCluster()
|
||||
clusterName := cloudProvider.getKubeClusterName()
|
||||
provider := getCloudProvider()
|
||||
region, err := cloudProvider.getRegion(cluster, provider)
|
||||
if err != nil {
|
||||
return provider, err
|
||||
}
|
||||
project, err := cloudProvider.getProject(cluster, provider)
|
||||
if err != nil {
|
||||
return provider, err
|
||||
}
|
||||
|
||||
clusterName := cautils.ClusterName
|
||||
|
||||
provider := cloudsupport.GetCloudProvider(clusterName)
|
||||
|
||||
if provider != "" {
|
||||
logger.L().Debug("cloud", helpers.String("cluster", cluster), helpers.String("clusterName", clusterName), helpers.String("provider", provider), helpers.String("region", region), helpers.String("project", project))
|
||||
logger.L().Debug("cloud", helpers.String("cluster", clusterName), helpers.String("clusterName", clusterName), helpers.String("provider", provider))
|
||||
|
||||
wl, err := cloudsupport.GetDescriptiveInfoFromCloudProvider(clusterName, provider, region, project)
|
||||
wl, err := cloudsupport.GetDescriptiveInfoFromCloudProvider(clusterName, provider)
|
||||
if err != nil {
|
||||
// Return error with useful info on how to configure credentials for getting cloud provider info
|
||||
logger.L().Debug("failed to get descriptive information", helpers.Error(err))
|
||||
@@ -295,17 +293,31 @@ func getCloudProviderDescription(allResources map[string]workloadinterface.IMeta
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) pullWorkerNodesNumber() (int, error) {
|
||||
// labels used for control plane
|
||||
listOptions := metav1.ListOptions{
|
||||
LabelSelector: "!node-role.kubernetes.io/control-plane,!node-role.kubernetes.io/master",
|
||||
nodesList, err := k8sHandler.k8s.KubernetesClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||
scheduableNodes := v1.NodeList{}
|
||||
if nodesList != nil {
|
||||
for _, node := range nodesList.Items {
|
||||
if len(node.Spec.Taints) == 0 {
|
||||
scheduableNodes.Items = append(scheduableNodes.Items, node)
|
||||
} else {
|
||||
if !isMasterNodeTaints(node.Spec.Taints) {
|
||||
scheduableNodes.Items = append(scheduableNodes.Items, node)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
nodesList, err := k8sHandler.k8s.KubernetesClient.CoreV1().Nodes().List(context.TODO(), listOptions)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
nodesNumber := 0
|
||||
if nodesList != nil {
|
||||
nodesNumber = len(nodesList.Items)
|
||||
}
|
||||
return nodesNumber, nil
|
||||
return len(scheduableNodes.Items), nil
|
||||
}
|
||||
|
||||
// A NoSchedule taint with an empty value is usually applied to control-plane nodes
|
||||
func isMasterNodeTaints(taints []v1.Taint) bool {
|
||||
for _, taint := range taints {
|
||||
if taint.Effect == v1.TaintEffectNoSchedule && taint.Value == "" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
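A tiny illustration (not part of the diff) of which taints isMasterNodeTaints flags; the taint keys are arbitrary examples.

// A NoSchedule taint with an empty value is treated as a control-plane marker.
controlPlane := []v1.Taint{{Key: "node-role.kubernetes.io/master", Effect: v1.TaintEffectNoSchedule}} // Value defaults to ""
worker := []v1.Taint{{Key: "dedicated", Value: "gpu", Effect: v1.TaintEffectNoSchedule}}
_ = isMasterNodeTaints(controlPlane) // true
_ = isMasterNodeTaints(worker)       // false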
|
||||
|
||||
532 core/pkg/resourcehandler/k8sresources_test.go Normal file
@@ -0,0 +1,532 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func TestIsMasterNodeTaints(t *testing.T) {
|
||||
noTaintNode := `
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Node",
|
||||
"metadata": {
|
||||
"annotations": {
|
||||
"kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock",
|
||||
"node.alpha.kubernetes.io/ttl": "0",
|
||||
"volumes.kubernetes.io/controller-managed-attach-detach": "true"
|
||||
},
|
||||
"creationTimestamp": "2022-05-16T10:52:32Z",
|
||||
"labels": {
|
||||
"beta.kubernetes.io/arch": "amd64",
|
||||
"beta.kubernetes.io/os": "linux",
|
||||
"kubernetes.io/arch": "amd64",
|
||||
"kubernetes.io/hostname": "danielg-minikube",
|
||||
"kubernetes.io/os": "linux",
|
||||
"minikube.k8s.io/commit": "3e64b11ed75e56e4898ea85f96b2e4af0301f43d",
|
||||
"minikube.k8s.io/name": "danielg-minikube",
|
||||
"minikube.k8s.io/updated_at": "2022_05_16T13_52_35_0700",
|
||||
"minikube.k8s.io/version": "v1.25.1",
|
||||
"node-role.kubernetes.io/control-plane": "",
|
||||
"node-role.kubernetes.io/master": "",
|
||||
"node.kubernetes.io/exclude-from-external-load-balancers": ""
|
||||
},
|
||||
"name": "danielg-minikube",
|
||||
"resourceVersion": "9432",
|
||||
"uid": "fc4afcb6-4ca4-4038-ba54-5e16065a614a"
|
||||
},
|
||||
"spec": {
|
||||
"podCIDR": "10.244.0.0/24",
|
||||
"podCIDRs": [
|
||||
"10.244.0.0/24"
|
||||
]
|
||||
},
|
||||
"status": {
|
||||
"addresses": [
|
||||
{
|
||||
"address": "192.168.49.2",
|
||||
"type": "InternalIP"
|
||||
},
|
||||
{
|
||||
"address": "danielg-minikube",
|
||||
"type": "Hostname"
|
||||
}
|
||||
],
|
||||
"allocatable": {
|
||||
"cpu": "4",
|
||||
"ephemeral-storage": "94850516Ki",
|
||||
"hugepages-2Mi": "0",
|
||||
"memory": "10432976Ki",
|
||||
"pods": "110"
|
||||
},
|
||||
"capacity": {
|
||||
"cpu": "4",
|
||||
"ephemeral-storage": "94850516Ki",
|
||||
"hugepages-2Mi": "0",
|
||||
"memory": "10432976Ki",
|
||||
"pods": "110"
|
||||
},
|
||||
"conditions": [
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:14:31Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:29Z",
|
||||
"message": "kubelet has sufficient memory available",
|
||||
"reason": "KubeletHasSufficientMemory",
|
||||
"status": "False",
|
||||
"type": "MemoryPressure"
|
||||
},
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:14:31Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:29Z",
|
||||
"message": "kubelet has no disk pressure",
|
||||
"reason": "KubeletHasNoDiskPressure",
|
||||
"status": "False",
|
||||
"type": "DiskPressure"
|
||||
},
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:14:31Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:29Z",
|
||||
"message": "kubelet has sufficient PID available",
|
||||
"reason": "KubeletHasSufficientPID",
|
||||
"status": "False",
|
||||
"type": "PIDPressure"
|
||||
},
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:14:31Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:45Z",
|
||||
"message": "kubelet is posting ready status",
|
||||
"reason": "KubeletReady",
|
||||
"status": "True",
|
||||
"type": "Ready"
|
||||
}
|
||||
],
|
||||
"daemonEndpoints": {
|
||||
"kubeletEndpoint": {
|
||||
"Port": 10250
|
||||
}
|
||||
},
|
||||
"images": [
|
||||
{
|
||||
"names": [
|
||||
"requarks/wiki@sha256:dd83fff15e77843ff934b25c28c865ac000edf7653e5d11adad1dd51df87439d"
|
||||
],
|
||||
"sizeBytes": 441083858
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"mariadb@sha256:821d0411208eaa88f9e1f0daccd1d534f88d19baf724eb9a2777cbedb10b6c66"
|
||||
],
|
||||
"sizeBytes": 400782682
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/etcd@sha256:64b9ea357325d5db9f8a723dcf503b5a449177b17ac87d69481e126bb724c263",
|
||||
"k8s.gcr.io/etcd:3.5.1-0"
|
||||
],
|
||||
"sizeBytes": 292558922
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"kubernetesui/dashboard@sha256:ec27f462cf1946220f5a9ace416a84a57c18f98c777876a8054405d1428cc92e",
|
||||
"kubernetesui/dashboard:v2.3.1"
|
||||
],
|
||||
"sizeBytes": 220033604
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-apiserver@sha256:f54681a71cce62cbc1b13ebb3dbf1d880f849112789811f98b6aebd2caa2f255",
|
||||
"k8s.gcr.io/kube-apiserver:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 135162256
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-controller-manager@sha256:a7ed87380108a2d811f0d392a3fe87546c85bc366e0d1e024dfa74eb14468604",
|
||||
"k8s.gcr.io/kube-controller-manager:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 124971684
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-proxy@sha256:e40f3a28721588affcf187f3f246d1e078157dabe274003eaa2957a83f7170c8",
|
||||
"k8s.gcr.io/kube-proxy:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 112327826
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/kubescape@sha256:6196f766be50d94b45d903a911f5ee95ac99bc392a1324c3e063bec41efd98ba",
|
||||
"quay.io/armosec/kubescape:v2.0.153"
|
||||
],
|
||||
"sizeBytes": 110345054
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d"
|
||||
],
|
||||
"sizeBytes": 109129446
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/action-trigger@sha256:b93707d10ff86aac8dfa42ad37192d6bcf9aceeb4321b21756e438389c26e07c",
|
||||
"quay.io/armosec/action-trigger:v0.0.5"
|
||||
],
|
||||
"sizeBytes": 65127067
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/images-vulnerabilities-scan@sha256:a5f9ddc04a7fdce6d52ef85a21f0de567d8e04d418c2bc5bf5d72b151c997625",
|
||||
"quay.io/armosec/images-vulnerabilities-scan:v0.0.7"
|
||||
],
|
||||
"sizeBytes": 61446712
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/images-vulnerabilities-scan@sha256:2f879858da89f6542e3223fb18d6d793810cc2ad6e398b66776475e4218b6af5",
|
||||
"quay.io/armosec/images-vulnerabilities-scan:v0.0.8"
|
||||
],
|
||||
"sizeBytes": 61446528
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/cluster-collector@sha256:2c4f733d09f7f4090ace04585230bdfacbbc29a3ade38a2e1233d2c0f730d9b6",
|
||||
"quay.io/armosec/cluster-collector:v0.0.9"
|
||||
],
|
||||
"sizeBytes": 53699576
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-scheduler@sha256:8be4eb1593cf9ff2d91b44596633b7815a3753696031a1eb4273d1b39427fa8c",
|
||||
"k8s.gcr.io/kube-scheduler:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 53488305
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/coredns/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e",
|
||||
"k8s.gcr.io/coredns/coredns:v1.8.6"
|
||||
],
|
||||
"sizeBytes": 46829283
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"kubernetesui/metrics-scraper@sha256:36d5b3f60e1a144cc5ada820910535074bdf5cf73fb70d1ff1681537eef4e172",
|
||||
"kubernetesui/metrics-scraper:v1.0.7"
|
||||
],
|
||||
"sizeBytes": 34446077
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944",
|
||||
"gcr.io/k8s-minikube/storage-provisioner:v5"
|
||||
],
|
||||
"sizeBytes": 31465472
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/notification-server@sha256:b6e9b296cd53bd3b2b42c516d8ab43db998acff1124a57aff8d66b3dd7881979",
|
||||
"quay.io/armosec/notification-server:v0.0.3"
|
||||
],
|
||||
"sizeBytes": 20209940
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/kube-host-sensor@sha256:82139d2561039726be060df2878ef023c59df7c536fbd7f6d766af5a99569fee",
|
||||
"quay.io/armosec/kube-host-sensor:latest"
|
||||
],
|
||||
"sizeBytes": 11796788
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db",
|
||||
"k8s.gcr.io/pause:3.6"
|
||||
],
|
||||
"sizeBytes": 682696
|
||||
}
|
||||
],
|
||||
"nodeInfo": {
|
||||
"architecture": "amd64",
|
||||
"bootID": "828cbe73-120b-43cf-aae0-9e2d15b8c873",
|
||||
"containerRuntimeVersion": "docker://20.10.12",
|
||||
"kernelVersion": "5.13.0-40-generic",
|
||||
"kubeProxyVersion": "v1.23.1",
|
||||
"kubeletVersion": "v1.23.1",
|
||||
"machineID": "8de776e053e140d6a14c2d2def3d6bb8",
|
||||
"operatingSystem": "linux",
|
||||
"osImage": "Ubuntu 20.04.2 LTS",
|
||||
"systemUUID": "da12dc19-10bf-4033-a440-2d9aa33d6fe3"
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
var l v1.Node
|
||||
_ = json.Unmarshal([]byte(noTaintNode), &l)
|
||||
assert.False(t, isMasterNodeTaints(l.Spec.Taints))
|
||||
|
||||
taintNode :=
|
||||
`
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Node",
|
||||
"metadata": {
|
||||
"annotations": {
|
||||
"kubeadm.alpha.kubernetes.io/cri-socket": "/var/run/dockershim.sock",
|
||||
"node.alpha.kubernetes.io/ttl": "0",
|
||||
"volumes.kubernetes.io/controller-managed-attach-detach": "true"
|
||||
},
|
||||
"creationTimestamp": "2022-05-16T10:52:32Z",
|
||||
"labels": {
|
||||
"beta.kubernetes.io/arch": "amd64",
|
||||
"beta.kubernetes.io/os": "linux",
|
||||
"kubernetes.io/arch": "amd64",
|
||||
"kubernetes.io/hostname": "danielg-minikube",
|
||||
"kubernetes.io/os": "linux",
|
||||
"minikube.k8s.io/commit": "3e64b11ed75e56e4898ea85f96b2e4af0301f43d",
|
||||
"minikube.k8s.io/name": "danielg-minikube",
|
||||
"minikube.k8s.io/updated_at": "2022_05_16T13_52_35_0700",
|
||||
"minikube.k8s.io/version": "v1.25.1",
|
||||
"node-role.kubernetes.io/control-plane": "",
|
||||
"node-role.kubernetes.io/master": "",
|
||||
"node.kubernetes.io/exclude-from-external-load-balancers": ""
|
||||
},
|
||||
"name": "danielg-minikube",
|
||||
"resourceVersion": "9871",
|
||||
"uid": "fc4afcb6-4ca4-4038-ba54-5e16065a614a"
|
||||
},
|
||||
"spec": {
|
||||
"podCIDR": "10.244.0.0/24",
|
||||
"podCIDRs": [
|
||||
"10.244.0.0/24"
|
||||
],
|
||||
"taints": [
|
||||
{
|
||||
"effect": "NoSchedule",
|
||||
"key": "key1",
|
||||
"value": ""
|
||||
}
|
||||
]
|
||||
},
|
||||
"status": {
|
||||
"addresses": [
|
||||
{
|
||||
"address": "192.168.49.2",
|
||||
"type": "InternalIP"
|
||||
},
|
||||
{
|
||||
"address": "danielg-minikube",
|
||||
"type": "Hostname"
|
||||
}
|
||||
],
|
||||
"allocatable": {
|
||||
"cpu": "4",
|
||||
"ephemeral-storage": "94850516Ki",
|
||||
"hugepages-2Mi": "0",
|
||||
"memory": "10432976Ki",
|
||||
"pods": "110"
|
||||
},
|
||||
"capacity": {
|
||||
"cpu": "4",
|
||||
"ephemeral-storage": "94850516Ki",
|
||||
"hugepages-2Mi": "0",
|
||||
"memory": "10432976Ki",
|
||||
"pods": "110"
|
||||
},
|
||||
"conditions": [
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:24:45Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:29Z",
|
||||
"message": "kubelet has sufficient memory available",
|
||||
"reason": "KubeletHasSufficientMemory",
|
||||
"status": "False",
|
||||
"type": "MemoryPressure"
|
||||
},
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:24:45Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:29Z",
|
||||
"message": "kubelet has no disk pressure",
|
||||
"reason": "KubeletHasNoDiskPressure",
|
||||
"status": "False",
|
||||
"type": "DiskPressure"
|
||||
},
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:24:45Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:29Z",
|
||||
"message": "kubelet has sufficient PID available",
|
||||
"reason": "KubeletHasSufficientPID",
|
||||
"status": "False",
|
||||
"type": "PIDPressure"
|
||||
},
|
||||
{
|
||||
"lastHeartbeatTime": "2022-05-16T14:24:45Z",
|
||||
"lastTransitionTime": "2022-05-16T10:52:45Z",
|
||||
"message": "kubelet is posting ready status",
|
||||
"reason": "KubeletReady",
|
||||
"status": "True",
|
||||
"type": "Ready"
|
||||
}
|
||||
],
|
||||
"daemonEndpoints": {
|
||||
"kubeletEndpoint": {
|
||||
"Port": 10250
|
||||
}
|
||||
},
|
||||
"images": [
|
||||
{
|
||||
"names": [
|
||||
"requarks/wiki@sha256:dd83fff15e77843ff934b25c28c865ac000edf7653e5d11adad1dd51df87439d"
|
||||
],
|
||||
"sizeBytes": 441083858
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"mariadb@sha256:821d0411208eaa88f9e1f0daccd1d534f88d19baf724eb9a2777cbedb10b6c66"
|
||||
],
|
||||
"sizeBytes": 400782682
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/etcd@sha256:64b9ea357325d5db9f8a723dcf503b5a449177b17ac87d69481e126bb724c263",
|
||||
"k8s.gcr.io/etcd:3.5.1-0"
|
||||
],
|
||||
"sizeBytes": 292558922
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"kubernetesui/dashboard@sha256:ec27f462cf1946220f5a9ace416a84a57c18f98c777876a8054405d1428cc92e",
|
||||
"kubernetesui/dashboard:v2.3.1"
|
||||
],
|
||||
"sizeBytes": 220033604
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-apiserver@sha256:f54681a71cce62cbc1b13ebb3dbf1d880f849112789811f98b6aebd2caa2f255",
|
||||
"k8s.gcr.io/kube-apiserver:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 135162256
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-controller-manager@sha256:a7ed87380108a2d811f0d392a3fe87546c85bc366e0d1e024dfa74eb14468604",
|
||||
"k8s.gcr.io/kube-controller-manager:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 124971684
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-proxy@sha256:e40f3a28721588affcf187f3f246d1e078157dabe274003eaa2957a83f7170c8",
|
||||
"k8s.gcr.io/kube-proxy:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 112327826
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/kubescape@sha256:6196f766be50d94b45d903a911f5ee95ac99bc392a1324c3e063bec41efd98ba",
|
||||
"quay.io/armosec/kubescape:v2.0.153"
|
||||
],
|
||||
"sizeBytes": 110345054
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d"
|
||||
],
|
||||
"sizeBytes": 109129446
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/action-trigger@sha256:b93707d10ff86aac8dfa42ad37192d6bcf9aceeb4321b21756e438389c26e07c",
|
||||
"quay.io/armosec/action-trigger:v0.0.5"
|
||||
],
|
||||
"sizeBytes": 65127067
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/images-vulnerabilities-scan@sha256:a5f9ddc04a7fdce6d52ef85a21f0de567d8e04d418c2bc5bf5d72b151c997625",
|
||||
"quay.io/armosec/images-vulnerabilities-scan:v0.0.7"
|
||||
],
|
||||
"sizeBytes": 61446712
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/images-vulnerabilities-scan@sha256:2f879858da89f6542e3223fb18d6d793810cc2ad6e398b66776475e4218b6af5",
|
||||
"quay.io/armosec/images-vulnerabilities-scan:v0.0.8"
|
||||
],
|
||||
"sizeBytes": 61446528
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/cluster-collector@sha256:2c4f733d09f7f4090ace04585230bdfacbbc29a3ade38a2e1233d2c0f730d9b6",
|
||||
"quay.io/armosec/cluster-collector:v0.0.9"
|
||||
],
|
||||
"sizeBytes": 53699576
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/kube-scheduler@sha256:8be4eb1593cf9ff2d91b44596633b7815a3753696031a1eb4273d1b39427fa8c",
|
||||
"k8s.gcr.io/kube-scheduler:v1.23.1"
|
||||
],
|
||||
"sizeBytes": 53488305
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/coredns/coredns@sha256:5b6ec0d6de9baaf3e92d0f66cd96a25b9edbce8716f5f15dcd1a616b3abd590e",
|
||||
"k8s.gcr.io/coredns/coredns:v1.8.6"
|
||||
],
|
||||
"sizeBytes": 46829283
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"kubernetesui/metrics-scraper@sha256:36d5b3f60e1a144cc5ada820910535074bdf5cf73fb70d1ff1681537eef4e172",
|
||||
"kubernetesui/metrics-scraper:v1.0.7"
|
||||
],
|
||||
"sizeBytes": 34446077
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"gcr.io/k8s-minikube/storage-provisioner@sha256:18eb69d1418e854ad5a19e399310e52808a8321e4c441c1dddad8977a0d7a944",
|
||||
"gcr.io/k8s-minikube/storage-provisioner:v5"
|
||||
],
|
||||
"sizeBytes": 31465472
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/notification-server@sha256:b6e9b296cd53bd3b2b42c516d8ab43db998acff1124a57aff8d66b3dd7881979",
|
||||
"quay.io/armosec/notification-server:v0.0.3"
|
||||
],
|
||||
"sizeBytes": 20209940
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/kube-host-sensor@sha256:82139d2561039726be060df2878ef023c59df7c536fbd7f6d766af5a99569fee",
|
||||
"quay.io/armosec/kube-host-sensor:latest"
|
||||
],
|
||||
"sizeBytes": 11796788
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"k8s.gcr.io/pause@sha256:3d380ca8864549e74af4b29c10f9cb0956236dfb01c40ca076fb6c37253234db",
|
||||
"k8s.gcr.io/pause:3.6"
|
||||
],
|
||||
"sizeBytes": 682696
|
||||
}
|
||||
],
|
||||
"nodeInfo": {
|
||||
"architecture": "amd64",
|
||||
"bootID": "828cbe73-120b-43cf-aae0-9e2d15b8c873",
|
||||
"containerRuntimeVersion": "docker://20.10.12",
|
||||
"kernelVersion": "5.13.0-40-generic",
|
||||
"kubeProxyVersion": "v1.23.1",
|
||||
"kubeletVersion": "v1.23.1",
|
||||
"machineID": "8de776e053e140d6a14c2d2def3d6bb8",
|
||||
"operatingSystem": "linux",
|
||||
"osImage": "Ubuntu 20.04.2 LTS",
|
||||
"systemUUID": "da12dc19-10bf-4033-a440-2d9aa33d6fe3"
|
||||
}
|
||||
}
|
||||
}
|
||||
`
|
||||
_ = json.Unmarshal([]byte(taintNode), &l)
|
||||
assert.True(t, isMasterNodeTaints(l.Spec.Taints))
|
||||
}
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"k8s.io/utils/strings/slices"
|
||||
|
||||
@@ -12,12 +11,47 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
ClusterDescribe = "ClusterDescribe"
|
||||
ClusterDescribe = "ClusterDescribe"
|
||||
KubeletConfiguration = "KubeletConfiguration"
|
||||
OsReleaseFile = "OsReleaseFile"
|
||||
KernelVersion = "KernelVersion"
|
||||
LinuxSecurityHardeningStatus = "LinuxSecurityHardeningStatus"
|
||||
OpenPortsList = "OpenPortsList"
|
||||
LinuxKernelVariables = "LinuxKernelVariables"
|
||||
KubeletCommandLine = "KubeletCommandLine"
|
||||
ImageVulnerabilities = "ImageVulnerabilities"
|
||||
KubeletInfo = "KubeletInfo"
|
||||
KubeProxyInfo = "KubeProxyInfo"
|
||||
|
||||
MapResourceToApiGroup = map[string]string{
|
||||
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
|
||||
OsReleaseFile: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeletCommandLine: "hostdata.kubescape.cloud/v1beta0",
|
||||
KernelVersion: "hostdata.kubescape.cloud/v1beta0",
|
||||
LinuxSecurityHardeningStatus: "hostdata.kubescape.cloud/v1beta0",
|
||||
OpenPortsList: "hostdata.kubescape.cloud/v1beta0",
|
||||
LinuxKernelVariables: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeletInfo: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeProxyInfo: "hostdata.kubescape.cloud/v1beta0",
|
||||
}
|
||||
MapResourceToApiGroupVuln = map[string][]string{
|
||||
ImageVulnerabilities: {"armo.vuln.images/v1", "image.vulnscan.com/v1"}}
|
||||
MapResourceToApiGroupCloud = map[string][]string{
|
||||
ClusterDescribe: {"container.googleapis.com/v1", "eks.amazonaws.com/v1"}}
|
||||
ClusterDescribe: {"container.googleapis.com/v1", "eks.amazonaws.com/v1", "management.azure.com/v1"}}
|
||||
)
|
||||
|
||||
func isEmptyImgVulns(armoResourcesMap cautils.ArmoResources) bool {
|
||||
imgVulnResources := cautils.MapImageVulnResources(&armoResourcesMap)
|
||||
for _, resource := range imgVulnResources {
|
||||
if val, ok := armoResourcesMap[resource]; ok {
|
||||
if len(val) > 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func setK8sResourceMap(frameworks []reporthandling.Framework) *cautils.K8SResources {
|
||||
k8sResources := make(cautils.K8SResources)
|
||||
complexMap := setComplexK8sResourceMap(frameworks)
|
||||
@@ -50,19 +84,6 @@ func setArmoResourceMap(frameworks []reporthandling.Framework, resourceToControl
|
||||
return &armoResources
|
||||
}
|
||||
|
||||
func convertComplexResourceMap(frameworks []reporthandling.Framework) map[string]map[string]map[string]interface{} {
|
||||
k8sResources := make(map[string]map[string]map[string]interface{})
|
||||
for _, framework := range frameworks {
|
||||
for _, control := range framework.Controls {
|
||||
for _, rule := range control.Rules {
|
||||
for _, match := range rule.Match {
|
||||
insertResources(k8sResources, match)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return k8sResources
|
||||
}
|
||||
func setComplexK8sResourceMap(frameworks []reporthandling.Framework) map[string]map[string]map[string]interface{} {
|
||||
k8sResources := make(map[string]map[string]map[string]interface{})
|
||||
for _, framework := range frameworks {
|
||||
@@ -93,10 +114,16 @@ func setComplexArmoResourceMap(frameworks []reporthandling.Framework, resourceTo
|
||||
}
|
||||
|
||||
func mapArmoResourceToApiGroup(resource string) []string {
|
||||
if val, ok := hostsensorutils.MapResourceToApiGroup[resource]; ok {
|
||||
if val, ok := MapResourceToApiGroup[resource]; ok {
|
||||
return []string{val}
|
||||
}
|
||||
return MapResourceToApiGroupCloud[resource]
|
||||
if val, ok := MapResourceToApiGroupCloud[resource]; ok {
|
||||
return val
|
||||
}
|
||||
if val, ok := MapResourceToApiGroupVuln[resource]; ok {
|
||||
return val
|
||||
}
|
||||
return []string{}
|
||||
}
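A small sketch (not part of the diff) showing the lookup order mapArmoResourceToApiGroup now implements; the commented results mirror the maps defined above.

_ = mapArmoResourceToApiGroup(KubeletConfiguration) // ["hostdata.kubescape.cloud/v1beta0"]
_ = mapArmoResourceToApiGroup(ClusterDescribe)      // cloud API groups, e.g. "container.googleapis.com/v1"
_ = mapArmoResourceToApiGroup(ImageVulnerabilities) // ["armo.vuln.images/v1", "image.vulnscan.com/v1"]
_ = mapArmoResourceToApiGroup("unknown")            // [] (falls through all three maps)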
|
||||
|
||||
func insertControls(resource string, resourceToControl map[string][]string, control reporthandling.Control) {
|
||||
|
||||
@@ -2,7 +2,9 @@ package resourcehandler
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"testing"
|
||||
)
|
||||
@@ -24,6 +26,18 @@ func TestSetResourceMap(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
func TestSsEmptyImgVulns(t *testing.T) {
|
||||
armoResourcesMap := make(cautils.ArmoResources, 0)
|
||||
armoResourcesMap["container.googleapis.com/v1"] = []string{"fsdfds"}
|
||||
assert.Equal(t, true, isEmptyImgVulns(armoResourcesMap))
|
||||
|
||||
armoResourcesMap["armo.vuln.images/v1/ImageVulnerabilities"] = []string{"dada"}
|
||||
assert.Equal(t, false, isEmptyImgVulns(armoResourcesMap))
|
||||
|
||||
armoResourcesMap["armo.vuln.images/v1/ImageVulnerabilities"] = []string{}
|
||||
armoResourcesMap["bla"] = []string{"blu"}
|
||||
assert.Equal(t, true, isEmptyImgVulns(armoResourcesMap))
|
||||
}
|
||||
|
||||
func TestInsertK8sResources(t *testing.T) {
|
||||
// insertK8sResources
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
armosecadaptorv1 "github.com/armosec/kubescape/v2/core/pkg/registryadaptors/armosec/v1"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
|
||||
@@ -45,8 +46,9 @@ func (registryAdaptors *RegistryAdaptors) collectImagesVulnerabilities(k8sResour
|
||||
for i := range registryAdaptors.adaptors { // login and get vulnerabilities
|
||||
|
||||
if err := registryAdaptors.adaptors[i].Login(); err != nil {
|
||||
logger.L().Error("failed to login", helpers.Error(err))
|
||||
continue
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to login, adaptor: '%s', reason: '%s'", registryAdaptors.adaptors[i].DescribeAdaptor(), err.Error())
|
||||
}
|
||||
}
|
||||
vulnerabilities, err := registryAdaptors.adaptors[i].GetImagesVulnerabilities(imagesIdentifiers)
|
||||
if err != nil {
|
||||
|
||||
36 core/pkg/resourcehandler/remotegitutils.go Normal file
@@ -0,0 +1,36 @@
package resourcehandler

import (
	"fmt"
	"os"

	giturl "github.com/armosec/go-git-url"
	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

// cloneRepo clones a repository to a local temporary directory and returns the directory
func cloneRepo(gitURL giturl.IGitURL) (string, error) {

	// Create temp directory
	tmpDir, err := os.MkdirTemp("", "")
	if err != nil {
		return "", fmt.Errorf("failed to create temporary directory: %w", err)
	}

	// Clone option
	cloneURL := gitURL.GetHttpCloneURL()
	cloneOpts := git.CloneOptions{URL: cloneURL}
	if gitURL.GetBranchName() != "" {
		cloneOpts.ReferenceName = plumbing.NewBranchReferenceName(gitURL.GetBranchName())
		cloneOpts.SingleBranch = true
	}

	// Actual clone
	_, err = git.PlainClone(tmpDir, false, &cloneOpts)
	if err != nil {
		return "", fmt.Errorf("failed to clone %s. %w", gitURL.GetRepoName(), err)
	}

	return tmpDir, nil
}
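A hedged usage sketch (not in the diff) combining giturl.NewGitURL with cloneRepo; the repository URL is illustrative and the temporary-directory cleanup is an assumption about the caller.

gitURL, err := giturl.NewGitURL("https://github.com/armosec/kubescape") // illustrative URL
if err != nil {
	return err
}
tmpDir, err := cloneRepo(gitURL)
if err != nil {
	return err
}
defer os.RemoveAll(tmpDir) // assumed cleanup; cloneRepo itself leaves the directory in place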
@@ -1,82 +1,48 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
giturl "github.com/armosec/go-git-url"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
)
|
||||
|
||||
func loadResourcesFromUrl(inputPatterns []string) (map[string][]workloadinterface.IMetadata, error) {
|
||||
urls := listUrls(inputPatterns)
|
||||
if len(urls) == 0 {
|
||||
if len(inputPatterns) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
g, err := giturl.NewGitAPI(inputPatterns[0])
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
workloads, errs := downloadFiles(urls)
|
||||
files, errs := g.DownloadFilesWithExtension(append(cautils.YAML_PREFIX, cautils.JSON_PREFIX...))
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("%v", errs))
|
||||
}
|
||||
return workloads, nil
|
||||
}
|
||||
|
||||
func listUrls(patterns []string) []string {
|
||||
urls := []string{}
|
||||
for i := range patterns {
|
||||
if strings.HasPrefix(patterns[i], "http") {
|
||||
if yamls, err := ScanRepository(patterns[i], ""); err == nil { // TODO - support branch
|
||||
urls = append(urls, yamls...)
|
||||
} else {
|
||||
logger.L().Error(err.Error())
|
||||
}
|
||||
for i, j := range errs {
|
||||
logger.L().Error(i, helpers.Error(j))
|
||||
}
|
||||
}
|
||||
|
||||
return urls
|
||||
}
|
||||
if len(files) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func downloadFiles(urls []string) (map[string][]workloadinterface.IMetadata, []error) {
|
||||
// convert files to IMetadata
|
||||
workloads := make(map[string][]workloadinterface.IMetadata, 0)
|
||||
errs := []error{}
|
||||
for i := range urls {
|
||||
f, err := downloadFile(urls[i])
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
|
||||
for i, j := range files {
|
||||
w, e := cautils.ReadFile(j, cautils.GetFileFormat(i))
|
||||
if e != nil || len(w) == 0 {
|
||||
continue
|
||||
}
|
||||
w, e := cautils.ReadFile(f, cautils.GetFileFormat(urls[i]))
|
||||
errs = append(errs, e...)
|
||||
if w != nil {
|
||||
if _, ok := workloads[urls[i]]; !ok {
|
||||
workloads[urls[i]] = make([]workloadinterface.IMetadata, 0)
|
||||
}
|
||||
wSlice := workloads[urls[i]]
|
||||
wSlice = append(wSlice, w...)
|
||||
workloads[urls[i]] = wSlice
|
||||
if _, ok := workloads[i]; !ok {
|
||||
workloads[i] = make([]workloadinterface.IMetadata, 0)
|
||||
}
|
||||
wSlice := workloads[i]
|
||||
wSlice = append(wSlice, w...)
|
||||
workloads[i] = wSlice
|
||||
}
|
||||
return workloads, errs
|
||||
}
|
||||
|
||||
func downloadFile(url string) ([]byte, error) {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode < 200 || 301 < resp.StatusCode {
|
||||
return nil, fmt.Errorf("failed to download file, url: '%s', status code: %s", url, resp.Status)
|
||||
}
|
||||
return streamToByte(resp.Body), nil
|
||||
}
|
||||
|
||||
func streamToByte(stream io.Reader) []byte {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(stream)
|
||||
return buf.Bytes()
|
||||
return workloads, nil
|
||||
}
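A condensed sketch (not part of the diff) of the new URL-loading flow above; the URL is illustrative and error handling is trimmed.

// Hypothetical call: fetch YAML/JSON manifests straight from a repository URL.
workloads, err := loadResourcesFromUrl([]string{"https://github.com/armosec/kubescape"}) // illustrative URL
if err == nil {
	for source, objs := range workloads {
		logger.L().Info(source, helpers.String("objects", fmt.Sprintf("%d", len(objs))))
	}
}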
|
||||
|
||||
@@ -17,6 +17,7 @@ const (
|
||||
JunitResultFormat string = "junit"
|
||||
PrometheusFormat string = "prometheus"
|
||||
PdfFormat string = "pdf"
|
||||
HtmlFormat string = "html"
|
||||
)
|
||||
|
||||
type IPrinter interface {
|
||||
|
||||
154 core/pkg/resultshandling/printer/v2/html/report.gohtml Normal file
@@ -0,0 +1,154 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en-US">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<title>Kubescape Scan Report</title>
|
||||
</head>
|
||||
<style>
|
||||
:root {
|
||||
--cell-padding-vertical: 0.25em;
|
||||
--cell-padding-horizontal: 0.25em;
|
||||
--font-family-sans: system-ui, -apple-system, sans-serif;
|
||||
}
|
||||
body {
|
||||
max-width: 60em;
|
||||
margin: auto;
|
||||
font-family: var(--font-family-sans);
|
||||
}
|
||||
table {
|
||||
width: 100%;
|
||||
border-top: 0.1em solid black;
|
||||
border-bottom: 0.1em solid black;
|
||||
border-collapse: collapse;
|
||||
table-layout: fixed;
|
||||
}
|
||||
th {
|
||||
text-align: left;
|
||||
}
|
||||
td, th {
|
||||
padding-top: var(--cell-padding-vertical);
|
||||
padding-bottom: var(--cell-padding-vertical);
|
||||
padding-right: var(--cell-padding-horizontal);
|
||||
vertical-align: top;
|
||||
}
|
||||
td > p {
|
||||
margin: 0;
|
||||
word-break: break-all;
|
||||
hyphens: auto;
|
||||
}
|
||||
thead {
|
||||
border-bottom: 0.01em solid black;
|
||||
}
|
||||
.numericCell {
|
||||
text-align: right;
|
||||
}
|
||||
.controlSeverityCell {
|
||||
width: 10%;
|
||||
}
|
||||
.controlNameCell {
|
||||
width: 50%;
|
||||
}
|
||||
.controlRiskCell {
|
||||
width: 10%;
|
||||
}
|
||||
.resourceSeverityCell {
|
||||
width: 10%;
|
||||
}
|
||||
.resourceNameCell {
|
||||
width: 30%;
|
||||
}
|
||||
.resourceURLCell {
|
||||
width: 10%;
|
||||
}
|
||||
.resourceRemediationCell {
|
||||
width: 50%;
|
||||
}
|
||||
.logo {
|
||||
width: 25%;
|
||||
float: right;
|
||||
}
|
||||
</style>
|
||||
<body>
|
||||
<img class="logo" src="https://raw.githubusercontent.com/armosec/kubescape/master/core/pkg/resultshandling/printer/v2/pdf/logo.png">
|
||||
<h1>Kubescape Scan Report</h1>
|
||||
{{ with .OPASessionObj.Report.SummaryDetails }}
|
||||
<h2>By Controls</h2>
|
||||
<h3>Summary</h3>
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th>All</th>
|
||||
<th>Failed</th>
|
||||
<th>Excluded</th>
|
||||
<th>Skipped</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>{{ .NumberOfControls.All }}</td>
|
||||
<td>{{ .NumberOfControls.Failed }}</td>
|
||||
<td>{{ .NumberOfControls.Excluded }}</td>
|
||||
<td>{{ .NumberOfControls.Skipped }}</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
<h3>Details</h3>
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th class="controlSeverityCell">Severity</th>
|
||||
<th class="controlNameCell">Control Name</th>
|
||||
<th class="controlRiskCell">Failed Resources</th>
|
||||
<th class="controlRiskCell">Excluded Resources</th>
|
||||
<th class="controlRiskCell">All Resources</th>
|
||||
<th class="controlRiskCell">Risk Score, %</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{ $sorted := sortBySeverityName .Controls }}
|
||||
{{ range $control := $sorted }}
|
||||
<tr>
|
||||
<td class="controlSeverityCell">{{ controlSeverityToString $control.ScoreFactor }}</td>
|
||||
<td class="controlNameCell">{{ $control.Name }}</td>
|
||||
<td class="controlRiskCell numericCell">{{ $control.ResourceCounters.FailedResources }}</td>
|
||||
<td class="controlRiskCell numericCell">{{ $control.ResourceCounters.ExcludedResources }}</td>
|
||||
<td class="controlRiskCell numericCell">{{ sum $control.ResourceCounters.ExcludedResources $control.ResourceCounters.FailedResources $control.ResourceCounters.PassedResources }}</td>
|
||||
<td class="controlRiskCell numericCell">{{ float32ToInt $control.Score }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tbody>
|
||||
</table>
|
||||
{{ end }}
|
||||
<h2>By Resource</h2>
|
||||
{{ $sortedResourceTableView := sortByNamespace .ResourceTableView }}
|
||||
{{ range $sortedResourceTableView }}
|
||||
<h3>Name: {{ .Resource.GetName }}</h3>
|
||||
<p>ApiVersion: {{ .Resource.GetApiVersion }}</p>
|
||||
<p>Kind: {{ .Resource.GetKind }}</p>
|
||||
<p>Name: {{ .Resource.GetName }}</p>
|
||||
<p>Namespace: {{ .Resource.GetNamespace }}</p>
|
||||
<table>
|
||||
<thead>
|
||||
<tr>
|
||||
<th class="resourceSeverityCell">Severity</th>
|
||||
<th class="resourceNameCell">Name</th>
|
||||
<th class="resourceURLCell">Docs</th>
|
||||
<th class="resourceRemediationCell">Assistant Remediation</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{{ range .ControlsResult }}
|
||||
<tr>
|
||||
<td class="resourceSeverityCell">{{ .Severity }}</td>
|
||||
<td class="resourceNameCell">{{ .Name }}</td>
|
||||
<td class="resourceURLCell"><a href="https://hub.armo.cloud/docs/{{ lower .URL }}">{{ .URL }}</a></td>
|
||||
<td class="resourceRemediationCell">{{ range .FailedPaths }} <p>{{ . }}</p> {{ end }}</td>
|
||||
</tr>
|
||||
{{ end }}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
{{ end }}
|
||||
</body>
|
||||
</html>
|
||||
151 core/pkg/resultshandling/printer/v2/htmlprinter.go Normal file
@@ -0,0 +1,151 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"html/template"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
)
|
||||
|
||||
const (
|
||||
htmlOutputFile = "report"
|
||||
htmlOutputExt = ".html"
|
||||
)
|
||||
|
||||
//go:embed html/report.gohtml
|
||||
var reportTemplate string
|
||||
|
||||
type HTMLReportingCtx struct {
|
||||
OPASessionObj *cautils.OPASessionObj
|
||||
ResourceTableView ResourceTableView
|
||||
}
|
||||
|
||||
type HtmlPrinter struct {
|
||||
writer *os.File
|
||||
}
|
||||
|
||||
func NewHtmlPrinter() *HtmlPrinter {
|
||||
return &HtmlPrinter{}
|
||||
}
|
||||
|
||||
func (htmlPrinter *HtmlPrinter) SetWriter(outputFile string) {
|
||||
if outputFile == "" {
|
||||
outputFile = htmlOutputFile
|
||||
}
|
||||
if filepath.Ext(strings.TrimSpace(outputFile)) != htmlOutputExt {
|
||||
outputFile = outputFile + htmlOutputExt
|
||||
}
|
||||
htmlPrinter.writer = printer.GetWriter(outputFile)
|
||||
}
|
||||
|
||||
func (htmlPrinter *HtmlPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
tplFuncMap := template.FuncMap{
|
||||
"sum": func(nums ...int) int {
|
||||
total := 0
|
||||
for _, n := range nums {
|
||||
total += n
|
||||
}
|
||||
return total
|
||||
},
|
||||
"float32ToInt": cautils.Float32ToInt,
|
||||
"lower": strings.ToLower,
|
||||
"sortByNamespace": func(resourceTableView ResourceTableView) ResourceTableView {
|
||||
sortedResourceTableView := make(ResourceTableView, len(resourceTableView))
|
||||
copy(sortedResourceTableView, resourceTableView)
|
||||
|
||||
sort.SliceStable(
|
||||
sortedResourceTableView,
|
||||
func(i, j int) bool {
|
||||
return sortedResourceTableView[i].Resource.GetNamespace() < sortedResourceTableView[j].Resource.GetNamespace()
|
||||
},
|
||||
)
|
||||
return sortedResourceTableView
|
||||
},
|
||||
"controlSeverityToString": apis.ControlSeverityToString,
|
||||
"sortBySeverityName": func(controlSummaries map[string]reportsummary.ControlSummary) []reportsummary.ControlSummary {
|
||||
sortedSlice := make([]reportsummary.ControlSummary, 0, len(controlSummaries))
|
||||
for _, val := range controlSummaries {
|
||||
sortedSlice = append(sortedSlice, val)
|
||||
}
|
||||
|
||||
sort.SliceStable(
|
||||
sortedSlice,
|
||||
func(i, j int) bool {
|
||||
//First sort by Severity descending
|
||||
iSeverity := apis.ControlSeverityToInt(sortedSlice[i].GetScoreFactor())
|
||||
jSeverity := apis.ControlSeverityToInt(sortedSlice[j].GetScoreFactor())
|
||||
if iSeverity > jSeverity {
|
||||
return true
|
||||
}
|
||||
if iSeverity < jSeverity {
|
||||
return false
|
||||
}
|
||||
//And then by Name ascending
|
||||
return sortedSlice[i].GetName() < sortedSlice[j].GetName()
|
||||
},
|
||||
)
|
||||
|
||||
return sortedSlice
|
||||
},
|
||||
}
|
||||
tpl := template.Must(
|
||||
template.New("htmlReport").Funcs(tplFuncMap).Parse(reportTemplate),
|
||||
)
|
||||
|
||||
resourceTableView := buildResourceTableView(opaSessionObj)
|
||||
reportingCtx := HTMLReportingCtx{opaSessionObj, resourceTableView}
|
||||
err := tpl.Execute(htmlPrinter.writer, reportingCtx)
|
||||
if err != nil {
|
||||
logger.L().Error("failed to render template", helpers.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func (htmlPrinter *HtmlPrinter) Score(score float32) {
|
||||
return
|
||||
}
|
||||
|
||||
func buildResourceTableView(opaSessionObj *cautils.OPASessionObj) ResourceTableView {
|
||||
resourceTableView := make(ResourceTableView, 0)
|
||||
for resourceID, result := range opaSessionObj.ResourcesResult {
|
||||
if result.GetStatus(nil).IsFailed() {
|
||||
resource := opaSessionObj.AllResources[resourceID]
|
||||
ctlResults := buildResourceControlResultTable(result.AssociatedControls, &opaSessionObj.Report.SummaryDetails)
|
||||
resourceTableView = append(resourceTableView, ResourceResult{resource, ctlResults})
|
||||
}
|
||||
}
|
||||
|
||||
return resourceTableView
|
||||
}
|
||||
|
||||
func buildResourceControlResult(resourceControl resourcesresults.ResourceAssociatedControl, control reportsummary.IControlSummary) ResourceControlResult {
|
||||
ctlSeverity := apis.ControlSeverityToString(control.GetScoreFactor())
|
||||
ctlName := resourceControl.GetName()
|
||||
ctlURL := resourceControl.GetID()
|
||||
failedPaths := failedPathsToString(&resourceControl)
|
||||
|
||||
return ResourceControlResult{ctlSeverity, ctlName, ctlURL, failedPaths}
|
||||
}
|
||||
|
||||
func buildResourceControlResultTable(resourceControls []resourcesresults.ResourceAssociatedControl, summaryDetails *reportsummary.SummaryDetails) []ResourceControlResult {
|
||||
var ctlResults []ResourceControlResult
|
||||
for _, resourceControl := range resourceControls {
|
||||
if resourceControl.GetStatus(nil).IsFailed() {
|
||||
control := summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, resourceControl.GetName())
|
||||
ctlResult := buildResourceControlResult(resourceControl, control)
|
||||
|
||||
ctlResults = append(ctlResults, ctlResult)
|
||||
}
|
||||
}
|
||||
|
||||
return ctlResults
|
||||
}
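A brief usage sketch (not part of the diff) for the new HTML printer; the output file name is illustrative and opaSessionObj is assumed to come from the caller.

p := NewHtmlPrinter()
p.SetWriter("scan-results")  // ".html" is appended automatically when missing
p.ActionPrint(opaSessionObj) // renders html/report.gohtml with the scan data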
|
||||
@@ -28,7 +28,7 @@ func (jsonPrinter *JsonPrinter) Score(score float32) {
|
||||
}
|
||||
|
||||
func (jsonPrinter *JsonPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
r, err := json.Marshal(DataToJson(opaSessionObj))
|
||||
r, err := json.Marshal(FinalizeResults(opaSessionObj))
|
||||
if err != nil {
|
||||
logger.L().Fatal("failed to Marshal posture report object")
|
||||
}
|
||||
|
||||
@@ -49,7 +49,7 @@ type JUnitTestSuite struct {
|
||||
XMLName xml.Name `xml:"testsuite"`
|
||||
Name string `xml:"name,attr"` // Full (class) name of the test for non-aggregated testsuite documents. Class name without the package for aggregated testsuites documents. Required
|
||||
Disabled int `xml:"disabled,attr"` // The total number of disabled tests in the suite. optional. not supported by maven surefire.
|
||||
Errors int `xml:"errors,attr"` // The total number of tests in the suite that errored
|
||||
Errors int `xml:"errors,attr"` // The total number of tests in the suite that errors
|
||||
Failures int `xml:"failures,attr"` // The total number of tests in the suite that failed
|
||||
Hostname string `xml:"hostname,attr"` // Host on which the tests were executed ? cluster name ?
|
||||
ID int `xml:"id,attr"` // Starts at 0 for the first testsuite and is incremented by 1 for each following testsuite
|
||||
@@ -181,7 +181,7 @@ func testsCases(results *cautils.OPASessionObj, controls reportsummary.IControls
|
||||
testCase.Failure = &testCaseFailure
|
||||
} else if control.GetStatus().IsSkipped() {
|
||||
testCase.SkipMessage = &JUnitSkipMessage{
|
||||
Message: "", // TODO - fill after statusInfo is supportred
|
||||
Message: "", // TODO - fill after statusInfo is supported
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -19,25 +19,33 @@ import (
|
||||
|
||||
type PrettyPrinter struct {
|
||||
formatVersion string
|
||||
viewType cautils.ViewTypes
|
||||
writer *os.File
|
||||
verboseMode bool
|
||||
}
|
||||
|
||||
func NewPrettyPrinter(verboseMode bool, formatVersion string) *PrettyPrinter {
|
||||
func NewPrettyPrinter(verboseMode bool, formatVersion string, viewType cautils.ViewTypes) *PrettyPrinter {
|
||||
return &PrettyPrinter{
|
||||
verboseMode: verboseMode,
|
||||
formatVersion: formatVersion,
|
||||
viewType: viewType,
|
||||
}
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
fmt.Fprintf(prettyPrinter.writer, "\n"+getSperator("^")+"\n")
|
||||
fmt.Fprintf(prettyPrinter.writer, "\n"+getSeparator("^")+"\n")
|
||||
|
||||
sortedControlNames := getSortedControlsNames(opaSessionObj.Report.SummaryDetails.Controls) // ListControls().All())
|
||||
|
||||
if prettyPrinter.verboseMode {
|
||||
prettyPrinter.resourceTable(opaSessionObj)
|
||||
switch prettyPrinter.viewType {
|
||||
case cautils.ControlViewType:
|
||||
prettyPrinter.printResults(&opaSessionObj.Report.SummaryDetails.Controls, opaSessionObj.AllResources, sortedControlNames)
|
||||
case cautils.ResourceViewType:
|
||||
if prettyPrinter.verboseMode {
|
||||
prettyPrinter.resourceTable(opaSessionObj)
|
||||
}
|
||||
}
|
||||
|
||||
prettyPrinter.printSummaryTable(&opaSessionObj.Report.SummaryDetails, sortedControlNames)
|
||||
|
||||
}
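A one-line construction sketch (not in the diff) reflecting the new NewPrettyPrinter signature; the format-version string is illustrative and opaSessionObj is assumed to exist in the caller.

pp := NewPrettyPrinter(true, "v2", cautils.ResourceViewType) // verbose, resource-oriented view
pp.ActionPrint(opaSessionObj)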
|
||||
@@ -180,6 +188,10 @@ func generateFooter(summaryDetails *reportsummary.SummaryDetails) []string {
|
||||
}
|
||||
func (prettyPrinter *PrettyPrinter) printSummaryTable(summaryDetails *reportsummary.SummaryDetails, sortedControlNames [][]string) {
|
||||
|
||||
if summaryDetails.NumberOfControls().All() == 0 {
|
||||
fmt.Fprintf(prettyPrinter.writer, "\nKubescape did not scan any of the resources, make sure you are scanning valid kubernetes manifests (Deployments, Pods, etc.)\n")
|
||||
return
|
||||
}
|
||||
cautils.InfoTextDisplay(prettyPrinter.writer, "\n"+controlCountersForSummary(summaryDetails.NumberOfControls())+"\n\n")
|
||||
|
||||
summaryTable := tablewriter.NewWriter(prettyPrinter.writer)
|
||||
@@ -188,10 +200,16 @@ func (prettyPrinter *PrettyPrinter) printSummaryTable(summaryDetails *reportsumm
|
||||
summaryTable.SetHeaderLine(true)
|
||||
summaryTable.SetColumnAlignment(getColumnsAlignments())
|
||||
|
||||
printAll := prettyPrinter.verboseMode
|
||||
if summaryDetails.NumberOfResources().Failed() == 0 {
|
||||
// if there are no failed controls, print the resource table and detailed information
|
||||
printAll = true
|
||||
}
|
||||
|
||||
infoToPrintInfo := mapInfoToPrintInfo(summaryDetails.Controls)
|
||||
for i := len(sortedControlNames) - 1; i >= 0; i-- {
|
||||
for _, c := range sortedControlNames[i] {
|
||||
row := generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, c), infoToPrintInfo, prettyPrinter.verboseMode)
|
||||
row := generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, c), infoToPrintInfo, printAll)
|
||||
if len(row) > 0 {
|
||||
summaryTable.Append(row)
|
||||
}
|
||||
@@ -245,7 +263,7 @@ func controlCountersForSummary(counters reportsummary.ICounters) string {
|
||||
func controlCountersForResource(l *helpersv1.AllLists) string {
|
||||
return fmt.Sprintf("Controls: %d (Failed: %d, Excluded: %d)", len(l.All()), len(l.Failed()), len(l.Excluded()))
|
||||
}
|
||||
func getSperator(sep string) string {
|
||||
func getSeparator(sep string) string {
|
||||
s := ""
|
||||
for i := 0; i < 80; i++ {
|
||||
s += sep
|
||||
|
||||
19 core/pkg/resultshandling/printer/v2/reportingstructs.go Normal file
@@ -0,0 +1,19 @@
package v2

import (
	"github.com/armosec/k8s-interface/workloadinterface"
)

type ResourceTableView []ResourceResult

type ResourceResult struct {
	Resource       workloadinterface.IMetadata
	ControlsResult []ResourceControlResult
}

type ResourceControlResult struct {
	Severity    string
	Name        string
	URL         string
	FailedPaths []string
}
@@ -29,10 +29,10 @@ func (prettyPrinter *PrettyPrinter) resourceTable(opaSessionObj *cautils.OPASess
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
fmt.Fprintf(prettyPrinter.writer, "\n"+getSperator("#")+"\n\n")
|
||||
fmt.Fprintf(prettyPrinter.writer, "\n%s\n", getSeparator("#"))
|
||||
|
||||
if source, ok := opaSessionObj.ResourceSource[resourceID]; ok {
|
||||
fmt.Fprintf(prettyPrinter.writer, "Source: %s\n", source)
|
||||
fmt.Fprintf(prettyPrinter.writer, "Source: %s\n", source.RelativePath)
|
||||
}
|
||||
fmt.Fprintf(prettyPrinter.writer, "ApiVersion: %s\n", resource.GetApiVersion())
|
||||
fmt.Fprintf(prettyPrinter.writer, "Kind: %s\n", resource.GetKind())
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
)
|
||||
|
||||
// finalizeV2Report finalize the results objects by copying data from map to lists
|
||||
func DataToJson(data *cautils.OPASessionObj) *reporthandlingv2.PostureReport {
|
||||
func FinalizeResults(data *cautils.OPASessionObj) *reporthandlingv2.PostureReport {
|
||||
report := reporthandlingv2.PostureReport{
|
||||
SummaryDetails: data.Report.SummaryDetails,
|
||||
ClusterAPIServerInfo: data.Report.ClusterAPIServerInfo,
|
||||
@@ -62,13 +62,13 @@ func mapInfoToPrintInfo(controls reportsummary.ControlSummaries) []infoStars {
|
||||
return infoToPrintInfo
|
||||
}
|
||||
|
||||
func finalizeResources(results []resourcesresults.Result, allResources map[string]workloadinterface.IMetadata, resourcesSource map[string]string) []reporthandling.Resource {
|
||||
func finalizeResources(results []resourcesresults.Result, allResources map[string]workloadinterface.IMetadata, resourcesSource map[string]reporthandling.Source) []reporthandling.Resource {
|
||||
resources := make([]reporthandling.Resource, 0)
|
||||
for i := range results {
|
||||
if obj, ok := allResources[results[i].ResourceID]; ok {
|
||||
resource := *reporthandling.NewResourceIMetadata(obj)
|
||||
if r, ok := resourcesSource[results[i].ResourceID]; ok {
|
||||
resource.SetSource(&reporthandling.Source{Path: r})
|
||||
resource.SetSource(&r)
|
||||
}
|
||||
resources = append(resources, resource)
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
v2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v2"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
@@ -140,8 +141,8 @@ func (report *ReportEventReceiver) generateMessage() {
|
||||
message := "You can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here:"
|
||||
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = getter.GetArmoAPIConnector().GetFrontendURL()
|
||||
v2.ParseHost(&u)
|
||||
|
||||
if report.customerAdminEMail != "" {
|
||||
logger.L().Debug("", helpers.String("account ID", report.customerGUID))
|
||||
|
||||
@@ -5,15 +5,16 @@ import (
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
v2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v2"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
func (report *ReportEventReceiver) initEventReceiverURL() {
|
||||
urlObj := url.URL{}
|
||||
|
||||
urlObj.Scheme = "https"
|
||||
urlObj.Host = getter.GetArmoAPIConnector().GetReportReceiverURL()
|
||||
v2.ParseHost(&urlObj)
|
||||
|
||||
urlObj.Path = "/k8s/postureReport"
|
||||
q := urlObj.Query()
|
||||
q.Add("customerGUID", uuid.MustParse(report.customerGUID).String())
|
||||
|
||||
@@ -32,19 +32,20 @@ func (reportMock *ReportMock) SetClusterName(clusterName string) {
|
||||
}
|
||||
|
||||
func (reportMock *ReportMock) GetURL() string {
|
||||
return getter.GetArmoAPIConnector().GetFrontendURL()
|
||||
}
|
||||
|
||||
func (reportMock *ReportMock) DisplayReportURL() {
|
||||
u := fmt.Sprintf("https://%s/account/login", getter.GetArmoAPIConnector().GetFrontendURL())
|
||||
u := fmt.Sprintf("https://%s/account/sign-up", getter.GetArmoAPIConnector().GetFrontendURL())
|
||||
if reportMock.query != "" {
|
||||
u += fmt.Sprintf("?%s", reportMock.query)
|
||||
}
|
||||
return u
|
||||
}
|
||||
|
||||
func (reportMock *ReportMock) DisplayReportURL() {
|
||||
|
||||
sep := "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
|
||||
message := sep + "\n"
|
||||
message += "Scan results have not been submitted: " + reportMock.message + "\n"
|
||||
message += "Sign up for free: "
|
||||
message += u + "\n"
|
||||
message += reportMock.GetURL() + "\n"
|
||||
message += sep + "\n"
|
||||
cautils.InfoTextDisplay(os.Stderr, fmt.Sprintf("\n%s\n", message))
|
||||
}
|
||||
|
||||
@@ -0,0 +1,48 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetURL(t *testing.T) {
|
||||
// Test submit and registered url
|
||||
{
|
||||
reporter := NewReportEventReceiver(
|
||||
&cautils.ConfigObj{
|
||||
AccountID: "1234",
|
||||
Token: "token",
|
||||
CustomerAdminEMail: "my@email",
|
||||
ClusterName: "test",
|
||||
},
|
||||
"",
|
||||
)
|
||||
assert.Equal(t, "https://portal.armo.cloud/configuration-scanning/test?utm_campaign=Submit&utm_medium=CLI&utm_source=GitHub", reporter.GetURL())
|
||||
}
|
||||
|
||||
// Test submit and NOT registered url
|
||||
{
|
||||
|
||||
reporter := NewReportEventReceiver(
|
||||
&cautils.ConfigObj{
|
||||
AccountID: "1234",
|
||||
Token: "token",
|
||||
ClusterName: "test",
|
||||
},
|
||||
"",
|
||||
)
|
||||
assert.Equal(t, "https://portal.armo.cloud/account/sign-up?customerGUID=1234&invitationToken=token&utm_campaign=Submit&utm_medium=CLI&utm_source=GitHub", reporter.GetURL())
|
||||
}
|
||||
// Test None submit url
|
||||
{
|
||||
reporter := NewReportMock(NO_SUBMIT_QUERY, "")
|
||||
assert.Equal(t, "https://portal.armo.cloud/account/sign-up?utm_source=GitHub&utm_medium=CLI&utm_campaign=no_submit", reporter.GetURL())
|
||||
}
|
||||
// Test None report url
|
||||
{
|
||||
reporter := NewReportMock("", "")
|
||||
assert.Equal(t, "https://portal.armo.cloud/account/sign-up", reporter.GetURL())
|
||||
}
|
||||
}
|
||||
@@ -7,12 +7,12 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/armoapi-go/apis"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
|
||||
@@ -57,8 +57,7 @@ func (report *ReportEventReceiver) Submit(opaSessionObj *cautils.OPASessionObj)
|
||||
if err == nil {
|
||||
report.generateMessage()
|
||||
} else {
|
||||
|
||||
err = fmt.Errorf("failed to submit scan results. url: '%s'", report.GetURL())
|
||||
err = fmt.Errorf("failed to submit scan results. url: '%s', reason: %s", report.GetURL(), err.Error())
|
||||
}
|
||||
|
||||
logger.L().Debug("", helpers.String("account ID", report.customerGUID))
|
||||
@@ -89,16 +88,18 @@ func (report *ReportEventReceiver) prepareReport(opaSessionObj *cautils.OPASessi
|
||||
|
||||
func (report *ReportEventReceiver) GetURL() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = getter.GetArmoAPIConnector().GetFrontendURL()
|
||||
|
||||
ParseHost(&u)
|
||||
q := u.Query()
|
||||
|
||||
if report.customerAdminEMail != "" || report.token == "" { // data has been submitted
|
||||
u.Path = fmt.Sprintf("configuration-scanning/%s", report.clusterName)
|
||||
if report.clusterName != "" {
|
||||
u.Path = fmt.Sprintf("configuration-scanning/%s", report.clusterName)
|
||||
} else {
|
||||
u.Path = fmt.Sprintf("repositories-scan/%s", report.reportID)
|
||||
}
|
||||
} else {
|
||||
u.Path = "account/sign-up"
|
||||
q := u.Query()
|
||||
q.Add("invitationToken", report.token)
|
||||
q.Add("customerGUID", report.customerGUID)
|
||||
}
|
||||
@@ -114,6 +115,7 @@ func (report *ReportEventReceiver) GetURL() string {
|
||||
}
|
||||
func (report *ReportEventReceiver) sendResources(host string, opaSessionObj *cautils.OPASessionObj) error {
|
||||
splittedPostureReport := report.setSubReport(opaSessionObj)
|
||||
|
||||
counter := 0
|
||||
reportCounter := 0
|
||||
if err := report.setResources(splittedPostureReport, opaSessionObj.AllResources, opaSessionObj.ResourceSource, &counter, &reportCounter, host); err != nil {
|
||||
@@ -122,7 +124,6 @@ func (report *ReportEventReceiver) sendResources(host string, opaSessionObj *cau
|
||||
if err := report.setResults(splittedPostureReport, opaSessionObj.ResourcesResult, &counter, &reportCounter, host); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return report.sendReport(host, splittedPostureReport, reportCounter, true)
|
||||
}
|
||||
func (report *ReportEventReceiver) setResults(reportObj *reporthandlingv2.PostureReport, results map[string]resourcesresults.Result, counter, reportCounter *int, host string) error {
|
||||
@@ -154,11 +155,11 @@ func (report *ReportEventReceiver) setResults(reportObj *reporthandlingv2.Postur
|
||||
return nil
|
||||
}
|
||||
|
||||
func (report *ReportEventReceiver) setResources(reportObj *reporthandlingv2.PostureReport, allResources map[string]workloadinterface.IMetadata, resourcesSource map[string]string, counter, reportCounter *int, host string) error {
|
||||
func (report *ReportEventReceiver) setResources(reportObj *reporthandlingv2.PostureReport, allResources map[string]workloadinterface.IMetadata, resourcesSource map[string]reporthandling.Source, counter, reportCounter *int, host string) error {
|
||||
for resourceID, v := range allResources {
|
||||
resource := reporthandling.NewResourceIMetadata(v)
|
||||
if r, ok := resourcesSource[resourceID]; ok {
|
||||
resource.SetSource(&reporthandling.Source{Path: r})
|
||||
resource.SetSource(&r)
|
||||
}
|
||||
r, err := json.Marshal(resource)
|
||||
if err != nil {
|
||||
@@ -187,7 +188,7 @@ func (report *ReportEventReceiver) setResources(reportObj *reporthandlingv2.Post
|
||||
return nil
|
||||
}
|
||||
func (report *ReportEventReceiver) sendReport(host string, postureReport *reporthandlingv2.PostureReport, counter int, isLastReport bool) error {
|
||||
postureReport.PaginationInfo = reporthandlingv2.PaginationMarks{
|
||||
postureReport.PaginationInfo = apis.PaginationMarks{
|
||||
ReportNumber: counter,
|
||||
IsLastReport: isLastReport,
|
||||
}
|
||||
|
||||
@@ -11,14 +11,13 @@ import (
|
||||
|
||||
func (report *ReportEventReceiver) initEventReceiverURL() {
|
||||
urlObj := url.URL{}
|
||||
|
||||
urlObj.Scheme = "https"
|
||||
urlObj.Host = getter.GetArmoAPIConnector().GetReportReceiverURL()
|
||||
ParseHost(&urlObj)
|
||||
urlObj.Path = "/k8s/v2/postureReport"
|
||||
|
||||
q := urlObj.Query()
|
||||
q.Add("customerGUID", uuid.MustParse(report.customerGUID).String())
|
||||
q.Add("clusterName", report.clusterName)
|
||||
q.Add("contextName", report.clusterName)
|
||||
q.Add("clusterName", report.clusterName) // deprecated
|
||||
|
||||
urlObj.RawQuery = q.Encode()
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -21,3 +22,13 @@ func maskID(id string) string {
|
||||
|
||||
return strings.TrimSuffix(str, sep)
|
||||
}
|
||||
|
||||
func ParseHost(urlObj *url.URL) {
|
||||
if strings.Contains(urlObj.Host, "http://") {
|
||||
urlObj.Scheme = "http"
|
||||
urlObj.Host = strings.Replace(urlObj.Host, "http://", "", 1)
|
||||
} else {
|
||||
urlObj.Scheme = "https"
|
||||
urlObj.Host = strings.Replace(urlObj.Host, "https://", "", 1)
|
||||
}
|
||||
}
|
||||
|
||||
38
core/pkg/resultshandling/reporter/v2/utils_test.go
Normal file
@@ -0,0 +1,38 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseHost(t *testing.T) {
|
||||
urlObj := url.URL{}
|
||||
|
||||
urlObj.Host = "http://localhost:7555"
|
||||
ParseHost(&urlObj)
|
||||
assert.Equal(t, "http", urlObj.Scheme)
|
||||
assert.Equal(t, "localhost:7555", urlObj.Host)
|
||||
|
||||
urlObj.Host = "https://localhost:7555"
|
||||
ParseHost(&urlObj)
|
||||
assert.Equal(t, "https", urlObj.Scheme)
|
||||
assert.Equal(t, "localhost:7555", urlObj.Host)
|
||||
|
||||
urlObj.Host = "http://portal-dev.armo.cloud"
|
||||
ParseHost(&urlObj)
|
||||
assert.Equal(t, "http", urlObj.Scheme)
|
||||
assert.Equal(t, "portal-dev.armo.cloud", urlObj.Host)
|
||||
|
||||
urlObj.Host = "https://portal-dev.armo.cloud"
|
||||
ParseHost(&urlObj)
|
||||
assert.Equal(t, "https", urlObj.Scheme)
|
||||
assert.Equal(t, "portal-dev.armo.cloud", urlObj.Host)
|
||||
|
||||
urlObj.Host = "portal-dev.armo.cloud"
|
||||
ParseHost(&urlObj)
|
||||
assert.Equal(t, "https", urlObj.Scheme)
|
||||
assert.Equal(t, "portal-dev.armo.cloud", urlObj.Host)
|
||||
|
||||
}
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
printerv1 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer/v1"
|
||||
printerv2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer/v2"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
|
||||
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
|
||||
)
|
||||
|
||||
type ResultsHandler struct {
|
||||
@@ -52,7 +53,12 @@ func (resultsHandler *ResultsHandler) GetReporter() reporter.IReport {
|
||||
|
||||
// ToJson return results in json format
|
||||
func (resultsHandler *ResultsHandler) ToJson() ([]byte, error) {
|
||||
return json.Marshal(printerv2.DataToJson(resultsHandler.scanData))
|
||||
return json.Marshal(printerv2.FinalizeResults(resultsHandler.scanData))
|
||||
}
|
||||
|
||||
// GetResults return results
|
||||
func (resultsHandler *ResultsHandler) GetResults() *reporthandlingv2.PostureReport {
|
||||
return printerv2.FinalizeResults(resultsHandler.scanData)
|
||||
}
|
||||
|
||||
// HandleResults handles the scan results according to the predefined interfaces
|
||||
@@ -72,7 +78,7 @@ func (resultsHandler *ResultsHandler) HandleResults() error {
|
||||
}
|
||||
|
||||
// NewPrinter defines the output format
|
||||
func NewPrinter(printFormat, formatVersion string, verboseMode bool) printer.IPrinter {
|
||||
func NewPrinter(printFormat, formatVersion string, verboseMode bool, viewType cautils.ViewTypes) printer.IPrinter {
|
||||
|
||||
switch printFormat {
|
||||
case printer.JsonFormat:
|
||||
@@ -89,7 +95,9 @@ func NewPrinter(printFormat, formatVersion string, verboseMode bool) printer.IPr
|
||||
return printerv2.NewPrometheusPrinter(verboseMode)
|
||||
case printer.PdfFormat:
|
||||
return printerv2.NewPdfPrinter()
|
||||
case printer.HtmlFormat:
|
||||
return printerv2.NewHtmlPrinter()
|
||||
default:
|
||||
return printerv2.NewPrettyPrinter(verboseMode, formatVersion)
|
||||
return printerv2.NewPrettyPrinter(verboseMode, formatVersion, viewType)
|
||||
}
|
||||
}
|
||||
|
||||
164
go.mod
@@ -1,126 +1,164 @@
|
||||
module github.com/armosec/kubescape/v2
|
||||
|
||||
go 1.17
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/armosec/armoapi-go v0.0.66
|
||||
github.com/armosec/k8s-interface v0.0.68
|
||||
github.com/armosec/opa-utils v0.0.130
|
||||
github.com/armosec/armoapi-go v0.0.97
|
||||
github.com/armosec/go-git-url v0.0.13
|
||||
github.com/armosec/k8s-interface v0.0.78
|
||||
github.com/armosec/opa-utils v0.0.159
|
||||
github.com/armosec/rbac-utils v0.0.14
|
||||
github.com/armosec/utils-go v0.0.3
|
||||
github.com/armosec/utils-k8s-go v0.0.5
|
||||
github.com/armosec/utils-go v0.0.7
|
||||
github.com/armosec/utils-k8s-go v0.0.7
|
||||
github.com/briandowns/spinner v1.18.1
|
||||
github.com/enescakir/emoji v1.0.0
|
||||
github.com/fatih/color v1.13.0
|
||||
github.com/francoispqt/gojay v1.2.13
|
||||
github.com/go-git/go-git/v5 v5.4.2
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/johnfercher/maroto v0.36.1
|
||||
github.com/johnfercher/maroto v0.37.0
|
||||
github.com/mattn/go-isatty v0.0.14
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/open-policy-agent/opa v0.39.0
|
||||
github.com/open-policy-agent/opa v0.41.0
|
||||
github.com/spf13/cobra v1.4.0
|
||||
github.com/stretchr/testify v1.7.1
|
||||
github.com/whilp/git-urls v1.0.0
|
||||
go.uber.org/zap v1.21.0
|
||||
golang.org/x/mod v0.5.1
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
k8s.io/api v0.23.5
|
||||
k8s.io/apimachinery v0.23.5
|
||||
k8s.io/client-go v0.23.5
|
||||
helm.sh/helm/v3 v3.9.0
|
||||
k8s.io/api v0.24.2
|
||||
k8s.io/apimachinery v0.24.2
|
||||
k8s.io/client-go v0.24.2
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
|
||||
sigs.k8s.io/yaml v1.3.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.99.0 // indirect
|
||||
cloud.google.com/go/container v1.0.0 // indirect
|
||||
cloud.google.com/go/compute v1.7.0 // indirect
|
||||
cloud.google.com/go/container v1.2.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.24 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/BurntSushi/toml v1.0.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.1.1 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.2.2 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||
github.com/OneOfOne/xxhash v1.2.8 // indirect
|
||||
github.com/aws/aws-sdk-go v1.41.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.7.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.9.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.1.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/eks v1.17.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.6.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.8.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.13.0 // indirect
|
||||
github.com/aws/smithy-go v1.9.1 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20220517143526-88bb52951d5b // indirect
|
||||
github.com/acomagu/bufpipe v1.0.3 // indirect
|
||||
github.com/agnivade/levenshtein v1.1.1 // indirect
|
||||
github.com/aws/aws-sdk-go v1.44.36 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.16.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.15.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.12.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/eks v1.21.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.11.9 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.16.7 // indirect
|
||||
github.com/aws/smithy-go v1.11.3 // indirect
|
||||
github.com/boombuler/barcode v1.0.1 // indirect
|
||||
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect
|
||||
github.com/coreos/go-oidc v2.2.1+incompatible // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/docker v20.10.9+incompatible // indirect
|
||||
github.com/dimchansky/utfbom v1.1.1 // indirect
|
||||
github.com/docker/docker v20.10.17+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/envoyproxy/go-control-plane v0.10.1 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
|
||||
github.com/emirpasic/gods v1.18.1 // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/go-git/gcfg v1.5.0 // indirect
|
||||
github.com/go-git/go-billy/v5 v5.3.1 // indirect
|
||||
github.com/go-gota/gota v0.12.0 // indirect
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/swag v0.21.1 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.4.1 // indirect
|
||||
github.com/golang/glog v1.0.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-cmp v0.5.7 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/google/gnostic v0.6.9 // indirect
|
||||
github.com/google/go-cmp v0.5.8 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.4.0 // indirect
|
||||
github.com/huandu/xstrings v1.3.2 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/jung-kurt/gofpdf v1.16.2 // indirect
|
||||
github.com/kevinburke/ssh_config v1.2.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/pquerna/cachecontrol v0.1.0 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect
|
||||
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
|
||||
github.com/rivo/uniseg v0.2.0 // indirect
|
||||
github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245 // indirect
|
||||
github.com/sergi/go-diff v1.2.0 // indirect
|
||||
github.com/shopspring/decimal v1.2.0 // indirect
|
||||
github.com/spf13/cast v1.4.1 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/vektah/gqlparser/v2 v2.4.5 // indirect
|
||||
github.com/xanzy/ssh-agent v0.3.1 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/yashtewari/glob-intersection v0.1.0 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
|
||||
go.uber.org/atomic v1.9.0 // indirect
|
||||
go.uber.org/multierr v1.8.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
|
||||
golang.org/x/net v0.0.0-20220615171555-694bf12d69de // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb // indirect
|
||||
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c // indirect
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||
gonum.org/v1/gonum v0.9.1 // indirect
|
||||
google.golang.org/api v0.62.0 // indirect
|
||||
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
|
||||
gonum.org/v1/gonum v0.11.0 // indirect
|
||||
google.golang.org/api v0.84.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
|
||||
google.golang.org/grpc v1.45.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad // indirect
|
||||
google.golang.org/grpc v1.47.0 // indirect
|
||||
google.golang.org/protobuf v1.28.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
k8s.io/klog/v2 v2.30.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
|
||||
sigs.k8s.io/controller-runtime v0.11.1 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
|
||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.24.0 // indirect
|
||||
k8s.io/klog/v2 v2.60.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20220614142933-1062c7ade5f8 // indirect
|
||||
sigs.k8s.io/controller-runtime v0.12.1 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||
)
|
||||
|
||||
@@ -1,47 +1,171 @@
|
||||
# Kubescape HTTP Handler Package
|
||||
|
||||
> This is a beta version, we might make some changes before publishing the official Prometheus support
|
||||
Running `kubescape` will start up a webserver on port `8080` which will serve the following API's:
|
||||
|
||||
Running `kubescape` will start up a webserver on port `8080` which will serve the following paths:
|
||||
### Trigger scan
|
||||
|
||||
* POST `/v1/scan` - Trigger a kubescape scan. The server will return an ID and will execute the scanning asynchronously
|
||||
* * `wait`: scan synchronously (return results and not ID). Use only in small clusters are with an increased timeout
|
||||
* GET `/v1/results` - Request kubescape scan results
|
||||
* * query `id=<string>` -> ID returned when triggering the scan action. ~If empty will return latest results~ (not supported)
|
||||
* * query `remove` -> Remove results from storage after reading the results
|
||||
* DELETE `/v1/results` - Delete kubescape scan results from storage. ~If empty will delete latest results~ (not supported)
|
||||
* POST `/v1/scan` - trigger a kubescape scan. The server will return an ID and will execute the scanning asynchronously. The request body should look [as follows](#trigger-scan-object).
|
||||
* * `wait=true`: scan synchronously (return the results instead of an ID). Use only in small clusters or with an increased timeout. Default is `wait=false`
|
||||
* * `keep=true`: do not delete results from local storage after returning. Default is `keep=false`
|
||||
* POST `/v1/metrics` - trigger kubescape for Prometheus support. [read more](examples/prometheus/README.md)
|
||||
|
||||
[Response](#response-object):
|
||||
|
||||
```
|
||||
{
|
||||
"id": <str>, // scan ID
|
||||
"type": "busy", // response object type
|
||||
"response": <message:string> // message indicating scanning is still in process
|
||||
}
|
||||
```
|
||||
|
||||
> When scanning was triggered with the `wait=true` query param, the response is like the [`/v1/results` API](#get-results) response
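For illustration, here is a minimal Go sketch of a client using the synchronous mode (`wait=true`). Only the endpoint and query parameter come from this README; the package name, helper name and the chosen request fields are assumptions for the example:

```go
package ksclient

import (
	"bytes"
	"io"
	"net/http"
)

// triggerScanAndWait posts a scan request with wait=true and returns the raw
// response body (the results payload when the scan succeeds). Error handling
// is intentionally minimal.
func triggerScanAndWait(baseURL string) ([]byte, error) {
	body := bytes.NewBufferString(`{"hostScanner": true, "submit": false}`)
	resp, err := http.Post(baseURL+"/v1/scan?wait=true", "application/json", body)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}
```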
|
||||
|
||||
### Get results
|
||||
* GET `/v1/results` - request kubescape scan results
|
||||
* * query `id=<string>` -> request results of a specific scan ID. If empty will return latest results
|
||||
* * query `keep=true` -> keep the results in the local storage after returning. default is `keep=false` - the results will be deleted from local storage after they are returned
|
||||
|
||||
[Response](#response-object):
|
||||
|
||||
When scanning was done successfully
|
||||
```
|
||||
{
|
||||
"id": <str>, // scan ID
|
||||
"type": "v1results", // response object type
|
||||
"response": <object:v1results> // v1 results payload
|
||||
}
|
||||
```
|
||||
|
||||
When scanning failed
|
||||
```
|
||||
{
|
||||
"id": <str>, // scan ID
|
||||
"type": "error", // response object type
|
||||
"response": <error:string> // error string
|
||||
}
|
||||
```
|
||||
|
||||
When scanning is in progress
|
||||
```
|
||||
{
|
||||
"id": <str>, // scan ID
|
||||
"type": "busy", // response object type
|
||||
"response": <message:string> // message indicating scanning is still in process
|
||||
}
|
||||
```
|
||||
### Check scanning progress status
|
||||
Check the scanning status - whether the scan is still in progress or already done. This is meant as a waiting mechanism, since this API does not return the entire results object when the scanning is done (see the Go polling sketch at the end of this section)
|
||||
|
||||
* GET `/v1/status` - Request kubescape scan status
|
||||
* * query `id=<string>` -> Check status of a specific scan. If empty will check if any scan is in progress
|
||||
|
||||
[Response](#response-object):
|
||||
|
||||
When scanning is in progress
|
||||
```
|
||||
{
|
||||
"id": <str>, // scan ID
|
||||
"type": "busy", // response object type
|
||||
"response": <message:string> // message indicating scanning is still in process
|
||||
}
|
||||
```
|
||||
|
||||
When scanning is not in progress
|
||||
```
|
||||
{
|
||||
"id": <str>, // scan ID
|
||||
"type": "notBusy", // response object type
|
||||
"response": <message:string> // message indicating scanning is done in process
|
||||
}
|
||||
```
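A minimal Go polling sketch for this waiting mechanism, assuming only the endpoints and response types documented above (the package and function names are made up for the example):

```go
package ksclient

import (
	"fmt"
	"io"
	"net/http"
	"strings"
	"time"
)

// waitForResults polls /v1/status until the server stops answering "busy",
// then fetches the results of the given scan ID. Plain string matching keeps
// the sketch short; a real client should decode the JSON response object.
func waitForResults(baseURL, scanID string) ([]byte, error) {
	for {
		resp, err := http.Get(fmt.Sprintf("%s/v1/status?id=%s", baseURL, scanID))
		if err != nil {
			return nil, err
		}
		body, err := io.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			return nil, err
		}
		if !strings.Contains(string(body), `"busy"`) {
			break // scan is no longer in progress
		}
		time.Sleep(5 * time.Second)
	}
	resp, err := http.Get(fmt.Sprintf("%s/v1/results?id=%s", baseURL, scanID))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}
```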
|
||||
|
||||
### Delete cached results
|
||||
* DELETE `/v1/results` - Delete kubescape scan results from storage. If no `id` is provided, the latest results will be deleted
|
||||
* * query `id=<string>`: Delete ID of specific results
|
||||
* * query `all`: Delete all cached results
|
||||
|
||||
### Prometheus support API
|
||||
|
||||
* GET/POST `/v1/metrics` - will trigger a cluster scan and respond with Prometheus metrics once the scan is done. Responds with 503 if the scan failed.
|
||||
* `/livez` - will respond 200 if the server is alive
|
||||
* `/readyz` - will respond 200 if the server can receive requests
|
||||
|
||||
## Trigger Kubescape scan
|
||||
## Objects
|
||||
|
||||
### Trigger scan object
|
||||
|
||||
POST /v1/results
|
||||
body:
|
||||
```
|
||||
{
|
||||
"format": <str>, // results format [default: json] (same as 'kubescape scan --format')
|
||||
"excludedNamespaces": <[]str>, // list of namespaces to exclude (same as 'kubescape scan --excluded-namespaces')
|
||||
"includeNamespaces": <[]str>, // list of namespaces to include (same as 'kubescape scan --include-namespaces')
|
||||
"useCachedArtifacts"`: <bool>, // use the cached artifacts instead of downloading (offline support)
|
||||
"submit": <bool>, // submit results to Kubescape cloud (same as 'kubescape scan --submit')
|
||||
"hostScanner": <bool>, // deploy kubescape K8s host-scanner DaemonSet in the scanned cluster (same as 'kubescape scan --enable-host-scan')
|
||||
"keepLocal": <bool>, // do not submit results to Kubescape cloud (same as 'kubescape scan --keep-local')
|
||||
"account": <str> // account ID (same as 'kubescape scan --account')
|
||||
"format": <str>, // results format [default: json] (same as 'kubescape scan --format')
|
||||
"excludedNamespaces": [<str>], // list of namespaces to exclude (same as 'kubescape scan --excluded-namespaces')
|
||||
"includeNamespaces": [<str>], // list of namespaces to include (same as 'kubescape scan --include-namespaces')
|
||||
"useCachedArtifacts"`: <bool>, // use the cached artifacts instead of downloading (offline support)
|
||||
"submit": <bool>, // submit results to Kubescape cloud (same as 'kubescape scan --submit')
|
||||
"hostScanner": <bool>, // deploy kubescape K8s host-scanner DaemonSet in the scanned cluster (same as 'kubescape scan --enable-host-scan')
|
||||
"keepLocal": <bool>, // do not submit results to Kubescape cloud (same as 'kubescape scan --keep-local')
|
||||
"account": <str>, // account ID (same as 'kubescape scan --account')
|
||||
"targetType": <str>, // framework/control
|
||||
"targetNames": [<str>] // names. e.g. when targetType==framework, targetNames=["nsa", "mitre"]
|
||||
}
|
||||
```
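The same body expressed as a Go struct, for clients that prefer building the request programmatically. The struct and field names are assumptions derived from the JSON keys above, not types taken from the kubescape code base:

```go
package ksclient

// ScanRequest mirrors the trigger-scan body documented above.
type ScanRequest struct {
	Format             string   `json:"format,omitempty"`
	ExcludedNamespaces []string `json:"excludedNamespaces,omitempty"`
	IncludeNamespaces  []string `json:"includeNamespaces,omitempty"`
	UseCachedArtifacts bool     `json:"useCachedArtifacts,omitempty"`
	Submit             bool     `json:"submit,omitempty"`
	HostScanner        bool     `json:"hostScanner,omitempty"`
	KeepLocal          bool     `json:"keepLocal,omitempty"`
	Account            string   `json:"account,omitempty"`
	TargetType         string   `json:"targetType,omitempty"` // "framework" or "control"
	TargetNames        []string `json:"targetNames,omitempty"`
}
```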
|
||||
|
||||
e.g.:
|
||||
### Response object
|
||||
|
||||
```
|
||||
{
|
||||
"id": <str>, // scan ID
|
||||
"type": <responseType:str>, // response object type
|
||||
"response": <object:interface> // response payload as list of bytes
|
||||
}
|
||||
```
|
||||
#### Response object types
|
||||
|
||||
* "v1results" - v1 results object
|
||||
* "busy" - server is busy processing previous requests
|
||||
* "notBusy" - server is not busy processing previous requests
|
||||
* "ready" - server is done processing request and results are ready
|
||||
* "error" - error object
|
||||
|
||||
## API Examples
|
||||
#### Default scan
|
||||
|
||||
1. Trigger kubescape scan
|
||||
```bash
|
||||
curl --header "Content-Type: application/json" --request POST --data '{"hostScanner":true, "submit": true}' http://127.0.0.1:8080/v1/scan
|
||||
```
|
||||
|
||||
2. Get kubescape scan results
|
||||
```bash
|
||||
curl --request GET http://127.0.0.1:8080/v1/results -o response.json
|
||||
```
|
||||
|
||||
#### Trigger scan and wait for scan to end
|
||||
|
||||
```bash
|
||||
curl --header "Content-Type: application/json" --request POST --data '{"hostScanner":true, "submit": true}' http://127.0.0.1:8080/v1/scan?wait -o scan_results.json
|
||||
```
|
||||
#### Scan single namespace with a specific framework
|
||||
```bash
|
||||
curl --header "Content-Type: application/json" \
|
||||
--request POST \
|
||||
--data '{"hostScanner":true, "submit":true}' \
|
||||
--data '{"hostScanner":true, "submit":true, "includeNamespaces": ["ks-scanner"], "targetType": "framework", "targetNames": ["nsa"] }' \
|
||||
http://127.0.0.1:8080/v1/scan
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
* [Prometheus](examples/prometheus/README.md)
|
||||
* [Microservice](examples/microservice/README.md)
|
||||
|
||||
|
||||
## Supported environment variables
|
||||
|
||||
* `KS_ACCOUNT`: Account ID
|
||||
* `KS_SUBMIT`: Submit the results to Kubescape SaaS version
|
||||
* `KS_EXCLUDE_NAMESPACES`: List of namespaces to exclude, e.g. `KS_EXCLUDE_NAMESPACES=kube-system,kube-public`
|
||||
* `KS_INCLUDE_NAMESPACES`: List of namespaces to include; the rest of the namespaces will be ignored, e.g. `KS_INCLUDE_NAMESPACES=dev,prod`
|
||||
* `KS_HOST_SCAN_YAML`: Full path to the host scanner YAML
|
||||
* `KS_FORMAT`: Output file format. Default is `json`
|
||||
* `KS_ENABLE_HOST_SCANNER`: Enable the host scanner feature
|
||||
* `KS_DOWNLOAD_ARTIFACTS`: Download the artifacts every scan
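As a rough sketch of how a wrapper around kubescape could consume these variables (the struct, defaults and helper name are illustrative; only the variable names come from the list above):

```go
package ksenv

import (
	"os"
	"strconv"
	"strings"
)

type envConfig struct {
	Account           string
	Submit            bool
	ExcludeNamespaces []string
	Format            string
}

// loadEnvConfig reads a subset of the documented KS_* variables.
func loadEnvConfig() envConfig {
	submit, _ := strconv.ParseBool(os.Getenv("KS_SUBMIT"))
	format := os.Getenv("KS_FORMAT")
	if format == "" {
		format = "json" // documented default
	}
	return envConfig{
		Account:           os.Getenv("KS_ACCOUNT"),
		Submit:            submit,
		ExcludeNamespaces: strings.Split(os.Getenv("KS_EXCLUDE_NAMESPACES"), ","),
		Format:            format,
	}
}
```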
|
||||
|
||||
@@ -43,10 +43,10 @@ subjects:
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kubescape-service
|
||||
name: kubescape
|
||||
namespace: ks-scanner
|
||||
labels:
|
||||
app: kubescape-service
|
||||
app: kubescape
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
@@ -89,13 +89,20 @@ spec:
|
||||
port: 8080
|
||||
initialDelaySeconds: 3
|
||||
periodSeconds: 3
|
||||
image: quay.io/armosec/kubescape:prometheus.v2
|
||||
image: quay.io/armosec/kubescape:latest
|
||||
imagePullPolicy: Always
|
||||
env:
|
||||
- name: KS_DEFAULT_CONFIGMAP_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.namespace
|
||||
- name: "KS_SKIP_UPDATE_CHECK" # do not check latest version
|
||||
value: "true"
|
||||
- name: KS_ENABLE_HOST_SCANNER # enable host scanner -> https://hub.armo.cloud/docs/host-sensor
|
||||
value: "true"
|
||||
- name: KS_DOWNLOAD_ARTIFACTS # When set to true the artifacts will be downloaded every scan execution
|
||||
value: "true"
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: http
|
||||
|
||||
@@ -110,13 +110,4 @@ kubescape_control_count_resources_excluded{name="<control name>",url="<docs url>
|
||||
kubescape_control_count_resources_passed{name="<control name>",url="<docs url>",severity="<control severity>"} <counter>
|
||||
```
|
||||
|
||||
#### Resources metrics
|
||||
The resource metrics let you prioritize which resources to fix first, based on the number of controls that failed on each resource (see the query sketch after the sample metrics below)
|
||||
|
||||
```
|
||||
# Number of controls that failed for this particular resource
|
||||
kubescape_resource_count_controls_failed{apiVersion="<>",kind="<>",namespace="<>",name="<>"} <counter>
|
||||
|
||||
# Number of controls that where excluded for this particular resource
|
||||
kubescape_resource_count_controls_excluded{apiVersion="<>",kind="<>",namespace="<>",name="<>"} <counter>
|
||||
```
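One way to act on these metrics is to ask Prometheus for the resources with the most failed controls. A minimal Go sketch against the Prometheus HTTP API (the Prometheus address and helper name are assumptions; only the metric name comes from this document):

```go
package ksprom

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

// topFailedResources queries a Prometheus server for the ten resources with
// the most failed controls and returns the raw JSON answer.
func topFailedResources(promAddr string) (string, error) {
	q := url.Values{}
	q.Set("query", "topk(10, kubescape_resource_count_controls_failed)")
	resp, err := http.Get(fmt.Sprintf("%s/api/v1/query?%s", promAddr, q.Encode()))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	return string(body), err
}
```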
|
||||
|
||||
542
httphandler/examples/prometheus/grafana-kubescape-dashboard.json
Normal file
@@ -0,0 +1,542 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": {
|
||||
"type": "grafana",
|
||||
"uid": "-- Grafana --"
|
||||
},
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"target": {
|
||||
"limit": 100,
|
||||
"matchAny": false,
|
||||
"tags": [],
|
||||
"type": "dashboard"
|
||||
},
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 0,
|
||||
"id": 27,
|
||||
"links": [],
|
||||
"liveNow": false,
|
||||
"panels": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 13,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 2,
|
||||
"pointSize": 4,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 24,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 2,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "hidden",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"expr": "kubescape_cluster_riskScore",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Overtime Cluster Risk score",
|
||||
"transparent": true,
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "points",
|
||||
"fillOpacity": 10,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 1,
|
||||
"pointSize": 4,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "never",
|
||||
"spanNulls": true,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "short"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 8
|
||||
},
|
||||
"id": 7,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.5.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "sum (kubescape_control_riskScore) by (name)",
|
||||
"instant": false,
|
||||
"interval": "",
|
||||
"legendFormat": "{{link}}",
|
||||
"range": true,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Overtime Controls Risk score ",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"axisLabel": "",
|
||||
"axisPlacement": "auto",
|
||||
"barAlignment": 0,
|
||||
"drawStyle": "line",
|
||||
"fillOpacity": 0,
|
||||
"gradientMode": "none",
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
},
|
||||
"lineInterpolation": "linear",
|
||||
"lineWidth": 1,
|
||||
"pointSize": 5,
|
||||
"scaleDistribution": {
|
||||
"type": "linear"
|
||||
},
|
||||
"showPoints": "auto",
|
||||
"spanNulls": false,
|
||||
"stacking": {
|
||||
"group": "A",
|
||||
"mode": "none"
|
||||
},
|
||||
"thresholdsStyle": {
|
||||
"mode": "off"
|
||||
}
|
||||
},
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 80
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": [
|
||||
{
|
||||
"__systemRef": "hideSeriesFrom",
|
||||
"matcher": {
|
||||
"id": "byNames",
|
||||
"options": {
|
||||
"mode": "exclude",
|
||||
"names": [
|
||||
"{__name__=\"kubescape_framework_riskScore\", container=\"kubescape\", endpoint=\"http\", instance=\"172.17.0.9:8080\", job=\"armo-kubescape\", name=\"DevOpsBest\", namespace=\"armo-system\", pod=\"armo-kubescape-66555d4db6-wznwg\", service=\"armo-kubescape\"}"
|
||||
],
|
||||
"prefix": "All except:",
|
||||
"readOnly": true
|
||||
}
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.hideFrom",
|
||||
"value": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 8
|
||||
},
|
||||
"id": 5,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "list",
|
||||
"placement": "bottom"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "sum(kubescape_framework_riskScore) by (name)",
|
||||
"hide": false,
|
||||
"instant": true,
|
||||
"legendFormat": "{{name}}",
|
||||
"range": false,
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Overtime Frameworks Risk score",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"description": "Number of Resources that failed/passed or excluded",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
}
|
||||
},
|
||||
"mappings": []
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 16
|
||||
},
|
||||
"id": 4,
|
||||
"options": {
|
||||
"displayLabels": [
|
||||
"percent"
|
||||
],
|
||||
"legend": {
|
||||
"displayMode": "list",
|
||||
"placement": "right",
|
||||
"values": []
|
||||
},
|
||||
"pieType": "pie",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.5.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum(kubescape_control_count_resources_passed)",
|
||||
"hide": false,
|
||||
"legendFormat": "passed",
|
||||
"range": true,
|
||||
"refId": "C"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "sum(kubescape_control_count_resources_failed)",
|
||||
"hide": false,
|
||||
"instant": true,
|
||||
"legendFormat": "failed",
|
||||
"range": false,
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "sum(kubescape_control_count_resources_exclude)",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "exclude",
|
||||
"range": true,
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"title": "Resources by Status",
|
||||
"type": "piechart"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"description": "Number of controls that failed/passed or excluded\n",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "palette-classic"
|
||||
},
|
||||
"custom": {
|
||||
"hideFrom": {
|
||||
"legend": false,
|
||||
"tooltip": false,
|
||||
"viz": false
|
||||
}
|
||||
},
|
||||
"mappings": []
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 8,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 16
|
||||
},
|
||||
"id": 8,
|
||||
"options": {
|
||||
"displayLabels": [
|
||||
"percent"
|
||||
],
|
||||
"legend": {
|
||||
"displayMode": "list",
|
||||
"placement": "right",
|
||||
"values": []
|
||||
},
|
||||
"pieType": "pie",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"sort": "none"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "8.5.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "kubescape_cluster_count_control_failed",
|
||||
"hide": false,
|
||||
"instant": true,
|
||||
"legendFormat": "failed",
|
||||
"range": false,
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "kubescape_cluster_count_control_exclude",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "exclude",
|
||||
"range": true,
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "kubescape_cluster_count_control_passed",
|
||||
"hide": false,
|
||||
"instant": false,
|
||||
"legendFormat": "passed",
|
||||
"range": true,
|
||||
"refId": "C"
|
||||
}
|
||||
],
|
||||
"title": "Controls by Status",
|
||||
"type": "piechart"
|
||||
}
|
||||
],
|
||||
"schemaVersion": 36,
|
||||
"style": "dark",
|
||||
"tags": [],
|
||||
"templating": {
|
||||
"list": []
|
||||
},
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {},
|
||||
"timezone": "",
|
||||
"title": "Kubescape",
|
||||
"uid": "SwdcJornz",
|
||||
"version": 7,
|
||||
"weekStart": ""
|
||||
}
|
||||
@@ -43,12 +43,12 @@ subjects:
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kubescape-service
|
||||
name: kubescape
|
||||
namespace: ks-scanner
|
||||
labels:
|
||||
app: kubescape-service
|
||||
app: kubescape
|
||||
spec:
|
||||
type: NodePort
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- port: 8080
|
||||
name: http
|
||||
|
||||
@@ -1,128 +1,167 @@
|
||||
module github.com/armosec/kubescape/v2/httphandler
|
||||
|
||||
go 1.17
|
||||
go 1.18
|
||||
|
||||
replace github.com/armosec/kubescape/v2 => ../
|
||||
|
||||
require (
|
||||
github.com/armosec/kubescape/v2 v2.0.0-00010101000000-000000000000
|
||||
github.com/armosec/opa-utils v0.0.130
|
||||
github.com/armosec/utils-go v0.0.3
|
||||
github.com/armosec/opa-utils v0.0.159
|
||||
github.com/armosec/utils-go v0.0.7
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/gorilla/schema v1.2.0
|
||||
github.com/stretchr/testify v1.7.1
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.99.0 // indirect
|
||||
cloud.google.com/go/container v1.0.0 // indirect
|
||||
cloud.google.com/go/compute v1.7.0 // indirect
|
||||
cloud.google.com/go/container v1.2.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.24 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/BurntSushi/toml v1.0.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.1.1 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.2.2 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||
github.com/OneOfOne/xxhash v1.2.8 // indirect
|
||||
github.com/armosec/armoapi-go v0.0.66 // indirect
|
||||
github.com/armosec/k8s-interface v0.0.68 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20220517143526-88bb52951d5b // indirect
|
||||
github.com/acomagu/bufpipe v1.0.3 // indirect
|
||||
github.com/agnivade/levenshtein v1.1.1 // indirect
|
||||
github.com/armosec/armoapi-go v0.0.97 // indirect
|
||||
github.com/armosec/go-git-url v0.0.13 // indirect
|
||||
github.com/armosec/k8s-interface v0.0.78 // indirect
|
||||
github.com/armosec/rbac-utils v0.0.14 // indirect
|
||||
github.com/armosec/utils-k8s-go v0.0.5 // indirect
|
||||
github.com/aws/aws-sdk-go v1.41.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.7.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.9.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.1.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.3 // indirect
github.com/aws/aws-sdk-go-v2/service/eks v1.17.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.6.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.13.0 // indirect
github.com/aws/smithy-go v1.9.1 // indirect
github.com/armosec/utils-k8s-go v0.0.7 // indirect
github.com/aws/aws-sdk-go v1.44.36 // indirect
github.com/aws/aws-sdk-go-v2 v1.16.5 // indirect
github.com/aws/aws-sdk-go-v2/config v1.15.11 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.12.6 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.6 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.12 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.6 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.13 // indirect
github.com/aws/aws-sdk-go-v2/service/eks v1.21.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.6 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.11.9 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.16.7 // indirect
github.com/aws/smithy-go v1.11.3 // indirect
github.com/boombuler/barcode v1.0.1 // indirect
github.com/briandowns/spinner v1.18.1 // indirect
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect
github.com/coreos/go-oidc v2.2.1+incompatible // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/docker v20.10.9+incompatible // indirect
github.com/dimchansky/utfbom v1.1.1 // indirect
github.com/docker/docker v20.10.17+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/enescakir/emoji v1.0.0 // indirect
github.com/envoyproxy/go-control-plane v0.10.1 // indirect
github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-git/gcfg v1.5.0 // indirect
github.com/go-git/go-billy/v5 v5.3.1 // indirect
github.com/go-git/go-git/v5 v5.4.2 // indirect
github.com/go-gota/gota v0.12.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/swag v0.21.1 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
github.com/golang-jwt/jwt/v4 v4.4.1 // indirect
github.com/golang/glog v1.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/google/gnostic v0.6.9 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
github.com/googleapis/gax-go/v2 v2.4.0 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/johnfercher/maroto v0.36.1 // indirect
github.com/johnfercher/maroto v0.37.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/jung-kurt/gofpdf v1.16.2 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/open-policy-agent/opa v0.39.0 // indirect
github.com/open-policy-agent/opa v0.41.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/shopspring/decimal v1.2.0 // indirect
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/vektah/gqlparser/v2 v2.4.5 // indirect
github.com/whilp/git-urls v1.0.0 // indirect
github.com/xanzy/ssh-agent v0.3.1 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/yashtewari/glob-intersection v0.1.0 // indirect
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect
golang.org/x/mod v0.5.1 // indirect
golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/net v0.0.0-20220615171555-694bf12d69de // indirect
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb // indirect
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c // indirect
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
gonum.org/v1/gonum v0.9.1 // indirect
google.golang.org/api v0.62.0 // indirect
golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect
gonum.org/v1/gonum v0.11.0 // indirect
google.golang.org/api v0.84.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
google.golang.org/grpc v1.45.0 // indirect
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad // indirect
google.golang.org/grpc v1.47.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/api v0.23.5 // indirect
k8s.io/apimachinery v0.23.5 // indirect
k8s.io/client-go v0.23.5 // indirect
k8s.io/klog/v2 v2.30.0 // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
sigs.k8s.io/controller-runtime v0.11.1 // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
helm.sh/helm/v3 v3.9.0 // indirect
k8s.io/api v0.24.2 // indirect
k8s.io/apiextensions-apiserver v0.24.0 // indirect
k8s.io/apimachinery v0.24.2 // indirect
k8s.io/client-go v0.24.2 // indirect
k8s.io/klog/v2 v2.60.1 // indirect
k8s.io/kube-openapi v0.0.0-20220614142933-1062c7ade5f8 // indirect
sigs.k8s.io/controller-runtime v0.12.1 // indirect
sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
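The entries above are indirect (transitive) requirements from a go.mod file; the closing parenthesis ends a require ( ... ) block whose opening line falls outside this excerpt, and the old/new version pairs reflect the dependency bumps in this compare view. A minimal sketch of how such a block sits in a go.mod file, using version numbers taken from the list above (the module path and Go version shown here are hypothetical, for illustration only):

    module github.com/example/httphandler // hypothetical module path, not taken from this diff

    go 1.18 // assumed toolchain version

    require (
        // "// indirect" marks modules that are not imported directly by this
        // module's own code but are required by one of its dependencies.
        github.com/aws/smithy-go v1.11.3 // indirect
        k8s.io/client-go v0.24.2 // indirect
    )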
httphandler/go.sum (1120 lines changed): file diff suppressed because it is too large.
Some files were not shown because too many files have changed in this diff.