Mirror of https://github.com/kubescape/kubescape.git (synced 2026-02-14 18:09:55 +00:00)
Compare commits: 145 commits
| Author | SHA1 | Date |
|---|---|---|
| | b3d16875d6 | |
| | 47412c89ca | |
| | 20d65f2ed3 | |
| | 46a559fb1d | |
| | 2769b22721 | |
| | 63520f9aff | |
| | 333b55a9f2 | |
| | c6d3fd1a82 | |
| | 8106133ed0 | |
| | b36111f63e | |
| | 3ad0284394 | |
| | 245ebf8c41 | |
| | 8309562da1 | |
| | de807a65a6 | |
| | 92fe583421 | |
| | b7ec05e88a | |
| | 203e925888 | |
| | fde5453bf3 | |
| | 4c6a65565b | |
| | e60ecfb8f5 | |
| | b72e2610ca | |
| | 8d4bae06bc | |
| | 847b597d0f | |
| | db1743f617 | |
| | 7ac1b8aacf | |
| | 55f8cb1f0e | |
| | 93574736cd | |
| | e43f4b1a37 | |
| | 4ba33578ce | |
| | ae00866005 | |
| | 21cb4dae29 | |
| | cf086e6614 | |
| | 7d3ac98998 | |
| | 5e9d01aec2 | |
| | a27d2d41f2 | |
| | c09eabf347 | |
| | 38c2aed74a | |
| | cf70671dba | |
| | f90ce83a74 | |
| | fab594ee32 | |
| | d25cefe355 | |
| | 747eee1d29 | |
| | 0c43ee9ab8 | |
| | 466f3acd71 | |
| | 80add4ef12 | |
| | 959319c335 | |
| | 0e9ca547cb | |
| | 6e17e5ce7e | |
| | 858d7ac2ef | |
| | 4046321297 | |
| | 0cfdabd25a | |
| | 8b6cb6c5d8 | |
| | 3df3b7766c | |
| | 0d83654197 | |
| | bd9e44382e | |
| | 2a7c20ea94 | |
| | bde0dc9a17 | |
| | 7d7336ae01 | |
| | e5e608324d | |
| | 569c1444f7 | |
| | dc5ef28324 | |
| | aea6c0eab8 | |
| | c80a15d0cf | |
| | 1ddd57aa1d | |
| | 55adb0da6b | |
| | a716289cc8 | |
| | 093d71fff4 | |
| | 4fe40e348d | |
| | 29a67b806d | |
| | 0169f42747 | |
| | ed45b09241 | |
| | 107903cc99 | |
| | 92e100c497 | |
| | 536fe970f7 | |
| | 85526b06b6 | |
| | d9b6c048d5 | |
| | 7e46a6529a | |
| | 2dc5fd80da | |
| | e89cc8ca24 | |
| | d39aeb0691 | |
| | da9d98134a | |
| | 9992a9a0e4 | |
| | adc8a16e85 | |
| | 13fb586ded | |
| | 2e63982f5a | |
| | 58b833c18a | |
| | cb424eab00 | |
| | 9f2e18c3ee | |
| | b44a73aea5 | |
| | 9c5759286f | |
| | 74dc714736 | |
| | 83751e22cc | |
| | db5fdd75c4 | |
| | 4be2104d4b | |
| | b6bab7618f | |
| | 3e1fda6f3b | |
| | 8487a031ee | |
| | efbb123fce | |
| | 5a335d4f1c | |
| | 5770a823d6 | |
| | 52d7be9108 | |
| | 9512b9c6c4 | |
| | da9ab642ec | |
| | 718ca1c7ab | |
| | ee3742c5a0 | |
| | 7eef843a7a | |
| | b4a8b06f07 | |
| | 4e13609985 | |
| | 2e5e4328f6 | |
| | d98a11a8fa | |
| | bdb25cbb66 | |
| | 369804cb6e | |
| | 1b08a92095 | |
| | e787454d53 | |
| | 31d1ba663a | |
| | c3731d8ff6 | |
| | c5b46beb1a | |
| | c5ca576c98 | |
| | eae6458b42 | |
| | aa1aa913b6 | |
| | 44084592cb | |
| | 6cacfb7b16 | |
| | 6372ce5647 | |
| | 306d3a7081 | |
| | 442530061f | |
| | 961a6f6ebc | |
| | 0d0c8e1b97 | |
| | 5b843ba2c4 | |
| | 8f9b46cdbe | |
| | e16885a044 | |
| | 06a2fa05be | |
| | d26f90b98e | |
| | b47c128eb3 | |
| | 9d957b3c77 | |
| | 8ec5615569 | |
| | fae73b827a | |
| | 6477437872 | |
| | 6099f46dea | |
| | 5009e6ef47 | |
| | c4450d3259 | |
| | 0c3339f1c9 | |
| | faee3d5ad6 | |
| | a279963b28 | |
| | 353a39d66a | |
| | 9733178228 | |
.github/workflows/build.yaml (vendored): 13 changed lines
@@ -33,12 +33,12 @@ jobs:
uses: actions/setup-go@v2
with:
go-version: 1.17

- name: Test cmd pkg
run: cd cmd && go test -v ./...

# - name: Test cmd pkg
# run: cd cmd && go test -v ./...
- name: Test core pkg
run: cd core && go test -v ./...
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: go test -v ./...

- name: Test httphandler pkg
run: cd httphandler && go test -v ./...

@@ -80,9 +80,6 @@ jobs:
asset_path: build/${{ matrix.os }}/kubescape.sha256
asset_name: kubescape-${{ matrix.os }}-sha256
asset_content_type: application/octet-stream

build-docker:
name: Build docker container, tag and upload to registry
needs: build
.github/workflows/build_dev.yaml (vendored): 16 changed lines
@@ -18,11 +18,21 @@ jobs:
with:
go-version: 1.17

- name: Test cmd pkg
run: cd cmd && go test -v ./...
# - name: Test cmd pkg
# run: cd cmd && go test -v ./...

# - name: Test core pkg
# env:
# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
# run: cd core && go test -v ./...

# - name: Test cmd pkg
# run: cd cmd && go test -v ./...

- name: Test core pkg
run: cd core && go test -v ./...
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: go test -v ./...

- name: Test httphandler pkg
run: cd httphandler && go test -v ./...
.github/workflows/master_pr_checks.yaml (vendored): 10 changed lines
@@ -19,9 +19,17 @@ jobs:
with:
go-version: 1.17

- name: Test
# - name: Test cmd pkg
# run: cd cmd && go test -v ./...

- name: Test core pkg
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: go test -v ./...

- name: Test httphandler pkg
run: cd httphandler && go test -v ./...

- name: Build
env:
RELEASE: v2.0.${{ github.run_number }}
README.md: 26 changed lines
@@ -12,8 +12,20 @@ Kubescape integrates natively with other DevOps tools, including Jenkins, Circle

</br>

<!-- # Kubescape Coverage
<img src="docs/ksfromcodetodeploy.png">

</br> -->

# Kubescape CLI:
<img src="docs/demo.gif">

</br>

<!-- # Kubescape overview:
<img src="docs/ARMO-header-2022.gif"> -->

# TL;DR
## Install:
```

@@ -26,7 +38,7 @@ curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh |

## Run:
```
kubescape scan --submit --enable-host-scan
kubescape scan --submit --enable-host-scan --format-version v2 --verbose
```

<img src="docs/summary.png">

@@ -103,7 +115,7 @@ Set-ExecutionPolicy RemoteSigned -scope CurrentUser

#### Scan a running Kubernetes cluster and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
```
kubescape scan --submit --enable-host-scan
kubescape scan --submit --enable-host-scan --verbose
```

> Read [here](https://hub.armo.cloud/docs/host-sensor) more about the `enable-host-scan` flag

@@ -255,6 +267,16 @@ Now you can submit the results to the Kubescape SaaS version -
kubescape submit results path/to/results.json
```

# Integrations

## VS Code Extension

 

Scan the YAML files while writing them using the [vs code extension](https://github.com/armosec/vscode-kubescape/blob/master/README.md)

# Under the hood

## Technology
build.py: 8 changed lines
@@ -4,7 +4,7 @@ import hashlib
import platform
import subprocess

BASE_GETTER_CONST = "github.com/armosec/kubescape/core/cautils/getter"
BASE_GETTER_CONST = "github.com/armosec/kubescape/v2/core/cautils/getter"
BE_SERVER_CONST = BASE_GETTER_CONST + ".ArmoBEURL"
ER_SERVER_CONST = BASE_GETTER_CONST + ".ArmoERURL"
WEBSITE_CONST = BASE_GETTER_CONST + ".ArmoFEURL"

@@ -18,7 +18,7 @@ def checkStatus(status, msg):

def getBuildDir():
currentPlatform = platform.system()
buildDir = "build/"
buildDir = "./build/"

if currentPlatform == "Windows": buildDir += "windows-latest"
elif currentPlatform == "Linux": buildDir += "ubuntu-latest"

@@ -42,7 +42,7 @@ def main():

# Set some variables
packageName = getPackageName()
buildUrl = "github.com/armosec/kubescape/core/cautils.BuildNumber"
buildUrl = "github.com/armosec/kubescape/v2/core/cautils.BuildNumber"
releaseVersion = os.getenv("RELEASE")
ArmoBEServer = os.getenv("ArmoBEServer")
ArmoERServer = os.getenv("ArmoERServer")

@@ -70,7 +70,7 @@ def main():
ldflags += " -X {}={}".format(WEBSITE_CONST, ArmoWebsite)
if ArmoAuthServer:
ldflags += " -X {}={}".format(AUTH_SERVER_CONST, ArmoAuthServer)

build_command = ["go", "build", "-o", ks_file, "-ldflags" ,ldflags]

print("Building kubescape and saving here: {}".format(ks_file))
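For context on the `-ldflags "-X ..."` wiring that build.py assembles above: the `-X` flag only replaces package-level string variables at link time, which is why the URLs are plain `var` strings in the getter package. A minimal standalone sketch of the pattern (variable name and default here are illustrative, not the kubescape code):

```go
// Build with:
//   go build -ldflags "-X main.backendURL=api.example.com" .
package main

import "fmt"

// backendURL must be a package-level string variable for -X to take effect,
// just like the getter URL variables referenced by build.py.
var backendURL = "default.example.com"

func main() {
	fmt.Println("backend:", backendURL)
}
```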
@@ -18,14 +18,30 @@ RUN pip3 install --no-cache --upgrade pip setuptools
WORKDIR /work
ADD . .

# build kubescape server
WORKDIR /work/httphandler
RUN python build.py

RUN ls -ltr build/ubuntu-latest

# build kubescape cmd
WORKDIR /work
RUN python build.py

RUN /work/build/ubuntu-latest/kubescape download artifacts -o /work/artifacts

FROM alpine

RUN addgroup -S armo && adduser -S armo -G armo

RUN mkdir /home/armo/.kubescape
COPY --from=builder /work/artifacts/ /home/armo/.kubescape

RUN chown -R armo:armo /home/armo/.kubescape

USER armo
WORKDIR /home/armo

COPY --from=builder /work/httphandler/build/ubuntu-latest/kubescape /usr/bin/ksserver
COPY --from=builder /work/build/ubuntu-latest/kubescape /usr/bin/kubescape

# # Download the frameworks. Use the "--use-default" flag when running kubescape
# RUN kubescape download framework nsa && kubescape download framework mitre

ENTRYPOINT ["kubescape"]
ENTRYPOINT ["ksserver"]
@@ -1,19 +0,0 @@
# Kubescape CLI Package

## Commands
* [Completion](#completion): Generate autocompletion script
* [Config](#config): Handle cached configurations
* [Delete](#delete): Delete configurations in Kubescape SaaS version
* [Download](#download): Download controls-inputs,exceptions,control,framework,artifacts
* [Help](#help): Help about any command
* [List](#list): List frameworks/controls will list the supported frameworks and controls
* [Scan](#scan): Scan the current running cluster or yaml files
* [Submit](#submit): Submit an object to the Kubescape SaaS version
* [Version](#version): Get kubescape version

## Global Flags

--cache-dir string Cache directory [$KS_CACHE_DIR] (default "/home/david/.kubescape")
-l, --logger string Logger level. Supported: debug/info/success/warning/error/fatal [$KS_LOGGER] (default "info")

### Completion
@@ -1,7 +1,7 @@
package config

import (
"github.com/armosec/kubescape/core/meta"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/spf13/cobra"
)

@@ -1,9 +1,9 @@
package config

import (
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/meta"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -4,9 +4,9 @@ import (
"fmt"
"strings"

"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/meta"
metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -3,9 +3,9 @@ package config
import (
"os"

"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/meta"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -1,8 +1,8 @@
package delete

import (
"github.com/armosec/kubescape/core/meta"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -4,9 +4,9 @@ import (
"fmt"
"strings"

"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/meta"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -5,11 +5,11 @@ import (
"path/filepath"
"strings"

"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/core"
"github.com/armosec/kubescape/core/meta"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/core"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)
cmd/go.mod: 126 changed lines
@@ -1,126 +0,0 @@
module github.com/armosec/kubescape/cmd

go 1.17

replace github.com/armosec/kubescape/core => ../core

require (
github.com/armosec/k8s-interface v0.0.68
github.com/armosec/kubescape/core v0.0.0-00010101000000-000000000000
github.com/armosec/opa-utils v0.0.116
github.com/armosec/rbac-utils v0.0.14
github.com/google/uuid v1.3.0
github.com/mattn/go-isatty v0.0.14
github.com/spf13/cobra v1.4.0
)

require (
cloud.google.com/go v0.99.0 // indirect
cloud.google.com/go/container v1.0.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/OneOfOne/xxhash v1.2.8 // indirect
github.com/armosec/armoapi-go v0.0.58 // indirect
github.com/armosec/utils-go v0.0.3 // indirect
github.com/armosec/utils-k8s-go v0.0.3 // indirect
github.com/aws/aws-sdk-go v1.41.11 // indirect
github.com/aws/aws-sdk-go-v2 v1.12.0 // indirect
github.com/aws/aws-sdk-go-v2/config v1.12.0 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.7.0 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.9.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.1.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.3 // indirect
github.com/aws/aws-sdk-go-v2/service/eks v1.17.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.6.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.13.0 // indirect
github.com/aws/smithy-go v1.9.1 // indirect
github.com/boombuler/barcode v1.0.0 // indirect
github.com/briandowns/spinner v1.18.1 // indirect
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect
github.com/coreos/go-oidc v2.2.1+incompatible // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/docker v20.10.9+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/enescakir/emoji v1.0.0 // indirect
github.com/envoyproxy/go-control-plane v0.10.1 // indirect
github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-gota/gota v0.12.0 // indirect
github.com/go-logr/logr v1.2.2 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/glog v1.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/google/gofuzz v1.1.0 // indirect
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
github.com/googleapis/gnostic v0.5.5 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/johnfercher/maroto v0.34.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/jung-kurt/gofpdf v1.4.2 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/open-policy-agent/opa v0.38.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b // indirect
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
golang.org/x/mod v0.5.1 // indirect
golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
gonum.org/v1/gonum v0.9.1 // indirect
google.golang.org/api v0.62.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
google.golang.org/grpc v1.44.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/api v0.23.4 // indirect
k8s.io/apimachinery v0.23.4 // indirect
k8s.io/client-go v0.23.4 // indirect
k8s.io/klog/v2 v2.30.0 // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
sigs.k8s.io/controller-runtime v0.11.1 // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
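For readers unfamiliar with the `/v2` suffixes that dominate this compare: Go's semantic import versioning requires the major version both in the module path and in every import path once a module is released as v2 or later, which is why the separate `cmd` module goes away and all imports gain `/v2`. A minimal consumer sketch against the new path (the call to `NewPolicies` is only there to make the import usable and is taken from the core/cautils hunk further down):

```go
package main

import (
	"fmt"

	// After the change the module is github.com/armosec/kubescape/v2, so every
	// import path carries the /v2 major-version suffix.
	"github.com/armosec/kubescape/v2/core/cautils"
)

func main() {
	fmt.Printf("%T\n", cautils.NewPolicies())
}
```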
@@ -4,11 +4,11 @@ import (
"fmt"
"strings"

"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/core"
"github.com/armosec/kubescape/core/meta"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/core"
"github.com/armosec/kubescape/v2/core/meta"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)
cmd/root.go: 48 changed lines
@@ -1,27 +1,29 @@
package main
package cmd

import (
"fmt"
"strings"

"github.com/armosec/kubescape/cmd/completion"
"github.com/armosec/kubescape/cmd/config"
"github.com/armosec/kubescape/cmd/delete"
"github.com/armosec/kubescape/cmd/download"
"github.com/armosec/kubescape/cmd/list"
"github.com/armosec/kubescape/cmd/scan"
"github.com/armosec/kubescape/cmd/submit"
"github.com/armosec/kubescape/cmd/version"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/core"
"github.com/armosec/kubescape/core/meta"
"github.com/armosec/kubescape/v2/cmd/completion"
"github.com/armosec/kubescape/v2/cmd/config"
"github.com/armosec/kubescape/v2/cmd/delete"
"github.com/armosec/kubescape/v2/cmd/download"
"github.com/armosec/kubescape/v2/cmd/list"
"github.com/armosec/kubescape/v2/cmd/scan"
"github.com/armosec/kubescape/v2/cmd/submit"
"github.com/armosec/kubescape/v2/cmd/version"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/core"
"github.com/armosec/kubescape/v2/core/meta"

"github.com/spf13/cobra"
)

var rootInfo cautils.RootInfo

var ksExamples = `
# Scan command
kubescape scan --submit

@@ -38,12 +40,10 @@ var ksExamples = `

func NewDefaultKubescapeCommand() *cobra.Command {
ks := core.NewKubescape()

return getRootCmd(ks)
}

func getRootCmd(ks meta.IKubescape) *cobra.Command {
var rootInfo cautils.RootInfo

rootCmd := &cobra.Command{
Use: "kubescape",

@@ -53,8 +53,8 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
Example: ksExamples,
}

rootCmd.PersistentFlags().StringVar(&armoBEURLsDep, "environment", "", envFlagUsage)
rootCmd.PersistentFlags().StringVar(&armoBEURLs, "env", "", envFlagUsage)
rootCmd.PersistentFlags().StringVar(&rootInfo.ArmoBEURLsDep, "environment", "", envFlagUsage)
rootCmd.PersistentFlags().StringVar(&rootInfo.ArmoBEURLs, "env", "", envFlagUsage)
rootCmd.PersistentFlags().MarkDeprecated("environment", "use 'env' instead")
rootCmd.PersistentFlags().MarkHidden("environment")
rootCmd.PersistentFlags().MarkHidden("env")

@@ -66,11 +66,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
rootCmd.PersistentFlags().StringVar(&rootInfo.CacheDir, "cache-dir", getter.DefaultLocalStore, "Cache directory [$KS_CACHE_DIR]")
rootCmd.PersistentFlags().BoolVarP(&rootInfo.DisableColor, "disable-color", "", false, "Disable Color output for logging")

// Initialize
initLogger(&rootInfo)
initLoggerLevel(&rootInfo)
initEnvironment(&rootInfo)
initCacheDir(&rootInfo)
cobra.OnInitialize(initLogger, initLoggerLevel, initEnvironment, initCacheDir)

// Supported commands
rootCmd.AddCommand(scan.GetScanCommand(ks))

@@ -85,7 +81,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
return rootCmd
}

func main() {
func Execute() error {
ks := NewDefaultKubescapeCommand()
ks.Execute()
return ks.Execute()
}
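Because root.go moves from `package main` to `package cmd` and `main()` becomes `Execute() error`, some thin entry point has to call it. A minimal sketch of such a main package (its location and error handling are assumed, not shown in this compare):

```go
package main

import (
	"os"

	"github.com/armosec/kubescape/v2/cmd"
)

func main() {
	// Execute builds the default kubescape command tree and runs it,
	// returning the error instead of exiting from inside the cmd package.
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```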
@@ -1,24 +1,20 @@
package main
package cmd

import (
"fmt"
"os"
"strings"

"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"

"github.com/mattn/go-isatty"
)

var armoBEURLs = ""
var armoBEURLsDep = ""

const envFlagUsage = "Send report results to specific URL. Format:<ReportReceiver>,<Backend>,<Frontend>.\n\t\tExample:report.armo.cloud,api.armo.cloud,portal.armo.cloud"

func initLogger(rootInfo *cautils.RootInfo) {
func initLogger() {
logger.DisableColor(rootInfo.DisableColor)

if rootInfo.LoggerName == "" {

@@ -36,8 +32,8 @@ func initLogger(rootInfo *cautils.RootInfo) {
logger.InitLogger(rootInfo.LoggerName)

}
func initLoggerLevel(rootInfo *cautils.RootInfo) {
if rootInfo.Logger != helpers.InfoLevel.String() {
func initLoggerLevel() {
if rootInfo.Logger == helpers.InfoLevel.String() {
} else if l := os.Getenv("KS_LOGGER"); l != "" {
rootInfo.Logger = l
}

@@ -47,8 +43,8 @@ func initLoggerLevel(rootInfo *cautils.RootInfo) {
}
}

func initCacheDir(rootInfo *cautils.RootInfo) {
if rootInfo.CacheDir == getter.DefaultLocalStore {
func initCacheDir() {
if rootInfo.CacheDir != getter.DefaultLocalStore {
getter.DefaultLocalStore = rootInfo.CacheDir
} else if cacheDir := os.Getenv("KS_CACHE_DIR"); cacheDir != "" {
getter.DefaultLocalStore = cacheDir

@@ -58,11 +54,11 @@ func initCacheDir(rootInfo *cautils.RootInfo) {

logger.L().Debug("cache dir updated", helpers.String("path", getter.DefaultLocalStore))
}
func initEnvironment(rootInfo *cautils.RootInfo) {
if armoBEURLsDep != "" {
armoBEURLs = armoBEURLsDep
func initEnvironment() {
if rootInfo.ArmoBEURLs == "" {
rootInfo.ArmoBEURLs = rootInfo.ArmoBEURLsDep
}
urlSlices := strings.Split(armoBEURLs, ",")
urlSlices := strings.Split(rootInfo.ArmoBEURLs, ",")
if len(urlSlices) != 1 && len(urlSlices) < 3 {
logger.L().Fatal("expected at least 3 URLs (report, api, frontend, auth)")
}

@@ -85,7 +81,7 @@ func initEnvironment(rootInfo *cautils.RootInfo) {
armoERURL := urlSlices[0] // mandatory
armoBEURL := urlSlices[1] // mandatory
armoFEURL := urlSlices[2] // mandatory
if len(urlSlices) <= 4 {
if len(urlSlices) >= 4 {
armoAUTHURL = urlSlices[3]
}
getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL, armoAUTHURL))
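As a quick illustration of the `--env` value that initEnvironment() now reads from rootInfo: the flag splits into report, backend and frontend URLs, with an optional fourth auth URL. A standalone sketch of that parsing (not the kubescape code itself, values taken from the envFlagUsage example):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Format documented by envFlagUsage: <ReportReceiver>,<Backend>,<Frontend>[,<Auth>]
	env := "report.armo.cloud,api.armo.cloud,portal.armo.cloud"
	parts := strings.Split(env, ",")
	if len(parts) != 1 && len(parts) < 3 {
		panic("expected at least 3 URLs (report, api, frontend)")
	}
	report, backend, frontend := parts[0], parts[1], parts[2]
	auth := ""
	if len(parts) >= 4 {
		auth = parts[3]
	}
	fmt.Println(report, backend, frontend, auth)
}
```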
@@ -6,10 +6,14 @@ import (
"os"
"strings"

"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/meta"
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"

"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/armosec/opa-utils/reporthandling"
"github.com/enescakir/emoji"
"github.com/spf13/cobra"
)

@@ -62,11 +66,11 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
} else { // expected control or list of control sepparated by ","

// Read controls from input args
scanInfo.SetPolicyIdentifiers(strings.Split(args[0], ","), reporthandling.KindControl)
scanInfo.SetPolicyIdentifiers(strings.Split(args[0], ","), apisv1.KindControl)

if len(args) > 1 {
if len(args[1:]) == 0 || args[1] != "-" {
scanInfo.InputPatterns = args[1:]
scanInfo.InputPatterns = []string{args[1]}
} else { // store stdin to file - do NOT move to separate function !!
tempFile, err := os.CreateTemp(".", "tmp-kubescape*.yaml")
if err != nil {

@@ -88,36 +92,16 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
if err != nil {
logger.L().Fatal(err.Error())
}
results.HandleResults()
if err := results.HandleResults(); err != nil {
logger.L().Fatal(err.Error())
}
if !scanInfo.VerboseMode {
cautils.SimpleDisplay(os.Stderr, "%s Run with '--verbose'/'-v' flag for detailed resources view\n\n", emoji.Detective)
}
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
return fmt.Errorf("scan risk-score %.2f is above permitted threshold %.2f", results.GetRiskScore(), scanInfo.FailThreshold)
logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))
}
return nil
},
}
}

// func flagValidationControl() {
// if 100 < scanInfo.FailThreshold {
// logger.L().Fatal("bad argument: out of range threshold")
// }
// }

// func setScanForFirstControl(scanInfo, controls []string) []reporthandling.PolicyIdentifier {
// newPolicy := reporthandling.PolicyIdentifier{}
// newPolicy.Kind = reporthandling.KindControl
// newPolicy.Name = controls[0]
// scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
// return scanInfo.PolicyIdentifier
// }

// func SetScanForGivenControls(scanInfo, controls []string) []reporthandling.PolicyIdentifier {
// for _, control := range controls {
// control := strings.TrimLeft(control, " ")
// newPolicy := reporthandling.PolicyIdentifier{}
// newPolicy.Kind = reporthandling.KindControl
// newPolicy.Name = control
// scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
// }
// return scanInfo.PolicyIdentifier
// }
@@ -6,10 +6,13 @@ import (
"os"
"strings"

"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/meta"
"github.com/armosec/opa-utils/reporthandling"
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"

"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/enescakir/emoji"
"github.com/spf13/cobra"
)

@@ -27,7 +30,7 @@ var (
# Scan all frameworks
kubescape scan framework all

# Scan kubernetes YAML manifest files
# Scan kubernetes YAML manifest files (single file or glob)
kubescape scan framework nsa *.yaml

Run 'kubescape list frameworks' for the list of supported frameworks

@@ -58,7 +61,9 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
},
RunE: func(cmd *cobra.Command, args []string) error {

flagValidationFramework(scanInfo)
if err := flagValidationFramework(scanInfo); err != nil {
return err
}
scanInfo.FrameworkScan = true

var frameworks []string

@@ -74,7 +79,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
}
if len(args) > 1 {
if len(args[1:]) == 0 || args[1] != "-" {
scanInfo.InputPatterns = args[1:]
scanInfo.InputPatterns = []string{args[1]}
} else { // store stdin to file - do NOT move to separate function !!
tempFile, err := os.CreateTemp(".", "tmp-kubescape*.yaml")
if err != nil {

@@ -91,40 +96,33 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
}
scanInfo.FrameworkScan = true

scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
scanInfo.SetPolicyIdentifiers(frameworks, apisv1.KindFramework)

results, err := ks.Scan(scanInfo)
if err != nil {
logger.L().Fatal(err.Error())
}
results.HandleResults()

if err = results.HandleResults(); err != nil {
logger.L().Fatal(err.Error())
}
if !scanInfo.VerboseMode {
cautils.SimpleDisplay(os.Stderr, "%s Run with '--verbose'/'-v' flag for detailed resources view\n\n", emoji.Detective)
}
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
return fmt.Errorf("scan risk-score %.2f is above permitted threshold %.2f", results.GetRiskScore(), scanInfo.FailThreshold)
logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))
}
return nil
},
}
}

// func init() {
// scanCmd.AddCommand(frameworkCmd)
// scanInfo = cautils.ScanInfo{}
// }

// func SetScanForFirstFramework(frameworks []string) []reporthandling.PolicyIdentifier {
// newPolicy := reporthandling.PolicyIdentifier{}
// newPolicy.Kind = reporthandling.KindFramework
// newPolicy.Name = frameworks[0]
// scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
// return scanInfo.PolicyIdentifier
// }

func flagValidationFramework(scanInfo *cautils.ScanInfo) {
func flagValidationFramework(scanInfo *cautils.ScanInfo) error {
if scanInfo.Submit && scanInfo.Local {
logger.L().Fatal("you can use `keep-local` or `submit`, but not both")
return fmt.Errorf("you can use `keep-local` or `submit`, but not both")
}
if 100 < scanInfo.FailThreshold {
logger.L().Fatal("bad argument: out of range threshold")
if 100 < scanInfo.FailThreshold || 0 > scanInfo.FailThreshold {
return fmt.Errorf("bad argument: out of range threshold")
}
return nil
}
@@ -2,8 +2,8 @@ package scan

import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/meta"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/spf13/cobra"
)

@@ -11,7 +11,7 @@ var scanCmdExamples = `
Scan command is for scanning an existing cluster or kubernetes manifest files based on pre-defind frameworks

# Scan current cluster with all frameworks
kubescape scan --submit --enable-host-scan
kubescape scan --submit --enable-host-scan --verbose

# Scan kubernetes YAML manifest files
kubescape scan *.yaml

@@ -72,7 +72,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
scanCmd.PersistentFlags().StringVar(&scanInfo.IncludeNamespaces, "include-namespaces", "", "scan specific namespaces. e.g: --include-namespaces ns-a,ns-b")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to ARMO backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
scanCmd.PersistentFlags().BoolVar(&scanInfo.VerboseMode, "verbose", false, "Display all of the input resources and not only failed resources")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.VerboseMode, "verbose", "v", false, "Display all of the input resources and not only failed resources")
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to ARMO management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
@@ -3,9 +3,9 @@ package submit
import (
"fmt"

"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/meta"
metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/meta"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"

"github.com/spf13/cobra"
)

@@ -2,15 +2,15 @@ package submit

import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/meta"
"github.com/armosec/kubescape/core/meta/cliinterfaces"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"

reporterv1 "github.com/armosec/kubescape/core/pkg/resultshandling/reporter/v1"
reporterv1 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v1"

"github.com/armosec/rbac-utils/rbacscanner"
"github.com/spf13/cobra"

@@ -33,7 +33,7 @@ func getRBACCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
}

// list RBAC
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetAccountID(), clusterConfig.GetClusterName()))
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetAccountID(), clusterConfig.GetContextName()))

// submit resources
r := reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())

@@ -7,14 +7,14 @@ import (
"time"

"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/meta"
"github.com/armosec/kubescape/core/meta/cliinterfaces"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/core/pkg/resultshandling/reporter"
reporterv1 "github.com/armosec/kubescape/core/pkg/resultshandling/reporter/v1"
reporterv2 "github.com/armosec/kubescape/core/pkg/resultshandling/reporter/v2"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta"
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
reporterv1 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v1"
reporterv2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v2"

"github.com/armosec/opa-utils/reporthandling"
"github.com/google/uuid"

@@ -74,7 +74,7 @@ func getResultsCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
logger.L().Error("failed setting account ID", helpers.Error(err))
}

resultsObjects := NewResultsObject(clusterConfig.GetAccountID(), clusterConfig.GetClusterName(), args[0])
resultsObjects := NewResultsObject(clusterConfig.GetAccountID(), clusterConfig.GetContextName(), args[0])

// submit resources
var r reporter.IReport

@@ -1,8 +1,8 @@
package submit

import (
"github.com/armosec/kubescape/core/meta"
metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/meta"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)

@@ -4,7 +4,7 @@ import (
"fmt"
"os"

"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/spf13/cobra"
)
@@ -10,7 +10,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
corev1 "k8s.io/api/core/v1"
)

@@ -66,8 +67,9 @@ type ITenantConfig interface {
DeleteCachedConfig() error

// getters
GetClusterName() string
GetContextName() string
GetAccountID() string
GetTennatEmail() string
GetConfigObj() *ConfigObj
// GetBackendAPI() getter.IBackend
// GenerateURL()

@@ -117,8 +119,9 @@ func NewLocalConfig(
}

func (lc *LocalConfig) GetConfigObj() *ConfigObj { return lc.configObj }
func (lc *LocalConfig) GetTennatEmail() string { return lc.configObj.CustomerAdminEMail }
func (lc *LocalConfig) GetAccountID() string { return lc.configObj.AccountID }
func (lc *LocalConfig) GetClusterName() string { return lc.configObj.ClusterName }
func (lc *LocalConfig) GetContextName() string { return lc.configObj.ClusterName }
func (lc *LocalConfig) IsConfigFound() bool { return existsConfigFile() }
func (lc *LocalConfig) SetTenant() error {

@@ -135,7 +138,10 @@ func (lc *LocalConfig) UpdateCachedConfig() error {
}

func (lc *LocalConfig) DeleteCachedConfig() error {
return DeleteConfigFile()
if err := DeleteConfigFile(); err != nil {
logger.L().Warning(err.Error())
}
return nil
}

func getTenantConfigFromBE(backendAPI getter.IBackend, configObj *ConfigObj) error {

@@ -213,7 +219,7 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
getAccountFromEnv(c.configObj)

if c.configObj.ClusterName == "" {
c.configObj.ClusterName = AdoptClusterName(k8sinterface.GetClusterName())
c.configObj.ClusterName = AdoptClusterName(k8sinterface.GetContextName())
} else { // override the cluster name if it has unwanted characters
c.configObj.ClusterName = AdoptClusterName(c.configObj.ClusterName)
}

@@ -228,6 +234,7 @@
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
func (c *ClusterConfig) GetDefaultNS() string { return c.configMapNamespace }
func (c *ClusterConfig) GetAccountID() string { return c.configObj.AccountID }
func (c *ClusterConfig) GetTennatEmail() string { return c.configObj.CustomerAdminEMail }
func (c *ClusterConfig) IsConfigFound() bool { return existsConfigFile() || c.existsConfigMap() }

func (c *ClusterConfig) SetTenant() error {

@@ -257,14 +264,14 @@ func (c *ClusterConfig) UpdateCachedConfig() error {

func (c *ClusterConfig) DeleteCachedConfig() error {
if err := c.deleteConfigMap(); err != nil {
return err
logger.L().Warning(err.Error())
}
if err := DeleteConfigFile(); err != nil {
return err
logger.L().Warning(err.Error())
}
return nil
}
func (c *ClusterConfig) GetClusterName() string {
func (c *ClusterConfig) GetContextName() string {
return c.configObj.ClusterName
}
@@ -4,35 +4,48 @@ import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/reporthandling"
apis "github.com/armosec/opa-utils/reporthandling/apis"
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
)

// K8SResources map[<api group>/<api version>/<resource>][]<resourceID>
type K8SResources map[string][]string
type ArmoResources map[string][]string

type OPASessionObj struct {
K8SResources *K8SResources // input k8s objects
Policies []reporthandling.Framework // list of frameworks to scan
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<rtesource ID>]<resource>
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<rtesource ID>]<resource result>
PostureReport *reporthandling.PostureReport // scan results v1 - Remove
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
RegoInputData RegoInputData // input passed to rgo for scanning. map[<control name>][<input arguments>]
K8SResources *K8SResources // input k8s objects
ArmoResource *ArmoResources // input ARMO objects
Policies []reporthandling.Framework // list of frameworks to scan
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<rtesource ID>]<resource>
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<rtesource ID>]<resource result>
ResourceSource map[string]string // resources sources, map[<rtesource ID>]<resource result>
PostureReport *reporthandling.PostureReport // scan results v1 - Remove
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
RegoInputData RegoInputData // input passed to rgo for scanning. map[<control name>][<input arguments>]
Metadata *reporthandlingv2.Metadata
InfoMap map[string]apis.StatusInfo // Map errors of resources to StatusInfo
ResourceToControlsMap map[string][]string // map[<apigroup/apiversion/resource>] = [<control_IDs>]
SessionID string // SessionID
}

func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources) *OPASessionObj {
func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources, scanInfo *ScanInfo) *OPASessionObj {
return &OPASessionObj{
Report: &reporthandlingv2.PostureReport{},
Policies: frameworks,
K8SResources: k8sResources,
AllResources: make(map[string]workloadinterface.IMetadata),
ResourcesResult: make(map[string]resourcesresults.Result),
Report: &reporthandlingv2.PostureReport{},
Policies: frameworks,
K8SResources: k8sResources,
AllResources: make(map[string]workloadinterface.IMetadata),
ResourcesResult: make(map[string]resourcesresults.Result),
InfoMap: make(map[string]apis.StatusInfo),
ResourceToControlsMap: make(map[string][]string),
ResourceSource: make(map[string]string),
SessionID: scanInfo.ScanID,
PostureReport: &reporthandling.PostureReport{
ClusterName: ClusterName,
CustomerGUID: CustomerGUID,
},
Metadata: scanInfoToScanMetadata(scanInfo),
}
}
@@ -1,10 +1,10 @@
package cautils

import (
pkgcautils "github.com/armosec/utils-go/utils"
"golang.org/x/mod/semver"

"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/utils-go/boolutils"
)

func NewPolicies() *Policies {

@@ -40,7 +40,7 @@ func ruleWithArmoOpaDependency(attributes map[string]interface{}) bool {
return false
}
if s, ok := attributes["armoOpa"]; ok { // TODO - make global
return pkgcautils.StringToBool(s.(string))
return boolutils.StringToBool(s.(string))
}
return false
}

@@ -51,18 +51,16 @@ func ruleWithArmoOpaDependency(attributes map[string]interface{}) bool {
func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version string) bool {
if from, ok := attributes["useFromKubescapeVersion"]; ok && from != nil {
if version != "" {

if semver.Compare(from.(string), BuildNumber) > 0 {
if semver.Compare(version, from.(string)) == -1 {
return false
}
}
}
if until, ok := attributes["useUntilKubescapeVersion"]; ok && until != nil {
if version != "" {
if semver.Compare(BuildNumber, until.(string)) >= 0 {
return false
}
} else {
if version == "" {
return false
}
if semver.Compare(version, until.(string)) >= 0 {
return false
}
}
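The rewritten isRuleKubescapeVersionCompatible() leans on golang.org/x/mod/semver, where Compare returns -1, 0 or +1 and version strings must carry a leading "v". A small standalone check of the two comparisons used above (version values are illustrative):

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	// version < useFromKubescapeVersion  => rule not yet applicable
	fmt.Println(semver.Compare("v2.0.149", "v2.0.150") == -1) // true
	// version >= useUntilKubescapeVersion => rule no longer applicable
	fmt.Println(semver.Compare("v2.0.150", "v2.0.150") >= 0) // true
}
```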
@@ -2,10 +2,6 @@ package cautils

// CA environment vars
var (
CustomerGUID = ""
ClusterName = ""
EventReceiverURL = ""
NotificationServerURL = ""
DashboardBackendURL = ""
RestAPIPort = "4001"
CustomerGUID = ""
ClusterName = ""
)
@@ -9,14 +9,14 @@ import (
"strings"

"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/opa-utils/objectsenvelopes"
"gopkg.in/yaml.v2"
)

var (
YAML_PREFIX = []string{".yaml", ".yml"}
JSON_PREFIX = []string{".json"}
YAML_PREFIX = []string{"yaml", "yml"}
JSON_PREFIX = []string{"json"}
)

type FileFormat string

@@ -26,7 +26,7 @@ const (
JSON_FILE_FORMAT FileFormat = "json"
)

func LoadResourcesFromFiles(inputPatterns []string) ([]workloadinterface.IMetadata, error) {
func LoadResourcesFromFiles(inputPatterns []string) (map[string][]workloadinterface.IMetadata, error) {
files, errs := listFiles(inputPatterns)
if len(errs) > 0 {
logger.L().Error(fmt.Sprintf("%v", errs))

@@ -42,8 +42,8 @@ func LoadResourcesFromFiles(inputPatterns []string) ([]workloadinterface.IMetada
return workloads, nil
}

func loadFiles(filePaths []string) ([]workloadinterface.IMetadata, []error) {
workloads := []workloadinterface.IMetadata{}
func loadFiles(filePaths []string) (map[string][]workloadinterface.IMetadata, []error) {
workloads := make(map[string][]workloadinterface.IMetadata, 0)
errs := []error{}
for i := range filePaths {
f, err := loadFile(filePaths[i])

@@ -54,7 +54,12 @@ func loadFiles(filePaths []string) ([]workloadinterface.IMetadata, []error) {
w, e := ReadFile(f, GetFileFormat(filePaths[i]))
errs = append(errs, e...)
if w != nil {
workloads = append(workloads, w...)
if _, ok := workloads[filePaths[i]]; !ok {
workloads[filePaths[i]] = []workloadinterface.IMetadata{}
}
wSlice := workloads[filePaths[i]]
wSlice = append(wSlice, w...)
workloads[filePaths[i]] = wSlice
}
}
return workloads, errs

@@ -73,7 +78,6 @@ func ReadFile(fileContent []byte, fileFromat FileFormat) ([]workloadinterface.IM
default:
return nil, nil // []error{fmt.Errorf("file extension %s not supported", fileFromat)}
}

}

func listFiles(patterns []string) ([]string, []error) {

@@ -173,11 +177,11 @@ func convertYamlToJson(i interface{}) interface{} {
}

func IsYaml(filePath string) bool {
return StringInSlice(YAML_PREFIX, filepath.Ext(filePath)) != ValueNotFound
return StringInSlice(YAML_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
}

func IsJson(filePath string) bool {
return StringInSlice(JSON_PREFIX, filepath.Ext(filePath)) != ValueNotFound
return StringInSlice(JSON_PREFIX, strings.ReplaceAll(filepath.Ext(filePath), ".", "")) != ValueNotFound
}

func glob(root, pattern string) ([]string, error) {

@@ -23,6 +23,20 @@ func TestListFiles(t *testing.T) {
assert.Equal(t, 12, len(files))
}

func TestLoadResourcesFromFiles(t *testing.T) {
workloads, err := LoadResourcesFromFiles([]string{onlineBoutiquePath()})
assert.NoError(t, err)
assert.Equal(t, 12, len(workloads))

for i, w := range workloads {
switch filepath.Base(i) {
case "adservice.yaml":
assert.Equal(t, 2, len(w))
assert.Equal(t, "apps/v1//Deployment/adservice", w[0].GetID())
assert.Equal(t, "/v1//Service/adservice", w[1].GetID())
}
}
}
func TestLoadFiles(t *testing.T) {
files, _ := listFiles([]string{onlineBoutiquePath()})
_, err := loadFiles(files)
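Since the loader now returns a map from file path to the workloads parsed from that file, callers can attribute each resource back to its source. A hedged sketch against the signature shown in the hunk above (the glob pattern is illustrative; GetID is the same accessor used by the new test):

```go
package main

import (
	"fmt"

	"github.com/armosec/kubescape/v2/core/cautils"
)

func main() {
	workloadsByFile, err := cautils.LoadResourcesFromFiles([]string{"deploy/*.yaml"})
	if err != nil {
		panic(err)
	}
	for path, workloads := range workloadsByFile {
		for _, w := range workloads {
			// e.g. "deploy/adservice.yaml -> apps/v1//Deployment/adservice"
			fmt.Printf("%s -> %s\n", path, w.GetID())
		}
	}
}
```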
core/cautils/floatutils.go (new file): 18 lines
@@ -0,0 +1,18 @@
package cautils

import "math"

// Float64ToInt convert float64 to int
func Float64ToInt(x float64) int {
return int(math.Round(x))
}

// Float32ToInt convert float32 to int
func Float32ToInt(x float32) int {
return Float64ToInt(float64(x))
}

// Float16ToInt convert float16 to int
func Float16ToInt(x float32) int {
return Float64ToInt(float64(x))
}
24 core/cautils/floatutils_test.go Normal file
@@ -0,0 +1,24 @@
package cautils

import (
"testing"

"github.com/stretchr/testify/assert"
)

func TestFloat64ToInt(t *testing.T) {
assert.Equal(t, 3, Float64ToInt(3.49))
assert.Equal(t, 4, Float64ToInt(3.5))
assert.Equal(t, 4, Float64ToInt(3.51))
}

func TestFloat32ToInt(t *testing.T) {
assert.Equal(t, 3, Float32ToInt(3.49))
assert.Equal(t, 4, Float32ToInt(3.5))
assert.Equal(t, 4, Float32ToInt(3.51))
}
func TestFloat16ToInt(t *testing.T) {
assert.Equal(t, 3, Float16ToInt(3.49))
assert.Equal(t, 4, Float16ToInt(3.5))
assert.Equal(t, 4, Float16ToInt(3.51))
}
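For reference, these helpers delegate to math.Round, which rounds halves away from zero; a minimal sketch confirming that behavior (not part of the repository):

package main

import (
	"fmt"
	"math"
)

func main() {
	// math.Round rounds half away from zero: 3.5 -> 4, -3.5 -> -4.
	fmt.Println(int(math.Round(3.5)), int(math.Round(3.49)), int(math.Round(-3.5)))
}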
@@ -10,8 +10,8 @@ import (
"time"

"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/opa-utils/reporthandling"
)

@@ -163,7 +163,6 @@ func (armoAPI *ArmoAPI) GetFramework(name string) (*reporthandling.Framework, er
if err = JSONDecoder(respStr).Decode(framework); err != nil {
return nil, err
}
SaveInFile(framework, GetDefaultPath(name+".json"))

return framework, err
}
@@ -233,7 +232,14 @@ func (armoAPI *ArmoAPI) GetAccountConfig(clusterName string) (*armotypes.Custome
}

if err = JSONDecoder(respStr).Decode(&accountConfig); err != nil {
return nil, err
// try with default scope
respStr, err = armoAPI.Get(armoAPI.getAccountConfigDefault(clusterName), nil)
if err != nil {
return nil, err
}
if err = JSONDecoder(respStr).Decode(&accountConfig); err != nil {
return nil, err
}
}

return accountConfig, nil

@@ -73,6 +73,12 @@ func (armoAPI *ArmoAPI) exceptionsURL(exceptionsPolicyName string) string {
return u.String()
}

func (armoAPI *ArmoAPI) getAccountConfigDefault(clusterName string) string {
config := armoAPI.getAccountConfig(clusterName)
url := config + "&scope=customer"
return url
}

func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
u := url.URL{}
u.Scheme = "https"
@@ -4,10 +4,10 @@ import (
"os"
"strings"

"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/cautils/logger/nonelogger"
"github.com/armosec/kubescape/core/cautils/logger/prettylogger"
"github.com/armosec/kubescape/core/cautils/logger/zaplogger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/cautils/logger/nonelogger"
"github.com/armosec/kubescape/v2/core/cautils/logger/prettylogger"
"github.com/armosec/kubescape/v2/core/cautils/logger/zaplogger"
)

type ILogger interface {

@@ -3,7 +3,7 @@ package nonelogger
import (
"os"

"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
)

const LoggerName string = "none"

@@ -3,7 +3,7 @@ package prettylogger
import (
"io"

"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/fatih/color"
)

@@ -5,7 +5,7 @@ import (
"os"
"sync"

"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
)

const LoggerName string = "pretty"

@@ -3,7 +3,7 @@ package zaplogger
import (
"os"

"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
@@ -11,8 +11,7 @@ func ReportV2ToV1(opaSessionObj *OPASessionObj) {
if len(opaSessionObj.PostureReport.FrameworkReports) > 0 {
return // report already converted
}

opaSessionObj.PostureReport.ClusterCloudProvider = opaSessionObj.Report.ClusterCloudProvider
// opaSessionObj.PostureReport.ClusterCloudProvider = opaSessionObj.Report.ClusterCloudProvider

frameworks := []reporthandling.FrameworkReport{}
89 core/cautils/rootinfo.go Normal file
@@ -0,0 +1,89 @@
package cautils

type RootInfo struct {
Logger string // logger level
LoggerName string // logger name ("pretty"/"zap"/"none")
CacheDir string // cached dir
DisableColor bool // Disable Color

ArmoBEURLs string // armo url
ArmoBEURLsDep string // armo url
}

// func (rootInfo *RootInfo) InitLogger() {
// logger.DisableColor(rootInfo.DisableColor)

// if rootInfo.LoggerName == "" {
// if l := os.Getenv("KS_LOGGER_NAME"); l != "" {
// rootInfo.LoggerName = l
// } else {
// if isatty.IsTerminal(os.Stdout.Fd()) {
// rootInfo.LoggerName = "pretty"
// } else {
// rootInfo.LoggerName = "zap"
// }
// }
// }

// logger.InitLogger(rootInfo.LoggerName)

// }
// func (rootInfo *RootInfo) InitLoggerLevel() error {
// if rootInfo.Logger == helpers.InfoLevel.String() {
// } else if l := os.Getenv("KS_LOGGER"); l != "" {
// rootInfo.Logger = l
// }

// if err := logger.L().SetLevel(rootInfo.Logger); err != nil {
// return fmt.Errorf("supported levels: %s", strings.Join(helpers.SupportedLevels(), "/"))
// }
// return nil
// }

// func (rootInfo *RootInfo) InitCacheDir() error {
// if rootInfo.CacheDir == getter.DefaultLocalStore {
// getter.DefaultLocalStore = rootInfo.CacheDir
// } else if cacheDir := os.Getenv("KS_CACHE_DIR"); cacheDir != "" {
// getter.DefaultLocalStore = cacheDir
// } else {
// return nil // using default cache dir location
// }

// // TODO create dir if not found exist
// // logger.L().Debug("cache dir updated", helpers.String("path", getter.DefaultLocalStore))
// return nil
// }
// func (rootInfo *RootInfo) InitEnvironment() error {

// urlSlices := strings.Split(rootInfo.ArmoBEURLs, ",")
// if len(urlSlices) != 1 && len(urlSlices) < 3 {
// return fmt.Errorf("expected at least 2 URLs (report,api,frontend,auth)")
// }
// switch len(urlSlices) {
// case 1:
// switch urlSlices[0] {
// case "dev", "development":
// getter.SetARMOAPIConnector(getter.NewARMOAPIDev())
// case "stage", "staging":
// getter.SetARMOAPIConnector(getter.NewARMOAPIStaging())
// case "":
// getter.SetARMOAPIConnector(getter.NewARMOAPIProd())
// default:
// return fmt.Errorf("unknown environment")
// }
// case 2:
// armoERURL := urlSlices[0] // mandatory
// armoBEURL := urlSlices[1] // mandatory
// getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(armoERURL, armoBEURL, "", ""))
// case 3, 4:
// var armoAUTHURL string
// armoERURL := urlSlices[0] // mandatory
// armoBEURL := urlSlices[1] // mandatory
// armoFEURL := urlSlices[2] // mandatory
// if len(urlSlices) <= 4 {
// armoAUTHURL = urlSlices[3]
// }
// getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL, armoAUTHURL))
// }
// return nil
// }
@@ -8,10 +8,16 @@ import (
"path/filepath"
"strings"

"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"

giturl "github.com/armosec/go-git-url"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/opa-utils/reporthandling"
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
"github.com/google/uuid"
)

const (
@@ -25,6 +31,10 @@ type BoolPtrFlag struct {
valPtr *bool
}

func NewBoolPtr(b *bool) BoolPtrFlag {
return BoolPtrFlag{valPtr: b}
}

func (bpf *BoolPtrFlag) Type() string {
return "bool"
}
@@ -38,6 +48,12 @@ func (bpf *BoolPtrFlag) String() string {
func (bpf *BoolPtrFlag) Get() *bool {
return bpf.valPtr
}
func (bpf *BoolPtrFlag) GetBool() bool {
if bpf.valPtr == nil {
return false
}
return *bpf.valPtr
}

func (bpf *BoolPtrFlag) SetBool(val bool) {
bpf.valPtr = &val
@@ -53,13 +69,6 @@ func (bpf *BoolPtrFlag) Set(val string) error {
return nil
}

type RootInfo struct {
Logger string // logger level
LoggerName string // logger name ("pretty"/"zap"/"none")
CacheDir string // cached dir
DisableColor bool // Disable Color
}

// TODO - UPDATE
type ScanInfo struct {
Getters // TODO - remove from object
@@ -79,7 +88,7 @@ type ScanInfo struct {
Silent bool // Silent mode - Do not print progress logs
FailThreshold float32 // Failure score threshold
Submit bool // Submit results to Armo BE
ReportID string // Report id of the current scan
ScanID string // Report id of the current scan
HostSensorEnabled BoolPtrFlag // Deploy ARMO K8s host scanner to collect data from certain controls
HostSensorYamlPath string // Path to hostsensor file
Local bool // Do not submit results
@@ -99,6 +108,10 @@ func (scanInfo *ScanInfo) Init() {
scanInfo.setUseFrom()
scanInfo.setOutputFile()
scanInfo.setUseArtifactsFrom()
if scanInfo.ScanID == "" {
scanInfo.ScanID = uuid.NewString()
}

}

func (scanInfo *ScanInfo) setUseArtifactsFrom() {
@@ -133,15 +146,6 @@ func (scanInfo *ScanInfo) setUseArtifactsFrom() {
scanInfo.UseExceptions = filepath.Join(scanInfo.UseArtifactsFrom, localExceptionsFilename)
}

func (scanInfo *ScanInfo) setUseExceptions() {
if scanInfo.UseExceptions != "" {
// load exceptions from file
scanInfo.ExceptionsGetter = getter.NewLoadPolicy([]string{scanInfo.UseExceptions})
} else {
scanInfo.ExceptionsGetter = getter.GetArmoAPIConnector()
}
}

func (scanInfo *ScanInfo) setUseFrom() {
if scanInfo.UseDefault {
for _, policy := range scanInfo.PolicyIdentifier {
@@ -178,11 +182,11 @@ func (scanInfo *ScanInfo) GetScanningEnvironment() string {
return ScanCluster
}

func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind reporthandling.NotificationPolicyKind) {
func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind apisv1.NotificationPolicyKind) {
for _, policy := range policies {
if !scanInfo.contains(policy) {
newPolicy := reporthandling.PolicyIdentifier{}
newPolicy.Kind = kind // reporthandling.KindFramework
newPolicy.Kind = reporthandling.NotificationPolicyKind(kind) // reporthandling.KindFramework
newPolicy.Name = policy
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
}
@@ -197,3 +201,103 @@ func (scanInfo *ScanInfo) contains(policyName string) bool {
}
return false
}

func scanInfoToScanMetadata(scanInfo *ScanInfo) *reporthandlingv2.Metadata {
metadata := &reporthandlingv2.Metadata{}

metadata.ScanMetadata.Format = scanInfo.Format
metadata.ScanMetadata.FormatVersion = scanInfo.FormatVersion
metadata.ScanMetadata.Submit = scanInfo.Submit

// TODO - Add excluded and included namespaces
// if len(scanInfo.ExcludedNamespaces) > 1 {
// opaSessionObj.Metadata.ScanMetadata.ExcludedNamespaces = strings.Split(scanInfo.ExcludedNamespaces[1:], ",")
// }
// if len(scanInfo.IncludeNamespaces) > 1 {
// opaSessionObj.Metadata.ScanMetadata.IncludeNamespaces = strings.Split(scanInfo.IncludeNamespaces[1:], ",")
// }

// scan type
if len(scanInfo.PolicyIdentifier) > 0 {
metadata.ScanMetadata.TargetType = string(scanInfo.PolicyIdentifier[0].Kind)
}
// append frameworks
for _, policy := range scanInfo.PolicyIdentifier {
metadata.ScanMetadata.TargetNames = append(metadata.ScanMetadata.TargetNames, policy.Name)
}

metadata.ScanMetadata.KubescapeVersion = BuildNumber
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
metadata.ScanMetadata.FailThreshold = scanInfo.FailThreshold
metadata.ScanMetadata.HostScanner = scanInfo.HostSensorEnabled.GetBool()
metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
metadata.ScanMetadata.ControlsInputs = scanInfo.ControlsInputs

metadata.ScanMetadata.ScanningTarget = reporthandlingv2.Cluster
if scanInfo.GetScanningEnvironment() == ScanLocalFiles {
metadata.ScanMetadata.ScanningTarget = reporthandlingv2.File
}

inputFiles := ""
if len(scanInfo.InputPatterns) > 0 {
inputFiles = scanInfo.InputPatterns[0]
}
setContextMetadata(&metadata.ContextMetadata, inputFiles)

return metadata
}

func setContextMetadata(contextMetadata *reporthandlingv2.ContextMetadata, input string) {
// cluster
if input == "" {
contextMetadata.ClusterContextMetadata = &reporthandlingv2.ClusterMetadata{
ContextName: k8sinterface.GetContextName(),
}
return
}

// url
if gitParser, err := giturl.NewGitURL(input); err == nil {
if gitParser.GetBranch() == "" {
gitParser.SetDefaultBranch()
}
contextMetadata.RepoContextMetadata = &reporthandlingv2.RepoContextMetadata{
Repo: gitParser.GetRepo(),
Owner: gitParser.GetOwner(),
Branch: gitParser.GetBranch(),
}
return
}

if !filepath.IsAbs(input) {
if o, err := os.Getwd(); err == nil {
input = filepath.Join(o, input)
}
}

// single file
if IsFile(input) {
contextMetadata.FileContextMetadata = &reporthandlingv2.FileContextMetadata{
FilePath: input,
HostName: getHostname(),
}
return
}

// dir/glob
if !IsFile(input) {
contextMetadata.DirectoryContextMetadata = &reporthandlingv2.DirectoryContextMetadata{
BasePath: input,
HostName: getHostname(),
}
return
}

}

func getHostname() string {
if h, e := os.Hostname(); e == nil {
return h
}
return ""
}
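A minimal sketch of the go-git-url calls that setContextMetadata relies on, using only the methods that appear in the diff; the URL and output format are illustrative:

package main

import (
	"fmt"

	giturl "github.com/armosec/go-git-url"
)

func main() {
	parser, err := giturl.NewGitURL("https://github.com/armosec/kubescape") // illustrative URL
	if err != nil {
		fmt.Println("not a git URL:", err)
		return
	}
	if parser.GetBranch() == "" {
		parser.SetDefaultBranch() // same fallback setContextMetadata applies
	}
	fmt.Println(parser.GetOwner(), parser.GetRepo(), parser.GetBranch())
}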
67 core/cautils/scaninfo_test.go Normal file
@@ -0,0 +1,67 @@
package cautils

import (
"testing"

reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
"github.com/stretchr/testify/assert"
)

func TestSetContextMetadata(t *testing.T) {
{
ctx := reporthandlingv2.ContextMetadata{}
setContextMetadata(&ctx, "")

assert.NotNil(t, ctx.ClusterContextMetadata)
assert.Nil(t, ctx.DirectoryContextMetadata)
assert.Nil(t, ctx.FileContextMetadata)
assert.Nil(t, ctx.HelmContextMetadata)
assert.Nil(t, ctx.RepoContextMetadata)
}
{
ctx := reporthandlingv2.ContextMetadata{}
setContextMetadata(&ctx, "file")

assert.Nil(t, ctx.ClusterContextMetadata)
assert.NotNil(t, ctx.DirectoryContextMetadata)
assert.Nil(t, ctx.FileContextMetadata)
assert.Nil(t, ctx.HelmContextMetadata)
assert.Nil(t, ctx.RepoContextMetadata)

hostName := getHostname()
assert.Contains(t, ctx.DirectoryContextMetadata.BasePath, "file")
assert.Equal(t, hostName, ctx.DirectoryContextMetadata.HostName)
}
{
ctx := reporthandlingv2.ContextMetadata{}
setContextMetadata(&ctx, "scaninfo_test.go")

assert.Nil(t, ctx.ClusterContextMetadata)
assert.Nil(t, ctx.DirectoryContextMetadata)
assert.NotNil(t, ctx.FileContextMetadata)
assert.Nil(t, ctx.HelmContextMetadata)
assert.Nil(t, ctx.RepoContextMetadata)

hostName := getHostname()
assert.Contains(t, ctx.FileContextMetadata.FilePath, "scaninfo_test.go")
assert.Equal(t, hostName, ctx.FileContextMetadata.HostName)
}
{
ctx := reporthandlingv2.ContextMetadata{}
setContextMetadata(&ctx, "https://github.com/armosec/kubescape")

assert.Nil(t, ctx.ClusterContextMetadata)
assert.Nil(t, ctx.DirectoryContextMetadata)
assert.Nil(t, ctx.FileContextMetadata)
assert.Nil(t, ctx.HelmContextMetadata)
assert.NotNil(t, ctx.RepoContextMetadata)

assert.Equal(t, "kubescape", ctx.RepoContextMetadata.Repo)
assert.Equal(t, "armosec", ctx.RepoContextMetadata.Owner)
assert.Equal(t, "master", ctx.RepoContextMetadata.Branch)
}
}

func TestGetHostname(t *testing.T) {
assert.NotEqual(t, "", getHostname())
}
@@ -6,14 +6,15 @@ import (
"net/http"
"os"

"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
pkgutils "github.com/armosec/utils-go/utils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/utils-go/boolutils"
"golang.org/x/mod/semver"
)

const SKIP_VERSION_CHECK = "KUBESCAPE_SKIP_UPDATE_CHECK"
const SKIP_VERSION_CHECK_DEPRECATED = "KUBESCAPE_SKIP_UPDATE_CHECK"
const SKIP_VERSION_CHECK = "KS_SKIP_UPDATE_CHECK"

var BuildNumber string

@@ -27,7 +28,9 @@ func NewIVersionCheckHandler() IVersionCheckHandler {
if BuildNumber == "" {
logger.L().Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
}
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && pkgutils.StringToBool(v) {
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && boolutils.StringToBool(v) {
return NewVersionCheckHandlerMock()
} else if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_DEPRECATED); ok && boolutils.StringToBool(v) {
return NewVersionCheckHandlerMock()
}
return NewVersionCheckHandler()
@@ -98,7 +101,7 @@ func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckReques
}

if latestVersion.ClientUpdate != "" {
if BuildNumber != "" && semver.Compare(BuildNumber, latestVersion.ClientUpdate) >= 0 {
if BuildNumber != "" && semver.Compare(BuildNumber, latestVersion.ClientUpdate) == -1 {
logger.L().Warning(warningMessage(latestVersion.ClientUpdate))
}
}

@@ -6,6 +6,7 @@ import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/opa-utils/reporthandling"
"github.com/stretchr/testify/assert"
"golang.org/x/mod/semver"
)

func TestGetKubernetesObjects(t *testing.T) {
@@ -30,9 +31,38 @@ func TestIsRuleKubescapeVersionCompatible(t *testing.T) {
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))

// should only use rules that version is in range of use
buildNumberMock = "v1.0.133"
buildNumberMock = "v1.0.130"
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))

// should only use rules that version is in range of use
buildNumberMock = "v1.0.132"
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))

// should only use rules that version is in range of use
buildNumberMock = "v1.0.133"
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))

// should only use rules that version is in range of use
buildNumberMock = "v1.0.135"
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
}

func TestCheckLatestVersion(t *testing.T) {
assert.Equal(t, -1, semver.Compare("v2.0.150", "v2.0.151"))
assert.Equal(t, 0, semver.Compare("v2.0.150", "v2.0.150"))
assert.Equal(t, 1, semver.Compare("v2.0.150", "v2.0.149"))
assert.Equal(t, -1, semver.Compare("v2.0.150", "v3.0.150"))

}
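The version-check fix above inverts the comparison: golang.org/x/mod/semver's Compare returns -1 when the first version is lower, so the warning now fires only when the running build is older than the latest release. A minimal sketch with illustrative version strings:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	build, latest := "v2.0.150", "v2.0.151" // illustrative versions
	// Compare returns -1, 0 or +1; -1 means the current build lags behind the latest release.
	if build != "" && semver.Compare(build, latest) == -1 {
		fmt.Println("a newer kubescape release is available:", latest)
	}
}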
52 core/cautils/workloadmappingutils.go Normal file
@@ -0,0 +1,52 @@
package cautils

import (
"strings"

"github.com/armosec/opa-utils/reporthandling/apis"
)

var (
ImageVulnResources = []string{"ImageVulnerabilities"}
HostSensorResources = []string{"KubeletConfiguration",
"KubeletCommandLine",
"OsReleaseFile",
"KernelVersion",
"LinuxSecurityHardeningStatus",
"OpenPortsList",
"LinuxKernelVariables"}
CloudResources = []string{"ClusterDescribe"}
)

func MapArmoResource(armoResourceMap *ArmoResources, resources []string) []string {
var hostResources []string
for k := range *armoResourceMap {
for _, resource := range resources {
if strings.Contains(k, resource) {
hostResources = append(hostResources, k)
}
}
}
return hostResources
}

func MapHostResources(armoResourceMap *ArmoResources) []string {
return MapArmoResource(armoResourceMap, HostSensorResources)
}

func MapImageVulnResources(armoResourceMap *ArmoResources) []string {
return MapArmoResource(armoResourceMap, ImageVulnResources)
}

func MapCloudResources(armoResourceMap *ArmoResources) []string {
return MapArmoResource(armoResourceMap, CloudResources)
}

func SetInfoMapForResources(info string, resources []string, errorMap map[string]apis.StatusInfo) {
for _, resource := range resources {
errorMap[resource] = apis.StatusInfo{
InnerInfo: info,
InnerStatus: apis.StatusSkipped,
}
}
}
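A sketch of how these helpers might be combined by a caller; the helper function, its name, and the info text are hypothetical and only the exported functions defined above are used:

import (
	"github.com/armosec/kubescape/v2/core/cautils"
	"github.com/armosec/opa-utils/reporthandling/apis"
)

// markHostScannerSkipped resolves the host-scanner resource keys present in
// armoResources and marks them as skipped with an explanatory message.
func markHostScannerSkipped(armoResources *cautils.ArmoResources, infoMap map[string]apis.StatusInfo) {
	hostKeys := cautils.MapHostResources(armoResources)
	cautils.SetInfoMapForResources("host scanner is not deployed", hostKeys, infoMap)
}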
@@ -3,7 +3,7 @@ package core
import (
"fmt"

metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
)

func (ks *Kubescape) SetCachedConfig(setConfig *metav1.SetConfig) error {

@@ -3,10 +3,10 @@ package core
import (
"fmt"

"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
v1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
v1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
)

func (ks *Kubescape) DeleteExceptions(delExceptions *v1.DeleteExceptions) error {

@@ -2,14 +2,15 @@ package core

import (
"fmt"
"os"
"path/filepath"
"strings"

"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
)

var downloadFunc = map[string]func(*metav1.DownloadInfo) error{
@@ -30,6 +31,9 @@ func DownloadSupportCommands() []string {

func (ks *Kubescape) Download(downloadInfo *metav1.DownloadInfo) error {
setPathandFilename(downloadInfo)
if err := os.MkdirAll(downloadInfo.Path, os.ModePerm); err != nil {
return err
}
if err := downloadArtifact(downloadInfo, downloadFunc); err != nil {
return err
}
@@ -79,13 +83,16 @@ func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())

controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetAccountID(), nil)
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetClusterName())
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetContextName())
if err != nil {
return err
}
if downloadInfo.FileName == "" {
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
}
if controlInputs == nil {
return fmt.Errorf("failed to download controlInputs - received an empty objects")
}
// save in file
err = getter.SaveInFile(controlInputs, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
if err != nil {
@@ -102,7 +109,7 @@ func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
exceptionsGetter := getExceptionsGetter("")
exceptions := []armotypes.PostureExceptionPolicy{}
if tenant.GetAccountID() != "" {
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetClusterName())
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetContextName())
if err != nil {
return err
}
@@ -123,7 +130,7 @@ func downloadFramework(downloadInfo *metav1.DownloadInfo) error {

tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())

g := getPolicyGetter(nil, tenant.GetAccountID(), true, nil)
g := getPolicyGetter(nil, tenant.GetTennatEmail(), true, nil)

if downloadInfo.Name == "" {
// if framework name not specified - download all frameworks
@@ -148,6 +155,9 @@ func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
if err != nil {
return err
}
if framework == nil {
return fmt.Errorf("failed to download framework - received an empty objects")
}
downloadTo := filepath.Join(downloadInfo.Path, downloadInfo.FileName)
err = getter.SaveInFile(framework, downloadTo)
if err != nil {
@@ -162,7 +172,7 @@ func downloadControl(downloadInfo *metav1.DownloadInfo) error {

tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())

g := getPolicyGetter(nil, tenant.GetAccountID(), false, nil)
g := getPolicyGetter(nil, tenant.GetTennatEmail(), false, nil)

if downloadInfo.Name == "" {
// TODO - support
@@ -175,6 +185,9 @@ func downloadControl(downloadInfo *metav1.DownloadInfo) error {
if err != nil {
return err
}
if controls == nil {
return fmt.Errorf("failed to download control - received an empty objects")
}
downloadTo := filepath.Join(downloadInfo.Path, downloadInfo.FileName)
err = getter.SaveInFile(controls, downloadTo)
if err != nil {
@@ -2,17 +2,16 @@ package core

import (
"fmt"
"os"

"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/pkg/hostsensorutils"
"github.com/armosec/kubescape/core/pkg/resourcehandler"
"github.com/armosec/kubescape/core/pkg/resultshandling/reporter"
reporterv2 "github.com/armosec/kubescape/core/pkg/resultshandling/reporter/v2"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
"github.com/armosec/kubescape/v2/core/pkg/resourcehandler"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
reporterv2 "github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter/v2"

"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/rbac-utils/rbacscanner"
@@ -43,16 +42,16 @@ func getExceptionsGetter(useExceptions string) getter.IExceptionsGetter {

func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, submit bool) *cautils.RBACObjects {
if submit {
return cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, tenantConfig.GetAccountID(), tenantConfig.GetClusterName()))
return cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, tenantConfig.GetAccountID(), tenantConfig.GetContextName()))
}
return nil
}

func getReporter(tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan, clusterScan bool) reporter.IReport {
if submit && clusterScan {
func getReporter(tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan bool) reporter.IReport {
if submit {
return reporterv2.NewReportEventReceiver(tenantConfig.GetConfigObj(), reportID)
}
if tenantConfig.GetAccountID() == "" && fwScan && clusterScan {
if tenantConfig.GetAccountID() == "" {
// Add link only when scanning a cluster using a framework
return reporterv2.NewReportMock(reporterv2.NO_SUBMIT_QUERY, "run kubescape with the '--submit' flag")
}
@@ -60,9 +59,7 @@ func getReporter(tenantConfig cautils.ITenantConfig, reportID string, submit, fw
if !fwScan {
message = "Kubescape does not submit scan results when scanning controls"
}
if !clusterScan {
message = "Kubescape will submit scan results only when scanning a cluster (not YAML files)"
}

return reporterv2.NewReportMock("", message)
}

@@ -153,11 +150,11 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
}

// setPolicyGetter set the policy getter - local file/github release/ArmoAPI
func getPolicyGetter(loadPoliciesFromFile []string, accountID string, frameworkScope bool, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
func getPolicyGetter(loadPoliciesFromFile []string, tennatEmail string, frameworkScope bool, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
if len(loadPoliciesFromFile) > 0 {
return getter.NewLoadPolicy(loadPoliciesFromFile)
}
if accountID != "" && frameworkScope {
if tennatEmail != "" && frameworkScope {
g := getter.GetArmoAPIConnector() // download policy from ARMO backend
return g
}
@@ -195,7 +192,7 @@ func getConfigInputsGetter(ControlsInputs string, accountID string, downloadRele
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
}
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull config inputs, fallback to BE
cautils.WarningDisplay(os.Stderr, "Warning: failed to get config inputs from github release, this may affect the scanning results\n")
logger.L().Warning("failed to get config inputs from github release, this may affect the scanning results", helpers.Error(err))
}
return downloadReleasedPolicy
}

@@ -6,8 +6,8 @@ import (
"sort"
"strings"

"github.com/armosec/kubescape/core/cautils/getter"
metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/cautils/getter"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
)

var listFunc = map[string]func(*metav1.ListPolicies) ([]string, error){
@@ -45,7 +45,7 @@ func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {

func listFrameworks(listPolicies *metav1.ListPolicies) ([]string, error) {
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
g := getPolicyGetter(nil, tenant.GetAccountID(), true, nil)
g := getPolicyGetter(nil, tenant.GetTennatEmail(), true, nil)

return listFrameworksNames(g), nil
}
@@ -53,7 +53,7 @@ func listFrameworks(listPolicies *metav1.ListPolicies) ([]string, error) {
func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface

g := getPolicyGetter(nil, tenant.GetAccountID(), false, nil)
g := getPolicyGetter(nil, tenant.GetTennatEmail(), false, nil)
l := getter.ListName
if listPolicies.ListIDs {
l = getter.ListID

@@ -3,20 +3,22 @@ package core
import (
"fmt"

apisv1 "github.com/armosec/opa-utils/httpserver/apis/v1"

"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/k8sinterface"

"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/pkg/hostsensorutils"
"github.com/armosec/kubescape/core/pkg/opaprocessor"
"github.com/armosec/kubescape/core/pkg/policyhandler"
"github.com/armosec/kubescape/core/pkg/resourcehandler"
"github.com/armosec/kubescape/core/pkg/resultshandling"
"github.com/armosec/kubescape/core/pkg/resultshandling/printer"
"github.com/armosec/kubescape/core/pkg/resultshandling/reporter"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
"github.com/armosec/kubescape/v2/core/pkg/opaprocessor"
"github.com/armosec/kubescape/v2/core/pkg/policyhandler"
"github.com/armosec/kubescape/v2/core/pkg/resourcehandler"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"

"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/resources"
@@ -48,6 +50,11 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
// Set submit behavior AFTER loading tenant config
setSubmitBehavior(scanInfo, tenantConfig)

// Do not submit yaml scanning
if len(scanInfo.InputPatterns) > 0 {
scanInfo.Submit = false
}

if scanInfo.Submit {
// submit - Create tenant & Submit report
if err := tenantConfig.SetTenant(); err != nil {
@@ -86,7 +93,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
// ================== setup reporter & printer objects ======================================

// reporting behavior - setup reporter
reportHandler := getReporter(tenantConfig, scanInfo.ReportID, scanInfo.Submit, scanInfo.FrameworkScan, len(scanInfo.InputPatterns) == 0)
reportHandler := getReporter(tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan)

// setup printer
printerHandler := resultshandling.NewPrinter(scanInfo.Format, scanInfo.FormatVersion, scanInfo.VerboseMode)
@@ -111,21 +118,21 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH

interfaces := getInterfaces(scanInfo)

cautils.ClusterName = interfaces.tenantConfig.GetClusterName() // TODO - Deprecated
cautils.ClusterName = interfaces.tenantConfig.GetContextName() // TODO - Deprecated
cautils.CustomerGUID = interfaces.tenantConfig.GetAccountID() // TODO - Deprecated
interfaces.report.SetClusterName(interfaces.tenantConfig.GetClusterName())
interfaces.report.SetClusterName(interfaces.tenantConfig.GetContextName())
interfaces.report.SetCustomerGUID(interfaces.tenantConfig.GetAccountID())

downloadReleasedPolicy := getter.NewDownloadReleasedPolicy() // download config inputs from github release

// set policy getter only after setting the customerGUID
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetAccountID(), scanInfo.FrameworkScan, downloadReleasedPolicy)
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetTennatEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions)

// TODO - list supported frameworks/controls
if scanInfo.ScanAll {
scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), reporthandling.KindFramework)
scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), apisv1.KindFramework)
}

// remove host scanner components
@@ -145,7 +152,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
}

// ========================= opa testing =====================
deps := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), interfaces.tenantConfig.GetClusterName())
deps := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), interfaces.tenantConfig.GetContextName())
reportResults := opaprocessor.NewOPAProcessor(scanData, deps)
if err := reportResults.ProcessRulesListenner(); err != nil {
// TODO - do something

@@ -1,11 +1,11 @@
package core

import (
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/meta/cliinterfaces"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/getter"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
)

func (ks *Kubescape) Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error {
@@ -20,7 +20,7 @@ func (ks *Kubescape) Submit(submitInterfaces cliinterfaces.SubmitInterfaces) err
return err
}
// report
if err := submitInterfaces.Reporter.ActionSendReport(&cautils.OPASessionObj{PostureReport: postureReport, AllResources: allresources}); err != nil {
if err := submitInterfaces.Reporter.Submit(&cautils.OPASessionObj{PostureReport: postureReport, AllResources: allresources}); err != nil {
return err
}
logger.L().Success("Data has been submitted successfully")
1479 core/go.sum
File diff suppressed because it is too large
@@ -2,8 +2,8 @@ package cliinterfaces

import (
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/pkg/resultshandling/reporter"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/reporter"
"github.com/armosec/opa-utils/reporthandling"
)

@@ -1,10 +1,10 @@
package meta

import (
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/meta/cliinterfaces"
metav1 "github.com/armosec/kubescape/core/meta/datastructures/v1"
"github.com/armosec/kubescape/core/pkg/resultshandling"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/meta/cliinterfaces"
metav1 "github.com/armosec/kubescape/v2/core/meta/datastructures/v1"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling"
)

type IKubescape interface {

@@ -10,9 +10,9 @@ import (

"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -6,9 +6,10 @@ import (
"sync"

"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
"github.com/armosec/opa-utils/reporthandling/apis"
"sigs.k8s.io/yaml"
)
@@ -156,56 +157,77 @@ func (hsh *HostSensorHandler) GetKubeletConfigurations() ([]hostsensor.HostSenso
return res, err
}

func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnvelope, error) {
func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
res := make([]hostsensor.HostSensorDataEnvelope, 0)
infoMap := make(map[string]apis.StatusInfo)
if hsh.DaemonSet == nil {
return res, nil
return res, nil, nil
}

var kcData []hostsensor.HostSensorDataEnvelope
var err error
logger.L().Debug("Accessing host scanner")
kcData, err := hsh.GetKubeletConfigurations()
kcData, err = hsh.GetKubeletConfigurations()
if err != nil {
return kcData, err
addInfoToMap(KubeletConfiguration, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
//
kcData, err = hsh.GetKubeletCommandLine()
if err != nil {
return kcData, err
addInfoToMap(KubeletCommandLine, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
//
kcData, err = hsh.GetOsReleaseFile()
if err != nil {
return kcData, err
addInfoToMap(OsReleaseFile, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
//
kcData, err = hsh.GetKernelVersion()
if err != nil {
return kcData, err
addInfoToMap(KernelVersion, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
//
kcData, err = hsh.GetLinuxSecurityHardeningStatus()
if err != nil {
return kcData, err
addInfoToMap(LinuxSecurityHardeningStatus, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
//
kcData, err = hsh.GetOpenPortsList()
if err != nil {
return kcData, err
addInfoToMap(OpenPortsList, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
// GetKernelVariables
kcData, err = hsh.GetKernelVariables()
if err != nil {
return kcData, err
addInfoToMap(LinuxKernelVariables, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
// finish

logger.L().Debug("Done reading information from host scanner")
return res, nil
return res, infoMap, nil
}
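CollectResources now degrades gracefully: instead of aborting on the first failed probe, it records a skipped StatusInfo per resource and keeps collecting. A sketch of how a caller might surface that map, assuming a handler value named hostSensor and the logger helpers used elsewhere in this diff:

envelopes, infoMap, err := hostSensor.CollectResources()
if err != nil {
	return err
}
for resource, status := range infoMap {
	// Resources that could not be collected are reported as skipped, with the reason.
	logger.L().Warning("host scanner resource skipped",
		helpers.String("resource", resource),
		helpers.String("reason", status.InnerInfo))
}
_ = envelopes // forwarded to the OPA processor in the real scan flow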
@@ -1,10 +1,13 @@
package hostsensorutils

import "github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
import (
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
"github.com/armosec/opa-utils/reporthandling/apis"
)

type IHostSensor interface {
Init() error
TearDown() error
CollectResources() ([]hostsensor.HostSensorDataEnvelope, error)
CollectResources() ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error)
GetNamespace() string
}

@@ -2,6 +2,7 @@ package hostsensorutils

import (
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
"github.com/armosec/opa-utils/reporthandling/apis"
)

type HostSensorHandlerMock struct {
@@ -15,8 +16,8 @@ func (hshm *HostSensorHandlerMock) TearDown() error {
return nil
}

func (hshm *HostSensorHandlerMock) CollectResources() ([]hostsensor.HostSensorDataEnvelope, error) {
return []hostsensor.HostSensorDataEnvelope{}, nil
func (hshm *HostSensorHandlerMock) CollectResources() ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
return []hostsensor.HostSensorDataEnvelope{}, nil, nil
}

func (hshm *HostSensorHandlerMock) GetNamespace() string {
35 core/pkg/hostsensorutils/utils.go Normal file
@@ -0,0 +1,35 @@
package hostsensorutils

import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/opa-utils/reporthandling/apis"
)

var (
KubeletConfiguration = "KubeletConfiguration"
OsReleaseFile = "OsReleaseFile"
KernelVersion = "KernelVersion"
LinuxSecurityHardeningStatus = "LinuxSecurityHardeningStatus"
OpenPortsList = "OpenPortsList"
LinuxKernelVariables = "LinuxKernelVariables"
KubeletCommandLine = "KubeletCommandLine"

MapResourceToApiGroup = map[string]string{
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
OsReleaseFile: "hostdata.kubescape.cloud/v1beta0",
KubeletCommandLine: "hostdata.kubescape.cloud/v1beta0",
KernelVersion: "hostdata.kubescape.cloud/v1beta0",
LinuxSecurityHardeningStatus: "hostdata.kubescape.cloud/v1beta0",
OpenPortsList: "hostdata.kubescape.cloud/v1beta0",
LinuxKernelVariables: "hostdata.kubescape.cloud/v1beta0",
}
)

func addInfoToMap(resource string, infoMap map[string]apis.StatusInfo, err error) {
group, version := k8sinterface.SplitApiVersion(MapResourceToApiGroup[resource])
r := k8sinterface.JoinResourceTriplets(group, version, resource)
infoMap[r] = apis.StatusInfo{
InnerStatus: apis.StatusSkipped,
InnerInfo: err.Error(),
}
}
@@ -6,10 +6,10 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/core/pkg/score"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/score"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
@@ -133,7 +133,7 @@ func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (map[stri
|
||||
|
||||
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs) // get store
|
||||
|
||||
inputResources, err := reporthandling.RegoResourcesAggregator(rule, getAllSupportedObjects(opap.K8SResources, opap.AllResources, rule))
|
||||
inputResources, err := reporthandling.RegoResourcesAggregator(rule, getAllSupportedObjects(opap.K8SResources, opap.ArmoResource, opap.AllResources, rule))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting aggregated k8sObjects: %s", err.Error())
|
||||
}
|
||||
@@ -180,6 +180,9 @@ func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (map[stri
|
||||
for j := range ruleResponses[i].FixPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixPath: ruleResponses[i].FixPaths[j]})
|
||||
}
|
||||
if ruleResponses[i].FixCommand != "" {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixCommand: ruleResponses[i].FixCommand})
|
||||
}
|
||||
resources[failedResources[j].GetID()] = ruleResult
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/mocks"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/mocks"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/resources"
|
||||
@@ -112,10 +112,10 @@ func TestProcessResourcesResult(t *testing.T) {
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
|
||||
|
||||
// test control listing
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).All()), len(summaryDetails.ListControls().All()))
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Passed()), len(summaryDetails.ListControls().Passed()))
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Failed()), len(summaryDetails.ListControls().Failed()))
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Excluded()), len(summaryDetails.ListControls().Excluded()))
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).All()), summaryDetails.NumberOfControls().All())
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Passed()), summaryDetails.NumberOfControls().Passed())
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Failed()), summaryDetails.NumberOfControls().Failed())
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Excluded()), summaryDetails.NumberOfControls().Excluded())
|
||||
assert.True(t, summaryDetails.GetStatus().IsFailed())
|
||||
|
||||
opaSessionObj.Exceptions = []armotypes.PostureExceptionPolicy{*mocks.MockExceptionAllKinds(&armotypes.PosturePolicy{FrameworkName: frameworks[0].Name})}
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
resources "github.com/armosec/opa-utils/resources"
|
||||
)
|
||||
|
||||
@@ -46,8 +46,9 @@ func (opap *OPAProcessor) updateResults() {
|
||||
}
|
||||
|
||||
// set result summary
|
||||
opap.Report.SummaryDetails.InitResourcesSummary()
|
||||
|
||||
// map control to error
|
||||
controlToInfoMap := mapControlToInfo(opap.ResourceToControlsMap, opap.InfoMap, opap.Report.SummaryDetails.Controls)
|
||||
opap.Report.SummaryDetails.InitResourcesSummary(controlToInfoMap)
|
||||
// for f := range opap.PostureReport.FrameworkReports {
|
||||
// // set exceptions
|
||||
// exceptions.SetFrameworkExceptions(&opap.PostureReport.FrameworkReports[f], opap.Exceptions, cautils.ClusterName)
|
||||
@@ -60,13 +61,62 @@ func (opap *OPAProcessor) updateResults() {
|
||||
// }
|
||||
}
|
||||
|
||||
func getAllSupportedObjects(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
func mapControlToInfo(mapResourceToControls map[string][]string, infoMap map[string]apis.StatusInfo, controlSummary reportsummary.ControlSummaries) map[string]apis.StatusInfo {
	controlToInfoMap := make(map[string]apis.StatusInfo)
	for resource, statusInfo := range infoMap {
		controlIDs := mapResourceToControls[resource]
		for _, controlID := range controlIDs {
			ctrl := controlSummary.GetControl(reportsummary.EControlCriteriaID, controlID)
			if ctrl != nil {
				resources := ctrl.NumberOfResources()
				// Check that there are no K8s resources too
				if isEmptyResources(resources) {
					controlToInfoMap[controlID] = statusInfo
				}
			}
		}
	}
	return controlToInfoMap
}

func isEmptyResources(counters reportsummary.ICounters) bool {
	return counters.Failed() == 0 && counters.Excluded() == 0 && counters.Passed() == 0
}

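// Note: a minimal usage sketch (not part of the commits above), mirroring the
// mapControlToInfo call in updateResults; the resource key and control ID are
// hypothetical examples.
//
// var skipped apis.StatusInfo // e.g. carrying "enable-host-scan flag not used"
// infoMap := map[string]apis.StatusInfo{
// 	"hostdata.kubescape.cloud/v1beta0/KubeletInfo": skipped,
// }
// resourceToControls := map[string][]string{
// 	"hostdata.kubescape.cloud/v1beta0/KubeletInfo": {"C-0069"},
// }
// controlToInfo := mapControlToInfo(resourceToControls, infoMap, opap.Report.SummaryDetails.Controls)
// // The control is annotated with the status info only when it matched no K8s
// // resources at all, i.e. isEmptyResources(ctrl.NumberOfResources()) is true.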
func getAllSupportedObjects(k8sResources *cautils.K8SResources, armoResources *cautils.ArmoResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
	k8sObjects := []workloadinterface.IMetadata{}
	k8sObjects = append(k8sObjects, getKubernetesObjects(k8sResources, allResources, rule.Match)...)
	k8sObjects = append(k8sObjects, getKubernetesObjects(k8sResources, allResources, rule.DynamicMatch)...)
	k8sObjects = append(k8sObjects, getArmoObjects(armoResources, allResources, rule.DynamicMatch)...)
	return k8sObjects
}

func getArmoObjects(k8sResources *cautils.ArmoResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
	k8sObjects := []workloadinterface.IMetadata{}

	for m := range match {
		for _, groups := range match[m].APIGroups {
			for _, version := range match[m].APIVersions {
				for _, resource := range match[m].Resources {
					groupResources := k8sinterface.ResourceGroupToString(groups, version, resource)
					for _, groupResource := range groupResources {
						if k8sObj, ok := (*k8sResources)[groupResource]; ok {
							// if k8sObj == nil {
							// 	logger.L().Debug(fmt.Sprintf("resource '%s' is nil, probably failed to pull the resource", groupResource))
							// }
							for i := range k8sObj {
								k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
							}
						}
					}
				}
			}
		}
	}

	return filterOutChildResources(k8sObjects, match)
}

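// Note: an illustrative sketch (not part of the commits above) of how a rule's
// DynamicMatch entry is resolved into ArmoResources lookups; the group, version
// and resource values are hypothetical.
//
// match := reporthandling.RuleMatchObjects{
// 	APIGroups:   []string{"hostdata.kubescape.cloud"},
// 	APIVersions: []string{"v1beta0"},
// 	Resources:   []string{"KubeletInfo"},
// }
// objs := getArmoObjects(armoResources, allResources, []reporthandling.RuleMatchObjects{match})
// // ResourceGroupToString expands the triplet into "<group>/<version>/<kind>"
// // keys; the matching IDs found in *armoResources are resolved to objects via
// // allResources, then child resources are filtered out.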
func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
|
||||
k8sObjects := []workloadinterface.IMetadata{}
|
||||
|
||||
@@ -78,7 +128,7 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[s
|
||||
for _, groupResource := range groupResources {
|
||||
if k8sObj, ok := (*k8sResources)[groupResource]; ok {
|
||||
if k8sObj == nil {
|
||||
logger.L().Debug(fmt.Sprintf("resource '%s' is nil, probably failed to pull the resource", groupResource))
|
||||
// logger.L().Debug("skipping", helpers.String("resource", groupResource))
|
||||
}
|
||||
for i := range k8sObj {
|
||||
k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/armosec/kubescape/core/mocks"
|
||||
"github.com/armosec/kubescape/v2/core/mocks"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
@@ -3,8 +3,8 @@ package policyhandler
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/pkg/resourcehandler"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resourcehandler"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
@@ -23,7 +23,8 @@ func NewPolicyHandler(resourceHandler resourcehandler.IResourceHandler) *PolicyH
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) CollectResources(notification *reporthandling.PolicyNotification, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
|
||||
opaSessionObj := cautils.NewOPASessionObj(nil, nil)
|
||||
opaSessionObj := cautils.NewOPASessionObj(nil, nil, scanInfo)
|
||||
|
||||
// validate notification
|
||||
// TODO
|
||||
policyHandler.getters = &scanInfo.Getters
|
||||
@@ -37,7 +38,7 @@ func (policyHandler *PolicyHandler) CollectResources(notification *reporthandlin
|
||||
if err != nil {
|
||||
return opaSessionObj, err
|
||||
}
|
||||
if opaSessionObj.K8SResources == nil || len(*opaSessionObj.K8SResources) == 0 {
|
||||
if (opaSessionObj.K8SResources == nil || len(*opaSessionObj.K8SResources) == 0) && (opaSessionObj.ArmoResource == nil || len(*opaSessionObj.ArmoResource) == 0) {
|
||||
return opaSessionObj, fmt.Errorf("empty list of resources")
|
||||
}
|
||||
|
||||
@@ -46,15 +47,16 @@ func (policyHandler *PolicyHandler) CollectResources(notification *reporthandlin
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) getResources(notification *reporthandling.PolicyNotification, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
|
||||
|
||||
opaSessionObj.Report.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
|
||||
resourcesMap, allResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj.Policies, ¬ification.Designators)
|
||||
|
||||
resourcesMap, allResources, armoResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, ¬ification.Designators)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opaSessionObj.K8SResources = resourcesMap
|
||||
opaSessionObj.AllResources = allResources
|
||||
opaSessionObj.ArmoResource = armoResources
|
||||
|
||||
return nil
|
||||
}
|
||||
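// Note: a minimal caller sketch (not part of the commits above), assuming an
// initialized PolicyHandler, notification and scanInfo.
//
// opaSessionObj, err := policyHandler.CollectResources(notification, scanInfo)
// if err != nil {
// 	return err // also raised when both K8SResources and ArmoResource are empty
// }
// // GetResources now returns four values - the K8s resource map, all resources
// // indexed by ID, the Armo (non-K8s) resource map and an error - and
// // CollectResources stores the first three on the session object.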
|
||||
@@ -4,8 +4,10 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
@@ -54,6 +56,11 @@ func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling
|
||||
}
|
||||
if receivedFramework != nil {
|
||||
frameworks = append(frameworks, *receivedFramework)
|
||||
|
||||
cache := getter.GetDefaultPath(rule.Name + ".json")
|
||||
if err := getter.SaveInFile(receivedFramework, cache); err != nil {
|
||||
logger.L().Warning("failed to cache file", helpers.String("file", cache), helpers.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
case reporthandling.KindControl: // Download controls
|
||||
@@ -67,6 +74,11 @@ func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling
|
||||
}
|
||||
if receivedControl != nil {
|
||||
f.Controls = append(f.Controls, *receivedControl)
|
||||
|
||||
cache := getter.GetDefaultPath(rule.Name + ".json")
|
||||
if err := getter.SaveInFile(receivedControl, cache); err != nil {
|
||||
logger.L().Warning("failed to cache file", helpers.String("file", cache), helpers.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
frameworks = append(frameworks, f)
|
||||
|
||||
@@ -4,11 +4,11 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/core/pkg/containerscan"
|
||||
"github.com/armosec/kubescape/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/containerscan"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
)
|
||||
|
||||
func NewArmoAdaptor(armoAPI *getter.ArmoAPI) *ArmoCivAdaptor {
|
||||
|
||||
@@ -3,7 +3,7 @@ package v1
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/kubescape/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
|
||||
@@ -3,8 +3,8 @@ package v1
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/armosec/kubescape/core/pkg/containerscan"
|
||||
"github.com/armosec/kubescape/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/containerscan"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
)
|
||||
|
||||
type ArmoCivAdaptorMock struct {
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/core/pkg/containerscan"
|
||||
"github.com/armosec/kubescape/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/containerscan"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
)
|
||||
|
||||
func (armoCivAdaptor *ArmoCivAdaptor) getImageLastScanId(imageID *registryvulnerabilities.ContainerImageIdentifier) (string, error) {
|
||||
@@ -60,6 +60,9 @@ func responseObjectToVulnerabilities(vulnerabilitiesList containerscan.Vulnerabi
|
||||
vulnerabilities[i].Relevancy = vulnerabilityEntry.Relevancy
|
||||
vulnerabilities[i].Severity = vulnerabilityEntry.Severity
|
||||
vulnerabilities[i].UrgentCount = vulnerabilityEntry.UrgentCount
|
||||
vulnerabilities[i].Categories = registryvulnerabilities.Categories{
|
||||
IsRCE: vulnerabilityEntry.Categories.IsRCE,
|
||||
}
|
||||
}
|
||||
return vulnerabilities
|
||||
}
|
||||
|
||||
@@ -3,7 +3,7 @@ package v1
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
)
|
||||
|
||||
type V2ListRequest struct {
|
||||
|
||||
@@ -23,6 +23,10 @@ type FixedIn struct {
|
||||
ImgTag string `json:"imageTag"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
type Categories struct {
|
||||
IsRCE bool `json:"isRce"`
|
||||
}
|
||||
|
||||
type Vulnerability struct {
|
||||
Name string `json:"name"`
|
||||
RelatedPackageName string `json:"packageName"`
|
||||
@@ -36,6 +40,7 @@ type Vulnerability struct {
|
||||
UrgentCount int `json:"urgent"`
|
||||
NeglectedCount int `json:"neglected"`
|
||||
HealthStatus string `json:"healthStatus"`
|
||||
Categories Categories `json:"categories"`
|
||||
}
|
||||
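// Note: an illustrative payload sketch (not part of the commits above) showing
// how the new "categories" field unmarshals; the CVE name and value are
// hypothetical.
//
// var v Vulnerability
// _ = json.Unmarshal([]byte(`{"name":"CVE-2021-44228","categories":{"isRce":true}}`), &v)
// // v.Categories.IsRCE == true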
|
||||
type ContainerImageVulnerabilityReport struct {
|
||||
|
||||
@@ -1,110 +0,0 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/cloudsupport"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
)
|
||||
|
||||
var (
|
||||
KS_KUBE_CLUSTER_ENV_VAR = "KS_KUBE_CLUSTER"
|
||||
KS_CLOUD_PROVIDER_ENV_VAR = "KS_CLOUD_PROVIDER"
|
||||
KS_CLOUD_REGION_ENV_VAR = "KS_CLOUD_REGION"
|
||||
KS_GKE_PROJECT_ENV_VAR = "KS_GKE_PROJECT"
|
||||
)
|
||||
|
||||
type ICloudProvider interface {
|
||||
getKubeCluster() string
|
||||
getRegion(cluster string, provider string) (string, error)
|
||||
getProject(cluster string, provider string) (string, error)
|
||||
getKubeClusterName() string
|
||||
}
|
||||
|
||||
func initCloudProvider() ICloudProvider {
|
||||
|
||||
switch getCloudProvider() {
|
||||
case "gke", "gcp":
|
||||
if isEnvVars() {
|
||||
return NewGKEProviderEnvVar()
|
||||
}
|
||||
return NewGKEProviderContext()
|
||||
case "eks", "aws":
|
||||
if isEnvVars() {
|
||||
return NewEKSProviderEnvVar()
|
||||
}
|
||||
return NewEKSProviderContext()
|
||||
}
|
||||
return NewEmptyCloudProvider()
|
||||
}
|
||||
|
||||
func getCloudProvider() string {
|
||||
var provider string
|
||||
if isEnvVars() {
|
||||
provider = getCloudProviderFromEnvVar()
|
||||
} else {
|
||||
provider = getCloudProviderFromContext()
|
||||
}
|
||||
return strings.ToLower(provider)
|
||||
}
|
||||
|
||||
func getCloudProviderFromContext() string {
|
||||
return cloudsupport.GetCloudProvider(getClusterFromContext())
|
||||
}
|
||||
|
||||
func getClusterFromContext() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
cluster := context.Cluster
|
||||
if cluster != "" {
|
||||
return cluster
|
||||
}
|
||||
return k8sinterface.GetClusterName()
|
||||
}
|
||||
|
||||
func getCloudProviderFromEnvVar() string {
|
||||
val, present := os.LookupEnv(KS_CLOUD_PROVIDER_ENV_VAR)
|
||||
if present {
|
||||
return val
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func isEnvVars() bool {
|
||||
_, present := os.LookupEnv(KS_KUBE_CLUSTER_ENV_VAR)
|
||||
if !present {
|
||||
return false
|
||||
}
|
||||
_, present = os.LookupEnv(KS_CLOUD_PROVIDER_ENV_VAR)
|
||||
if !present {
|
||||
return false
|
||||
}
|
||||
_, present = os.LookupEnv(KS_CLOUD_REGION_ENV_VAR)
|
||||
return present
|
||||
}
|
||||
|
||||
type EmptyCloudProvider struct {
|
||||
}
|
||||
|
||||
func NewEmptyCloudProvider() *EmptyCloudProvider {
|
||||
return &EmptyCloudProvider{}
|
||||
}
|
||||
|
||||
func (emptyCloudProvider *EmptyCloudProvider) getKubeCluster() string {
|
||||
return getClusterFromContext()
|
||||
}
|
||||
|
||||
func (emptyCloudProvider *EmptyCloudProvider) getKubeClusterName() string {
|
||||
return emptyCloudProvider.getKubeCluster()
|
||||
}
|
||||
|
||||
func (emptyCloudProvider *EmptyCloudProvider) getRegion(cluster string, provider string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (emptyCloudProvider *EmptyCloudProvider) getProject(cluster string, provider string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
@@ -1,101 +0,0 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
)
|
||||
|
||||
type EKSProviderEnvVar struct {
|
||||
}
|
||||
|
||||
func NewEKSProviderEnvVar() *EKSProviderEnvVar {
|
||||
return &EKSProviderEnvVar{}
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getKubeClusterName() string {
|
||||
return eksProviderEnvVar.getKubeCluster()
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getKubeCluster() string {
|
||||
val, present := os.LookupEnv(KS_KUBE_CLUSTER_ENV_VAR)
|
||||
if present {
|
||||
return val
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getRegion(cluster string, provider string) (string, error) {
|
||||
return eksProviderEnvVar.getRegionForEKS(cluster)
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getProject(cluster string, provider string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (eksProviderEnvVar *EKSProviderEnvVar) getRegionForEKS(cluster string) (string, error) {
|
||||
region, present := os.LookupEnv(KS_CLOUD_REGION_ENV_VAR)
|
||||
if present {
|
||||
return region, nil
|
||||
}
|
||||
splittedClusterContext := strings.Split(cluster, ".")
|
||||
if len(splittedClusterContext) < 2 {
|
||||
return "", fmt.Errorf("failed to get region")
|
||||
}
|
||||
region = splittedClusterContext[1]
|
||||
return region, nil
|
||||
}
|
||||
|
||||
// ------------------------------------- EKSProviderContext -------------------------
|
||||
|
||||
type EKSProviderContext struct {
|
||||
}
|
||||
|
||||
func NewEKSProviderContext() *EKSProviderContext {
|
||||
return &EKSProviderContext{}
|
||||
}
|
||||
|
||||
func (eksProviderContext *EKSProviderContext) getKubeClusterName() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
cluster := context.Cluster
|
||||
if cluster != "" {
|
||||
splittedCluster := strings.Split(cluster, ".")
|
||||
if len(splittedCluster) > 1 {
|
||||
return splittedCluster[0]
|
||||
}
|
||||
}
|
||||
splittedCluster := strings.Split(k8sinterface.GetClusterName(), ".")
|
||||
if len(splittedCluster) > 1 {
|
||||
return splittedCluster[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (eksProviderContext *EKSProviderContext) getKubeCluster() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
if context.Cluster != "" {
|
||||
return context.Cluster
|
||||
}
|
||||
return k8sinterface.GetClusterName()
|
||||
}
|
||||
|
||||
func (eksProviderContext *EKSProviderContext) getRegion(cluster string, provider string) (string, error) {
|
||||
splittedClusterContext := strings.Split(cluster, ".")
|
||||
if len(splittedClusterContext) < 2 {
|
||||
return "", fmt.Errorf("failed to get region")
|
||||
}
|
||||
region := splittedClusterContext[1]
|
||||
return region, nil
|
||||
}
|
||||
|
||||
func (eksProviderContext *EKSProviderContext) getProject(cluster string, provider string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
@@ -2,15 +2,15 @@ package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"k8s.io/apimachinery/pkg/version"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
)
|
||||
|
||||
// FileResourceHandler handle resources from files and URLs
|
||||
@@ -27,36 +27,48 @@ func NewFileResourceHandler(inputPatterns []string, registryAdaptors *RegistryAd
|
||||
}
|
||||
}
|
||||
|
||||
func (fileHandler *FileResourceHandler) GetResources(frameworks []reporthandling.Framework, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, error) {
|
||||
func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASessionObj, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.ArmoResources, error) {
|
||||
|
||||
// build resources map
|
||||
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
|
||||
k8sResources := setResourceMap(frameworks)
|
||||
k8sResources := setK8sResourceMap(sessionObj.Policies)
|
||||
allResources := map[string]workloadinterface.IMetadata{}
|
||||
workloadIDToSource := make(map[string]string, 0)
|
||||
armoResources := &cautils.ArmoResources{}
|
||||
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
|
||||
// load resource from local file system
|
||||
w, err := cautils.LoadResourcesFromFiles(fileHandler.inputPatterns)
|
||||
sourceToWorkloads, err := cautils.LoadResourcesFromFiles(fileHandler.inputPatterns)
|
||||
if err != nil {
|
||||
return nil, allResources, err
|
||||
return nil, allResources, nil, err
|
||||
}
|
||||
if w != nil {
|
||||
workloads = append(workloads, w...)
|
||||
for source, ws := range sourceToWorkloads {
|
||||
workloads = append(workloads, ws...)
|
||||
for i := range ws {
|
||||
workloadIDToSource[ws[i].GetID()] = source
|
||||
}
|
||||
}
|
||||
logger.L().Debug("files found in local storage", helpers.Int("files", len(sourceToWorkloads)), helpers.Int("workloads", len(workloads)))
|
||||
|
||||
// load resources from url
|
||||
w, err = loadResourcesFromUrl(fileHandler.inputPatterns)
|
||||
sourceToWorkloads, err = loadResourcesFromUrl(fileHandler.inputPatterns)
|
||||
if err != nil {
|
||||
return nil, allResources, err
|
||||
return nil, allResources, nil, err
|
||||
}
|
||||
if w != nil {
|
||||
workloads = append(workloads, w...)
|
||||
for source, ws := range sourceToWorkloads {
|
||||
workloads = append(workloads, ws...)
|
||||
for i := range ws {
|
||||
workloadIDToSource[ws[i].GetID()] = source
|
||||
}
|
||||
}
|
||||
|
||||
if len(workloads) == 0 {
|
||||
return nil, allResources, fmt.Errorf("empty list of workloads - no workloads found")
|
||||
return nil, allResources, nil, fmt.Errorf("empty list of workloads - no workloads found")
|
||||
}
|
||||
logger.L().Debug("files found in git repo", helpers.Int("files", len(sourceToWorkloads)), helpers.Int("workloads", len(workloads)))
|
||||
|
||||
sessionObj.ResourceSource = workloadIDToSource
|
||||
|
||||
// map all resources: map["/group/version/kind"][]<k8s workloads>
|
||||
mappedResources := mapResources(workloads)
|
||||
@@ -73,11 +85,11 @@ func (fileHandler *FileResourceHandler) GetResources(frameworks []reporthandling
|
||||
}
|
||||
}
|
||||
|
||||
if err := fileHandler.registryAdaptors.collectImagesVulnerabilities(k8sResources, allResources); err != nil {
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: failed to collect images vulnerabilities: %s\n", err.Error())
|
||||
if err := fileHandler.registryAdaptors.collectImagesVulnerabilities(k8sResources, allResources, armoResources); err != nil {
|
||||
logger.L().Warning("failed to collect images vulnerabilities", helpers.Error(err))
|
||||
}
|
||||
|
||||
return k8sResources, allResources, nil
|
||||
return k8sResources, allResources, armoResources, nil
|
||||
|
||||
}
|
||||
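// Note: a minimal sketch (not part of the commits above) of the new source
// tracking; the file path is a hypothetical example.
//
// sourceToWorkloads, _ := cautils.LoadResourcesFromFiles(fileHandler.inputPatterns)
// // LoadResourcesFromFiles now returns a map of source file -> workloads, and
// // GetResources records the reverse mapping on the session object, e.g.
// //   sessionObj.ResourceSource[workloadID] = "deployments/nginx.yaml"
// // so results can be traced back to the file a workload came from.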
|
||||
|
||||
@@ -1,131 +0,0 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
)
|
||||
|
||||
type GKEProviderEnvVar struct {
|
||||
}
|
||||
|
||||
func NewGKEProviderEnvVar() *GKEProviderEnvVar {
|
||||
return &GKEProviderEnvVar{}
|
||||
}
|
||||
func (gkeProvider *GKEProviderEnvVar) getKubeClusterName() string {
|
||||
return gkeProvider.getKubeCluster()
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getKubeCluster() string {
|
||||
val, present := os.LookupEnv(KS_KUBE_CLUSTER_ENV_VAR)
|
||||
if present {
|
||||
return val
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getRegion(cluster string, provider string) (string, error) {
|
||||
return gkeProvider.getRegionForGKE(cluster)
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getProject(cluster string, provider string) (string, error) {
|
||||
return gkeProvider.getProjectForGKE(cluster)
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getProjectForGKE(cluster string) (string, error) {
|
||||
project, present := os.LookupEnv(KS_GKE_PROJECT_ENV_VAR)
|
||||
if present {
|
||||
return project, nil
|
||||
}
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return "", fmt.Errorf("failed to parse project name from cluster name: '%s'", cluster)
|
||||
}
|
||||
project = parsedName[1]
|
||||
return project, nil
|
||||
}
|
||||
|
||||
func (gkeProvider *GKEProviderEnvVar) getRegionForGKE(cluster string) (string, error) {
|
||||
region, present := os.LookupEnv(KS_CLOUD_REGION_ENV_VAR)
|
||||
if present {
|
||||
return region, nil
|
||||
}
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return "", fmt.Errorf("failed to parse region name from cluster name: '%s'", cluster)
|
||||
}
|
||||
region = parsedName[2]
|
||||
return region, nil
|
||||
|
||||
}
|
||||
|
||||
// ------------------------------ GKEProviderContext --------------------------------------------------------
|
||||
|
||||
type GKEProviderContext struct {
|
||||
}
|
||||
|
||||
func NewGKEProviderContext() *GKEProviderContext {
|
||||
return &GKEProviderContext{}
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getKubeClusterName() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
cluster := context.Cluster
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return ""
|
||||
}
|
||||
clusterName := parsedName[3]
|
||||
if clusterName != "" {
|
||||
return clusterName
|
||||
}
|
||||
cluster = k8sinterface.GetClusterName()
|
||||
parsedName = strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return ""
|
||||
}
|
||||
return parsedName[3]
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getKubeCluster() string {
|
||||
context := k8sinterface.GetCurrentContext()
|
||||
if context == nil {
|
||||
return ""
|
||||
}
|
||||
if context.Cluster != "" {
|
||||
return context.Cluster
|
||||
}
|
||||
return k8sinterface.GetClusterName()
|
||||
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getRegion(cluster string, provider string) (string, error) {
|
||||
return gkeProviderContext.getRegionForGKE(cluster)
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getProject(cluster string, provider string) (string, error) {
|
||||
return gkeProviderContext.getProjectForGKE(cluster)
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getProjectForGKE(cluster string) (string, error) {
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return "", fmt.Errorf("failed to parse project name from cluster name: '%s'", cluster)
|
||||
}
|
||||
project := parsedName[1]
|
||||
return project, nil
|
||||
}
|
||||
|
||||
func (gkeProviderContext *GKEProviderContext) getRegionForGKE(cluster string) (string, error) {
|
||||
parsedName := strings.Split(cluster, "_")
|
||||
if len(parsedName) < 3 {
|
||||
return "", fmt.Errorf("failed to parse region name from cluster name: '%s'", cluster)
|
||||
}
|
||||
region := parsedName[2]
|
||||
return region, nil
|
||||
}
|
||||
@@ -5,12 +5,12 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/core/pkg/hostsensorutils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
|
||||
"github.com/armosec/k8s-interface/cloudsupport"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
@@ -44,46 +44,92 @@ func NewK8sResourceHandler(k8s *k8sinterface.KubernetesApi, fieldSelector IField
|
||||
}
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) GetResources(frameworks []reporthandling.Framework, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, error) {
|
||||
func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessionObj, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.ArmoResources, error) {
|
||||
allResources := map[string]workloadinterface.IMetadata{}
|
||||
|
||||
// get k8s resources
|
||||
logger.L().Info("Accessing Kubernetes objects")
|
||||
|
||||
cautils.StartSpinner()
|
||||
|
||||
resourceToControl := make(map[string][]string)
|
||||
// build resources map
|
||||
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
|
||||
k8sResourcesMap := setResourceMap(frameworks)
|
||||
k8sResourcesMap := setK8sResourceMap(sessionObj.Policies)
|
||||
|
||||
// get namespace and labels from designator (ignore cluster labels)
|
||||
_, namespace, labels := armotypes.DigestPortalDesignator(designator)
|
||||
|
||||
// pull k8s resources
|
||||
armoResourceMap := setArmoResourceMap(sessionObj.Policies, resourceToControl)
|
||||
|
||||
// map of armo resources to control_ids
|
||||
sessionObj.ResourceToControlsMap = resourceToControl
|
||||
|
||||
if err := k8sHandler.pullResources(k8sResourcesMap, allResources, namespace, labels); err != nil {
|
||||
cautils.StopSpinner()
|
||||
return k8sResourcesMap, allResources, err
|
||||
return k8sResourcesMap, allResources, armoResourceMap, err
|
||||
}
|
||||
|
||||
if err := k8sHandler.registryAdaptors.collectImagesVulnerabilities(k8sResourcesMap, allResources); err != nil {
|
||||
logger.L().Warning("failed to collect image vulnerabilities", helpers.Error(err))
|
||||
numberOfWorkerNodes, err := k8sHandler.pullWorkerNodesNumber()
|
||||
|
||||
if err != nil {
|
||||
logger.L().Debug("failed to collect worker nodes number", helpers.Error(err))
|
||||
} else {
|
||||
if sessionObj.Metadata != nil && sessionObj.Metadata.ContextMetadata.ClusterContextMetadata != nil {
|
||||
sessionObj.Metadata.ContextMetadata.ClusterContextMetadata.NumberOfWorkerNodes = numberOfWorkerNodes
|
||||
}
|
||||
}
|
||||
|
||||
if err := k8sHandler.collectHostResources(allResources, k8sResourcesMap); err != nil {
|
||||
logger.L().Warning("failed to collect host scanner resources", helpers.Error(err))
|
||||
imgVulnResources := cautils.MapImageVulnResources(armoResourceMap)
|
||||
// check that controls use image vulnerability resources
|
||||
if len(imgVulnResources) > 0 {
|
||||
if err := k8sHandler.registryAdaptors.collectImagesVulnerabilities(k8sResourcesMap, allResources, armoResourceMap); err != nil {
|
||||
logger.L().Warning("failed to collect image vulnerabilities", helpers.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
hostResources := cautils.MapHostResources(armoResourceMap)
|
||||
// check that controls use host sensor resources
|
||||
if len(hostResources) > 0 {
|
||||
if sessionObj.Metadata.ScanMetadata.HostScanner {
|
||||
infoMap, err := k8sHandler.collectHostResources(allResources, armoResourceMap)
|
||||
if err != nil {
|
||||
logger.L().Warning("failed to collect host scanner resources", helpers.Error(err))
|
||||
cautils.SetInfoMapForResources(err.Error(), hostResources, sessionObj.InfoMap)
|
||||
} else if k8sHandler.hostSensorHandler == nil {
|
||||
// using hostSensor mock
|
||||
cautils.SetInfoMapForResources("failed to init host scanner", hostResources, sessionObj.InfoMap)
|
||||
} else {
|
||||
sessionObj.InfoMap = infoMap
|
||||
}
|
||||
} else {
|
||||
cautils.SetInfoMapForResources("enable-host-scan flag not used", hostResources, sessionObj.InfoMap)
|
||||
}
|
||||
}
|
||||
|
||||
if err := k8sHandler.collectRbacResources(allResources); err != nil {
|
||||
logger.L().Warning("failed to collect rbac resources", helpers.Error(err))
|
||||
}
|
||||
if err := getCloudProviderDescription(allResources, k8sResourcesMap); err != nil {
|
||||
logger.L().Warning("failed to collect cloud data", helpers.Error(err))
|
||||
|
||||
cloudResources := cautils.MapCloudResources(armoResourceMap)
|
||||
// check that controls use cloud resources
|
||||
if len(cloudResources) > 0 {
|
||||
provider, err := getCloudProviderDescription(allResources, armoResourceMap)
|
||||
if err != nil {
|
||||
cautils.SetInfoMapForResources(err.Error(), cloudResources, sessionObj.InfoMap)
|
||||
logger.L().Warning("failed to collect cloud data", helpers.Error(err))
|
||||
}
|
||||
if provider != "" {
|
||||
if sessionObj.Metadata != nil && sessionObj.Metadata.ContextMetadata.ClusterContextMetadata != nil {
|
||||
sessionObj.Metadata.ContextMetadata.ClusterContextMetadata.CloudProvider = provider
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cautils.StopSpinner()
|
||||
logger.L().Success("Accessed to Kubernetes objects")
|
||||
|
||||
return k8sResourcesMap, allResources, nil
|
||||
return k8sResourcesMap, allResources, armoResourceMap, nil
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) GetClusterAPIServerInfo() *version.Info {
|
||||
@@ -180,12 +226,11 @@ func ConvertMapListToMeta(resourceMap []map[string]interface{}) []workloadinterf
|
||||
// }
|
||||
// return nil
|
||||
// }
|
||||
func (k8sHandler *K8sResourceHandler) collectHostResources(allResources map[string]workloadinterface.IMetadata, resourcesMap *cautils.K8SResources) error {
|
||||
func (k8sHandler *K8sResourceHandler) collectHostResources(allResources map[string]workloadinterface.IMetadata, armoResourceMap *cautils.ArmoResources) (map[string]apis.StatusInfo, error) {
|
||||
logger.L().Debug("Collecting host scanner resources")
|
||||
|
||||
hostResources, err := k8sHandler.hostSensorHandler.CollectResources()
|
||||
hostResources, infoMap, err := k8sHandler.hostSensorHandler.CollectResources()
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for rscIdx := range hostResources {
|
||||
@@ -193,13 +238,13 @@ func (k8sHandler *K8sResourceHandler) collectHostResources(allResources map[stri
|
||||
groupResource := k8sinterface.JoinResourceTriplets(group, version, hostResources[rscIdx].GetKind())
|
||||
allResources[hostResources[rscIdx].GetID()] = &hostResources[rscIdx]
|
||||
|
||||
grpResourceList, ok := (*resourcesMap)[groupResource]
|
||||
grpResourceList, ok := (*armoResourceMap)[groupResource]
|
||||
if !ok {
|
||||
grpResourceList = make([]string, 0)
|
||||
}
|
||||
(*resourcesMap)[groupResource] = append(grpResourceList, hostResources[rscIdx].GetID())
|
||||
(*armoResourceMap)[groupResource] = append(grpResourceList, hostResources[rscIdx].GetID())
|
||||
}
|
||||
return nil
|
||||
return infoMap, nil
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) collectRbacResources(allResources map[string]workloadinterface.IMetadata) error {
|
||||
@@ -218,41 +263,41 @@ func (k8sHandler *K8sResourceHandler) collectRbacResources(allResources map[stri
|
||||
return nil
|
||||
}
|
||||
|
||||
func getCloudProviderDescription(allResources map[string]workloadinterface.IMetadata, k8sResourcesMap *cautils.K8SResources) error {
|
||||
func getCloudProviderDescription(allResources map[string]workloadinterface.IMetadata, armoResourceMap *cautils.ArmoResources) (string, error) {
|
||||
logger.L().Debug("Collecting cloud data")
|
||||
|
||||
cloudProvider := initCloudProvider()
|
||||
cluster := cloudProvider.getKubeCluster()
|
||||
clusterName := cloudProvider.getKubeClusterName()
|
||||
provider := getCloudProvider()
|
||||
region, err := cloudProvider.getRegion(cluster, provider)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
project, err := cloudProvider.getProject(cluster, provider)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clusterName := cautils.ClusterName
|
||||
|
||||
provider := cloudsupport.GetCloudProvider(clusterName)
|
||||
|
||||
if provider != "" {
|
||||
logger.L().Debug("cloud", helpers.String("cluster", cluster), helpers.String("clusterName", clusterName), helpers.String("provider", provider), helpers.String("region", region), helpers.String("project", project))
|
||||
logger.L().Debug("cloud", helpers.String("cluster", clusterName), helpers.String("clusterName", clusterName), helpers.String("provider", provider))
|
||||
|
||||
wl, err := cloudsupport.GetDescriptiveInfoFromCloudProvider(clusterName, provider, region, project)
|
||||
wl, err := cloudsupport.GetDescriptiveInfoFromCloudProvider(clusterName, provider)
|
||||
if err != nil {
|
||||
// Return error with useful info on how to configure credentials for getting cloud provider info
|
||||
switch provider {
|
||||
case "gke":
|
||||
return fmt.Errorf("could not get descriptive information about gke cluster: %s using sdk client. See https://developers.google.com/accounts/docs/application-default-credentials for more information", cluster)
|
||||
case "eks":
|
||||
return fmt.Errorf("could not get descriptive information about eks cluster: %s using sdk client. Check out how to configure credentials in https://docs.aws.amazon.com/sdk-for-go/api/", cluster)
|
||||
case "aks":
|
||||
return fmt.Errorf("could not get descriptive information about aks cluster: %s. %v", cluster, err.Error())
|
||||
}
|
||||
return err
|
||||
logger.L().Debug("failed to get descriptive information", helpers.Error(err))
|
||||
return provider, fmt.Errorf("failed to get %s descriptive information. Read more: https://hub.armo.cloud/docs/kubescape-integration-with-cloud-providers", strings.ToUpper(provider))
|
||||
}
|
||||
allResources[wl.GetID()] = wl
|
||||
(*k8sResourcesMap)[fmt.Sprintf("%s/%s", wl.GetApiVersion(), wl.GetKind())] = []string{wl.GetID()}
|
||||
(*armoResourceMap)[fmt.Sprintf("%s/%s", wl.GetApiVersion(), wl.GetKind())] = []string{wl.GetID()}
|
||||
}
|
||||
return nil
|
||||
return provider, nil
|
||||
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) pullWorkerNodesNumber() (int, error) {
|
||||
// labels used for control plane
|
||||
listOptions := metav1.ListOptions{
|
||||
LabelSelector: "!node-role.kubernetes.io/control-plane,!node-role.kubernetes.io/master",
|
||||
}
|
||||
nodesList, err := k8sHandler.k8s.KubernetesClient.CoreV1().Nodes().List(context.TODO(), listOptions)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
nodesNumber := 0
|
||||
if nodesList != nil {
|
||||
nodesNumber = len(nodesList.Items)
|
||||
}
|
||||
return nodesNumber, nil
|
||||
}
|
||||
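// Note: a minimal usage sketch (not part of the commits above), mirroring the
// caller in GetResources; sessionObj is assumed to be an initialized
// OPASessionObj.
//
// if n, err := k8sHandler.pullWorkerNodesNumber(); err == nil {
// 	if sessionObj.Metadata != nil && sessionObj.Metadata.ContextMetadata.ClusterContextMetadata != nil {
// 		sessionObj.Metadata.ContextMetadata.ClusterContextMetadata.NumberOfWorkerNodes = n
// 	}
// }
// // The label selector "!node-role.kubernetes.io/control-plane,!node-role.kubernetes.io/master"
// // excludes nodes carrying either control-plane label, so only worker nodes are counted.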
|
||||
@@ -3,15 +3,24 @@ package resourcehandler
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/hostsensorutils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"k8s.io/utils/strings/slices"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
)
|
||||
|
||||
func setResourceMap(frameworks []reporthandling.Framework) *cautils.K8SResources {
|
||||
var (
|
||||
ClusterDescribe = "ClusterDescribe"
|
||||
|
||||
MapResourceToApiGroupCloud = map[string][]string{
|
||||
ClusterDescribe: {"container.googleapis.com/v1", "eks.amazonaws.com/v1", "management.azure.com/v1"}}
|
||||
)
|
||||
|
||||
func setK8sResourceMap(frameworks []reporthandling.Framework) *cautils.K8SResources {
|
||||
k8sResources := make(cautils.K8SResources)
|
||||
complexMap := setComplexResourceMap(frameworks)
|
||||
complexMap := setComplexK8sResourceMap(frameworks)
|
||||
for group := range complexMap {
|
||||
for version := range complexMap[group] {
|
||||
for resource := range complexMap[group][version] {
|
||||
@@ -25,33 +34,74 @@ func setResourceMap(frameworks []reporthandling.Framework) *cautils.K8SResources
|
||||
return &k8sResources
|
||||
}
|
||||
|
||||
func convertComplexResourceMap(frameworks []reporthandling.Framework) map[string]map[string]map[string]interface{} {
|
||||
func setArmoResourceMap(frameworks []reporthandling.Framework, resourceToControl map[string][]string) *cautils.ArmoResources {
|
||||
armoResources := make(cautils.ArmoResources)
|
||||
complexMap := setComplexArmoResourceMap(frameworks, resourceToControl)
|
||||
for group := range complexMap {
|
||||
for version := range complexMap[group] {
|
||||
for resource := range complexMap[group][version] {
|
||||
groupResources := k8sinterface.ResourceGroupToString(group, version, resource)
|
||||
for _, groupResource := range groupResources {
|
||||
armoResources[groupResource] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return &armoResources
|
||||
}
|
||||
|
||||
func setComplexK8sResourceMap(frameworks []reporthandling.Framework) map[string]map[string]map[string]interface{} {
|
||||
k8sResources := make(map[string]map[string]map[string]interface{})
|
||||
for _, framework := range frameworks {
|
||||
for _, control := range framework.Controls {
|
||||
for _, rule := range control.Rules {
|
||||
for _, match := range rule.Match {
|
||||
insertK8sResources(k8sResources, match)
|
||||
insertResources(k8sResources, match)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return k8sResources
|
||||
}
|
||||
func setComplexResourceMap(frameworks []reporthandling.Framework) map[string]map[string]map[string]interface{} {
|
||||
|
||||
// [group][version][resource]
|
||||
func setComplexArmoResourceMap(frameworks []reporthandling.Framework, resourceToControls map[string][]string) map[string]map[string]map[string]interface{} {
|
||||
k8sResources := make(map[string]map[string]map[string]interface{})
|
||||
for _, framework := range frameworks {
|
||||
for _, control := range framework.Controls {
|
||||
for _, rule := range control.Rules {
|
||||
for _, match := range rule.Match {
|
||||
insertK8sResources(k8sResources, match)
|
||||
for _, match := range rule.DynamicMatch {
|
||||
insertArmoResourcesAndControls(k8sResources, match, resourceToControls, control)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return k8sResources
|
||||
}
|
||||
func insertK8sResources(k8sResources map[string]map[string]map[string]interface{}, match reporthandling.RuleMatchObjects) {
|
||||
|
||||
func mapArmoResourceToApiGroup(resource string) []string {
	if val, ok := hostsensorutils.MapResourceToApiGroup[resource]; ok {
		return []string{val}
	}
	return MapResourceToApiGroupCloud[resource]
}

func insertControls(resource string, resourceToControl map[string][]string, control reporthandling.Control) {
	armoResources := mapArmoResourceToApiGroup(resource)
	for _, armoResource := range armoResources {
		group, version := k8sinterface.SplitApiVersion(armoResource)
		r := k8sinterface.JoinResourceTriplets(group, version, resource)
		if _, ok := resourceToControl[r]; !ok {
			resourceToControl[r] = append(resourceToControl[r], control.ControlID)
		} else {
			if !slices.Contains(resourceToControl[r], control.ControlID) {
				resourceToControl[r] = append(resourceToControl[r], control.ControlID)
			}
		}
	}
}
|
||||
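// Note: an illustrative sketch (not part of the commits above); the resource
// name and control are hypothetical.
//
// resourceToControl := map[string][]string{}
// var control reporthandling.Control // with ControlID set, e.g. "C-0034"
// insertControls("KubeletInfo", resourceToControl, control)
// insertControls("KubeletInfo", resourceToControl, control)
// // mapArmoResourceToApiGroup resolves the API group(s) for the resource, and
// // slices.Contains keeps the per-resource control list deduplicated, so the
// // control ID appears only once under each "<group>/<version>/KubeletInfo" key.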
|
||||
func insertResources(k8sResources map[string]map[string]map[string]interface{}, match reporthandling.RuleMatchObjects) {
|
||||
for _, apiGroup := range match.APIGroups {
|
||||
if v, ok := k8sResources[apiGroup]; !ok || v == nil {
|
||||
k8sResources[apiGroup] = make(map[string]map[string]interface{})
|
||||
@@ -69,6 +119,25 @@ func insertK8sResources(k8sResources map[string]map[string]map[string]interface{
|
||||
}
|
||||
}
|
||||
|
||||
func insertArmoResourcesAndControls(k8sResources map[string]map[string]map[string]interface{}, match reporthandling.RuleMatchObjects, resourceToControl map[string][]string, control reporthandling.Control) {
|
||||
for _, apiGroup := range match.APIGroups {
|
||||
if v, ok := k8sResources[apiGroup]; !ok || v == nil {
|
||||
k8sResources[apiGroup] = make(map[string]map[string]interface{})
|
||||
}
|
||||
for _, apiVersions := range match.APIVersions {
|
||||
if v, ok := k8sResources[apiGroup][apiVersions]; !ok || v == nil {
|
||||
k8sResources[apiGroup][apiVersions] = make(map[string]interface{})
|
||||
}
|
||||
for _, resource := range match.Resources {
|
||||
if _, ok := k8sResources[apiGroup][apiVersions][resource]; !ok {
|
||||
k8sResources[apiGroup][apiVersions][resource] = nil
|
||||
}
|
||||
insertControls(resource, resourceToControl, control)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getGroupNVersion(apiVersion string) (string, string) {
|
||||
gv := strings.Split(apiVersion, "/")
|
||||
group, version := "", ""
|
||||
|
||||
@@ -13,7 +13,7 @@ func TestGetK8sResources(t *testing.T) {
|
||||
func TestSetResourceMap(t *testing.T) {
|
||||
k8sinterface.InitializeMapResourcesMock()
|
||||
framework := reporthandling.MockFrameworkA()
|
||||
k8sResources := setResourceMap([]reporthandling.Framework{*framework})
|
||||
k8sResources := setK8sResourceMap([]reporthandling.Framework{*framework})
|
||||
resources := k8sinterface.ResourceGroupToString("*", "v1", "Pod")
|
||||
if len(resources) == 0 {
|
||||
t.Error("expected resources")
|
||||
@@ -43,9 +43,9 @@ func TestInsertK8sResources(t *testing.T) {
|
||||
APIVersions: []string{"v1"},
|
||||
Resources: []string{"secrets"},
|
||||
}
|
||||
insertK8sResources(k8sResources, match1)
|
||||
insertK8sResources(k8sResources, match2)
|
||||
insertK8sResources(k8sResources, match3)
|
||||
insertResources(k8sResources, match1)
|
||||
insertResources(k8sResources, match2)
|
||||
insertResources(k8sResources, match3)
|
||||
|
||||
apiGroup1, ok := k8sResources["apps"]
|
||||
if !ok {
|
||||
|
||||
@@ -3,12 +3,12 @@ package resourcehandler
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
armosecadaptorv1 "github.com/armosec/kubescape/core/pkg/registryadaptors/armosec/v1"
|
||||
"github.com/armosec/kubescape/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
armosecadaptorv1 "github.com/armosec/kubescape/v2/core/pkg/registryadaptors/armosec/v1"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
|
||||
"github.com/armosec/opa-utils/shared"
|
||||
)
|
||||
@@ -34,7 +34,7 @@ func NewRegistryAdaptors() (*RegistryAdaptors, error) {
|
||||
return registryAdaptors, nil
|
||||
}
|
||||
|
||||
func (registryAdaptors *RegistryAdaptors) collectImagesVulnerabilities(k8sResourcesMap *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata) error {
|
||||
func (registryAdaptors *RegistryAdaptors) collectImagesVulnerabilities(k8sResourcesMap *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, armoResourceMap *cautils.ArmoResources) error {
|
||||
logger.L().Debug("Collecting images vulnerabilities")
|
||||
|
||||
// list cluster images
|
||||
@@ -64,7 +64,7 @@ func (registryAdaptors *RegistryAdaptors) collectImagesVulnerabilities(k8sResour
|
||||
for i := range metaObjs {
|
||||
allResources[metaObjs[i].GetID()] = metaObjs[i]
|
||||
}
|
||||
(*k8sResourcesMap)[k8sinterface.JoinResourceTriplets(ImagevulnerabilitiesObjectGroup, ImagevulnerabilitiesObjectVersion, ImagevulnerabilitiesObjectKind)] = workloadinterface.ListMetaIDs(metaObjs)
|
||||
(*armoResourceMap)[k8sinterface.JoinResourceTriplets(ImagevulnerabilitiesObjectGroup, ImagevulnerabilitiesObjectVersion, ImagevulnerabilitiesObjectKind)] = workloadinterface.ListMetaIDs(metaObjs)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -4,15 +4,27 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils/getter"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/getter"
|
||||
giturls "github.com/whilp/git-urls"
|
||||
"k8s.io/utils/strings/slices"
|
||||
)
|
||||
|
||||
type IRepository interface {
|
||||
parse(fullURL string) error
|
||||
|
||||
setBranch(string) error
|
||||
setTree() error
|
||||
getYamlFromTree() []string
|
||||
setIsFile(bool)
|
||||
|
||||
getIsFile() bool
|
||||
getBranch() string
|
||||
getTree() tree
|
||||
|
||||
getFilesFromTree([]string) []string
|
||||
}
|
||||
|
||||
type innerTree struct {
|
||||
@@ -23,19 +35,24 @@ type tree struct {
|
||||
}
|
||||
|
||||
type GitHubRepository struct {
|
||||
// name string // <org>/<repo>
|
||||
host string
|
||||
name string // <org>/<repo>
|
||||
owner string //
|
||||
repo string //
|
||||
branch string
|
||||
path string
|
||||
token string
|
||||
isFile bool
|
||||
tree tree
|
||||
}
|
||||
type githubDefaultBranchAPI struct {
|
||||
DefaultBranch string `json:"default_branch"`
|
||||
}
|
||||
|
||||
func NewGitHubRepository(rep string) *GitHubRepository {
|
||||
func NewGitHubRepository() *GitHubRepository {
|
||||
return &GitHubRepository{
|
||||
host: "github",
|
||||
name: rep,
|
||||
host: "github.com",
|
||||
token: os.Getenv("GITHUB_TOKEN"),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,79 +62,143 @@ func ScanRepository(command string, branchOptional string) ([]string, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = repo.setBranch(branchOptional)
|
||||
if err != nil {
|
||||
if err := repo.parse(command); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = repo.setTree()
|
||||
if err != nil {
|
||||
if err := repo.setBranch(branchOptional); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := repo.setTree(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// get all paths that are of the yaml type, and build them into a valid url
|
||||
return repo.getYamlFromTree(), nil
|
||||
return repo.getFilesFromTree([]string{"yaml", "yml", "json"}), nil
|
||||
}
|
||||
|
||||
func getHostAndRepoName(url string) (string, string, error) {
|
||||
splitUrl := strings.Split(url, "/")
|
||||
|
||||
if len(splitUrl) != 5 {
|
||||
return "", "", fmt.Errorf("failed to pars url: %s", url)
|
||||
func getHost(fullURL string) (string, error) {
|
||||
parsedURL, err := giturls.Parse(fullURL)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
hostUrl := splitUrl[2] // github.com, gitlab.com, etc.
|
||||
repository := splitUrl[3] + "/" + strings.Split(splitUrl[4], ".")[0] // user/reposetory
|
||||
|
||||
return hostUrl, repository, nil
|
||||
return parsedURL.Host, nil
|
||||
}
|
||||
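// Note: a small sketch (not part of the commits above) of what getHost returns
// for the supported hosts; the URLs are the same examples used in the new tests.
//
// host, _ := getHost("https://github.com/armosec/kubescape")
// // host == "github.com"
// host, _ = getHost("https://raw.githubusercontent.com/armosec/kubescape/master/examples/online-boutique/adservice.yaml")
// // host == "raw.githubusercontent.com"
// // Any other host falls through getRepository's switch and yields the
// // "unknown repository host" error.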
|
||||
func getRepository(url string) (IRepository, error) {
|
||||
hostUrl, repoName, err := getHostAndRepoName(url)
|
||||
func getRepository(fullURL string) (IRepository, error) {
|
||||
hostUrl, err := getHost(fullURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var repo IRepository
|
||||
switch repoHost := strings.Split(hostUrl, ".")[0]; repoHost {
|
||||
case "github":
|
||||
repo = NewGitHubRepository(repoName)
|
||||
switch hostUrl {
|
||||
case "github.com":
|
||||
repo = NewGitHubRepository()
|
||||
case "raw.githubusercontent.com":
|
||||
repo = NewGitHubRepository()
|
||||
repo.setIsFile(true)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown repository host: %s", repoHost)
|
||||
return nil, fmt.Errorf("unknown repository host: %s", hostUrl)
|
||||
}
|
||||
|
||||
// Returns the host-url, and the part of the user and repository from the url
|
||||
return repo, nil
|
||||
}
|
||||
func (g *GitHubRepository) parse(fullURL string) error {
|
||||
parsedURL, err := giturls.Parse(fullURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
index := 0
|
||||
|
||||
splittedRepo := strings.FieldsFunc(parsedURL.Path, func(c rune) bool { return c == '/' })
|
||||
if len(splittedRepo) < 2 {
|
||||
return fmt.Errorf("expecting <user>/<repo> in url path, received: '%s'", parsedURL.Path)
|
||||
}
|
||||
g.owner = splittedRepo[index]
|
||||
index += 1
|
||||
g.repo = splittedRepo[index]
|
||||
index += 1
|
||||
|
||||
// root of repo
|
||||
if len(splittedRepo) < index+1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// is file or dir
|
||||
switch splittedRepo[index] {
|
||||
case "blob":
|
||||
g.isFile = true
|
||||
index += 1
|
||||
case "tree":
|
||||
g.isFile = false
|
||||
index += 1
|
||||
}
|
||||
|
||||
if len(splittedRepo) < index+1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
g.branch = splittedRepo[index]
|
||||
index += 1
|
||||
|
||||
if len(splittedRepo) < index+1 {
|
||||
return nil
|
||||
}
|
||||
g.path = strings.Join(splittedRepo[index:], "/")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g *GitHubRepository) getBranch() string { return g.branch }
|
||||
func (g *GitHubRepository) getTree() tree { return g.tree }
|
||||
func (g *GitHubRepository) setIsFile(isFile bool) { g.isFile = isFile }
|
||||
func (g *GitHubRepository) getIsFile() bool { return g.isFile }
|
||||
|
||||
func (g *GitHubRepository) setBranch(branchOptional string) error {
|
||||
// Checks whether the repository type is a master or another type.
|
||||
// By default it is "master", unless the branchOptional came with a value
|
||||
if branchOptional == "" {
|
||||
|
||||
body, err := getter.HttpGetter(&http.Client{}, g.defaultBranchAPI(), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var data githubDefaultBranchAPI
|
||||
err = json.Unmarshal([]byte(body), &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
g.branch = data.DefaultBranch
|
||||
} else {
|
||||
if branchOptional != "" {
|
||||
g.branch = branchOptional
|
||||
}
|
||||
if g.branch != "" {
|
||||
return nil
|
||||
}
|
||||
body, err := getter.HttpGetter(&http.Client{}, g.defaultBranchAPI(), g.getHeaders())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var data githubDefaultBranchAPI
|
||||
err = json.Unmarshal([]byte(body), &data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
g.branch = data.DefaultBranch
|
||||
return nil
|
||||
}
|
||||
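// Note: an illustrative sketch (not part of the commits above) of the GitHub
// API response setBranch relies on when no branch was supplied and none was
// parsed from the URL; only "default_branch" is read.
//
// // GET https://api.github.com/repos/<owner>/<repo>
// var data githubDefaultBranchAPI
// _ = json.Unmarshal([]byte(`{"default_branch":"master"}`), &data)
// // data.DefaultBranch == "master"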
|
||||
func (g *GitHubRepository) defaultBranchAPI() string {
|
||||
return fmt.Sprintf("https://api.github.com/repos/%s", g.name)
|
||||
func joinOwnerNRepo(owner, repo string) string {
|
||||
return fmt.Sprintf("%s/%s", owner, repo)
|
||||
}
|
||||
func (g *GitHubRepository) defaultBranchAPI() string {
|
||||
return fmt.Sprintf("https://api.github.com/repos/%s", joinOwnerNRepo(g.owner, g.repo))
|
||||
}
|
||||
func (g *GitHubRepository) getHeaders() map[string]string {
|
||||
if g.token == "" {
|
||||
return nil
|
||||
}
|
||||
return map[string]string{"Authorization": fmt.Sprintf("token %s", g.token)}
|
||||
}
|
||||
|
||||
func (g *GitHubRepository) setTree() error {
|
||||
body, err := getter.HttpGetter(&http.Client{}, g.treeAPI(), nil)
|
||||
if g.isFile {
|
||||
return nil
|
||||
}
|
||||
|
||||
body, err := getter.HttpGetter(&http.Client{}, g.treeAPI(), g.getHeaders())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -135,14 +216,24 @@ func (g *GitHubRepository) setTree() error {
|
||||
}
|
||||
|
||||
func (g *GitHubRepository) treeAPI() string {
|
||||
return fmt.Sprintf("https://api.github.com/repos/%s/git/trees/%s?recursive=1", g.name, g.branch)
|
||||
return fmt.Sprintf("https://api.github.com/repos/%s/git/trees/%s?recursive=1", joinOwnerNRepo(g.owner, g.repo), g.branch)
|
||||
}
|
||||
|
||||
// return a list of files with the requested extensions for a given repository tree
|
||||
func (g *GitHubRepository) getYamlFromTree() []string {
|
||||
func (g *GitHubRepository) getFilesFromTree(filesExtensions []string) []string {
|
||||
var urls []string
|
||||
if g.isFile {
|
||||
if slices.Contains(filesExtensions, getFileExtension(g.path)) {
|
||||
return []string{fmt.Sprintf("%s/%s", g.rowYamlUrl(), g.path)}
|
||||
} else {
|
||||
return []string{}
|
||||
}
|
||||
}
|
||||
for _, path := range g.tree.InnerTrees {
|
||||
if strings.HasSuffix(path.Path, ".yaml") {
|
||||
if g.path != "" && !strings.HasPrefix(path.Path, g.path) {
|
||||
continue
|
||||
}
|
||||
if slices.Contains(filesExtensions, getFileExtension(path.Path)) {
|
||||
urls = append(urls, fmt.Sprintf("%s/%s", g.rowYamlUrl(), path.Path))
|
||||
}
|
||||
}
|
||||
@@ -150,5 +241,9 @@ func (g *GitHubRepository) getYamlFromTree() []string {
|
||||
}
|
||||
|
||||
func (g *GitHubRepository) rowYamlUrl() string {
|
||||
return fmt.Sprintf("https://raw.githubusercontent.com/%s/%s", g.name, g.branch)
|
||||
return fmt.Sprintf("https://raw.githubusercontent.com/%s/%s", joinOwnerNRepo(g.owner, g.repo), g.branch)
|
||||
}
|
||||
|
||||
func getFileExtension(path string) string {
|
||||
return strings.TrimPrefix(filepath.Ext(path), ".")
|
||||
}
|
||||
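// Note: a few illustrative values (not part of the commits above) for
// getFileExtension, which getFilesFromTree compares against the allowed
// extensions ("yaml", "yml", "json"):
//
// getFileExtension("examples/online-boutique/adservice.yaml") // "yaml"
// getFileExtension("kustomization.yml")                       // "yml"
// getFileExtension("README.md")                               // "md"  -> filtered out
// getFileExtension("Makefile")                                // ""    -> filtered out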
|
||||
140
core/pkg/resourcehandler/repositoryscanner_test.go
Normal file
@@ -0,0 +1,140 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
var (
|
||||
urlA = "https://github.com/armosec/kubescape"
|
||||
urlB = "https://github.com/armosec/kubescape/blob/master/examples/online-boutique/adservice.yaml"
|
||||
urlC = "https://github.com/armosec/kubescape/tree/master/examples/online-boutique"
|
||||
urlD = "https://raw.githubusercontent.com/armosec/kubescape/master/examples/online-boutique/adservice.yaml"
|
||||
)
|
||||
|
||||
func TestScanRepository(t *testing.T) {
|
||||
{
|
||||
files, err := ScanRepository(urlA, "")
|
||||
assert.NoError(t, err)
|
||||
assert.Less(t, 0, len(files))
|
||||
}
|
||||
{
|
||||
files, err := ScanRepository(urlB, "")
|
||||
assert.NoError(t, err)
|
||||
assert.Less(t, 0, len(files))
|
||||
}
|
||||
{
|
||||
files, err := ScanRepository(urlC, "")
|
||||
assert.NoError(t, err)
|
||||
assert.Less(t, 0, len(files))
|
||||
}
|
||||
{
|
||||
files, err := ScanRepository(urlD, "")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(files))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGetHost(t *testing.T) {
|
||||
{
|
||||
host, err := getHost(urlA)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "github.com", host)
|
||||
}
|
||||
{
|
||||
host, err := getHost(urlB)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "github.com", host)
|
||||
}
|
||||
{
|
||||
host, err := getHost(urlC)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "github.com", host)
|
||||
}
|
||||
{
|
||||
host, err := getHost(urlD)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "raw.githubusercontent.com", host)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGithubSetBranch(t *testing.T) {
|
||||
{
|
||||
gh := NewGitHubRepository()
|
||||
assert.NoError(t, gh.parse(urlA))
|
||||
assert.NoError(t, gh.setBranch(""))
|
||||
assert.Equal(t, "master", gh.getBranch())
|
||||
}
|
||||
{
|
||||
gh := NewGitHubRepository()
|
||||
assert.NoError(t, gh.parse(urlB))
|
||||
err := gh.setBranch("dev")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "dev", gh.getBranch())
|
||||
}
|
||||
}
|
||||
|
||||
func TestGithubSetTree(t *testing.T) {
|
||||
{
|
||||
gh := NewGitHubRepository()
|
||||
assert.NoError(t, gh.parse(urlA))
|
||||
assert.NoError(t, gh.setBranch(""))
|
||||
err := gh.setTree()
|
||||
assert.NoError(t, err)
|
||||
assert.Less(t, 0, len(gh.getTree().InnerTrees))
|
||||
}
|
||||
}
|
||||
func TestGithubGetYamlFromTree(t *testing.T) {
|
||||
{
|
||||
gh := NewGitHubRepository()
|
||||
assert.NoError(t, gh.parse(urlA))
|
||||
assert.NoError(t, gh.setBranch(""))
|
||||
assert.NoError(t, gh.setTree())
|
||||
files := gh.getFilesFromTree([]string{"yaml"})
|
||||
assert.Less(t, 0, len(files))
|
||||
}
|
||||
{
|
||||
gh := NewGitHubRepository()
|
||||
assert.NoError(t, gh.parse(urlB))
|
||||
assert.NoError(t, gh.setBranch(""))
|
||||
assert.NoError(t, gh.setTree())
|
||||
files := gh.getFilesFromTree([]string{"yaml"})
|
||||
assert.Equal(t, 1, len(files))
|
||||
}
|
||||
{
|
||||
gh := NewGitHubRepository()
|
||||
assert.NoError(t, gh.parse(urlC))
|
||||
assert.NoError(t, gh.setBranch(""))
|
||||
assert.NoError(t, gh.setTree())
|
||||
files := gh.getFilesFromTree([]string{"yaml"})
|
||||
assert.Equal(t, 12, len(files))
|
||||
}
|
||||
}
|
||||
|
||||
func TestGithubParse(t *testing.T) {
|
||||
{
|
||||
gh := NewGitHubRepository()
|
||||
assert.NoError(t, gh.parse(urlA))
|
||||
assert.Equal(t, "armosec/kubescape", joinOwnerNRepo(gh.owner, gh.repo))
|
||||
}
|
||||
{
|
||||
gh := NewGitHubRepository()
|
||||
assert.NoError(t, gh.parse(urlB))
|
||||
assert.Equal(t, "armosec/kubescape", joinOwnerNRepo(gh.owner, gh.repo))
|
||||
assert.Equal(t, "master", gh.branch)
|
||||
assert.Equal(t, "examples/online-boutique/adservice.yaml", gh.path)
|
||||
assert.True(t, gh.isFile)
|
||||
assert.Equal(t, 1, len(gh.getFilesFromTree([]string{"yaml"})))
|
||||
assert.Equal(t, 0, len(gh.getFilesFromTree([]string{"yml"})))
|
||||
}
|
||||
{
|
||||
gh := NewGitHubRepository()
|
||||
assert.NoError(t, gh.parse(urlC))
|
||||
assert.Equal(t, "armosec/kubescape", joinOwnerNRepo(gh.owner, gh.repo))
|
||||
assert.Equal(t, "master", gh.branch)
|
||||
assert.Equal(t, "examples/online-boutique", gh.path)
|
||||
assert.False(t, gh.isFile)
|
||||
}
|
||||
}
|
||||
@@ -3,12 +3,11 @@ package resourcehandler
import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/kubescape/v2/core/cautils"
"k8s.io/apimachinery/pkg/version"
)

type IResourceHandler interface {
GetResources([]reporthandling.Framework, *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, error)
GetResources(*cautils.OPASessionObj, *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.ArmoResources, error)
GetClusterAPIServerInfo() *version.Info
}

@@ -1,81 +1,48 @@
package resourcehandler

import (
"bytes"
"fmt"
"io"
"net/http"
"strings"

giturl "github.com/armosec/go-git-url"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
)

func loadResourcesFromUrl(inputPatterns []string) ([]workloadinterface.IMetadata, error) {
urls := listUrls(inputPatterns)
if len(urls) == 0 {
func loadResourcesFromUrl(inputPatterns []string) (map[string][]workloadinterface.IMetadata, error) {
if len(inputPatterns) == 0 {
return nil, nil
}
g, err := giturl.NewGitURL(inputPatterns[0])
if err != nil {
return nil, nil
}

workloads, errs := downloadFiles(urls)
files, errs := g.DownloadFilesWithExtension(append(cautils.YAML_PREFIX, cautils.JSON_PREFIX...))
if len(errs) > 0 {
logger.L().Error(fmt.Sprintf("%v", errs))
}
return workloads, nil
}

func listUrls(patterns []string) []string {
urls := []string{}
for i := range patterns {
if strings.HasPrefix(patterns[i], "http") {
if !cautils.IsYaml(patterns[i]) && !cautils.IsJson(patterns[i]) { // if url of repo
if yamls, err := ScanRepository(patterns[i], ""); err == nil { // TODO - support branch
urls = append(urls, yamls...)
} else {
logger.L().Error(err.Error())
}
} else { // url of single file
urls = append(urls, patterns[i])
}
for i, j := range errs {
logger.L().Error(i, helpers.Error(j))
}
}

return urls
}
if len(files) == 0 {
return nil, nil
}

func downloadFiles(urls []string) ([]workloadinterface.IMetadata, []error) {
workloads := []workloadinterface.IMetadata{}
errs := []error{}
for i := range urls {
f, err := downloadFile(urls[i])
if err != nil {
errs = append(errs, err)
// convert files to IMetadata
workloads := make(map[string][]workloadinterface.IMetadata, 0)

for i, j := range files {
w, e := cautils.ReadFile(j, cautils.GetFileFormat(i))
if len(e) != 0 || len(w) == 0 {
continue
}
w, e := cautils.ReadFile(f, cautils.GetFileFormat(urls[i]))
errs = append(errs, e...)
if w != nil {
workloads = append(workloads, w...)
if _, ok := workloads[i]; !ok {
workloads[i] = make([]workloadinterface.IMetadata, 0)
}
wSlice := workloads[i]
wSlice = append(wSlice, w...)
workloads[i] = wSlice
}
return workloads, errs
}

func downloadFile(url string) ([]byte, error) {
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || 301 < resp.StatusCode {
return nil, fmt.Errorf("failed to download file, url: '%s', status code: %s", url, resp.Status)
}
return streamToByte(resp.Body), nil
}

func streamToByte(stream io.Reader) []byte {
buf := new(bytes.Buffer)
buf.ReadFrom(stream)
return buf.Bytes()
return workloads, nil
}

66
core/pkg/resourcehandler/urlloader_test.go
Normal file
@@ -0,0 +1,66 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestLoadResourcesFromUrl(t *testing.T) {
|
||||
{
|
||||
workloads, err := loadResourcesFromUrl([]string{"https://github.com/armosec/kubescape/tree/master/examples/online-boutique"})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 12, len(workloads))
|
||||
|
||||
for i, w := range workloads {
|
||||
switch i {
|
||||
case "https://raw.githubusercontent.com/armosec/kubescape/master/examples/online-boutique/adservice.yaml":
|
||||
assert.Equal(t, 2, len(w))
|
||||
assert.Equal(t, "apps/v1//Deployment/adservice", w[0].GetID())
|
||||
assert.Equal(t, "/v1//Service/adservice", w[1].GetID())
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
workloads, err := loadResourcesFromUrl([]string{"https://github.com/armosec/kubescape"})
|
||||
assert.NoError(t, err)
|
||||
assert.Less(t, 12, len(workloads))
|
||||
|
||||
for i, w := range workloads {
|
||||
switch i {
|
||||
case "https://raw.githubusercontent.com/armosec/kubescape/master/examples/online-boutique/adservice.yaml":
|
||||
assert.Equal(t, 2, len(w))
|
||||
assert.Equal(t, "apps/v1//Deployment/adservice", w[0].GetID())
|
||||
assert.Equal(t, "/v1//Service/adservice", w[1].GetID())
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
workloads, err := loadResourcesFromUrl([]string{"https://github.com/armosec/kubescape/blob/master/examples/online-boutique/adservice.yaml"})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(workloads))
|
||||
|
||||
for i, w := range workloads {
|
||||
switch i {
|
||||
case "https://raw.githubusercontent.com/armosec/kubescape/master/examples/online-boutique/adservice.yaml":
|
||||
assert.Equal(t, 2, len(w))
|
||||
assert.Equal(t, "apps/v1//Deployment/adservice", w[0].GetID())
|
||||
assert.Equal(t, "/v1//Service/adservice", w[1].GetID())
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
workloads, err := loadResourcesFromUrl([]string{"https://raw.githubusercontent.com/armosec/kubescape/master/examples/online-boutique/adservice.yaml"})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(workloads))
|
||||
|
||||
for i, w := range workloads {
|
||||
switch i {
|
||||
case "https://raw.githubusercontent.com/armosec/kubescape/master/examples/online-boutique/adservice.yaml":
|
||||
assert.Equal(t, 2, len(w))
|
||||
assert.Equal(t, "apps/v1//Deployment/adservice", w[0].GetID())
|
||||
assert.Equal(t, "/v1//Service/adservice", w[1].GetID())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5,8 +5,8 @@ import (
"os"
"path/filepath"

"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
)

var INDENT = " "

@@ -5,9 +5,9 @@ import (
"fmt"
"os"

"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/pkg/resultshandling/printer"
"github.com/armosec/kubescape/v2/core/cautils"
"github.com/armosec/kubescape/v2/core/cautils/logger"
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
)

type JsonPrinter struct {
@@ -23,7 +23,7 @@ func (jsonPrinter *JsonPrinter) SetWriter(outputFile string) {
}

func (jsonPrinter *JsonPrinter) Score(score float32) {
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", int(score))
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", cautils.Float32ToInt(score))
}

func (jsonPrinter *JsonPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {

@@ -5,9 +5,9 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
@@ -27,7 +27,7 @@ func (prometheusPrinter *PrometheusPrinter) SetWriter(outputFile string) {
|
||||
}
|
||||
|
||||
func (prometheusPrinter *PrometheusPrinter) Score(score float32) {
|
||||
fmt.Printf("\n# Overall risk-score (0- Excellent, 100- All failed)\nkubescape_score %d\n", int(score))
|
||||
fmt.Printf("\n# Overall risk-score (0- Excellent, 100- All failed)\nkubescape_score %d\n", cautils.Float32ToInt(score))
|
||||
}
|
||||
|
||||
func (printer *PrometheusPrinter) printResources(allResources map[string]workloadinterface.IMetadata, resourcesIDs *reporthandling.ResourcesIDs, frameworkName, controlName string) {
|
||||
|
||||
@@ -4,33 +4,111 @@ import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/fatih/color"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
func generateRow(controlSummary reportsummary.IControlSummary) []string {
|
||||
row := []string{controlSummary.GetName()}
|
||||
row = append(row, fmt.Sprintf("%d", controlSummary.NumberOfResources().Failed()))
|
||||
row = append(row, fmt.Sprintf("%d", controlSummary.NumberOfResources().Excluded()))
|
||||
row = append(row, fmt.Sprintf("%d", controlSummary.NumberOfResources().All()))
|
||||
const (
|
||||
columnSeverity = iota
|
||||
columnName = iota
|
||||
columnCounterFailed = iota
|
||||
columnCounterExclude = iota
|
||||
columnCounterAll = iota
|
||||
columnRiskScore = iota
|
||||
_rowLen = iota
|
||||
)
|
||||
|
||||
if !controlSummary.GetStatus().IsSkipped() {
|
||||
row = append(row, fmt.Sprintf("%d", int(controlSummary.GetScore()))+"%")
|
||||
} else {
|
||||
row = append(row, "skipped")
|
||||
func generateRow(controlSummary reportsummary.IControlSummary, infoToPrintInfo []infoStars, verbose bool) []string {
|
||||
row := make([]string, _rowLen)
|
||||
|
||||
// ignore passed results
|
||||
if !verbose && (controlSummary.GetStatus().IsPassed()) {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
// ignore irrelevant results
|
||||
if !verbose && (controlSummary.GetStatus().IsSkipped() && controlSummary.GetStatus().Status() == apis.StatusIrrelevant) {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
row[columnSeverity] = getSeverityColumn(controlSummary)
|
||||
row[columnName] = controlSummary.GetName()
|
||||
row[columnCounterFailed] = fmt.Sprintf("%d", controlSummary.NumberOfResources().Failed())
|
||||
row[columnCounterExclude] = fmt.Sprintf("%d", controlSummary.NumberOfResources().Excluded())
|
||||
row[columnCounterAll] = fmt.Sprintf("%d", controlSummary.NumberOfResources().All())
|
||||
row[columnRiskScore] = getRiskScoreColumn(controlSummary, infoToPrintInfo)
|
||||
|
||||
return row
|
||||
}
|
||||
|
||||
func getSortedControlsNames(controls reportsummary.ControlSummaries) []string {
|
||||
controlNames := make([]string, 0, len(controls))
|
||||
func getInfoColumn(controlSummary reportsummary.IControlSummary, infoToPrintInfo []infoStars) string {
|
||||
for i := range infoToPrintInfo {
|
||||
if infoToPrintInfo[i].info == controlSummary.GetStatus().Info() {
|
||||
return infoToPrintInfo[i].stars
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func getRiskScoreColumn(controlSummary reportsummary.IControlSummary, infoToPrintInfo []infoStars) string {
|
||||
if controlSummary.GetStatus().IsSkipped() {
|
||||
return fmt.Sprintf("%s%s", controlSummary.GetStatus().Status(), getInfoColumn(controlSummary, infoToPrintInfo))
|
||||
}
|
||||
return fmt.Sprintf("%d", cautils.Float32ToInt(controlSummary.GetScore())) + "%"
|
||||
}
|
||||
|
||||
func getSeverityColumn(controlSummary reportsummary.IControlSummary) string {
|
||||
return color.New(getColor(apis.ControlSeverityToInt(controlSummary.GetScoreFactor())), color.Bold).SprintFunc()(apis.ControlSeverityToString(controlSummary.GetScoreFactor()))
|
||||
}
|
||||
func getColor(controlSeverity int) color.Attribute {
|
||||
switch controlSeverity {
|
||||
case apis.SeverityCritical:
|
||||
return color.FgRed
|
||||
case apis.SeverityHigh:
|
||||
return color.FgYellow
|
||||
case apis.SeverityMedium:
|
||||
return color.FgCyan
|
||||
case apis.SeverityLow:
|
||||
return color.FgWhite
|
||||
default:
|
||||
return color.FgWhite
|
||||
}
|
||||
}
|
||||
|
||||
func getSortedControlsNames(controls reportsummary.ControlSummaries) [][]string {
|
||||
controlNames := make([][]string, 5)
|
||||
for k := range controls {
|
||||
c := controls[k]
|
||||
controlNames = append(controlNames, c.GetName())
|
||||
i := apis.ControlSeverityToInt(c.GetScoreFactor())
|
||||
controlNames[i] = append(controlNames[i], c.GetName())
|
||||
}
|
||||
for i := range controlNames {
|
||||
sort.Strings(controlNames[i])
|
||||
}
|
||||
sort.Strings(controlNames)
|
||||
return controlNames
|
||||
}
|
||||
|
||||
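The reshaped getSortedControlsNames now returns one bucket of sorted control names per severity level (the index comes from apis.ControlSeverityToInt), and the table and PDF printers later walk the buckets from the highest index down, which puts the most severe controls first, assuming ControlSeverityToInt maps higher severities to larger ints. A minimal standalone sketch of that iteration pattern (bucket contents are illustrative only, not kubescape controls):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// one bucket per severity level, index 0..4, matching controlNames := make([][]string, 5) above
	buckets := make([][]string, 5)
	buckets[4] = append(buckets[4], "a critical control")
	buckets[1] = append(buckets[1], "a low-severity control")

	// walk from the highest severity down, as printSummaryTable/printTable do
	for i := len(buckets) - 1; i >= 0; i-- {
		sort.Strings(buckets[i])
		for _, name := range buckets[i] {
			fmt.Println(name)
		}
	}
}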
func getControlTableHeaders() []string {
|
||||
return []string{"CONTROL NAME", "FAILED RESOURCES", "EXCLUDED RESOURCES", "ALL RESOURCES", "% RISK-SCORE"}
|
||||
headers := make([]string, _rowLen)
|
||||
headers[columnName] = "CONTROL NAME"
|
||||
headers[columnCounterFailed] = "FAILED RESOURCES"
|
||||
headers[columnCounterExclude] = "EXCLUDED RESOURCES"
|
||||
headers[columnCounterAll] = "ALL RESOURCES"
|
||||
headers[columnSeverity] = "SEVERITY"
|
||||
headers[columnRiskScore] = "% RISK-SCORE"
|
||||
return headers
|
||||
}
|
||||
|
||||
func getColumnsAlignments() []int {
|
||||
alignments := make([]int, _rowLen)
|
||||
alignments[columnName] = tablewriter.ALIGN_LEFT
|
||||
alignments[columnCounterFailed] = tablewriter.ALIGN_CENTER
|
||||
alignments[columnCounterExclude] = tablewriter.ALIGN_CENTER
|
||||
alignments[columnCounterAll] = tablewriter.ALIGN_CENTER
|
||||
alignments[columnSeverity] = tablewriter.ALIGN_LEFT
|
||||
alignments[columnRiskScore] = tablewriter.ALIGN_CENTER
|
||||
return alignments
|
||||
}
|
||||
|
||||
@@ -5,10 +5,10 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
)
|
||||
|
||||
type JsonPrinter struct {
|
||||
@@ -24,7 +24,7 @@ func (jsonPrinter *JsonPrinter) SetWriter(outputFile string) {
|
||||
}
|
||||
|
||||
func (jsonPrinter *JsonPrinter) Score(score float32) {
|
||||
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", int(score))
|
||||
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", cautils.Float32ToInt(score))
|
||||
}
|
||||
|
||||
func (jsonPrinter *JsonPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
@@ -32,7 +32,6 @@ func (jsonPrinter *JsonPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj
|
||||
if err != nil {
|
||||
logger.L().Fatal("failed to Marshal posture report object")
|
||||
}
|
||||
jsonPrinter.writer.Write(r)
|
||||
|
||||
logOUtputFile(jsonPrinter.writer.Name())
|
||||
if _, err := jsonPrinter.writer.Write(r); err != nil {
|
||||
|
||||
@@ -8,10 +8,10 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/armosec/opa-utils/shared"
|
||||
)
|
||||
@@ -100,7 +100,7 @@ func (junitPrinter *JunitPrinter) SetWriter(outputFile string) {
|
||||
}
|
||||
|
||||
func (junitPrinter *JunitPrinter) Score(score float32) {
|
||||
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", int(score))
|
||||
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", cautils.Float32ToInt(score))
|
||||
}
|
||||
|
||||
func (junitPrinter *JunitPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
@@ -128,7 +128,7 @@ func listTestsSuite(results *cautils.OPASessionObj) []JUnitTestSuite {
|
||||
var testSuites []JUnitTestSuite
|
||||
|
||||
// control scan
|
||||
if len(results.Report.SummaryDetails.ListFrameworks().All()) == 0 {
|
||||
if len(results.Report.SummaryDetails.ListFrameworks()) == 0 {
|
||||
testSuite := JUnitTestSuite{}
|
||||
testSuite.Failures = results.Report.SummaryDetails.NumberOfControls().Failed()
|
||||
testSuite.Timestamp = results.Report.ReportGenerationTime.String()
|
||||
@@ -147,7 +147,7 @@ func listTestsSuite(results *cautils.OPASessionObj) []JUnitTestSuite {
|
||||
testSuite.ID = i
|
||||
testSuite.Name = f.Name
|
||||
testSuite.Properties = properties(f.Score)
|
||||
testSuite.TestCases = testsCases(results, f.ListControls(), f.GetName())
|
||||
testSuite.TestCases = testsCases(results, f.GetControls(), f.GetName())
|
||||
testSuites = append(testSuites, testSuite)
|
||||
}
|
||||
|
||||
@@ -176,7 +176,7 @@ func testsCases(results *cautils.OPASessionObj, controls reportsummary.IControls
|
||||
testCaseFailure := JUnitFailure{}
|
||||
testCaseFailure.Type = "Control"
|
||||
// testCaseFailure.Contents =
|
||||
testCaseFailure.Message = fmt.Sprintf("Remediation: %s\nMore details: %s\n\n%s", control.GetRemediation(), getControlURL(control.GetID()), strings.Join(resourcesStr, "\n"))
|
||||
testCaseFailure.Message = fmt.Sprintf("Remediation: %s\nMore details: %s\n\n%s", control.GetRemediation(), getControlLink(control.GetID()), strings.Join(resourcesStr, "\n"))
|
||||
|
||||
testCase.Failure = &testCaseFailure
|
||||
} else if control.GetStatus().IsSkipped() {
|
||||
|
||||
@@ -9,10 +9,10 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/johnfercher/maroto/pkg/color"
|
||||
"github.com/johnfercher/maroto/pkg/consts"
|
||||
@@ -31,8 +31,7 @@ var (
|
||||
)
|
||||
|
||||
type PdfPrinter struct {
|
||||
writer *os.File
|
||||
sortedControlNames []string
|
||||
writer *os.File
|
||||
}
|
||||
|
||||
func NewPdfPrinter() *PdfPrinter {
|
||||
@@ -52,17 +51,39 @@ func (pdfPrinter *PdfPrinter) SetWriter(outputFile string) {
|
||||
}
|
||||
|
||||
func (pdfPrinter *PdfPrinter) Score(score float32) {
|
||||
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", int(score))
|
||||
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", cautils.Float32ToInt(score))
|
||||
}
|
||||
func (pdfPrinter *PdfPrinter) printInfo(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails, infoMap []infoStars) {
|
||||
emptyRowCounter := 1
|
||||
for i := range infoMap {
|
||||
if infoMap[i].info != "" {
|
||||
m.Row(5, func() {
|
||||
m.Col(1, func() {
|
||||
m.Text(fmt.Sprintf("%v", infoMap[i].info))
|
||||
})
|
||||
m.Col(12, func() {
|
||||
m.Text(fmt.Sprintf("%v", infoMap[i].stars))
|
||||
})
|
||||
})
|
||||
if emptyRowCounter < len(infoMap) {
|
||||
m.Row(2.5, func() {})
|
||||
emptyRowCounter++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (pdfPrinter *PdfPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
pdfPrinter.sortedControlNames = getSortedControlsNames(opaSessionObj.Report.SummaryDetails.Controls)
|
||||
sortedControlNames := getSortedControlsNames(opaSessionObj.Report.SummaryDetails.Controls)
|
||||
|
||||
infoToPrintInfo := mapInfoToPrintInfo(opaSessionObj.Report.SummaryDetails.Controls)
|
||||
m := pdf.NewMaroto(consts.Portrait, consts.A4)
|
||||
pdfPrinter.printHeader(m)
|
||||
pdfPrinter.printFramework(m, opaSessionObj.Report.SummaryDetails.ListFrameworks().All())
|
||||
pdfPrinter.printTable(m, &opaSessionObj.Report.SummaryDetails)
|
||||
pdfPrinter.printFramework(m, opaSessionObj.Report.SummaryDetails.ListFrameworks())
|
||||
pdfPrinter.printTable(m, &opaSessionObj.Report.SummaryDetails, sortedControlNames)
|
||||
pdfPrinter.printFinalResult(m, &opaSessionObj.Report.SummaryDetails)
|
||||
pdfPrinter.printInfo(m, &opaSessionObj.Report.SummaryDetails, infoToPrintInfo)
|
||||
|
||||
// Extract output buffer.
|
||||
outBuff, err := m.Output()
|
||||
@@ -115,7 +136,7 @@ func (pdfPrinter *PdfPrinter) printHeader(m pdf.Maroto) {
|
||||
}
|
||||
|
||||
// Print pdf frameworks after pdf header.
|
||||
func (pdfPrinter *PdfPrinter) printFramework(m pdf.Maroto, frameworks []reportsummary.IPolicies) {
|
||||
func (pdfPrinter *PdfPrinter) printFramework(m pdf.Maroto, frameworks []reportsummary.IFrameworkSummary) {
|
||||
m.Row(10, func() {
|
||||
m.Text(frameworksScoresToString(frameworks), props.Text{
|
||||
Align: consts.Center,
|
||||
@@ -127,14 +148,17 @@ func (pdfPrinter *PdfPrinter) printFramework(m pdf.Maroto, frameworks []reportsu
|
||||
}
|
||||
|
||||
// Create pdf table
|
||||
func (pdfPrinter *PdfPrinter) printTable(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails) {
|
||||
func (pdfPrinter *PdfPrinter) printTable(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails, sortedControlNames [][]string) {
|
||||
headers := getControlTableHeaders()
|
||||
controls := make([][]string, len(pdfPrinter.sortedControlNames))
|
||||
infoToPrintInfoMap := mapInfoToPrintInfo(summaryDetails.Controls)
|
||||
controls := make([][]string, len(sortedControlNames))
|
||||
for i := range controls {
|
||||
controls[i] = make([]string, len(headers))
|
||||
}
|
||||
for i := 0; i < len(pdfPrinter.sortedControlNames); i++ {
|
||||
controls[i] = generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, pdfPrinter.sortedControlNames[i]))
|
||||
for i := len(sortedControlNames) - 1; i >= 0; i-- {
|
||||
for _, c := range sortedControlNames[i] {
|
||||
controls[i] = generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, c), infoToPrintInfoMap, true)
|
||||
}
|
||||
}
|
||||
|
||||
m.TableList(headers, controls, props.TableList{
|
||||
@@ -163,7 +187,7 @@ func (pdfPrinter *PdfPrinter) printTable(m pdf.Maroto, summaryDetails *reportsum
|
||||
|
||||
// Add final results.
|
||||
func (pdfPrinter *PdfPrinter) printFinalResult(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails) {
|
||||
m.Row(5, func() {
|
||||
m.Row(_rowLen, func() {
|
||||
m.Col(3, func() {
|
||||
m.Text("Resource summary", props.Text{
|
||||
Align: consts.Left,
|
||||
|
||||
@@ -7,20 +7,20 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/core/cautils"
|
||||
"github.com/armosec/kubescape/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
helpersv1 "github.com/armosec/opa-utils/reporthandling/helpers/v1"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/enescakir/emoji"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
type PrettyPrinter struct {
|
||||
formatVersion string
|
||||
writer *os.File
|
||||
verboseMode bool
|
||||
sortedControlNames []string
|
||||
formatVersion string
|
||||
writer *os.File
|
||||
verboseMode bool
|
||||
}
|
||||
|
||||
func NewPrettyPrinter(verboseMode bool, formatVersion string) *PrettyPrinter {
|
||||
@@ -31,14 +31,14 @@ func NewPrettyPrinter(verboseMode bool, formatVersion string) *PrettyPrinter {
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
prettyPrinter.sortedControlNames = getSortedControlsNames(opaSessionObj.Report.SummaryDetails.Controls) // ListControls().All())
|
||||
fmt.Fprintf(prettyPrinter.writer, "\n"+getSperator("^")+"\n")
|
||||
|
||||
if prettyPrinter.formatVersion == "v1" {
|
||||
prettyPrinter.printResults(&opaSessionObj.Report.SummaryDetails.Controls, opaSessionObj.AllResources)
|
||||
} else if prettyPrinter.formatVersion == "v2" {
|
||||
prettyPrinter.resourceTable(opaSessionObj.ResourcesResult, opaSessionObj.AllResources)
|
||||
sortedControlNames := getSortedControlsNames(opaSessionObj.Report.SummaryDetails.Controls) // ListControls().All())
|
||||
|
||||
if prettyPrinter.verboseMode {
|
||||
prettyPrinter.resourceTable(opaSessionObj)
|
||||
}
|
||||
prettyPrinter.printSummaryTable(&opaSessionObj.Report.SummaryDetails)
|
||||
prettyPrinter.printSummaryTable(&opaSessionObj.Report.SummaryDetails, sortedControlNames)
|
||||
|
||||
}
|
||||
|
||||
@@ -49,13 +49,14 @@ func (prettyPrinter *PrettyPrinter) SetWriter(outputFile string) {
|
||||
func (prettyPrinter *PrettyPrinter) Score(score float32) {
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) printResults(controls *reportsummary.ControlSummaries, allResources map[string]workloadinterface.IMetadata) {
|
||||
for i := 0; i < len(prettyPrinter.sortedControlNames); i++ {
|
||||
|
||||
controlSummary := controls.GetControl(reportsummary.EControlCriteriaName, prettyPrinter.sortedControlNames[i]) // summaryDetails.Controls ListControls().All() Controls.GetControl(ca)
|
||||
prettyPrinter.printTitle(controlSummary)
|
||||
prettyPrinter.printResources(controlSummary, allResources)
|
||||
prettyPrinter.printSummary(prettyPrinter.sortedControlNames[i], controlSummary)
|
||||
func (prettyPrinter *PrettyPrinter) printResults(controls *reportsummary.ControlSummaries, allResources map[string]workloadinterface.IMetadata, sortedControlNames [][]string) {
|
||||
for i := len(sortedControlNames) - 1; i >= 0; i-- {
|
||||
for _, c := range sortedControlNames[i] {
|
||||
controlSummary := controls.GetControl(reportsummary.EControlCriteriaName, c) // summaryDetails.Controls ListControls().All() Controls.GetControl(ca)
|
||||
prettyPrinter.printTitle(controlSummary)
|
||||
prettyPrinter.printResources(controlSummary, allResources)
|
||||
prettyPrinter.printSummary(c, controlSummary)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -75,7 +76,7 @@ func (prettyPrinter *PrettyPrinter) printSummary(controlName string, controlSumm
|
||||
|
||||
}
|
||||
func (prettyPrinter *PrettyPrinter) printTitle(controlSummary reportsummary.IControlSummary) {
|
||||
cautils.InfoDisplay(prettyPrinter.writer, "[control: %s - %s] ", controlSummary.GetName(), getControlURL(controlSummary.GetID()))
|
||||
cautils.InfoDisplay(prettyPrinter.writer, "[control: %s - %s] ", controlSummary.GetName(), getControlLink(controlSummary.GetID()))
|
||||
switch controlSummary.GetStatus().Status() {
|
||||
case apis.StatusSkipped:
|
||||
cautils.InfoDisplay(prettyPrinter.writer, "skipped %v\n", emoji.ConfusedFace)
|
||||
@@ -83,10 +84,17 @@ func (prettyPrinter *PrettyPrinter) printTitle(controlSummary reportsummary.ICon
|
||||
cautils.FailureDisplay(prettyPrinter.writer, "failed %v\n", emoji.SadButRelievedFace)
|
||||
case apis.StatusExcluded:
|
||||
cautils.WarningDisplay(prettyPrinter.writer, "excluded %v\n", emoji.NeutralFace)
|
||||
case apis.StatusIrrelevant:
|
||||
cautils.SuccessDisplay(prettyPrinter.writer, "irrelevant %v\n", emoji.ConfusedFace)
|
||||
case apis.StatusError:
|
||||
cautils.WarningDisplay(prettyPrinter.writer, "error %v\n", emoji.ConfusedFace)
|
||||
default:
|
||||
cautils.SuccessDisplay(prettyPrinter.writer, "passed %v\n", emoji.ThumbsUp)
|
||||
}
|
||||
cautils.DescriptionDisplay(prettyPrinter.writer, "Description: %s\n", controlSummary.GetDescription())
|
||||
if controlSummary.GetStatus().Info() != "" {
|
||||
cautils.WarningDisplay(prettyPrinter.writer, "Reason: %v\n", controlSummary.GetStatus().Info())
|
||||
}
|
||||
}
|
||||
func (prettyPrinter *PrettyPrinter) printResources(controlSummary reportsummary.IControlSummary, allResources map[string]workloadinterface.IMetadata) {
|
||||
|
||||
@@ -160,38 +168,55 @@ func generateRelatedObjectsStr(workload WorkloadSummary) string {
|
||||
}
|
||||
func generateFooter(summaryDetails *reportsummary.SummaryDetails) []string {
|
||||
// Control name | # failed resources | all resources | % success
|
||||
row := []string{}
|
||||
row = append(row, "Resource Summary") //fmt.Sprintf(""%d", numControlers"))
|
||||
row = append(row, fmt.Sprintf("%d", summaryDetails.NumberOfResources().Failed()))
|
||||
row = append(row, fmt.Sprintf("%d", summaryDetails.NumberOfResources().Excluded()))
|
||||
row = append(row, fmt.Sprintf("%d", summaryDetails.NumberOfResources().All()))
|
||||
row = append(row, fmt.Sprintf("%.2f%s", summaryDetails.Score, "%"))
|
||||
row := make([]string, _rowLen)
|
||||
row[columnName] = "Resource Summary"
|
||||
row[columnCounterFailed] = fmt.Sprintf("%d", summaryDetails.NumberOfResources().Failed())
|
||||
row[columnCounterExclude] = fmt.Sprintf("%d", summaryDetails.NumberOfResources().Excluded())
|
||||
row[columnCounterAll] = fmt.Sprintf("%d", summaryDetails.NumberOfResources().All())
|
||||
row[columnSeverity] = " "
|
||||
row[columnRiskScore] = fmt.Sprintf("%.2f%s", summaryDetails.Score, "%")
|
||||
|
||||
return row
|
||||
}
|
||||
func (prettyPrinter *PrettyPrinter) printSummaryTable(summaryDetails *reportsummary.SummaryDetails) {
|
||||
func (prettyPrinter *PrettyPrinter) printSummaryTable(summaryDetails *reportsummary.SummaryDetails, sortedControlNames [][]string) {
|
||||
|
||||
cautils.InfoTextDisplay(prettyPrinter.writer, "\n"+controlCountersForSummary(summaryDetails.NumberOfControls())+"\n\n")
|
||||
|
||||
summaryTable := tablewriter.NewWriter(prettyPrinter.writer)
|
||||
summaryTable.SetAutoWrapText(false)
|
||||
summaryTable.SetHeader(getControlTableHeaders())
|
||||
summaryTable.SetHeaderLine(true)
|
||||
alignments := []int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER}
|
||||
summaryTable.SetColumnAlignment(alignments)
|
||||
summaryTable.SetColumnAlignment(getColumnsAlignments())
|
||||
|
||||
for i := 0; i < len(prettyPrinter.sortedControlNames); i++ {
|
||||
summaryTable.Append(generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, prettyPrinter.sortedControlNames[i])))
|
||||
infoToPrintInfo := mapInfoToPrintInfo(summaryDetails.Controls)
|
||||
for i := len(sortedControlNames) - 1; i >= 0; i-- {
|
||||
for _, c := range sortedControlNames[i] {
|
||||
row := generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, c), infoToPrintInfo, prettyPrinter.verboseMode)
|
||||
if len(row) > 0 {
|
||||
summaryTable.Append(row)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
summaryTable.SetFooter(generateFooter(summaryDetails))
|
||||
|
||||
// summaryTable.SetFooter(generateFooter())
|
||||
summaryTable.Render()
|
||||
|
||||
// For control scan framework will be nil
|
||||
cautils.InfoTextDisplay(prettyPrinter.writer, frameworksScoresToString(summaryDetails.ListFrameworks().All()))
|
||||
// When scanning controls the framework list will be empty
|
||||
cautils.InfoTextDisplay(prettyPrinter.writer, frameworksScoresToString(summaryDetails.ListFrameworks()))
|
||||
|
||||
prettyPrinter.printInfo(infoToPrintInfo)
|
||||
|
||||
}
|
||||
|
||||
func frameworksScoresToString(frameworks []reportsummary.IPolicies) string {
|
||||
func (prettyPrinter *PrettyPrinter) printInfo(infoToPrintInfo []infoStars) {
|
||||
fmt.Println()
|
||||
for i := range infoToPrintInfo {
|
||||
cautils.InfoDisplay(prettyPrinter.writer, fmt.Sprintf("%s %s\n", infoToPrintInfo[i].stars, infoToPrintInfo[i].info))
|
||||
}
|
||||
}
|
||||
|
||||
func frameworksScoresToString(frameworks []reportsummary.IFrameworkSummary) string {
|
||||
if len(frameworks) == 1 {
|
||||
if frameworks[0].GetName() != "" {
|
||||
return fmt.Sprintf("FRAMEWORK %s\n", frameworks[0].GetName())
|
||||
@@ -209,14 +234,21 @@ func frameworksScoresToString(frameworks []reportsummary.IPolicies) string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// func getSortedControlsNames(controls []reportsummary.IPolicies) []string {
|
||||
// controlNames := make([]string, 0, len(controls))
|
||||
// for k := range controls {
|
||||
// controlNames = append(controlNames, controls[k].Get())
|
||||
// }
|
||||
// sort.Strings(controlNames)
|
||||
// return controlNames
|
||||
// }
|
||||
func getControlURL(controlID string) string {
|
||||
func getControlLink(controlID string) string {
|
||||
return fmt.Sprintf("https://hub.armo.cloud/docs/%s", strings.ToLower(controlID))
|
||||
}
|
||||
|
||||
func controlCountersForSummary(counters reportsummary.ICounters) string {
|
||||
return fmt.Sprintf("Controls: %d (Failed: %d, Excluded: %d, Skipped: %d)", counters.All(), counters.Failed(), counters.Excluded(), counters.Skipped())
|
||||
}
|
||||
|
||||
func controlCountersForResource(l *helpersv1.AllLists) string {
|
||||
return fmt.Sprintf("Controls: %d (Failed: %d, Excluded: %d)", len(l.All()), len(l.Failed()), len(l.Excluded()))
|
||||
}
|
||||
func getSperator(sep string) string {
|
||||
s := ""
|
||||
for i := 0; i < 80; i++ {
|
||||
s += sep
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
55
core/pkg/resultshandling/printer/v2/prometheus.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger"
|
||||
"github.com/armosec/kubescape/v2/core/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/v2/core/pkg/resultshandling/printer"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
)
|
||||
|
||||
type PrometheusPrinter struct {
|
||||
writer *os.File
|
||||
verboseMode bool
|
||||
}
|
||||
|
||||
func NewPrometheusPrinter(verboseMode bool) *PrometheusPrinter {
|
||||
return &PrometheusPrinter{
|
||||
verboseMode: verboseMode,
|
||||
}
|
||||
}
|
||||
|
||||
func (prometheusPrinter *PrometheusPrinter) SetWriter(outputFile string) {
|
||||
prometheusPrinter.writer = printer.GetWriter(outputFile)
|
||||
}
|
||||
|
||||
func (prometheusPrinter *PrometheusPrinter) Score(score float32) {
|
||||
fmt.Printf("\n# Overall risk-score (0- Excellent, 100- All failed)\nkubescape_score %d\n", cautils.Float32ToInt(score))
|
||||
}
|
||||
|
||||
func (printer *PrometheusPrinter) generatePrometheusFormat(
|
||||
resources map[string]workloadinterface.IMetadata,
|
||||
results map[string]resourcesresults.Result,
|
||||
summaryDetails *reportsummary.SummaryDetails) *Metrics {
|
||||
|
||||
m := &Metrics{}
|
||||
m.setRiskScores(summaryDetails)
|
||||
// m.setResourcesCounters(resources, results)
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
func (printer *PrometheusPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
|
||||
metrics := printer.generatePrometheusFormat(opaSessionObj.AllResources, opaSessionObj.ResourcesResult, &opaSessionObj.Report.SummaryDetails)
|
||||
|
||||
logOUtputFile(printer.writer.Name())
|
||||
if _, err := printer.writer.Write([]byte(metrics.String())); err != nil {
|
||||
logger.L().Error("failed to write results", helpers.Error(err))
|
||||
}
|
||||
}
|
||||
343
core/pkg/resultshandling/printer/v2/prometheusutils.go
Normal file
@@ -0,0 +1,343 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
)
|
||||
|
||||
type metricsName string
|
||||
|
||||
const (
|
||||
ksMetrics metricsName = "kubescape"
|
||||
metricsCluster metricsName = "cluster"
|
||||
metricsScore metricsName = "riskScore"
|
||||
metricsCount metricsName = "count"
|
||||
metricsFailed metricsName = "failed"
|
||||
metricsExcluded metricsName = "exclude"
|
||||
metricsPassed metricsName = "passed"
|
||||
metricsControl metricsName = "control"
|
||||
metricsControls metricsName = "controls"
|
||||
metricsResource metricsName = "resource"
|
||||
metricsResources metricsName = "resources"
|
||||
metricsFramework metricsName = "framework"
|
||||
)
|
||||
|
||||
// ============================================ CLUSTER ============================================================
|
||||
func (mrs *mRiskScore) metrics() []string {
|
||||
/*
|
||||
##### Overall risk score
|
||||
kubescape_cluster_riskScore{} <risk score>
|
||||
|
||||
###### Overall resources counters
|
||||
kubescape_cluster_count_resources_failed{} <counter>
|
||||
kubescape_cluster_count_resources_excluded{} <counter>
|
||||
kubescape_cluster_count_resources_passed{} <counter>
|
||||
|
||||
###### Overall controls counters
|
||||
kubescape_cluster_count_controls_failed{} <counter>
|
||||
kubescape_cluster_count_controls_excluded{} <counter>
|
||||
kubescape_cluster_count_controls_passed{} <counter>
|
||||
*/
|
||||
|
||||
m := []string{}
|
||||
// overall
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s", mrs.prefix(), metricsScore), mrs.labels(), mrs.riskScore))
|
||||
|
||||
// resources
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mrs.prefix(), metricsCount, metricsResources, metricsFailed), mrs.labels(), mrs.resourcesCountFailed))
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mrs.prefix(), metricsCount, metricsResources, metricsExcluded), mrs.labels(), mrs.resourcesCountExcluded))
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mrs.prefix(), metricsCount, metricsResources, metricsPassed), mrs.labels(), mrs.resourcesCountPassed))
|
||||
|
||||
// controls
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mrs.prefix(), metricsCount, metricsControl, metricsFailed), mrs.labels(), mrs.controlsCountFailed))
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mrs.prefix(), metricsCount, metricsControl, metricsExcluded), mrs.labels(), mrs.controlsCountExcluded))
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mrs.prefix(), metricsCount, metricsControl, metricsPassed), mrs.labels(), mrs.controlsCountPassed))
|
||||
|
||||
return m
|
||||
}
|
||||
func (mrs *mRiskScore) labels() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (mrs *mRiskScore) prefix() string {
|
||||
return fmt.Sprintf("%s_%s", ksMetrics, metricsCluster)
|
||||
}
|
||||
|
||||
// ============================================ CONTROL ============================================================
|
||||
|
||||
func (mcrs *mControlRiskScore) metrics() []string {
|
||||
/*
|
||||
# Risk score
|
||||
kubescape_control_riskScore{name="<control name>",url="<docs url>",severity="<control severity>"} <risk score>
|
||||
|
||||
# Resources counters
|
||||
kubescape_control_count_resources_failed{name="<control name>",url="<docs url>",severity="<control severity>"} <counter>
|
||||
kubescape_control_count_resources_excluded{name="<control name>",url="<docs url>",severity="<control severity>"} <counter>
|
||||
kubescape_control_count_resources_passed{name="<control name>",url="<docs url>",severity="<control severity>"} <counter>
|
||||
*/
|
||||
|
||||
m := []string{}
|
||||
// overall
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s", mcrs.prefix(), metricsScore), mcrs.labels(), mcrs.riskScore))
|
||||
|
||||
// resources
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mcrs.prefix(), metricsCount, metricsResources, metricsFailed), mcrs.labels(), mcrs.resourcesCountFailed))
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mcrs.prefix(), metricsCount, metricsResources, metricsExcluded), mcrs.labels(), mcrs.resourcesCountExcluded))
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mcrs.prefix(), metricsCount, metricsResources, metricsPassed), mcrs.labels(), mcrs.resourcesCountPassed))
|
||||
|
||||
return m
|
||||
}
|
||||
func (mcrs *mControlRiskScore) labels() string {
|
||||
r := fmt.Sprintf("name=\"%s\"", mcrs.controlName) + ","
|
||||
r += fmt.Sprintf("severity=\"%s\"", mcrs.severity) + ","
|
||||
r += fmt.Sprintf("link=\"%s\"", mcrs.link)
|
||||
return r
|
||||
}
|
||||
func (mcrs *mControlRiskScore) prefix() string {
|
||||
return fmt.Sprintf("%s_%s", ksMetrics, metricsControl)
|
||||
}
|
||||
|
||||
// ============================================ FRAMEWORK ============================================================
|
||||
|
||||
func (mfrs *mFrameworkRiskScore) metrics() []string {
|
||||
/*
|
||||
#### Frameworks metrics
|
||||
kubescape_framework_riskScore{name="<framework name>"} <risk score>
|
||||
|
||||
###### Frameworks resources counters
|
||||
kubescape_framework_count_resources_failed{} <counter>
|
||||
kubescape_framework_count_resources_excluded{} <counter>
|
||||
kubescape_framework_count_resources_passed{} <counter>
|
||||
|
||||
###### Frameworks controls counters
|
||||
kubescape_framework_count_controls_failed{name="<framework name>"} <counter>
|
||||
kubescape_framework_count_controls_excluded{name="<framework name>"} <counter>
|
||||
kubescape_framework_count_controls_passed{name="<framework name>"} <counter>
|
||||
|
||||
*/
|
||||
|
||||
m := []string{}
|
||||
// overall
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s", mfrs.prefix(), metricsScore), mfrs.labels(), mfrs.riskScore))
|
||||
|
||||
// resources
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mfrs.prefix(), metricsCount, metricsResources, metricsFailed), mfrs.labels(), mfrs.resourcesCountFailed))
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mfrs.prefix(), metricsCount, metricsResources, metricsExcluded), mfrs.labels(), mfrs.resourcesCountExcluded))
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mfrs.prefix(), metricsCount, metricsResources, metricsPassed), mfrs.labels(), mfrs.resourcesCountPassed))
|
||||
|
||||
// controls
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mfrs.prefix(), metricsCount, metricsControl, metricsFailed), mfrs.labels(), mfrs.controlsCountFailed))
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mfrs.prefix(), metricsCount, metricsControl, metricsExcluded), mfrs.labels(), mfrs.controlsCountExcluded))
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mfrs.prefix(), metricsCount, metricsControl, metricsPassed), mfrs.labels(), mfrs.controlsCountPassed))
|
||||
|
||||
return m
|
||||
}
|
||||
func (mfrs *mFrameworkRiskScore) labels() string {
|
||||
r := fmt.Sprintf("name=\"%s\"", mfrs.frameworkName)
|
||||
return r
|
||||
}
|
||||
func (mfrs *mFrameworkRiskScore) prefix() string {
|
||||
return fmt.Sprintf("%s_%s", ksMetrics, metricsFramework)
|
||||
}
|
||||
|
||||
// ============================================ RESOURCES ============================================================
|
||||
|
||||
func (mrc *mResources) metrics() []string {
|
||||
/*
|
||||
#### Resources metrics
|
||||
kubescape_resource_count_controls_failed{apiVersion="<>",kind="<>",namespace="<>",name="<>"} <counter>
|
||||
kubescape_resource_count_controls_excluded{apiVersion="<>",kind="<>",namespace="<>",name="<>"} <counter>
|
||||
*/
|
||||
|
||||
m := []string{}
|
||||
|
||||
// controls
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mrc.prefix(), metricsCount, metricsControls, metricsFailed), mrc.labels(), mrc.controlsCountFailed))
|
||||
m = append(m, toRowInMetrics(fmt.Sprintf("%s_%s_%s_%s", mrc.prefix(), metricsCount, metricsControls, metricsExcluded), mrc.labels(), mrc.controlsCountExcluded))
|
||||
return m
|
||||
}
|
||||
|
||||
func (mrc *mResources) labels() string {
|
||||
r := fmt.Sprintf("apiVersion=\"%s\"", mrc.apiVersion) + ","
|
||||
r += fmt.Sprintf("kind=\"%s\"", mrc.kind) + ","
|
||||
r += fmt.Sprintf("namespace=\"%s\"", mrc.namespace) + ","
|
||||
r += fmt.Sprintf("name=\"%s\"", mrc.name)
|
||||
return r
|
||||
}
|
||||
func (mrc *mResources) prefix() string {
|
||||
return fmt.Sprintf("%s_%s", ksMetrics, metricsResource)
|
||||
}
|
||||
|
||||
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
func toRowInMetrics(name string, row string, value int) string {
|
||||
return fmt.Sprintf("%s{%s} %d", name, row, value)
|
||||
|
||||
}
|
||||
func (m *Metrics) String() string {
|
||||
|
||||
r := strings.Join(m.rs.metrics(), "\n") + "\n"
|
||||
for i := range m.listFrameworks {
|
||||
r += strings.Join(m.listFrameworks[i].metrics(), "\n") + "\n"
|
||||
}
|
||||
for i := range m.listControls {
|
||||
r += strings.Join(m.listControls[i].metrics(), "\n") + "\n"
|
||||
}
|
||||
for i := range m.listResources {
|
||||
r += strings.Join(m.listResources[i].metrics(), "\n") + "\n"
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
type mRiskScore struct {
|
||||
resourcesCountPassed int
|
||||
resourcesCountFailed int
|
||||
resourcesCountExcluded int
|
||||
controlsCountPassed int
|
||||
controlsCountFailed int
|
||||
controlsCountExcluded int
|
||||
controlsCountSkipped int
|
||||
riskScore int
|
||||
}
|
||||
|
||||
type mControlRiskScore struct {
|
||||
controlName string
|
||||
controlID string
|
||||
link string
|
||||
severity string
|
||||
remediation string
|
||||
resourcesCountPassed int
|
||||
resourcesCountFailed int
|
||||
resourcesCountExcluded int
|
||||
riskScore int
|
||||
}
|
||||
|
||||
type mFrameworkRiskScore struct {
|
||||
frameworkName string
|
||||
resourcesCountPassed int
|
||||
resourcesCountFailed int
|
||||
resourcesCountExcluded int
|
||||
controlsCountPassed int
|
||||
controlsCountFailed int
|
||||
controlsCountExcluded int
|
||||
controlsCountSkipped int
|
||||
riskScore int
|
||||
}
|
||||
|
||||
type mResources struct {
|
||||
name string
|
||||
namespace string
|
||||
apiVersion string
|
||||
kind string
|
||||
controlsCountPassed int
|
||||
controlsCountFailed int
|
||||
controlsCountExcluded int
|
||||
}
|
||||
type Metrics struct {
|
||||
rs mRiskScore
|
||||
listFrameworks []mFrameworkRiskScore
|
||||
listControls []mControlRiskScore
|
||||
listResources []mResources
|
||||
}
|
||||
|
||||
func (mrs *mRiskScore) set(resources reportsummary.ICounters, controls reportsummary.ICounters) {
|
||||
mrs.resourcesCountExcluded = resources.Excluded()
|
||||
mrs.resourcesCountFailed = resources.Failed()
|
||||
mrs.resourcesCountPassed = resources.Passed()
|
||||
mrs.controlsCountExcluded = controls.Excluded()
|
||||
mrs.controlsCountFailed = controls.Failed()
|
||||
mrs.controlsCountPassed = controls.Passed()
|
||||
mrs.controlsCountSkipped = controls.Skipped()
|
||||
}
|
||||
|
||||
func (mfrs *mFrameworkRiskScore) set(resources reportsummary.ICounters, controls reportsummary.ICounters) {
|
||||
mfrs.resourcesCountExcluded = resources.Excluded()
|
||||
mfrs.resourcesCountFailed = resources.Failed()
|
||||
mfrs.resourcesCountPassed = resources.Passed()
|
||||
mfrs.controlsCountExcluded = controls.Excluded()
|
||||
mfrs.controlsCountFailed = controls.Failed()
|
||||
mfrs.controlsCountPassed = controls.Passed()
|
||||
mfrs.controlsCountSkipped = controls.Skipped()
|
||||
}
|
||||
|
||||
func (mcrs *mControlRiskScore) set(resources reportsummary.ICounters) {
|
||||
mcrs.resourcesCountExcluded = resources.Excluded()
|
||||
mcrs.resourcesCountFailed = resources.Failed()
|
||||
mcrs.resourcesCountPassed = resources.Passed()
|
||||
}
|
||||
func (m *Metrics) setRiskScores(summaryDetails *reportsummary.SummaryDetails) {
|
||||
m.rs.set(summaryDetails.NumberOfResources(), summaryDetails.NumberOfControls())
|
||||
m.rs.riskScore = cautils.Float32ToInt(summaryDetails.GetScore())
|
||||
|
||||
for _, fw := range summaryDetails.ListFrameworks() {
|
||||
mfrs := mFrameworkRiskScore{
|
||||
frameworkName: fw.GetName(),
|
||||
riskScore: cautils.Float32ToInt(fw.GetScore()),
|
||||
}
|
||||
mfrs.set(fw.NumberOfResources(), fw.NumberOfControls())
|
||||
m.listFrameworks = append(m.listFrameworks, mfrs)
|
||||
}
|
||||
|
||||
for _, control := range summaryDetails.ListControls() {
|
||||
mcrs := mControlRiskScore{
|
||||
controlName: control.GetName(),
|
||||
controlID: control.GetID(),
|
||||
riskScore: cautils.Float32ToInt(control.GetScore()),
|
||||
link: getControlLink(control.GetID()),
|
||||
severity: apis.ControlSeverityToString(control.GetScoreFactor()),
|
||||
remediation: control.GetRemediation(),
|
||||
}
|
||||
mcrs.set(control.NumberOfResources())
|
||||
m.listControls = append(m.listControls, mcrs)
|
||||
}
|
||||
}
|
||||
|
||||
// return -> (passed, excluded, failed)
|
||||
func resourceControlStatusCounters(result *resourcesresults.Result) (int, int, int) {
|
||||
failed := 0
|
||||
excluded := 0
|
||||
passed := 0
|
||||
for i := range result.ListControls() {
|
||||
switch result.ListControls()[i].GetStatus(nil).Status() {
|
||||
case apis.StatusExcluded:
|
||||
excluded++
|
||||
case apis.StatusFailed:
|
||||
failed++
|
||||
case apis.StatusPassed:
|
||||
passed++
|
||||
}
|
||||
}
|
||||
return passed, excluded, failed
|
||||
}
|
||||
func (m *Metrics) setResourcesCounters(
|
||||
resources map[string]workloadinterface.IMetadata,
|
||||
results map[string]resourcesresults.Result) {
|
||||
|
||||
for resourceID, result := range results {
|
||||
r, ok := resources[resourceID]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
passed, excluded, failed := resourceControlStatusCounters(&result)
|
||||
|
||||
mrc := mResources{}
|
||||
mrc.apiVersion = r.GetApiVersion()
|
||||
mrc.namespace = r.GetNamespace()
|
||||
mrc.kind = r.GetKind()
|
||||
mrc.name = r.GetName()
|
||||
|
||||
// append
|
||||
mrc.controlsCountPassed = passed
|
||||
mrc.controlsCountFailed = failed
|
||||
mrc.controlsCountExcluded = excluded
|
||||
|
||||
m.listResources = append(m.listResources, mrc)
|
||||
}
|
||||
|
||||
}
|
||||
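Taken together, Metrics.String() emits one plain-text line per metric in the name{labels} value shape built by toRowInMetrics and documented in the comments above. A hypothetical fragment of the output, with illustrative names and numbers only:

kubescape_cluster_riskScore{} 23
kubescape_cluster_count_resources_failed{} 41
kubescape_framework_riskScore{name="NSA"} 19
kubescape_control_riskScore{name="Some control",severity="Medium",link="https://hub.armo.cloud/docs/c-0000"} 30
kubescape_resource_count_controls_failed{apiVersion="apps/v1",kind="Deployment",namespace="default",name="example"} 2

Note that the per-resource lines would only appear if setResourcesCounters were wired in; the call is commented out in generatePrometheusFormat above.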
@@ -5,58 +5,82 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/v2/core/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) resourceTable(results map[string]resourcesresults.Result, allResources map[string]workloadinterface.IMetadata) {
|
||||
const (
|
||||
resourceColumnSeverity = iota
|
||||
resourceColumnName = iota
|
||||
resourceColumnURL = iota
|
||||
resourceColumnPath = iota
|
||||
_resourceRowLen = iota
|
||||
)
|
||||
|
||||
summaryTable := tablewriter.NewWriter(prettyPrinter.writer)
|
||||
summaryTable.SetAutoWrapText(true)
|
||||
summaryTable.SetAutoMergeCells(true)
|
||||
summaryTable.SetHeader(generateResourceHeader())
|
||||
summaryTable.SetHeaderLine(true)
|
||||
summaryTable.SetRowLine(true)
|
||||
// summaryTable.SetFooter([]string{"", "", "Total", "", "$146.93"})
|
||||
// For control scan framework will be nil
|
||||
data := Matrix{}
|
||||
for i := range results {
|
||||
resource, ok := allResources[i]
|
||||
func (prettyPrinter *PrettyPrinter) resourceTable(opaSessionObj *cautils.OPASessionObj) {
|
||||
|
||||
for resourceID, result := range opaSessionObj.ResourcesResult {
|
||||
if !result.GetStatus(nil).IsFailed() {
|
||||
continue
|
||||
}
|
||||
resource, ok := opaSessionObj.AllResources[resourceID]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
s := results[i]
|
||||
if raw := generateResourceRows(resource, s.ListControls(), prettyPrinter.verboseMode); len(raw) > 0 {
|
||||
fmt.Fprintf(prettyPrinter.writer, "\n"+getSperator("#")+"\n\n")
|
||||
|
||||
if source, ok := opaSessionObj.ResourceSource[resourceID]; ok {
|
||||
fmt.Fprintf(prettyPrinter.writer, "Source: %s\n", source)
|
||||
}
|
||||
fmt.Fprintf(prettyPrinter.writer, "ApiVersion: %s\n", resource.GetApiVersion())
|
||||
fmt.Fprintf(prettyPrinter.writer, "Kind: %s\n", resource.GetKind())
|
||||
fmt.Fprintf(prettyPrinter.writer, "Name: %s\n", resource.GetName())
|
||||
if resource.GetNamespace() != "" {
|
||||
fmt.Fprintf(prettyPrinter.writer, "Namespace: %s\n", resource.GetNamespace())
|
||||
}
|
||||
fmt.Fprintf(prettyPrinter.writer, "\n"+controlCountersForResource(result.ListControlsIDs(nil))+"\n\n")
|
||||
|
||||
summaryTable := tablewriter.NewWriter(prettyPrinter.writer)
|
||||
summaryTable.SetAutoWrapText(true)
|
||||
summaryTable.SetAutoMergeCells(true)
|
||||
summaryTable.SetHeader(generateResourceHeader())
|
||||
summaryTable.SetHeaderLine(true)
|
||||
summaryTable.SetRowLine(true)
|
||||
data := Matrix{}
|
||||
|
||||
if raw := generateResourceRows(result.ListControls(), &opaSessionObj.Report.SummaryDetails); len(raw) > 0 {
|
||||
data = append(data, raw...)
|
||||
}
|
||||
}
|
||||
sort.Sort(data)
|
||||
summaryTable.AppendBulk(data)
|
||||
// For control scan framework will be nil
|
||||
|
||||
sort.Sort(data)
|
||||
summaryTable.AppendBulk(data)
|
||||
|
||||
summaryTable.Render()
|
||||
}
|
||||
|
||||
summaryTable.Render()
|
||||
}
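
As a side note, here is a minimal standalone sketch of the tablewriter calls used above, so the per-resource table flow can be read in isolation. The header strings mirror the new four-column layout; the two rows are made-up examples, not real scan output.

package main

import (
	"os"

	"github.com/olekukonko/tablewriter"
)

func main() {
	// Same table settings as the per-resource table above.
	t := tablewriter.NewWriter(os.Stdout)
	t.SetAutoWrapText(true)
	t.SetAutoMergeCells(true)
	t.SetHeader([]string{"Severity", "Control Name", "Docs", "Assistant Remediation"})
	t.SetHeaderLine(true)
	t.SetRowLine(true)

	// Illustrative rows only; real rows come from generateResourceRows.
	t.AppendBulk([][]string{
		{"High", "Privileged container", "https://hub.armo.cloud/docs/c-0057", "spec.containers[0].securityContext.privileged"},
		{"Medium", "Automatic mapping of service account", "https://hub.armo.cloud/docs/c-0034", "spec.automountServiceAccountToken"},
	})
	t.Render()
}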

func generateResourceRows(resource workloadinterface.IMetadata, controls []resourcesresults.ResourceAssociatedControl, verboseMode bool) [][]string {
func generateResourceRows(controls []resourcesresults.ResourceAssociatedControl, summaryDetails *reportsummary.SummaryDetails) [][]string {
	rows := [][]string{}

	for i := range controls {
		row := make([]string, _resourceRowLen)

		if controls[i].GetName() == "" {
			continue
		}
		row := []string{}

		if !verboseMode && controls[i].GetStatus(nil).IsPassed() {
		if !controls[i].GetStatus(nil).IsFailed() {
			continue
		}

		row = append(row, fmt.Sprintf("%s\nhttps://hub.armo.cloud/docs/%s", controls[i].GetName(), strings.ToLower(controls[i].GetID())))
		row = append(row, resource.GetNamespace())
		paths := failedPathsToString(&controls[i])
		row[resourceColumnURL] = fmt.Sprintf("https://hub.armo.cloud/docs/%s", strings.ToLower(controls[i].GetID()))
		row[resourceColumnPath] = strings.Join(failedPathsToString(&controls[i]), "\n")
		row[resourceColumnName] = controls[i].GetName()

		if c := summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, controls[i].GetName()); c != nil {
			row[resourceColumnSeverity] = getSeverityColumn(c)
		}

		row = append(row, fmt.Sprintf("%s/%s\n%s", resource.GetKind(), resource.GetName(), strings.Join(paths, ";\n")))
		row = append(row, string(controls[i].GetStatus(nil).Status()))
		rows = append(rows, row)
	}

@@ -64,7 +88,12 @@ func generateResourceRows(resource workloadinterface.IMetadata, controls []resou
}

func generateResourceHeader() []string {
	return []string{"Control", "Namespace", "Kind/Name", "Statues"}
	headers := make([]string, _resourceRowLen)
	headers[resourceColumnSeverity] = "Severity"
	headers[resourceColumnName] = "Control Name"
	headers[resourceColumnURL] = "Docs"
	headers[resourceColumnPath] = "Assistant Remediation"
	return headers
}

type Matrix [][]string
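
sort.Sort(data) above requires Matrix to implement sort.Interface; those methods sit outside this hunk. A plausible sketch, assuming rows are ordered by their first column (the actual ordering in the repository may differ):

// Assumed sketch of the sort.Interface methods on Matrix; the real methods
// are not shown in this diff and may order rows differently.
func (a Matrix) Len() int      { return len(a) }
func (a Matrix) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a Matrix) Less(i, j int) bool {
	// Order rows by their first column (e.g. severity).
	return a[i][0] < a[j][0]
}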
Some files were not shown because too many files have changed in this diff.