Mirror of https://github.com/kubescape/kubescape.git (synced 2026-04-15 06:58:11 +00:00)

Compare commits (48 commits)
| SHA1 |
|---|
| 606f5cfb62 |
| 990be3afe8 |
| 7020c2d025 |
| a6497c1252 |
| 9d528a8075 |
| 5aec8b6f28 |
| 830ee27169 |
| 2453aea6f3 |
| e95b0f840a |
| 83680d1207 |
| fd135e9e49 |
| e47eb9cb4e |
| d288fdc7f2 |
| 9de73dab29 |
| f0afc20ec6 |
| bf75059347 |
| 78835a58c4 |
| fdccae9a1e |
| 6d97d42f67 |
| 46001e4761 |
| b4d712fcb1 |
| 7847a4593b |
| b2036e64f1 |
| fd0bbcccfe |
| 7caa47f949 |
| 06b171901d |
| e685fe2b7d |
| 7177e77a8d |
| 4cda32771b |
| f896b65a87 |
| 3fff1b750a |
| 2380317953 |
| bd9ade4d15 |
| 659d3533ee |
| 37c242576e |
| e9a22a23e7 |
| ae3816c1e0 |
| e4661a5ae2 |
| 539d1889fe |
| 2dd5f05f1a |
| 60c9b38de4 |
| 8b66b068ea |
| 1507bc3f04 |
| 1e0baba919 |
| 4c9f47b1e1 |
| b66446b7eb |
| b92d4256ad |
| d630811386 |
.github/workflows/build.yaml (vendored): 21 changed lines
@@ -53,7 +53,7 @@ jobs:
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape

- name: Upload Release binaries
- name: Upload release binaries
id: upload-release-asset
uses: actions/upload-release-asset@v1
env:
@@ -64,6 +64,18 @@ jobs:
asset_name: kubescape-${{ matrix.os }}
asset_content_type: application/octet-stream

- name: Upload release hash
id: upload-release-hash
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ needs.once.outputs.upload_url }}
asset_path: build/${{ matrix.os }}/kubescape.sha256
asset_name: kubescape-${{ matrix.os }}-sha256
asset_content_type: application/octet-stream

build-docker:
name: Build docker container, tag and upload to registry
@@ -80,14 +92,14 @@ jobs:

- name: Set image version
id: image-version
run: echo '::set-output version=IMAGE_VERSION::v2.0.${{ github.run_number }}'
run: echo '::set-output name=IMAGE_VERSION::v2.0.${{ github.run_number }}'

- name: Set image name
id: image-name
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'

- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg run_number=${{ github.run_number }}
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}

- name: Re-Tag Image to latest
run: docker tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} ${{ steps.image-name.outputs.IMAGE_NAME }}:latest
@@ -108,6 +120,7 @@ jobs:
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:latest

# TODO - Wait for casign to support fixed tags -> https://github.com/sigstore/cosign/issues/1424
# - name: Install cosign
# uses: sigstore/cosign-installer@main
# with:
@@ -116,6 +129,6 @@ jobs:
# env:
# COSIGN_EXPERIMENTAL: "true"
# run: |
# cosign sign --force ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
# cosign sign --force ${{ steps.image-name.outputs.IMAGE_NAME }}:latest
# cosign sign --force ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
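The new `Upload release hash` step above publishes a `kubescape-<os>-sha256` asset next to each release binary. As an illustration only (not part of this compare), a downloader could verify the binary against that digest; the asset naming follows the workflow, while the local paths and helper function below are hypothetical. A minimal Go sketch:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
	"strings"
)

// verifyRelease compares the SHA-256 of a downloaded kubescape binary with the
// hex digest published as the "kubescape-<os>-sha256" release asset.
func verifyRelease(binaryPath, hashPath string) error {
	binary, err := os.ReadFile(binaryPath)
	if err != nil {
		return err
	}
	published, err := os.ReadFile(hashPath)
	if err != nil {
		return err
	}
	sum := sha256.Sum256(binary)
	got := hex.EncodeToString(sum[:])
	want := strings.TrimSpace(string(published))
	if got != want {
		return fmt.Errorf("checksum mismatch: got %s, want %s", got, want)
	}
	return nil
}

func main() {
	// Asset names follow the workflow above; the local file locations are hypothetical.
	if err := verifyRelease("kubescape-ubuntu-latest", "kubescape-ubuntu-latest-sha256"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("checksum OK")
}
```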
.github/workflows/build_dev.yaml (vendored): 16 changed lines
@@ -58,14 +58,14 @@ jobs:

- name: Set image version
id: image-version
run: echo '::set-output name=IMAGE_VERSION::v2.0.${{ github.run_number }}'
run: echo '::set-output name=IMAGE_VERSION::dev-v2.0.${{ github.run_number }}'

- name: Set image name
id: image-name
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'

- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg run_number=${{ github.run_number }}
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}

- name: Login to Quay.io
env:
@@ -81,14 +81,4 @@ jobs:
- name: Push Docker image
run: |
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}

# - name: Install cosign
# uses: sigstore/cosign-installer@main
# with:
# cosign-release: 'v1.5.1' # optional
# - name: sign kubescape container image
# env:
# COSIGN_EXPERIMENTAL: "true"
# run: |
# cosign sign --force ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
.github/workflows/publish_image.yaml (vendored): 53 changed lines
@@ -1,53 +0,0 @@
name: build

on:
push:
branches: [ master ]
jobs:

build-docker:
name: Build docker container, tag and upload to registry
needs: build
runs-on: ubuntu-latest
permissions:
id-token: write
packages: write
contents: read

steps:
- uses: actions/checkout@v2

- name: Set name
id: set-name
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape:v2.0.${{ github.run_number }}'

- name: Build the Docker image
run: docker build . --file build/Dockerfile --tag ${{ steps.set-name.outputs.IMAGE_NAME }} --build-arg run_number=${{ github.run_number }}
- name: Re-Tag Image to latest
run: docker tag ${{ steps.set-name.outputs.IMAGE_NAME }} quay.io/${{ github.repository_owner }}/kubescape:latest
- name: Login to Quay.io
env: # Or as an environment variable
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
- name: Login to GitHub Container Registry
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Push Docker image
run: |
docker push ${{ steps.set-name.outputs.IMAGE_NAME }}
docker push quay.io/${{ github.repository_owner }}/kubescape:latest
- name: Install cosign
uses: sigstore/cosign-installer@main
with:
cosign-release: 'v1.5.1' # optional
- name: sign kubescape container image
env:
COSIGN_EXPERIMENTAL: "true"
run: |
cosign sign --force ${{ steps.set-name.outputs.IMAGE_NAME }}
cosign sign --force quay.io/${{ github.repository_owner }}/kubescape:latest
README.md: 78 changed lines
@@ -3,6 +3,8 @@
[](https://github.com/armosec/kubescape/actions/workflows/build.yaml)
[](https://goreportcard.com/report/github.com/armosec/kubescape)


Kubescape is a K8s open-source tool providing a multi-cloud K8s single pane of glass, including risk analysis, security compliance, RBAC visualizer and image vulnerabilities scanning.
Kubescape scans K8s clusters, YAML files, and HELM charts, detecting misconfigurations according to multiple frameworks (such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo) , [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/)), software vulnerabilities, and RBAC (role-based-access-control) violations at early stages of the CI/CD pipeline, calculates risk score instantly and shows risk trends over time.
It became one of the fastest-growing Kubernetes tools among developers due to its easy-to-use CLI interface, flexible output formats, and automated scanning capabilities, saving Kubernetes users and admins’ precious time, effort, and resources.
@@ -47,9 +49,11 @@ We invite you to our team! We are excited about this project and want to return
Want to contribute? Want to discuss something? Have an issue?

* Open a issue, we are trying to respond within 48 hours
* [Join us](https://armosec.github.io/kubescape/) in a discussion on our discord server!
* [Join us](https://armosec.github.io/kubescape/) in a discussion on our discord server!

[<img src="docs/discord-banner.png" width="100" alt="logo" align="center">](https://armosec.github.io/kubescape/)


# Options and examples
@@ -140,107 +144,105 @@ kubescape scan control "Privileged container"

#### Scan specific namespaces
```
kubescape scan framework nsa --include-namespaces development,staging,production
kubescape scan --include-namespaces development,staging,production
```

#### Scan cluster and exclude some namespaces
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
kubescape scan --exclude-namespaces kube-system,kube-public
```

#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI)
```
kubescape scan framework nsa *.yaml
kubescape scan *.yaml
```

#### Scan kubernetes manifest files from a public github repository
```
kubescape scan framework nsa https://github.com/armosec/kubescape
kubescape scan https://github.com/armosec/kubescape
```

#### Display all scanned resources (including the resources who passed)
```
kubescape scan framework nsa --verbose
kubescape scan --verbose
```

#### Output in `json` format
```
kubescape scan framework nsa --format json --output results.json
kubescape scan --format json --output results.json
```

#### Output in `junit xml` format
```
kubescape scan framework nsa --format junit --output results.xml
kubescape scan --format junit --output results.xml
```

#### Output in `prometheus` metrics format - Contributed by [@Joibel](https://github.com/Joibel)
```
kubescape scan framework nsa --format prometheus
kubescape scan --format prometheus
```

#### Scan with exceptions, objects with exceptions will be presented as `exclude` and not `fail`
[Full documentation](examples/exceptions/README.md)
```
kubescape scan framework nsa --exceptions examples/exceptions/exclude-kube-namespaces.json
kubescape scan --exceptions examples/exceptions/exclude-kube-namespaces.json
```

#### Scan Helm charts - Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout
```
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan framework nsa -
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan -
```

e.g.
```
helm template bitnami/mysql --generate-name --dry-run | kubescape scan framework nsa -
helm template bitnami/mysql --generate-name --dry-run | kubescape scan -
```


### Offline Support
### Offline/Air-gaped Environment Support

[Video tutorial](https://youtu.be/IGXL9s37smM)

It is possible to run Kubescape offline!

First download the framework and then scan with `--use-from` flag

1. Download and save in file, if file name not specified, will save in `~/.kubescape/<framework name>.json`
```
kubescape download framework nsa --output nsa.json
```

2. Scan using the downloaded framework
```
kubescape scan framework nsa --use-from nsa.json
```


You can also download all artifacts to a local path and then load them using `--use-artifacts-from` flag
#### Download all artifacts

1. Download and save in local directory, if path not specified, will save all in `~/.kubescape`
```
kubescape download artifacts --output path/to/local/dir
```
2. Copy the downloaded artifacts to the air-gaped/offline environment

2. Scan using the downloaded artifacts
3. Scan using the downloaded artifacts
```
kubescape scan framework nsa --use-artifacts-from path/to/local/dir
kubescape scan --use-artifacts-from path/to/local/dir
```

#### Download a single artifacts

You can also download a single artifacts and scan with the `--use-from` flag

1. Download and save in file, if file name not specified, will save in `~/.kubescape/<framework name>.json`
```
kubescape download framework nsa --output /path/nsa.json
```
2. Copy the downloaded artifacts to the air-gaped/offline environment

3. Scan using the downloaded framework
```
kubescape scan framework nsa --use-from /path/nsa.json
```


## Scan Periodically using Helm - Contributed by [@yonahd](https://github.com/yonahd)

You can scan your cluster periodically by adding a `CronJob` that will repeatedly trigger kubescape

```
helm install kubescape examples/helm_chart/
```
[Please follow the instructions here](https://hub.armo.cloud/docs/installation-of-armo-in-cluster)
[helm chart repo](https://github.com/armosec/armo-helm)

## Scan using docker image

Official Docker image `quay.io/armosec/kubescape`

```
docker run -v "$(pwd)/example.yaml:/app/example.yaml quay.io/armosec/kubescape scan framework nsa /app/example.yaml
docker run -v "$(pwd)/example.yaml:/app/example.yaml quay.io/armosec/kubescape scan /app/example.yaml
```

# Submit data manually

build.py: 38 changed lines
@@ -52,22 +52,40 @@ def main():
# Create build directory
buildDir = getBuildDir()

ks_file = os.path.join(buildDir, packageName)
hash_file = ks_file + ".sha256"

if not os.path.isdir(buildDir):
os.makedirs(buildDir)

# Build kubescape
ldflags = "-w -s -X %s=%s -X %s=%s -X %s=%s -X %s=%s -X %s=%s" \
% (buildUrl, releaseVersion, BE_SERVER_CONST, ArmoBEServer,
ER_SERVER_CONST, ArmoERServer, WEBSITE_CONST, ArmoWebsite,
AUTH_SERVER_CONST, ArmoAuthServer)
status = subprocess.call(["go", "build", "-o", "%s/%s" % (buildDir, packageName), "-ldflags" ,ldflags])
ldflags = "-w -s"
if releaseVersion:
ldflags += " -X {}={}".format(buildUrl, releaseVersion)
if ArmoBEServer:
ldflags += " -X {}={}".format(BE_SERVER_CONST, ArmoBEServer)
if ArmoERServer:
ldflags += " -X {}={}".format(ER_SERVER_CONST, ArmoERServer)
if ArmoWebsite:
ldflags += " -X {}={}".format(WEBSITE_CONST, ArmoWebsite)
if ArmoAuthServer:
ldflags += " -X {}={}".format(AUTH_SERVER_CONST, ArmoAuthServer)

build_command = ["go", "build", "-o", ks_file, "-ldflags" ,ldflags]

print("Building kubescape and saving here: {}".format(ks_file))
print("Build command: {}".format(" ".join(build_command)))

status = subprocess.call(build_command)
checkStatus(status, "Failed to build kubescape")

sha1 = hashlib.sha1()
with open(buildDir + "/" + packageName, "rb") as kube:
sha1.update(kube.read())
with open(buildDir + "/" + packageName + ".sha1", "w") as kube_sha:
kube_sha.write(sha1.hexdigest())
sha256 = hashlib.sha256()
with open(ks_file, "rb") as kube:
sha256.update(kube.read())
with open(hash_file, "w") as kube_sha:
hash = sha256.hexdigest()
print("kubescape hash: {}, file: {}".format(hash, hash_file))
kube_sha.write(sha256.hexdigest())

print("Build Done")
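For context on the build.py change above: each `-X importpath.Var=value` pair passed through `-ldflags` overwrites a package-level string variable at link time, which is why the script now appends a pair only when the corresponding value is actually set, instead of injecting empty values. A minimal sketch of the receiving side, with hypothetical variable names rather than kubescape's actual ones:

```go
// A hypothetical illustration of variables meant to be overridden at link time with:
//   go build -ldflags "-X main.version=v2.0.123 -X main.backendServer=api.example.com"
package main

import "fmt"

// Only package-level string variables like these can be rewritten by the linker's -X flag.
var (
	version       = "unknown"
	backendServer = ""
)

func main() {
	if backendServer == "" {
		// When the build script skips a -X pair, the compiled-in default survives.
		fmt.Printf("version %s (no backend configured)\n", version)
		return
	}
	fmt.Printf("version %s, backend %s\n", version, backendServer)
}
```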
@@ -1,9 +1,9 @@
FROM golang:1.17-alpine as builder
#ENV GOPROXY=https://goproxy.io,direct

ARG run_number
ARG image_version

ENV RELEASE=v1.0.${run_number}
ENV RELEASE=$image_version

ENV GO111MODULE=

@@ -21,7 +21,6 @@ ADD . .
RUN python build.py

RUN ls -ltr build/ubuntu-latest
RUN cat /work/build/ubuntu-latest/kubescape.sha1

FROM alpine
COPY --from=builder /work/build/ubuntu-latest/kubescape /usr/bin/kubescape

@@ -11,7 +11,6 @@ import (
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
@@ -113,12 +112,6 @@ func NewLocalConfig(backendAPI getter.IBackend, customerGUID, clusterName string
|
||||
lc.backendAPI.SetClientID(lc.configObj.ClientID)
|
||||
lc.backendAPI.SetSecretKey(lc.configObj.SecretKey)
|
||||
|
||||
if lc.configObj.AccountID != "" {
|
||||
if err := lc.SetTenant(); err != nil {
|
||||
logger.L().Error(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
return lc
|
||||
}
|
||||
|
||||
@@ -228,12 +221,6 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
|
||||
c.backendAPI.SetClientID(c.configObj.ClientID)
|
||||
c.backendAPI.SetSecretKey(c.configObj.SecretKey)
|
||||
|
||||
if c.configObj.AccountID != "" {
|
||||
if err := c.SetTenant(); err != nil {
|
||||
logger.L().Error(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
@@ -26,6 +27,11 @@ var (
|
||||
armoFEURL = "portal.armo.cloud"
|
||||
armoAUTHURL = "auth.armo.cloud"
|
||||
|
||||
armoStageERURL = "report-ks.eustage2.cyberarmorsoft"
|
||||
armoStageBEURL = "api-stage.armo.cloud"
|
||||
armoStageFEURL = "armoui.eustage2.cyberarmorsoft.com"
|
||||
armoStageAUTHURL = "eggauth.eustage2.cyberarmorsoft.com"
|
||||
|
||||
armoDevERURL = "report.eudev3.cyberarmorsoft.com"
|
||||
armoDevBEURL = "api-dev.armo.cloud"
|
||||
armoDevFEURL = "armoui-dev.eudev3.cyberarmorsoft.com"
|
||||
@@ -50,6 +56,7 @@ type ArmoAPI struct {
|
||||
var globalArmoAPIConnector *ArmoAPI
|
||||
|
||||
func SetARMOAPIConnector(armoAPI *ArmoAPI) {
|
||||
logger.L().Debug("Armo URLs", helpers.String("api", armoAPI.apiURL), helpers.String("auth", armoAPI.authURL), helpers.String("report", armoAPI.erURL), helpers.String("UI", armoAPI.feURL))
|
||||
globalArmoAPIConnector = armoAPI
|
||||
}
|
||||
|
||||
@@ -82,6 +89,17 @@ func NewARMOAPIProd() *ArmoAPI {
|
||||
return apiObj
|
||||
}
|
||||
|
||||
func NewARMOAPIStaging() *ArmoAPI {
|
||||
apiObj := newArmoAPI()
|
||||
|
||||
apiObj.apiURL = armoStageBEURL
|
||||
apiObj.erURL = armoStageERURL
|
||||
apiObj.feURL = armoStageFEURL
|
||||
apiObj.authURL = armoStageAUTHURL
|
||||
|
||||
return apiObj
|
||||
}
|
||||
|
||||
func NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL, armoAUTHURL string) *ArmoAPI {
|
||||
apiObj := newArmoAPI()
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/armoFrameworks"
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.accountID)
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
if isNativeFramework(frameworkName) {
|
||||
q.Add("frameworkName", strings.ToUpper(frameworkName))
|
||||
} else {
|
||||
@@ -35,7 +35,7 @@ func (armoAPI *ArmoAPI) getListFrameworkURL() string {
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/armoFrameworks"
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.accountID)
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
return u.String()
|
||||
@@ -47,7 +47,7 @@ func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
|
||||
u.Path = "api/v1/armoPostureExceptions"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.accountID)
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
// if clusterName != "" { // TODO - fix customer name support in Armo BE
|
||||
// q.Add("clusterName", clusterName)
|
||||
// }
|
||||
@@ -63,7 +63,7 @@ func (armoAPI *ArmoAPI) postExceptionsURL() string {
|
||||
u.Path = "api/v1/postureExceptionPolicy"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.accountID)
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
return u.String()
|
||||
@@ -76,7 +76,7 @@ func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
|
||||
u.Path = "api/v1/armoCustomerConfiguration"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.accountID)
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
if clusterName != "" { // TODO - fix customer name support in Armo BE
|
||||
q.Add("clusterName", clusterName)
|
||||
}
|
||||
@@ -156,3 +156,10 @@ func (armoAPI *ArmoAPI) appendAuthHeaders(headers map[string]string) {
|
||||
headers["Cookie"] = fmt.Sprintf("auth=%s", armoAPI.authCookie)
|
||||
}
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getCustomerGUIDFallBack() string {
|
||||
if armoAPI.accountID != "" {
|
||||
return armoAPI.accountID
|
||||
}
|
||||
return "11111111-1111-1111-1111-111111111111"
|
||||
}
|
||||
|
||||
@@ -13,11 +13,7 @@ import (
|
||||
)
|
||||
|
||||
func GetDefaultPath(name string) string {
|
||||
defaultfilePath := filepath.Join(DefaultLocalStore, name)
|
||||
if homeDir, err := os.UserHomeDir(); err == nil {
|
||||
defaultfilePath = filepath.Join(homeDir, defaultfilePath)
|
||||
}
|
||||
return defaultfilePath
|
||||
return filepath.Join(DefaultLocalStore, name)
|
||||
}
|
||||
|
||||
func SaveInFile(policy interface{}, pathStr string) error {
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
@@ -13,7 +14,15 @@ import (
|
||||
// =======================================================================================================================
|
||||
// ============================================== LoadPolicy =============================================================
|
||||
// =======================================================================================================================
|
||||
const DefaultLocalStore = ".kubescape"
|
||||
var DefaultLocalStore = getCacheDir()
|
||||
|
||||
func getCacheDir() string {
|
||||
defaultDirPath := ".kubescape"
|
||||
if homeDir, err := os.UserHomeDir(); err == nil {
|
||||
defaultDirPath = filepath.Join(homeDir, defaultDirPath)
|
||||
}
|
||||
return defaultDirPath
|
||||
}
|
||||
|
||||
// Load policies from a local repository
|
||||
type LoadPolicy struct {
|
||||
|
||||
@@ -2,9 +2,12 @@ package logger
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/cautils/logger/prettylogger"
|
||||
"github.com/armosec/kubescape/cautils/logger/zaplogger"
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
type ILogger interface {
|
||||
@@ -26,16 +29,23 @@ var l ILogger
|
||||
|
||||
func L() ILogger {
|
||||
if l == nil {
|
||||
InitializeLogger()
|
||||
InitializeLogger("")
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func InitializeLogger() {
|
||||
initializeLogger()
|
||||
}
|
||||
func InitializeLogger(loggerName string) {
|
||||
|
||||
func initializeLogger() {
|
||||
// TODO - support zap logger
|
||||
l = prettylogger.NewPrettyLogger()
|
||||
switch strings.ToLower(loggerName) {
|
||||
case "zap":
|
||||
l = zaplogger.NewZapLogger()
|
||||
case "pretty":
|
||||
l = prettylogger.NewPrettyLogger()
|
||||
default:
|
||||
if isatty.IsTerminal(os.Stdout.Fd()) {
|
||||
l = prettylogger.NewPrettyLogger()
|
||||
} else {
|
||||
l = zaplogger.NewZapLogger()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,7 +57,10 @@ func (pl *PrettyLogger) print(level helpers.Level, msg string, details ...helper
|
||||
if !level.Skip(pl.level) {
|
||||
pl.mutex.Lock()
|
||||
prefix(level)(pl.writer, "[%s] ", level.String())
|
||||
message(pl.writer, fmt.Sprintf("%s. %s\n", msg, detailsToString(details)))
|
||||
if d := detailsToString(details); d != "" {
|
||||
msg = fmt.Sprintf("%s. %s", msg, d)
|
||||
}
|
||||
message(pl.writer, fmt.Sprintf("%s\n", msg))
|
||||
pl.mutex.Unlock()
|
||||
}
|
||||
|
||||
@@ -68,7 +71,7 @@ func detailsToString(details []helpers.IDetails) string {
|
||||
for i := range details {
|
||||
s += fmt.Sprintf("%s: %s", details[i].Key(), details[i].Value())
|
||||
if i < len(details)-1 {
|
||||
s += ";"
|
||||
s += "; "
|
||||
}
|
||||
}
|
||||
return s
|
||||
|
||||
@@ -10,20 +10,40 @@ import (
|
||||
|
||||
type ZapLogger struct {
|
||||
zapL *zap.Logger
|
||||
cfg zap.Config
|
||||
}
|
||||
|
||||
func NewZapLogger() *ZapLogger {
|
||||
ec := zap.NewProductionEncoderConfig()
|
||||
ec.EncodeTime = zapcore.RFC3339TimeEncoder
|
||||
cfg := zap.NewProductionConfig()
|
||||
cfg.DisableCaller = true
|
||||
cfg.DisableStacktrace = true
|
||||
cfg.Encoding = "json"
|
||||
cfg.EncoderConfig = ec
|
||||
|
||||
zapLogger, err := cfg.Build()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &ZapLogger{
|
||||
zapL: zap.L(),
|
||||
zapL: zapLogger,
|
||||
cfg: cfg,
|
||||
}
|
||||
}
|
||||
|
||||
func (zl *ZapLogger) GetLevel() string { return "" }
|
||||
func (zl *ZapLogger) GetLevel() string { return zl.cfg.Level.Level().String() }
|
||||
func (zl *ZapLogger) SetWriter(w *os.File) {}
|
||||
func (zl *ZapLogger) GetWriter() *os.File { return nil }
|
||||
func GetWriter() *os.File { return nil }
|
||||
|
||||
func (zl *ZapLogger) SetLevel(level string) error {
|
||||
return nil
|
||||
l := zapcore.Level(1)
|
||||
err := l.Set(level)
|
||||
if err == nil {
|
||||
zl.cfg.Level.SetLevel(l)
|
||||
}
|
||||
return err
|
||||
}
|
||||
func (zl *ZapLogger) Fatal(msg string, details ...helpers.IDetails) {
|
||||
zl.zapL.Fatal(msg, detailsToZapFields(details)...)
|
||||
|
||||
@@ -73,6 +73,7 @@ type ScanInfo struct {
|
||||
Local bool // Do not submit results
|
||||
Account string // account ID
|
||||
Logger string // logger level
|
||||
CacheDir string // cached dir
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
)
|
||||
|
||||
var downloadFunc = map[string]func(*cautils.DownloadInfo) error{
|
||||
@@ -67,7 +69,7 @@ func downloadArtifacts(downloadInfo *cautils.DownloadInfo) error {
|
||||
}
|
||||
for artifact := range artifacts {
|
||||
if err := downloadArtifact(&cautils.DownloadInfo{Target: artifact, Path: downloadInfo.Path, FileName: fmt.Sprintf("%s.json", artifact)}, artifacts); err != nil {
|
||||
fmt.Printf("error downloading %s, error: %s", artifact, err)
|
||||
logger.L().Error("error downloading", helpers.String("artifact", artifact), helpers.Error(err))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -75,6 +77,7 @@ func downloadArtifacts(downloadInfo *cautils.DownloadInfo) error {
|
||||
|
||||
func downloadConfigInputs(downloadInfo *cautils.DownloadInfo) error {
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
|
||||
controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetAccountID(), nil)
|
||||
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetClusterName())
|
||||
if err != nil {
|
||||
@@ -88,13 +91,14 @@ func downloadConfigInputs(downloadInfo *cautils.DownloadInfo) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadExceptions(downloadInfo *cautils.DownloadInfo) error {
|
||||
var err error
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
|
||||
exceptionsGetter := getExceptionsGetter("")
|
||||
exceptions := []armotypes.PostureExceptionPolicy{}
|
||||
if tenant.GetAccountID() != "" {
|
||||
@@ -111,13 +115,14 @@ func downloadExceptions(downloadInfo *cautils.DownloadInfo) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadFramework(downloadInfo *cautils.DownloadInfo) error {
|
||||
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetAccountID(), true, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
@@ -131,7 +136,7 @@ func downloadFramework(downloadInfo *cautils.DownloadInfo) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s': '%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, fw.Name, filepath.Join(downloadInfo.Path, (strings.ToLower(fw.Name)+".json")))
|
||||
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("name", fw.Name), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
|
||||
}
|
||||
// return fmt.Errorf("missing framework name")
|
||||
} else {
|
||||
@@ -146,7 +151,7 @@ func downloadFramework(downloadInfo *cautils.DownloadInfo) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("name", framework.Name), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -154,6 +159,7 @@ func downloadFramework(downloadInfo *cautils.DownloadInfo) error {
|
||||
func downloadControl(downloadInfo *cautils.DownloadInfo) error {
|
||||
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetAccountID(), false, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
@@ -171,6 +177,6 @@ func downloadControl(downloadInfo *cautils.DownloadInfo) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("name", downloadInfo.Name), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ func CliList(listPolicies *cliobjects.ListPolicies) error {
|
||||
sep := "\n * "
|
||||
usageCmd := strings.TrimSuffix(listPolicies.Target, "s")
|
||||
fmt.Printf("Supported %s:%s%s\n", listPolicies.Target, sep, strings.Join(policies, sep))
|
||||
fmt.Printf("\nUseage:\n")
|
||||
fmt.Printf("\nUsage:\n")
|
||||
fmt.Printf("$ kubescape scan %s \"name\"\n", usageCmd)
|
||||
fmt.Printf("$ kubescape scan %s \"name-0\",\"name-1\"\n\n", usageCmd)
|
||||
return nil
|
||||
@@ -49,6 +49,7 @@ func listFrameworks(listPolicies *cliobjects.ListPolicies) ([]string, error) {
|
||||
|
||||
func listControls(listPolicies *cliobjects.ListPolicies) ([]string, error) {
|
||||
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetAccountID(), false, nil)
|
||||
l := getter.ListName
|
||||
if listPolicies.ListIDs {
|
||||
|
||||
@@ -33,7 +33,10 @@ func SubmitExceptions(accountID, excPath string) error {
|
||||
logger.L().Info("submitting exceptions", helpers.String("path", excPath))
|
||||
|
||||
// load cached config
|
||||
getTenantConfig(accountID, "", getKubernetesApi())
|
||||
tenantConfig := getTenantConfig(accountID, "", getKubernetesApi())
|
||||
if err := tenantConfig.SetTenant(); err != nil {
|
||||
logger.L().Error("failed setting account ID", helpers.Error(err))
|
||||
}
|
||||
|
||||
// load exceptions from file
|
||||
loader := getter.NewLoadPolicy([]string{excPath})
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
package clihandler
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
func CliView() error {
|
||||
tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
|
||||
fmt.Printf("%s\n", tenant.GetConfigObj().Config())
|
||||
fmt.Fprintf(os.Stderr, "%s\n", tenant.GetConfigObj().Config())
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -36,8 +36,7 @@ var getCmd = &cobra.Command{
|
||||
val, err := clusterConfig.GetValueByKeyFromConfigMap(key)
|
||||
if err != nil {
|
||||
if err.Error() == "value does not exist." {
|
||||
fmt.Printf("Could net get value from configmap, reason: %s\n", err)
|
||||
return nil
|
||||
return fmt.Errorf("failed to get value from configmap, reason: %s", err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
@@ -65,10 +66,16 @@ var downloadCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
func init() {
|
||||
// cobra.OnInitialize(initConfig)
|
||||
cobra.OnInitialize(initDownload)
|
||||
|
||||
rootCmd.AddCommand(downloadCmd)
|
||||
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If not specified, will save in `~/.kubescape/<policy name>.json`")
|
||||
|
||||
}
|
||||
|
||||
func initDownload() {
|
||||
if filepath.Ext(downloadInfo.Path) == ".json" {
|
||||
downloadInfo.Path, downloadInfo.FileName = filepath.Split(downloadInfo.Path)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,8 +31,7 @@ var localGetCmd = &cobra.Command{
|
||||
val, err := cautils.GetValueFromConfigJson(key)
|
||||
if err != nil {
|
||||
if err.Error() == "value does not exist." {
|
||||
fmt.Printf("Could net get value from: %s, reason: %s\n", cautils.ConfigFileFullPath(), err)
|
||||
return nil
|
||||
return fmt.Errorf("failed to get value from: %s, reason: %s", cautils.ConfigFileFullPath(), err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliinterfaces"
|
||||
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
|
||||
@@ -23,6 +24,9 @@ var rabcCmd = &cobra.Command{
|
||||
|
||||
// get config
|
||||
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
|
||||
if err := clusterConfig.SetTenant(); err != nil {
|
||||
logger.L().Error("failed setting account ID", helpers.Error(err))
|
||||
}
|
||||
|
||||
// list RBAC
|
||||
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetAccountID(), clusterConfig.GetClusterName()))
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliinterfaces"
|
||||
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
|
||||
@@ -63,6 +64,9 @@ var resultsCmd = &cobra.Command{
|
||||
|
||||
// get config
|
||||
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
|
||||
if err := clusterConfig.SetTenant(); err != nil {
|
||||
logger.L().Error("failed setting account ID", helpers.Error(err))
|
||||
}
|
||||
|
||||
resultsObjects := NewResultsObject(clusterConfig.GetAccountID(), clusterConfig.GetClusterName(), args[0])
|
||||
|
||||
|
||||
@@ -3,8 +3,10 @@ package cmd
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
@@ -31,6 +33,7 @@ var ksExamples = `
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "kubescape",
|
||||
Version: cautils.BuildNumber,
|
||||
Short: "Kubescape is a tool for testing Kubernetes security posture",
|
||||
Long: `Kubescape is a tool for testing Kubernetes security posture based on NSA \ MITRE ATT&CK® and other frameworks specifications`,
|
||||
Example: ksExamples,
|
||||
@@ -40,20 +43,42 @@ func Execute() {
|
||||
rootCmd.Execute()
|
||||
}
|
||||
func init() {
|
||||
cobra.OnInitialize(initLogger, initEnvironment)
|
||||
cobra.OnInitialize(initLogger, initLoggerLevel, initEnvironment, initCacheDir)
|
||||
|
||||
flag.CommandLine.StringVar(&armoBEURLs, "environment", "", envFlagUsage)
|
||||
rootCmd.PersistentFlags().StringVar(&armoBEURLs, "environment", "", envFlagUsage)
|
||||
rootCmd.PersistentFlags().MarkHidden("environment")
|
||||
rootCmd.PersistentFlags().StringVar(&scanInfo.Logger, "logger", "info", fmt.Sprintf("Logger level. Supported: %s", strings.Join(helpers.SupportedLevels(), "/")))
|
||||
rootCmd.PersistentFlags().StringVarP(&scanInfo.Logger, "logger", "l", helpers.InfoLevel.String(), fmt.Sprintf("Logger level. Supported: %s [$KS_LOGGER]", strings.Join(helpers.SupportedLevels(), "/")))
|
||||
rootCmd.PersistentFlags().StringVar(&scanInfo.CacheDir, "cache-dir", getter.DefaultLocalStore, "Cache directory [$KS_CACHE_DIR]")
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
func initLogger() {
|
||||
if l := os.Getenv("KS_LOGGER_NAME"); l != "" {
|
||||
logger.InitializeLogger(l)
|
||||
}
|
||||
}
|
||||
func initLoggerLevel() {
|
||||
if scanInfo.Logger != helpers.InfoLevel.String() {
|
||||
} else if l := os.Getenv("KS_LOGGER"); l != "" {
|
||||
scanInfo.Logger = l
|
||||
}
|
||||
if err := logger.L().SetLevel(scanInfo.Logger); err != nil {
|
||||
logger.L().Fatal(fmt.Sprintf("supported levels: %s", strings.Join(helpers.SupportedLevels(), "/")), helpers.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func initCacheDir() {
|
||||
if scanInfo.CacheDir != getter.DefaultLocalStore {
|
||||
getter.DefaultLocalStore = scanInfo.CacheDir
|
||||
} else if cacheDir := os.Getenv("KS_CACHE_DIR"); cacheDir != "" {
|
||||
getter.DefaultLocalStore = cacheDir
|
||||
} else {
|
||||
return // using default cache di location
|
||||
}
|
||||
|
||||
logger.L().Debug("cache dir updated", helpers.String("path", getter.DefaultLocalStore))
|
||||
}
|
||||
func initEnvironment() {
|
||||
urlSlices := strings.Split(armoBEURLs, ",")
|
||||
if len(urlSlices) != 1 && len(urlSlices) < 3 {
|
||||
@@ -62,8 +87,10 @@ func initEnvironment() {
|
||||
switch len(urlSlices) {
|
||||
case 1:
|
||||
switch urlSlices[0] {
|
||||
case "dev":
|
||||
case "dev", "development":
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPIDev())
|
||||
case "stage", "staging":
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPIStaging())
|
||||
case "":
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPIProd())
|
||||
default:
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -13,7 +15,7 @@ var versionCmd = &cobra.Command{
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, "", "", "version"))
|
||||
logger.L().Info("Your current version is: " + cautils.BuildNumber)
|
||||
fmt.Fprintln(os.Stdout, "Your current version is: "+cautils.BuildNumber)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -36,6 +36,7 @@ type componentInterfaces struct {
|
||||
|
||||
func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
|
||||
// ================== setup k8s interface object ======================================
|
||||
var k8s *k8sinterface.KubernetesApi
|
||||
if scanInfo.GetScanningEnvironment() == cautils.ScanCluster {
|
||||
k8s = getKubernetesApi()
|
||||
@@ -44,11 +45,20 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
}
|
||||
}
|
||||
|
||||
// ================== setup tenant object ======================================
|
||||
|
||||
tenantConfig := getTenantConfig(scanInfo.Account, scanInfo.KubeContext, k8s)
|
||||
|
||||
// Set submit behavior AFTER loading tenant config
|
||||
setSubmitBehavior(scanInfo, tenantConfig)
|
||||
|
||||
// ================== version testing ======================================
|
||||
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanInfo.GetScanningEnvironment()))
|
||||
|
||||
// ================== setup host sensor object ======================================
|
||||
|
||||
hostSensorHandler := getHostSensorHandler(scanInfo, k8s)
|
||||
if err := hostSensorHandler.Init(); err != nil {
|
||||
logger.L().Error("failed to init host sensor", helpers.Error(err))
|
||||
@@ -59,24 +69,29 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
scanInfo.ExcludedNamespaces = fmt.Sprintf("%s,%s", scanInfo.ExcludedNamespaces, hostSensorHandler.GetNamespace())
|
||||
}
|
||||
|
||||
// ================== setup registry adaptors ======================================
|
||||
|
||||
registryAdaptors, err := resourcehandler.NewRegistryAdaptors()
|
||||
if err != nil {
|
||||
logger.L().Error("failed to initialize registry adaptors", helpers.Error(err))
|
||||
}
|
||||
|
||||
// ================== setup resource collector object ======================================
|
||||
|
||||
resourceHandler := getResourceHandler(scanInfo, tenantConfig, k8s, hostSensorHandler, registryAdaptors)
|
||||
|
||||
// ================== setup reporter & printer objects ======================================
|
||||
|
||||
// reporting behavior - setup reporter
|
||||
reportHandler := getReporter(tenantConfig, scanInfo.Submit)
|
||||
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanInfo.GetScanningEnvironment()))
|
||||
|
||||
// setup printer
|
||||
printerHandler := printerv1.GetPrinter(scanInfo.Format, scanInfo.VerboseMode)
|
||||
// printerHandler = printerv2.GetPrinter(scanInfo.Format, scanInfo.VerboseMode)
|
||||
printerHandler.SetWriter(scanInfo.Output)
|
||||
|
||||
// ================== return interface ======================================
|
||||
|
||||
return componentInterfaces{
|
||||
tenantConfig: tenantConfig,
|
||||
resourceHandler: resourceHandler,
|
||||
@@ -177,8 +192,8 @@ func askUserForHostSensor() bool {
|
||||
if ssss, err := os.Stdin.Stat(); err == nil {
|
||||
// fmt.Printf("Found stdin type: %s\n", ssss.Mode().Type())
|
||||
if ssss.Mode().Type()&(fs.ModeDevice|fs.ModeCharDevice) > 0 { //has TTY
|
||||
fmt.Printf("Would you like to scan K8s nodes? [y/N]. This is required to collect valuable data for certain controls\n")
|
||||
fmt.Printf("Use --enable-host-scan flag to suppress this message\n")
|
||||
fmt.Fprintf(os.Stderr, "Would you like to scan K8s nodes? [y/N]. This is required to collect valuable data for certain controls\n")
|
||||
fmt.Fprintf(os.Stderr, "Use --enable-host-scan flag to suppress this message\n")
|
||||
var b []byte = make([]byte, 1)
|
||||
if n, err := os.Stdin.Read(b); err == nil {
|
||||
if n > 0 && len(b) > 0 && (b[0] == 'y' || b[0] == 'Y') {
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/hostsensorutils"
|
||||
"github.com/armosec/kubescape/resourcehandler"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
@@ -58,6 +59,7 @@ func getReporter(tenantConfig cautils.ITenantConfig, submit bool) reporter.IRepo
|
||||
|
||||
func getResourceHandler(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor, registryAdaptors *resourcehandler.RegistryAdaptors) resourcehandler.IResourceHandler {
|
||||
if len(scanInfo.InputPatterns) > 0 || k8s == nil {
|
||||
// scanInfo.HostSensor.SetBool(false)
|
||||
return resourcehandler.NewFileResourceHandler(scanInfo.InputPatterns, registryAdaptors)
|
||||
}
|
||||
getter.GetArmoAPIConnector()
|
||||
@@ -197,7 +199,7 @@ func getConfigInputsGetter(ControlsInputs string, accountID string, downloadRele
|
||||
|
||||
func getDownloadReleasedPolicy(downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull policy, fallback to cache
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: failed to get policies from github release, loading policies from cache\n")
|
||||
logger.L().Warning("failed to get policies from github release, loading policies from cache", helpers.Error(err))
|
||||
return getter.NewLoadPolicy(getDefaultFrameworksPaths())
|
||||
} else {
|
||||
return downloadReleasedPolicy
|
||||
@@ -214,8 +216,8 @@ func getDefaultFrameworksPaths() []string {
|
||||
|
||||
func listFrameworksNames(policyGetter getter.IPolicyGetter) []string {
|
||||
fw, err := policyGetter.ListFrameworks()
|
||||
if err != nil {
|
||||
fw = getDefaultFrameworksPaths()
|
||||
if err == nil {
|
||||
return fw
|
||||
}
|
||||
return fw
|
||||
return getter.NativeFrameworks
|
||||
}
|
||||
|
||||
BIN docs/summary.png (binary file not shown; before: 60 KiB, after: 70 KiB)
@@ -1,85 +0,0 @@
|
||||
# Periodically Kubescape Scanning
|
||||
|
||||
You can scan your cluster periodically by adding a `CronJob` that will repeatedly trigger kubescape
|
||||
|
||||
* Setup [scanning & submitting](#scanning-and-submitting)
|
||||
* Setup [scanning without submitting](#scanning-without-submitting)
|
||||
|
||||
## Scanning And Submitting
|
||||
|
||||
If you wish to periodically scan and submit the result to the [Kubescape SaaS version](https://portal.armo.cloud/) where you can benefit the features the SaaS version provides, please follow this instructions ->
|
||||
|
||||
1. Apply kubescape namespace
|
||||
```
|
||||
kubectl apply ks-namespace.yaml
|
||||
```
|
||||
|
||||
2. Apply serviceAccount and roles
|
||||
```
|
||||
kubectl apply ks-serviceAccount.yaml
|
||||
```
|
||||
|
||||
3. Setup and apply configMap
|
||||
|
||||
Before you apply the configMap you need to set the account ID and cluster name in the `ks-configMap.yaml` file.
|
||||
|
||||
* Set cluster name:
|
||||
Run `kubectl config current-context` and set the result in the `data.clusterName` field
|
||||
* Set account ID:
|
||||
1. Navigate to the [Kubescape SaaS version](https://portal.armo.cloud/) and login/sign up for free
|
||||
2. Click the `Add Cluster` button on the top right of the page
|
||||
</br>
|
||||
<img src="screenshots/add-cluster.png" alt="add-cluster">
|
||||
3. Copy the value of `--account` and set it in the `data.customerGUID` field
|
||||
</br>
|
||||
<img src="screenshots/account.png" alt="account">
|
||||
|
||||
Make sure the configMap looks as following;
|
||||
```
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubescape
|
||||
labels:
|
||||
app: kubescape
|
||||
namespace: kubescape
|
||||
data:
|
||||
config.json: |
|
||||
{
|
||||
"customerGUID": "XXXXXXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"clusterName": "my-awesome-cluster-name"
|
||||
}
|
||||
```
|
||||
|
||||
Finally, apply the configMap
|
||||
```
|
||||
kubectl apply ks-configMap.yaml
|
||||
```
|
||||
|
||||
4. Apply CronJob
|
||||
|
||||
Before you apply the cronJob, make sure the scanning frequency suites your needs
|
||||
```
|
||||
kubectl apply ks-cronJob-submit.yaml
|
||||
```
|
||||
|
||||
## Scanning Without Submitting
|
||||
|
||||
If you wish to periodically scan but not submit the scan results, follow this instructions ->
|
||||
|
||||
1. Apply kubescape namespace
|
||||
```
|
||||
kubectl apply ks-namespace.yaml
|
||||
```
|
||||
|
||||
2. Apply serviceAccount and roles
|
||||
```
|
||||
kubectl apply ks-serviceAccount.yaml
|
||||
```
|
||||
|
||||
3. Apply CronJob
|
||||
|
||||
Before you apply the cronJob, make sure the scanning frequency suites your needs
|
||||
```
|
||||
kubectl apply ks-cronJob-non-submit.yaml
|
||||
```
|
||||
@@ -1,14 +0,0 @@
|
||||
# ------------------- Kubescape User/Customer ID ------------------- #
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubescape
|
||||
labels:
|
||||
app: kubescape
|
||||
namespace: kubescape
|
||||
data:
|
||||
config.json: |
|
||||
{
|
||||
"customerGUID": "<ID>",
|
||||
"clusterName": "<cluster name>"
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
apiVersion: batch/v1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: kubescape
|
||||
labels:
|
||||
app: kubescape
|
||||
namespace: kubescape
|
||||
spec:
|
||||
# ┌────────────────── timezone (optional)
|
||||
# | ┌───────────── minute (0 - 59)
|
||||
# | │ ┌───────────── hour (0 - 23)
|
||||
# | │ │ ┌───────────── day of the month (1 - 31)
|
||||
# | │ │ │ ┌───────────── month (1 - 12)
|
||||
# | │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
|
||||
# | │ │ │ │ │ 7 is also Sunday on some systems)
|
||||
# | │ │ │ │ │
|
||||
# | │ │ │ │ │
|
||||
# CRON_TZ=UTC * * * * *
|
||||
schedule: "0 0 1 * *"
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: kubescape
|
||||
image: quay.io/armosec/kubescape:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
command: ["/bin/sh","-c"]
|
||||
args:
|
||||
- kubescape scan framework nsa
|
||||
restartPolicy: OnFailure
|
||||
serviceAccountName: kubescape-discovery
|
||||
@@ -1,40 +0,0 @@
|
||||
apiVersion: batch/v1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: kubescape
|
||||
labels:
|
||||
app: kubescape
|
||||
namespace: kubescape
|
||||
spec:
|
||||
# ┌────────────────── timezone (optional)
|
||||
# | ┌───────────── minute (0 - 59)
|
||||
# | │ ┌───────────── hour (0 - 23)
|
||||
# | │ │ ┌───────────── day of the month (1 - 31)
|
||||
# | │ │ │ ┌───────────── month (1 - 12)
|
||||
# | │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
|
||||
# | │ │ │ │ │ 7 is also Sunday on some systems)
|
||||
# | │ │ │ │ │
|
||||
# | │ │ │ │ │
|
||||
# CRON_TZ=UTC * * * * *
|
||||
schedule: "0 0 1 * *"
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: kubescape
|
||||
image: quay.io/armosec/kubescape:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
command: ["/bin/sh","-c"]
|
||||
args:
|
||||
- kubescape scan framework nsa --submit
|
||||
volumeMounts:
|
||||
- name: kubescape-config-volume
|
||||
mountPath: /root/.kubescape/config.json
|
||||
subPath: config.json
|
||||
restartPolicy: OnFailure
|
||||
serviceAccountName: kubescape-discovery
|
||||
volumes:
|
||||
- name: kubescape-config-volume
|
||||
configMap:
|
||||
name: kubescape
|
||||
@@ -1,7 +0,0 @@
|
||||
# ------------------- Kubescape User/Customer ID ------------------- #
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubescape
|
||||
labels:
|
||||
app: kubescape
|
||||
@@ -1,61 +0,0 @@
|
||||
---
|
||||
# ------------------- Kubescape Service Account ------------------- #
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
app: kubescape
|
||||
name: kubescape-discovery
|
||||
namespace: kubescape
|
||||
|
||||
---
|
||||
# ------------------- Kubescape Role & Role Binding ------------------- #
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: kubescape-discovery-role
|
||||
namespace: kubescape
|
||||
rules:
|
||||
- apiGroups: ["*"]
|
||||
resources: ["*"]
|
||||
verbs: ["get", "list", "describe"]
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: kubescape-discovery-binding
|
||||
namespace: kubescape
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: kubescape-discovery-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubescape-discovery
|
||||
|
||||
---
|
||||
# ------------------- Kubescape Cluster Role & Cluster Role Binding ------------------- #
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: kubescape-discovery-clusterroles
|
||||
# "namespace" omitted since ClusterRoles are not namespaced
|
||||
rules:
|
||||
- apiGroups: ["*"]
|
||||
resources: ["*"]
|
||||
verbs: ["get", "list", "describe"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: kubescape-discovery-role-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubescape-discovery-clusterroles
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubescape-discovery
|
||||
namespace: kubescape
|
||||
Binary file not shown (before: 41 KiB).
Binary file not shown (before: 120 KiB).
@@ -1,130 +0,0 @@
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# This file is DEPRECATE, please navigate to the official docs ->
# https://github.com/armosec/kubescape/tree/master/examples/cronJob-support/README.md
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

---
# ------------------- Kubescape Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: kubescape
  name: kubescape-discovery
  namespace: kubescape

---
# ------------------- Kubescape Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubescape-discovery-role
  namespace: kubescape
rules:
- apiGroups: ["*"]
  resources: ["*"]
  verbs: ["get", "list", "describe"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubescape-discovery-binding
  namespace: kubescape
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubescape-discovery-role
subjects:
- kind: ServiceAccount
  name: kubescape-discovery

---
# ------------------- Kubescape Cluster Role & Cluster Role Binding ------------------- #
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubescape-discovery-clusterroles
  # "namespace" omitted since ClusterRoles are not namespaced
rules:
- apiGroups: ["*"]
  resources: ["*"]
  verbs: ["get", "list", "describe"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubescape-discovery-role-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubescape-discovery-clusterroles
subjects:
- kind: ServiceAccount
  name: kubescape-discovery
  namespace: kubescape

---
# ------------------- Kubescape User/Customer GUID ------------------- #
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubescape-configmap
  labels:
    app: kubescape
  namespace: kubescape
data:
  config.json: |
    {
      "customerGUID": <MyGUID>,
      "clusterName": <MyK8sClusterName>
    }

---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: kubescape
  labels:
    app: kubescape
  namespace: kubescape
spec:
  # ┌────────────────── timezone (optional)
  # | ┌───────────── minute (0 - 59)
  # | │ ┌───────────── hour (0 - 23)
  # | │ │ ┌───────────── day of the month (1 - 31)
  # | │ │ │ ┌───────────── month (1 - 12)
  # | │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
  # | │ │ │ │ │                                       7 is also Sunday on some systems)
  # | │ │ │ │ │
  # | │ │ │ │ │
  # CRON_TZ=UTC * * * * *
  schedule: "0 0 1 * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: kubescape
            image: quay.io/armosec/kubescape:latest
            imagePullPolicy: IfNotPresent
            command: ["/bin/sh","-c"]
            args:
            - kubescape scan framework nsa --submit
            volumeMounts:
            - name: kubescape-config-volume
              mountPath: /root/.kubescape/config.json
              subPath: config.json
          restartPolicy: OnFailure
          serviceAccountName: kubescape-discovery
          volumes:
          - name: kubescape-config-volume
            configMap:
              name: kubescape-configmap

---
@@ -1,8 +1,7 @@
# kubescape
# Helm chart - DEPRECATED

[helm chart repo](https://github.com/armosec/armo-helm)

Kubescape is the first open-source tool for testing whether Kubernetes is deployed securely according to multiple frameworks: regulatory standards, customized company policies, and DevSecOps best practices, such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo) and the [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/). Kubescape scans K8s clusters, YAML files, and Helm charts, detects misconfigurations and software vulnerabilities at early stages of the CI/CD pipeline, provides an instant risk score, and tracks risk trends over time. Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI and GitHub workflows.

## Values

2	go.mod
@@ -5,7 +5,7 @@ go 1.17
require (
	github.com/armosec/armoapi-go v0.0.49
	github.com/armosec/k8s-interface v0.0.60
	github.com/armosec/opa-utils v0.0.107
	github.com/armosec/opa-utils v0.0.110
	github.com/armosec/rbac-utils v0.0.14
	github.com/armosec/utils-go v0.0.3
	github.com/armosec/utils-k8s-go v0.0.1
4	go.sum
@@ -93,8 +93,8 @@ github.com/armosec/k8s-interface v0.0.50/go.mod h1:vHxGWqD/uh6+GQb9Sqv7OGMs+Rvc2
github.com/armosec/k8s-interface v0.0.60 h1:jTCiO15QQbHVuxFQ928rp4srf1rQoUzeybfcbv/cuss=
github.com/armosec/k8s-interface v0.0.60/go.mod h1:g0jv/fG+VqpT5ivO6D2gJcJ/w68BiffDz+PcU9YFbL4=
github.com/armosec/opa-utils v0.0.64/go.mod h1:6tQP8UDq2EvEfSqh8vrUdr/9QVSCG4sJfju1SXQOn4c=
github.com/armosec/opa-utils v0.0.107 h1:P+SACquUDMbXcOYIbQ+uzwcdJlrguXOTI42PHEJG2NU=
github.com/armosec/opa-utils v0.0.107/go.mod h1:Wc1P4gkB6UQeGW8I76zCuitGGl15Omp0bKw7N0tR9dk=
github.com/armosec/opa-utils v0.0.110 h1:qncGcbnYjiGULP3yK+4geRNNpRoWqKXQL+Xg+iXc1cM=
github.com/armosec/opa-utils v0.0.110/go.mod h1:Wc1P4gkB6UQeGW8I76zCuitGGl15Omp0bKw7N0tR9dk=
github.com/armosec/rbac-utils v0.0.1/go.mod h1:pQ8CBiij8kSKV7aeZm9FMvtZN28VgA7LZcYyTWimq40=
github.com/armosec/rbac-utils v0.0.14 h1:CKYKcgqJEXWF2Hen/B1pVGtS3nDAG1wp9dDv6oNtq90=
github.com/armosec/rbac-utils v0.0.14/go.mod h1:Ex/IdGWhGv9HZq6Hs8N/ApzCKSIvpNe/ETqDfnuyah0=
@@ -40,9 +40,10 @@ spec:
        - name: http
          hostPort: 7888
          containerPort: 7888
          protocol: TCP
        resources:
          limits:
            cpu: 1m
            cpu: 0.1m
            memory: 200Mi
          requests:
            cpu: 1m
@@ -50,6 +51,12 @@ spec:
        volumeMounts:
        - mountPath: /host_fs
          name: host-filesystem
        readinessProbe:
          httpGet:
            path: /kernelVersion
            port: 7888
          initialDelaySeconds: 1
          periodSeconds: 1
      terminationGracePeriodSeconds: 120
      dnsPolicy: ClusterFirstWithHostNet
      automountServiceAccountToken: false
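The readiness probe added above polls the host-sensor's `/kernelVersion` endpoint on host port 7888. A minimal sketch of the same request issued by hand (the node address is a placeholder, and error handling is reduced to a panic):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// The DaemonSet exposes hostPort 7888; replace the address with a real node IP.
	resp, err := http.Get("http://10.0.0.12:7888/kernelVersion")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("status=%d kernelVersion=%s\n", resp.StatusCode, body)
}
```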
@@ -27,13 +27,14 @@ var (
)

type HostSensorHandler struct {
	HostSensorPort     int32
	HostSensorPodNames map[string]string //map from pod names to node names
	IsReady            <-chan bool       //readonly chan
	k8sObj             *k8sinterface.KubernetesApi
	DaemonSet          *appsv1.DaemonSet
	podListLock        sync.RWMutex
	gracePeriod        int64
	HostSensorPort              int32
	HostSensorPodNames          map[string]string //map from pod names to node names
	HostSensorUnshedulePodNames map[string]string //map from pod names to node names
	IsReady                     <-chan bool       //readonly chan
	k8sObj                      *k8sinterface.KubernetesApi
	DaemonSet                   *appsv1.DaemonSet
	podListLock                 sync.RWMutex
	gracePeriod                 int64
}

func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi) (*HostSensorHandler, error) {
@@ -42,9 +43,10 @@ func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi) (*HostSensorHandle
		return nil, fmt.Errorf("nil k8s interface received")
	}
	hsh := &HostSensorHandler{
		k8sObj:             k8sObj,
		HostSensorPodNames: map[string]string{},
		gracePeriod:        int64(15),
		k8sObj:                      k8sObj,
		HostSensorPodNames:          map[string]string{},
		HostSensorUnshedulePodNames: map[string]string{},
		gracePeriod:                 int64(15),
	}
	// Don't deploy on cluster with no nodes. Some cloud providers prevents termination of K8s objects for cluster with no nodes!!!
	if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
@@ -140,12 +142,17 @@ func (hsh *HostSensorHandler) checkPodForEachNode() error {
		}
		hsh.podListLock.RLock()
		podsNum := len(hsh.HostSensorPodNames)
		unschedPodNum := len(hsh.HostSensorUnshedulePodNames)
		hsh.podListLock.RUnlock()
		if len(nodesList.Items) == podsNum {
		if len(nodesList.Items) <= podsNum+unschedPodNum {
			break
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("host-sensor pods number (%d) differ than nodes number (%d) after deadline exceded", podsNum, len(nodesList.Items))
			hsh.podListLock.RLock()
			podsMap := hsh.HostSensorPodNames
			hsh.podListLock.RUnlock()
			return fmt.Errorf("host-sensor pods number (%d) differ than nodes number (%d) after deadline exceded. We will take data only from the pods below: %v",
				podsNum, len(nodesList.Items), podsMap)
		}
		time.Sleep(100 * time.Millisecond)
	}
@@ -156,12 +163,17 @@ func (hsh *HostSensorHandler) checkPodForEachNode() error {

func (hsh *HostSensorHandler) populatePodNamesToNodeNames() {

	go func() {
		watchRes, err := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).Watch(hsh.k8sObj.Context, metav1.ListOptions{
		var watchRes watch.Interface
		var err error
		watchRes, err = hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).Watch(hsh.k8sObj.Context, metav1.ListOptions{
			Watch:         true,
			LabelSelector: fmt.Sprintf("name=%s", hsh.DaemonSet.Spec.Template.Labels["name"]),
		})
		if err != nil {
			fmt.Printf("Failed to watch over daemonset pods")
			logger.L().Error("failed to watch over daemonset pods - are we missing watch pods permissions?", helpers.Error(err))
		}
		if watchRes == nil {
			return
		}
		for eve := range watchRes.ResultChan() {
			pod, ok := eve.Object.(*corev1.Pod)
@@ -179,10 +191,31 @@ func (hsh *HostSensorHandler) updatePodInListAtomic(eventType watch.EventType, p

	switch eventType {
	case watch.Added, watch.Modified:
		if podObj.Status.Phase == corev1.PodRunning {
		if podObj.Status.Phase == corev1.PodRunning && len(podObj.Status.ContainerStatuses) > 0 &&
			podObj.Status.ContainerStatuses[0].Ready {
			hsh.HostSensorPodNames[podObj.ObjectMeta.Name] = podObj.Spec.NodeName
			delete(hsh.HostSensorUnshedulePodNames, podObj.ObjectMeta.Name)
		} else {
			delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
			if podObj.Status.Phase == corev1.PodPending && len(podObj.Status.Conditions) > 0 &&
				podObj.Status.Conditions[0].Reason == corev1.PodReasonUnschedulable {
				nodeName := ""
				if podObj.Spec.Affinity != nil && podObj.Spec.Affinity.NodeAffinity != nil &&
					podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
					len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) > 0 &&
					len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields) > 0 &&
					len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values) > 0 {
					nodeName = podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
				}
				logger.L().Warning("One host-sensor pod is unable to schedule on node. We will fail to collect the data from this node",
					helpers.String("message", podObj.Status.Conditions[0].Message),
					helpers.String("nodeName", nodeName),
					helpers.String("podName", podObj.ObjectMeta.Name))
				if nodeName != "" {
					hsh.HostSensorUnshedulePodNames[podObj.ObjectMeta.Name] = nodeName
				}
			} else {
				delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
			}
		}
	default:
		delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
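The nested condition above is dense; the following condensed, hypothetical helper expresses the same "pending because unschedulable" test, scanning all pod conditions instead of assuming index 0 (the helper name is not from the source):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// isUnschedulable reports whether a pending pod was rejected by the scheduler,
// i.e. it carries a PodScheduled=False condition with the Unschedulable reason.
// This is a simplified view of the check performed in the diff above.
func isUnschedulable(pod *corev1.Pod) bool {
	if pod.Status.Phase != corev1.PodPending {
		return false
	}
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodScheduled &&
			cond.Status == corev1.ConditionFalse &&
			cond.Reason == corev1.PodReasonUnschedulable {
			return true
		}
	}
	return false
}

func main() {
	pod := &corev1.Pod{
		Status: corev1.PodStatus{
			Phase: corev1.PodPending,
			Conditions: []corev1.PodCondition{{
				Type:   corev1.PodScheduled,
				Status: corev1.ConditionFalse,
				Reason: corev1.PodReasonUnschedulable,
			}},
		},
	}
	fmt.Println(isUnschedulable(pod)) // true
}
```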
@@ -8,6 +1,7 @@ import (
	"github.com/armosec/k8s-interface/k8sinterface"
	"github.com/armosec/kubescape/cautils"
	"github.com/armosec/kubescape/cautils/logger"
	"github.com/armosec/kubescape/cautils/logger/helpers"
	"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
	"sigs.k8s.io/yaml"
)
@@ -72,7 +73,7 @@ func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(path, requestKind string
			defer wg.Done()
			resBytes, err := hsh.HTTPGetToPod(podName, path)
			if err != nil {
				fmt.Printf("In sendAllPodsHTTPGETRequest failed to get data '%s' from pod '%s': %v", path, podName, err)
				logger.L().Error("failed to get data", helpers.String("path", path), helpers.String("podName", podName), helpers.Error(err))
			} else {
				resLock.Lock()
				defer resLock.Unlock()
@@ -142,7 +143,7 @@ func (hsh *HostSensorHandler) GetKubeletConfigurations() ([]hostsensor.HostSenso
	for resIdx := range res {
		jsonBytes, err := yaml.YAMLToJSON(res[resIdx].Data)
		if err != nil {
			fmt.Printf("In GetKubeletConfigurations failed to YAMLToJSON: %v;\n%v", err, res[resIdx])
			logger.L().Error("failed to convert kubelet configurations from yaml to json", helpers.Error(err))
			continue
		}
		res[resIdx].SetData(jsonBytes)
@@ -156,7 +157,7 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
		return res, nil
	}

	logger.L().Info("Accessing host sensor")
	logger.L().Debug("Accessing host sensor")
	cautils.StartSpinner()
	defer cautils.StopSpinner()
	kcData, err := hsh.GetKubeletConfigurations()
@@ -196,6 +197,6 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
	res = append(res, kcData...)
	// finish

	logger.L().Success("Read host information from host sensor")
	logger.L().Debug("Done reading information from host sensor")
	return res, nil
}
@@ -54,6 +54,6 @@ echo -e "\033[0m"
$KUBESCAPE_EXEC version
echo

echo -e "\033[35mUsage: $ $KUBESCAPE_EXEC scan --submit"
echo -e "\033[35mUsage: $ $KUBESCAPE_EXEC scan --submit --enable-host-scan"

echo -e "\033[0m"
@@ -58,15 +58,18 @@ func NewEKSProviderContext() *EKSProviderContext {
}

func (eksProviderContext *EKSProviderContext) getKubeClusterName() string {
	cluster := k8sinterface.GetCurrentContext().Cluster
	var splittedCluster []string
	context := k8sinterface.GetCurrentContext()
	if context == nil {
		return ""
	}
	cluster := context.Cluster
	if cluster != "" {
		splittedCluster = strings.Split(cluster, ".")
		splittedCluster := strings.Split(cluster, ".")
		if len(splittedCluster) > 1 {
			return splittedCluster[0]
		}
	}
	splittedCluster = strings.Split(k8sinterface.GetClusterName(), ".")
	splittedCluster := strings.Split(k8sinterface.GetClusterName(), ".")
	if len(splittedCluster) > 1 {
		return splittedCluster[0]
	}
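The split-on-`.` heuristic above recovers the short cluster name from eksctl-style kubeconfig entries such as `my-cluster.us-east-1.eksctl.io` (the sample name is illustrative, not taken from the source). A minimal sketch of the same idea:

```go
package main

import (
	"fmt"
	"strings"
)

// shortClusterName mirrors the split-on-"." heuristic above: for an
// eksctl-style name like "my-cluster.us-east-1.eksctl.io" it returns
// "my-cluster"; names without dots are returned unchanged.
func shortClusterName(cluster string) string {
	parts := strings.Split(cluster, ".")
	if len(parts) > 1 {
		return parts[0]
	}
	return cluster
}

func main() {
	fmt.Println(shortClusterName("my-cluster.us-east-1.eksctl.io")) // my-cluster
	fmt.Println(shortClusterName("plain-cluster"))                  // plain-cluster
}
```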
@@ -78,9 +81,8 @@ func (eksProviderContext *EKSProviderContext) getKubeCluster() string {
	if context == nil {
		return ""
	}
	cluster := context.Cluster
	if cluster != "" {
		return cluster
	if context.Cluster != "" {
		return context.Cluster
	}
	return k8sinterface.GetClusterName()
}

@@ -89,8 +89,7 @@ func (gkeProviderContext *GKEProviderContext) getKubeClusterName() string {
	if len(parsedName) < 3 {
		return ""
	}
	clusterName = parsedName[3]
	return clusterName
	return parsedName[3]
}

func (gkeProviderContext *GKEProviderContext) getKubeCluster() string {
@@ -98,9 +97,8 @@ func (gkeProviderContext *GKEProviderContext) getKubeCluster() string {
	if context == nil {
		return ""
	}
	cluster := context.Cluster
	if cluster != "" {
		return cluster
	if context.Cluster != "" {
		return context.Cluster
	}
	return k8sinterface.GetClusterName()

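For GKE, kubeconfig entries produced by `gcloud container clusters get-credentials` follow the pattern `gke_<project>_<location>_<cluster>`, which is why index 3 of the underscore-split name is returned above. A small sketch (sample name is illustrative, and the bound is guarded here with `len(parts) > 3`):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// gcloud-generated context/cluster names follow gke_<project>_<location>_<cluster>.
	name := "gke_my-project_us-central1-a_prod-cluster"
	parts := strings.Split(name, "_")
	if len(parts) > 3 {
		fmt.Println(parts[3]) // prod-cluster
	}
}
```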
@@ -80,7 +80,7 @@ func (k8sHandler *K8sResourceHandler) GetResources(frameworks []reporthandling.F
	}

	cautils.StopSpinner()
	logger.L().Success("Accessed successfully to Kubernetes objects")
	logger.L().Success("Accessed to Kubernetes objects")

	return k8sResourcesMap, allResources, nil
}
@@ -168,6 +168,18 @@ func ConvertMapListToMeta(resourceMap []map[string]interface{}) []workloadinterf
	return workloads
}

// func (k8sHandler *K8sResourceHandler) collectHostResourcesAPI(allResources map[string]workloadinterface.IMetadata, resourcesMap *cautils.K8SResources) error {

// 	HostSensorAPI := map[string]string{
// 		"bla/v1": "",
// 	}
// 	for apiVersion := range allResources {
// 		if HostSensorAPI == apiVersion {
// 			k8sHandler.collectHostResources()
// 		}
// 	}
// 	return nil
// }
func (k8sHandler *K8sResourceHandler) collectHostResources(allResources map[string]workloadinterface.IMetadata, resourcesMap *cautils.K8SResources) error {
	logger.L().Debug("Collecting host sensor resources")
@@ -175,6 +187,7 @@ func (k8sHandler *K8sResourceHandler) collectHostResources(allResources map[stri
	if err != nil {
		return err
	}

	for rscIdx := range hostResources {
		group, version := getGroupNVersion(hostResources[rscIdx].GetApiVersion())
		groupResource := k8sinterface.JoinResourceTriplets(group, version, hostResources[rscIdx].GetKind())
@@ -220,9 +233,10 @@ func getCloudProviderDescription(allResources map[string]workloadinterface.IMeta
	if err != nil {
		return err
	}
	logger.L().Debug("cloud", helpers.String("cluster", cluster), helpers.String("clusterName", clusterName), helpers.String("provider", provider), helpers.String("region", region), helpers.String("project", project))

	if provider != "" {
		logger.L().Debug("cloud", helpers.String("cluster", cluster), helpers.String("clusterName", clusterName), helpers.String("provider", provider), helpers.String("region", region), helpers.String("project", project))

		wl, err := cloudsupport.GetDescriptiveInfoFromCloudProvider(clusterName, provider, region, project)
		if err != nil {
			// Return error with useful info on how to configure credentials for getting cloud provider info
@@ -127,7 +127,6 @@ func (g *GitHubRepository) setTree() error {
	err = json.Unmarshal([]byte(body), &tree)
	if err != nil {
		return fmt.Errorf("failed to unmarshal response body from '%s', reason: %s", g.treeAPI(), err.Error())
		// fmt.Printf("failed to unmarshal response body from '%s', reason: %s", urlCommand, err.Error())
		// return nil
	}
	g.tree = tree
@@ -28,11 +28,11 @@ func listUrls(patterns []string) []string {
	urls := []string{}
	for i := range patterns {
		if strings.HasPrefix(patterns[i], "http") {
			if !isYaml(patterns[i]) || !isJson(patterns[i]) { // if url of repo
			if !isYaml(patterns[i]) && !isJson(patterns[i]) { // if url of repo
				if yamls, err := ScanRepository(patterns[i], ""); err == nil { // TODO - support branch
					urls = append(urls, yamls...)
				} else {
					fmt.Print(err) // TODO - handle errors
					logger.L().Error(err.Error())
				}
			} else { // url of single file
				urls = append(urls, patterns[i])
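The change from `||` to `&&` above fixes a De Morgan slip: since no URL is both YAML and JSON, `!isYaml(u) || !isJson(u)` was always true, so every URL was treated as a repository. A small sketch with hypothetical stand-ins for the real predicates:

```go
package main

import (
	"fmt"
	"strings"
)

// Simplified stand-ins for the real isYaml/isJson helpers (an assumption,
// not the actual kubescape implementations).
func isYaml(u string) bool { return strings.HasSuffix(u, ".yaml") || strings.HasSuffix(u, ".yml") }
func isJson(u string) bool { return strings.HasSuffix(u, ".json") }

func main() {
	u := "https://example.com/deploy.yaml"
	fmt.Println(!isYaml(u) || !isJson(u)) // true  – old check: even a .yaml URL looked like a repo
	fmt.Println(!isYaml(u) && !isJson(u)) // false – fixed check: a .yaml URL is a single file
}
```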
@@ -25,6 +25,6 @@ func (reportMock *ReportMock) SetClusterName(clusterName string) {
}

func (reportMock *ReportMock) DisplayReportURL() {
	message := fmt.Sprintf("\nYou can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here: https://%s/cli-signup \n", getter.GetArmoAPIConnector().GetFrontendURL())
	message := fmt.Sprintf("\nScan results have not been submitted.\nYou can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here: https://%s/cli-signup \n", getter.GetArmoAPIConnector().GetFrontendURL())
	cautils.InfoTextDisplay(os.Stderr, fmt.Sprintf("\n%s\n", message))
}
@@ -44,11 +44,11 @@ func (report *ReportEventReceiver) ActionSendReport(opaSessionObj *cautils.OPASe
	}

	if report.customerGUID == "" {
		report.message = "WARNING: Failed to publish results. Reason: Unknown accout ID. Run kubescape with the '--account <account ID>' flag. Contact ARMO team for more details"
		report.message = "WARNING: Failed to publish results. Reason: Unknown accout ID. Run kubescape with the '--account <account ID>' flag. Please feel free to contact ARMO team for more details"
		return nil
	}
	if report.clusterName == "" {
		report.message = "WARNING: Failed to publish results because the cluster name is Unknown. If you are scanning YAML files the results are not submitted to the Kubescape SaaS"
		report.message = "WARNING: Failed to publish results because the cluster name is Unknown. If you are scanning YAML files the results are not submitted to the Kubescape SaaS.Please feel free to contact ARMO team for more details"
		return nil
	}
@@ -144,7 +144,7 @@ func (report *ReportEventReceiver) generateMessage() {

	if report.customerAdminEMail != "" {
		logger.L().Debug("", helpers.String("account ID", report.customerGUID))
		report.message = fmt.Sprintf("%s %s/risk/%s", message, u.String(), report.clusterName)
		report.message = fmt.Sprintf("%s %s/configuration-scanning/%s", message, u.String(), report.clusterName)
		return
	}
	u.Path = "account/sign-up"
@@ -60,6 +60,8 @@ func (report *ReportEventReceiver) ActionSendReport(opaSessionObj *cautils.OPASe
	} else {
		report.generateMessage()
	}
	logger.L().Debug("", helpers.String("account ID", report.customerGUID))

	return nil
}
@@ -202,24 +204,28 @@ func (report *ReportEventReceiver) sendReport(host string, postureReport *report
}

func (report *ReportEventReceiver) generateMessage() {
	message := "You can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here:"

	u := url.URL{}
	u.Scheme = "https"
	u.Host = getter.GetArmoAPIConnector().GetFrontendURL()

	if report.customerAdminEMail != "" {
		logger.L().Debug("", helpers.String("account ID", report.customerGUID))
		report.message = fmt.Sprintf("%s %s/risk/%s", message, u.String(), report.clusterName)
		return
	}
	u.Path = "account/sign-up"
	q := u.Query()
	q.Add("invitationToken", report.token)
	q.Add("customerGUID", report.customerGUID)
	if report.customerAdminEMail != "" { // data has been submitted
		u.Path = fmt.Sprintf("configuration-scanning/%s", report.clusterName)
	} else {
		u.Path = "account/sign-up"
		q := u.Query()
		q.Add("invitationToken", report.token)
		q.Add("customerGUID", report.customerGUID)

		u.RawQuery = q.Encode()
	}

	sep := "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
	report.message = sep
	report.message += " << WOW! Now you can see the scan results on the web >>\n\n"
	report.message += fmt.Sprintf(" %s\n", u.String())
	report.message += sep

	u.RawQuery = q.Encode()
	report.message = fmt.Sprintf("%s %s", message, u.String())
}

func (report *ReportEventReceiver) DisplayReportURL() {
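The reworked `generateMessage` above builds one of two links with `net/url`: a deep link to the cluster's scan results when data was submitted, or a sign-up URL carrying the invitation token otherwise. A sketch with placeholder host and values (not taken from the source):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u := url.URL{Scheme: "https", Host: "portal.example.com"}

	// Results already submitted: deep-link into the cluster's scan page.
	u.Path = fmt.Sprintf("configuration-scanning/%s", "my-cluster")
	fmt.Println(u.String()) // https://portal.example.com/configuration-scanning/my-cluster

	// Not registered yet: sign-up link carrying the invitation token.
	u.Path = "account/sign-up"
	q := u.Query()
	q.Add("invitationToken", "TOKEN")
	q.Add("customerGUID", "GUID")
	u.RawQuery = q.Encode() // keys are encoded in sorted order
	fmt.Println(u.String()) // https://portal.example.com/account/sign-up?customerGUID=GUID&invitationToken=TOKEN
}
```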