Mirror of https://github.com/kubescape/kubescape.git, synced 2026-02-14 18:09:55 +00:00.
Compare commits
168 Commits
Short commit SHAs included in this comparison:

```
914a04a386 12f3dd7db6 427032ab94 b55aaaa34d 7cde877452 e399012f73
fe1d2646bd ea98bfbe9a 7bc3277634 22e94c5a29 aa8cf0ff15 a22f97bd13
3fd2d1629d cd04204a5c eee55376e7 d3c0972d70 e3f5fa8e35 03c540b68c
7f2f53b06c 8064826b53 8bdff31693 6f05b4137b 4207f3d6d1 98dbda696d
7a34c94542 789b93776d c93ee64630 f54c3ad85c a9fcd00723 fb7cc4284e
e7a0755c25 d1e02dc298 69814039ca 2ffb7fcdb4 d1695b7f10 5f0f9a9eae
aa2dedb76f 407e35c9d8 bb7f38ce31 1ffb2d360a 5cbadc02c5 9aa8d9edf0
db84380844 bbb0d2154f 2a937ac7c0 a96652094e dbf3de57f6 c00bc0ebbb
b08e5a2c32 09db5d94e1 033e8f6b44 bef40f0e6c aa2f69125f d30f3960a7
5f43da94ba 2aa8a0c935 c02f8c6cb5 aa0be474e2 c0161c9b33 71404f2205
514da1e2db 75dfceb5da 1ae76b4377 b6f90cba8e 62af441a1d 228b8957d3
b4ce999ab3 cc06a414fe d3c37c4e5f 3b448b62b1 6a3f5658b1 f65e791522
d91304f9ad 61ce00108e a4eb773eee cfc69f5a0f a44823c3ed 8a166e5ba5
9a7aeff870 cb3bdb9df2 0be8d57eaa 79b9cbf1d6 500df8737e b8acbd1bee
0bde8a65ba d2884b8936 e692359b47 473746eab0 050878cbd6 e100f18bb0
05c82fc166 839c3e261f 95b579d191 8656715753 05b6394c5c 72860deb0f
d3bdbf31ac 639c694c13 f34f6dc51e b93e7b9abf 995f615b10 39b95eff4f
392625b774 306b9d28ca 6fe87bba20 c0d534072d 009221aa98 46e5aff5f9
59498361e7 c652da130d 83246a1802 f255df0198 9e524ffc34 004cc0c469
52b78a7e73 bd089d76af d5025b54bf 740497047d 3f6cbd57b2 2c9524ed45
384922680a d2e9f8f4f8 b4f10f854e 8ce64d2a7f d917e21364 32cedaf565
4c2a5e9a11 a41d2a46ff 4794cbfb36 d021217cf7 4573d83831 2bb612ca3f
35534112c6 f51e531f3a 2490856ccb 9a5a87b027 45b8c89865 e68e6dcd3d
670ff4a15d b616a37800 ce488a3645 fb47a9c742 80ace81a12 1efdae5197
a4c88edfca 8f38c2f627 bbf68d4ce8 e1eec47a22 27e2c044da 1213e8d6ac
3f58d68d2a fde437312f b7842f98f0 1b2514e3ec 0da4f40b48 5591bf09d9
da94651656 86b6a1d88a f903e13d7b 015206a760 0aff119260 ddb8608501
0d75a273f0 4f07d23dd6 79baa0d66e d5ca49ef9b 536d7fb3c5 f66fd1f38c
```
`.github/workflows/build.yaml` (57 changes, vendored)

```diff
@@ -2,7 +2,7 @@ name: build
 on:
   push:
-    branches: [ master ]
+    branches: [ master ]
 jobs:
   once:
     name: Create release
@@ -16,8 +16,8 @@ jobs:
       env:
         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       with:
-        tag_name: v1.0.${{ github.run_number }}
-        release_name: Release v1.0.${{ github.run_number }}
+        tag_name: v2.0.${{ github.run_number }}
+        release_name: Release v2.0.${{ github.run_number }}
         draft: false
         prerelease: false
   build:
@@ -39,8 +39,9 @@ jobs:
     - name: Build
       env:
-        RELEASE: v1.0.${{ github.run_number }}
+        RELEASE: v2.0.${{ github.run_number }}
         ArmoBEServer: api.armo.cloud
+        ArmoAuthServer: eggauth.armo.cloud
         ArmoERServer: report.armo.cloud
         ArmoWebsite: portal.armo.cloud
         CGO_ENABLED: 0
@@ -48,7 +49,7 @@ jobs:
     - name: Smoke Testing
       env:
-        RELEASE: v1.0.${{ github.run_number }}
+        RELEASE: v2.0.${{ github.run_number }}
         KUBESCAPE_SKIP_UPDATE_CHECK: "true"
       run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
@@ -67,28 +68,54 @@ jobs:
   build-docker:
     name: Build docker container, tag and upload to registry
     needs: build
-    if: ${{ github.repository == 'armosec/kubescape' }}
     runs-on: ubuntu-latest
+    if: ${{ github.repository == 'armosec/kubescape' }} # TODO
+    permissions:
+      id-token: write
+      packages: write
+      contents: read

     steps:
     - uses: actions/checkout@v2

-    - name: Set name
-      run: echo quay.io/armosec/kubescape:v1.0.${{ github.run_number }} > build_tag.txt
+    - name: Set image version
+      id: image-version
+      run: echo '::set-output version=IMAGE_VERSION::v2.0.${{ github.run_number }}'
+
+    - name: Set image name
+      id: image-name
+      run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'

     - name: Build the Docker image
-      run: docker build . --file build/Dockerfile --tag $(cat build_tag.txt) --build-arg run_number=${{ github.run_number }}
+      run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg run_number=${{ github.run_number }}

     - name: Re-Tag Image to latest
-      run: docker tag $(cat build_tag.txt) quay.io/armosec/kubescape:latest
+      run: docker tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} ${{ steps.image-name.outputs.IMAGE_NAME }}:latest

     - name: Login to Quay.io
       env: # Or as an environment variable
         QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
         QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
       run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
     # - name: Login to GitHub Container Registry
     #   uses: docker/login-action@v1
     #   with:
     #     registry: ghcr.io
     #     username: ${{ github.actor }}
     #     password: ${{ secrets.GITHUB_TOKEN }}
     - name: Push Docker image
       run: |
-        docker push $(cat build_tag.txt)
-        docker push quay.io/armosec/kubescape:latest
+        docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
+        docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:latest
+
+    - name: Install cosign
+      uses: sigstore/cosign-installer@main
+      with:
+        cosign-release: 'v1.5.1' # optional
+    - name: sign kubescape container image
+      env:
+        COSIGN_EXPERIMENTAL: "true"
+      run: |
+        cosign sign --force ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
+        cosign sign --force ${{ steps.image-name.outputs.IMAGE_NAME }}:latest
```
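The new signing steps use cosign in keyless mode (`COSIGN_EXPERIMENTAL`). As a quick check of the result, a minimal verification sketch, assuming cosign v1.x and the image reference this workflow pushes (the exact tag is illustrative):

```
# verify the keyless signature on a published image (cosign v1.x)
COSIGN_EXPERIMENTAL=1 cosign verify quay.io/armosec/kubescape:latest
```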
`.github/workflows/build_dev.yaml` (49 changes, vendored)

```diff
@@ -23,16 +23,17 @@ jobs:
     - name: Build
       env:
-        RELEASE: v1.0.${{ github.run_number }}
+        RELEASE: v2.0.${{ github.run_number }}
         ArmoBEServer: api.armo.cloud
-        ArmoERServer: report.euprod1.cyberarmorsoft.com
+        ArmoAuthServer: eggauth.armo.cloud
+        ArmoERServer: report.armo.cloud
         ArmoWebsite: portal.armo.cloud
         CGO_ENABLED: 0
       run: python3 --version && python3 build.py

     - name: Smoke Testing
       env:
-        RELEASE: v1.0.${{ github.run_number }}
+        RELEASE: v2.0.${{ github.run_number }}
         KUBESCAPE_SKIP_UPDATE_CHECK: "true"
       run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
@@ -42,30 +43,52 @@ jobs:
         name: kubescape-${{ matrix.os }}
         path: build/${{ matrix.os }}/kubescape

   build-docker:
     name: Build docker container, tag and upload to registry
     needs: build
-    if: ${{ github.repository == 'armosec/kubescape' }}
+    if: ${{ github.repository == 'armosec/kubescape' }} # TODO
     runs-on: ubuntu-latest
+    permissions:
+      id-token: write
+      packages: write
+      contents: read

     steps:
     - uses: actions/checkout@v2

-    - name: Set name
-      run: echo quay.io/armosec/kubescape:dev-v1.0.${{ github.run_number }} > build_tag.txt
+    - name: Set image version
+      id: image-version
+      run: echo '::set-output name=IMAGE_VERSION::v2.0.${{ github.run_number }}'
+
+    - name: Set image name
+      id: image-name
+      run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'

     - name: Build the Docker image
-      run: docker build . --file build/Dockerfile --tag $(cat build_tag.txt) --build-arg run_number=${{ github.run_number }}
+      run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg run_number=${{ github.run_number }}

     - name: Login to Quay.io
-      env: # Or as an environment variable
+      env:
         QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
         QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
       run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io

     # - name: Login to GitHub Container Registry
     #   uses: docker/login-action@v1
     #   with:
     #     registry: ghcr.io
     #     username: ${{ github.actor }}
     #     password: ${{ secrets.GITHUB_TOKEN }}
     - name: Push Docker image
       run: |
-        docker push $(cat build_tag.txt)
+        docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}

+    - name: Install cosign
+      uses: sigstore/cosign-installer@main
+      with:
+        cosign-release: 'v1.5.1' # optional
+    - name: sign kubescape container image
+      env:
+        COSIGN_EXPERIMENTAL: "true"
+      run: |
+        cosign sign --force ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
```
`.github/workflows/master_pr_checks.yaml` (5 changes, vendored)

```diff
@@ -24,8 +24,9 @@ jobs:
     - name: Build
       env:
-        RELEASE: v1.0.${{ github.run_number }}
+        RELEASE: v2.0.${{ github.run_number }}
         ArmoBEServer: api.armo.cloud
+        ArmoAuthServer: eggauth.armo.cloud
         ArmoERServer: report.armo.cloud
         ArmoWebsite: portal.armo.cloud
         CGO_ENABLED: 0
@@ -33,7 +34,7 @@ jobs:
     - name: Smoke Testing
       env:
-        RELEASE: v1.0.${{ github.run_number }}
+        RELEASE: v2.0.${{ github.run_number }}
         KUBESCAPE_SKIP_UPDATE_CHECK: "true"
       run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
```
`.github/workflows/post-release.yaml` (new file, 17 lines, vendored)

```yaml
name: create release digests

on:
  release:
    types: [ published]
    branches: [ master ]

jobs:
  once:
    name: Creating digests
    runs-on: ubuntu-latest
    steps:
      - name: Digest
        uses: MCJack123/ghaction-generate-release-hashes@v1
        with:
          hash-type: sha1
          file-name: kubescape-release-digests
```
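The generated digests file can be used to check a downloaded release asset. A rough sketch, assuming the action writes `<sha1>  <asset name>` lines (the asset name below is illustrative):

```
# verify a downloaded binary against the published SHA-1 digests
grep kubescape-ubuntu-latest kubescape-release-digests | sha1sum -c -
```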
`.github/workflows/publish_image.yaml` (new file, 53 lines, vendored)

```yaml
name: build

on:
  push:
    branches: [ master ]
jobs:

  build-docker:
    name: Build docker container, tag and upload to registry
    needs: build
    runs-on: ubuntu-latest
    permissions:
      id-token: write
      packages: write
      contents: read

    steps:
      - uses: actions/checkout@v2

      - name: Set name
        id: set-name
        run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape:v2.0.${{ github.run_number }}'

      - name: Build the Docker image
        run: docker build . --file build/Dockerfile --tag ${{ steps.set-name.outputs.IMAGE_NAME }} --build-arg run_number=${{ github.run_number }}
      - name: Re-Tag Image to latest
        run: docker tag ${{ steps.set-name.outputs.IMAGE_NAME }} quay.io/${{ github.repository_owner }}/kubescape:latest
      - name: Login to Quay.io
        env: # Or as an environment variable
          QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
          QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
        run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
      - name: Login to GitHub Container Registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Push Docker image
        run: |
          docker push ${{ steps.set-name.outputs.IMAGE_NAME }}
          docker push quay.io/${{ github.repository_owner }}/kubescape:latest
      - name: Install cosign
        uses: sigstore/cosign-installer@main
        with:
          cosign-release: 'v1.5.1' # optional
      - name: sign kubescape container image
        env:
          COSIGN_EXPERIMENTAL: "true"
        run: |
          cosign sign --force ${{ steps.set-name.outputs.IMAGE_NAME }}
          cosign sign --force quay.io/${{ github.repository_owner }}/kubescape:latest
```
`.gitignore` (3 changes, vendored)

```diff
@@ -3,4 +3,5 @@
 *debug*
 *vender*
 *.pyc*
-.idea
+.idea
+ca.srl
```
`CODE_OF_CONDUCT.md` (new file, 127 lines)

```markdown
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement [here](mailto:ben@armosec.io).
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
```
```diff
@@ -19,7 +19,8 @@ Please note we have a code of conduct, please follow it in all your interactions
    build.
 2. Update the README.md with details of changes to the interface, this includes new environment
    variables, exposed ports, useful file locations and container parameters.
-3. We will merge the Pull Request in once you have the sign-off.
+3. Open Pull Request to `dev` branch - we test the component before merging into the `master` branch
+4. We will merge the Pull Request in once you have the sign-off.

 ## Code of Conduct
```
`MAINTAINERS.md` (new file, 9 lines)

```markdown
# Maintainers

The following table lists Kubescape project maintainers

| Name | GitHub | Email | Organization | Repositories/Area of Expertise | Added/Renewed On |
| --- | --- | --- | --- | --- | --- |
| Ben Hirschberg | @slashben | ben@armosec.io | ARMO | Kubescape CLI | 2021-09-01 |
| Rotem Refael | @rotemamsa | rrefael@armosec.io | ARMO | Kubescape CLI | 2021-10-11 |
| David Wertenteil | @dwertent | dwertent@armosec.io | ARMO | Kubescape CLI | 2021-09-01 |
```
`README.md` (51 changes)

````diff
@@ -3,10 +3,10 @@
 [](https://github.com/armosec/kubescape/actions/workflows/build.yaml)
 [](https://goreportcard.com/report/github.com/armosec/kubescape)

-Kubescape is the first open-source tool for testing if Kubernetes is deployed securely according to multiple frameworks:
-regulatory, customized company policies and DevSecOps best practices, such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo) and the [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) .
-Kubescape scans K8s clusters, YAML files, and HELM charts, and detect misconfigurations and software vulnerabilities at early stages of the CI/CD pipeline and provides a risk score instantly and risk trends over time.
-Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI and Github workflows.
+Kubescape is a K8s open-source tool providing a multi-cloud K8s single pane of glass, including risk analysis, security compliance, RBAC visualizer and image vulnerabilities scanning.
+Kubescape scans K8s clusters, YAML files, and HELM charts, detecting misconfigurations according to multiple frameworks (such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo) , [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/)), software vulnerabilities, and RBAC (role-based-access-control) violations at early stages of the CI/CD pipeline, calculates risk score instantly and shows risk trends over time.
+It became one of the fastest-growing Kubernetes tools among developers due to its easy-to-use CLI interface, flexible output formats, and automated scanning capabilities, saving Kubernetes users and admins’ precious time, effort, and resources.
+Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI, Github workflows, Prometheus, and Slack, and supports multi-cloud K8s deployments like EKS, GKE, and AKS.

 </br>

@@ -24,7 +24,7 @@ curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh |

 ## Run:
 ```
-kubescape scan --submit
+kubescape scan --submit --enable-host-scan
 ```

 <img src="docs/summary.png">

@@ -54,9 +54,13 @@ Want to contribute? Want to discuss something? Have an issue?

 # Options and examples

+## Playground
+* [Kubescape playground](https://www.katacoda.com/pathaksaiyam/scenarios/kubescape)
+
 ## Tutorials

 * [Overview](https://youtu.be/wdBkt_0Qhbg)
+* [How To Secure Kubernetes Clusters With Kubescape And Armo](https://youtu.be/ZATGiDIDBQk)
 * [Scanning Kubernetes YAML files](https://youtu.be/Ox6DaR7_4ZI)
 * [Scan Kubescape on an air-gapped environment (offline support)](https://youtu.be/IGXL9s37smM)
 * [Managing exceptions in the Kubescape SaaS version](https://youtu.be/OzpvxGmCR80)

@@ -94,12 +98,15 @@ Set-ExecutionPolicy RemoteSigned -scope CurrentUser
 | `-t`/`--fail-threshold` | `100` (do not fail) | fail command (return exit code 1) if result is above threshold | `0` -> `100` |
 | `-f`/`--format` | `pretty-printer` | Output format | `pretty-printer`/`json`/`junit`/`prometheus` |
 | `-o`/`--output` | print to stdout | Save scan result in file | |
-| `--use-from` | | Load local framework object from specified path. If not used will download latest | |
+| `--use-from` | | Load local framework object from specified path. If not used will download latest |
+| `--use-artifacts-from` | | Load artifacts (frameworks, control-config, exceptions) from local directory. If not used will download them | |
 | `--use-default` | `false` | Load local framework object from default path. If not used will download latest | `true`/`false` |
-| `--exceptions` | | Path to an [exceptions obj](examples/exceptions.json). If not set will download exceptions from Armo management portal | |
+| `--exceptions` | | Path to an exceptions obj, [examples](examples/exceptions/README.md). Default will download exceptions from Kubescape SaaS |
 | `--controls-config` | | Path to a controls-config obj. If not set will download controls-config from ARMO management portal | |
 | `--submit` | `false` | If set, Kubescape will send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not sent | `true`/`false` |
 | `--keep-local` | `false` | Kubescape will not send scan results to Armo management portal. Use this flag if you ran with the `--submit` flag in the past and you do not want to submit your current scan results | `true`/`false` |
 | `--account` | | Armo portal account ID. Default will load account ID from configMap or config file | |
+| `--kube-context` | current-context | Cluster context to scan | |
 | `--verbose` | `false` | Display all of the input resources and not only failed resources | `true`/`false` |

@@ -107,6 +114,12 @@ Set-ExecutionPolicy RemoteSigned -scope CurrentUser

 ### Examples

+#### Scan a running Kubernetes cluster and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
+```
+kubescape scan --submit
+```
+
 #### Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
 ```
 kubescape scan framework nsa --submit

@@ -189,7 +202,7 @@ It is possible to run Kubescape offline!

 First download the framework and then scan with `--use-from` flag

-1. Download and save in file, if file name not specified, will store save to `~/.kubescape/<framework name>.json`
+1. Download and save in file, if file name not specified, will save in `~/.kubescape/<framework name>.json`
 ```
 kubescape download framework nsa --output nsa.json
 ```

@@ -200,6 +213,19 @@ kubescape scan framework nsa --use-from nsa.json
 ```

+You can also download all artifacts to a local path and then load them using `--use-artifacts-from` flag
+
+1. Download and save in local directory, if path not specified, will save all in `~/.kubescape`
+```
+kubescape download artifacts --output path/to/local/dir
+```
+
+2. Scan using the downloaded artifacts
+```
+kubescape scan framework nsa --use-artifacts-from path/to/local/dir
+```
+
 ## Scan Periodically using Helm - Contributed by [@yonahd](https://github.com/yonahd)

 You can scan your cluster periodically by adding a `CronJob` that will repeatedly trigger kubescape

@@ -245,6 +271,7 @@ variables in this script:
 + ArmoBEServer
 + ArmoERServer
 + ArmoWebsite
++ ArmoAuthServer

 ## Build using go

@@ -263,7 +290,7 @@ go build -o kubescape .

 3. Run
 ```
-./kubescape scan framework nsa
+./kubescape scan --submit --enable-host-scan
 ```

 4. Enjoy :zany_face:

@@ -319,3 +346,9 @@ The tools retrieves Kubernetes objects from the API server and runs a set of [re
 The results by default printed in a pretty "console friendly" manner, but they can be retrieved in JSON format for further processing.

 Kubescape is an open source project, we welcome your feedback and ideas for improvement. We’re also aiming to collaborate with the Kubernetes community to help make the tests themselves more robust and complete as Kubernetes develops.

+## Thanks to all the contributors ❤️
+<a href = "https://github.com/armosec/kubescape/graphs/contributors">
+<img src = "https://contrib.rocks/image?repo=armosec/kubescape"/>
+</a>
````
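The periodic-scan section touched in the README hunk above relies on the contributed Helm chart to create the `CronJob`. Purely as an illustration of the idea outside of Helm, a hand-rolled equivalent could look like the sketch below (image tag, schedule, and scan arguments are illustrative, not the chart's actual values):

```
# illustrative only: trigger a containerized scan once a day
kubectl create cronjob kubescape-scan \
  --image=quay.io/armosec/kubescape:latest \
  --schedule="0 0 * * *" \
  -- kubescape scan framework nsa --submit
```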
`build.py` (9 changes)

```diff
@@ -8,6 +8,7 @@ BASE_GETTER_CONST = "github.com/armosec/kubescape/cautils/getter"
 BE_SERVER_CONST = BASE_GETTER_CONST + ".ArmoBEURL"
 ER_SERVER_CONST = BASE_GETTER_CONST + ".ArmoERURL"
 WEBSITE_CONST = BASE_GETTER_CONST + ".ArmoFEURL"
+AUTH_SERVER_CONST = BASE_GETTER_CONST + ".armoAUTHURL"

 def checkStatus(status, msg):
     if status != 0:
@@ -37,7 +38,7 @@ def main():
     print("Building Kubescape")

     # print environment variables
-    print(os.environ)
+    # print(os.environ)

     # Set some variables
     packageName = getPackageName()
@@ -46,6 +47,7 @@ def main():
     ArmoBEServer = os.getenv("ArmoBEServer")
     ArmoERServer = os.getenv("ArmoERServer")
     ArmoWebsite = os.getenv("ArmoWebsite")
+    ArmoAuthServer = os.getenv("ArmoAuthServer")

     # Create build directory
     buildDir = getBuildDir()
@@ -54,9 +56,10 @@ def main():
         os.makedirs(buildDir)

     # Build kubescape
-    ldflags = "-w -s -X %s=%s -X %s=%s -X %s=%s -X %s=%s" \
+    ldflags = "-w -s -X %s=%s -X %s=%s -X %s=%s -X %s=%s -X %s=%s" \
         % (buildUrl, releaseVersion, BE_SERVER_CONST, ArmoBEServer,
-           ER_SERVER_CONST, ArmoERServer, WEBSITE_CONST, ArmoWebsite)
+           ER_SERVER_CONST, ArmoERServer, WEBSITE_CONST, ArmoWebsite,
+           AUTH_SERVER_CONST, ArmoAuthServer)
     status = subprocess.call(["go", "build", "-o", "%s/%s" % (buildDir, packageName), "-ldflags" ,ldflags])
     checkStatus(status, "Failed to build kubescape")
```
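The extra `%s=%s` pair means build.py now injects the auth-server URL with one more `-X` flag. Expanded with the values from the workflow env blocks above, the resulting go build call is roughly equivalent to the sketch below (output path abbreviated, and the release-version `-X` pair built from `buildUrl` is left out for brevity):

```
go build -o build/<os>/kubescape -ldflags "-w -s \
  -X github.com/armosec/kubescape/cautils/getter.ArmoBEURL=api.armo.cloud \
  -X github.com/armosec/kubescape/cautils/getter.ArmoERURL=report.armo.cloud \
  -X github.com/armosec/kubescape/cautils/getter.ArmoFEURL=portal.armo.cloud \
  -X github.com/armosec/kubescape/cautils/getter.armoAUTHURL=eggauth.armo.cloud"
```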
```diff
@@ -11,6 +11,7 @@ import (

     "github.com/armosec/k8s-interface/k8sinterface"
     "github.com/armosec/kubescape/cautils/getter"
+    "github.com/armosec/kubescape/cautils/logger"
     corev1 "k8s.io/api/core/v1"
 )

@@ -23,25 +24,31 @@ func ConfigFileFullPath() string { return getter.GetDefaultPath(configFileName +
 // ======================================================================================

 type ConfigObj struct {
-    CustomerGUID string `json:"customerGUID"`
-    Token string `json:"invitationParam"`
-    CustomerAdminEMail string `json:"adminMail"`
-    ClusterName string `json:"clusterName"`
-}
-
-func (co *ConfigObj) Json() []byte {
-    if b, err := json.Marshal(co); err == nil {
-        return b
-    }
-    return []byte{}
+    AccountID string `json:"accountID,omitempty"`
+    ClientID string `json:"clientID,omitempty"`
+    SecretKey string `json:"secretKey,omitempty"`
+    CustomerGUID string `json:"customerGUID,omitempty"` // Deprecated
+    Token string `json:"invitationParam,omitempty"`
+    CustomerAdminEMail string `json:"adminMail,omitempty"`
+    ClusterName string `json:"clusterName,omitempty"`
 }

 // Config - convert ConfigObj to config file
 func (co *ConfigObj) Config() []byte {

-    // remove cluster name before saving to file
     clusterName := co.ClusterName
-    co.ClusterName = "" // remove cluster name before saving to file
-    b, err := json.Marshal(co)
+    customerAdminEMail := co.CustomerAdminEMail
+    token := co.Token
+    co.ClusterName = ""
+    co.Token = ""
+    co.CustomerAdminEMail = ""
+
+    b, err := json.MarshalIndent(co, "", " ")

     co.ClusterName = clusterName
+    co.CustomerAdminEMail = customerAdminEMail
+    co.Token = token

     if err == nil {
         return b
@@ -56,10 +63,12 @@ func (co *ConfigObj) Config() []byte {
 type ITenantConfig interface {
     // set
     SetTenant() error
+    UpdateCachedConfig() error
+    DeleteCachedConfig() error

     // getters
     GetClusterName() string
-    GetCustomerGUID() string
+    GetAccountID() string
     GetConfigObj() *ConfigObj
     // GetBackendAPI() getter.IBackend
     // GenerateURL()
@@ -76,7 +85,7 @@ type LocalConfig struct {
     configObj *ConfigObj
 }

-func NewLocalConfig(backendAPI getter.IBackend, customerGUID string) *LocalConfig {
+func NewLocalConfig(backendAPI getter.IBackend, customerGUID, clusterName string) *LocalConfig {
     var configObj *ConfigObj

     lc := &LocalConfig{
@@ -93,11 +102,20 @@ func NewLocalConfig(backendAPI getter.IBackend, customerGUID string) *LocalConfi
         lc.configObj = configObj
     }
     if customerGUID != "" {
-        lc.configObj.CustomerGUID = customerGUID // override config customerGUID
+        lc.configObj.AccountID = customerGUID // override config customerGUID
     }
-    if lc.configObj.CustomerGUID != "" {
+    if clusterName != "" {
+        lc.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
+    }
+    getAccountFromEnv(lc.configObj)
+
+    lc.backendAPI.SetAccountID(lc.configObj.AccountID)
+    lc.backendAPI.SetClientID(lc.configObj.ClientID)
+    lc.backendAPI.SetSecretKey(lc.configObj.SecretKey)
+
+    if lc.configObj.AccountID != "" {
         if err := lc.SetTenant(); err != nil {
-            fmt.Println(err)
+            logger.L().Error(err.Error())
         }
     }

@@ -105,29 +123,37 @@ func NewLocalConfig(backendAPI getter.IBackend, customerGUID string) *LocalConfi
 }

 func (lc *LocalConfig) GetConfigObj() *ConfigObj { return lc.configObj }
-func (lc *LocalConfig) GetCustomerGUID() string { return lc.configObj.CustomerGUID }
-func (lc *LocalConfig) GetClusterName() string { return "" }
+func (lc *LocalConfig) GetAccountID() string { return lc.configObj.AccountID }
+func (lc *LocalConfig) GetClusterName() string { return lc.configObj.ClusterName }
 func (lc *LocalConfig) IsConfigFound() bool { return existsConfigFile() }
 func (lc *LocalConfig) SetTenant() error {

     // ARMO tenant GUID
     if err := getTenantConfigFromBE(lc.backendAPI, lc.configObj); err != nil {
         return err
     }
-    updateConfigFile(lc.configObj)
+    lc.UpdateCachedConfig()
     return nil

 }
+func (lc *LocalConfig) UpdateCachedConfig() error {
+    return updateConfigFile(lc.configObj)
+}
+
+func (lc *LocalConfig) DeleteCachedConfig() error {
+    return DeleteConfigFile()
+}

 func getTenantConfigFromBE(backendAPI getter.IBackend, configObj *ConfigObj) error {

     // get from armoBE
-    tenantResponse, err := backendAPI.GetCustomerGUID(configObj.CustomerGUID)
+    tenantResponse, err := backendAPI.GetTenant()
     if err == nil && tenantResponse != nil {
         if tenantResponse.AdminMail != "" { // registered tenant
             configObj.CustomerAdminEMail = tenantResponse.AdminMail
         } else { // new tenant
             configObj.Token = tenantResponse.Token
-            configObj.CustomerGUID = tenantResponse.TenantID
+            configObj.AccountID = tenantResponse.TenantID
         }
     } else {
         if err != nil && !strings.Contains(err.Error(), "already exists") {
@@ -149,8 +175,11 @@ Supported environments variables:
 KS_DEFAULT_CONFIGMAP_NAME // name of configmap, if not set default is 'kubescape'
 KS_DEFAULT_CONFIGMAP_NAMESPACE // configmap namespace, if not set default is 'default'

+KS_ACCOUNT_ID
+KS_CLIENT_ID
+KS_SECRET_KEY
+
 TODO - supprot:
-KS_ACCOUNT // Account ID
 KS_CACHE // path to cached files
 */
 type ClusterConfig struct {
@@ -161,7 +190,7 @@ type ClusterConfig struct {
     configObj *ConfigObj
 }

-func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, customerGUID string) *ClusterConfig {
+func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, customerGUID, clusterName string) *ClusterConfig {
     var configObj *ConfigObj
     c := &ClusterConfig{
         k8s: k8s,
@@ -174,35 +203,44 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
     // get from configMap
     if c.existsConfigMap() {
         configObj, _ = c.loadConfigFromConfigMap()
-    } else if existsConfigFile() { // get from file
+    }
+    if configObj == nil && existsConfigFile() { // get from file
         configObj, _ = loadConfigFromFile()
     }
     if configObj != nil {
         c.configObj = configObj
     }
     if customerGUID != "" {
-        c.configObj.CustomerGUID = customerGUID // override config customerGUID
+        c.configObj.AccountID = customerGUID // override config customerGUID
     }
-    if c.configObj.CustomerGUID != "" {
-        if err := c.SetTenant(); err != nil {
-            fmt.Println(err)
-        }
+    if clusterName != "" {
+        c.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
     }
+    getAccountFromEnv(c.configObj)

     if c.configObj.ClusterName == "" {
         c.configObj.ClusterName = AdoptClusterName(k8sinterface.GetClusterName())
     } else { // override the cluster name if it has unwanted characters
         c.configObj.ClusterName = AdoptClusterName(c.configObj.ClusterName)
     }

+    c.backendAPI.SetAccountID(c.configObj.AccountID)
+    c.backendAPI.SetClientID(c.configObj.ClientID)
+    c.backendAPI.SetSecretKey(c.configObj.SecretKey)
+
+    if c.configObj.AccountID != "" {
+        if err := c.SetTenant(); err != nil {
+            logger.L().Error(err.Error())
+        }
+    }
+
     return c
 }

 func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
 func (c *ClusterConfig) GetDefaultNS() string { return c.configMapNamespace }
-func (c *ClusterConfig) GetCustomerGUID() string { return c.configObj.CustomerGUID }
-func (c *ClusterConfig) IsConfigFound() bool {
-    return existsConfigFile() || c.existsConfigMap()
-}
+func (c *ClusterConfig) GetAccountID() string { return c.configObj.AccountID }
+func (c *ClusterConfig) IsConfigFound() bool { return existsConfigFile() || c.existsConfigMap() }

 func (c *ClusterConfig) SetTenant() error {

@@ -210,17 +248,34 @@ func (c *ClusterConfig) SetTenant() error {
     if err := getTenantConfigFromBE(c.backendAPI, c.configObj); err != nil {
         return err
     }
-    // update/create config
-    if c.existsConfigMap() {
-        c.updateConfigMap()
-    } else {
-        c.createConfigMap()
-    }
-    updateConfigFile(c.configObj)
+    c.UpdateCachedConfig()
     return nil

 }

+func (c *ClusterConfig) UpdateCachedConfig() error {
+    // update/create config
+    if c.existsConfigMap() {
+        if err := c.updateConfigMap(); err != nil {
+            return err
+        }
+    } else {
+        if err := c.createConfigMap(); err != nil {
+            return err
+        }
+    }
+    return updateConfigFile(c.configObj)
+}
+
+func (c *ClusterConfig) DeleteCachedConfig() error {
+    if err := c.deleteConfigMap(); err != nil {
+        return err
+    }
+    if err := DeleteConfigFile(); err != nil {
+        return err
+    }
+    return nil
+}
 func (c *ClusterConfig) GetClusterName() string {
     return c.configObj.ClusterName
 }
@@ -396,9 +451,14 @@ func readConfig(dat []byte) (*ConfigObj, error) {
         return nil, nil
     }
     configObj := &ConfigObj{}
-    err := json.Unmarshal(dat, configObj)
-
-    return configObj, err
+    if err := json.Unmarshal(dat, configObj); err != nil {
+        return nil, err
+    }
+    if configObj.AccountID == "" {
+        configObj.AccountID = configObj.CustomerGUID
+    }
+    configObj.CustomerGUID = ""
+    return configObj, nil
 }

 // Check if the customer is submitted
@@ -410,7 +470,7 @@ func (clusterConfig *ClusterConfig) IsSubmitted() bool {
 func (clusterConfig *ClusterConfig) IsRegistered() bool {

     // get from armoBE
-    tenantResponse, err := clusterConfig.backendAPI.GetCustomerGUID(clusterConfig.GetCustomerGUID())
+    tenantResponse, err := clusterConfig.backendAPI.GetTenant()
     if err == nil && tenantResponse != nil {
         if tenantResponse.AdminMail != "" { // this customer already belongs to some user
             return true
@@ -419,16 +479,7 @@ func (clusterConfig *ClusterConfig) IsRegistered() bool {
     return false
 }

-func (clusterConfig *ClusterConfig) DeleteConfig() error {
-    if err := clusterConfig.DeleteConfigMap(); err != nil {
-        return err
-    }
-    if err := DeleteConfigFile(); err != nil {
-        return err
-    }
-    return nil
-}
-func (clusterConfig *ClusterConfig) DeleteConfigMap() error {
+func (clusterConfig *ClusterConfig) deleteConfigMap() error {
     return clusterConfig.k8s.KubernetesClient.CoreV1().ConfigMaps(clusterConfig.configMapNamespace).Delete(context.Background(), clusterConfig.configMapName, metav1.DeleteOptions{})
 }

@@ -453,3 +504,16 @@ func getConfigMapNamespace() string {
     }
     return "default"
 }
+
+func getAccountFromEnv(configObj *ConfigObj) {
+    // load from env
+    if accountID := os.Getenv("KS_ACCOUNT_ID"); accountID != "" {
+        configObj.AccountID = accountID
+    }
+    if clientID := os.Getenv("KS_CLIENT_ID"); clientID != "" {
+        configObj.ClientID = clientID
+    }
+    if secretKey := os.Getenv("KS_SECRET_KEY"); secretKey != "" {
+        configObj.SecretKey = secretKey
+    }
+}
```
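With `getAccountFromEnv` in place, account credentials can be supplied through the environment instead of a cached config file or configMap. A small usage sketch (the variable names come from the hunk above; the values are placeholders):

```
export KS_ACCOUNT_ID="<account-guid>"
export KS_CLIENT_ID="<client-id>"
export KS_SECRET_KEY="<secret-key>"
kubescape scan --submit
```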
```diff
@@ -4,25 +4,31 @@ import (
     "github.com/armosec/armoapi-go/armotypes"
     "github.com/armosec/k8s-interface/workloadinterface"
     "github.com/armosec/opa-utils/reporthandling"
+    "github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
+    reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
 )

 // K8SResources map[<api group>/<api version>/<resource>][]<resourceID>
 type K8SResources map[string][]string

 type OPASessionObj struct {
-    K8SResources *K8SResources // input k8s objects
-    Frameworks []reporthandling.Framework // list of frameworks to scan
-    AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<rtesource ID>]<resource>
-    PostureReport *reporthandling.PostureReport // scan results
-    Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
-    RegoInputData RegoInputData // input passed to rgo for scanning. map[<control name>][<input arguments>]
+    K8SResources *K8SResources // input k8s objects
+    Frameworks []reporthandling.Framework // list of frameworks to scan
+    AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<rtesource ID>]<resource>
+    ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<rtesource ID>]<resource result>
+    PostureReport *reporthandling.PostureReport // scan results v1
+    Report *reporthandlingv2.PostureReport // scan results v2
+    Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
+    RegoInputData RegoInputData // input passed to rgo for scanning. map[<control name>][<input arguments>]
 }

 func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources) *OPASessionObj {
     return &OPASessionObj{
-        Frameworks: frameworks,
-        K8SResources: k8sResources,
-        AllResources: make(map[string]workloadinterface.IMetadata),
+        Report: &reporthandlingv2.PostureReport{},
+        Frameworks: frameworks,
+        K8SResources: k8sResources,
+        AllResources: make(map[string]workloadinterface.IMetadata),
+        ResourcesResult: make(map[string]resourcesresults.Result),
         PostureReport: &reporthandling.PostureReport{
             ClusterName: ClusterName,
             CustomerGUID: CustomerGUID,
@@ -32,9 +38,11 @@ func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SRe

 func NewOPASessionObjMock() *OPASessionObj {
     return &OPASessionObj{
-        Frameworks: nil,
-        K8SResources: nil,
-        AllResources: make(map[string]workloadinterface.IMetadata),
+        Frameworks: nil,
+        K8SResources: nil,
+        AllResources: make(map[string]workloadinterface.IMetadata),
+        ResourcesResult: make(map[string]resourcesresults.Result),
+        Report: &reporthandlingv2.PostureReport{},
         PostureReport: &reporthandling.PostureReport{
             ClusterName: "",
             CustomerGUID: "",
@@ -60,3 +68,8 @@ type RegoInputData struct {
     // ClusterName string `json:"clusterName"`
     // K8sConfig RegoK8sConfig `json:"k8sconfig"`
 }
+
+type Policies struct {
+    Frameworks []string
+    Controls map[string]reporthandling.Control // map[<control ID>]<control>
+}
```
```diff
@@ -1,26 +1,67 @@
 package cautils

 import (
-    "encoding/json"
+    pkgcautils "github.com/armosec/utils-go/utils"

-    "github.com/open-policy-agent/opa/storage"
-    "github.com/open-policy-agent/opa/storage/inmem"
-    "github.com/open-policy-agent/opa/util"
+    "github.com/armosec/opa-utils/reporthandling"
 )

-func (data *RegoInputData) SetControlsInputs(controlsInputs map[string][]string) {
-    data.PostureControlInputs = controlsInputs
+func NewPolicies() *Policies {
+    return &Policies{
+        Frameworks: make([]string, 0),
+        Controls: make(map[string]reporthandling.Control),
+    }
 }

-func (data *RegoInputData) TOStorage() (storage.Store, error) {
-    var jsonObj map[string]interface{}
-    bytesData, err := json.Marshal(*data)
-    if err != nil {
-        return nil, err
+func (policies *Policies) Set(frameworks []reporthandling.Framework, version string) {
+    for i := range frameworks {
+        if frameworks[i].Name != "" {
+            policies.Frameworks = append(policies.Frameworks, frameworks[i].Name)
+        }
+        for j := range frameworks[i].Controls {
+            compatibleRules := []reporthandling.PolicyRule{}
+            for r := range frameworks[i].Controls[j].Rules {
+                if !ruleWithArmoOpaDependency(frameworks[i].Controls[j].Rules[r].Attributes) && isRuleKubescapeVersionCompatible(frameworks[i].Controls[j].Rules[r].Attributes, version) {
+                    compatibleRules = append(compatibleRules, frameworks[i].Controls[j].Rules[r])
+                }
+            }
+            if len(compatibleRules) > 0 {
+                frameworks[i].Controls[j].Rules = compatibleRules
+                policies.Controls[frameworks[i].Controls[j].ControlID] = frameworks[i].Controls[j]
+            }
+        }
     }
-    // glog.Infof("RegoDependenciesData: %s", bytesData)
-    if err := util.UnmarshalJSON(bytesData, &jsonObj); err != nil {
-        return nil, err
-    }
-    return inmem.NewFromObject(jsonObj), nil
 }
+
+func ruleWithArmoOpaDependency(attributes map[string]interface{}) bool {
+    if attributes == nil {
+        return false
+    }
+    if s, ok := attributes["armoOpa"]; ok { // TODO - make global
+        return pkgcautils.StringToBool(s.(string))
+    }
+    return false
+}
+
+// Checks that kubescape version is in range of use for this rule
+// In local build (BuildNumber = ""):
+// returns true only if rule doesn't have the "until" attribute
+func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version string) bool {
+    if from, ok := attributes["useFromKubescapeVersion"]; ok && from != nil {
+        if version != "" {
+            if from.(string) > BuildNumber {
+                return false
+            }
+        }
+    }
+    if until, ok := attributes["useUntilKubescapeVersion"]; ok && until != nil {
+        if version != "" {
+            if until.(string) <= BuildNumber {
+                return false
+            }
+        } else {
+            return false
+        }
+    }
+    return true
+}
```
```diff
@@ -1,7 +1,6 @@
 package cautils

 import (
-    "fmt"
     "os"
     "time"

@@ -21,9 +20,9 @@ func IsSilent() bool {
 }

 var FailureDisplay = color.New(color.Bold, color.FgHiRed).FprintfFunc()
-var WarningDisplay = color.New(color.Bold, color.FgCyan).FprintfFunc()
+var WarningDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
 var FailureTextDisplay = color.New(color.Faint, color.FgHiRed).FprintfFunc()
-var InfoDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
+var InfoDisplay = color.New(color.Bold, color.FgCyan).FprintfFunc()
 var InfoTextDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
 var SimpleDisplay = color.New().FprintfFunc()
 var SuccessDisplay = color.New(color.Bold, color.FgHiGreen).FprintfFunc()
@@ -31,39 +30,6 @@ var DescriptionDisplay = color.New(color.Faint, color.FgWhite).FprintfFunc()

 var Spinner *spinner.Spinner

-func ScanStartDisplay() {
-    if IsSilent() {
-        return
-    }
-    InfoDisplay(os.Stderr, "ARMO security scanner starting\n")
-}
-
-func SuccessTextDisplay(str string) {
-    if IsSilent() {
-        return
-    }
-    SuccessDisplay(os.Stderr, "[success] ")
-    SimpleDisplay(os.Stderr, fmt.Sprintf("%s\n", str))
-
-}
-
-func ErrorDisplay(str string) {
-    if IsSilent() {
-        return
-    }
-    FailureDisplay(os.Stderr, "[Error] ")
-    SimpleDisplay(os.Stderr, fmt.Sprintf("%s\n", str))
-
-}
-
-func ProgressTextDisplay(str string) {
-    if IsSilent() {
-        return
-    }
-    InfoDisplay(os.Stderr, "[progress] ")
-    SimpleDisplay(os.Stderr, fmt.Sprintf("%s\n", str))
-
-}
 func StartSpinner() {
     if !IsSilent() && isatty.IsTerminal(os.Stdout.Fd()) {
         Spinner = spinner.New(spinner.CharSets[7], 100*time.Millisecond) // Build our new spinner
```
```diff
@@ -1,7 +1,9 @@
 package cautils

 type DownloadInfo struct {
-    Path string
-    FrameworkName string
-    ControlName string
+    Path string // directory to save artifact. Default is "~/.kubescape/"
+    FileName string // can be empty
+    Target string // type of artifact to download
+    Name string // name of artifact to download
+    Account string // customerGUID
 }
```
```diff
@@ -1,15 +1,17 @@
 package getter

 import (
     "bytes"
     "encoding/json"
     "fmt"
     "io/ioutil"
     "net/http"
     "strings"
     "time"

     "github.com/armosec/armoapi-go/armotypes"
+    "github.com/armosec/kubescape/cautils/logger"
     "github.com/armosec/opa-utils/reporthandling"
-    "github.com/golang/glog"
 )

 // =======================================================================================================================
@@ -19,41 +21,50 @@ import (
 var (
     // ATTENTION!!!
     // Changes in this URLs variable names, or in the usage is affecting the build process! BE CAREFULL
-    armoERURL = "report.armo.cloud"
-    armoBEURL = "api.armo.cloud"
-    armoFEURL = "portal.armo.cloud"
+    armoERURL = "report.armo.cloud"
+    armoBEURL = "api.armo.cloud"
+    armoFEURL = "portal.armo.cloud"
+    armoAUTHURL = "eggauth.armo.cloud"

-    armoDevERURL = "report.eudev3.cyberarmorsoft.com"
-    armoDevBEURL = "eggdashbe.eudev3.cyberarmorsoft.com"
-    armoDevFEURL = "armoui-dev.eudev3.cyberarmorsoft.com"
+    armoDevERURL = "report.eudev3.cyberarmorsoft.com"
+    armoDevBEURL = "api-dev.armo.cloud"
+    armoDevFEURL = "armoui-dev.eudev3.cyberarmorsoft.com"
+    armoDevAUTHURL = "eggauth.eudev3.cyberarmorsoft.com"
 )

 // Armo API for downloading policies
 type ArmoAPI struct {
-    httpClient *http.Client
-    apiURL string
-    erURL string
-    feURL string
-    customerGUID string
+    httpClient *http.Client
+    apiURL string
+    authURL string
+    erURL string
+    feURL string
+    accountID string
+    clientID string
+    secretKey string
+    feToken FeLoginResponse
+    authCookie string
+    loggedIn bool
 }

-var globalArmoAPIConnecctor *ArmoAPI
+var globalArmoAPIConnector *ArmoAPI

 func SetARMOAPIConnector(armoAPI *ArmoAPI) {
-    globalArmoAPIConnecctor = armoAPI
+    globalArmoAPIConnector = armoAPI
 }

 func GetArmoAPIConnector() *ArmoAPI {
-    if globalArmoAPIConnecctor == nil {
-        glog.Error("returning nil API connector")
+    if globalArmoAPIConnector == nil {
+        logger.L().Error("returning nil API connector")
     }
-    return globalArmoAPIConnecctor
+    return globalArmoAPIConnector
 }

 func NewARMOAPIDev() *ArmoAPI {
     apiObj := newArmoAPI()

     apiObj.apiURL = armoDevBEURL
+    apiObj.authURL = armoDevAUTHURL
     apiObj.erURL = armoDevERURL
     apiObj.feURL = armoDevFEURL

@@ -66,16 +77,18 @@ func NewARMOAPIProd() *ArmoAPI {
     apiObj.apiURL = armoBEURL
     apiObj.erURL = armoERURL
     apiObj.feURL = armoFEURL
+    apiObj.authURL = armoAUTHURL

     return apiObj
 }

-func NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL string) *ArmoAPI {
+func NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL, armoAUTHURL string) *ArmoAPI {
     apiObj := newArmoAPI()

     apiObj.erURL = armoERURL
     apiObj.apiURL = armoBEURL
     apiObj.feURL = armoFEURL
+    apiObj.authURL = armoAUTHURL

     return apiObj
 }
@@ -83,22 +96,39 @@ func NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL string) *ArmoAPI {
 func newArmoAPI() *ArmoAPI {
     return &ArmoAPI{
         httpClient: &http.Client{Timeout: time.Duration(61) * time.Second},
+        loggedIn: false,
     }
 }
-func (armoAPI *ArmoAPI) SetCustomerGUID(customerGUID string) {
-    armoAPI.customerGUID = customerGUID
-
-}
-func (armoAPI *ArmoAPI) GetFrontendURL() string {
-    return armoAPI.feURL
+func (armoAPI *ArmoAPI) Post(fullURL string, headers map[string]string, body []byte) (string, error) {
+    if headers == nil {
+        headers = make(map[string]string)
+    }
+    armoAPI.appendAuthHeaders(headers)
+    return HttpPost(armoAPI.httpClient, fullURL, headers, body)
 }

-func (armoAPI *ArmoAPI) GetReportReceiverURL() string {
-    return armoAPI.erURL
+func (armoAPI *ArmoAPI) Get(fullURL string, headers map[string]string) (string, error) {
+    if headers == nil {
+        headers = make(map[string]string)
+    }
+    armoAPI.appendAuthHeaders(headers)
+    return HttpGetter(armoAPI.httpClient, fullURL, headers)
 }

+func (armoAPI *ArmoAPI) GetAccountID() string { return armoAPI.accountID }
+func (armoAPI *ArmoAPI) IsLoggedIn() bool { return armoAPI.loggedIn }
+func (armoAPI *ArmoAPI) GetClientID() string { return armoAPI.clientID }
+func (armoAPI *ArmoAPI) GetSecretKey() string { return armoAPI.secretKey }
+func (armoAPI *ArmoAPI) GetFrontendURL() string { return armoAPI.feURL }
+func (armoAPI *ArmoAPI) GetAPIURL() string { return armoAPI.apiURL }
+func (armoAPI *ArmoAPI) GetReportReceiverURL() string { return armoAPI.erURL }
+func (armoAPI *ArmoAPI) SetAccountID(accountID string) { armoAPI.accountID = accountID }
+func (armoAPI *ArmoAPI) SetClientID(clientID string) { armoAPI.clientID = clientID }
+func (armoAPI *ArmoAPI) SetSecretKey(secretKey string) { armoAPI.secretKey = secretKey }
+
 func (armoAPI *ArmoAPI) GetFramework(name string) (*reporthandling.Framework, error) {
-    respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getFrameworkURL(name), nil)
+    respStr, err := armoAPI.Get(armoAPI.getFrameworkURL(name), nil)
     if err != nil {
         return nil, nil
     }
@@ -107,21 +137,34 @@ func (armoAPI *ArmoAPI) GetFramework(name string) (*reporthandling.Framework, er
     if err = JSONDecoder(respStr).Decode(framework); err != nil {
         return nil, err
     }
-    SaveFrameworkInFile(framework, GetDefaultPath(name+".json"))
+    SaveInFile(framework, GetDefaultPath(name+".json"))

     return framework, err
 }

+func (armoAPI *ArmoAPI) GetFrameworks() ([]reporthandling.Framework, error) {
+    respStr, err := armoAPI.Get(armoAPI.getListFrameworkURL(), nil)
+    if err != nil {
+        return nil, nil
+    }
+
+    frameworks := []reporthandling.Framework{}
+    if err = JSONDecoder(respStr).Decode(&frameworks); err != nil {
+        return nil, err
+    }
+    // SaveInFile(framework, GetDefaultPath(name+".json"))
+
+    return frameworks, err
+}
+
 func (armoAPI *ArmoAPI) GetControl(policyName string) (*reporthandling.Control, error) {
     return nil, fmt.Errorf("control api is not public")
 }

-func (armoAPI *ArmoAPI) GetExceptions(customerGUID, clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
+func (armoAPI *ArmoAPI) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
     exceptions := []armotypes.PostureExceptionPolicy{}
-    if customerGUID == "" {
-        return exceptions, nil
-    }
-    respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getExceptionsURL(customerGUID, clusterName), nil)
+
+    respStr, err := armoAPI.Get(armoAPI.getExceptionsURL(clusterName), nil)
     if err != nil {
         return nil, err
     }
@@ -133,12 +176,12 @@ func (armoAPI *ArmoAPI) GetExceptions(customerGUID, clusterName string) ([]armot
     return exceptions, nil
 }

 func (armoAPI *ArmoAPI) GetCustomerGUID(customerGUID string) (*TenantResponse, error) {
```
|
||||
url := armoAPI.getCustomerURL()
|
||||
if customerGUID != "" {
|
||||
url = fmt.Sprintf("%s?customerGUID=%s", url, customerGUID)
|
||||
func (armoAPI *ArmoAPI) GetTenant() (*TenantResponse, error) {
|
||||
url := armoAPI.getAccountURL()
|
||||
if armoAPI.accountID != "" {
|
||||
url = fmt.Sprintf("%s?customerGUID=%s", url, armoAPI.accountID)
|
||||
}
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, url, nil)
|
||||
respStr, err := armoAPI.Get(url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -146,17 +189,19 @@ func (armoAPI *ArmoAPI) GetCustomerGUID(customerGUID string) (*TenantResponse, e
|
||||
if err = JSONDecoder(respStr).Decode(tenant); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if tenant.TenantID != "" {
|
||||
armoAPI.accountID = tenant.TenantID
|
||||
}
|
||||
return tenant, nil
|
||||
}
|
||||
|
||||
// ControlsInputs // map[<control name>][<input arguments>]
|
||||
func (armoAPI *ArmoAPI) GetAccountConfig(customerGUID, clusterName string) (*armotypes.CustomerConfig, error) {
|
||||
func (armoAPI *ArmoAPI) GetAccountConfig(clusterName string) (*armotypes.CustomerConfig, error) {
|
||||
accountConfig := &armotypes.CustomerConfig{}
|
||||
if customerGUID == "" {
|
||||
if armoAPI.accountID == "" {
|
||||
return accountConfig, nil
|
||||
}
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getAccountConfig(customerGUID, clusterName), nil)
|
||||
respStr, err := armoAPI.Get(armoAPI.getAccountConfig(clusterName), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -169,16 +214,16 @@ func (armoAPI *ArmoAPI) GetAccountConfig(customerGUID, clusterName string) (*arm
|
||||
}
|
||||
|
||||
// ControlsInputs // map[<control name>][<input arguments>]
|
||||
func (armoAPI *ArmoAPI) GetControlsInputs(customerGUID, clusterName string) (map[string][]string, error) {
|
||||
accountConfig, err := armoAPI.GetAccountConfig(customerGUID, clusterName)
|
||||
func (armoAPI *ArmoAPI) GetControlsInputs(clusterName string) (map[string][]string, error) {
|
||||
accountConfig, err := armoAPI.GetAccountConfig(clusterName)
|
||||
if err == nil {
|
||||
return accountConfig.Settings.PostureControlInputs, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) ListCustomFrameworks(customerGUID string) ([]string, error) {
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getListFrameworkURL(), nil)
|
||||
func (armoAPI *ArmoAPI) ListCustomFrameworks() ([]string, error) {
|
||||
respStr, err := armoAPI.Get(armoAPI.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -197,8 +242,8 @@ func (armoAPI *ArmoAPI) ListCustomFrameworks(customerGUID string) ([]string, err
|
||||
return frameworkList, nil
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) ListFrameworks(customerGUID string) ([]string, error) {
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getListFrameworkURL(), nil)
|
||||
func (armoAPI *ArmoAPI) ListFrameworks() ([]string, error) {
|
||||
respStr, err := armoAPI.Get(armoAPI.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -219,9 +264,67 @@ func (armoAPI *ArmoAPI) ListFrameworks(customerGUID string) ([]string, error) {
|
||||
return frameworkList, nil
|
||||
}
|
||||
|
||||
type TenantResponse struct {
|
||||
TenantID string `json:"tenantId"`
|
||||
Token string `json:"token"`
|
||||
Expires string `json:"expires"`
|
||||
AdminMail string `json:"adminMail,omitempty"`
|
||||
func (armoAPI *ArmoAPI) ListControls(l ListType) ([]string, error) {
|
||||
return nil, fmt.Errorf("control api is not public")
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) PostExceptions(exceptions []armotypes.PostureExceptionPolicy) error {
|
||||
|
||||
for i := range exceptions {
|
||||
ex, err := json.Marshal(exceptions[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = armoAPI.Post(armoAPI.postExceptionsURL(), map[string]string{"Content-Type": "application/json"}, ex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) Login() error {
|
||||
if armoAPI.accountID == "" {
|
||||
return fmt.Errorf("failed to login, missing accountID")
|
||||
}
|
||||
if armoAPI.clientID == "" {
|
||||
return fmt.Errorf("failed to login, missing clientID")
|
||||
}
|
||||
if armoAPI.secretKey == "" {
|
||||
return fmt.Errorf("failed to login, missing secretKey")
|
||||
}
|
||||
|
||||
// init URLs
|
||||
feLoginData := FeLoginData{ClientId: armoAPI.clientID, Secret: armoAPI.secretKey}
|
||||
body, _ := json.Marshal(feLoginData)
|
||||
|
||||
resp, err := http.Post(armoAPI.getApiToken(), "application/json", bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("error authenticating: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
responseBody, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var feLoginResponse FeLoginResponse
|
||||
|
||||
if err = json.Unmarshal(responseBody, &feLoginResponse); err != nil {
|
||||
return err
|
||||
}
|
||||
armoAPI.feToken = feLoginResponse
|
||||
|
||||
/* Now we have JWT */
|
||||
|
||||
armoAPI.authCookie, err = armoAPI.getAuthCookie()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
armoAPI.loggedIn = true
|
||||
return nil
|
||||
}
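Taken together, the new auth plumbing (SetAccountID/SetClientID/SetSecretKey, Login, and the header-appending Get/Post) is meant to be driven roughly like the sketch below. This is an illustrative fragment, not code from this change; the credential values are placeholders.

package main

import (
	"fmt"

	"github.com/armosec/kubescape/cautils/getter"
)

func main() {
	// Wire credentials into the connector before pulling policies.
	api := getter.NewARMOAPIProd()
	api.SetAccountID("00000000-0000-0000-0000-000000000000") // placeholder GUID
	api.SetClientID("my-client-id")                           // placeholder
	api.SetSecretKey("my-secret-key")                         // placeholder

	if err := api.Login(); err != nil {
		fmt.Println("login failed:", err)
		return
	}

	// After Login(), Get/Post attach the bearer token and auth cookie
	// via appendAuthHeaders, so the framework download is authenticated.
	// Note that GetFramework returns (nil, nil) on a transport error,
	// so the nil check below matters.
	fw, err := api.GetFramework("nsa")
	if err != nil || fw == nil {
		fmt.Println("failed to download framework")
		return
	}
	fmt.Println("downloaded framework:", fw.Name)
}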
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
@@ -13,7 +17,7 @@ func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/armoFrameworks"
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.customerGUID)
|
||||
q.Add("customerGUID", armoAPI.accountID)
|
||||
if isNativeFramework(frameworkName) {
|
||||
q.Add("frameworkName", strings.ToUpper(frameworkName))
|
||||
} else {
|
||||
@@ -31,19 +35,19 @@ func (armoAPI *ArmoAPI) getListFrameworkURL() string {
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/armoFrameworks"
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.customerGUID)
|
||||
q.Add("customerGUID", armoAPI.accountID)
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
return u.String()
|
||||
}
|
||||
func (armoAPI *ArmoAPI) getExceptionsURL(customerGUID, clusterName string) string {
|
||||
func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/armoPostureExceptions"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", customerGUID)
|
||||
q.Add("customerGUID", armoAPI.accountID)
|
||||
// if clusterName != "" { // TODO - fix customer name support in Armo BE
|
||||
// q.Add("clusterName", clusterName)
|
||||
// }
|
||||
@@ -52,14 +56,27 @@ func (armoAPI *ArmoAPI) getExceptionsURL(customerGUID, clusterName string) strin
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getAccountConfig(customerGUID, clusterName string) string {
|
||||
func (armoAPI *ArmoAPI) postExceptionsURL() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/postureExceptionPolicy"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.accountID)
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/armoCustomerConfiguration"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", customerGUID)
|
||||
q.Add("customerGUID", armoAPI.accountID)
|
||||
if clusterName != "" { // TODO - fix customer name support in Armo BE
|
||||
q.Add("clusterName", clusterName)
|
||||
}
|
||||
@@ -68,10 +85,74 @@ func (armoAPI *ArmoAPI) getAccountConfig(customerGUID, clusterName string) strin
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getCustomerURL() string {
|
||||
func (armoAPI *ArmoAPI) getAccountURL() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/createTenant"
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getApiToken() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.authURL
|
||||
u.Path = "frontegg/identity/resources/auth/v1/api-token"
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getOpenidCustomers() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/openid_customers"
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getAuthCookie() (string, error) {
|
||||
selectCustomer := ArmoSelectCustomer{SelectedCustomerGuid: armoAPI.accountID}
|
||||
requestBody, _ := json.Marshal(selectCustomer)
|
||||
client := &http.Client{}
|
||||
httpRequest, err := http.NewRequest(http.MethodPost, armoAPI.getOpenidCustomers(), bytes.NewBuffer(requestBody))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
httpRequest.Header.Set("Content-Type", "application/json")
|
||||
httpRequest.Header.Set("Authorization", fmt.Sprintf("Bearer %s", armoAPI.feToken.Token))
|
||||
httpResponse, err := client.Do(httpRequest)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer httpResponse.Body.Close()
|
||||
if httpResponse.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("failed to get cookie from %s: status %d", armoAPI.getOpenidCustomers(), httpResponse.StatusCode)
|
||||
}
|
||||
|
||||
cookies := httpResponse.Header.Get("set-cookie")
|
||||
if len(cookies) == 0 {
|
||||
return "", fmt.Errorf("no cookie field in response from %s", armoAPI.getOpenidCustomers())
|
||||
}
|
||||
|
||||
authCookie := ""
|
||||
for _, cookie := range strings.Split(cookies, ";") {
|
||||
kv := strings.Split(cookie, "=")
|
||||
if kv[0] == "auth" {
|
||||
authCookie = kv[1]
|
||||
}
|
||||
}
|
||||
|
||||
if len(authCookie) == 0 {
|
||||
return "", fmt.Errorf("no auth cookie field in response from %s", armoAPI.getOpenidCustomers())
|
||||
}
|
||||
|
||||
return authCookie, nil
|
||||
}
|
||||
func (armoAPI *ArmoAPI) appendAuthHeaders(headers map[string]string) {
|
||||
|
||||
if armoAPI.feToken.Token != "" {
|
||||
headers["Authorization"] = fmt.Sprintf("Bearer %s", armoAPI.feToken.Token)
|
||||
}
|
||||
if armoAPI.authCookie != "" {
|
||||
headers["Cookie"] = fmt.Sprintf("auth=%s", armoAPI.authCookie)
|
||||
}
|
||||
}
|
||||
|
||||
24
cautils/getter/datastructures.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package getter
|
||||
|
||||
type FeLoginData struct {
|
||||
Secret string `json:"secret"`
|
||||
ClientId string `json:"clientId"`
|
||||
}
|
||||
|
||||
type FeLoginResponse struct {
|
||||
Token string `json:"accessToken"`
|
||||
RefreshToken string `json:"refreshToken"`
|
||||
ExpiresIn int32 `json:"expiresIn"`
|
||||
Expires string `json:"expires"`
|
||||
}
|
||||
|
||||
type ArmoSelectCustomer struct {
|
||||
SelectedCustomerGuid string `json:"selectedCustomer"`
|
||||
}
|
||||
|
||||
type TenantResponse struct {
|
||||
TenantID string `json:"tenantId"`
|
||||
Token string `json:"token"`
|
||||
Expires string `json:"expires"`
|
||||
AdminMail string `json:"adminMail,omitempty"`
|
||||
}
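For reference, the JSON shapes these structs imply, reconstructed from the field tags above (all values are made up):

// FeLoginData (request):        {"secret":"<api-secret>","clientId":"<client-id>"}
// FeLoginResponse (response):   {"accessToken":"<jwt>","refreshToken":"<jwt>","expiresIn":3600,"expires":"2022-01-01T00:00:00Z"}
// ArmoSelectCustomer (request): {"selectedCustomer":"<tenant-guid>"}
// TenantResponse (response):    {"tenantId":"<guid>","token":"<token>","expires":"<timestamp>","adminMail":"user@example.com"}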
|
||||
@@ -41,7 +41,28 @@ func (drp *DownloadReleasedPolicy) GetFramework(name string) (*reporthandling.Fr
|
||||
return framework, err
|
||||
}
|
||||
|
||||
func (drp *DownloadReleasedPolicy) GetControlsInputs(customerGUID, clusterName string) (map[string][]string, error) {
|
||||
func (drp *DownloadReleasedPolicy) GetFrameworks() ([]reporthandling.Framework, error) {
|
||||
frameworks, err := drp.gs.GetOPAFrameworks()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return frameworks, err
|
||||
}
|
||||
|
||||
func (drp *DownloadReleasedPolicy) ListFrameworks() ([]string, error) {
|
||||
return drp.gs.GetOPAFrameworksNamesList()
|
||||
}
|
||||
|
||||
func (drp *DownloadReleasedPolicy) ListControls(listType ListType) ([]string, error) {
|
||||
switch listType {
|
||||
case ListID:
|
||||
return drp.gs.GetOPAControlsIDsList()
|
||||
default:
|
||||
return drp.gs.GetOPAControlsNamesList()
|
||||
}
|
||||
}
|
||||
|
||||
func (drp *DownloadReleasedPolicy) GetControlsInputs(clusterName string) (map[string][]string, error) {
|
||||
defaultConfigInputs, err := drp.gs.GetDefaultConfigInputs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -5,18 +5,36 @@ import (
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// supported listing
|
||||
type ListType string
|
||||
|
||||
const ListID ListType = "id"
|
||||
const ListName ListType = "name"
|
||||
|
||||
type IPolicyGetter interface {
|
||||
GetFramework(name string) (*reporthandling.Framework, error)
|
||||
GetFrameworks() ([]reporthandling.Framework, error)
|
||||
GetControl(name string) (*reporthandling.Control, error)
|
||||
|
||||
ListFrameworks() ([]string, error)
|
||||
ListControls(ListType) ([]string, error)
|
||||
}
|
||||
|
||||
type IExceptionsGetter interface {
|
||||
GetExceptions(customerGUID, clusterName string) ([]armotypes.PostureExceptionPolicy, error)
|
||||
GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error)
|
||||
}
|
||||
type IBackend interface {
|
||||
GetCustomerGUID(customerGUID string) (*TenantResponse, error)
|
||||
GetAccountID() string
|
||||
GetClientID() string
|
||||
GetSecretKey() string
|
||||
|
||||
SetAccountID(accountID string)
|
||||
SetClientID(clientID string)
|
||||
SetSecretKey(secretKey string)
|
||||
|
||||
GetTenant() (*TenantResponse, error)
|
||||
}
|
||||
|
||||
type IControlsInputsGetter interface {
|
||||
GetControlsInputs(customerGUID, clusterName string) (map[string][]string, error)
|
||||
GetControlsInputs(clusterName string) (map[string][]string, error)
|
||||
}
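Since several concrete getters (ArmoAPI, DownloadReleasedPolicy, LoadPolicy) are expected to satisfy these narrowed interfaces, a compile-time assertion is a cheap way to keep them in sync. A sketch, not part of this change, assuming each type implements the full interface:

var _ IPolicyGetter = (*ArmoAPI)(nil)
var _ IPolicyGetter = (*DownloadReleasedPolicy)(nil)
var _ IPolicyGetter = (*LoadPolicy)(nil)
var _ IExceptionsGetter = (*ArmoAPI)(nil)
var _ IControlsInputsGetter = (*LoadPolicy)(nil)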
|
||||
|
||||
@@ -10,8 +10,6 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
func GetDefaultPath(name string) string {
|
||||
@@ -22,33 +20,8 @@ func GetDefaultPath(name string) string {
|
||||
return defaultfilePath
|
||||
}
|
||||
|
||||
// Save control as json in file
|
||||
func SaveControlInFile(control *reporthandling.Control, pathStr string) error {
|
||||
encodedData, err := json.Marshal(control)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
pathDir := path.Dir(pathStr)
|
||||
if err := os.Mkdir(pathDir, 0744); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
|
||||
}
|
||||
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func SaveFrameworkInFile(framework *reporthandling.Framework, pathStr string) error {
|
||||
encodedData, err := json.Marshal(framework)
|
||||
func SaveInFile(policy interface{}, pathStr string) error {
|
||||
encodedData, err := json.MarshalIndent(policy, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -84,7 +57,7 @@ func HttpGetter(httpClient *http.Client, fullURL string, headers map[string]stri
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
addHeaders(req, headers)
|
||||
setHeaders(req, headers)
|
||||
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
@@ -103,7 +76,7 @@ func HttpPost(httpClient *http.Client, fullURL string, headers map[string]string
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
addHeaders(req, headers)
|
||||
setHeaders(req, headers)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -115,10 +88,10 @@ func HttpPost(httpClient *http.Client, fullURL string, headers map[string]string
|
||||
return respStr, nil
|
||||
}
|
||||
|
||||
func addHeaders(req *http.Request, headers map[string]string) {
|
||||
func setHeaders(req *http.Request, headers map[string]string) {
|
||||
if len(headers) > 0 { // might be nil
|
||||
for k, v := range headers {
|
||||
req.Header.Add(k, v)
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -133,21 +106,22 @@ func httpRespToString(resp *http.Response) (string, error) {
|
||||
if resp.ContentLength > 0 {
|
||||
strBuilder.Grow(int(resp.ContentLength))
|
||||
}
|
||||
bytesNum, err := io.Copy(&strBuilder, resp.Body)
|
||||
_, err := io.Copy(&strBuilder, resp.Body)
|
||||
respStr := strBuilder.String()
|
||||
if err != nil {
|
||||
respStrNewLen := len(respStr)
|
||||
if respStrNewLen > 1024 {
|
||||
respStrNewLen = 1024
|
||||
}
|
||||
return "", fmt.Errorf("HTTP request failed. URL: '%s', Read-ERROR: '%s', HTTP-CODE: '%s', BODY(top): '%s', HTTP-HEADERS: %v, HTTP-BODY-BUFFER-LENGTH: %v", resp.Request.URL.RequestURI(), err, resp.Status, respStr[:respStrNewLen], resp.Header, bytesNum)
|
||||
return "", fmt.Errorf("http-error: '%s', reason: '%s'", resp.Status, respStr[:respStrNewLen])
|
||||
// return "", fmt.Errorf("HTTP request failed. URL: '%s', Read-ERROR: '%s', HTTP-CODE: '%s', BODY(top): '%s', HTTP-HEADERS: %v, HTTP-BODY-BUFFER-LENGTH: %v", resp.Request.URL.RequestURI(), err, resp.Status, respStr[:respStrNewLen], resp.Header, bytesNum)
|
||||
}
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
respStrNewLen := len(respStr)
|
||||
if respStrNewLen > 1024 {
|
||||
respStrNewLen = 1024
|
||||
}
|
||||
err = fmt.Errorf("HTTP request failed. URL: '%s', HTTP-ERROR: '%s', BODY: '%s', HTTP-HEADERS: %v, HTTP-BODY-BUFFER-LENGTH: %v", resp.Request.URL.RequestURI(), resp.Status, respStr[:respStrNewLen], resp.Header, bytesNum)
|
||||
err = fmt.Errorf("http-error: '%s', reason: '%s'", resp.Status, respStr[:respStrNewLen])
|
||||
}
|
||||
|
||||
return respStr, err
|
||||
|
||||
@@ -78,7 +78,34 @@ func (lp *LoadPolicy) GetFramework(frameworkName string) (*reporthandling.Framew
|
||||
return framework, err
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) GetExceptions(customerGUID, clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
|
||||
func (lp *LoadPolicy) GetFrameworks() ([]reporthandling.Framework, error) {
|
||||
frameworks := []reporthandling.Framework{}
|
||||
var err error
|
||||
return frameworks, err
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) ListFrameworks() ([]string, error) {
|
||||
fwNames := []string{}
|
||||
framework := &reporthandling.Framework{}
|
||||
for _, f := range lp.filePaths {
|
||||
file, err := os.ReadFile(f)
|
||||
if err == nil {
|
||||
if err := json.Unmarshal(file, framework); err == nil {
|
||||
if !contains(fwNames, framework.Name) {
|
||||
fwNames = append(fwNames, framework.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return fwNames, nil
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) ListControls(listType ListType) ([]string, error) {
|
||||
// TODO - Support
|
||||
return []string{}, fmt.Errorf("loading controls list from file is not supported")
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
|
||||
filePath := lp.filePath()
|
||||
exception := []armotypes.PostureExceptionPolicy{}
|
||||
f, err := os.ReadFile(filePath)
|
||||
@@ -90,7 +117,7 @@ func (lp *LoadPolicy) GetExceptions(customerGUID, clusterName string) ([]armotyp
|
||||
return exception, err
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) GetControlsInputs(customerGUID, clusterName string) (map[string][]string, error) {
|
||||
func (lp *LoadPolicy) GetControlsInputs(clusterName string) (map[string][]string, error) {
|
||||
filePath := lp.filePath()
|
||||
accountConfig := &armotypes.CustomerConfig{}
|
||||
f, err := os.ReadFile(filePath)
|
||||
@@ -98,7 +125,7 @@ func (lp *LoadPolicy) GetControlsInputs(customerGUID, clusterName string) (map[s
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(f, &accountConfig); err == nil {
|
||||
if err = json.Unmarshal(f, &accountConfig.Settings.PostureControlInputs); err == nil {
|
||||
return accountConfig.Settings.PostureControlInputs, nil
|
||||
}
|
||||
return nil, err
|
||||
|
||||
13
cautils/getter/loadpolicy_test.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
var mockFrameworkBasePath = filepath.Join("examples", "mocks", "frameworks")
|
||||
|
||||
func MockNewLoadPolicy() *LoadPolicy {
|
||||
return &LoadPolicy{
|
||||
filePaths: []string{""},
|
||||
}
|
||||
}
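mockFrameworkBasePath is declared but not yet exercised in this hunk; a test built on the mock would look roughly like the sketch below (the framework file name is an assumption):

func TestListFrameworks(t *testing.T) { // assumes an import of "testing"
	lp := MockNewLoadPolicy()
	lp.filePaths = []string{filepath.Join(mockFrameworkBasePath, "nsa.json")}
	if _, err := lp.ListFrameworks(); err != nil {
		t.Errorf("ListFrameworks failed: %v", err)
	}
}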
|
||||
26
cautils/logger/helpers/datastructures.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package helpers
|
||||
|
||||
type StringObj struct {
|
||||
key string
|
||||
value string
|
||||
}
|
||||
|
||||
type ErrorObj struct {
|
||||
key string
|
||||
value error
|
||||
}
|
||||
|
||||
type IntObj struct {
|
||||
key string
|
||||
value int
|
||||
}
|
||||
|
||||
type InterfaceObj struct {
|
||||
key string
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func Error(e error) *ErrorObj { return &ErrorObj{key: "error", value: e} }
|
||||
func Int(k string, v int) *IntObj { return &IntObj{key: k, value: v} }
|
||||
func String(k, v string) *StringObj { return &StringObj{key: k, value: v} }
|
||||
func Interface(k string, v interface{}) *InterfaceObj { return &InterfaceObj{key: k, value: v} }
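These constructors are what call sites pass as structured details; a typical call, assuming the logger facade added later in this diff, looks roughly like:

// assumes imports of .../cautils/logger and .../cautils/logger/helpers
logger.L().Warning("failed to get latest version",
	helpers.String("component", "versioncheck"), // illustrative key/value
	helpers.Error(err))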
|
||||
69
cautils/logger/helpers/level.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Level int8
|
||||
|
||||
const (
|
||||
UnknownLevel Level = iota - 1
|
||||
DebugLevel
|
||||
InfoLevel //default
|
||||
SuccessLevel
|
||||
WarningLevel
|
||||
ErrorLevel
|
||||
FatalLevel
|
||||
|
||||
_defaultLevel = InfoLevel
|
||||
_minLevel = DebugLevel
|
||||
_maxLevel = FatalLevel
|
||||
)
|
||||
|
||||
func ToLevel(level string) Level {
|
||||
switch strings.ToLower(level) {
|
||||
case "debug":
|
||||
return DebugLevel
|
||||
case "info":
|
||||
return InfoLevel
|
||||
case "success":
|
||||
return SuccessLevel
|
||||
case "warning", "warn":
|
||||
return WarningLevel
|
||||
case "error":
|
||||
return ErrorLevel
|
||||
case "fatal":
|
||||
return FatalLevel
|
||||
default:
|
||||
return UnknownLevel
|
||||
}
|
||||
}
|
||||
func (l Level) String() string {
|
||||
switch l {
|
||||
case DebugLevel:
|
||||
return "debug"
|
||||
case InfoLevel:
|
||||
return "info"
|
||||
case SuccessLevel:
|
||||
return "success"
|
||||
case WarningLevel:
|
||||
return "warning"
|
||||
case ErrorLevel:
|
||||
return "error"
|
||||
case FatalLevel:
|
||||
return "fatal"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (l Level) Skip(l2 Level) bool {
|
||||
return l < l2
|
||||
}
|
||||
|
||||
func SupportedLevels() []string {
|
||||
levels := []string{}
|
||||
for i := _minLevel; i <= _maxLevel; i++ {
|
||||
levels = append(levels, i.String())
|
||||
}
|
||||
return levels
|
||||
}
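With the ordering above, Skip reports whether a message's level falls below the logger's configured level, for example:

// helpers.WarningLevel.Skip(helpers.ErrorLevel) == true  -> warnings are suppressed when the logger level is "error"
// helpers.ErrorLevel.Skip(helpers.InfoLevel)    == false -> errors are printed when the logger level is "info"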
|
||||
62
cautils/logger/helpers/methods.go
Normal file
@@ -0,0 +1,62 @@
|
||||
package helpers
|
||||
|
||||
type IDetails interface {
|
||||
Key() string
|
||||
Value() interface{}
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// ============================== String ================================================
|
||||
// ======================================================================================
|
||||
|
||||
// Key
|
||||
func (s *StringObj) Key() string {
|
||||
return s.key
|
||||
}
|
||||
|
||||
// Value
|
||||
func (s *StringObj) Value() interface{} {
|
||||
return s.value
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// =============================== Error ================================================
|
||||
// ======================================================================================
|
||||
|
||||
// Key
|
||||
func (s *ErrorObj) Key() string {
|
||||
return s.key
|
||||
}
|
||||
|
||||
// Value
|
||||
func (s *ErrorObj) Value() interface{} {
|
||||
return s.value
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// ================================= Int ================================================
|
||||
// ======================================================================================
|
||||
|
||||
// Key
|
||||
func (s *IntObj) Key() string {
|
||||
return s.key
|
||||
}
|
||||
|
||||
// Value
|
||||
func (s *IntObj) Value() interface{} {
|
||||
return s.value
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// =========================== Interface ================================================
|
||||
// ======================================================================================
|
||||
|
||||
// Key
|
||||
func (s *InterfaceObj) Key() string {
|
||||
return s.key
|
||||
}
|
||||
|
||||
// Value
|
||||
func (s *InterfaceObj) Value() interface{} {
|
||||
return s.value
|
||||
}
|
||||
41
cautils/logger/methods.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package logger
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/cautils/logger/prettylogger"
|
||||
)
|
||||
|
||||
type ILogger interface {
|
||||
Fatal(msg string, details ...helpers.IDetails) // print log and exit 1
|
||||
Error(msg string, details ...helpers.IDetails)
|
||||
Success(msg string, details ...helpers.IDetails)
|
||||
Warning(msg string, details ...helpers.IDetails)
|
||||
Info(msg string, details ...helpers.IDetails)
|
||||
Debug(msg string, details ...helpers.IDetails)
|
||||
|
||||
SetLevel(level string) error
|
||||
GetLevel() string
|
||||
|
||||
SetWriter(w *os.File)
|
||||
GetWriter() *os.File
|
||||
}
|
||||
|
||||
var l ILogger
|
||||
|
||||
func L() ILogger {
|
||||
if l == nil {
|
||||
InitializeLogger()
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func InitializeLogger() {
|
||||
initializeLogger()
|
||||
}
|
||||
|
||||
func initializeLogger() {
|
||||
// TODO - support zap logger
|
||||
l = prettylogger.NewPrettyLogger()
|
||||
}
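The intended call pattern for the new facade, as a sketch (the level string and messages are illustrative):

if err := logger.L().SetLevel("debug"); err != nil {
	logger.L().Warning("unknown log level, keeping default", helpers.Error(err))
}
logger.L().Info("scanning cluster", helpers.String("context", "minikube"))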
|
||||
31
cautils/logger/prettylogger/colors.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package prettylogger
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/fatih/color"
|
||||
)
|
||||
|
||||
var prefixError = color.New(color.Bold, color.FgHiRed).FprintfFunc()
|
||||
var prefixWarning = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
|
||||
var prefixInfo = color.New(color.Bold, color.FgCyan).FprintfFunc()
|
||||
var prefixSuccess = color.New(color.Bold, color.FgHiGreen).FprintfFunc()
|
||||
var prefixDebug = color.New(color.Bold, color.FgWhite).FprintfFunc()
|
||||
var message = color.New().FprintfFunc()
|
||||
|
||||
func prefix(l helpers.Level) func(w io.Writer, format string, a ...interface{}) {
|
||||
switch l {
|
||||
case helpers.DebugLevel:
|
||||
return prefixDebug
|
||||
case helpers.InfoLevel:
|
||||
return prefixInfo
|
||||
case helpers.SuccessLevel:
|
||||
return prefixSuccess
|
||||
case helpers.WarningLevel:
|
||||
return prefixWarning
|
||||
case helpers.ErrorLevel, helpers.FatalLevel:
|
||||
return prefixError
|
||||
}
|
||||
return message
|
||||
}
|
||||
75
cautils/logger/prettylogger/logger.go
Normal file
@@ -0,0 +1,75 @@
|
||||
package prettylogger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
)
|
||||
|
||||
type PrettyLogger struct {
|
||||
writer *os.File
|
||||
level helpers.Level
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
func NewPrettyLogger() *PrettyLogger {
|
||||
return &PrettyLogger{
|
||||
writer: os.Stderr, // default to stderr
|
||||
level: helpers.InfoLevel,
|
||||
mutex: sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (pl *PrettyLogger) GetLevel() string { return pl.level.String() }
|
||||
func (pl *PrettyLogger) SetWriter(w *os.File) { pl.writer = w }
|
||||
func (pl *PrettyLogger) GetWriter() *os.File { return pl.writer }
|
||||
|
||||
func (pl *PrettyLogger) SetLevel(level string) error {
|
||||
pl.level = helpers.ToLevel(level)
|
||||
if pl.level == helpers.UnknownLevel {
|
||||
return fmt.Errorf("level '%s' unknown", level)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (pl *PrettyLogger) Fatal(msg string, details ...helpers.IDetails) {
|
||||
pl.print(helpers.FatalLevel, msg, details...)
|
||||
os.Exit(1)
|
||||
}
|
||||
func (pl *PrettyLogger) Error(msg string, details ...helpers.IDetails) {
|
||||
pl.print(helpers.ErrorLevel, msg, details...)
|
||||
}
|
||||
func (pl *PrettyLogger) Warning(msg string, details ...helpers.IDetails) {
|
||||
pl.print(helpers.WarningLevel, msg, details...)
|
||||
}
|
||||
func (pl *PrettyLogger) Info(msg string, details ...helpers.IDetails) {
|
||||
pl.print(helpers.InfoLevel, msg, details...)
|
||||
}
|
||||
func (pl *PrettyLogger) Debug(msg string, details ...helpers.IDetails) {
|
||||
pl.print(helpers.DebugLevel, msg, details...)
|
||||
}
|
||||
func (pl *PrettyLogger) Success(msg string, details ...helpers.IDetails) {
|
||||
pl.print(helpers.SuccessLevel, msg, details...)
|
||||
}
|
||||
|
||||
func (pl *PrettyLogger) print(level helpers.Level, msg string, details ...helpers.IDetails) {
|
||||
if !level.Skip(pl.level) {
|
||||
pl.mutex.Lock()
|
||||
prefix(level)(pl.writer, "[%s] ", level.String())
|
||||
message(pl.writer, fmt.Sprintf("%s. %s\n", msg, detailsToString(details)))
|
||||
pl.mutex.Unlock()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func detailsToString(details []helpers.IDetails) string {
|
||||
s := ""
|
||||
for i := range details {
|
||||
s += fmt.Sprintf("%s: %s", details[i].Key(), details[i].Value())
|
||||
if i < len(details)-1 {
|
||||
s += ";"
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
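Given print and detailsToString above, emitted lines look roughly like the following (message and detail values are made up):

[warning] failed to get latest version. error: connection refused
[info] Skipping version check.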
|
||||
58
cautils/logger/zaplogger/logger.go
Normal file
@@ -0,0 +1,58 @@
|
||||
package zaplogger
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
type ZapLogger struct {
|
||||
zapL *zap.Logger
|
||||
}
|
||||
|
||||
func NewZapLogger() *ZapLogger {
|
||||
return &ZapLogger{
|
||||
zapL: zap.L(),
|
||||
}
|
||||
}
|
||||
|
||||
func (zl *ZapLogger) GetLevel() string { return "" }
|
||||
func (zl *ZapLogger) SetWriter(w *os.File) {}
|
||||
func GetWriter() *os.File { return nil }
|
||||
|
||||
func (zl *ZapLogger) SetLevel(level string) error {
|
||||
return nil
|
||||
}
|
||||
func (zl *ZapLogger) Fatal(msg string, details ...helpers.IDetails) {
|
||||
zl.zapL.Fatal(msg, detailsToZapFields(details)...)
|
||||
}
|
||||
|
||||
func (zl *ZapLogger) Error(msg string, details ...helpers.IDetails) {
|
||||
zl.zapL.Error(msg, detailsToZapFields(details)...)
|
||||
}
|
||||
|
||||
func (zl *ZapLogger) Warning(msg string, details ...helpers.IDetails) {
|
||||
zl.zapL.Warn(msg, detailsToZapFields(details)...)
|
||||
}
|
||||
|
||||
func (zl *ZapLogger) Success(msg string, details ...helpers.IDetails) {
|
||||
zl.zapL.Info(msg, detailsToZapFields(details)...)
|
||||
}
|
||||
|
||||
func (zl *ZapLogger) Info(msg string, details ...helpers.IDetails) {
|
||||
zl.zapL.Info(msg, detailsToZapFields(details)...)
|
||||
}
|
||||
|
||||
func (zl *ZapLogger) Debug(msg string, details ...helpers.IDetails) {
|
||||
zl.zapL.Debug(msg, detailsToZapFields(details)...)
|
||||
}
|
||||
|
||||
func detailsToZapFields(details []helpers.IDetails) []zapcore.Field {
|
||||
zapFields := []zapcore.Field{}
|
||||
for i := range details {
|
||||
zapFields = append(zapFields, zap.Any(details[i].Key(), details[i].Value()))
|
||||
}
|
||||
return zapFields
|
||||
}
|
||||
@@ -42,20 +42,38 @@ func (rbacObjects *RBACObjects) ListAllResources() (map[string]workloadinterface
|
||||
|
||||
func (rbacObjects *RBACObjects) rbacObjectsToResources(resources *rbacutils.RbacObjects) (map[string]workloadinterface.IMetadata, error) {
|
||||
allresources := map[string]workloadinterface.IMetadata{}
|
||||
|
||||
/*
|
||||
************************************************************************************************************************
|
||||
This code adds an invalid ID ->
|
||||
(github.com/armosec/rbac-utils v0.0.11): "//SA2WLIDmap/SA2WLIDmap"
|
||||
(github.com/armosec/rbac-utils v0.0.12): "armo.rbac.com/v0beta1//SAID2WLIDmap/SAID2WLIDmap"
|
||||
|
||||
Should be investigated
|
||||
************************************************************************************************************************
|
||||
*/
|
||||
|
||||
// wrap rbac aggregated objects in IMetadata and add to allresources
|
||||
// TODO - DEPRECATE SA2WLIDmap
|
||||
SA2WLIDmapIMeta, err := rbacutils.SA2WLIDmapIMetadataWrapper(resources.SA2WLIDmap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allresources[SA2WLIDmapIMeta.GetID()] = SA2WLIDmapIMeta
|
||||
|
||||
SAID2WLIDmapIMeta, err := rbacutils.SAID2WLIDmapIMetadataWrapper(resources.SAID2WLIDmap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allresources[SAID2WLIDmapIMeta.GetID()] = SAID2WLIDmapIMeta
|
||||
|
||||
// convert rbac k8s resources to IMetadata and add to allresources
|
||||
for _, cr := range resources.ClusterRoles.Items {
|
||||
crmap, err := convertToMap(cr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1"
|
||||
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the the correct apiVersion?
|
||||
crIMeta := workloadinterface.NewWorkloadObj(crmap)
|
||||
crIMeta.SetKind("ClusterRole")
|
||||
allresources[crIMeta.GetID()] = crIMeta
|
||||
@@ -65,7 +83,7 @@ func (rbacObjects *RBACObjects) rbacObjectsToResources(resources *rbacutils.Rbac
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1"
|
||||
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the the correct apiVersion?
|
||||
crIMeta := workloadinterface.NewWorkloadObj(crmap)
|
||||
crIMeta.SetKind("Role")
|
||||
allresources[crIMeta.GetID()] = crIMeta
|
||||
@@ -75,7 +93,7 @@ func (rbacObjects *RBACObjects) rbacObjectsToResources(resources *rbacutils.Rbac
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1"
|
||||
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the the correct apiVersion?
|
||||
crIMeta := workloadinterface.NewWorkloadObj(crmap)
|
||||
crIMeta.SetKind("ClusterRoleBinding")
|
||||
allresources[crIMeta.GetID()] = crIMeta
|
||||
@@ -85,7 +103,7 @@ func (rbacObjects *RBACObjects) rbacObjectsToResources(resources *rbacutils.Rbac
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1"
|
||||
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the the correct apiVersion?
|
||||
crIMeta := workloadinterface.NewWorkloadObj(crmap)
|
||||
crIMeta.SetKind("RoleBinding")
|
||||
allresources[crIMeta.GetID()] = crIMeta
|
||||
|
||||
159
cautils/reportv2tov1.go
Normal file
@@ -0,0 +1,159 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
helpersv1 "github.com/armosec/opa-utils/reporthandling/helpers/v1"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/armosec/opa-utils/score"
|
||||
)
|
||||
|
||||
func ReportV2ToV1(opaSessionObj *OPASessionObj) {
|
||||
if len(opaSessionObj.PostureReport.FrameworkReports) > 0 {
|
||||
return // report already converted
|
||||
}
|
||||
|
||||
opaSessionObj.PostureReport.ClusterCloudProvider = opaSessionObj.Report.ClusterCloudProvider
|
||||
|
||||
frameworks := []reporthandling.FrameworkReport{}
|
||||
|
||||
if len(opaSessionObj.Report.SummaryDetails.Frameworks) > 0 {
|
||||
for _, fwv2 := range opaSessionObj.Report.SummaryDetails.Frameworks {
|
||||
fwv1 := reporthandling.FrameworkReport{}
|
||||
fwv1.Name = fwv2.GetName()
|
||||
fwv1.Score = fwv2.GetScore()
|
||||
|
||||
fwv1.ControlReports = append(fwv1.ControlReports, controlReportV2ToV1(opaSessionObj, fwv2.GetName(), fwv2.Controls)...)
|
||||
frameworks = append(frameworks, fwv1)
|
||||
|
||||
}
|
||||
} else {
|
||||
fwv1 := reporthandling.FrameworkReport{}
|
||||
fwv1.Name = ""
|
||||
fwv1.Score = 0
|
||||
|
||||
fwv1.ControlReports = append(fwv1.ControlReports, controlReportV2ToV1(opaSessionObj, "", opaSessionObj.Report.SummaryDetails.Controls)...)
|
||||
frameworks = append(frameworks, fwv1)
|
||||
}
|
||||
|
||||
// // remove unused data
|
||||
// opaSessionObj.Report = nil
|
||||
// opaSessionObj.ResourcesResult = nil
|
||||
|
||||
// setup counters and score
|
||||
for f := range frameworks {
|
||||
// // set exceptions
|
||||
// exceptions.SetFrameworkExceptions(frameworks, opap.Exceptions, cautils.ClusterName)
|
||||
|
||||
// set counters
|
||||
reporthandling.SetUniqueResourcesCounter(&frameworks[f])
|
||||
|
||||
// set default score
|
||||
reporthandling.SetDefaultScore(&frameworks[f])
|
||||
}
|
||||
|
||||
// update score
|
||||
scoreutil := score.NewScore(opaSessionObj.AllResources)
|
||||
scoreutil.Calculate(frameworks)
|
||||
|
||||
opaSessionObj.PostureReport.FrameworkReports = frameworks
|
||||
|
||||
// opaSessionObj.Report.SummaryDetails.Score = 0
|
||||
// for i := range frameworks {
|
||||
// for j := range frameworks[i].ControlReports {
|
||||
// // frameworks[i].ControlReports[j].Score
|
||||
// for w := range opaSessionObj.Report.SummaryDetails.Frameworks {
|
||||
// if opaSessionObj.Report.SummaryDetails.Frameworks[w].Name == frameworks[i].Name {
|
||||
// opaSessionObj.Report.SummaryDetails.Frameworks[w].Score = frameworks[i].Score
|
||||
// }
|
||||
// if c, ok := opaSessionObj.Report.SummaryDetails.Frameworks[w].Controls[frameworks[i].ControlReports[j].ControlID]; ok {
|
||||
// c.Score = frameworks[i].ControlReports[j].Score
|
||||
// opaSessionObj.Report.SummaryDetails.Frameworks[w].Controls[frameworks[i].ControlReports[j].ControlID] = c
|
||||
// }
|
||||
// }
|
||||
// if c, ok := opaSessionObj.Report.SummaryDetails.Controls[frameworks[i].ControlReports[j].ControlID]; ok {
|
||||
// c.Score = frameworks[i].ControlReports[j].Score
|
||||
// opaSessionObj.Report.SummaryDetails.Controls[frameworks[i].ControlReports[j].ControlID] = c
|
||||
// }
|
||||
// }
|
||||
// opaSessionObj.Report.SummaryDetails.Score += opaSessionObj.PostureReport.FrameworkReports[i].Score
|
||||
// }
|
||||
// opaSessionObj.Report.SummaryDetails.Score /= float32(len(opaSessionObj.Report.SummaryDetails.Frameworks))
|
||||
}
|
||||
|
||||
func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, controls map[string]reportsummary.ControlSummary) []reporthandling.ControlReport {
|
||||
controlRepors := []reporthandling.ControlReport{}
|
||||
for controlID, crv2 := range controls {
|
||||
crv1 := reporthandling.ControlReport{}
|
||||
crv1.ControlID = controlID
|
||||
crv1.BaseScore = crv2.ScoreFactor
|
||||
crv1.Name = crv2.GetName()
|
||||
crv1.Control_ID = controlID
|
||||
// crv1.Attributes = crv2.
|
||||
crv1.Score = crv2.GetScore()
|
||||
|
||||
// TODO - add fields
|
||||
crv1.Description = crv2.Description
|
||||
crv1.Remediation = crv2.Remediation
|
||||
|
||||
rulesv1 := map[string]reporthandling.RuleReport{}
|
||||
|
||||
for _, resourceID := range crv2.ListResourcesIDs().All() {
|
||||
if result, ok := opaSessionObj.ResourcesResult[resourceID]; ok {
|
||||
for _, rulev2 := range result.ListRulesOfControl(crv2.GetID(), "") {
|
||||
|
||||
if _, ok := rulesv1[rulev2.GetName()]; !ok {
|
||||
rulesv1[rulev2.GetName()] = reporthandling.RuleReport{
|
||||
Name: rulev2.GetName(),
|
||||
RuleStatus: reporthandling.RuleStatus{
|
||||
Status: "success",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
rulev1 := rulesv1[rulev2.GetName()]
|
||||
status := rulev2.GetStatus(&helpersv1.Filters{FrameworkNames: []string{frameworkName}})
|
||||
|
||||
if status.IsFailed() || status.IsExcluded() {
|
||||
|
||||
// rule response
|
||||
ruleResponse := reporthandling.RuleResponse{}
|
||||
ruleResponse.Rulename = rulev2.GetName()
|
||||
for i := range rulev2.Paths {
|
||||
if rulev2.Paths[i].FailedPath != "" {
|
||||
ruleResponse.FailedPaths = append(ruleResponse.FailedPaths, rulev2.Paths[i].FailedPath)
|
||||
}
|
||||
if rulev2.Paths[i].FixPath.Path != "" {
|
||||
ruleResponse.FixPaths = append(ruleResponse.FixPaths, rulev2.Paths[i].FixPath)
|
||||
}
|
||||
}
|
||||
ruleResponse.RuleStatus = string(status.Status())
|
||||
if len(rulev2.Exception) > 0 {
|
||||
ruleResponse.Exception = &rulev2.Exception[0]
|
||||
}
|
||||
|
||||
if fullRessource, ok := opaSessionObj.AllResources[resourceID]; ok {
|
||||
tmp := fullRessource.GetObject()
|
||||
workloadinterface.RemoveFromMap(tmp, "spec")
|
||||
ruleResponse.AlertObject.K8SApiObjects = append(ruleResponse.AlertObject.K8SApiObjects, tmp)
|
||||
}
|
||||
rulev1.RuleResponses = append(rulev1.RuleResponses, ruleResponse)
|
||||
}
|
||||
|
||||
rulev1.ListInputKinds = append(rulev1.ListInputKinds, resourceID)
|
||||
rulesv1[rulev2.GetName()] = rulev1
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(rulesv1) > 0 {
|
||||
for i := range rulesv1 {
|
||||
crv1.RuleReports = append(crv1.RuleReports, rulesv1[i])
|
||||
}
|
||||
}
|
||||
if len(crv1.RuleReports) == 0 {
|
||||
crv1.RuleReports = []reporthandling.RuleReport{}
|
||||
}
|
||||
controlRepors = append(controlRepors, crv1)
|
||||
}
|
||||
return controlRepors
|
||||
}
|
||||
@@ -1,16 +1,23 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
const (
|
||||
ScanCluster string = "cluster"
|
||||
ScanLocalFiles string = "yaml"
|
||||
ScanCluster string = "cluster"
|
||||
ScanLocalFiles string = "yaml"
|
||||
localControlInputsFilename string = "controls-inputs.json"
|
||||
localExceptionsFilename string = "exceptions.json"
|
||||
)
|
||||
|
||||
type BoolPtrFlag struct {
|
||||
@@ -52,6 +59,7 @@ type ScanInfo struct {
|
||||
ControlsInputs string // Load file with inputs for controls
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
|
||||
VerboseMode bool // Display all of the input resources and not only failed resources
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
@@ -64,6 +72,8 @@ type ScanInfo struct {
|
||||
HostSensor BoolPtrFlag // Deploy ARMO K8s host sensor to collect data from certain controls
|
||||
Local bool // Do not submit results
|
||||
Account string // account ID
|
||||
Logger string // logger level
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
}
|
||||
@@ -76,9 +86,40 @@ type Getters struct {
|
||||
|
||||
func (scanInfo *ScanInfo) Init() {
|
||||
scanInfo.setUseFrom()
|
||||
scanInfo.setUseExceptions()
|
||||
scanInfo.setOutputFile()
|
||||
scanInfo.setUseArtifactsFrom()
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) setUseArtifactsFrom() {
|
||||
if scanInfo.UseArtifactsFrom == "" {
|
||||
return
|
||||
}
|
||||
// UseArtifactsFrom must be a path without a filename
|
||||
dir, file := filepath.Split(scanInfo.UseArtifactsFrom)
|
||||
if dir == "" {
|
||||
scanInfo.UseArtifactsFrom = file
|
||||
} else if strings.Contains(file, ".json") {
|
||||
scanInfo.UseArtifactsFrom = dir
|
||||
}
|
||||
// set frameworks files
|
||||
files, err := ioutil.ReadDir(scanInfo.UseArtifactsFrom)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
framework := &reporthandling.Framework{}
|
||||
for _, f := range files {
|
||||
filePath := filepath.Join(scanInfo.UseArtifactsFrom, f.Name())
|
||||
file, err := os.ReadFile(filePath)
|
||||
if err == nil {
|
||||
if err := json.Unmarshal(file, framework); err == nil {
|
||||
scanInfo.UseFrom = append(scanInfo.UseFrom, filepath.Join(scanInfo.UseArtifactsFrom, f.Name()))
|
||||
}
|
||||
}
|
||||
}
|
||||
// set config-inputs file
|
||||
scanInfo.ControlsInputs = filepath.Join(scanInfo.UseArtifactsFrom, localControlInputsFilename)
|
||||
// set exceptions
|
||||
scanInfo.UseExceptions = filepath.Join(scanInfo.UseArtifactsFrom, localExceptionsFilename)
|
||||
}
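The directory that setUseArtifactsFrom expects is the one produced by the artifacts download added further down; roughly (the framework file names are examples, the other two names come from the constants above):

<use-artifacts-from dir>/
    nsa.json               # framework JSON, appended to scanInfo.UseFrom
    mitre.json             # framework JSON, appended to scanInfo.UseFrom
    controls-inputs.json   # -> scanInfo.ControlsInputs
    exceptions.json        # -> scanInfo.UseExceptions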
|
||||
|
||||
func (scanInfo *ScanInfo) setUseExceptions() {
|
||||
|
||||
@@ -7,6 +7,8 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
pkgutils "github.com/armosec/utils-go/utils"
|
||||
)
|
||||
|
||||
@@ -22,7 +24,7 @@ type IVersionCheckHandler interface {
|
||||
|
||||
func NewIVersionCheckHandler() IVersionCheckHandler {
|
||||
if BuildNumber == "" {
|
||||
WarningDisplay(os.Stderr, "Warning: unknown build number, this might affect your scan results. Please make sure you are updated to latest version.\n")
|
||||
logger.L().Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
|
||||
}
|
||||
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && pkgutils.StringToBool(v) {
|
||||
return NewVersionCheckHandlerMock()
|
||||
@@ -78,14 +80,14 @@ func NewVersionCheckRequest(buildNumber, frameworkName, frameworkVersion, scanni
|
||||
}
|
||||
|
||||
func (v *VersionCheckHandlerMock) CheckLatestVersion(versionData *VersionCheckRequest) error {
|
||||
fmt.Println("Skipping version check")
|
||||
logger.L().Info("Skipping version check")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckRequest) error {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
WarningDisplay(os.Stderr, "failed to get latest version\n")
|
||||
logger.L().Warning("failed to get latest version", helpers.Interface("error", err))
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -96,7 +98,7 @@ func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckReques
|
||||
|
||||
if latestVersion.ClientUpdate != "" {
|
||||
if BuildNumber != "" && BuildNumber < latestVersion.ClientUpdate {
|
||||
WarningDisplay(os.Stderr, warningMessage(latestVersion.Client, latestVersion.ClientUpdate), "\n")
|
||||
logger.L().Warning(warningMessage(latestVersion.Client, latestVersion.ClientUpdate))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -106,7 +108,7 @@ func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckReques
|
||||
// }
|
||||
|
||||
if latestVersion.Message != "" {
|
||||
InfoDisplay(os.Stderr, latestVersion.Message, "\n")
|
||||
logger.L().Info(latestVersion.Message)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -132,5 +134,5 @@ func (v *VersionCheckHandler) getLatestVersion(versionData *VersionCheckRequest)
|
||||
}
|
||||
|
||||
func warningMessage(kind, release string) string {
|
||||
return fmt.Sprintf("Warning: '%s' is not updated to the latest release: '%s'", kind, release)
|
||||
return fmt.Sprintf("'%s' is not updated to the latest release: '%s'", kind, release)
|
||||
}
|
||||
|
||||
7
clihandler/clidelete.go
Normal file
@@ -0,0 +1,7 @@
|
||||
package clihandler
|
||||
|
||||
func CliDelete() error {
|
||||
|
||||
tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
|
||||
return tenant.DeleteCachedConfig()
|
||||
}
|
||||
176
clihandler/clidownload.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package clihandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
)
|
||||
|
||||
var downloadFunc = map[string]func(*cautils.DownloadInfo) error{
|
||||
"controls-inputs": downloadConfigInputs,
|
||||
"exceptions": downloadExceptions,
|
||||
"control": downloadControl,
|
||||
"framework": downloadFramework,
|
||||
"artifacts": downloadArtifacts,
|
||||
}
|
||||
|
||||
func DownloadSupportCommands() []string {
|
||||
commands := []string{}
|
||||
for k := range downloadFunc {
|
||||
commands = append(commands, k)
|
||||
}
|
||||
return commands
|
||||
}
|
||||
|
||||
func CliDownload(downloadInfo *cautils.DownloadInfo) error {
|
||||
setPathandFilename(downloadInfo)
|
||||
if err := downloadArtifact(downloadInfo, downloadFunc); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadArtifact(downloadInfo *cautils.DownloadInfo, downloadArtifactFunc map[string]func(*cautils.DownloadInfo) error) error {
|
||||
if f, ok := downloadArtifactFunc[downloadInfo.Target]; ok {
|
||||
if err := f(downloadInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unknown command to download")
|
||||
}
|
||||
|
||||
func setPathandFilename(downloadInfo *cautils.DownloadInfo) {
|
||||
if downloadInfo.Path == "" {
|
||||
downloadInfo.Path = getter.GetDefaultPath("")
|
||||
} else {
|
||||
dir, file := filepath.Split(downloadInfo.Path)
|
||||
if dir == "" {
|
||||
downloadInfo.Path = file
|
||||
} else if strings.Contains(file, ".json") {
|
||||
downloadInfo.Path = dir
|
||||
downloadInfo.FileName = file
|
||||
}
|
||||
}
|
||||
}

func downloadArtifacts(downloadInfo *cautils.DownloadInfo) error {
    downloadInfo.FileName = ""
    var artifacts = map[string]func(*cautils.DownloadInfo) error{
        "controls-inputs": downloadConfigInputs,
        "exceptions":      downloadExceptions,
        "framework":       downloadFramework,
    }
    for artifact := range artifacts {
        if err := downloadArtifact(&cautils.DownloadInfo{Target: artifact, Path: downloadInfo.Path, FileName: fmt.Sprintf("%s.json", artifact)}, artifacts); err != nil {
            fmt.Printf("error downloading %s, error: %s", artifact, err)
        }
    }
    return nil
}

func downloadConfigInputs(downloadInfo *cautils.DownloadInfo) error {
    tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
    controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetAccountID(), nil)
    controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetClusterName())
    if err != nil {
        return err
    }
    if downloadInfo.FileName == "" {
        downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
    }
    // save in file
    err = getter.SaveInFile(controlInputs, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
    if err != nil {
        return err
    }
    fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
    return nil
}

func downloadExceptions(downloadInfo *cautils.DownloadInfo) error {
    var err error
    tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
    exceptionsGetter := getExceptionsGetter("")
    exceptions := []armotypes.PostureExceptionPolicy{}
    if tenant.GetAccountID() != "" {
        exceptions, err = exceptionsGetter.GetExceptions(tenant.GetClusterName())
        if err != nil {
            return err
        }
    }
    if downloadInfo.FileName == "" {
        downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
    }
    // save in file
    err = getter.SaveInFile(exceptions, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
    if err != nil {
        return err
    }
    fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
    return nil
}

func downloadFramework(downloadInfo *cautils.DownloadInfo) error {

    tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
    g := getPolicyGetter(nil, tenant.GetAccountID(), true, nil)

    if downloadInfo.Name == "" {
        // if framework name not specified - download all frameworks
        frameworks, err := g.GetFrameworks()
        if err != nil {
            return err
        }
        for _, fw := range frameworks {
            err = getter.SaveInFile(fw, filepath.Join(downloadInfo.Path, (strings.ToLower(fw.Name)+".json")))
            if err != nil {
                return err
            }
            fmt.Printf("'%s': '%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, fw.Name, filepath.Join(downloadInfo.Path, (strings.ToLower(fw.Name)+".json")))
        }
        // return fmt.Errorf("missing framework name")
    } else {
        if downloadInfo.FileName == "" {
            downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Name)
        }
        framework, err := g.GetFramework(downloadInfo.Name)
        if err != nil {
            return err
        }
        err = getter.SaveInFile(framework, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
        if err != nil {
            return err
        }
        fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
    }
    return nil
}

func downloadControl(downloadInfo *cautils.DownloadInfo) error {

    tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
    g := getPolicyGetter(nil, tenant.GetAccountID(), false, nil)

    if downloadInfo.Name == "" {
        // TODO - support
        return fmt.Errorf("missing control name")
    }
    if downloadInfo.FileName == "" {
        downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Name)
    }
    controls, err := g.GetControl(downloadInfo.Name)
    if err != nil {
        return err
    }
    err = getter.SaveInFile(controls, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
    if err != nil {
        return err
    }
    fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
    return nil
}
58
clihandler/clilist.go
Normal file
@@ -0,0 +1,58 @@
package clihandler

import (
    "fmt"
    "sort"
    "strings"

    "github.com/armosec/kubescape/cautils/getter"
    "github.com/armosec/kubescape/clihandler/cliobjects"
)

var listFunc = map[string]func(*cliobjects.ListPolicies) ([]string, error){
    "controls":   listControls,
    "frameworks": listFrameworks,
}

func ListSupportCommands() []string {
    commands := []string{}
    for k := range listFunc {
        commands = append(commands, k)
    }
    return commands
}
func CliList(listPolicies *cliobjects.ListPolicies) error {
    if f, ok := listFunc[listPolicies.Target]; ok {
        policies, err := f(listPolicies)
        if err != nil {
            return err
        }
        sort.Strings(policies)

        sep := "\n * "
        usageCmd := strings.TrimSuffix(listPolicies.Target, "s")
        fmt.Printf("Supported %s:%s%s\n", listPolicies.Target, sep, strings.Join(policies, sep))
        fmt.Printf("\nUsage:\n")
        fmt.Printf("$ kubescape scan %s \"name\"\n", usageCmd)
        fmt.Printf("$ kubescape scan %s \"name-0\",\"name-1\"\n\n", usageCmd)
        return nil
    }
    return fmt.Errorf("unknown command to list")
}
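A runnable sketch (framework names are placeholders) of how CliList above formats the output of 'kubescape list frameworks':

package main

import (
    "fmt"
    "sort"
    "strings"
)

func main() {
    policies := []string{"nsa", "mitre"} // hypothetical result of listFrameworks
    sort.Strings(policies)
    sep := "\n * "
    usageCmd := strings.TrimSuffix("frameworks", "s")
    fmt.Printf("Supported %s:%s%s\n", "frameworks", sep, strings.Join(policies, sep))
    fmt.Printf("\nUsage:\n")
    fmt.Printf("$ kubescape scan %s \"name\"\n", usageCmd)
}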

func listFrameworks(listPolicies *cliobjects.ListPolicies) ([]string, error) {
    tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
    g := getPolicyGetter(nil, tenant.GetAccountID(), true, nil)

    return listFrameworksNames(g), nil
}

func listControls(listPolicies *cliobjects.ListPolicies) ([]string, error) {
    tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
    g := getPolicyGetter(nil, tenant.GetAccountID(), false, nil)
    l := getter.ListName
    if listPolicies.ListIDs {
        l = getter.ListID
    }
    return g.ListControls(l)
}
7
clihandler/cliobjects/listpolicies.go
Normal file
@@ -0,0 +1,7 @@
package cliobjects

type ListPolicies struct {
    Target  string
    ListIDs bool
    Account string
}
7
clihandler/cliobjects/set.go
Normal file
@@ -0,0 +1,7 @@
package cliobjects

type SetConfig struct {
    Account   string
    ClientID  string
    SecretKey string
}
5
clihandler/cliobjects/submit.go
Normal file
@@ -0,0 +1,5 @@
package cliobjects

type Submit struct {
    Account string
}
22
clihandler/cliset.go
Normal file
@@ -0,0 +1,22 @@
package clihandler

import (
    "github.com/armosec/kubescape/clihandler/cliobjects"
)

func CliSetConfig(setConfig *cliobjects.SetConfig) error {

    tenant := getTenantConfig("", "", getKubernetesApi())

    if setConfig.Account != "" {
        tenant.GetConfigObj().AccountID = setConfig.Account
    }
    if setConfig.SecretKey != "" {
        tenant.GetConfigObj().SecretKey = setConfig.SecretKey
    }
    if setConfig.ClientID != "" {
        tenant.GetConfigObj().ClientID = setConfig.ClientID
    }

    return tenant.UpdateCachedConfig()
}
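An illustrative call (the account ID is a placeholder; assumes the clihandler and cliobjects packages above) equivalent to running 'kubescape config set accountID <account id>':

cfg := cliobjects.SetConfig{Account: "<account id>"} // hypothetical value
if err := clihandler.CliSetConfig(&cfg); err != nil {
    logger.L().Fatal(err.Error())
}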
57
clihandler/clisubmit.go
Normal file
@@ -0,0 +1,57 @@
package clihandler

import (
    "github.com/armosec/kubescape/cautils"
    "github.com/armosec/kubescape/cautils/getter"
    "github.com/armosec/kubescape/cautils/logger"
    "github.com/armosec/kubescape/cautils/logger/helpers"
    "github.com/armosec/kubescape/clihandler/cliinterfaces"
)

func Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error {

    // list resources
    postureReport, err := submitInterfaces.SubmitObjects.SetResourcesReport()
    if err != nil {
        return err
    }
    allresources, err := submitInterfaces.SubmitObjects.ListAllResources()
    if err != nil {
        return err
    }
    // report
    if err := submitInterfaces.Reporter.ActionSendReport(&cautils.OPASessionObj{PostureReport: postureReport, AllResources: allresources}); err != nil {
        return err
    }
    logger.L().Success("Data has been submitted successfully")
    submitInterfaces.Reporter.DisplayReportURL()

    return nil
}

func SubmitExceptions(accountID, excPath string) error {
    logger.L().Info("submitting exceptions", helpers.String("path", excPath))

    // load cached config
    getTenantConfig(accountID, "", getKubernetesApi())

    // load exceptions from file
    loader := getter.NewLoadPolicy([]string{excPath})
    exceptions, err := loader.GetExceptions("")
    if err != nil {
        return err
    }

    // login kubescape SaaS
    armoAPI := getter.GetArmoAPIConnector()
    if err := armoAPI.Login(); err != nil {
        return err
    }

    if err := armoAPI.PostExceptions(exceptions); err != nil {
        return err
    }
    logger.L().Success("Exceptions submitted successfully")

    return nil
}
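An illustrative call (path and account ID are placeholders) equivalent to 'kubescape submit exceptions <file>', as wired up by the cobra command later in this change:

if err := clihandler.SubmitExceptions("<account id>", "/path/to/exceptions.json"); err != nil {
    logger.L().Fatal(err.Error()) // hypothetical caller; mirrors submitExceptionsCmd below
}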
9
clihandler/cliview.go
Normal file
@@ -0,0 +1,9 @@
package clihandler

import "fmt"

func CliView() error {
    tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
    fmt.Printf("%s\n", tenant.GetConfigObj().Config())
    return nil
}
@@ -6,9 +6,10 @@ import (
|
||||
|
||||
// clusterCmd represents the cluster command
|
||||
var clusterCmd = &cobra.Command{
|
||||
Use: "cluster",
|
||||
Short: "Set configuration for cluster",
|
||||
Long: ``,
|
||||
Use: "cluster",
|
||||
Short: "Set configuration for cluster",
|
||||
Long: ``,
|
||||
Deprecated: "use the 'set' command instead",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
},
|
||||
}
|
||||
|
||||
@@ -7,14 +7,15 @@ import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var getCmd = &cobra.Command{
|
||||
Use: "get <key>",
|
||||
Short: "Get configuration in cluster",
|
||||
Long: ``,
|
||||
ValidArgs: getter.NativeFrameworks,
|
||||
Use: "get <key>",
|
||||
Short: "Get configuration in cluster",
|
||||
Long: ``,
|
||||
Deprecated: "use the 'view' command instead",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 || len(args) > 1 {
|
||||
return fmt.Errorf("requires one argument")
|
||||
@@ -31,7 +32,7 @@ var getCmd = &cobra.Command{
|
||||
key := keyValue[0]
|
||||
|
||||
k8s := k8sinterface.NewKubernetesApi()
|
||||
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), scanInfo.Account)
|
||||
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), scanInfo.Account, "")
|
||||
val, err := clusterConfig.GetValueByKeyFromConfigMap(key)
|
||||
if err != nil {
|
||||
if err.Error() == "value does not exist." {
|
||||
@@ -40,7 +41,7 @@ var getCmd = &cobra.Command{
|
||||
}
|
||||
return err
|
||||
}
|
||||
fmt.Println(key + "=" + val)
|
||||
logger.L().Info(key + "=" + val)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -7,13 +7,15 @@ import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var setCmd = &cobra.Command{
|
||||
Use: "set <key>=<value>",
|
||||
Short: "Set configuration in cluster",
|
||||
Long: ``,
|
||||
var setClusterCmd = &cobra.Command{
|
||||
Use: "set <key>=<value>",
|
||||
Short: "Set configuration in cluster",
|
||||
Long: ``,
|
||||
Deprecated: "use the 'set' command instead",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 || len(args) > 1 {
|
||||
return fmt.Errorf("requires one argument: <key>=<value>")
|
||||
@@ -30,15 +32,15 @@ var setCmd = &cobra.Command{
|
||||
data := keyValue[1]
|
||||
|
||||
k8s := k8sinterface.NewKubernetesApi()
|
||||
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), scanInfo.Account)
|
||||
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), scanInfo.Account, "")
|
||||
if err := clusterConfig.SetKeyValueInConfigmap(key, data); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println("Value added successfully.")
|
||||
logger.L().Info("value added successfully.")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
clusterCmd.AddCommand(setCmd)
|
||||
clusterCmd.AddCommand(setClusterCmd)
|
||||
}
|
||||
|
||||
@@ -1,18 +1,127 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliobjects"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
configExample = `
|
||||
# View cached configurations
|
||||
kubescape config view
|
||||
|
||||
# Delete cached configurations
|
||||
kubescape config delete
|
||||
|
||||
# Set cached configurations
|
||||
kubescape config set --help
|
||||
`
|
||||
setConfigExample = `
|
||||
# Set account id
|
||||
kubescape config set accountID <account id>
|
||||
|
||||
# Set client id
|
||||
kubescape config set clientID <client id>
|
||||
|
||||
# Set access key
|
||||
kubescape config set secretKey <access key>
|
||||
`
|
||||
)
|
||||
|
||||
// configCmd represents the config command
|
||||
var configCmd = &cobra.Command{
|
||||
Use: "config",
|
||||
Short: "Set configuration",
|
||||
Use: "config",
|
||||
Short: "handle cached configurations",
|
||||
Example: configExample,
|
||||
}
|
||||
|
||||
var setConfig = cliobjects.SetConfig{}
|
||||
|
||||
// configCmd represents the config command
|
||||
var configSetCmd = &cobra.Command{
|
||||
Use: "set",
|
||||
Short: fmt.Sprintf("Set configurations, supported: %s", strings.Join(stringKeysToSlice(supportConfigSet), "/")),
|
||||
Example: setConfigExample,
|
||||
ValidArgs: stringKeysToSlice(supportConfigSet),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := parseSetArgs(args); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := clihandler.CliSetConfig(&setConfig); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var supportConfigSet = map[string]func(*cliobjects.SetConfig, string){
|
||||
"accountID": func(s *cliobjects.SetConfig, account string) { s.Account = account },
|
||||
"clientID": func(s *cliobjects.SetConfig, clientID string) { s.ClientID = clientID },
|
||||
"secretKey": func(s *cliobjects.SetConfig, secretKey string) { s.SecretKey = secretKey },
|
||||
}
|
||||
|
||||
func stringKeysToSlice(m map[string]func(*cliobjects.SetConfig, string)) []string {
|
||||
l := []string{}
|
||||
for i := range m {
|
||||
l = append(l, i)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func parseSetArgs(args []string) error {
|
||||
var key string
|
||||
var value string
|
||||
if len(args) == 1 {
|
||||
if keyValue := strings.Split(args[0], "="); len(keyValue) == 2 {
|
||||
key = keyValue[0]
|
||||
value = keyValue[1]
|
||||
}
|
||||
} else if len(args) == 2 {
|
||||
key = args[0]
|
||||
value = args[1]
|
||||
}
|
||||
if setConfigFunc, ok := supportConfigSet[key]; ok {
|
||||
setConfigFunc(&setConfig, value)
|
||||
} else {
|
||||
return fmt.Errorf("key '%s' unknown . supported: %s", key, strings.Join(stringKeysToSlice(supportConfigSet), "/"))
|
||||
}
|
||||
return nil
|
||||
}
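A small, runnable sketch (stdlib only) of the two argument forms parseSetArgs above accepts: '<key>=<value>' as a single argument, or '<key> <value>' as two arguments:

package main

import (
    "fmt"
    "strings"
)

func parse(args []string) (key, value string) {
    if len(args) == 1 {
        if kv := strings.Split(args[0], "="); len(kv) == 2 {
            key, value = kv[0], kv[1]
        }
    } else if len(args) == 2 {
        key, value = args[0], args[1]
    }
    return
}

func main() {
    fmt.Println(parse([]string{"accountID=1234"}))    // accountID 1234
    fmt.Println(parse([]string{"accountID", "1234"})) // accountID 1234
}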
|
||||
|
||||
var configDeleteCmd = &cobra.Command{
|
||||
Use: "delete",
|
||||
Short: "Delete cached configurations",
|
||||
Long: ``,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := clihandler.CliDelete(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// configCmd represents the config command
|
||||
var configViewCmd = &cobra.Command{
|
||||
Use: "view",
|
||||
Short: "View cached configurations",
|
||||
Long: ``,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := clihandler.CliView(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(configCmd)
|
||||
configCmd.AddCommand(configSetCmd)
|
||||
configCmd.AddCommand(configDeleteCmd)
|
||||
configCmd.AddCommand(configViewCmd)
|
||||
}
|
||||
|
||||
@@ -7,16 +7,35 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
controlExample = `
|
||||
# Scan the 'privileged container' control
|
||||
kubescape scan control "privileged container"
|
||||
|
||||
# Scan list of controls separated with a comma
|
||||
kubescape scan control "privileged container","allowed hostpath"
|
||||
|
||||
# Scan list of controls using the control ID separated with a comma
|
||||
kubescape scan control C-0058,C-0057
|
||||
|
||||
Run 'kubescape list controls' for the list of supported controls
|
||||
|
||||
Control documentation:
|
||||
https://hub.armo.cloud/docs/controls
|
||||
`
|
||||
)
|
||||
|
||||
// controlCmd represents the control command
|
||||
var controlCmd = &cobra.Command{
|
||||
Use: "control <control names list>/<control ids list>.\nExamples:\n$ kubescape scan control C-0058,C-0057 [flags]\n$ kubescape scan contol C-0058 [flags]\n$ kubescape scan control 'privileged container,allowed hostpath' [flags]",
|
||||
Short: fmt.Sprintf("The control you wish to use for scan. It must be present in at least one of the following frameworks: %s", getter.NativeFrameworks),
|
||||
Use: "control <control names list>/<control ids list>",
|
||||
Short: "The controls you wish to use. Run 'kubescape list controls' for the list of supported controls",
|
||||
Example: controlExample,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
controls := strings.Split(args[0], ",")
|
||||
@@ -35,7 +54,6 @@ var controlCmd = &cobra.Command{
|
||||
scanInfo.PolicyIdentifier = []reporthandling.PolicyIdentifier{}
|
||||
|
||||
if len(args) == 0 {
|
||||
scanInfo.SetPolicyIdentifiers(getter.NativeFrameworks, reporthandling.KindFramework)
|
||||
scanInfo.ScanAll = true
|
||||
} else { // expected a control or a list of controls separated by ","
|
||||
|
||||
@@ -65,8 +83,7 @@ var controlCmd = &cobra.Command{
|
||||
cautils.SetSilentMode(scanInfo.Silent)
|
||||
err := clihandler.ScanCliSetup(&scanInfo)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
@@ -79,8 +96,7 @@ func init() {
|
||||
|
||||
func flagValidationControl() {
|
||||
if 100 < scanInfo.FailThreshold {
|
||||
fmt.Println("bad argument: out of range threshold")
|
||||
os.Exit(1)
|
||||
logger.L().Fatal("bad argument: out of range threshold")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,94 +2,73 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var downloadInfo cautils.DownloadInfo
|
||||
var downloadInfo = cautils.DownloadInfo{}
|
||||
|
||||
var (
|
||||
downloadExample = `
|
||||
# Download all artifacts and save them in the default path (~/.kubescape)
|
||||
kubescape download artifacts
|
||||
|
||||
# Download all artifacts and save them in /tmp path
|
||||
kubescape download artifacts --output /tmp
|
||||
|
||||
# Download the NSA framework. Run 'kubescape list frameworks' for all frameworks names
|
||||
kubescape download framework nsa
|
||||
|
||||
# Download the "Allowed hostPath" control. Run 'kubescape list controls' for all controls names
|
||||
kubescape download control "Allowed hostPath"
|
||||
|
||||
# Download the "C-0001" control. Run 'kubescape list controls --id' for all controls ids
|
||||
kubescape download control C-0001
|
||||
|
||||
# Download the configured exceptions
|
||||
kubescape download exceptions
|
||||
|
||||
# Download the configured controls-inputs
|
||||
kubescape download controls-inputs
|
||||
|
||||
`
|
||||
)
|
||||
var downloadCmd = &cobra.Command{
|
||||
Use: fmt.Sprintf("download framework/control <framework-name>/<control-name> [flags]\nSupported frameworks: %s", getter.NativeFrameworks),
|
||||
Short: "Download framework/control",
|
||||
Long: ``,
|
||||
Use: "download <policy> <policy name>",
|
||||
Short: fmt.Sprintf("Download %s", strings.Join(clihandler.DownloadSupportCommands(), ",")),
|
||||
Long: ``,
|
||||
Example: downloadExample,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 2 {
|
||||
return fmt.Errorf("requires two arguments : framework/control <framework-name>/<control-name>")
|
||||
supported := strings.Join(clihandler.DownloadSupportCommands(), ",")
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("policy type required, supported: %v", supported)
|
||||
}
|
||||
if !strings.EqualFold(args[0], "framework") && !strings.EqualFold(args[0], "control") {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: framework, control", args[0])
|
||||
if cautils.StringInSlice(clihandler.DownloadSupportCommands(), args[0]) == cautils.ValueNotFound {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: %s", args[0], supported)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := download(args); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
downloadInfo.Target = args[0]
|
||||
if len(args) >= 2 {
|
||||
downloadInfo.Name = args[1]
|
||||
}
|
||||
if err := clihandler.CliDownload(&downloadInfo); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
// cobra.OnInitialize(initConfig)
|
||||
|
||||
rootCmd.AddCommand(downloadCmd)
|
||||
downloadInfo = cautils.DownloadInfo{}
|
||||
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If specified, will store save to `~/.kubescape/<framework name>.json`")
|
||||
}
|
||||
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If not specified, will save in `~/.kubescape/<policy name>.json`")
|
||||
|
||||
func download(args []string) error {
|
||||
switch strings.ToLower(args[0]) {
|
||||
case "framework":
|
||||
return downloadFramework(args[1])
|
||||
case "control":
|
||||
return downloadControl(args[1])
|
||||
// case "exceptions":
|
||||
// case "artifacts":
|
||||
default:
|
||||
return fmt.Errorf("unknown command to download")
|
||||
}
|
||||
}
|
||||
|
||||
func downloadFramework(frameworkName string) error {
|
||||
downloadInfo.FrameworkName = strings.ToLower(frameworkName)
|
||||
g := getter.NewDownloadReleasedPolicy()
|
||||
if err := g.SetRegoObjects(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if downloadInfo.Path == "" {
|
||||
downloadInfo.Path = getter.GetDefaultPath(downloadInfo.FrameworkName + ".json")
|
||||
}
|
||||
frameworks, err := g.GetFramework(downloadInfo.FrameworkName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = getter.SaveFrameworkInFile(frameworks, downloadInfo.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadControl(controlName string) error {
|
||||
downloadInfo.ControlName = strings.ToLower(controlName)
|
||||
g := getter.NewDownloadReleasedPolicy()
|
||||
if err := g.SetRegoObjects(); err != nil {
|
||||
return err
|
||||
}
|
||||
if downloadInfo.Path == "" {
|
||||
downloadInfo.Path = getter.GetDefaultPath(downloadInfo.ControlName + ".json")
|
||||
}
|
||||
controls, err := g.GetControl(downloadInfo.ControlName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = getter.SaveControlInFile(controls, downloadInfo.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -24,6 +24,9 @@ var (
|
||||
# Scan the NSA and MITRE framework
|
||||
kubescape scan framework nsa,mitre
|
||||
|
||||
# Scan all frameworks
|
||||
kubescape scan framework all
|
||||
|
||||
# Scan kubernetes YAML manifest files
|
||||
kubescape scan framework nsa *.yaml
|
||||
|
||||
@@ -35,14 +38,15 @@ var (
|
||||
|
||||
# Display all resources
|
||||
kubescape scan --verbose
|
||||
|
||||
Run 'kubescape list frameworks' for the list of supported frameworks
|
||||
`
|
||||
)
|
||||
var frameworkCmd = &cobra.Command{
|
||||
Use: "framework <framework names list> [`<glob pattern>`/`-`] [flags]",
|
||||
Short: fmt.Sprintf("The framework you wish to use. Supported frameworks: %s", strings.Join(getter.NativeFrameworks, ", ")),
|
||||
Example: frameworkExample,
|
||||
Long: "Execute a scan on a running Kubernetes cluster or `yaml`/`json` files (use glob) or `-` for stdin",
|
||||
ValidArgs: getter.NativeFrameworks,
|
||||
Use: "framework <framework names list> [`<glob pattern>`/`-`] [flags]",
|
||||
Short: "The framework you wish to use. Run 'kubescape list frameworks' for the list of supported frameworks",
|
||||
Example: frameworkExample,
|
||||
Long: "Execute a scan on a running Kubernetes cluster or `yaml`/`json` files (use glob) or `-` for stdin",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
frameworks := strings.Split(args[0], ",")
|
||||
@@ -61,12 +65,14 @@ var frameworkCmd = &cobra.Command{
|
||||
var frameworks []string
|
||||
|
||||
if len(args) == 0 { // scan all frameworks
|
||||
frameworks = getter.NativeFrameworks
|
||||
scanInfo.ScanAll = true
|
||||
} else {
|
||||
// Read frameworks from input args
|
||||
frameworks = strings.Split(args[0], ",")
|
||||
|
||||
if cautils.StringInSlice(frameworks, "all") != cautils.ValueNotFound {
|
||||
scanInfo.ScanAll = true
|
||||
frameworks = []string{}
|
||||
}
|
||||
if len(args) > 1 {
|
||||
if len(args[1:]) == 0 || args[1] != "-" {
|
||||
scanInfo.InputPatterns = args[1:]
|
||||
@@ -84,14 +90,15 @@ var frameworkCmd = &cobra.Command{
|
||||
}
|
||||
}
|
||||
}
|
||||
scanInfo.FrameworkScan = true
|
||||
|
||||
scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
|
||||
|
||||
scanInfo.Init()
|
||||
cautils.SetSilentMode(scanInfo.Silent)
|
||||
err := clihandler.ScanCliSetup(&scanInfo)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n\n", err)
|
||||
os.Exit(1)
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
@@ -113,11 +120,9 @@ func init() {
|
||||
|
||||
func flagValidationFramework() {
|
||||
if scanInfo.Submit && scanInfo.Local {
|
||||
fmt.Println("You can use `keep-local` or `submit`, but not both")
|
||||
os.Exit(1)
|
||||
logger.L().Fatal("you can use `keep-local` or `submit`, but not both")
|
||||
}
|
||||
if 100 < scanInfo.FailThreshold {
|
||||
fmt.Println("bad argument: out of range threshold")
|
||||
os.Exit(1)
|
||||
logger.L().Fatal("bad argument: out of range threshold")
|
||||
}
|
||||
}
|
||||
|
||||
67
clihandler/cmd/list.go
Normal file
@@ -0,0 +1,67 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliobjects"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
listExample = `
|
||||
# List default supported frameworks names
|
||||
kubescape list frameworks
|
||||
|
||||
# List all supported frameworks names
|
||||
kubescape list frameworks --account <account id>
|
||||
|
||||
# List all supported controls names
|
||||
kubescape list controls
|
||||
|
||||
# List all supported controls ids
|
||||
kubescape list controls --id
|
||||
|
||||
Control documentation:
|
||||
https://hub.armo.cloud/docs/controls
|
||||
`
|
||||
)
|
||||
var listPolicies = cliobjects.ListPolicies{}
|
||||
|
||||
var listCmd = &cobra.Command{
|
||||
Use: "list <policy> [flags]",
|
||||
Short: "List frameworks/controls will list the supported frameworks and controls",
|
||||
Long: ``,
|
||||
Example: listExample,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
supported := strings.Join(clihandler.ListSupportCommands(), ",")
|
||||
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("policy type requeued, supported: %s", supported)
|
||||
}
|
||||
if cautils.StringInSlice(clihandler.ListSupportCommands(), args[0]) == cautils.ValueNotFound {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: %s", args[0], supported)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
listPolicies.Target = args[0]
|
||||
|
||||
if err := clihandler.CliList(&listPolicies); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
// cobra.OnInitialize(initConfig)
|
||||
|
||||
rootCmd.AddCommand(listCmd)
|
||||
listCmd.PersistentFlags().StringVarP(&listPolicies.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
listCmd.PersistentFlags().BoolVarP(&listPolicies.ListIDs, "id", "", false, "List control ID's instead of controls names")
|
||||
}
|
||||
@@ -5,9 +5,10 @@ import (
|
||||
)
|
||||
|
||||
var localCmd = &cobra.Command{
|
||||
Use: "local",
|
||||
Short: "Set configuration locally (for config.json)",
|
||||
Long: ``,
|
||||
Use: "local",
|
||||
Short: "Set configuration locally (for config.json)",
|
||||
Long: ``,
|
||||
Deprecated: "use the 'set' command instead",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
},
|
||||
}
|
||||
|
||||
@@ -9,9 +9,10 @@ import (
|
||||
)
|
||||
|
||||
var localGetCmd = &cobra.Command{
|
||||
Use: "get <key>",
|
||||
Short: "Get configuration locally",
|
||||
Long: ``,
|
||||
Use: "get <key>",
|
||||
Short: "Get configuration locally",
|
||||
Long: ``,
|
||||
Deprecated: "use the 'view' command instead",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 || len(args) > 1 {
|
||||
return fmt.Errorf("requires one argument")
|
||||
|
||||
@@ -9,9 +9,10 @@ import (
|
||||
)
|
||||
|
||||
var localSetCmd = &cobra.Command{
|
||||
Use: "set <key>=<value>",
|
||||
Short: "Set configuration locally",
|
||||
Long: ``,
|
||||
Use: "set <key>=<value>",
|
||||
Short: "Set configuration locally",
|
||||
Long: ``,
|
||||
Deprecated: "use the 'set' command instead",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 || len(args) > 1 {
|
||||
return fmt.Errorf("requires one argument: <key>=<value>")
|
||||
|
||||
@@ -1,14 +1,13 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliinterfaces"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
|
||||
"github.com/armosec/rbac-utils/rbacscanner"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -23,16 +22,13 @@ var rabcCmd = &cobra.Command{
|
||||
k8s := k8sinterface.NewKubernetesApi()
|
||||
|
||||
// get config
|
||||
clusterConfig, err := getSubmittedClusterConfig(k8s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
|
||||
|
||||
// list RBAC
|
||||
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetCustomerGUID(), clusterConfig.GetClusterName()))
|
||||
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetAccountID(), clusterConfig.GetClusterName()))
|
||||
|
||||
// submit resources
|
||||
r := reporter.NewReportEventReceiver(clusterConfig.GetConfigObj())
|
||||
r := reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
|
||||
|
||||
submitInterfaces := cliinterfaces.SubmitInterfaces{
|
||||
ClusterConfig: clusterConfig,
|
||||
@@ -41,8 +37,7 @@ var rabcCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
if err := clihandler.Submit(submitInterfaces); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
@@ -51,3 +46,17 @@ var rabcCmd = &cobra.Command{
|
||||
func init() {
|
||||
submitCmd.AddCommand(rabcCmd)
|
||||
}
|
||||
|
||||
// getKubernetesApi
|
||||
func getKubernetesApi() *k8sinterface.KubernetesApi {
|
||||
if !k8sinterface.IsConnectedToCluster() {
|
||||
return nil
|
||||
}
|
||||
return k8sinterface.NewKubernetesApi()
|
||||
}
|
||||
func getTenantConfig(Account, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
|
||||
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
|
||||
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
}
|
||||
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
}
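A hedged sketch (account ID is a placeholder) of how the two helpers above choose the configuration backend; per the --account flag help elsewhere in this change, the in-cluster case is configMap-backed and the local case uses the cached config file:

k8s := getKubernetesApi()                          // nil when no cluster is reachable
tenant := getTenantConfig("<account id>", "", k8s) // ClusterConfig if connected, LocalConfig otherwise
_ = tenant.GetAccountID()                          // same ITenantConfig interface either way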
|
||||
|
||||
@@ -6,11 +6,11 @@ import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliinterfaces"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -59,18 +59,15 @@ var resultsCmd = &cobra.Command{
|
||||
return fmt.Errorf("missing results file")
|
||||
}
|
||||
|
||||
k8s := k8sinterface.NewKubernetesApi()
|
||||
k8s := getKubernetesApi()
|
||||
|
||||
// get config
|
||||
clusterConfig, err := getSubmittedClusterConfig(k8s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
|
||||
|
||||
resultsObjects := NewResultsObject(clusterConfig.GetCustomerGUID(), clusterConfig.GetClusterName(), args[0])
|
||||
resultsObjects := NewResultsObject(clusterConfig.GetAccountID(), clusterConfig.GetClusterName(), args[0])
|
||||
|
||||
// submit resources
|
||||
r := reporter.NewReportEventReceiver(clusterConfig.GetConfigObj())
|
||||
r := reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
|
||||
|
||||
submitInterfaces := cliinterfaces.SubmitInterfaces{
|
||||
ClusterConfig: clusterConfig,
|
||||
@@ -79,8 +76,7 @@ var resultsCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
if err := clihandler.Submit(submitInterfaces); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
|
||||
@@ -2,52 +2,62 @@ package cmd
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/golang/glog"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var cfgFile string
|
||||
var armoBEURLs = ""
|
||||
|
||||
const envFlagUsage = "Send report results to specific URL. Format:<ReportReceiver>,<Backend>,<Frontend>.\n\t\tExample:report.armo.cloud,api.armo.cloud,portal.armo.cloud"
|
||||
|
||||
var ksExamples = `
|
||||
# Scan command
|
||||
kubescape scan --submit
|
||||
|
||||
# List supported frameworks
|
||||
kubescape list frameworks
|
||||
|
||||
# Download artifacts (air-gapped environment support)
|
||||
kubescape download artifacts
|
||||
|
||||
# View cached configurations
|
||||
kubescape config view
|
||||
`
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "kubescape",
|
||||
Short: "Kubescape is a tool for testing Kubernetes security posture",
|
||||
Long: `Kubescape is a tool for testing Kubernetes security posture based on NSA \ MITRE ATT&CK® specifications.`,
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
flag.Parse()
|
||||
InitArmoBEConnector()
|
||||
return nil
|
||||
},
|
||||
Use: "kubescape",
|
||||
Short: "Kubescape is a tool for testing Kubernetes security posture",
|
||||
Long: `Kubescape is a tool for testing Kubernetes security posture based on NSA \ MITRE ATT&CK® and other frameworks specifications`,
|
||||
Example: ksExamples,
|
||||
}
|
||||
|
||||
func Execute() {
|
||||
rootCmd.Execute()
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.PersistentFlags().StringVarP(&scanInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
cobra.OnInitialize(initLogger, initEnvironment)
|
||||
|
||||
flag.CommandLine.StringVar(&armoBEURLs, "environment", "", envFlagUsage)
|
||||
rootCmd.PersistentFlags().StringVar(&armoBEURLs, "environment", "", envFlagUsage)
|
||||
rootCmd.PersistentFlags().MarkHidden("environment")
|
||||
cobra.OnInitialize(initConfig)
|
||||
|
||||
rootCmd.PersistentFlags().StringVar(&scanInfo.Logger, "logger", "info", fmt.Sprintf("Logger level. Supported: %s", strings.Join(helpers.SupportedLevels(), "/")))
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
// initConfig reads in config file and ENV variables if set.
|
||||
func initConfig() {
|
||||
func initLogger() {
|
||||
if err := logger.L().SetLevel(scanInfo.Logger); err != nil {
|
||||
logger.L().Fatal(fmt.Sprintf("supported levels: %s", strings.Join(helpers.SupportedLevels(), "/")), helpers.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func InitArmoBEConnector() {
|
||||
func initEnvironment() {
|
||||
urlSlices := strings.Split(armoBEURLs, ",")
|
||||
if len(urlSlices) > 3 {
|
||||
glog.Errorf("Too many URLs")
|
||||
os.Exit(1)
|
||||
if len(urlSlices) != 1 && len(urlSlices) < 3 {
|
||||
logger.L().Fatal("expected at least 3 URLs (report, api, frontend, auth)")
|
||||
}
|
||||
switch len(urlSlices) {
|
||||
case 1:
|
||||
@@ -57,13 +67,18 @@ func InitArmoBEConnector() {
|
||||
case "":
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPIProd())
|
||||
default:
|
||||
glog.Errorf("--environment flag usage: %s", envFlagUsage)
|
||||
os.Exit(1)
|
||||
logger.L().Fatal("--environment flag usage: " + envFlagUsage)
|
||||
}
|
||||
case 2:
|
||||
glog.Errorf("--environment flag usage: %s", envFlagUsage)
|
||||
os.Exit(1)
|
||||
case 3:
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(urlSlices[0], urlSlices[1], urlSlices[2]))
|
||||
logger.L().Fatal("--environment flag usage: " + envFlagUsage)
|
||||
case 3, 4:
|
||||
var armoAUTHURL string
|
||||
armoERURL := urlSlices[0] // mandatory
|
||||
armoBEURL := urlSlices[1] // mandatory
|
||||
armoFEURL := urlSlices[2] // mandatory
|
||||
if len(urlSlices) <= 4 {
|
||||
armoAUTHURL = urlSlices[3]
|
||||
}
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL, armoAUTHURL))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,8 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -13,31 +10,42 @@ var scanInfo cautils.ScanInfo
|
||||
|
||||
// scanCmd represents the scan command
|
||||
var scanCmd = &cobra.Command{
|
||||
Use: "scan <command>",
|
||||
Use: "scan [command]",
|
||||
Short: "Scan the current running cluster or yaml files",
|
||||
Long: `The action you want to perform`,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
if !strings.EqualFold(args[0], "framework") && !strings.EqualFold(args[0], "control") {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: framework, control", args[0])
|
||||
if args[0] != "framework" && args[0] != "control" {
|
||||
scanInfo.ScanAll = true
|
||||
return frameworkCmd.RunE(cmd, append([]string{"all"}, args...))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
scanInfo.ScanAll = true
|
||||
frameworks := getter.NativeFrameworks
|
||||
frameworkArgs := []string{strings.Join(frameworks, ",")}
|
||||
frameworkCmd.RunE(cmd, frameworkArgs)
|
||||
return frameworkCmd.RunE(cmd, []string{"all"})
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func frameworkInitConfig() {
|
||||
k8sinterface.SetClusterContextName(scanInfo.KubeContext)
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
cobra.OnInitialize(frameworkInitConfig)
|
||||
|
||||
rootCmd.AddCommand(scanCmd)
|
||||
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "kube-context", "", "", "Kube context. Default will use the current-context")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to an controls-config obj. If not set will download controls-config from ARMO management portal")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.UseArtifactsFrom, "use-artifacts-from", "", "Load artifacts from local directory. If not used will download them")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system,kube-public")
|
||||
scanCmd.PersistentFlags().Uint16VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer"/"json"/"junit"/"prometheus"`)
|
||||
@@ -53,4 +61,5 @@ func init() {
|
||||
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensor, "enable-host-scan", "", "Deploy the ARMO K8s host-sensor daemonset in the scanned cluster. It is deleted right after the data is collected. Required to collect valuable data from cluster nodes for certain controls")
|
||||
hostF.NoOptDefVal = "true"
|
||||
hostF.DefValue = "false, for no TTY in stdin"
|
||||
|
||||
}
|
||||
|
||||
@@ -1,12 +1,19 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliobjects"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var submitInfo cliobjects.Submit
|
||||
|
||||
var submitCmdExamples = `
|
||||
|
||||
`
|
||||
var submitCmd = &cobra.Command{
|
||||
Use: "submit <command>",
|
||||
Short: "Submit an object to the Kubescape SaaS version",
|
||||
@@ -15,17 +22,25 @@ var submitCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(submitCmd)
|
||||
}
|
||||
|
||||
func getSubmittedClusterConfig(k8s *k8sinterface.KubernetesApi) (*cautils.ClusterConfig, error) {
|
||||
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), scanInfo.Account) // TODO - support none cluster env submit
|
||||
if clusterConfig.GetCustomerGUID() != "" {
|
||||
if err := clusterConfig.SetTenant(); err != nil {
|
||||
return clusterConfig, err
|
||||
var submitExceptionsCmd = &cobra.Command{
|
||||
Use:   "exceptions <full path to exceptions file>",
|
||||
Short: "Submit exceptions to the Kubescape SaaS version",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 1 {
|
||||
return fmt.Errorf("missing full path to exceptions file")
|
||||
}
|
||||
}
|
||||
|
||||
return clusterConfig, nil
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := clihandler.SubmitExceptions(submitInfo.Account, args[0]); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
submitCmd.PersistentFlags().StringVarP(&submitInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
rootCmd.AddCommand(submitCmd)
|
||||
|
||||
submitCmd.AddCommand(submitExceptionsCmd)
|
||||
}
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -14,7 +13,7 @@ var versionCmd = &cobra.Command{
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, "", "", "version"))
|
||||
fmt.Println("Your current version is: " + cautils.BuildNumber)
|
||||
logger.L().Info("Your current version is: " + cautils.BuildNumber)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -6,15 +6,21 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
printerv1 "github.com/armosec/kubescape/resultshandling/printer/v1"
|
||||
|
||||
// printerv2 "github.com/armosec/kubescape/resultshandling/printer/v2"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/clihandler/cliinterfaces"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/hostsensorutils"
|
||||
"github.com/armosec/kubescape/opaprocessor"
|
||||
"github.com/armosec/kubescape/policyhandler"
|
||||
"github.com/armosec/kubescape/resourcehandler"
|
||||
"github.com/armosec/kubescape/resultshandling"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/mattn/go-isatty"
|
||||
@@ -30,20 +36,22 @@ type componentInterfaces struct {
|
||||
|
||||
func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
|
||||
k8s := getKubernetesApi(scanInfo)
|
||||
var k8s *k8sinterface.KubernetesApi
|
||||
if scanInfo.GetScanningEnvironment() == cautils.ScanCluster {
|
||||
k8s = getKubernetesApi()
|
||||
if k8s == nil {
|
||||
logger.L().Fatal("failed connecting to Kubernetes cluster")
|
||||
}
|
||||
}
|
||||
|
||||
tenantConfig := getTenantConfig(scanInfo, k8s)
|
||||
tenantConfig := getTenantConfig(scanInfo.Account, scanInfo.KubeContext, k8s)
|
||||
|
||||
// Set submit behavior AFTER loading tenant config
|
||||
setSubmitBehavior(scanInfo, tenantConfig)
|
||||
|
||||
hostSensorHandler := getHostSensorHandler(scanInfo, k8s)
|
||||
if err := hostSensorHandler.Init(); err != nil {
|
||||
errMsg := "failed to init host sensor"
|
||||
if scanInfo.VerboseMode {
|
||||
errMsg = fmt.Sprintf("%s: %v", errMsg, err)
|
||||
}
|
||||
cautils.ErrorDisplay(errMsg)
|
||||
logger.L().Error("failed to init host sensor", helpers.Error(err))
|
||||
hostSensorHandler = &hostsensorutils.HostSensorHandlerMock{}
|
||||
}
|
||||
// excluding hostsensor namespace
|
||||
@@ -51,7 +59,12 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
scanInfo.ExcludedNamespaces = fmt.Sprintf("%s,%s", scanInfo.ExcludedNamespaces, hostSensorHandler.GetNamespace())
|
||||
}
|
||||
|
||||
resourceHandler := getResourceHandler(scanInfo, tenantConfig, k8s, hostSensorHandler)
|
||||
registryAdaptors, err := resourcehandler.NewRegistryAdaptors()
|
||||
if err != nil {
|
||||
logger.L().Error("failed to initialize registry adaptors", helpers.Error(err))
|
||||
}
|
||||
|
||||
resourceHandler := getResourceHandler(scanInfo, tenantConfig, k8s, hostSensorHandler, registryAdaptors)
|
||||
|
||||
// reporting behavior - setup reporter
|
||||
reportHandler := getReporter(tenantConfig, scanInfo.Submit)
|
||||
@@ -60,7 +73,8 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanInfo.GetScanningEnvironment()))
|
||||
|
||||
// setup printer
|
||||
printerHandler := printer.GetPrinter(scanInfo.Format, scanInfo.VerboseMode)
|
||||
printerHandler := printerv1.GetPrinter(scanInfo.Format, scanInfo.VerboseMode)
|
||||
// printerHandler = printerv2.GetPrinter(scanInfo.Format, scanInfo.VerboseMode)
|
||||
printerHandler.SetWriter(scanInfo.Output)
|
||||
|
||||
return componentInterfaces{
|
||||
@@ -73,7 +87,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
}
|
||||
|
||||
func ScanCliSetup(scanInfo *cautils.ScanInfo) error {
|
||||
cautils.ScanStartDisplay()
|
||||
logger.L().Info("ARMO security scanner starting")
|
||||
|
||||
interfaces := getInterfaces(scanInfo)
|
||||
// setPolicyGetter(scanInfo, interfaces.clusterConfig.GetCustomerGUID())
|
||||
@@ -81,23 +95,27 @@ func ScanCliSetup(scanInfo *cautils.ScanInfo) error {
|
||||
processNotification := make(chan *cautils.OPASessionObj)
|
||||
reportResults := make(chan *cautils.OPASessionObj)
|
||||
|
||||
cautils.ClusterName = interfaces.tenantConfig.GetClusterName() // TODO - Deprecated
|
||||
cautils.CustomerGUID = interfaces.tenantConfig.GetCustomerGUID() // TODO - Deprecated
|
||||
cautils.ClusterName = interfaces.tenantConfig.GetClusterName() // TODO - Deprecated
|
||||
cautils.CustomerGUID = interfaces.tenantConfig.GetAccountID() // TODO - Deprecated
|
||||
interfaces.report.SetClusterName(interfaces.tenantConfig.GetClusterName())
|
||||
interfaces.report.SetCustomerGUID(interfaces.tenantConfig.GetCustomerGUID())
|
||||
interfaces.report.SetCustomerGUID(interfaces.tenantConfig.GetAccountID())
|
||||
|
||||
downloadReleasedPolicy := getter.NewDownloadReleasedPolicy() // download config inputs from github release
|
||||
// set policy getter only after setting the customerGUID
|
||||
setPolicyGetter(scanInfo, interfaces.tenantConfig.GetCustomerGUID(), downloadReleasedPolicy)
|
||||
setConfigInputsGetter(scanInfo, interfaces.tenantConfig.GetCustomerGUID(), downloadReleasedPolicy)
|
||||
|
||||
// set policy getter only after setting the customerGUID
|
||||
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetAccountID(), scanInfo.FrameworkScan, downloadReleasedPolicy)
|
||||
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
|
||||
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions)
|
||||
|
||||
// TODO - list supported frameworks/controls
|
||||
if scanInfo.ScanAll {
|
||||
scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), reporthandling.KindFramework)
|
||||
}
|
||||
|
||||
//
|
||||
defer func() {
|
||||
if err := interfaces.hostSensorHandler.TearDown(); err != nil {
|
||||
errMsg := "failed to tear down host sensor"
|
||||
if scanInfo.VerboseMode {
|
||||
errMsg = fmt.Sprintf("%s: %v", errMsg, err)
|
||||
}
|
||||
cautils.ErrorDisplay(errMsg)
|
||||
logger.L().Error("failed to tear down host sensor", helpers.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -107,8 +125,7 @@ func ScanCliSetup(scanInfo *cautils.ScanInfo) error {
|
||||
policyHandler := policyhandler.NewPolicyHandler(&processNotification, interfaces.resourceHandler)
|
||||
|
||||
if err := Scan(policyHandler, scanInfo); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
}()
|
||||
|
||||
@@ -133,43 +150,24 @@ func ScanCliSetup(scanInfo *cautils.ScanInfo) error {
|
||||
|
||||
func Scan(policyHandler *policyhandler.PolicyHandler, scanInfo *cautils.ScanInfo) error {
|
||||
policyNotification := &reporthandling.PolicyNotification{
|
||||
NotificationType: reporthandling.TypeExecPostureScan,
|
||||
Rules: scanInfo.PolicyIdentifier,
|
||||
Designators: armotypes.PortalDesignator{},
|
||||
Rules: scanInfo.PolicyIdentifier,
|
||||
KubescapeNotification: reporthandling.KubescapeNotification{
|
||||
Designators: armotypes.PortalDesignator{},
|
||||
NotificationType: reporthandling.TypeExecPostureScan,
|
||||
},
|
||||
}
|
||||
switch policyNotification.NotificationType {
|
||||
switch policyNotification.KubescapeNotification.NotificationType {
|
||||
case reporthandling.TypeExecPostureScan:
|
||||
if err := policyHandler.HandleNotificationRequest(policyNotification, scanInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("notification type '%s' Unknown", policyNotification.NotificationType)
|
||||
return fmt.Errorf("notification type '%s' Unknown", policyNotification.KubescapeNotification.NotificationType)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error {
|
||||
|
||||
// list resources
|
||||
postureReport, err := submitInterfaces.SubmitObjects.SetResourcesReport()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
allresources, err := submitInterfaces.SubmitObjects.ListAllResources()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// report
|
||||
if err := submitInterfaces.Reporter.ActionSendReport(&cautils.OPASessionObj{PostureReport: postureReport, AllResources: allresources}); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("\nData has been submitted successfully")
|
||||
submitInterfaces.Reporter.DisplayReportURL()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func askUserForHostSensor() bool {
|
||||
return false
|
||||
|
||||
|
||||
@@ -7,62 +7,79 @@ import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/hostsensorutils"
|
||||
"github.com/armosec/kubescape/resourcehandler"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
|
||||
reporterv2 "github.com/armosec/kubescape/resultshandling/reporter/v2"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/rbac-utils/rbacscanner"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
func getKubernetesApi(scanInfo *cautils.ScanInfo) *k8sinterface.KubernetesApi {
|
||||
if scanInfo.GetScanningEnvironment() == cautils.ScanLocalFiles {
|
||||
// getKubernetesApi
|
||||
func getKubernetesApi() *k8sinterface.KubernetesApi {
|
||||
if !k8sinterface.IsConnectedToCluster() {
|
||||
return nil
|
||||
}
|
||||
return k8sinterface.NewKubernetesApi()
|
||||
}
|
||||
func getTenantConfig(scanInfo *cautils.ScanInfo, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
|
||||
if scanInfo.GetScanningEnvironment() == cautils.ScanLocalFiles {
|
||||
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), scanInfo.Account)
|
||||
func getTenantConfig(Account, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
|
||||
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
|
||||
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
}
|
||||
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
}
|
||||
|
||||
func getExceptionsGetter(useExceptions string) getter.IExceptionsGetter {
|
||||
if useExceptions != "" {
|
||||
// load exceptions from file
|
||||
return getter.NewLoadPolicy([]string{useExceptions})
|
||||
} else {
|
||||
return getter.GetArmoAPIConnector()
|
||||
}
|
||||
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), scanInfo.Account)
|
||||
}
|
||||
|
||||
func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, submit bool) *cautils.RBACObjects {
|
||||
if submit {
|
||||
return cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, tenantConfig.GetCustomerGUID(), tenantConfig.GetClusterName()))
|
||||
return cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, tenantConfig.GetAccountID(), tenantConfig.GetClusterName()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getReporter(tenantConfig cautils.ITenantConfig, submit bool) reporter.IReport {
|
||||
if submit {
|
||||
return reporter.NewReportEventReceiver(tenantConfig.GetConfigObj())
|
||||
// return reporterv1.NewReportEventReceiver(tenantConfig.GetConfigObj())
|
||||
return reporterv2.NewReportEventReceiver(tenantConfig.GetConfigObj())
|
||||
}
|
||||
return reporter.NewReportMock()
|
||||
return reporterv1.NewReportMock()
|
||||
}
|
||||
func getResourceHandler(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor) resourcehandler.IResourceHandler {
|
||||
if scanInfo.GetScanningEnvironment() == cautils.ScanLocalFiles {
|
||||
return resourcehandler.NewFileResourceHandler(scanInfo.InputPatterns)
|
||||
|
||||
func getResourceHandler(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor, registryAdaptors *resourcehandler.RegistryAdaptors) resourcehandler.IResourceHandler {
|
||||
if len(scanInfo.InputPatterns) > 0 || k8s == nil {
|
||||
return resourcehandler.NewFileResourceHandler(scanInfo.InputPatterns, registryAdaptors)
|
||||
}
|
||||
getter.GetArmoAPIConnector()
|
||||
rbacObjects := getRBACHandler(tenantConfig, k8s, scanInfo.Submit)
|
||||
return resourcehandler.NewK8sResourceHandler(k8s, getFieldSelector(scanInfo), hostSensorHandler, rbacObjects)
|
||||
return resourcehandler.NewK8sResourceHandler(k8s, getFieldSelector(scanInfo), hostSensorHandler, rbacObjects, registryAdaptors)
|
||||
}
|
||||
|
||||
func getHostSensorHandler(scanInfo *cautils.ScanInfo, k8s *k8sinterface.KubernetesApi) hostsensorutils.IHostSensor {
|
||||
if scanInfo.GetScanningEnvironment() == cautils.ScanLocalFiles {
|
||||
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
|
||||
return &hostsensorutils.HostSensorHandlerMock{}
|
||||
}
|
||||
|
||||
hasHostSensorControls := true
|
||||
// we need to determine which controls need the host sensor
|
||||
if scanInfo.HostSensor.Get() == nil && hasHostSensorControls {
|
||||
scanInfo.HostSensor.SetBool(askUserForHostSensor())
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: Kubernetes cluster nodes scanning is disabled. This is required to collect valuable data for certain controls. You can enable it using the --enable-host-scan flag\n")
|
||||
logger.L().Warning("Kubernetes cluster nodes scanning is disabled. This is required to collect valuable data for certain controls. You can enable it using the --enable-host-scan flag")
|
||||
}
|
||||
if hostSensorVal := scanInfo.HostSensor.Get(); hostSensorVal != nil && *hostSensorVal {
|
||||
hostSensorHandler, err := hostsensorutils.NewHostSensorHandler(k8s)
|
||||
if err != nil {
|
||||
cautils.WarningDisplay(os.Stderr, fmt.Sprintf("Warning: failed to create host sensor: %v\n", err.Error()))
|
||||
logger.L().Warning(fmt.Sprintf("failed to create host sensor: %s", err.Error()))
|
||||
return &hostsensorutils.HostSensorHandlerMock{}
|
||||
}
|
||||
return hostSensorHandler
|
||||
@@ -115,12 +132,6 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
|
||||
return
|
||||
}
|
||||
|
||||
// do not submit yaml/url scanning
|
||||
if scanInfo.GetScanningEnvironment() == cautils.ScanLocalFiles {
|
||||
scanInfo.Submit = false
|
||||
return
|
||||
}
|
||||
|
||||
if tenantConfig.IsConfigFound() { // config found in cache (submitted)
|
||||
if !scanInfo.Local {
|
||||
// Submit report
|
||||
@@ -130,62 +141,69 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
|
||||
if scanInfo.Submit {
|
||||
// submit - Create tenant & Submit report
|
||||
if err := tenantConfig.SetTenant(); err != nil {
|
||||
fmt.Println(err)
|
||||
logger.L().Error(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setPolicyGetter set the policy getter - local file/github release/ArmoAPI
func setPolicyGetter(scanInfo *cautils.ScanInfo, customerGUID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) {
if len(scanInfo.UseFrom) > 0 {
scanInfo.PolicyGetter = getter.NewLoadPolicy(scanInfo.UseFrom)
} else {
if customerGUID == "" || !scanInfo.FrameworkScan {
setDownloadReleasedPolicy(scanInfo, downloadReleasedPolicy)
} else {
setGetArmoAPIConnector(scanInfo, customerGUID)
}
func getPolicyGetter(loadPoliciesFromFile []string, accountID string, frameworkScope bool, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
if len(loadPoliciesFromFile) > 0 {
return getter.NewLoadPolicy(loadPoliciesFromFile)
}
if accountID != "" && frameworkScope {
g := getter.GetArmoAPIConnector() // download policy from ARMO backend
return g
}
if downloadReleasedPolicy == nil {
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
}
return getDownloadReleasedPolicy(downloadReleasedPolicy)

}
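
// Usage sketch (assumed, not part of this diff): the refactored getter is a pure function,
// so the caller resolves the policy source once instead of mutating scanInfo inside the helper.
// The variable names scanInfo and tenantConfig below are assumptions for illustration.
scanInfo.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, tenantConfig.GetAccountID(), scanInfo.FrameworkScan, nil)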
|
||||
// func setGetArmoAPIConnector(scanInfo *cautils.ScanInfo, customerGUID string) {
|
||||
// g := getter.GetArmoAPIConnector() // download policy from ARMO backend
|
||||
// g.SetCustomerGUID(customerGUID)
|
||||
// scanInfo.PolicyGetter = g
|
||||
// if scanInfo.ScanAll {
|
||||
// frameworks, err := g.ListCustomFrameworks(customerGUID)
|
||||
// if err != nil {
|
||||
// glog.Error("failed to get custom frameworks") // handle error
|
||||
// return
|
||||
// }
|
||||
// scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
|
||||
// }
|
||||
// }
|
||||
|
||||
// setConfigInputsGetter sets the config input getter - local file/github release/ArmoAPI
|
||||
func setConfigInputsGetter(scanInfo *cautils.ScanInfo, customerGUID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) {
|
||||
if len(scanInfo.ControlsInputs) > 0 {
|
||||
scanInfo.Getters.ControlsInputsGetter = getter.NewLoadPolicy([]string{scanInfo.ControlsInputs})
|
||||
func getConfigInputsGetter(ControlsInputs string, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IControlsInputsGetter {
|
||||
if len(ControlsInputs) > 0 {
|
||||
return getter.NewLoadPolicy([]string{ControlsInputs})
|
||||
}
|
||||
if accountID != "" {
|
||||
g := getter.GetArmoAPIConnector() // download config from ARMO backend
|
||||
return g
|
||||
}
|
||||
if downloadReleasedPolicy == nil {
|
||||
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
|
||||
}
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull config inputs, fallback to BE
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: failed to get config inputs from github release, this may affect the scanning results\n")
|
||||
}
|
||||
return downloadReleasedPolicy
|
||||
}
|
||||
|
||||
func getDownloadReleasedPolicy(downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull policy, fallback to cache
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: failed to get policies from github release, loading policies from cache\n")
|
||||
return getter.NewLoadPolicy(getDefaultFrameworksPaths())
|
||||
} else {
|
||||
if customerGUID != "" {
|
||||
scanInfo.Getters.ControlsInputsGetter = getter.GetArmoAPIConnector()
|
||||
} else {
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull config inputs, fallback to BE
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: failed to get config inputs from github release, this may affect the scanning results\n")
|
||||
}
|
||||
scanInfo.Getters.ControlsInputsGetter = downloadReleasedPolicy
|
||||
}
|
||||
return downloadReleasedPolicy
|
||||
}
|
||||
}
|
||||
|
||||
func setDownloadReleasedPolicy(scanInfo *cautils.ScanInfo, downloadReleasedPolicy *getter.DownloadReleasedPolicy) {
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull policy, fallback to cache
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: failed to get policies from github release, loading policies from cache\n")
|
||||
scanInfo.PolicyGetter = getter.NewLoadPolicy(getDefaultFrameworksPaths())
|
||||
} else {
|
||||
scanInfo.PolicyGetter = downloadReleasedPolicy
|
||||
}
|
||||
}
|
||||
func setGetArmoAPIConnector(scanInfo *cautils.ScanInfo, customerGUID string) {
|
||||
g := getter.GetArmoAPIConnector() // download policy from ARMO backend
|
||||
g.SetCustomerGUID(customerGUID)
|
||||
scanInfo.PolicyGetter = g
|
||||
if scanInfo.ScanAll {
|
||||
frameworks, err := g.ListCustomFrameworks(customerGUID)
|
||||
if err != nil {
|
||||
glog.Error("failed to get custom frameworks") // handle error
|
||||
return
|
||||
}
|
||||
scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
|
||||
}
|
||||
}
|
||||
func getDefaultFrameworksPaths() []string {
fwPaths := []string{}
for i := range getter.NativeFrameworks {
@@ -193,3 +211,11 @@ func getDefaultFrameworksPaths() []string {
}
return fwPaths
}

func listFrameworksNames(policyGetter getter.IPolicyGetter) []string {
fw, err := policyGetter.ListFrameworks()
if err != nil {
fw = getDefaultFrameworksPaths()
}
return fw
}

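
// Sketch (assumed wiring, not from this diff): "scan all" can resolve the framework list up
// front via listFrameworksNames and register it as policy identifiers, falling back to the
// bundled framework files when the getter cannot list them.
policyGetter := getPolicyGetter(nil, "", true, nil)
scanInfo.SetPolicyIdentifiers(listFrameworksNames(policyGetter), reporthandling.KindFramework)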
containerscan/containerscan_mock.go (new file, 90 lines)
@@ -0,0 +1,90 @@
|
||||
package containerscan
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/francoispqt/gojay"
|
||||
)
|
||||
|
||||
// GenerateContainerScanReportMock - generate a scan result
|
||||
func GenerateContainerScanReportMock() ScanResultReport {
|
||||
ds := ScanResultReport{
|
||||
WLID: "wlid://cluster-k8s-geriatrix-k8s-demo3/namespace-whisky-app/deployment-whisky4all-shipping",
|
||||
CustomerGUID: "1231bcb1-49ce-4a67-bdd3-5da7a393ae08",
|
||||
ImgTag: "dreg.armo.cloud:443/demoservice:v16",
|
||||
ImgHash: "docker-pullable://dreg.armo.cloud:443/demoservice@sha256:754f3cfca915a07ed10655a301dd7a8dc5526a06f9bd06e7c932f4d4108a8296",
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
}
|
||||
|
||||
ds.Layers = make(LayersList, 0)
|
||||
layer := ScanResultLayer{}
|
||||
GenerateContainerScanLayer(&layer)
|
||||
ds.Layers = append(ds.Layers, layer)
|
||||
return ds
|
||||
}
|
||||
|
||||
// GenerateContainerScanReportMock - generate a scan result
|
||||
func GenerateContainerScanReportNoVulMock() ScanResultReport {
|
||||
ds := ScanResultReport{
|
||||
WLID: "wlid://cluster-k8s-geriatrix-k8s-demo3/namespace-whisky-app/deployment-whisky4all-shipping",
|
||||
CustomerGUID: "1231bcb1-49ce-4a67-bdd3-5da7a393ae08",
|
||||
ImgTag: "dreg.armo.cloud:443/demoservice:v16",
|
||||
ImgHash: "docker-pullable://dreg.armo.cloud:443/demoservice@sha256:754f3cfca915a07ed10655a301dd7a8dc5526a06f9bd06e7c932f4d4108a8296",
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
ContainerName: "shipping",
|
||||
}
|
||||
|
||||
ds.Layers = make(LayersList, 0)
|
||||
layer := ScanResultLayer{LayerHash: "aaa"}
|
||||
ds.Layers = append(ds.Layers, layer)
|
||||
return ds
|
||||
}
|
||||
|
||||
var hash = []rune("abcdef0123456789")
|
||||
var nums = []rune("0123456789")
|
||||
|
||||
func randSeq(n int, bank []rune) string {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
b := make([]rune, n)
|
||||
for i := range b {
|
||||
b[i] = bank[rand.Intn(len(bank))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// GenerateContainerScanLayer - generate a layer with random vuls
|
||||
func GenerateContainerScanLayer(layer *ScanResultLayer) {
|
||||
layer.LayerHash = randSeq(32, hash)
|
||||
layer.Vulnerabilities = make(VulnerabilitiesList, 0)
|
||||
layer.Packages = make(LinuxPkgs, 0)
|
||||
vuls := rand.Intn(10) + 1
|
||||
|
||||
for i := 0; i < vuls; i++ {
|
||||
v := Vulnerability{}
|
||||
GenerateVulnerability(&v)
|
||||
layer.Vulnerabilities = append(layer.Vulnerabilities, v)
|
||||
}
|
||||
|
||||
pkg := LinuxPackage{PackageName: "coreutils"}
|
||||
pkg.Files = make(PkgFiles, 0)
|
||||
pf := PackageFile{Filename: "aa"}
|
||||
pkg.Files = append(pkg.Files, pf)
|
||||
layer.Packages = append(layer.Packages, pkg)
|
||||
}
|
||||
|
||||
// GenerateVulnerability - generate a vul (just diff "cve"'s)
|
||||
func GenerateVulnerability(v *Vulnerability) error {
|
||||
baseVul := " { \"name\": \"CVE-2014-9471\", \"imageTag\": \"debian:8\", \"link\": \"https://security-tracker.debian.org/tracker/CVE-2014-9471\", \"description\": \"The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the sdf\", \"severity\": \"Low\", \"metadata\": { \"NVD\": { \"CVSSv2\": { \"Score\": 7.5, \"Vectors\": \"AV:N/AC:L/Au:N/C:P/I:P\" } } }, \"fixedIn\": [ { \"name\": \"coreutils\", \"imageTag\": \"debian:8\", \"version\": \"8.23-1\" } ] }"
|
||||
b := []byte(baseVul)
|
||||
r := bytes.NewReader(b)
|
||||
er := gojay.NewDecoder(r).DecodeObject(v)
|
||||
v.RelatedPackageName = "coreutils"
|
||||
v.Severity = HighSeverity
|
||||
v.Relevancy = Irelevant
|
||||
v.Name = "CVE-" + randSeq(4, nums) + "-" + randSeq(4, nums)
|
||||
return er
|
||||
|
||||
}
|
||||
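
// Example (assumed usage of the mock helpers above, not part of this diff): build a fake
// scan report and summarize it, e.g. for unit tests of a receiver component.
report := GenerateContainerScanReportMock()
summary := report.Summarize()
fmt.Printf("scanID=%s, total vulnerabilities=%d\n", summary.ContainerScanID, summary.TotalCount)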
containerscan/containerscan_test.go (new file, 90 lines)
@@ -0,0 +1,90 @@
|
||||
package containerscan
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/francoispqt/gojay"
|
||||
)
|
||||
|
||||
func TestDecodeScanWIthDangearousArtifacts(t *testing.T) {
|
||||
rhs := &ScanResultReport{}
|
||||
er := gojay.NewDecoder(strings.NewReader(nginxScanJSON)).DecodeObject(rhs)
|
||||
if er != nil {
|
||||
t.Errorf("decode failed due to: %v", er.Error())
|
||||
}
|
||||
sumObj := rhs.Summarize()
|
||||
if sumObj.Registry != "" {
|
||||
t.Errorf("sumObj.Registry = %v", sumObj.Registry)
|
||||
}
|
||||
if sumObj.VersionImage != "nginx:1.18.0" {
|
||||
t.Errorf("sumObj.VersionImage = %v", sumObj.Registry)
|
||||
}
|
||||
if sumObj.ImgTag != "nginx:1.18.0" {
|
||||
t.Errorf("sumObj.ImgTag = %v", sumObj.ImgTag)
|
||||
}
|
||||
if sumObj.Status != "Success" {
|
||||
t.Errorf("sumObj.Status = %v", sumObj.Status)
|
||||
}
|
||||
if len(sumObj.ListOfDangerousArtifcats) != 3 {
|
||||
t.Errorf("sumObj.ListOfDangerousArtifcats = %v", sumObj.ListOfDangerousArtifcats)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalScanReport(t *testing.T) {
|
||||
ds := GenerateContainerScanReportMock()
|
||||
str1 := ds.AsFNVHash()
|
||||
rhs := &ScanResultReport{}
|
||||
|
||||
bolB, _ := json.Marshal(ds)
|
||||
r := bytes.NewReader(bolB)
|
||||
|
||||
er := gojay.NewDecoder(r).DecodeObject(rhs)
|
||||
if er != nil {
|
||||
t.Errorf("marshalling failed due to: %v", er.Error())
|
||||
}
|
||||
|
||||
if rhs.AsFNVHash() != str1 {
|
||||
t.Errorf("marshalling failed different values after marshal:\nOriginal:\n%v\nParsed:\n%v\n\n===\n", string(bolB), rhs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalScanReport1(t *testing.T) {
|
||||
ds := Vulnerability{}
|
||||
if err := GenerateVulnerability(&ds); err != nil {
|
||||
t.Errorf("%v\n%v\n", ds, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetByPkgNameSuccess(t *testing.T) {
|
||||
ds := GenerateContainerScanReportMock()
|
||||
a := ds.Layers[0].GetFilesByPackage("coreutils")
|
||||
if a != nil {
|
||||
|
||||
fmt.Printf("%+v\n", *a)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGetByPkgNameMissing(t *testing.T) {
|
||||
ds := GenerateContainerScanReportMock()
|
||||
a := ds.Layers[0].GetFilesByPackage("s")
|
||||
if a != nil && len(*a) > 0 {
|
||||
t.Errorf("expected - no such package should be in that layer %v\n\n; found - %v", ds, a)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestCalculateFixed(t *testing.T) {
|
||||
res := CalculateFixed([]FixedIn{{
|
||||
Name: "",
|
||||
ImgTag: "",
|
||||
Version: "",
|
||||
}})
|
||||
if 0 != res {
|
||||
t.Errorf("wrong fix status: %v", res)
|
||||
}
|
||||
}
|
||||
containerscan/datastructures.go (new file, 37 lines)
@@ -0,0 +1,37 @@
|
||||
package containerscan
|
||||
|
||||
const (
|
||||
//defines Relevancy as enum-like
|
||||
Unknown = "Unknown"
|
||||
Relevant = "Relevant"
|
||||
Irelevant = "Irelevant"
|
||||
NoSP = "No signature profile to compare"
|
||||
|
||||
//Clair Severities
|
||||
UnknownSeverity = "Unknown"
|
||||
NegligibleSeverity = "Negligible"
|
||||
LowSeverity = "Low"
|
||||
MediumSeverity = "Medium"
|
||||
HighSeverity = "High"
|
||||
CriticalSeverity = "Critical"
|
||||
|
||||
ContainerScanRedisPrefix = "_containerscan"
|
||||
)
|
||||
|
||||
var KnownSeverities = map[string]bool{
|
||||
UnknownSeverity: true,
|
||||
NegligibleSeverity: true,
|
||||
LowSeverity: true,
|
||||
MediumSeverity: true,
|
||||
HighSeverity: true,
|
||||
CriticalSeverity: true,
|
||||
}
|
||||
|
||||
func CalculateFixed(Fixes []FixedIn) int {
|
||||
for _, fix := range Fixes {
|
||||
if fix.Version != "None" && fix.Version != "" {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
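
// CalculateFixed treats any entry with a non-empty version other than "None" as "a fix exists".
// Tiny illustration (not from the source):
//   CalculateFixed([]FixedIn{{Version: "None"}})      // 0 - no fixed version released
//   CalculateFixed([]FixedIn{{Version: "0:1.8.4-5"}}) // 1 - a fixed version is known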
containerscan/datastructuresmethods.go (new file, 51 lines)
@@ -0,0 +1,51 @@
|
||||
package containerscan
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
)
|
||||
|
||||
func (layer *ScanResultLayer) GetFilesByPackage(pkgname string) (files *PkgFiles) {
|
||||
for _, pkg := range layer.Packages {
|
||||
if pkg.PackageName == pkgname {
|
||||
return &pkg.Files
|
||||
}
|
||||
}
|
||||
|
||||
return &PkgFiles{}
|
||||
}
|
||||
|
||||
func (layer *ScanResultLayer) GetPackagesNames() []string {
|
||||
pkgsNames := []string{}
|
||||
for _, pkg := range layer.Packages {
|
||||
pkgsNames = append(pkgsNames, pkg.PackageName)
|
||||
}
|
||||
return pkgsNames
|
||||
}
|
||||
|
||||
func (scanresult *ScanResultReport) GetDesignatorsNContext() (*armotypes.PortalDesignator, []armotypes.ArmoContext) {
|
||||
designatorsObj := armotypes.AttributesDesignatorsFromWLID(scanresult.WLID)
|
||||
designatorsObj.Attributes["containerName"] = scanresult.ContainerName
|
||||
designatorsObj.Attributes["customerGUID"] = scanresult.CustomerGUID
|
||||
contextObj := armotypes.DesignatorToArmoContext(designatorsObj, "designators")
|
||||
return designatorsObj, contextObj
|
||||
}
|
||||
|
||||
func (scanresult *ScanResultReport) Validate() bool {
|
||||
if scanresult.CustomerGUID == "" || (scanresult.ImgHash == "" && scanresult.ImgTag == "") || scanresult.Timestamp <= 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
//TODO validate layers & vuls
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (v *Vulnerability) IsRCE() bool {
|
||||
desc := strings.ToLower(v.Description)
|
||||
|
||||
isRCE := strings.Contains(v.Description, "RCE")
|
||||
|
||||
return isRCE || strings.Contains(desc, "remote code execution") || strings.Contains(desc, "remote command execution") || strings.Contains(desc, "arbitrary code") || strings.Contains(desc, "code execution") || strings.Contains(desc, "code injection") || strings.Contains(desc, "command injection") || strings.Contains(desc, "inject arbitrary commands")
|
||||
}
|
||||
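
// IsRCE is a keyword heuristic over the CVE description. Illustration (not from the source):
//   v := Vulnerability{Description: "allows remote attackers to execute arbitrary code"}
//   v.IsRCE() // true - "arbitrary code" is one of the matched phrases
//   w := Vulnerability{Description: "denial of service via crafted input"}
//   w.IsRCE() // false - no RCE-related phrase in the description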
containerscan/elasticadapters.go (new file, 141 lines)
@@ -0,0 +1,141 @@
|
||||
package containerscan
|
||||
|
||||
import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
cautils "github.com/armosec/utils-k8s-go/armometadata"
|
||||
)
|
||||
|
||||
// ToFlatVulnerabilities - flattens the scan report into one result per unique vulnerability
|
||||
func (scanresult *ScanResultReport) ToFlatVulnerabilities() []*ElasticContainerVulnerabilityResult {
|
||||
vuls := make([]*ElasticContainerVulnerabilityResult, 0)
|
||||
vul2indx := make(map[string]int)
|
||||
scanID := scanresult.AsFNVHash()
|
||||
designatorsObj, ctxList := scanresult.GetDesignatorsNContext()
|
||||
for _, layer := range scanresult.Layers {
|
||||
for _, vul := range layer.Vulnerabilities {
|
||||
esLayer := ESLayer{LayerHash: layer.LayerHash, ParentLayerHash: layer.ParentLayerHash}
|
||||
if indx, isOk := vul2indx[vul.Name]; isOk {
|
||||
vuls[indx].Layers = append(vuls[indx].Layers, esLayer)
|
||||
continue
|
||||
}
|
||||
result := &ElasticContainerVulnerabilityResult{WLID: scanresult.WLID,
|
||||
Timestamp: scanresult.Timestamp,
|
||||
Designators: *designatorsObj,
|
||||
Context: ctxList}
|
||||
|
||||
result.Vulnerability = vul
|
||||
result.Layers = make([]ESLayer, 0)
|
||||
result.Layers = append(result.Layers, esLayer)
|
||||
result.ContainerScanID = scanID
|
||||
|
||||
result.IsFixed = CalculateFixed(vul.Fixes)
|
||||
result.RelevantLinks = append(result.RelevantLinks, "https://nvd.nist.gov/vuln/detail/"+vul.Name)
|
||||
result.RelevantLinks = append(result.RelevantLinks, vul.Link)
|
||||
result.Vulnerability.Link = "https://nvd.nist.gov/vuln/detail/" + vul.Name
|
||||
|
||||
result.Categories.IsRCE = result.IsRCE()
|
||||
vuls = append(vuls, result)
|
||||
vul2indx[vul.Name] = len(vuls) - 1
|
||||
|
||||
}
|
||||
}
|
||||
// find first introduced
|
||||
for i, v := range vuls {
|
||||
earlyLayer := ""
|
||||
for _, layer := range v.Layers {
|
||||
if layer.ParentLayerHash == earlyLayer {
|
||||
earlyLayer = layer.LayerHash
|
||||
}
|
||||
}
|
||||
vuls[i].IntroducedInLayer = earlyLayer
|
||||
|
||||
}
|
||||
|
||||
return vuls
|
||||
}
|
||||
|
||||
func (scanresult *ScanResultReport) Summarize() *ElasticContainerScanSummaryResult {
|
||||
designatorsObj, ctxList := scanresult.GetDesignatorsNContext()
|
||||
summary := &ElasticContainerScanSummaryResult{
|
||||
Designators: *designatorsObj,
|
||||
Context: ctxList,
|
||||
CustomerGUID: scanresult.CustomerGUID,
|
||||
ImgTag: scanresult.ImgTag,
|
||||
ImgHash: scanresult.ImgHash,
|
||||
WLID: scanresult.WLID,
|
||||
Timestamp: scanresult.Timestamp,
|
||||
ContainerName: scanresult.ContainerName,
|
||||
ContainerScanID: scanresult.AsFNVHash(),
|
||||
ListOfDangerousArtifcats: scanresult.ListOfDangerousArtifcats,
|
||||
}
|
||||
|
||||
summary.Cluster = designatorsObj.Attributes[armotypes.AttributeCluster]
|
||||
summary.Namespace = designatorsObj.Attributes[armotypes.AttributeNamespace]
|
||||
|
||||
imageInfo, e2 := cautils.ImageTagToImageInfo(scanresult.ImgTag)
|
||||
if e2 == nil {
|
||||
summary.Registry = imageInfo.Registry
|
||||
summary.VersionImage = imageInfo.VersionImage
|
||||
}
|
||||
|
||||
summary.PackagesName = make([]string, 0)
|
||||
|
||||
severitiesStats := map[string]SeverityStats{}
|
||||
|
||||
uniqueVulsMap := make(map[string]bool)
|
||||
for _, layer := range scanresult.Layers {
|
||||
summary.PackagesName = append(summary.PackagesName, (layer.GetPackagesNames())...)
|
||||
for _, vul := range layer.Vulnerabilities {
|
||||
if _, isOk := uniqueVulsMap[vul.Name]; isOk {
|
||||
continue
|
||||
}
|
||||
uniqueVulsMap[vul.Name] = true
|
||||
|
||||
// TODO: maybe add all severities just to have a placeholders
|
||||
if !KnownSeverities[vul.Severity] {
|
||||
vul.Severity = UnknownSeverity
|
||||
}
|
||||
|
||||
vulnSeverityStats, ok := severitiesStats[vul.Severity]
|
||||
if !ok {
|
||||
vulnSeverityStats = SeverityStats{Severity: vul.Severity}
|
||||
}
|
||||
|
||||
vulnSeverityStats.TotalCount++
|
||||
summary.TotalCount++
|
||||
isFixed := CalculateFixed(vul.Fixes) > 0
|
||||
if isFixed {
|
||||
vulnSeverityStats.FixAvailableOfTotalCount++
|
||||
summary.FixAvailableOfTotalCount++
|
||||
}
|
||||
isRCE := vul.IsRCE()
|
||||
if isRCE {
|
||||
vulnSeverityStats.RCECount++
|
||||
summary.RCECount++
|
||||
}
|
||||
if vul.Relevancy == Relevant {
|
||||
vulnSeverityStats.RelevantCount++
|
||||
summary.RelevantCount++
|
||||
if isFixed {
|
||||
vulnSeverityStats.FixAvailableForRelevantCount++
|
||||
summary.FixAvailableForRelevantCount++
|
||||
}
|
||||
|
||||
}
|
||||
severitiesStats[vul.Severity] = vulnSeverityStats
|
||||
}
|
||||
}
|
||||
summary.Status = "Success"
|
||||
|
||||
// if criticalStats, hasCritical := severitiesStats[CriticalSeverity]; hasCritical && criticalStats.TotalCount > 0 {
|
||||
// summary.Status = "Fail"
|
||||
// }
|
||||
// if highStats, hasHigh := severitiesStats[HighSeverity]; hasHigh && highStats.RelevantCount > 0 {
|
||||
// summary.Status = "Fail"
|
||||
// }
|
||||
|
||||
for sever := range severitiesStats {
|
||||
summary.SeveritiesStats = append(summary.SeveritiesStats, severitiesStats[sever])
|
||||
}
|
||||
return summary
|
||||
}
|
||||
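
// Sketch of the intended fan-out (assumed caller such as an event receiver; scanResult is an
// assumed *ScanResultReport variable, not part of this diff): one summary document plus one
// document per unique vulnerability, both carrying the same ContainerScanID.
summary := scanResult.Summarize()
perVulnerability := scanResult.ToFlatVulnerabilities()
fmt.Printf("%s: %d unique vulnerabilities, %d with a fix available\n", summary.ImgTag, summary.TotalCount, summary.FixAvailableOfTotalCount)
_ = perVulnerability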
containerscan/elasticdatastructures.go (new file, 89 lines)
@@ -0,0 +1,89 @@
|
||||
package containerscan
|
||||
|
||||
import "github.com/armosec/armoapi-go/armotypes"
|
||||
|
||||
type ElasticContainerVulnerabilityResult struct {
|
||||
Designators armotypes.PortalDesignator `json:"designators"`
|
||||
Context []armotypes.ArmoContext `json:"context"`
|
||||
|
||||
WLID string `json:"wlid"`
|
||||
ContainerScanID string `json:"containersScanID"`
|
||||
Layers []ESLayer `json:"layers"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
IsFixed int `json:"isFixed"`
|
||||
IntroducedInLayer string `json:"layerHash"`
|
||||
RelevantLinks []string `json:"links"` // shitty SE practice
|
||||
|
||||
Vulnerability `json:",inline"`
|
||||
}
|
||||
|
||||
type ESLayer struct {
|
||||
LayerHash string `json:"layerHash"`
|
||||
ParentLayerHash string `json:"parentLayerHash"`
|
||||
}
|
||||
|
||||
type SeverityStats struct {
|
||||
Severity string `json:"severity,omitempty"`
|
||||
TotalCount int64 `json:"total"`
|
||||
FixAvailableOfTotalCount int64 `json:"fixedTotal"`
|
||||
RelevantCount int64 `json:"totalRelevant"`
|
||||
FixAvailableForRelevantCount int64 `json:"fixedRelevant"`
|
||||
RCECount int64 `json:"rceTotal"`
|
||||
UrgentCount int64 `json:"urgent"`
|
||||
NeglectedCount int64 `json:"neglected"`
|
||||
HealthStatus string `json:"healthStatus"`
|
||||
}
|
||||
|
||||
type ElasticContainerScanSeveritySummary struct {
|
||||
Designators armotypes.PortalDesignator `json:"designators"`
|
||||
Context []armotypes.ArmoContext `json:"context"`
|
||||
|
||||
SeverityStats
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
ContainerScanID string `json:"containersScanID"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
WLID string `json:"wlid"`
|
||||
ImgTag string `json:"imageTag"`
|
||||
ImgHash string `json:"imageHash"`
|
||||
Cluster string `json:"cluster"`
|
||||
Namespace string `json:"namespace"`
|
||||
ContainerName string `json:"containerName"`
|
||||
Status string `json:"status"`
|
||||
Registry string `json:"registry"`
|
||||
VersionImage string `json:"versionImage"`
|
||||
Version string `json:"version"`
|
||||
DayDate string `json:"dayDate"`
|
||||
}
|
||||
|
||||
type ElasticContainerScanSummaryResult struct {
|
||||
SeverityStats
|
||||
Designators armotypes.PortalDesignator `json:"designators"`
|
||||
Context []armotypes.ArmoContext `json:"context"`
|
||||
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
ContainerScanID string `json:"containersScanID"`
|
||||
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
WLID string `json:"wlid"`
|
||||
ImgTag string `json:"imageTag"`
|
||||
ImgHash string `json:"imageHash"`
|
||||
Cluster string `json:"cluster"`
|
||||
Namespace string `json:"namespace"`
|
||||
ContainerName string `json:"containerName"`
|
||||
PackagesName []string `json:"packages"`
|
||||
|
||||
ListOfDangerousArtifcats []string `json:"listOfDangerousArtifcats"`
|
||||
|
||||
Status string `json:"status"`
|
||||
|
||||
Registry string `json:"registry"`
|
||||
VersionImage string `json:"versionImage"`
|
||||
|
||||
SeveritiesStats []SeverityStats `json:"severitiesStats"`
|
||||
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
func (summary *ElasticContainerScanSummaryResult) Validate() bool {
|
||||
return summary.CustomerGUID != "" && summary.ContainerScanID != "" && (summary.ImgTag != "" || summary.ImgHash != "") && summary.Timestamp > 0
|
||||
}
|
||||
containerscan/gojayunmarshaller.go (new file, 246 lines)
@@ -0,0 +1,246 @@
|
||||
package containerscan
|
||||
|
||||
import (
|
||||
"github.com/francoispqt/gojay"
|
||||
)
|
||||
|
||||
/*
|
||||
responsible for fast unmarshalling of various common containerscan structures and substructures
|
||||
|
||||
*/
|
||||
|
||||
// UnmarshalJSONObject - File inside a pkg
|
||||
func (file *PackageFile) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
|
||||
|
||||
switch key {
|
||||
case "name":
|
||||
err = dec.String(&(file.Filename))
|
||||
}
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
func (files *PkgFiles) UnmarshalJSONArray(dec *gojay.Decoder) error {
|
||||
lae := PackageFile{}
|
||||
if err := dec.Object(&lae); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*files = append(*files, lae)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (file *PackageFile) NKeys() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
// UnmarshalJSONObject--- Package
|
||||
func (pkgnx *LinuxPackage) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
|
||||
|
||||
switch key {
|
||||
case "packageName":
|
||||
err = dec.String(&(pkgnx.PackageName))
|
||||
|
||||
case "version":
|
||||
err = dec.String(&(pkgnx.PackageVersion))
|
||||
|
||||
case "files":
|
||||
err = dec.Array(&(pkgnx.Files))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (file *LinuxPackage) NKeys() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (pkgs *LinuxPkgs) UnmarshalJSONArray(dec *gojay.Decoder) error {
|
||||
lae := LinuxPackage{}
|
||||
if err := dec.Object(&lae); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*pkgs = append(*pkgs, lae)
|
||||
return nil
|
||||
}
|
||||
|
||||
//--------Vul fixed in----------------------------------
|
||||
func (fx *FixedIn) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
|
||||
|
||||
switch key {
|
||||
case "name":
|
||||
err = dec.String(&(fx.Name))
|
||||
|
||||
case "imageTag":
|
||||
err = dec.String(&(fx.ImgTag))
|
||||
case "version":
|
||||
err = dec.String(&(fx.Version))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *VulFixes) UnmarshalJSONArray(dec *gojay.Decoder) error {
|
||||
lae := FixedIn{}
|
||||
if err := dec.Object(&lae); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*t = append(*t, lae)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (file *FixedIn) NKeys() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
//------ VULNERABIlITy ---------------------
|
||||
|
||||
// Name string `json:"name"`
|
||||
// ImgHash string `json:"imageHash"`
|
||||
// ImgTag string `json:"imageTag",omitempty`
|
||||
// RelatedPackageName string `json:"packageName"`
|
||||
// PackageVersion string `json:"packageVersion"`
|
||||
// Link string `json:"link"`
|
||||
// Description string `json:"description"`
|
||||
// Severity string `json:"severity"`
|
||||
// Metadata interface{} `json:"metadata",omitempty`
|
||||
// Fixes VulFixes `json:"fixedIn",omitempty`
|
||||
// Relevancy string `json:"relevant"` // use the related enum
|
||||
|
||||
func (v *Vulnerability) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
|
||||
|
||||
switch key {
|
||||
case "name":
|
||||
err = dec.String(&(v.Name))
|
||||
|
||||
case "imageTag":
|
||||
err = dec.String(&(v.ImgTag))
|
||||
case "imageHash":
|
||||
err = dec.String(&(v.ImgHash))
|
||||
|
||||
case "packageName":
|
||||
err = dec.String(&(v.RelatedPackageName))
|
||||
|
||||
case "packageVersion":
|
||||
err = dec.String(&(v.PackageVersion))
|
||||
|
||||
case "link":
|
||||
err = dec.String(&(v.Link))
|
||||
|
||||
case "description":
|
||||
err = dec.String(&(v.Description))
|
||||
|
||||
case "severity":
|
||||
err = dec.String(&(v.Severity))
|
||||
|
||||
case "relevant":
|
||||
err = dec.String(&(v.Relevancy))
|
||||
|
||||
case "fixedIn":
|
||||
err = dec.Array(&(v.Fixes))
|
||||
|
||||
case "metadata":
|
||||
err = dec.Interface(&(v.Metadata))
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *VulnerabilitiesList) UnmarshalJSONArray(dec *gojay.Decoder) error {
|
||||
lae := Vulnerability{}
|
||||
if err := dec.Object(&lae); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*t = append(*t, lae)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *Vulnerability) NKeys() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
//---------Layer Object----------------------------------
|
||||
// type ScanResultLayer struct {
|
||||
// LayerHash string `json:layerHash`
|
||||
// Vulnerabilities []Vulnerability `json:vulnerabilities`
|
||||
// Packages []LinuxPackage `json:packageToFile`
|
||||
// }
|
||||
|
||||
func (scan *ScanResultLayer) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
|
||||
|
||||
switch key {
|
||||
// case "timestamp":
|
||||
// err = dec.Time(&(reporter.Timestamp), time.RFC3339)
|
||||
// reporter.Timestamp = reporter.Timestamp.Local()
|
||||
case "layerHash":
|
||||
err = dec.String(&(scan.LayerHash))
|
||||
|
||||
case "parentLayerHash":
|
||||
err = dec.String(&(scan.ParentLayerHash))
|
||||
|
||||
case "vulnerabilities":
|
||||
err = dec.Array(&(scan.Vulnerabilities))
|
||||
case "packageToFile":
|
||||
err = dec.Array(&(scan.Packages))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *LayersList) UnmarshalJSONArray(dec *gojay.Decoder) error {
|
||||
lae := ScanResultLayer{}
|
||||
if err := dec.Object(&lae); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*t = append(*t, lae)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (scan *ScanResultLayer) NKeys() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
//---------------------SCAN RESULT--------------------------------------------------------------------------
|
||||
|
||||
// type ScanResultReport struct {
|
||||
// CustomerGUID string `json:customerGuid`
|
||||
// ImgTag string `json:imageTag,omitempty`
|
||||
// ImgHash string `json:imageHash`
|
||||
// WLID string `json:wlid`
|
||||
// Timestamp int `json:customerGuid`
|
||||
// Layers []ScanResultLayer `json:layers`
|
||||
// ContainerName
|
||||
// }
|
||||
|
||||
func (scan *ScanResultReport) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
|
||||
|
||||
switch key {
|
||||
// case "timestamp":
|
||||
// err = dec.Time(&(reporter.Timestamp), time.RFC3339)
|
||||
// reporter.Timestamp = reporter.Timestamp.Local()
|
||||
case "customerGUID":
|
||||
err = dec.String(&(scan.CustomerGUID))
|
||||
case "imageTag":
|
||||
err = dec.String(&(scan.ImgTag))
|
||||
case "imageHash":
|
||||
err = dec.String(&(scan.ImgHash))
|
||||
case "wlid":
|
||||
err = dec.String(&(scan.WLID))
|
||||
case "containerName":
|
||||
err = dec.String(&(scan.ContainerName))
|
||||
case "timestamp":
|
||||
err = dec.Int64(&(scan.Timestamp))
|
||||
case "layers":
|
||||
err = dec.Array(&(scan.Layers))
|
||||
|
||||
case "listOfDangerousArtifcats":
|
||||
err = dec.SliceString(&(scan.ListOfDangerousArtifcats))
|
||||
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (scan *ScanResultReport) NKeys() int {
|
||||
return 0
|
||||
}
|
||||
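
// Example (mirrors the tests above; the wrapping function name and the "strings" import are
// assumptions for illustration): gojay decodes straight into these structs through the
// UnmarshalJSONObject/UnmarshalJSONArray implementations in this file, which is why every
// NKeys() returns 0 (accept any key set).
func decodeScanReport(raw string) (*ScanResultReport, error) {
report := &ScanResultReport{}
err := gojay.NewDecoder(strings.NewReader(raw)).DecodeObject(report)
return report, err
}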
containerscan/jsonrawscan.go (new file, 525 lines)
@@ -0,0 +1,525 @@
|
||||
package containerscan
|
||||
|
||||
var nginxScanJSON = `
|
||||
{
|
||||
"customerGUID": "1e3a88bf-92ce-44f8-914e-cbe71830d566",
|
||||
"imageTag": "nginx:1.18.0",
|
||||
"imageHash": "",
|
||||
"wlid": "wlid://cluster-test/namespace-test/deployment-davidg",
|
||||
"containerName": "nginx-1",
|
||||
"timestamp": 1628091365,
|
||||
"layers": [
|
||||
{
|
||||
"layerHash": "sha256:f7ec5a41d630a33a2d1db59b95d89d93de7ae5a619a3a8571b78457e48266eba",
|
||||
"parentLayerHash": "",
|
||||
"vulnerabilities": [
|
||||
{
|
||||
"name": "CVE-2009-0854",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "dash",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-0854",
|
||||
"description": "Untrusted search path vulnerability in dash 0.5.4, when used as a login shell, allows local users to execute arbitrary code via a Trojan horse .profile file in the current working directory.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2019-13627",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "libgcrypt20",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13627",
|
||||
"description": "It was discovered that there was a ECDSA timing attack in the libgcrypt20 cryptographic library. Version affected: 1.8.4-5, 1.7.6-2+deb9u3, and 1.6.3-2+deb8u4. Versions fixed: 1.8.5-2 and 1.6.3-2+deb8u7.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2021-33560",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "libgcrypt20",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-33560",
|
||||
"description": "Libgcrypt before 1.8.8 and 1.9.x before 1.9.3 mishandles ElGamal encryption because it lacks exponent blinding to address a side-channel attack against mpi_powm, and the window size is not chosen appropriately. (There is also an interoperability problem because the selection of the k integer value does not properly consider the differences between basic ElGamal encryption and generalized ElGamal encryption.) This, for example, affects use of ElGamal in OpenPGP.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:1.8.4-5+deb10u1"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2021-3345",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "libgcrypt20",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3345",
|
||||
"description": "_gcry_md_block_write in cipher/hash-common.c in Libgcrypt version 1.9.0 has a heap-based buffer overflow when the digest final function sets a large count value. It is recommended to upgrade to 1.9.1 or later.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2010-0834",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "base-files",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-0834",
|
||||
"description": "The base-files package before 5.0.0ubuntu7.1 on Ubuntu 9.10 and before 5.0.0ubuntu20.10.04.2 on Ubuntu 10.04 LTS, as shipped on Dell Latitude 2110 netbooks, does not require authentication for package installation, which allows remote archive servers and man-in-the-middle attackers to execute arbitrary code via a crafted package.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2018-6557",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "base-files",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-6557",
|
||||
"description": "The MOTD update script in the base-files package in Ubuntu 18.04 LTS before 10.1ubuntu2.2, and Ubuntu 18.10 before 10.1ubuntu6 incorrectly handled temporary files. A local attacker could use this issue to cause a denial of service, or possibly escalate privileges if kernel symlink restrictions were disabled.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2013-0223",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-0223",
|
||||
"description": "The SUSE coreutils-i18n.patch for GNU coreutils allows context-dependent attackers to cause a denial of service (segmentation fault and crash) via a long string to the join command, when using the -i switch, which triggers a stack-based buffer overflow in the alloca function.",
|
||||
"severity": "Low",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2015-4041",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-4041",
|
||||
"description": "The keycompare_mb function in sort.c in sort in GNU Coreutils through 8.23 on 64-bit platforms performs a size calculation without considering the number of bytes occupied by multibyte characters, which allows attackers to cause a denial of service (heap-based buffer overflow and application crash) or possibly have unspecified other impact via long UTF-8 strings.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2009-4135",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-4135",
|
||||
"description": "The distcheck rule in dist-check.mk in GNU coreutils 5.2.1 through 8.1 allows local users to gain privileges via a symlink attack on a file in a directory tree under /tmp.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2015-4042",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-4042",
|
||||
"description": "Integer overflow in the keycompare_mb function in sort.c in sort in GNU Coreutils through 8.23 might allow attackers to cause a denial of service (application crash) or possibly have unspecified other impact via long strings.",
|
||||
"severity": "Critical",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2013-0221",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-0221",
|
||||
"description": "The SUSE coreutils-i18n.patch for GNU coreutils allows context-dependent attackers to cause a denial of service (segmentation fault and crash) via a long string to the sort command, when using the (1) -d or (2) -M switch, which triggers a stack-based buffer overflow in the alloca function.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2013-0222",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-0222",
|
||||
"description": "The SUSE coreutils-i18n.patch for GNU coreutils allows context-dependent attackers to cause a denial of service (segmentation fault and crash) via a long string to the uniq command, which triggers a stack-based buffer overflow in the alloca function.",
|
||||
"severity": "Low",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2016-2781",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-2781",
|
||||
"description": "chroot in GNU coreutils, when used with --userspec, allows local users to escape to the parent session via a crafted TIOCSTI ioctl call, which pushes characters to the terminal's input buffer.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2017-18018",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-18018",
|
||||
"description": "In GNU Coreutils through 8.29, chown-core.c in chown and chgrp does not prevent replacement of a plain file with a symlink during use of the POSIX \"-R -L\" options, which allows local users to modify the ownership of arbitrary files by leveraging a race condition.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2021-20193",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "tar",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-20193",
|
||||
"description": "A flaw was found in the src/list.c of tar 1.33 and earlier. This flaw allows an attacker who can submit a crafted input file to tar to cause uncontrolled consumption of memory. The highest threat from this vulnerability is to system availability.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2005-2541",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "tar",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2005-2541",
|
||||
"description": "Tar 1.15.1 does not properly warn the user when extracting setuid or setgid files, which may allow local users or remote attackers to gain privileges.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2019-9923",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "tar",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9923",
|
||||
"description": "pax_decode_header in sparse.c in GNU Tar before 1.32 had a NULL pointer dereference when parsing certain archives that have malformed extended headers.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2018-1000654",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "libtasn1-6",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1000654",
|
||||
"description": "GNU Libtasn1-4.13 libtasn1-4.13 version libtasn1-4.13, libtasn1-4.12 contains a DoS, specifically CPU usage will reach 100% when running asn1Paser against the POC due to an issue in _asn1_expand_object_id(p_tree), after a long time, the program will be killed. This attack appears to be exploitable via parsing a crafted file.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2011-3374",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "apt",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2011-3374",
|
||||
"description": "It was found that apt-key in apt, all versions, do not correctly validate gpg keys with the master keyring, leading to a potential man-in-the-middle attack.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2021-37600",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "util-linux",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37600",
|
||||
"description": "An integer overflow in util-linux through 2.37.1 can potentially cause a buffer overflow if an attacker were able to use system resources in a way that leads to a large number in the /proc/sysvipc/sem file.",
|
||||
"severity": "Unknown",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2007-0822",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "util-linux",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2007-0822",
|
||||
"description": "umount, when running with the Linux 2.6.15 kernel on Slackware Linux 10.2, allows local users to trigger a NULL dereference and application crash by invoking the program with a pathname for a USB pen drive that was mounted and then physically removed, which might allow the users to obtain sensitive information, including core file contents.",
|
||||
"severity": "Low",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2004-1349",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "gzip",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2004-1349",
|
||||
"description": "gzip before 1.3 in Solaris 8, when called with the -f or -force flags, will change the permissions of files that are hard linked to the target files, which allows local users to view or modify these files.",
|
||||
"severity": "Low",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2004-0603",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "gzip",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2004-0603",
|
||||
"description": "gzexe in gzip 1.3.3 and earlier will execute an argument when the creation of a temp file fails instead of exiting the program, which could allow remote attackers or local users to execute arbitrary commands, a different vulnerability than CVE-1999-1332.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2010-0002",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "bash",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-0002",
|
||||
"description": "The /etc/profile.d/60alias.sh script in the Mandriva bash package for Bash 2.05b, 3.0, 3.2, 3.2.48, and 4.0 enables the --show-control-chars option in LS_OPTIONS, which allows local users to send escape sequences to terminal emulators, or hide the existence of a file, via a crafted filename.",
|
||||
"severity": "Low",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2019-18276",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "bash",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-18276",
|
||||
"description": "An issue was discovered in disable_priv_mode in shell.c in GNU Bash through 5.0 patch 11. By default, if Bash is run with its effective UID not equal to its real UID, it will drop privileges by setting its effective UID to its real UID. However, it does so incorrectly. On Linux and other systems that support \"saved UID\" functionality, the saved UID is not dropped. An attacker with command execution in the shell can use \"enable -f\" for runtime loading of a new builtin, which can be a shared object that calls setuid() and therefore regains privileges. However, binaries running with an effective UID of 0 are unaffected.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
}
|
||||
],
|
||||
"packageToFile": null
|
||||
},
|
||||
{
|
||||
"layerHash": "sha256:0b20d28b5eb3007f70c43cdd8efcdb04016aa193192e5911cda5b7590ffaa635",
|
||||
"parentLayerHash": "sha256:f7ec5a41d630a33a2d1db59b95d89d93de7ae5a619a3a8571b78457e48266eba",
|
||||
"vulnerabilities": [],
|
||||
"packageToFile": null
|
||||
},
|
||||
{
|
||||
"layerHash": "sha256:1576642c97761adf346890bf67c43473217160a9a203ef47d0bc6020af652798",
|
||||
"parentLayerHash": "sha256:0b20d28b5eb3007f70c43cdd8efcdb04016aa193192e5911cda5b7590ffaa635",
|
||||
"vulnerabilities": [],
|
||||
"packageToFile": null
|
||||
},
|
||||
{
|
||||
"layerHash": "sha256:c12a848bad84d57e3f5faafab5880484434aee3bf8bdde4d519753b7c81254fd",
|
||||
"parentLayerHash": "sha256:1576642c97761adf346890bf67c43473217160a9a203ef47d0bc6020af652798",
|
||||
"vulnerabilities": [],
|
||||
"packageToFile": null
|
||||
},
|
||||
{
|
||||
"layerHash": "sha256:03f221d9cf00a7077231c6dcac3c95182727c7e7fd44fd2b2e882a01dcda2d70",
|
||||
"parentLayerHash": "sha256:c12a848bad84d57e3f5faafab5880484434aee3bf8bdde4d519753b7c81254fd",
|
||||
"vulnerabilities": [],
|
||||
"packageToFile": null
|
||||
}
|
||||
],
|
||||
"listOfDangerousArtifcats": [
|
||||
"bin/dash",
|
||||
"bin/bash",
|
||||
"usr/bin/curl"
|
||||
]
|
||||
}
|
||||
`
|
||||
93
containerscan/rawdatastrucutres.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package containerscan
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
)
|
||||
|
||||
//!!! EVERY CHANGE IN THESE STRUCTURES => CHANGE gojayunmarshaller AS WELL !!!
|
||||
|
||||
// ScanResultReport - the report given from scanner to event receiver
|
||||
type ScanResultReport struct {
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
ImgTag string `json:"imageTag"`
|
||||
ImgHash string `json:"imageHash"`
|
||||
WLID string `json:"wlid"`
|
||||
ContainerName string `json:"containerName"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
Layers LayersList `json:"layers"`
|
||||
ListOfDangerousArtifcats []string `json:"listOfDangerousArtifcats"`
|
||||
}
|
||||
|
||||
// ScanResultLayer - represents a single layer from container scan result
|
||||
type ScanResultLayer struct {
|
||||
LayerHash string `json:"layerHash"`
|
||||
ParentLayerHash string `json:"parentLayerHash"`
|
||||
Vulnerabilities VulnerabilitiesList `json:"vulnerabilities"`
|
||||
Packages LinuxPkgs `json:"packageToFile"`
|
||||
}
|
||||
|
||||
type VulnerabilityCategory struct {
|
||||
IsRCE bool `json:"isRce"`
|
||||
}
|
||||
|
||||
// Vulnerability - a single vulnerability object
|
||||
type Vulnerability struct {
|
||||
Name string `json:"name"`
|
||||
ImgHash string `json:"imageHash"`
|
||||
ImgTag string `json:"imageTag"`
|
||||
RelatedPackageName string `json:"packageName"`
|
||||
PackageVersion string `json:"packageVersion"`
|
||||
Link string `json:"link"`
|
||||
Description string `json:"description"`
|
||||
Severity string `json:"severity"`
|
||||
Metadata interface{} `json:"metadata"`
|
||||
Fixes VulFixes `json:"fixedIn"`
|
||||
Relevancy string `json:"relevant"` // use the related enum
|
||||
UrgentCount int `json:"urgent"`
|
||||
NeglectedCount int `json:"neglected"`
|
||||
HealthStatus string `json:"healthStatus"`
|
||||
Categories VulnerabilityCategory `json:"categories"`
|
||||
}
|
||||
|
||||
// FixedIn - when and in which package version the vulnerability was fixed
|
||||
type FixedIn struct {
|
||||
Name string `json:"name"`
|
||||
ImgTag string `json:"imageTag"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
// LinuxPackage - Linux package representation
|
||||
type LinuxPackage struct {
|
||||
PackageName string `json:"packageName"`
|
||||
Files PkgFiles `json:"files"`
|
||||
PackageVersion string `json:"version"`
|
||||
}
|
||||
|
||||
// PackageFile - a single file that belongs to a package
|
||||
type PackageFile struct {
|
||||
Filename string `json:"name"`
|
||||
}
|
||||
|
||||
// types to provide unmarshalling:
|
||||
|
||||
// LayersList - slice of scan result layers
|
||||
type LayersList []ScanResultLayer
|
||||
|
||||
// VulnerabilitiesList - slice of vulnerabilities
|
||||
type VulnerabilitiesList []Vulnerability
|
||||
|
||||
// LinuxPkgs - slice of Linux packages
|
||||
type LinuxPkgs []LinuxPackage
|
||||
|
||||
// VulFixes - information about when/how this vulnerability was fixed
|
||||
type VulFixes []FixedIn
|
||||
|
||||
// PkgFiles - slice of files belonging to a specific package
|
||||
type PkgFiles []PackageFile
|
||||
|
||||
func (v *ScanResultReport) AsFNVHash() string {
|
||||
hasher := fnv.New64a()
|
||||
hasher.Write([]byte(fmt.Sprintf("%v", *v)))
|
||||
return fmt.Sprintf("%v", hasher.Sum64())
|
||||
}
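These structures mirror the JSON report shown in the mock data earlier. As a minimal, self-contained sketch of the hashing pattern used by `AsFNVHash` (standard library only; `miniReport` and its sample values are placeholders rather than the real `ScanResultReport`):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// miniReport stands in for containerscan.ScanResultReport; only a few fields
// are kept so the sketch stays self-contained.
type miniReport struct {
	CustomerGUID string
	ImgTag       string
	Timestamp    int64
}

// asFNVHash mirrors AsFNVHash above: format the whole struct with %v and
// hash the resulting bytes with 64-bit FNV-1a.
func asFNVHash(r miniReport) string {
	hasher := fnv.New64a()
	hasher.Write([]byte(fmt.Sprintf("%v", r)))
	return fmt.Sprintf("%v", hasher.Sum64())
}

func main() {
	r := miniReport{CustomerGUID: "some-guid", ImgTag: "nginx:latest", Timestamp: 1634000000}
	// Two byte-identical reports always produce the same identifier, which is
	// what makes the hash usable for deduplicating reports.
	fmt.Println(asFNVHash(r))
}
```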
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 44 KiB After Width: | Height: | Size: 53 KiB |
@@ -1,4 +1,4 @@
|
||||
# Container image vulnerabilty adaptor interface proposal
|
||||
# Container image vulnerability adaptor interface proposal
|
||||
|
||||
## Rationale
|
||||
|
||||
@@ -6,7 +6,7 @@ source #287
|
||||
|
||||
### Big picture
|
||||
|
||||
* Kubescape team planning to create controls which take into account image vulnerabilities, example: looking for public internet facing workloads with critical vulnerabilities. These are seriously effecting the security health of a cluster and therefore we think it is important to cover it. We think that most container registries are/will support image scanning like Harbor and therefore the ability to get information from them is important.
|
||||
* The Kubescape team is planning to create controls which take image vulnerabilities into account, for example: looking for public, internet-facing workloads with critical vulnerabilities. These seriously affect the security health of a cluster, and therefore we think it is important to cover them. We think that most container registries support or will support image scanning (Harbor, for example), and therefore the ability to get information from them is important.
|
||||
* There is also information in the image repository which is important for existing controls; they are incomplete without it (for example, see this issue: Non-root containers check is broken #19). This is not necessarily related to image vulnerabilities: it can be information in the image manifest (as in the issue above), or it can be related to the image BOM.
|
||||
|
||||
### Relation to this proposal
|
||||
@@ -89,7 +89,7 @@ type ContainerImageScanStatus struct {
|
||||
LastScanDate time.Time
|
||||
}
|
||||
|
||||
type ContainerImageVulnerability struct {
|
||||
type ContainerImageVulnerabilityReport struct {
|
||||
ImageID ContainerImageIdentifier
|
||||
// TBD
|
||||
}
|
||||
@@ -110,8 +110,67 @@ type IContainerImageVulnerabilityAdaptor interface {
|
||||
|
||||
GetImagesScanStatus(imageIDs []ContainerImageIdentifier) ([]ContainerImageScanStatus, error)
|
||||
|
||||
GetImagesVulnerabilties(imageIDs []ContainerImageIdentifier) ([]ContainerImageVulnerability, error)
|
||||
GetImagesVulnerabilties(imageIDs []ContainerImageIdentifier) ([]ContainerImageVulnerabilityReport, error)
|
||||
|
||||
GetImagesInformation(imageIDs []ContainerImageIdentifier) ([]ContainerImageInformation, error)
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
# Integration
|
||||
|
||||
# Input
|
||||
|
||||
The objects received from the interface will be converted to IMetadata-compatible objects as follows:
|
||||
|
||||
```
|
||||
{
|
||||
"apiVersion": "image.vulnscan.com/v1",
|
||||
"kind": "VulnScan",
|
||||
"metadata": {
|
||||
"name": "nginx:latest"
|
||||
},
|
||||
"data": {
|
||||
// returned by the adaptor API (structured like what our backend returns for an image)
|
||||
}
|
||||
}
|
||||
```
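A rough Go sketch of assembling such an envelope (the `buildVulnScanEnvelope` helper and the sample payload are illustrative assumptions, not part of the proposal):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// buildVulnScanEnvelope wraps raw adaptor output in the envelope shown above.
// scanData stands in for whatever structure the adaptor returns for an image.
func buildVulnScanEnvelope(imageTag string, scanData interface{}) map[string]interface{} {
	return map[string]interface{}{
		"apiVersion": "image.vulnscan.com/v1",
		"kind":       "VulnScan",
		"metadata":   map[string]interface{}{"name": imageTag},
		"data":       scanData,
	}
}

func main() {
	envelope := buildVulnScanEnvelope("nginx:latest", map[string]interface{}{"vulnerabilities": []string{}})
	out, _ := json.MarshalIndent(envelope, "", "  ")
	fmt.Println(string(out))
}
```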
|
||||
|
||||
|
||||
# Output
|
||||
|
||||
The rego results will be a combination of the k8s artifact and the list of relevant CVEs for the control
|
||||
|
||||
```
|
||||
{
|
||||
"apiVersion": "result.vulnscan.com/v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {
|
||||
"name": "nginx"
|
||||
},
|
||||
"relatedObjects": [
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {
|
||||
"name": "nginx"
|
||||
},
|
||||
"spec": {
|
||||
// podSpec
|
||||
},
|
||||
},
|
||||
{
|
||||
"apiVersion": "container.vulnscan.com/v1",
|
||||
"kind": "VulnScan",
|
||||
"metadata": {
|
||||
"name": "nginx:latest",
|
||||
},
|
||||
"data": {
|
||||
|
||||
// returned by the adaptor API (structured like what our backend returns for an image)
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
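For illustration, a consumer of this output could extract the VulnScan related object roughly like this (a sketch only; `pickVulnScan` and the trimmed JSON literal are hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// pickVulnScan returns the first relatedObject whose kind is "VulnScan",
// following the output envelope shown above.
func pickVulnScan(result map[string]interface{}) map[string]interface{} {
	related, _ := result["relatedObjects"].([]interface{})
	for _, obj := range related {
		if m, ok := obj.(map[string]interface{}); ok && m["kind"] == "VulnScan" {
			return m
		}
	}
	return nil
}

func main() {
	raw := `{"kind":"Pod","relatedObjects":[{"kind":"Pod"},{"kind":"VulnScan","metadata":{"name":"nginx:latest"}}]}`
	var result map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &result); err != nil {
		panic(err)
	}
	fmt.Println(pickVulnScan(result)) // prints the VulnScan related object
}
```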
|
||||
23
docs/roadmap.md
Normal file
@@ -0,0 +1,23 @@
|
||||
# Kubescape project roadmap
|
||||
|
||||
|
||||
## Proposals
|
||||
* [Container registry integration](/docs/proposals/container-image-vulnerability-adaptor.md)
|
||||
|
||||
## Planned features
|
||||
* Image vulnerability scanning-based controls
|
||||
* Assisted remediation (telling where/what to fix)
|
||||
* Git integration for pull requests
|
||||
* Integration with container registries
|
||||
* Custom controls and regos
|
||||
* API server configuration validation
|
||||
* Kubelet configuration validation
|
||||
|
||||
## Completed features
|
||||
* Integration with Prometheus
|
||||
* Configuration of controls (customizing rules for a given environment)
|
||||
* Installation in the cluster for continuous monitoring
|
||||
* Host scanner
|
||||
* Cloud vendor API integration
|
||||
* Custom exceptions
|
||||
* Custom frameworks
|
||||
68
examples/cloud-vendor-integration/aws.sh
Executable file
@@ -0,0 +1,68 @@
|
||||
#!/bin/bash
|
||||
|
||||
# AWS
|
||||
# Attach the Kubescape service account to an AWS IAM role with the eks:DescribeCluster permission
|
||||
|
||||
# Prerequisites:
|
||||
# eksctl, awscli v2
|
||||
|
||||
# Set environment variables
|
||||
echo 'Set environment variables'
|
||||
export kubescape_namespace=armo-system
|
||||
export kubescape_serviceaccount=armo-kubescape-service-account
|
||||
|
||||
# Get current context
|
||||
echo 'Get current context'
|
||||
export context=$(kubectl config current-context)
|
||||
|
||||
# Get cluster arn
|
||||
echo 'Get cluster arn'
|
||||
export cluster_arn=$(kubectl config view -o jsonpath="{.contexts[?(@.name == \"$context\")].context.cluster}")
|
||||
|
||||
# Get cluster name
|
||||
echo 'Get cluster name'
|
||||
export cluster_name=$(echo "$cluster_arn" | awk -F'/' '{print $NF}')
|
||||
|
||||
# Get cluster region
|
||||
echo 'Get cluster region'
|
||||
export cluster_region=$(echo "$cluster_arn" | awk -F':' '{print $4}')
|
||||
|
||||
# First step, Create IAM OIDC provider for the cluster (Not required if the third step runs as is):
|
||||
echo 'Create IAM OIDC provider for the cluster'
|
||||
eksctl utils associate-iam-oidc-provider --cluster $cluster_name --approve
|
||||
|
||||
# Second step, Create a policy and service account role:
|
||||
# Create a kubescape policy
|
||||
echo 'Create a kubescape policy'
|
||||
export kubescape_policy_arn=$(aws iam create-policy \
|
||||
--output yaml \
|
||||
--query 'Policy.Arn' \
|
||||
--policy-name kubescape \
|
||||
--policy-document \
|
||||
"$(cat <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "eks:DescribeCluster",
|
||||
"Resource": "$cluster_arn"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
)")
|
||||
|
||||
# Create Kubernetes Kubescape service account, and AWS IAM attachment role
|
||||
echo 'Create Kubernetes Kubescape service account, and AWS IAM attachment role'
|
||||
eksctl create iamserviceaccount \
|
||||
--name $kubescape_serviceaccount \
|
||||
--namespace $kubescape_namespace \
|
||||
--cluster $cluster_name \
|
||||
--attach-policy-arn $kubescape_policy_arn \
|
||||
--approve \
|
||||
--override-existing-serviceaccounts
|
||||
|
||||
# Install/Upgrade Kubescape chart
|
||||
echo 'Install/Upgrade Kubescape chart'
|
||||
helm upgrade --install armo armo-components/ -n armo-system --create-namespace --set clusterName=$cluster_name --set cloud_provider_engine=eks --set createKubescapeServiceAccount=false --set cloudRegion=$cluster_region
|
||||
49
examples/cloud-vendor-integration/gcp.sh
Executable file
@@ -0,0 +1,49 @@
|
||||
#!/bin/bash
|
||||
|
||||
# GCP
|
||||
# Attach the Kubescape service account to a GCP service account with the container.clusters.get permission
|
||||
|
||||
# Prerequisites:
|
||||
# gcloud
|
||||
# Workload Identity enabled on the cluster.
|
||||
# Node pool with --workload-metadata=GKE_METADATA where the pod can run.
|
||||
# CLUSTER_NAME and CLUSTER_REGION environment variables.
|
||||
|
||||
[ -z $CLUSTER_NAME ] && >&2 echo "Please set the CLUSTER_NAME environment variable" && exit 1
|
||||
[ -z $CLUSTER_REGION ] && >&2 echo "Please set the CLUSTER_REGION environment variable" && exit 1
|
||||
|
||||
# Create GCP service account
|
||||
gcloud iam service-accounts create kubescape --display-name=kubescape
|
||||
|
||||
# Set environment variables
|
||||
echo 'Set environment variables'
|
||||
export kubescape_namespace=armo-system
|
||||
export kubescape_serviceaccount=armo-kubescape-service-account
|
||||
|
||||
# Get current GCP project
|
||||
echo 'Get current GCP project'
|
||||
export gcp_project=$(gcloud config get-value project)
|
||||
|
||||
sleep 5
|
||||
# Get service account email
|
||||
echo 'Get service account email'
|
||||
export gcp_service_account=$(gcloud iam service-accounts list --filter="email ~ kubescape@" --format="value(email)")
|
||||
|
||||
# Create custom cluster.get role
|
||||
echo 'Create custom cluster.get role'
|
||||
export custom_role_name=$(gcloud iam roles create kubescape --project=$gcp_project --title='Armo kubernetes' --description='Allow clusters.get to Kubernetes armo service account' --permissions=container.clusters.get --stage=GA --format='value(name)')
|
||||
|
||||
# Attach policies to the service account
|
||||
echo 'Attach policies to the service account'
|
||||
gcloud --quiet projects add-iam-policy-binding $gcp_project --member serviceAccount:$gcp_service_account --role $custom_role_name >/dev/null
|
||||
gcloud --quiet projects add-iam-policy-binding $gcp_project --member serviceAccount:$gcp_service_account --role roles/storage.objectViewer >/dev/null
|
||||
|
||||
# If there are missing permissions, use this role instead
|
||||
# gcloud --quiet projects add-iam-policy-binding $gcp_project --member serviceAccount:$gcp_service_account --role roles/container.clusterViewer
|
||||
|
||||
# Bind the GCP kubescape service account to kubescape kubernetes service account
|
||||
gcloud iam service-accounts add-iam-policy-binding $gcp_service_account --role roles/iam.workloadIdentityUser --member "serviceAccount:${gcp_project}.svc.id.goog[${kubescape_namespace}/${kubescape_serviceaccount}]"
|
||||
|
||||
# Install/Upgrade Kubescape chart
|
||||
echo 'Install/Upgrade Kubescape chart'
|
||||
helm upgrade --install armo armo-components/ -n armo-system --create-namespace --set cloud_provider_engine=gke --set gke_service_account=$gcp_service_account --set cloudRegion=$CLUSTER_REGION --set clusterName=$CLUSTER_NAME --set gkeProject=$gcp_project
|
||||
30
go.mod
@@ -3,22 +3,24 @@ module github.com/armosec/kubescape
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/armosec/armoapi-go v0.0.40
|
||||
github.com/armosec/k8s-interface v0.0.50
|
||||
github.com/armosec/opa-utils v0.0.78
|
||||
github.com/armosec/rbac-utils v0.0.10
|
||||
github.com/armosec/armoapi-go v0.0.49
|
||||
github.com/armosec/k8s-interface v0.0.60
|
||||
github.com/armosec/opa-utils v0.0.107
|
||||
github.com/armosec/rbac-utils v0.0.14
|
||||
github.com/armosec/utils-go v0.0.3
|
||||
github.com/armosec/utils-k8s-go v0.0.1
|
||||
github.com/briandowns/spinner v1.18.0
|
||||
github.com/enescakir/emoji v1.0.0
|
||||
github.com/fatih/color v1.13.0
|
||||
github.com/francoispqt/gojay v1.2.13
|
||||
github.com/gofrs/uuid v4.1.0+incompatible
|
||||
github.com/golang/glog v1.0.0
|
||||
github.com/mattn/go-isatty v0.0.14
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/open-policy-agent/opa v0.33.1
|
||||
github.com/satori/go.uuid v1.2.0
|
||||
github.com/spf13/cobra v1.2.1
|
||||
github.com/stretchr/testify v1.7.0
|
||||
go.uber.org/zap v1.19.1
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
k8s.io/api v0.22.2
|
||||
k8s.io/apimachinery v0.22.2
|
||||
@@ -35,23 +37,34 @@ require (
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/OneOfOne/xxhash v1.2.8 // indirect
|
||||
github.com/armosec/utils-k8s-go v0.0.1 // indirect
|
||||
github.com/aws/aws-sdk-go v1.41.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.7.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.9.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.1.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/eks v1.17.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.6.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.8.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.13.0 // indirect
|
||||
github.com/aws/smithy-go v1.9.1 // indirect
|
||||
github.com/coreos/go-oidc v2.2.1+incompatible // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/docker v20.10.9+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
|
||||
github.com/francoispqt/gojay v1.2.13 // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/go-gota/gota v0.12.0 // indirect
|
||||
github.com/go-logr/logr v0.4.0 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/glog v1.0.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-cmp v0.5.5 // indirect
|
||||
github.com/google/go-cmp v0.5.6 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
@@ -76,7 +89,6 @@ require (
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.19.1 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect
|
||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 // indirect
|
||||
|
||||
42
go.sum
@@ -85,18 +85,19 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armosec/armoapi-go v0.0.2/go.mod h1:vIK17yoKbJRQyZXWWLe3AqfqCRITxW8qmSkApyq5xFs=
|
||||
github.com/armosec/armoapi-go v0.0.23/go.mod h1:iaVVGyc23QGGzAdv4n+szGQg3Rbpixn9yQTU3qWRpaw=
|
||||
github.com/armosec/armoapi-go v0.0.40 h1:KQRJXFqw95s6cV7HoGgw1x8qrRZ9eNVze//yQbo24Lk=
|
||||
github.com/armosec/armoapi-go v0.0.40/go.mod h1:iaVVGyc23QGGzAdv4n+szGQg3Rbpixn9yQTU3qWRpaw=
|
||||
github.com/armosec/armoapi-go v0.0.49 h1:b3gvZ5YB5DSEfk8pt7x0705b4Pcuahd3wI/ZmGYmB3Y=
|
||||
github.com/armosec/armoapi-go v0.0.49/go.mod h1:iaVVGyc23QGGzAdv4n+szGQg3Rbpixn9yQTU3qWRpaw=
|
||||
github.com/armosec/k8s-interface v0.0.8/go.mod h1:xxS+V5QT3gVQTwZyAMMDrYLWGrfKOpiJ7Jfhfa0w9sM=
|
||||
github.com/armosec/k8s-interface v0.0.37/go.mod h1:vHxGWqD/uh6+GQb9Sqv7OGMs+Rvc2dsFVc0XtgRh1ZU=
|
||||
github.com/armosec/k8s-interface v0.0.50 h1:iLPGI0j85vwKANr9QDAnba4Efjg3DyIJg15jRJdvOnc=
|
||||
github.com/armosec/k8s-interface v0.0.50/go.mod h1:vHxGWqD/uh6+GQb9Sqv7OGMs+Rvc2dsFVc0XtgRh1ZU=
|
||||
github.com/armosec/k8s-interface v0.0.60 h1:jTCiO15QQbHVuxFQ928rp4srf1rQoUzeybfcbv/cuss=
|
||||
github.com/armosec/k8s-interface v0.0.60/go.mod h1:g0jv/fG+VqpT5ivO6D2gJcJ/w68BiffDz+PcU9YFbL4=
|
||||
github.com/armosec/opa-utils v0.0.64/go.mod h1:6tQP8UDq2EvEfSqh8vrUdr/9QVSCG4sJfju1SXQOn4c=
|
||||
github.com/armosec/opa-utils v0.0.78 h1:wFkVqJ1vEftn3E1hHdUfKlp5xHgof616ljyKKebuNkI=
|
||||
github.com/armosec/opa-utils v0.0.78/go.mod h1:ZOXYVTtuyrV4TldcfbzgRqP6F9Drlf4hB0zr210OXgM=
|
||||
github.com/armosec/opa-utils v0.0.107 h1:P+SACquUDMbXcOYIbQ+uzwcdJlrguXOTI42PHEJG2NU=
|
||||
github.com/armosec/opa-utils v0.0.107/go.mod h1:Wc1P4gkB6UQeGW8I76zCuitGGl15Omp0bKw7N0tR9dk=
|
||||
github.com/armosec/rbac-utils v0.0.1/go.mod h1:pQ8CBiij8kSKV7aeZm9FMvtZN28VgA7LZcYyTWimq40=
|
||||
github.com/armosec/rbac-utils v0.0.10 h1:bFjesO8+xJS1ryR9vqj4xFEo1cQ0HvClzR+LWHzozW4=
|
||||
github.com/armosec/rbac-utils v0.0.10/go.mod h1:Ex/IdGWhGv9HZq6Hs8N/ApzCKSIvpNe/ETqDfnuyah0=
|
||||
github.com/armosec/rbac-utils v0.0.14 h1:CKYKcgqJEXWF2Hen/B1pVGtS3nDAG1wp9dDv6oNtq90=
|
||||
github.com/armosec/rbac-utils v0.0.14/go.mod h1:Ex/IdGWhGv9HZq6Hs8N/ApzCKSIvpNe/ETqDfnuyah0=
|
||||
github.com/armosec/utils-go v0.0.2/go.mod h1:itWmRLzRdsnwjpEOomL0mBWGnVNNIxSjDAdyc+b0iUo=
|
||||
github.com/armosec/utils-go v0.0.3 h1:uyQI676yRciQM0sSN9uPoqHkbspTxHO0kmzXhBeE/xU=
|
||||
github.com/armosec/utils-go v0.0.3/go.mod h1:itWmRLzRdsnwjpEOomL0mBWGnVNNIxSjDAdyc+b0iUo=
|
||||
@@ -106,6 +107,30 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l
|
||||
github.com/aws/aws-sdk-go v1.41.1/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go v1.41.11 h1:QLouWsiYQ8i22kD8k58Dpdhio1A0MpT7bg9ZNXqEjuI=
|
||||
github.com/aws/aws-sdk-go v1.41.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go-v2 v1.12.0 h1:z5bijqy+eXLK/QqF6eQcwCN2qw1k+m9OUDicqCZygu0=
|
||||
github.com/aws/aws-sdk-go-v2 v1.12.0/go.mod h1:tWhQI5N5SiMawto3uMAQJU5OUN/1ivhDDHq7HTsJvZ0=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.12.0 h1:WOhIzj5HdixjlvQ4SLYAOk6OUUsuu88RwcsTzexa9cg=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.12.0/go.mod h1:GQONFVSDdG6RRho1C730SGNyDhS1kSTnxpOE76ptBqo=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.7.0 h1:KFuKwPs7i5SE5a0LxqAxz75qxSjr2HnHnhu0UPGlvpM=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.7.0/go.mod h1:Kmq64kahHJtXfmnEwnvRKeNjLBqkdP++Itln9BmQerE=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.9.0 h1:fPq3oloONbHaA0O8KX/KYUQk7pG9JjKBwYQvQsQDK84=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.9.0/go.mod h1:19SxQ+9zANyJCnNaoF3ovl8bFil4TaqCYEDdqNGKM+A=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.3 h1:YPNiEXnuWdkpNOwBFHhcLwkSmewwQRcPFO9dHmxU0qg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.3/go.mod h1:L72JSFj9OwHwyukeuKFFyTj6uFWE4AjB0IQp97bd9Lc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.1.0 h1:ArRd27pSm66f7cCBDPS77wvxiS4IRjFatpzVBD7Aojc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.1.0/go.mod h1:KdVvdk4gb7iatuHZgIkIqvJlWHBtjCJLUtD/uO/FkWw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.3 h1:fmGqMNlFTHr9Y48qmYYv2qIo+TAsST3qZa2d1HcwBeo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.3/go.mod h1:N4dv+zawriMFZBO/6UKg3zt+XO6xWOQo1neAA0lFbo4=
|
||||
github.com/aws/aws-sdk-go-v2/service/eks v1.17.0 h1:lal3erO1VVVSnw3a47pRiCTne+9mGh9IyJDIgwWD02o=
|
||||
github.com/aws/aws-sdk-go-v2/service/eks v1.17.0/go.mod h1:YHVf/zIAi9lGVhG1TakeJp7LaUHFS99yme9e78+r+8A=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.6.0 h1:rwE0kWa5qm0yEoNPwC3zhrt1tFVXTmkWRlUxLayAwyc=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.6.0/go.mod h1:wTgFkG6t7jS/6Y0SILXwfspV3IXowb6ngsAlSajW0Kc=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.8.0 h1:X77LUt6Djy3Z02r6tW7Z+4FNr6GCnEG54EXfskc19M4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.8.0/go.mod h1:AB6v3BedyhVRIbPQbJnUsBmtup2pFiikpp5n3YyB6Ac=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.13.0 h1:n8+dZMOvwkGtmhub8B2wYvRHut45/NB7DeNhNcUnBpg=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.13.0/go.mod h1:jQto17aC9pJ6xRa1g29uXZhbcS6qNT3PSnKfPShq4sY=
|
||||
github.com/aws/smithy-go v1.9.1 h1:5vetTooLk4hPWV8q6ym6+lXKAT1Urnm49YkrRKo2J8o=
|
||||
github.com/aws/smithy-go v1.9.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
|
||||
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
@@ -313,8 +338,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
package hostsensorutils
|
||||
|
||||
const hostSensorYAML = `apiVersion: v1
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
labels:
|
||||
@@ -62,4 +60,4 @@ spec:
|
||||
name: host-filesystem
|
||||
hostNetwork: true
|
||||
hostPID: true
|
||||
hostIPC: true`
|
||||
hostIPC: true
|
||||
@@ -1,6 +1,7 @@
|
||||
package hostsensorutils
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
@@ -9,6 +10,8 @@ import (
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -18,6 +21,11 @@ import (
|
||||
coreapplyv1 "k8s.io/client-go/applyconfigurations/core/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
//go:embed hostsensor.yaml
|
||||
hostSensorYAML string
|
||||
)
|
||||
|
||||
type HostSensorHandler struct {
|
||||
HostSensorPort int32
|
||||
HostSensorPodNames map[string]string //map from pod names to node names
|
||||
@@ -54,15 +62,17 @@ func (hsh *HostSensorHandler) Init() error {
|
||||
// store namespace + port
|
||||
// store pod names
|
||||
// make sure all pods are running; after X seconds treat them as running anyway, and log an error for the pods that are not running yet
|
||||
cautils.ProgressTextDisplay("Installing host sensor")
|
||||
logger.L().Info("Installing host sensor")
|
||||
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
|
||||
if err := hsh.applyYAML(); err != nil {
|
||||
return fmt.Errorf("in HostSensorHandler init failed to apply YAML: %v", err)
|
||||
}
|
||||
hsh.populatePodNamesToNodeNames()
|
||||
if err := hsh.checkPodForEachNode(); err != nil {
|
||||
fmt.Printf("failed to validate host-sensor pods status: %v", err)
|
||||
logger.L().Error("failed to validate host-sensor pods status", helpers.Error(err))
|
||||
}
|
||||
return nil
|
||||
}
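For reference, the `//go:embed` pattern this change switches to works as in the following minimal sketch (`example.yaml` is a placeholder file name that must exist next to the package at build time; it is not the real hostsensor.yaml):

```go
package main

import (
	_ "embed" // blank import enables the //go:embed directive
	"fmt"
)

// The path is resolved relative to the package directory at build time.
//
//go:embed example.yaml
var exampleYAML string

func main() {
	// The file contents are compiled into the binary, so no runtime file
	// access is needed to read the manifest.
	fmt.Println(exampleYAML)
}
```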
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
@@ -154,7 +155,8 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
|
||||
if hsh.DaemonSet == nil {
|
||||
return res, nil
|
||||
}
|
||||
cautils.ProgressTextDisplay("Accessing host sensor")
|
||||
|
||||
logger.L().Info("Accessing host sensor")
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
kcData, err := hsh.GetKubeletConfigurations()
|
||||
@@ -193,6 +195,7 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
|
||||
}
|
||||
res = append(res, kcData...)
|
||||
// finish
|
||||
cautils.SuccessTextDisplay("Read host information from host sensor")
|
||||
|
||||
logger.L().Success("Read host information from host sensor")
|
||||
return res, nil
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ echo
|
||||
|
||||
BASE_DIR=~/.kubescape
|
||||
KUBESCAPE_EXEC=kubescape
|
||||
KUBESCAPE_ZIP=kubescape.zip
|
||||
|
||||
osName=$(uname -s)
|
||||
if [[ $osName == *"MINGW"* ]]; then
|
||||
|
||||
85
mocks/loadmocks.go
Normal file
File diff suppressed because one or more lines are too long
@@ -5,12 +5,15 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
ksscore "github.com/armosec/kubescape/score"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/score"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
"github.com/open-policy-agent/opa/storage"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
@@ -18,7 +21,6 @@ import (
|
||||
"github.com/armosec/opa-utils/resources"
|
||||
"github.com/open-policy-agent/opa/ast"
|
||||
"github.com/open-policy-agent/opa/rego"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
)
|
||||
|
||||
const ScoreConfigPath = "/resources/config"
|
||||
@@ -58,212 +60,196 @@ func (opaHandler *OPAProcessorHandler) ProcessRulesListenner() {
|
||||
opaSessionObj := <-*opaHandler.processedPolicy
|
||||
opap := NewOPAProcessor(opaSessionObj, opaHandler.regoDependenciesData)
|
||||
|
||||
policies := ConvertFrameworksToPolicies(opap.Frameworks, cautils.BuildNumber)
|
||||
|
||||
ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Frameworks, policies)
|
||||
|
||||
// process
|
||||
if err := opap.Process(); err != nil {
|
||||
// fmt.Println(err)
|
||||
if err := opap.Process(policies); err != nil {
|
||||
logger.L().Error(err.Error())
|
||||
}
|
||||
|
||||
// edit results
|
||||
opap.updateResults()
|
||||
|
||||
// update score
|
||||
scoreutil := score.NewScore(opaSessionObj.AllResources)
|
||||
scoreutil.Calculate(opaSessionObj.PostureReport.FrameworkReports)
|
||||
|
||||
//TODO: review this location
|
||||
scorewrapper := ksscore.NewScoreWrapper(opaSessionObj)
|
||||
scorewrapper.Calculate(ksscore.EPostureReportV2)
|
||||
// report
|
||||
*opaHandler.reportResults <- opaSessionObj
|
||||
}
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) Process() error {
|
||||
// glog.Infof(fmt.Sprintf("Starting 'Process'. reportID: %s", opap.PostureReport.ReportID))
|
||||
cautils.ProgressTextDisplay(fmt.Sprintf("Scanning cluster %s", cautils.ClusterName))
|
||||
func (opap *OPAProcessor) Process(policies *cautils.Policies) error {
|
||||
logger.L().Info(fmt.Sprintf("Scanning cluster %s", cautils.ClusterName))
|
||||
|
||||
cautils.StartSpinner()
|
||||
frameworkReports := []reporthandling.FrameworkReport{}
|
||||
|
||||
var errs error
|
||||
for i := range opap.Frameworks {
|
||||
frameworkReport, err := opap.processFramework(&opap.Frameworks[i])
|
||||
for _, control := range policies.Controls {
|
||||
|
||||
resourcesAssociatedControl, err := opap.processControl(&control)
|
||||
if err != nil {
|
||||
appendError(&errs, err)
|
||||
logger.L().Error(err.Error())
|
||||
}
|
||||
// update resources with latest results
|
||||
if len(resourcesAssociatedControl) != 0 {
|
||||
for resourceID, controlResult := range resourcesAssociatedControl {
|
||||
if _, ok := opap.ResourcesResult[resourceID]; !ok {
|
||||
opap.ResourcesResult[resourceID] = resourcesresults.Result{ResourceID: resourceID}
|
||||
}
|
||||
t := opap.ResourcesResult[resourceID]
|
||||
t.AssociatedControls = append(t.AssociatedControls, controlResult)
|
||||
opap.ResourcesResult[resourceID] = t
|
||||
}
|
||||
}
|
||||
frameworkReports = append(frameworkReports, *frameworkReport)
|
||||
}
|
||||
|
||||
opap.PostureReport.FrameworkReports = frameworkReports
|
||||
opap.PostureReport.ReportID = uuid.NewV4().String()
|
||||
opap.PostureReport.ReportGenerationTime = time.Now().UTC()
|
||||
// glog.Infof(fmt.Sprintf("Done 'Process'. reportID: %s", opap.PostureReport.ReportID))
|
||||
opap.Report.ReportGenerationTime = time.Now().UTC()
|
||||
|
||||
cautils.StopSpinner()
|
||||
cautils.SuccessTextDisplay(fmt.Sprintf("Done scanning cluster %s", cautils.ClusterName))
|
||||
logger.L().Success(fmt.Sprintf("Done scanning cluster %s", cautils.ClusterName))
|
||||
return errs
|
||||
}
|
||||
|
||||
func appendError(errs *error, err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
if errs == nil {
|
||||
errs = &err
|
||||
} else {
|
||||
*errs = fmt.Errorf("%v\n%s", *errs, err.Error())
|
||||
}
|
||||
}
|
||||
func (opap *OPAProcessor) processFramework(framework *reporthandling.Framework) (*reporthandling.FrameworkReport, error) {
|
||||
func (opap *OPAProcessor) processControl(control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, error) {
|
||||
var errs error
|
||||
|
||||
frameworkReport := reporthandling.FrameworkReport{}
|
||||
frameworkReport.Name = framework.Name
|
||||
resourcesAssociatedControl := make(map[string]resourcesresults.ResourceAssociatedControl)
|
||||
|
||||
controlReports := []reporthandling.ControlReport{}
|
||||
for i := range framework.Controls {
|
||||
controlReport, err := opap.processControl(&framework.Controls[i])
|
||||
if err != nil {
|
||||
appendError(&errs, err)
|
||||
// errs = fmt.Errorf("%v\n%s", errs, err.Error())
|
||||
}
|
||||
if controlReport != nil {
|
||||
controlReports = append(controlReports, *controlReport)
|
||||
}
|
||||
}
|
||||
frameworkReport.ControlReports = controlReports
|
||||
return &frameworkReport, errs
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) processControl(control *reporthandling.Control) (*reporthandling.ControlReport, error) {
|
||||
var errs error
|
||||
|
||||
controlReport := reporthandling.ControlReport{}
|
||||
controlReport.PortalBase = control.PortalBase
|
||||
controlReport.ControlID = control.ControlID
|
||||
controlReport.BaseScore = control.BaseScore
|
||||
|
||||
controlReport.Control_ID = control.Control_ID // TODO: delete when 'id' is deprecated
|
||||
|
||||
controlReport.Name = control.Name
|
||||
controlReport.Description = control.Description
|
||||
controlReport.Remediation = control.Remediation
|
||||
|
||||
ruleReports := []reporthandling.RuleReport{}
|
||||
// ruleResults := make(map[string][]resourcesresults.ResourceAssociatedRule)
|
||||
for i := range control.Rules {
|
||||
ruleReport, err := opap.processRule(&control.Rules[i])
|
||||
resourceAssociatedRule, err := opap.processRule(&control.Rules[i])
|
||||
if err != nil {
|
||||
appendError(&errs, err)
|
||||
logger.L().Error(err.Error())
|
||||
continue
|
||||
}
|
||||
if ruleReport != nil {
|
||||
ruleReports = append(ruleReports, *ruleReport)
|
||||
|
||||
// append failed rules to controls
|
||||
if len(resourceAssociatedRule) != 0 {
|
||||
for resourceID, ruleResponse := range resourceAssociatedRule {
|
||||
|
||||
controlResult := resourcesresults.ResourceAssociatedControl{}
|
||||
controlResult.SetID(control.ControlID)
|
||||
controlResult.SetName(control.Name)
|
||||
|
||||
if _, ok := resourcesAssociatedControl[resourceID]; ok {
|
||||
controlResult.ResourceAssociatedRules = resourcesAssociatedControl[resourceID].ResourceAssociatedRules
|
||||
}
|
||||
if ruleResponse != nil {
|
||||
controlResult.ResourceAssociatedRules = append(controlResult.ResourceAssociatedRules, *ruleResponse)
|
||||
}
|
||||
resourcesAssociatedControl[resourceID] = controlResult
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(ruleReports) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
controlReport.RuleReports = ruleReports
|
||||
return &controlReport, errs
|
||||
|
||||
return resourcesAssociatedControl, errs
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (*reporthandling.RuleReport, error) {
|
||||
if ruleWithArmoOpaDependency(rule.Attributes) || !isRuleKubescapeVersionCompatible(rule) {
|
||||
return nil, nil
|
||||
}
|
||||
func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (map[string]*resourcesresults.ResourceAssociatedRule, error) {
|
||||
|
||||
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs) // get store
|
||||
|
||||
inputResources, err := reporthandling.RegoResourcesAggregator(rule, getAllSupportedObjects(opap.K8SResources, opap.AllResources, rule))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting aggregated k8sObjects: %s", err.Error())
|
||||
}
|
||||
if len(inputResources) == 0 {
|
||||
return nil, nil // no resources found for testing
|
||||
}
|
||||
|
||||
inputRawResources := workloadinterface.ListMetaToMap(inputResources)
|
||||
|
||||
ruleReport, err := opap.runOPAOnSingleRule(rule, inputRawResources, ruleData)
|
||||
if err != nil {
|
||||
// ruleReport.RuleStatus.Status = reporthandling.StatusFailed
|
||||
ruleReport.RuleStatus.Status = "failure"
|
||||
ruleReport.RuleStatus.Message = err.Error()
|
||||
glog.Error(err)
|
||||
} else {
|
||||
ruleReport.RuleStatus.Status = reporthandling.StatusPassed
|
||||
}
|
||||
|
||||
resources := map[string]*resourcesresults.ResourceAssociatedRule{}
|
||||
// the failed resources are a subset of the enumeratedData, so we store the enumeratedData as if it were the input data
|
||||
enumeratedData, err := opap.enumerateData(rule, inputRawResources)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inputResources = objectsenvelopes.ListMapToMeta(enumeratedData)
|
||||
ruleReport.ListInputKinds = workloadinterface.ListMetaIDs(inputResources)
|
||||
|
||||
for i := range inputResources {
|
||||
resources[inputResources[i].GetID()] = &resourcesresults.ResourceAssociatedRule{
|
||||
Name: rule.Name,
|
||||
ControlConfigurations: postureControlInputs,
|
||||
Status: apis.StatusPassed,
|
||||
}
|
||||
opap.AllResources[inputResources[i].GetID()] = inputResources[i]
|
||||
}
|
||||
|
||||
failedResources := objectsenvelopes.ListMapToMeta(ruleReport.GetFailedResources())
|
||||
for i := range failedResources {
|
||||
if r, ok := opap.AllResources[failedResources[i].GetID()]; !ok {
|
||||
opap.AllResources[failedResources[i].GetID()] = r
|
||||
}
|
||||
}
|
||||
warningResources := objectsenvelopes.ListMapToMeta(ruleReport.GetWarnignResources())
|
||||
for i := range warningResources {
|
||||
if r, ok := opap.AllResources[warningResources[i].GetID()]; !ok {
|
||||
opap.AllResources[warningResources[i].GetID()] = r
|
||||
ruleResponses, err := opap.runOPAOnSingleRule(rule, inputRawResources, ruleData, postureControlInputs)
|
||||
if err != nil {
|
||||
// TODO - Handle error
|
||||
logger.L().Error(err.Error())
|
||||
} else {
|
||||
// ruleResponse to ruleResult
|
||||
for i := range ruleResponses {
|
||||
failedResources := objectsenvelopes.ListMapToMeta(ruleResponses[i].GetFailedResources())
|
||||
for j := range failedResources {
|
||||
ruleResult := &resourcesresults.ResourceAssociatedRule{}
|
||||
if r, k := resources[failedResources[j].GetID()]; k {
|
||||
ruleResult = r
|
||||
}
|
||||
|
||||
ruleResult.Status = apis.StatusFailed
|
||||
for j := range ruleResponses[i].FailedPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FailedPath: ruleResponses[i].FailedPaths[j]})
|
||||
}
|
||||
for j := range ruleResponses[i].FixPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixPath: ruleResponses[i].FixPaths[j]})
|
||||
}
|
||||
resources[failedResources[j].GetID()] = ruleResult
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// remove all data from responses, leave only the metadata
|
||||
keepFields := []string{"kind", "apiVersion", "metadata"}
|
||||
keepMetadataFields := []string{"name", "namespace", "labels"}
|
||||
ruleReport.RemoveData(keepFields, keepMetadataFields)
|
||||
|
||||
return &ruleReport, err
|
||||
return resources, err
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) runOPAOnSingleRule(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string) (reporthandling.RuleReport, error) {
|
||||
func (opap *OPAProcessor) runOPAOnSingleRule(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, postureControlInputs map[string][]string) ([]reporthandling.RuleResponse, error) {
|
||||
switch rule.RuleLanguage {
|
||||
case reporthandling.RegoLanguage, reporthandling.RegoLanguage2:
|
||||
return opap.runRegoOnK8s(rule, k8sObjects, getRuleData)
|
||||
return opap.runRegoOnK8s(rule, k8sObjects, getRuleData, postureControlInputs)
|
||||
default:
|
||||
return reporthandling.RuleReport{}, fmt.Errorf("rule: '%s', language '%v' not supported", rule.Name, rule.RuleLanguage)
|
||||
return nil, fmt.Errorf("rule: '%s', language '%v' not supported", rule.Name, rule.RuleLanguage)
|
||||
}
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) runRegoOnK8s(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string) (reporthandling.RuleReport, error) {
|
||||
var errs error
|
||||
ruleReport := reporthandling.RuleReport{
|
||||
Name: rule.Name,
|
||||
}
|
||||
func (opap *OPAProcessor) runRegoOnK8s(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, postureControlInputs map[string][]string) ([]reporthandling.RuleResponse, error) {
|
||||
|
||||
// compile modules
|
||||
modules, err := getRuleDependencies()
|
||||
if err != nil {
|
||||
return ruleReport, fmt.Errorf("rule: '%s', %s", rule.Name, err.Error())
|
||||
return nil, fmt.Errorf("rule: '%s', %s", rule.Name, err.Error())
|
||||
}
|
||||
modules[rule.Name] = getRuleData(rule)
|
||||
compiled, err := ast.CompileModules(modules)
|
||||
if err != nil {
|
||||
return ruleReport, fmt.Errorf("in 'runRegoOnSingleRule', failed to compile rule, name: %s, reason: %s", rule.Name, err.Error())
|
||||
return nil, fmt.Errorf("in 'runRegoOnSingleRule', failed to compile rule, name: %s, reason: %s", rule.Name, err.Error())
|
||||
}
|
||||
|
||||
// Eval
|
||||
results, err := opap.regoEval(k8sObjects, compiled)
|
||||
if err != nil {
|
||||
errs = fmt.Errorf("rule: '%s', %s", rule.Name, err.Error())
|
||||
}
|
||||
|
||||
if results != nil {
|
||||
ruleReport.RuleResponses = append(ruleReport.RuleResponses, results...)
|
||||
}
|
||||
return ruleReport, errs
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRego *ast.Compiler) ([]reporthandling.RuleResponse, error) {
|
||||
store, err := opap.regoDependenciesData.TOStorage() // get store
|
||||
store, err := resources.TOStorage(postureControlInputs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Eval
|
||||
results, err := opap.regoEval(k8sObjects, compiled, &store)
|
||||
if err != nil {
|
||||
logger.L().Error(err.Error())
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRego *ast.Compiler, store *storage.Store) ([]reporthandling.RuleResponse, error) {
|
||||
// opap.regoDependenciesData.PostureControlInputs
|
||||
|
||||
rego := rego.New(
|
||||
rego.Query("data.armo_builtins"), // get package name from rule
|
||||
rego.Compiler(compiledRego),
|
||||
rego.Input(inputObj),
|
||||
rego.Store(store),
|
||||
rego.Store(*store),
|
||||
)
|
||||
|
||||
// Run evaluation
|
||||
@@ -284,9 +270,15 @@ func (opap *OPAProcessor) enumerateData(rule *reporthandling.PolicyRule, k8sObje
|
||||
if ruleEnumeratorData(rule) == "" {
|
||||
return k8sObjects, nil
|
||||
}
|
||||
ruleReport, err := opap.runOPAOnSingleRule(rule, k8sObjects, ruleEnumeratorData)
|
||||
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs)
|
||||
|
||||
ruleResponse, err := opap.runOPAOnSingleRule(rule, k8sObjects, ruleEnumeratorData, postureControlInputs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ruleReport.GetFailedResources(), nil
|
||||
failedResources := []map[string]interface{}{}
|
||||
for _, ruleResponse := range ruleResponse {
|
||||
failedResources = append(failedResources, ruleResponse.GetFailedResources()...)
|
||||
}
|
||||
return failedResources, nil
|
||||
}
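Stripped of the report bookkeeping, the compile-and-evaluate flow used by `runRegoOnK8s` and `regoEval` reduces to roughly the sketch below. The toy rule, module name, and input are placeholders; the real processor loads its modules via `getRuleDependencies` and passes a store built from the posture control inputs.

```go
package main

import (
	"context"
	"fmt"

	"github.com/open-policy-agent/opa/ast"
	"github.com/open-policy-agent/opa/rego"
)

func main() {
	// A toy rule standing in for the armo_builtins rule set.
	modules := map[string]string{
		"example.rego": `package armo_builtins

deny[msg] {
	input[_].kind == "Pod"
	msg := "pod found"
}`,
	}

	compiled, err := ast.CompileModules(modules)
	if err != nil {
		panic(err)
	}

	// Same evaluation pattern as above: query the armo_builtins package with
	// the Kubernetes objects as input.
	r := rego.New(
		rego.Query("data.armo_builtins"),
		rego.Compiler(compiled),
		rego.Input([]map[string]interface{}{{"kind": "Pod"}}),
	)
	results, err := r.Eval(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(results)
}
```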
|
||||
|
||||
@@ -3,10 +3,13 @@ package opaprocessor
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/mocks"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
@@ -30,11 +33,13 @@ func TestProcess(t *testing.T) {
|
||||
// set opaSessionObj
|
||||
opaSessionObj := cautils.NewOPASessionObjMock()
|
||||
opaSessionObj.Frameworks = []reporthandling.Framework{*reporthandling.MockFrameworkA()}
|
||||
policies := ConvertFrameworksToPolicies(opaSessionObj.Frameworks, "")
|
||||
|
||||
opaSessionObj.K8SResources = &k8sResources
|
||||
opaSessionObj.AllResources = allResources
|
||||
|
||||
opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock())
|
||||
opap.Process()
|
||||
opap.Process(policies)
|
||||
opap.updateResults()
|
||||
for _, f := range opap.PostureReport.FrameworkReports {
|
||||
for _, c := range f.ControlReports {
|
||||
@@ -50,3 +55,85 @@ func TestProcess(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestProcessResourcesResult(t *testing.T) {
|
||||
|
||||
// set k8s
|
||||
k8sResources := make(cautils.K8SResources)
|
||||
|
||||
deployment := mocks.MockDevelopmentWithHostpath()
|
||||
frameworks := []reporthandling.Framework{*mocks.MockFramework_0006_0013()}
|
||||
|
||||
k8sResources["apps/v1/deployments"] = workloadinterface.ListMetaIDs([]workloadinterface.IMetadata{deployment})
|
||||
|
||||
// set opaSessionObj
|
||||
opaSessionObj := cautils.NewOPASessionObjMock()
|
||||
opaSessionObj.Frameworks = frameworks
|
||||
|
||||
policies := ConvertFrameworksToPolicies(opaSessionObj.Frameworks, "")
|
||||
ConvertFrameworksToSummaryDetails(&opaSessionObj.Report.SummaryDetails, opaSessionObj.Frameworks, policies)
|
||||
|
||||
opaSessionObj.K8SResources = &k8sResources
|
||||
opaSessionObj.AllResources[deployment.GetID()] = deployment
|
||||
|
||||
opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock())
|
||||
opap.Process(policies)
|
||||
|
||||
assert.Equal(t, 1, len(opaSessionObj.ResourcesResult))
|
||||
res := opaSessionObj.ResourcesResult[deployment.GetID()]
|
||||
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
|
||||
assert.True(t, res.GetStatus(nil).IsFailed())
|
||||
assert.False(t, res.GetStatus(nil).IsPassed())
|
||||
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
|
||||
|
||||
opap.updateResults()
|
||||
res = opaSessionObj.ResourcesResult[deployment.GetID()]
|
||||
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
|
||||
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
|
||||
assert.True(t, res.GetStatus(nil).IsFailed())
|
||||
assert.False(t, res.GetStatus(nil).IsPassed())
|
||||
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
|
||||
|
||||
// test resource counters
|
||||
summaryDetails := opaSessionObj.Report.SummaryDetails
|
||||
assert.Equal(t, 1, summaryDetails.NumberOfResources().All())
|
||||
assert.Equal(t, 1, summaryDetails.NumberOfResources().Failed())
|
||||
assert.Equal(t, 0, summaryDetails.NumberOfResources().Excluded())
|
||||
assert.Equal(t, 0, summaryDetails.NumberOfResources().Passed())
|
||||
|
||||
// test resource listing
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().All()))
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Excluded()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
|
||||
|
||||
// test control listing
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).All()), len(summaryDetails.ListControls().All()))
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Passed()), len(summaryDetails.ListControls().Passed()))
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Failed()), len(summaryDetails.ListControls().Failed()))
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Excluded()), len(summaryDetails.ListControls().Excluded()))
|
||||
assert.True(t, summaryDetails.GetStatus().IsFailed())
|
||||
|
||||
opaSessionObj.Exceptions = []armotypes.PostureExceptionPolicy{*mocks.MockExceptionAllKinds(&armotypes.PosturePolicy{FrameworkName: frameworks[0].Name})}
|
||||
opap.updateResults()
|
||||
|
||||
res = opaSessionObj.ResourcesResult[deployment.GetID()]
|
||||
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Excluded()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
|
||||
assert.True(t, res.GetStatus(nil).IsExcluded())
|
||||
assert.False(t, res.GetStatus(nil).IsPassed())
|
||||
assert.False(t, res.GetStatus(nil).IsFailed())
|
||||
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
|
||||
|
||||
// test resource listing
|
||||
summaryDetails = opaSessionObj.Report.SummaryDetails
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().All()))
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Excluded()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
|
||||
}
|
||||
|
||||
@@ -1,35 +1,65 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
pkgcautils "github.com/armosec/utils-go/utils"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/exceptions"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
resources "github.com/armosec/opa-utils/resources"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// updateResults updates the results objects and report objects. This is a critical function - DO NOT CHANGE
|
||||
/*
|
||||
- remove sensitive data
|
||||
- adding exceptions
|
||||
- summarize results
|
||||
*/
|
||||
func (opap *OPAProcessor) updateResults() {
|
||||
|
||||
// remove data from all objects
|
||||
for i := range opap.AllResources {
|
||||
removeData(opap.AllResources[i])
|
||||
}
|
||||
|
||||
for f := range opap.PostureReport.FrameworkReports {
|
||||
// set exceptions
|
||||
exceptions.SetFrameworkExceptions(&opap.PostureReport.FrameworkReports[f], opap.Exceptions, cautils.ClusterName)
|
||||
// set exceptions
|
||||
for i := range opap.ResourcesResult {
|
||||
|
||||
// set counters
|
||||
reporthandling.SetUniqueResourcesCounter(&opap.PostureReport.FrameworkReports[f])
|
||||
t := opap.ResourcesResult[i]
|
||||
|
||||
// set default score
|
||||
// reporthandling.SetDefaultScore(&opap.PostureReport.FrameworkReports[f])
|
||||
// first set exceptions
|
||||
if resource, ok := opap.AllResources[i]; ok {
|
||||
t.SetExceptions(resource, opap.Exceptions, cautils.ClusterName)
|
||||
}
|
||||
|
||||
// summarize the resources
|
||||
opap.Report.AppendResourceResultToSummary(&t)
|
||||
|
||||
// Add score
|
||||
// TODO
|
||||
|
||||
// save changes
|
||||
opap.ResourcesResult[i] = t
|
||||
}
|
||||
|
||||
// set result summary
|
||||
opap.Report.SummaryDetails.InitResourcesSummary()
|
||||
|
||||
// for f := range opap.PostureReport.FrameworkReports {
|
||||
// // set exceptions
|
||||
// exceptions.SetFrameworkExceptions(&opap.PostureReport.FrameworkReports[f], opap.Exceptions, cautils.ClusterName)
|
||||
|
||||
// // set counters
|
||||
// reporthandling.SetUniqueResourcesCounter(&opap.PostureReport.FrameworkReports[f])
|
||||
|
||||
// // set default score
|
||||
// // reporthandling.SetDefaultScore(&opap.PostureReport.FrameworkReports[f])
|
||||
// }
|
||||
}
|
||||
|
||||
func getAllSupportedObjects(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
|
||||
@@ -50,8 +80,7 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[s
|
||||
for _, groupResource := range groupResources {
|
||||
if k8sObj, ok := (*k8sResources)[groupResource]; ok {
|
||||
if k8sObj == nil {
|
||||
continue
|
||||
// glog.Errorf("Resource '%s' is nil, probably failed to pull the resource", groupResource)
|
||||
logger.L().Debug(fmt.Sprintf("resource '%s' is nil, probably failed to pull the resource", groupResource))
|
||||
}
|
||||
for i := range k8sObj {
|
||||
k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
|
||||
@@ -93,7 +122,7 @@ func filterOutChildResources(objects []workloadinterface.IMetadata, match []repo
|
||||
func getRuleDependencies() (map[string]string, error) {
|
||||
modules := resources.LoadRegoModules()
|
||||
if len(modules) == 0 {
|
||||
glog.Warningf("failed to load rule dependencies")
|
||||
logger.L().Warning("failed to load rule dependencies")
|
||||
}
|
||||
return modules, nil
|
||||
}
|
||||
@@ -172,6 +201,7 @@ func removeSecretData(workload workloadinterface.IWorkload) {
|
||||
func removePodData(workload workloadinterface.IWorkload) {
|
||||
workload.RemoveAnnotation("kubectl.kubernetes.io/last-applied-configuration")
|
||||
workloadinterface.RemoveFromMap(workload.GetObject(), "metadata", "managedFields")
|
||||
workloadinterface.RemoveFromMap(workload.GetObject(), "status")
|
||||
|
||||
containers, err := workload.GetContainers()
|
||||
if err != nil || len(containers) == 0 {
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
@@ -58,7 +57,6 @@ func TestIsRuleKubescapeVersionCompatible(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoveData(t *testing.T) {
|
||||
k8sinterface.InitializeMapResourcesMock()
|
||||
|
||||
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"demoservice-server"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"demoservice-server"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}}}`
|
||||
obj, _ := workloadinterface.NewWorkload([]byte(w))
|
||||
|
||||
45
opaprocessor/utils.go
Normal file
@@ -0,0 +1,45 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
// ConvertFrameworksToPolicies converts a list of frameworks to a list of policies
|
||||
func ConvertFrameworksToPolicies(frameworks []reporthandling.Framework, version string) *cautils.Policies {
|
||||
policies := cautils.NewPolicies()
|
||||
policies.Set(frameworks, version)
|
||||
return policies
|
||||
}
|
||||
|
||||
// ConvertFrameworksToSummaryDetails initialize the summary details for the report object
|
||||
func ConvertFrameworksToSummaryDetails(summaryDetails *reportsummary.SummaryDetails, frameworks []reporthandling.Framework, policies *cautils.Policies) {
|
||||
if summaryDetails.Controls == nil {
|
||||
summaryDetails.Controls = make(map[string]reportsummary.ControlSummary)
|
||||
}
|
||||
for i := range frameworks {
|
||||
controls := map[string]reportsummary.ControlSummary{}
|
||||
for j := range frameworks[i].Controls {
|
||||
id := frameworks[i].Controls[j].ControlID
|
||||
if _, ok := policies.Controls[id]; ok {
|
||||
c := reportsummary.ControlSummary{
|
||||
Name: frameworks[i].Controls[j].Name,
|
||||
ControlID: id,
|
||||
ScoreFactor: frameworks[i].Controls[j].BaseScore,
|
||||
Description: frameworks[i].Controls[j].Description,
|
||||
Remediation: frameworks[i].Controls[j].Remediation,
|
||||
}
|
||||
controls[frameworks[i].Controls[j].ControlID] = c
|
||||
summaryDetails.Controls[id] = c
|
||||
}
|
||||
}
|
||||
if cautils.StringInSlice(policies.Frameworks, frameworks[i].Name) != cautils.ValueNotFound {
|
||||
summaryDetails.Frameworks = append(summaryDetails.Frameworks, reportsummary.FrameworkSummary{
|
||||
Name: frameworks[i].Name,
|
||||
Controls: controls,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
30  opaprocessor/utils_test.go  Normal file
@@ -0,0 +1,30 @@
package opaprocessor

import (
    "testing"

    "github.com/armosec/kubescape/mocks"
    "github.com/stretchr/testify/assert"

    "github.com/armosec/opa-utils/reporthandling"
    "github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
)

func TestConvertFrameworksToPolicies(t *testing.T) {
    fw0 := mocks.MockFramework_0006_0013()
    fw1 := mocks.MockFramework_0044()
    policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "")
    assert.Equal(t, 2, len(policies.Frameworks))
    assert.Equal(t, 3, len(policies.Controls))
}

func TestInitializeSummaryDetails(t *testing.T) {
    fw0 := mocks.MockFramework_0006_0013()
    fw1 := mocks.MockFramework_0044()

    summaryDetails := reportsummary.SummaryDetails{}
    frameworks := []reporthandling.Framework{*fw0, *fw1}
    policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "")
    ConvertFrameworksToSummaryDetails(&summaryDetails, frameworks, policies)
    assert.Equal(t, 2, len(summaryDetails.Frameworks))
    assert.Equal(t, 3, len(summaryDetails.Controls))
}
@@ -50,7 +50,7 @@ func (policyHandler *PolicyHandler) HandleNotificationRequest(notification *repo

func (policyHandler *PolicyHandler) getResources(notification *reporthandling.PolicyNotification, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {

    opaSessionObj.PostureReport.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
    opaSessionObj.Report.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
    resourcesMap, allResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj.Frameworks, &notification.Designators)
    if err != nil {
        return err

@@ -2,37 +2,38 @@ package policyhandler

import (
    "fmt"
    "strings"

    "github.com/armosec/kubescape/cautils"
    "github.com/armosec/kubescape/cautils/logger"
    "github.com/armosec/opa-utils/reporthandling"
)

func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.PolicyNotification, policiesAndResources *cautils.OPASessionObj) error {
    cautils.ProgressTextDisplay("Downloading/Loading policy definitions")
    logger.L().Info("Downloading/Loading policy definitions")

    frameworks, err := policyHandler.getScanPolicies(notification)
    if err != nil {
        return err
    }
    if len(frameworks) == 0 {
        return fmt.Errorf("failed to download policies, please ARMO team for more information")
        return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(notification.Rules), ", "))
    }

    policiesAndResources.Frameworks = frameworks

    // get exceptions
    exceptionPolicies, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.CustomerGUID, cautils.ClusterName)
    exceptionPolicies, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)
    if err == nil {
        policiesAndResources.Exceptions = exceptionPolicies
    }

    // get account configuration
    controlsInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.CustomerGUID, cautils.ClusterName)
    controlsInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.ClusterName)
    if err == nil {
        policiesAndResources.RegoInputData.PostureControlInputs = controlsInputs
    }

    cautils.SuccessTextDisplay("Downloaded/Loaded policy")
    logger.L().Success("Downloaded/Loaded policy")
    return nil
}

@@ -70,3 +71,11 @@ func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling
    }
    return frameworks, nil
}

func policyIdentifierToSlice(rules []reporthandling.PolicyIdentifier) []string {
    s := []string{}
    for i := range rules {
        s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Name))
    }
    return s
}

38  registryadaptors/README.md  Normal file
@@ -0,0 +1,38 @@
# Integrate With Vulnerability Server

There are some controls that check the relation between the Kubernetes manifest and vulnerabilities.
For these controls to work properly, it is necessary to integrate Kubescape with one of the supported vulnerability servers below.

## Supported Servers
* Armosec

# Integrate With Armosec Server

1. Navigate to [armosec.io](https://portal.armo.cloud/)
2. Click Profile (top right icon) -> "User Management" -> "API Tokens" and generate a token
3. Copy the clientID and secretKey and run:
```
kubescape config set clientID <>
```
```
kubescape config set secretKey <>
```
4. Confirm the keys are set
```
kubescape config view
```
Expecting:
```
{
  "accountID": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
  "clientID": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
  "secretKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
}
```
> If you are missing the `accountID` field, set it by running `kubescape config set accountID <>`

For CI/CD, set the environment variables as follows:
```
KS_ACCOUNT_ID // account id
KS_CLIENT_ID // client id
KS_SECRET_KEY // access key
```
98  registryadaptors/armosec/v1/civarmoadaptor.go  Normal file
@@ -0,0 +1,98 @@
package v1

import (
    "encoding/json"
    "fmt"

    "github.com/armosec/kubescape/cautils/getter"
    "github.com/armosec/kubescape/cautils/logger"
    "github.com/armosec/kubescape/cautils/logger/helpers"
    "github.com/armosec/kubescape/containerscan"
    "github.com/armosec/kubescape/registryadaptors/registryvulnerabilities"
)

func NewArmoAdaptor(armoAPI *getter.ArmoAPI) *ArmoCivAdaptor {
    return &ArmoCivAdaptor{
        armoAPI: armoAPI,
    }
}

func (armoCivAdaptor *ArmoCivAdaptor) Login() error {
    if armoCivAdaptor.armoAPI.IsLoggedIn() {
        return nil
    }
    return armoCivAdaptor.armoAPI.Login()
}
func (armoCivAdaptor *ArmoCivAdaptor) GetImagesVulnerabilities(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageVulnerabilityReport, error) {
    resultList := make([]registryvulnerabilities.ContainerImageVulnerabilityReport, 0)
    for _, imageID := range imageIDs {
        result, err := armoCivAdaptor.GetImageVulnerability(&imageID)
        if err == nil {
            resultList = append(resultList, *result)
        } else {
            logger.L().Debug("failed to get image vulnerabilities", helpers.String("image", imageID.Tag), helpers.Error(err))
        }
    }
    return resultList, nil
}

func (armoCivAdaptor *ArmoCivAdaptor) GetImageVulnerability(imageID *registryvulnerabilities.ContainerImageIdentifier) (*registryvulnerabilities.ContainerImageVulnerabilityReport, error) {
    // First, find the last scan ID recorded for this image
    containerScanId, err := armoCivAdaptor.getImageLastScanId(imageID)
    if err != nil {
        return nil, err
    }
    if containerScanId == "" {
        return nil, fmt.Errorf("last scan ID is empty")
    }

    filter := []map[string]string{{"containersScanID": containerScanId}}
    pageSize := 300
    pageNumber := 1
    request := V2ListRequest{PageSize: &pageSize, PageNum: &pageNumber, InnerFilters: filter, OrderBy: "timestamp:desc"}
    requestBody, _ := json.Marshal(request)
    requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsDetails?customerGUID=%s", armoCivAdaptor.armoAPI.GetAPIURL(), armoCivAdaptor.armoAPI.GetAccountID())

    resp, err := armoCivAdaptor.armoAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
    if err != nil {
        return nil, err
    }

    scanDetailsResult := struct {
        Total struct {
            Value    int    `json:"value"`
            Relation string `json:"relation"`
        } `json:"total"`
        Response containerscan.VulnerabilitiesList `json:"response"`
        Cursor   string                            `json:"cursor"`
    }{}

    err = json.Unmarshal([]byte(resp), &scanDetailsResult)
    if err != nil {
        return nil, err
    }

    vulnerabilities := responseObjectToVulnerabilities(scanDetailsResult.Response)

    resultImageVulnerabilityReport := registryvulnerabilities.ContainerImageVulnerabilityReport{
        ImageID:         *imageID,
        Vulnerabilities: vulnerabilities,
    }

    return &resultImageVulnerabilityReport, nil
}

func (armoCivAdaptor *ArmoCivAdaptor) DescribeAdaptor() string {
    // TODO
    return ""
}

func (armoCivAdaptor *ArmoCivAdaptor) GetImagesInformation(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageInformation, error) {
    // TODO
    return []registryvulnerabilities.ContainerImageInformation{}, nil
}

func (armoCivAdaptor *ArmoCivAdaptor) GetImagesScanStatus(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageScanStatus, error) {
    // TODO
    return []registryvulnerabilities.ContainerImageScanStatus{}, nil
}
23  registryadaptors/armosec/v1/civarmoadaptor_test.go  Normal file
@@ -0,0 +1,23 @@
package v1

import (
    "testing"

    "github.com/armosec/kubescape/registryadaptors/registryvulnerabilities"
    "github.com/stretchr/testify/assert"
)

func TestSum(t *testing.T) {
    var err error
    var adaptor registryvulnerabilities.IContainerImageVulnerabilityAdaptor

    adaptor, err = NewArmoAdaptorMock()
    assert.NoError(t, err)

    assert.NoError(t, adaptor.Login())

    imageVulnerabilityReport, err := adaptor.GetImageVulnerability(&registryvulnerabilities.ContainerImageIdentifier{Tag: "gke.gcr.io/gcp-compute-persistent-disk-csi-driver:v1.3.4-gke.0"})
    assert.NoError(t, err)

    assert.Equal(t, 25, len(imageVulnerabilityReport.Vulnerabilities))
}
67  registryadaptors/armosec/v1/civarmoadaptormock.go  Normal file
File diff suppressed because one or more lines are too long
65  registryadaptors/armosec/v1/civarmoadaptorutils.go  Normal file
@@ -0,0 +1,65 @@
package v1

import (
    "encoding/json"
    "fmt"

    "github.com/armosec/kubescape/containerscan"
    "github.com/armosec/kubescape/registryadaptors/registryvulnerabilities"
)

func (armoCivAdaptor *ArmoCivAdaptor) getImageLastScanId(imageID *registryvulnerabilities.ContainerImageIdentifier) (string, error) {
    filter := []map[string]string{{"imageTag": imageID.Tag, "status": "Success"}}
    pageSize := 1
    pageNumber := 1
    request := V2ListRequest{PageSize: &pageSize, PageNum: &pageNumber, InnerFilters: filter, OrderBy: "timestamp:desc"}
    requestBody, _ := json.Marshal(request)
    requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsSumSummary?customerGUID=%s", armoCivAdaptor.armoAPI.GetAPIURL(), armoCivAdaptor.armoAPI.GetAccountID())

    resp, err := armoCivAdaptor.armoAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
    if err != nil {
        return "", err
    }

    scanSummartResult := struct {
        Total struct {
            Value    int    `json:"value"`
            Relation string `json:"relation"`
        } `json:"total"`
        Response []containerscan.ElasticContainerScanSummaryResult `json:"response"`
        Cursor   string                                            `json:"cursor"`
    }{}
    err = json.Unmarshal([]byte(resp), &scanSummartResult)
    if err != nil {
        return "", err
    }

    if len(scanSummartResult.Response) < pageSize {
        return "", fmt.Errorf("did not get response for image %s", imageID.Tag)
    }

    return scanSummartResult.Response[0].ContainerScanID, nil
}

func responseObjectToVulnerabilities(vulnerabilitiesList containerscan.VulnerabilitiesList) []registryvulnerabilities.Vulnerability {
    vulnerabilities := make([]registryvulnerabilities.Vulnerability, len(vulnerabilitiesList))
    for i, vulnerabilityEntry := range vulnerabilitiesList {
        vulnerabilities[i].Description = vulnerabilityEntry.Description
        vulnerabilities[i].Fixes = make([]registryvulnerabilities.FixedIn, len(vulnerabilityEntry.Fixes))
        for j, fix := range vulnerabilityEntry.Fixes {
            vulnerabilities[i].Fixes[j].ImgTag = fix.ImgTag
            vulnerabilities[i].Fixes[j].Name = fix.Name
            vulnerabilities[i].Fixes[j].Version = fix.Version
        }
        vulnerabilities[i].HealthStatus = vulnerabilityEntry.HealthStatus
        vulnerabilities[i].Link = vulnerabilityEntry.Link
        vulnerabilities[i].Metadata = vulnerabilityEntry.Metadata
        vulnerabilities[i].Name = vulnerabilityEntry.Name
        vulnerabilities[i].PackageVersion = vulnerabilityEntry.PackageVersion
        vulnerabilities[i].RelatedPackageName = vulnerabilityEntry.RelatedPackageName
        vulnerabilities[i].Relevancy = vulnerabilityEntry.Relevancy
        vulnerabilities[i].Severity = vulnerabilityEntry.Severity
        vulnerabilities[i].UrgentCount = vulnerabilityEntry.UrgentCount
    }
    return vulnerabilities
}
35  registryadaptors/armosec/v1/datastructures.go  Normal file
@@ -0,0 +1,35 @@
package v1

import (
    "time"

    "github.com/armosec/kubescape/cautils/getter"
)

type V2ListRequest struct {
    // Properties of the requested next page.
    // Use ValidatePageProperties to set the PageSize field.
    PageSize *int `json:"pageSize,omitempty"`
    // One can leave it empty for 0, then call ValidatePageProperties
    PageNum *int `json:"pageNum,omitempty"`
    // The time window of the list to return. Default: since - beginning of time, until - now.
    Since *time.Time `json:"since,omitempty"`
    Until *time.Time `json:"until,omitempty"`
    // Which elements of the list to return; each field can hold multiple values separated by commas.
    // Example: {"severity": "High,Medium", "type": "61539,30303"}
    // An empty map means "return the complete list"
    InnerFilters []map[string]string `json:"innerFilters,omitempty"`
    // How to order (sort) the list, field name + sort order (asc/desc), like https://www.w3schools.com/sql/sql_orderby.asp
    // Example: "timestamp:asc,severity:desc"
    OrderBy string `json:"orderBy,omitempty"`
    // Cursor to the next page of a former request. Not supported yet.
    // Cursor cannot be used with the other parameters of this struct.
    Cursor string `json:"cursor,omitempty"`
    // FieldsList allows returning only a subset of the source document fields.
    // Don't expose FieldsList outside without a well-designed decision.
    FieldsList              []string          `json:"includeFields,omitempty"`
    FieldsReverseKeywordMap map[string]string `json:"-,omitempty"`
}
type ArmoCivAdaptor struct {
    armoAPI *getter.ArmoAPI
}
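For orientation, here is a minimal, self-contained sketch of how the adaptor code above builds a `V2ListRequest`. It is an editorial illustration, not part of the commit: the struct is trimmed to the fields actually used, and the image tag is only an example value.
```
package main

import (
    "encoding/json"
    "fmt"
)

// Trimmed copy of V2ListRequest (only the fields exercised below) so the
// sketch compiles on its own; the full struct lives in
// registryadaptors/armosec/v1/datastructures.go.
type V2ListRequest struct {
    PageSize     *int                `json:"pageSize,omitempty"`
    PageNum      *int                `json:"pageNum,omitempty"`
    InnerFilters []map[string]string `json:"innerFilters,omitempty"`
    OrderBy      string              `json:"orderBy,omitempty"`
}

func main() {
    pageSize, pageNumber := 1, 1
    // Ask for the latest successful scan of a given image tag, mirroring
    // getImageLastScanId; "nginx:latest" is an illustrative value.
    filter := []map[string]string{{"imageTag": "nginx:latest", "status": "Success"}}
    request := V2ListRequest{PageSize: &pageSize, PageNum: &pageNumber, InnerFilters: filter, OrderBy: "timestamp:desc"}

    body, _ := json.Marshal(request)
    fmt.Println(string(body)) // the JSON payload POSTed to the vulnerability API
}
```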
164  registryadaptors/contribute.md  Normal file
@@ -0,0 +1,164 @@
# Container image vulnerability adaptor interface

## High level design of Kubescape

### Layers

* Controls and Rules: the actual control logic implementation, the "tests" themselves. Implemented in Rego
* OPA engine: the [OPA](https://github.com/open-policy-agent/opa) Rego interpreter
* Rules processor: the Kubescape component that enumerates and runs the controls while also preparing all the input data the controls need to run
* Data sources: a set of modules providing data to the Rules processor so it can run the controls against them. Examples: Kubernetes objects, cloud vendor API objects and, added in this proposal, vulnerability information
* Container Image Vulnerability (CIV) adaption interface: the subject of this proposal, it gives a common interface for different registry/vulnerability vendors to adapt to.
* CIV adaptors: specific implementations of the CIV interface, for example a Harbor adaptor
```
-----------------------
| Controls/Rules (rego) |
-----------------------
           |
-----------------------
|      OPA engine       |
-----------------------
           |
-----------------------
|    Rules processor    |
-----------------------
           |
-----------------------
|      Data sources     |
-----------------------
           |
=======================
| CIV adaption interface| <- Adding this layer in this proposal
=======================
           |
-----------------------
| Specific CIV adaptors | <- Will be implemented based on this proposal
-----------------------
```

## Functionalities to cover

The interface needs to cover the following functionalities:

* Authentication against the information source (abstracted login)
* Triggering image scan (if applicable, the source might store vulnerabilities for images but cannot scan alone)
* Reading image scan status (with last scan date, etc.)
* Getting vulnerability information for a given image
* Getting image information
  * Image manifests
  * Image BOMs (bill of materials)

## Go API proposal

```

/*type ContainerImageRegistryCredentials struct {
    Password string
    Tag      string
    Hash     string
}*/

type ContainerImageIdentifier struct {
    Registry   string
    Repository string
    Tag        string
    Hash       string
}

type ContainerImageScanStatus struct {
    ImageID         ContainerImageIdentifier
    IsScanAvailable bool
    IsBomAvailable  bool
    LastScanDate    time.Time
}

type ContainerImageVulnerabilityReport struct {
    ImageID ContainerImageIdentifier
    // TBD
}

type ContainerImageInformation struct {
    ImageID       ContainerImageIdentifier
    Bom           []string
    ImageManifest Manifest // will use here Docker package definition
}

type IContainerImageVulnerabilityAdaptor interface {
    // Credentials come from user input (CLI or configuration file) and are abstracted at a string-to-string map level,
    // so an example use would be a registry like "simpledockerregistry:80" and credentials like {"username":"joedoe","password":"abcd1234"}
    Login(registry string, credentials map[string]string) error

    // For "help" purposes
    DescribeAdaptor() string

    GetImagesScanStatus(imageIDs []ContainerImageIdentifier) ([]ContainerImageScanStatus, error)

    GetImagesVulnerabilities(imageIDs []ContainerImageIdentifier) ([]ContainerImageVulnerabilityReport, error)

    GetImagesInformation(imageIDs []ContainerImageIdentifier) ([]ContainerImageInformation, error)
}
```
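To make the contract above concrete, here is a minimal, self-contained sketch of what an adaptor implementing this interface could look like. It is not part of the proposal or the commit: the types are trimmed copies of the ones above, and `dummyAdaptor` is a hypothetical stand-in for a real vendor integration.
```
package main

import (
    "fmt"
    "time"
)

// Trimmed copies of the proposal types so the sketch compiles on its own.
type ContainerImageIdentifier struct {
    Registry   string
    Repository string
    Tag        string
    Hash       string
}

type ContainerImageScanStatus struct {
    ImageID         ContainerImageIdentifier
    IsScanAvailable bool
    IsBomAvailable  bool
    LastScanDate    time.Time
}

type ContainerImageVulnerabilityReport struct {
    ImageID ContainerImageIdentifier
}

type ContainerImageInformation struct {
    ImageID ContainerImageIdentifier
    Bom     []string
}

type IContainerImageVulnerabilityAdaptor interface {
    Login(registry string, credentials map[string]string) error
    DescribeAdaptor() string
    GetImagesScanStatus(imageIDs []ContainerImageIdentifier) ([]ContainerImageScanStatus, error)
    GetImagesVulnerabilities(imageIDs []ContainerImageIdentifier) ([]ContainerImageVulnerabilityReport, error)
    GetImagesInformation(imageIDs []ContainerImageIdentifier) ([]ContainerImageInformation, error)
}

// dummyAdaptor is a hypothetical in-memory adaptor: it accepts any login and
// returns an empty report per image. A real adaptor would call the vendor API here.
type dummyAdaptor struct {
    loggedIn bool
}

func (d *dummyAdaptor) Login(registry string, credentials map[string]string) error {
    d.loggedIn = true
    return nil
}

func (d *dummyAdaptor) DescribeAdaptor() string { return "dummy adaptor for demonstration" }

func (d *dummyAdaptor) GetImagesScanStatus(imageIDs []ContainerImageIdentifier) ([]ContainerImageScanStatus, error) {
    statuses := make([]ContainerImageScanStatus, 0, len(imageIDs))
    for _, id := range imageIDs {
        statuses = append(statuses, ContainerImageScanStatus{ImageID: id, IsScanAvailable: true, LastScanDate: time.Now()})
    }
    return statuses, nil
}

func (d *dummyAdaptor) GetImagesVulnerabilities(imageIDs []ContainerImageIdentifier) ([]ContainerImageVulnerabilityReport, error) {
    reports := make([]ContainerImageVulnerabilityReport, 0, len(imageIDs))
    for _, id := range imageIDs {
        reports = append(reports, ContainerImageVulnerabilityReport{ImageID: id})
    }
    return reports, nil
}

func (d *dummyAdaptor) GetImagesInformation(imageIDs []ContainerImageIdentifier) ([]ContainerImageInformation, error) {
    return []ContainerImageInformation{}, nil
}

func main() {
    var adaptor IContainerImageVulnerabilityAdaptor = &dummyAdaptor{}
    if err := adaptor.Login("simpledockerregistry:80", map[string]string{"username": "joedoe", "password": "abcd1234"}); err != nil {
        panic(err)
    }
    reports, _ := adaptor.GetImagesVulnerabilities([]ContainerImageIdentifier{{Repository: "nginx", Tag: "latest"}})
    fmt.Println(adaptor.DescribeAdaptor(), len(reports))
}
```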

# Integration

## Input

The objects received from the interface will be converted to IMetadata-compatible objects as follows:

```
{
    "apiVersion": "image.vulnscan.com/v1",
    "kind": "ImageVulnerabilities",
    "metadata": {
        "name": "nginx:latest"
    },
    "data": {
        // list of vulnerabilities
    }
}
```

## Output

The Rego results will be a combination of the k8s artifact and the list of CVEs relevant for the control:

```
{
    "apiVersion": "result.vulnscan.com/v1",
    "kind": "Pod",
    "metadata": {
        "name": "nginx",
        "namespace": "default"
    },
    "relatedObjects": [
        {
            "apiVersion": "v1",
            "kind": "Pod",
            "metadata": {
                "name": "nginx",
                "namespace": "default"
            },
            "spec": {
                // podSpec
            }
        },
        {
            "apiVersion": "image.vulnscan.com/v1",
            "kind": "ImageVulnerabilities",
            "metadata": {
                "name": "nginx:latest"
            },
            "data": {
                // list of vulnerabilities
            }
        }
    ]
}
```
registryadaptors/registryvulnerabilities/datastructures.go
Normal file
50
registryadaptors/registryvulnerabilities/datastructures.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package registryvulnerabilities
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
type ContainerImageIdentifier struct {
|
||||
Registry string
|
||||
Repository string
|
||||
Tag string
|
||||
Hash string
|
||||
}
|
||||
|
||||
type ContainerImageScanStatus struct {
|
||||
ImageID ContainerImageIdentifier
|
||||
IsScanAvailable bool
|
||||
IsBomAvailable bool
|
||||
LastScanDate time.Time
|
||||
}
|
||||
|
||||
type FixedIn struct {
|
||||
Name string `json:"name"`
|
||||
ImgTag string `json:"imageTag"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
type Vulnerability struct {
|
||||
Name string `json:"name"`
|
||||
RelatedPackageName string `json:"packageName"`
|
||||
PackageVersion string `json:"packageVersion"`
|
||||
Link string `json:"link"`
|
||||
Description string `json:"description"`
|
||||
Severity string `json:"severity"`
|
||||
Metadata interface{} `json:"metadata"`
|
||||
Fixes []FixedIn `json:"fixedIn"`
|
||||
Relevancy string `json:"relevant"` // use the related enum
|
||||
UrgentCount int `json:"urgent"`
|
||||
NeglectedCount int `json:"neglected"`
|
||||
HealthStatus string `json:"healthStatus"`
|
||||
}
|
||||
|
||||
type ContainerImageVulnerabilityReport struct {
|
||||
ImageID ContainerImageIdentifier
|
||||
Vulnerabilities []Vulnerability
|
||||
}
|
||||
|
||||
type ContainerImageInformation struct {
|
||||
ImageID ContainerImageIdentifier
|
||||
Bom []string
|
||||
//ImageManifest Manifest // will use here Docker package definition
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff