mirror of
https://github.com/kubescape/kubescape.git
synced 2026-02-14 18:09:55 +00:00
Compare commits
238 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4bd468f03e | ||
|
|
c6eaecd596 | ||
|
|
a2a5b06024 | ||
|
|
825732f60f | ||
|
|
596ec17106 | ||
|
|
fbd0f352c4 | ||
|
|
2600052735 | ||
|
|
a985b2ce09 | ||
|
|
829c176644 | ||
|
|
7d7d247bc2 | ||
|
|
43ae8e2a81 | ||
|
|
b0f37e9465 | ||
|
|
396ef55267 | ||
|
|
4b07469bb2 | ||
|
|
260f7b06c1 | ||
|
|
67ba28a3cb | ||
|
|
a768d22a1d | ||
|
|
ede88550da | ||
|
|
ab55a0d134 | ||
|
|
bfd7060044 | ||
|
|
bf215a0f96 | ||
|
|
a2e1fb36df | ||
|
|
4e9c6f34b3 | ||
|
|
b08c0f2ec6 | ||
|
|
4c0e358afc | ||
|
|
9ae21b064a | ||
|
|
2df0c12e10 | ||
|
|
d37025dc6c | ||
|
|
0b01eb5ee4 | ||
|
|
d537c56159 | ||
|
|
feb9e3af10 | ||
|
|
ec30ed8439 | ||
|
|
cda9bb0e45 | ||
|
|
17f1c6b647 | ||
|
|
98079ec1ec | ||
|
|
16aaf9b5f8 | ||
|
|
ff0264ee15 | ||
|
|
bea9bd64a4 | ||
|
|
544a19906e | ||
|
|
208bb25118 | ||
|
|
fdb7e278c1 | ||
|
|
a132a49d57 | ||
|
|
23e73f5e88 | ||
|
|
fdcc5e9a66 | ||
|
|
77e7b1a2cb | ||
|
|
db95da3742 | ||
|
|
dc172a1476 | ||
|
|
8694a929cf | ||
|
|
36b3840362 | ||
|
|
d5fcbe842f | ||
|
|
155349dac0 | ||
|
|
7956a849d9 | ||
|
|
0d1c4cdc02 | ||
|
|
8c833a5df8 | ||
|
|
2c5bb977cb | ||
|
|
cddf7dd8f6 | ||
|
|
306c18147e | ||
|
|
84815eb97d | ||
|
|
890c13a91f | ||
|
|
3887ec8091 | ||
|
|
726b06bb70 | ||
|
|
c8e07c283e | ||
|
|
88a5128c03 | ||
|
|
01f2d3b04f | ||
|
|
fef85a4467 | ||
|
|
e0eadc1f2d | ||
|
|
a881b73e8d | ||
|
|
606f5cfb62 | ||
|
|
40737d545b | ||
|
|
990be3afe8 | ||
|
|
7020c2d025 | ||
|
|
a6497c1252 | ||
|
|
9d528a8075 | ||
|
|
5aec8b6f28 | ||
|
|
830ee27169 | ||
|
|
5f2e5c6f4e | ||
|
|
cf4317b5f6 | ||
|
|
2453aea6f3 | ||
|
|
e95b0f840a | ||
|
|
83680d1207 | ||
|
|
fd135e9e49 | ||
|
|
e47eb9cb4e | ||
|
|
d288fdc7f2 | ||
|
|
9de73dab29 | ||
|
|
f0afc20ec6 | ||
|
|
bf75059347 | ||
|
|
78835a58c4 | ||
|
|
fdccae9a1e | ||
|
|
6d97d42f67 | ||
|
|
46001e4761 | ||
|
|
37644e1f57 | ||
|
|
8a04934fbd | ||
|
|
31e1b3055f | ||
|
|
b4d712fcb1 | ||
|
|
7847a4593b | ||
|
|
b2036e64f1 | ||
|
|
fd0bbcccfe | ||
|
|
7caa47f949 | ||
|
|
06b171901d | ||
|
|
e685fe2b7d | ||
|
|
7177e77a8d | ||
|
|
4cda32771b | ||
|
|
f896b65a87 | ||
|
|
3fff1b750a | ||
|
|
2380317953 | ||
|
|
bd9ade4d15 | ||
|
|
659d3533ee | ||
|
|
37c242576e | ||
|
|
e9a22a23e7 | ||
|
|
ae3816c1e0 | ||
|
|
e4661a5ae2 | ||
|
|
539d1889fe | ||
|
|
2dd5f05f1a | ||
|
|
60c9b38de4 | ||
|
|
8b66b068ea | ||
|
|
1507bc3f04 | ||
|
|
1e0baba919 | ||
|
|
4c9f47b1e1 | ||
|
|
b66446b7eb | ||
|
|
f1726e21ae | ||
|
|
8d48f8ad86 | ||
|
|
8b280f272e | ||
|
|
b92d4256ad | ||
|
|
914a04a386 | ||
|
|
12f3dd7db6 | ||
|
|
427032ab94 | ||
|
|
b55aaaa34d | ||
|
|
7cde877452 | ||
|
|
e399012f73 | ||
|
|
fe1d2646bd | ||
|
|
ea98bfbe9a | ||
|
|
7bc3277634 | ||
|
|
22e94c5a29 | ||
|
|
aa8cf0ff15 | ||
|
|
a22f97bd13 | ||
|
|
3fd2d1629d | ||
|
|
cd04204a5c | ||
|
|
eee55376e7 | ||
|
|
d3c0972d70 | ||
|
|
e3f5fa8e35 | ||
|
|
03c540b68c | ||
|
|
7f2f53b06c | ||
|
|
8064826b53 | ||
|
|
8bdff31693 | ||
|
|
6f05b4137b | ||
|
|
4207f3d6d1 | ||
|
|
98dbda696d | ||
|
|
7a34c94542 | ||
|
|
789b93776d | ||
|
|
c93ee64630 | ||
|
|
f54c3ad85c | ||
|
|
a9fcd00723 | ||
|
|
fb7cc4284e | ||
|
|
e7a0755c25 | ||
|
|
d1e02dc298 | ||
|
|
69814039ca | ||
|
|
2ffb7fcdb4 | ||
|
|
d1695b7f10 | ||
|
|
5f0f9a9eae | ||
|
|
aa2dedb76f | ||
|
|
d630811386 | ||
|
|
407e35c9d8 | ||
|
|
bb7f38ce31 | ||
|
|
1ffb2d360a | ||
|
|
5cbadc02c5 | ||
|
|
9aa8d9edf0 | ||
|
|
db84380844 | ||
|
|
bbb0d2154f | ||
|
|
2a937ac7c0 | ||
|
|
a96652094e | ||
|
|
dbf3de57f6 | ||
|
|
c00bc0ebbb | ||
|
|
b08e5a2c32 | ||
|
|
09db5d94e1 | ||
|
|
033e8f6b44 | ||
|
|
bef40f0e6c | ||
|
|
aa2f69125f | ||
|
|
d30f3960a7 | ||
|
|
5f43da94ba | ||
|
|
2aa8a0c935 | ||
|
|
c02f8c6cb5 | ||
|
|
aa0be474e2 | ||
|
|
c0161c9b33 | ||
|
|
71404f2205 | ||
|
|
514da1e2db | ||
|
|
75dfceb5da | ||
|
|
1ae76b4377 | ||
|
|
b6f90cba8e | ||
|
|
62af441a1d | ||
|
|
228b8957d3 | ||
|
|
b4ce999ab3 | ||
|
|
cc06a414fe | ||
|
|
d3c37c4e5f | ||
|
|
3b448b62b1 | ||
|
|
6a3f5658b1 | ||
|
|
f65e791522 | ||
|
|
d91304f9ad | ||
|
|
61ce00108e | ||
|
|
a4eb773eee | ||
|
|
cfc69f5a0f | ||
|
|
a44823c3ed | ||
|
|
8a166e5ba5 | ||
|
|
9a7aeff870 | ||
|
|
cb3bdb9df2 | ||
|
|
0be8d57eaa | ||
|
|
79b9cbf1d6 | ||
|
|
500df8737e | ||
|
|
b8acbd1bee | ||
|
|
0bde8a65ba | ||
|
|
d2884b8936 | ||
|
|
e692359b47 | ||
|
|
473746eab0 | ||
|
|
72860deb0f | ||
|
|
d3bdbf31ac | ||
|
|
639c694c13 | ||
|
|
f34f6dc51e | ||
|
|
b93e7b9abf | ||
|
|
995f615b10 | ||
|
|
39b95eff4f | ||
|
|
83246a1802 | ||
|
|
f255df0198 | ||
|
|
52b78a7e73 | ||
|
|
b7842f98f0 | ||
|
|
1b2514e3ec | ||
|
|
0da4f40b48 | ||
|
|
5591bf09d9 | ||
|
|
da94651656 | ||
|
|
86b6a1d88a | ||
|
|
f903e13d7b | ||
|
|
015206a760 | ||
|
|
0aff119260 | ||
|
|
ddb8608501 | ||
|
|
0d75a273f0 | ||
|
|
4f07d23dd6 | ||
|
|
79baa0d66e | ||
|
|
d5ca49ef9b | ||
|
|
536d7fb3c5 | ||
|
|
f66fd1f38c |
72
.github/workflows/build.yaml
vendored
72
.github/workflows/build.yaml
vendored
@@ -2,7 +2,7 @@ name: build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
branches: [ master ]
|
||||
jobs:
|
||||
once:
|
||||
name: Create release
|
||||
@@ -16,8 +16,8 @@ jobs:
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
tag_name: v1.0.${{ github.run_number }}
|
||||
release_name: Release v1.0.${{ github.run_number }}
|
||||
tag_name: v2.0.${{ github.run_number }}
|
||||
release_name: Release v2.0.${{ github.run_number }}
|
||||
draft: false
|
||||
prerelease: false
|
||||
build:
|
||||
@@ -39,8 +39,9 @@ jobs:
|
||||
|
||||
- name: Build
|
||||
env:
|
||||
RELEASE: v1.0.${{ github.run_number }}
|
||||
RELEASE: v2.0.${{ github.run_number }}
|
||||
ArmoBEServer: api.armo.cloud
|
||||
ArmoAuthServer: auth.armo.cloud
|
||||
ArmoERServer: report.armo.cloud
|
||||
ArmoWebsite: portal.armo.cloud
|
||||
CGO_ENABLED: 0
|
||||
@@ -48,11 +49,11 @@ jobs:
|
||||
|
||||
- name: Smoke Testing
|
||||
env:
|
||||
RELEASE: v1.0.${{ github.run_number }}
|
||||
RELEASE: v2.0.${{ github.run_number }}
|
||||
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
|
||||
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
|
||||
|
||||
- name: Upload Release binaries
|
||||
- name: Upload release binaries
|
||||
id: upload-release-asset
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
@@ -63,32 +64,71 @@ jobs:
|
||||
asset_name: kubescape-${{ matrix.os }}
|
||||
asset_content_type: application/octet-stream
|
||||
|
||||
- name: Upload release hash
|
||||
id: upload-release-hash
|
||||
uses: actions/upload-release-asset@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
upload_url: ${{ needs.once.outputs.upload_url }}
|
||||
asset_path: build/${{ matrix.os }}/kubescape.sha256
|
||||
asset_name: kubescape-${{ matrix.os }}-sha256
|
||||
asset_content_type: application/octet-stream
|
||||
|
||||
|
||||
|
||||
build-docker:
|
||||
name: Build docker container, tag and upload to registry
|
||||
needs: build
|
||||
if: ${{ github.repository == 'armosec/kubescape' }}
|
||||
runs-on: ubuntu-latest
|
||||
if: ${{ github.repository == 'armosec/kubescape' }} # TODO
|
||||
permissions:
|
||||
id-token: write
|
||||
packages: write
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set name
|
||||
run: echo quay.io/armosec/kubescape:v1.0.${{ github.run_number }} > build_tag.txt
|
||||
- name: Set image version
|
||||
id: image-version
|
||||
run: echo '::set-output name=IMAGE_VERSION::v2.0.${{ github.run_number }}'
|
||||
|
||||
- name: Set image name
|
||||
id: image-name
|
||||
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'
|
||||
|
||||
- name: Build the Docker image
|
||||
run: docker build . --file build/Dockerfile --tag $(cat build_tag.txt) --build-arg run_number=${{ github.run_number }}
|
||||
|
||||
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}
|
||||
|
||||
- name: Re-Tag Image to latest
|
||||
run: docker tag $(cat build_tag.txt) quay.io/armosec/kubescape:latest
|
||||
|
||||
run: docker tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} ${{ steps.image-name.outputs.IMAGE_NAME }}:latest
|
||||
|
||||
- name: Login to Quay.io
|
||||
env: # Or as an environment variable
|
||||
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
|
||||
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
|
||||
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
|
||||
# - name: Login to GitHub Container Registry
|
||||
# uses: docker/login-action@v1
|
||||
# with:
|
||||
# registry: ghcr.io
|
||||
# username: ${{ github.actor }}
|
||||
# password: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Push Docker image
|
||||
run: |
|
||||
docker push $(cat build_tag.txt)
|
||||
docker push quay.io/armosec/kubescape:latest
|
||||
|
||||
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
|
||||
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:latest
|
||||
|
||||
# TODO - Wait for casign to support fixed tags -> https://github.com/sigstore/cosign/issues/1424
|
||||
# - name: Install cosign
|
||||
# uses: sigstore/cosign-installer@main
|
||||
# with:
|
||||
# cosign-release: 'v1.5.1' # optional
|
||||
# - name: sign kubescape container image
|
||||
# env:
|
||||
# COSIGN_EXPERIMENTAL: "true"
|
||||
# run: |
|
||||
# cosign sign --force ${{ steps.image-name.outputs.IMAGE_NAME }}:latest
|
||||
# cosign sign --force ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
|
||||
|
||||
|
||||
39
.github/workflows/build_dev.yaml
vendored
39
.github/workflows/build_dev.yaml
vendored
@@ -23,16 +23,17 @@ jobs:
|
||||
|
||||
- name: Build
|
||||
env:
|
||||
RELEASE: v1.0.${{ github.run_number }}
|
||||
RELEASE: v2.0.${{ github.run_number }}
|
||||
ArmoBEServer: api.armo.cloud
|
||||
ArmoERServer: report.euprod1.cyberarmorsoft.com
|
||||
ArmoAuthServer: auth.armo.cloud
|
||||
ArmoERServer: report.armo.cloud
|
||||
ArmoWebsite: portal.armo.cloud
|
||||
CGO_ENABLED: 0
|
||||
run: python3 --version && python3 build.py
|
||||
|
||||
- name: Smoke Testing
|
||||
env:
|
||||
RELEASE: v1.0.${{ github.run_number }}
|
||||
RELEASE: v2.0.${{ github.run_number }}
|
||||
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
|
||||
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
|
||||
|
||||
@@ -42,30 +43,42 @@ jobs:
|
||||
name: kubescape-${{ matrix.os }}
|
||||
path: build/${{ matrix.os }}/kubescape
|
||||
|
||||
|
||||
build-docker:
|
||||
name: Build docker container, tag and upload to registry
|
||||
needs: build
|
||||
if: ${{ github.repository == 'armosec/kubescape' }}
|
||||
if: ${{ github.repository == 'armosec/kubescape' }} # TODO
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
id-token: write
|
||||
packages: write
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set name
|
||||
run: echo quay.io/armosec/kubescape:dev-v1.0.${{ github.run_number }} > build_tag.txt
|
||||
- name: Set image version
|
||||
id: image-version
|
||||
run: echo '::set-output name=IMAGE_VERSION::dev-v2.0.${{ github.run_number }}'
|
||||
|
||||
- name: Set image name
|
||||
id: image-name
|
||||
run: echo '::set-output name=IMAGE_NAME::quay.io/${{ github.repository_owner }}/kubescape'
|
||||
|
||||
- name: Build the Docker image
|
||||
run: docker build . --file build/Dockerfile --tag $(cat build_tag.txt) --build-arg run_number=${{ github.run_number }}
|
||||
run: docker build . --file build/Dockerfile --tag ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }} --build-arg image_version=${{ steps.image-version.outputs.IMAGE_VERSION }}
|
||||
|
||||
- name: Login to Quay.io
|
||||
env: # Or as an environment variable
|
||||
env:
|
||||
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
|
||||
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
|
||||
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
|
||||
|
||||
# - name: Login to GitHub Container Registry
|
||||
# uses: docker/login-action@v1
|
||||
# with:
|
||||
# registry: ghcr.io
|
||||
# username: ${{ github.actor }}
|
||||
# password: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Push Docker image
|
||||
run: |
|
||||
docker push $(cat build_tag.txt)
|
||||
|
||||
|
||||
docker push ${{ steps.image-name.outputs.IMAGE_NAME }}:${{ steps.image-version.outputs.IMAGE_VERSION }}
|
||||
|
||||
5
.github/workflows/master_pr_checks.yaml
vendored
5
.github/workflows/master_pr_checks.yaml
vendored
@@ -24,8 +24,9 @@ jobs:
|
||||
|
||||
- name: Build
|
||||
env:
|
||||
RELEASE: v1.0.${{ github.run_number }}
|
||||
RELEASE: v2.0.${{ github.run_number }}
|
||||
ArmoBEServer: api.armo.cloud
|
||||
ArmoAuthServer: auth.armo.cloud
|
||||
ArmoERServer: report.armo.cloud
|
||||
ArmoWebsite: portal.armo.cloud
|
||||
CGO_ENABLED: 0
|
||||
@@ -33,7 +34,7 @@ jobs:
|
||||
|
||||
- name: Smoke Testing
|
||||
env:
|
||||
RELEASE: v1.0.${{ github.run_number }}
|
||||
RELEASE: v2.0.${{ github.run_number }}
|
||||
KUBESCAPE_SKIP_UPDATE_CHECK: "true"
|
||||
run: python3 smoke_testing/init.py ${PWD}/build/${{ matrix.os }}/kubescape
|
||||
|
||||
17
.github/workflows/post-release.yaml
vendored
Normal file
17
.github/workflows/post-release.yaml
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
name: create release digests
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [ published]
|
||||
branches: [ master ]
|
||||
|
||||
jobs:
|
||||
once:
|
||||
name: Creating digests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Digest
|
||||
uses: MCJack123/ghaction-generate-release-hashes@v1
|
||||
with:
|
||||
hash-type: sha1
|
||||
file-name: kubescape-release-digests
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -3,4 +3,5 @@
|
||||
*debug*
|
||||
*vender*
|
||||
*.pyc*
|
||||
.idea
|
||||
.idea
|
||||
ca.srl
|
||||
@@ -2,8 +2,9 @@
|
||||
|
||||
The following table lists Kubescape project maintainers
|
||||
|
||||
| Name | GitHub | Email | Organization | Repositories/Area of Expertise | Added/Renewed On |
|
||||
| Name | GitHub | Email | Organization | Role | Added/Renewed On |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| Ben Hirschberg | @slashben | ben@armosec.io | ARMO | Kubescape CLI | 2021-09-01 |
|
||||
| Rotem Refael | @rotemamsa | rrefael@armosec.io | ARMO | Kubescape CLI | 2021-10-11 |
|
||||
| David Wertenteil | @dwertent | dwertent@armosec.io | ARMO | Kubescape CLI | 2021-09-01 |
|
||||
| [Ben Hirschberg](https://www.linkedin.com/in/benyamin-ben-hirschberg-66141890) | [@slashben](https://github.com/slashben) | ben@armosec.io | [ARMO](https://www.armosec.io/) | VP R&D | 2021-09-01 |
|
||||
| [Rotem Refael](https://www.linkedin.com/in/rotem-refael) | [@rotemamsa](https://github.com/rotemamsa) | rrefael@armosec.io | [ARMO](https://www.armosec.io/) | Team Leader | 2021-10-11 |
|
||||
| [David Wertenteil](https://www.linkedin.com/in/david-wertenteil-0ba277b9) | [@dwertent](https://github.com/dwertent) | dwertent@armosec.io | [ARMO](https://www.armosec.io/) | Kubescape CLI Developer | 2021-09-01 |
|
||||
| [Bezalel Brandwine](https://www.linkedin.com/in/bezalel-brandwine) | [@Bezbran](https://github.com/Bezbran) | bbrandwine@armosec.io | [ARMO](https://www.armosec.io/) | Kubescape SaaS Developer | 2021-09-01 |
|
||||
|
||||
210
README.md
210
README.md
@@ -3,10 +3,12 @@
|
||||
[](https://github.com/armosec/kubescape/actions/workflows/build.yaml)
|
||||
[](https://goreportcard.com/report/github.com/armosec/kubescape)
|
||||
|
||||
Kubescape is the first open-source tool for testing if Kubernetes is deployed securely according to multiple frameworks:
|
||||
regulatory, customized company policies and DevSecOps best practices, such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo) and the [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) .
|
||||
Kubescape scans K8s clusters, YAML files, and HELM charts, and detect misconfigurations and software vulnerabilities at early stages of the CI/CD pipeline and provides a risk score instantly and risk trends over time.
|
||||
Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI and Github workflows.
|
||||
|
||||
|
||||
Kubescape is a K8s open-source tool providing a multi-cloud K8s single pane of glass, including risk analysis, security compliance, RBAC visualizer and image vulnerabilities scanning.
|
||||
Kubescape scans K8s clusters, YAML files, and HELM charts, detecting misconfigurations according to multiple frameworks (such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo) , [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/)), software vulnerabilities, and RBAC (role-based-access-control) violations at early stages of the CI/CD pipeline, calculates risk score instantly and shows risk trends over time.
|
||||
It became one of the fastest-growing Kubernetes tools among developers due to its easy-to-use CLI interface, flexible output formats, and automated scanning capabilities, saving Kubernetes users and admins’ precious time, effort, and resources.
|
||||
Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI, Github workflows, Prometheus, and Slack, and supports multi-cloud K8s deployments like EKS, GKE, and AKS.
|
||||
|
||||
</br>
|
||||
|
||||
@@ -24,7 +26,7 @@ curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh |
|
||||
|
||||
## Run:
|
||||
```
|
||||
kubescape scan --submit
|
||||
kubescape scan --submit --enable-host-scan
|
||||
```
|
||||
|
||||
<img src="docs/summary.png">
|
||||
@@ -46,23 +48,30 @@ We invite you to our team! We are excited about this project and want to return
|
||||
|
||||
Want to contribute? Want to discuss something? Have an issue?
|
||||
|
||||
* Feel free to pick a task from the [roadmap](docs/roadmap.md) or suggest a feature of your own. [Contact us](MAINTAINERS.md) directly for more information :)
|
||||
* Open a issue, we are trying to respond within 48 hours
|
||||
* [Join us](https://armosec.github.io/kubescape/) in a discussion on our discord server!
|
||||
|
||||
[<img src="docs/discord-banner.png" width="100" alt="logo" align="center">](https://armosec.github.io/kubescape/)
|
||||

|
||||
|
||||
|
||||
# Options and examples
|
||||
|
||||
[Kubescape docs](https://hub.armo.cloud/docs)
|
||||
|
||||
## Playground
|
||||
* [Kubescape playground](https://www.katacoda.com/pathaksaiyam/scenarios/kubescape)
|
||||
|
||||
## Tutorials
|
||||
|
||||
* [Overview](https://youtu.be/wdBkt_0Qhbg)
|
||||
* [Scanning Kubernetes YAML files](https://youtu.be/Ox6DaR7_4ZI)
|
||||
* [How To Secure Kubernetes Clusters With Kubescape And Armo](https://youtu.be/ZATGiDIDBQk)
|
||||
* [Scan Kubernetes YAML files](https://youtu.be/Ox6DaR7_4ZI)
|
||||
* [Scan Kubescape on an air-gapped environment (offline support)](https://youtu.be/IGXL9s37smM)
|
||||
* [Managing exceptions in the Kubescape SaaS version](https://youtu.be/OzpvxGmCR80)
|
||||
* [Configure and run customized frameworks](https://youtu.be/12Sanq_rEhs)
|
||||
* Customize controls configurations. [Kubescape CLI](https://youtu.be/955psg6TVu4), [Kubescape SaaS](https://youtu.be/lIMVSVhH33o)
|
||||
|
||||
## Install on Windows
|
||||
|
||||
@@ -87,30 +96,18 @@ Set-ExecutionPolicy RemoteSigned -scope CurrentUser
|
||||
brew install kubescape
|
||||
```
|
||||
|
||||
## Flags
|
||||
|
||||
| flag | default | description | options |
|
||||
|-----------------------------|---------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------|
|
||||
| `-e`/`--exclude-namespaces` | Scan all namespaces | Namespaces to exclude from scanning. Recommended to exclude `kube-system` and `kube-public` namespaces | |
|
||||
| `--include-namespaces` | Scan all namespaces | Scan specific namespaces | |
|
||||
| `-s`/`--silent` | Display progress messages | Silent progress messages | |
|
||||
| `-t`/`--fail-threshold` | `100` (do not fail) | fail command (return exit code 1) if result is above threshold | `0` -> `100` |
|
||||
| `-f`/`--format` | `pretty-printer` | Output format | `pretty-printer`/`json`/`junit`/`prometheus` |
|
||||
| `-o`/`--output` | print to stdout | Save scan result in file | |
|
||||
| `--use-from` | | Load local framework object from specified path. If not used will download latest | |
|
||||
| `--use-default` | `false` | Load local framework object from default path. If not used will download latest | `true`/`false` |
|
||||
| `--exceptions` | | Path to an [exceptions obj](examples/exceptions.json). If not set will download exceptions from Armo management portal | |
|
||||
| `--submit` | `false` | If set, Kubescape will send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not sent | `true`/`false` |
|
||||
| `--keep-local` | `false` | Kubescape will not send scan results to Armo management portal. Use this flag if you ran with the `--submit` flag in the past and you do not want to submit your current scan results | `true`/`false` |
|
||||
| `--account` | | Armo portal account ID. Default will load account ID from configMap or config file | |
|
||||
| `--kube-context` | current-context | Cluster context to scan | |
|
||||
| `--verbose` | `false` | Display all of the input resources and not only failed resources | `true`/`false` |
|
||||
|
||||
|
||||
## Usage & Examples
|
||||
|
||||
### Examples
|
||||
|
||||
|
||||
#### Scan a running Kubernetes cluster and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
|
||||
```
|
||||
kubescape scan --submit --enable-host-scan
|
||||
```
|
||||
|
||||
> Read [here](https://hub.armo.cloud/docs/host-sensor) more about the `enable-host-scan` flag
|
||||
|
||||
#### Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
|
||||
```
|
||||
kubescape scan framework nsa --submit
|
||||
@@ -130,195 +127,140 @@ kubescape scan control "Privileged container"
|
||||
|
||||
#### Scan specific namespaces
|
||||
```
|
||||
kubescape scan framework nsa --include-namespaces development,staging,production
|
||||
kubescape scan --include-namespaces development,staging,production
|
||||
```
|
||||
|
||||
#### Scan cluster and exclude some namespaces
|
||||
```
|
||||
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
|
||||
kubescape scan --exclude-namespaces kube-system,kube-public
|
||||
```
|
||||
|
||||
#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI)
|
||||
```
|
||||
kubescape scan framework nsa *.yaml
|
||||
kubescape scan *.yaml
|
||||
```
|
||||
|
||||
#### Scan kubernetes manifest files from a public github repository
|
||||
```
|
||||
kubescape scan framework nsa https://github.com/armosec/kubescape
|
||||
kubescape scan https://github.com/armosec/kubescape
|
||||
```
|
||||
|
||||
#### Display all scanned resources (including the resources who passed)
|
||||
```
|
||||
kubescape scan framework nsa --verbose
|
||||
kubescape scan --verbose
|
||||
```
|
||||
|
||||
#### Output in `json` format
|
||||
|
||||
> Add the `--format-version v2` flag
|
||||
|
||||
```
|
||||
kubescape scan framework nsa --format json --output results.json
|
||||
kubescape scan --format json --format-version v2 --output results.json
|
||||
```
|
||||
|
||||
#### Output in `junit xml` format
|
||||
```
|
||||
kubescape scan framework nsa --format junit --output results.xml
|
||||
kubescape scan --format junit --output results.xml
|
||||
```
|
||||
|
||||
#### Output in `pdf` format - Contributed by [@alegrey91](https://github.com/alegrey91)
|
||||
|
||||
```
|
||||
kubescape scan --format pdf --output results.pdf
|
||||
```
|
||||
|
||||
#### Output in `prometheus` metrics format - Contributed by [@Joibel](https://github.com/Joibel)
|
||||
|
||||
```
|
||||
kubescape scan framework nsa --format prometheus
|
||||
kubescape scan --format prometheus
|
||||
```
|
||||
|
||||
#### Scan with exceptions, objects with exceptions will be presented as `exclude` and not `fail`
|
||||
[Full documentation](examples/exceptions/README.md)
|
||||
```
|
||||
kubescape scan framework nsa --exceptions examples/exceptions/exclude-kube-namespaces.json
|
||||
kubescape scan --exceptions examples/exceptions/exclude-kube-namespaces.json
|
||||
```
|
||||
|
||||
#### Scan Helm charts - Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout
|
||||
```
|
||||
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan framework nsa -
|
||||
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan -
|
||||
```
|
||||
|
||||
e.g.
|
||||
```
|
||||
helm template bitnami/mysql --generate-name --dry-run | kubescape scan framework nsa -
|
||||
helm template bitnami/mysql --generate-name --dry-run | kubescape scan -
|
||||
```
|
||||
|
||||
|
||||
### Offline Support
|
||||
### Offline/Air-gaped Environment Support
|
||||
|
||||
[Video tutorial](https://youtu.be/IGXL9s37smM)
|
||||
|
||||
It is possible to run Kubescape offline!
|
||||
#### Download all artifacts
|
||||
|
||||
First download the framework and then scan with `--use-from` flag
|
||||
|
||||
1. Download and save in file, if file name not specified, will store save to `~/.kubescape/<framework name>.json`
|
||||
1. Download and save in local directory, if path not specified, will save all in `~/.kubescape`
|
||||
```
|
||||
kubescape download framework nsa --output nsa.json
|
||||
kubescape download artifacts --output path/to/local/dir
|
||||
```
|
||||
2. Copy the downloaded artifacts to the air-gaped/offline environment
|
||||
|
||||
3. Scan using the downloaded artifacts
|
||||
```
|
||||
kubescape scan --use-artifacts-from path/to/local/dir
|
||||
```
|
||||
|
||||
2. Scan using the downloaded framework
|
||||
#### Download a single artifacts
|
||||
|
||||
You can also download a single artifacts and scan with the `--use-from` flag
|
||||
|
||||
1. Download and save in file, if file name not specified, will save in `~/.kubescape/<framework name>.json`
|
||||
```
|
||||
kubescape scan framework nsa --use-from nsa.json
|
||||
kubescape download framework nsa --output /path/nsa.json
|
||||
```
|
||||
2. Copy the downloaded artifacts to the air-gaped/offline environment
|
||||
|
||||
3. Scan using the downloaded framework
|
||||
```
|
||||
kubescape scan framework nsa --use-from /path/nsa.json
|
||||
```
|
||||
|
||||
|
||||
## Scan Periodically using Helm - Contributed by [@yonahd](https://github.com/yonahd)
|
||||
|
||||
You can scan your cluster periodically by adding a `CronJob` that will repeatedly trigger kubescape
|
||||
|
||||
```
|
||||
helm install kubescape examples/helm_chart/
|
||||
```
|
||||
[Please follow the instructions here](https://hub.armo.cloud/docs/installation-of-armo-in-cluster)
|
||||
[helm chart repo](https://github.com/armosec/armo-helm)
|
||||
|
||||
## Scan using docker image
|
||||
|
||||
Official Docker image `quay.io/armosec/kubescape`
|
||||
|
||||
```
|
||||
docker run -v "$(pwd)/example.yaml:/app/example.yaml quay.io/armosec/kubescape scan framework nsa /app/example.yaml
|
||||
docker run -v "$(pwd)/example.yaml:/app/example.yaml quay.io/armosec/kubescape scan /app/example.yaml
|
||||
```
|
||||
|
||||
If you wish, you can [build the docker image on your own](build/README.md)
|
||||
|
||||
# Submit data manually
|
||||
|
||||
Use the `submit` command if you wish to submit data manually
|
||||
|
||||
## Submit scan results manually
|
||||
|
||||
First, scan your cluster using the `json` format flag: `kubescape scan framework <name> --format json --output path/to/results.json`.
|
||||
> Support forward compatibility by using the `--format-version v2` flag
|
||||
|
||||
Now you can submit the results to the Kubaescape SaaS version -
|
||||
First, scan your cluster using the `json` format flag: `kubescape scan framework <name> --format json --format-version v2 --output path/to/results.json`.
|
||||
|
||||
Now you can submit the results to the Kubescape SaaS version -
|
||||
```
|
||||
kubescape submit results path/to/results.json
|
||||
```
|
||||
# How to build
|
||||
|
||||
## Build using python (3.7^) script
|
||||
|
||||
Kubescape can be built using:
|
||||
|
||||
``` sh
|
||||
python build.py
|
||||
```
|
||||
|
||||
Note: In order to built using the above script, one must set the environment
|
||||
variables in this script:
|
||||
|
||||
+ RELEASE
|
||||
+ ArmoBEServer
|
||||
+ ArmoERServer
|
||||
+ ArmoWebsite
|
||||
|
||||
|
||||
## Build using go
|
||||
|
||||
Note: development (and the release process) is done with Go `1.17`
|
||||
|
||||
1. Clone Project
|
||||
```
|
||||
git clone https://github.com/armosec/kubescape.git kubescape && cd "$_"
|
||||
```
|
||||
|
||||
2. Build
|
||||
```
|
||||
go build -o kubescape .
|
||||
```
|
||||
|
||||
3. Run
|
||||
```
|
||||
./kubescape scan framework nsa
|
||||
```
|
||||
|
||||
4. Enjoy :zany_face:
|
||||
|
||||
## Docker Build
|
||||
|
||||
### Build your own Docker image
|
||||
|
||||
1. Clone Project
|
||||
```
|
||||
git clone https://github.com/armosec/kubescape.git kubescape && cd "$_"
|
||||
```
|
||||
|
||||
2. Build
|
||||
```
|
||||
docker build -t kubescape -f build/Dockerfile .
|
||||
```
|
||||
|
||||
|
||||
# Under the hood
|
||||
|
||||
## Tests
|
||||
Kubescape is running the following tests according to what is defined by [Kubernetes Hardening Guidance by NSA and CISA](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/)
|
||||
* Non-root containers
|
||||
* Immutable container filesystem
|
||||
* Privileged containers
|
||||
* hostPID, hostIPC privileges
|
||||
* hostNetwork access
|
||||
* allowedHostPaths field
|
||||
* Protecting pod service account tokens
|
||||
* Resource policies
|
||||
* Control plane hardening
|
||||
* Exposed dashboard
|
||||
* Allow privilege escalation
|
||||
* Applications credentials in configuration files
|
||||
* Cluster-admin binding
|
||||
* Exec into container
|
||||
* Dangerous capabilities
|
||||
* Insecure capabilities
|
||||
* Linux hardening
|
||||
* Ingress and Egress blocked
|
||||
* Container hostPort
|
||||
* Network policies
|
||||
* Symlink Exchange Can Allow Host Filesystem Access (CVE-2021-25741)
|
||||
|
||||
|
||||
|
||||
## Technology
|
||||
Kubescape based on OPA engine: https://github.com/open-policy-agent/opa and ARMO's posture controls.
|
||||
|
||||
The tools retrieves Kubernetes objects from the API server and runs a set of [regos snippets](https://www.openpolicyagent.org/docs/latest/policy-language/) developed by [ARMO](https://www.armosec.io/).
|
||||
The tools retrieves Kubernetes objects from the API server and runs a set of [rego's snippets](https://www.openpolicyagent.org/docs/latest/policy-language/) developed by [ARMO](https://www.armosec.io/).
|
||||
|
||||
The results by default printed in a pretty "console friendly" manner, but they can be retrieved in JSON format for further processing.
|
||||
|
||||
|
||||
41
build.py
41
build.py
@@ -8,6 +8,7 @@ BASE_GETTER_CONST = "github.com/armosec/kubescape/cautils/getter"
|
||||
BE_SERVER_CONST = BASE_GETTER_CONST + ".ArmoBEURL"
|
||||
ER_SERVER_CONST = BASE_GETTER_CONST + ".ArmoERURL"
|
||||
WEBSITE_CONST = BASE_GETTER_CONST + ".ArmoFEURL"
|
||||
AUTH_SERVER_CONST = BASE_GETTER_CONST + ".armoAUTHURL"
|
||||
|
||||
def checkStatus(status, msg):
|
||||
if status != 0:
|
||||
@@ -37,7 +38,7 @@ def main():
|
||||
print("Building Kubescape")
|
||||
|
||||
# print environment variables
|
||||
print(os.environ)
|
||||
# print(os.environ)
|
||||
|
||||
# Set some variables
|
||||
packageName = getPackageName()
|
||||
@@ -46,25 +47,45 @@ def main():
|
||||
ArmoBEServer = os.getenv("ArmoBEServer")
|
||||
ArmoERServer = os.getenv("ArmoERServer")
|
||||
ArmoWebsite = os.getenv("ArmoWebsite")
|
||||
ArmoAuthServer = os.getenv("ArmoAuthServer")
|
||||
|
||||
# Create build directory
|
||||
buildDir = getBuildDir()
|
||||
|
||||
ks_file = os.path.join(buildDir, packageName)
|
||||
hash_file = ks_file + ".sha256"
|
||||
|
||||
if not os.path.isdir(buildDir):
|
||||
os.makedirs(buildDir)
|
||||
|
||||
# Build kubescape
|
||||
ldflags = "-w -s -X %s=%s -X %s=%s -X %s=%s -X %s=%s" \
|
||||
% (buildUrl, releaseVersion, BE_SERVER_CONST, ArmoBEServer,
|
||||
ER_SERVER_CONST, ArmoERServer, WEBSITE_CONST, ArmoWebsite)
|
||||
status = subprocess.call(["go", "build", "-o", "%s/%s" % (buildDir, packageName), "-ldflags" ,ldflags])
|
||||
ldflags = "-w -s"
|
||||
if releaseVersion:
|
||||
ldflags += " -X {}={}".format(buildUrl, releaseVersion)
|
||||
if ArmoBEServer:
|
||||
ldflags += " -X {}={}".format(BE_SERVER_CONST, ArmoBEServer)
|
||||
if ArmoERServer:
|
||||
ldflags += " -X {}={}".format(ER_SERVER_CONST, ArmoERServer)
|
||||
if ArmoWebsite:
|
||||
ldflags += " -X {}={}".format(WEBSITE_CONST, ArmoWebsite)
|
||||
if ArmoAuthServer:
|
||||
ldflags += " -X {}={}".format(AUTH_SERVER_CONST, ArmoAuthServer)
|
||||
|
||||
build_command = ["go", "build", "-o", ks_file, "-ldflags" ,ldflags]
|
||||
|
||||
print("Building kubescape and saving here: {}".format(ks_file))
|
||||
print("Build command: {}".format(" ".join(build_command)))
|
||||
|
||||
status = subprocess.call(build_command)
|
||||
checkStatus(status, "Failed to build kubescape")
|
||||
|
||||
sha1 = hashlib.sha1()
|
||||
with open(buildDir + "/" + packageName, "rb") as kube:
|
||||
sha1.update(kube.read())
|
||||
with open(buildDir + "/" + packageName + ".sha1", "w") as kube_sha:
|
||||
kube_sha.write(sha1.hexdigest())
|
||||
sha256 = hashlib.sha256()
|
||||
with open(ks_file, "rb") as kube:
|
||||
sha256.update(kube.read())
|
||||
with open(hash_file, "w") as kube_sha:
|
||||
hash = sha256.hexdigest()
|
||||
print("kubescape hash: {}, file: {}".format(hash, hash_file))
|
||||
kube_sha.write(sha256.hexdigest())
|
||||
|
||||
print("Build Done")
|
||||
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
FROM golang:1.17-alpine as builder
|
||||
#ENV GOPROXY=https://goproxy.io,direct
|
||||
|
||||
ARG run_number
|
||||
ARG image_version
|
||||
|
||||
ENV RELEASE=v1.0.${run_number}
|
||||
ENV RELEASE=$image_version
|
||||
|
||||
ENV GO111MODULE=
|
||||
|
||||
@@ -21,7 +21,6 @@ ADD . .
|
||||
RUN python build.py
|
||||
|
||||
RUN ls -ltr build/ubuntu-latest
|
||||
RUN cat /work/build/ubuntu-latest/kubescape.sha1
|
||||
|
||||
FROM alpine
|
||||
COPY --from=builder /work/build/ubuntu-latest/kubescape /usr/bin/kubescape
|
||||
|
||||
13
build/README.md
Normal file
13
build/README.md
Normal file
@@ -0,0 +1,13 @@
|
||||
## Docker Build
|
||||
|
||||
### Build your own Docker image
|
||||
|
||||
1. Clone Project
|
||||
```
|
||||
git clone https://github.com/armosec/kubescape.git kubescape && cd "$_"
|
||||
```
|
||||
|
||||
2. Build
|
||||
```
|
||||
docker build -t kubescape -f build/Dockerfile .
|
||||
```
|
||||
@@ -23,25 +23,31 @@ func ConfigFileFullPath() string { return getter.GetDefaultPath(configFileName +
|
||||
// ======================================================================================
|
||||
|
||||
type ConfigObj struct {
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
Token string `json:"invitationParam"`
|
||||
CustomerAdminEMail string `json:"adminMail"`
|
||||
ClusterName string `json:"clusterName"`
|
||||
}
|
||||
|
||||
func (co *ConfigObj) Json() []byte {
|
||||
if b, err := json.Marshal(co); err == nil {
|
||||
return b
|
||||
}
|
||||
return []byte{}
|
||||
AccountID string `json:"accountID,omitempty"`
|
||||
ClientID string `json:"clientID,omitempty"`
|
||||
SecretKey string `json:"secretKey,omitempty"`
|
||||
CustomerGUID string `json:"customerGUID,omitempty"` // Deprecated
|
||||
Token string `json:"invitationParam,omitempty"`
|
||||
CustomerAdminEMail string `json:"adminMail,omitempty"`
|
||||
ClusterName string `json:"clusterName,omitempty"`
|
||||
}
|
||||
|
||||
// Config - convert ConfigObj to config file
|
||||
func (co *ConfigObj) Config() []byte {
|
||||
|
||||
// remove cluster name before saving to file
|
||||
clusterName := co.ClusterName
|
||||
co.ClusterName = "" // remove cluster name before saving to file
|
||||
b, err := json.Marshal(co)
|
||||
customerAdminEMail := co.CustomerAdminEMail
|
||||
token := co.Token
|
||||
co.ClusterName = ""
|
||||
co.Token = ""
|
||||
co.CustomerAdminEMail = ""
|
||||
|
||||
b, err := json.MarshalIndent(co, "", " ")
|
||||
|
||||
co.ClusterName = clusterName
|
||||
co.CustomerAdminEMail = customerAdminEMail
|
||||
co.Token = token
|
||||
|
||||
if err == nil {
|
||||
return b
|
||||
@@ -56,10 +62,12 @@ func (co *ConfigObj) Config() []byte {
|
||||
type ITenantConfig interface {
|
||||
// set
|
||||
SetTenant() error
|
||||
UpdateCachedConfig() error
|
||||
DeleteCachedConfig() error
|
||||
|
||||
// getters
|
||||
GetClusterName() string
|
||||
GetCustomerGUID() string
|
||||
GetAccountID() string
|
||||
GetConfigObj() *ConfigObj
|
||||
// GetBackendAPI() getter.IBackend
|
||||
// GenerateURL()
|
||||
@@ -93,46 +101,52 @@ func NewLocalConfig(backendAPI getter.IBackend, customerGUID, clusterName string
|
||||
lc.configObj = configObj
|
||||
}
|
||||
if customerGUID != "" {
|
||||
lc.configObj.CustomerGUID = customerGUID // override config customerGUID
|
||||
lc.configObj.AccountID = customerGUID // override config customerGUID
|
||||
}
|
||||
if clusterName != "" {
|
||||
lc.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
|
||||
}
|
||||
if lc.configObj.CustomerGUID != "" {
|
||||
if err := lc.SetTenant(); err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}
|
||||
getAccountFromEnv(lc.configObj)
|
||||
|
||||
lc.backendAPI.SetAccountID(lc.configObj.AccountID)
|
||||
lc.backendAPI.SetClientID(lc.configObj.ClientID)
|
||||
lc.backendAPI.SetSecretKey(lc.configObj.SecretKey)
|
||||
|
||||
return lc
|
||||
}
|
||||
|
||||
func (lc *LocalConfig) GetConfigObj() *ConfigObj { return lc.configObj }
|
||||
func (lc *LocalConfig) GetCustomerGUID() string { return lc.configObj.CustomerGUID }
|
||||
func (lc *LocalConfig) SetCustomerGUID(customerGUID string) { lc.configObj.CustomerGUID = customerGUID }
|
||||
func (lc *LocalConfig) GetClusterName() string { return lc.configObj.ClusterName }
|
||||
func (lc *LocalConfig) IsConfigFound() bool { return existsConfigFile() }
|
||||
func (lc *LocalConfig) GetConfigObj() *ConfigObj { return lc.configObj }
|
||||
func (lc *LocalConfig) GetAccountID() string { return lc.configObj.AccountID }
|
||||
func (lc *LocalConfig) GetClusterName() string { return lc.configObj.ClusterName }
|
||||
func (lc *LocalConfig) IsConfigFound() bool { return existsConfigFile() }
|
||||
func (lc *LocalConfig) SetTenant() error {
|
||||
|
||||
// ARMO tenant GUID
|
||||
if err := getTenantConfigFromBE(lc.backendAPI, lc.configObj); err != nil {
|
||||
return err
|
||||
}
|
||||
updateConfigFile(lc.configObj)
|
||||
lc.UpdateCachedConfig()
|
||||
return nil
|
||||
|
||||
}
|
||||
func (lc *LocalConfig) UpdateCachedConfig() error {
|
||||
return updateConfigFile(lc.configObj)
|
||||
}
|
||||
|
||||
func (lc *LocalConfig) DeleteCachedConfig() error {
|
||||
return DeleteConfigFile()
|
||||
}
|
||||
|
||||
func getTenantConfigFromBE(backendAPI getter.IBackend, configObj *ConfigObj) error {
|
||||
|
||||
// get from armoBE
|
||||
backendAPI.SetCustomerGUID(configObj.CustomerGUID)
|
||||
tenantResponse, err := backendAPI.GetCustomerGUID()
|
||||
tenantResponse, err := backendAPI.GetTenant()
|
||||
if err == nil && tenantResponse != nil {
|
||||
if tenantResponse.AdminMail != "" { // registered tenant
|
||||
configObj.CustomerAdminEMail = tenantResponse.AdminMail
|
||||
} else { // new tenant
|
||||
configObj.Token = tenantResponse.Token
|
||||
configObj.CustomerGUID = tenantResponse.TenantID
|
||||
configObj.AccountID = tenantResponse.TenantID
|
||||
}
|
||||
} else {
|
||||
if err != nil && !strings.Contains(err.Error(), "already exists") {
|
||||
@@ -154,8 +168,11 @@ Supported environments variables:
|
||||
KS_DEFAULT_CONFIGMAP_NAME // name of configmap, if not set default is 'kubescape'
|
||||
KS_DEFAULT_CONFIGMAP_NAMESPACE // configmap namespace, if not set default is 'default'
|
||||
|
||||
KS_ACCOUNT_ID
|
||||
KS_CLIENT_ID
|
||||
KS_SECRET_KEY
|
||||
|
||||
TODO - supprot:
|
||||
KS_ACCOUNT // Account ID
|
||||
KS_CACHE // path to cached files
|
||||
*/
|
||||
type ClusterConfig struct {
|
||||
@@ -187,32 +204,30 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
|
||||
c.configObj = configObj
|
||||
}
|
||||
if customerGUID != "" {
|
||||
c.configObj.CustomerGUID = customerGUID // override config customerGUID
|
||||
c.configObj.AccountID = customerGUID // override config customerGUID
|
||||
}
|
||||
if clusterName != "" {
|
||||
c.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
|
||||
}
|
||||
if c.configObj.CustomerGUID != "" {
|
||||
if err := c.SetTenant(); err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}
|
||||
getAccountFromEnv(c.configObj)
|
||||
|
||||
if c.configObj.ClusterName == "" {
|
||||
c.configObj.ClusterName = AdoptClusterName(k8sinterface.GetClusterName())
|
||||
} else { // override the cluster name if it has unwanted characters
|
||||
c.configObj.ClusterName = AdoptClusterName(c.configObj.ClusterName)
|
||||
}
|
||||
|
||||
c.backendAPI.SetAccountID(c.configObj.AccountID)
|
||||
c.backendAPI.SetClientID(c.configObj.ClientID)
|
||||
c.backendAPI.SetSecretKey(c.configObj.SecretKey)
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
|
||||
func (c *ClusterConfig) GetDefaultNS() string { return c.configMapNamespace }
|
||||
func (c *ClusterConfig) GetCustomerGUID() string { return c.configObj.CustomerGUID }
|
||||
func (c *ClusterConfig) SetCustomerGUID(customerGUID string) { c.configObj.CustomerGUID = customerGUID }
|
||||
func (c *ClusterConfig) IsConfigFound() bool {
|
||||
return existsConfigFile() || c.existsConfigMap()
|
||||
}
|
||||
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
|
||||
func (c *ClusterConfig) GetDefaultNS() string { return c.configMapNamespace }
|
||||
func (c *ClusterConfig) GetAccountID() string { return c.configObj.AccountID }
|
||||
func (c *ClusterConfig) IsConfigFound() bool { return existsConfigFile() || c.existsConfigMap() }
|
||||
|
||||
func (c *ClusterConfig) SetTenant() error {
|
||||
|
||||
@@ -220,17 +235,34 @@ func (c *ClusterConfig) SetTenant() error {
|
||||
if err := getTenantConfigFromBE(c.backendAPI, c.configObj); err != nil {
|
||||
return err
|
||||
}
|
||||
// update/create config
|
||||
if c.existsConfigMap() {
|
||||
c.updateConfigMap()
|
||||
} else {
|
||||
c.createConfigMap()
|
||||
}
|
||||
updateConfigFile(c.configObj)
|
||||
c.UpdateCachedConfig()
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) UpdateCachedConfig() error {
|
||||
// update/create config
|
||||
if c.existsConfigMap() {
|
||||
if err := c.updateConfigMap(); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := c.createConfigMap(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return updateConfigFile(c.configObj)
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) DeleteCachedConfig() error {
|
||||
if err := c.deleteConfigMap(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := DeleteConfigFile(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (c *ClusterConfig) GetClusterName() string {
|
||||
return c.configObj.ClusterName
|
||||
}
|
||||
@@ -409,6 +441,10 @@ func readConfig(dat []byte) (*ConfigObj, error) {
|
||||
if err := json.Unmarshal(dat, configObj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if configObj.AccountID == "" {
|
||||
configObj.AccountID = configObj.CustomerGUID
|
||||
}
|
||||
configObj.CustomerGUID = ""
|
||||
return configObj, nil
|
||||
}
|
||||
|
||||
@@ -421,8 +457,7 @@ func (clusterConfig *ClusterConfig) IsSubmitted() bool {
|
||||
func (clusterConfig *ClusterConfig) IsRegistered() bool {
|
||||
|
||||
// get from armoBE
|
||||
clusterConfig.backendAPI.SetCustomerGUID(clusterConfig.GetCustomerGUID())
|
||||
tenantResponse, err := clusterConfig.backendAPI.GetCustomerGUID()
|
||||
tenantResponse, err := clusterConfig.backendAPI.GetTenant()
|
||||
if err == nil && tenantResponse != nil {
|
||||
if tenantResponse.AdminMail != "" { // this customer already belongs to some user
|
||||
return true
|
||||
@@ -431,16 +466,7 @@ func (clusterConfig *ClusterConfig) IsRegistered() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (clusterConfig *ClusterConfig) DeleteConfig() error {
|
||||
if err := clusterConfig.DeleteConfigMap(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := DeleteConfigFile(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (clusterConfig *ClusterConfig) DeleteConfigMap() error {
|
||||
func (clusterConfig *ClusterConfig) deleteConfigMap() error {
|
||||
return clusterConfig.k8s.KubernetesClient.CoreV1().ConfigMaps(clusterConfig.configMapNamespace).Delete(context.Background(), clusterConfig.configMapName, metav1.DeleteOptions{})
|
||||
}
|
||||
|
||||
@@ -465,3 +491,16 @@ func getConfigMapNamespace() string {
|
||||
}
|
||||
return "default"
|
||||
}
|
||||
|
||||
func getAccountFromEnv(configObj *ConfigObj) {
|
||||
// load from env
|
||||
if accountID := os.Getenv("KS_ACCOUNT_ID"); accountID != "" {
|
||||
configObj.AccountID = accountID
|
||||
}
|
||||
if clientID := os.Getenv("KS_CLIENT_ID"); clientID != "" {
|
||||
configObj.ClientID = clientID
|
||||
}
|
||||
if secretKey := os.Getenv("KS_SECRET_KEY"); secretKey != "" {
|
||||
configObj.SecretKey = secretKey
|
||||
}
|
||||
}
|
||||
|
||||
@@ -13,11 +13,11 @@ type K8SResources map[string][]string
|
||||
|
||||
type OPASessionObj struct {
|
||||
K8SResources *K8SResources // input k8s objects
|
||||
Frameworks []reporthandling.Framework // list of frameworks to scan
|
||||
Policies []reporthandling.Framework // list of frameworks to scan
|
||||
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<rtesource ID>]<resource>
|
||||
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<rtesource ID>]<resource result>
|
||||
PostureReport *reporthandling.PostureReport // scan results v1
|
||||
Report *reporthandlingv2.PostureReport // scan results v2
|
||||
PostureReport *reporthandling.PostureReport // scan results v1 - Remove
|
||||
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
|
||||
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
|
||||
RegoInputData RegoInputData // input passed to rgo for scanning. map[<control name>][<input arguments>]
|
||||
}
|
||||
@@ -25,7 +25,7 @@ type OPASessionObj struct {
|
||||
func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources) *OPASessionObj {
|
||||
return &OPASessionObj{
|
||||
Report: &reporthandlingv2.PostureReport{},
|
||||
Frameworks: frameworks,
|
||||
Policies: frameworks,
|
||||
K8SResources: k8sResources,
|
||||
AllResources: make(map[string]workloadinterface.IMetadata),
|
||||
ResourcesResult: make(map[string]resourcesresults.Result),
|
||||
@@ -38,7 +38,7 @@ func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SRe
|
||||
|
||||
func NewOPASessionObjMock() *OPASessionObj {
|
||||
return &OPASessionObj{
|
||||
Frameworks: nil,
|
||||
Policies: nil,
|
||||
K8SResources: nil,
|
||||
AllResources: make(map[string]workloadinterface.IMetadata),
|
||||
ResourcesResult: make(map[string]resourcesresults.Result),
|
||||
|
||||
@@ -2,6 +2,7 @@ package cautils
|
||||
|
||||
import (
|
||||
pkgcautils "github.com/armosec/utils-go/utils"
|
||||
"golang.org/x/mod/semver"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
@@ -15,7 +16,7 @@ func NewPolicies() *Policies {
|
||||
|
||||
func (policies *Policies) Set(frameworks []reporthandling.Framework, version string) {
|
||||
for i := range frameworks {
|
||||
if frameworks[i].Name != "" {
|
||||
if frameworks[i].Name != "" && len(frameworks[i].Controls) > 0 {
|
||||
policies.Frameworks = append(policies.Frameworks, frameworks[i].Name)
|
||||
}
|
||||
for j := range frameworks[i].Controls {
|
||||
@@ -30,6 +31,7 @@ func (policies *Policies) Set(frameworks []reporthandling.Framework, version str
|
||||
policies.Controls[frameworks[i].Controls[j].ControlID] = frameworks[i].Controls[j]
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -49,14 +51,15 @@ func ruleWithArmoOpaDependency(attributes map[string]interface{}) bool {
|
||||
func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version string) bool {
|
||||
if from, ok := attributes["useFromKubescapeVersion"]; ok && from != nil {
|
||||
if version != "" {
|
||||
if from.(string) > BuildNumber {
|
||||
|
||||
if semver.Compare(from.(string), BuildNumber) > 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
if until, ok := attributes["useUntilKubescapeVersion"]; ok && until != nil {
|
||||
if version != "" {
|
||||
if until.(string) <= BuildNumber {
|
||||
if semver.Compare(BuildNumber, until.(string)) >= 0 {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
@@ -10,20 +9,10 @@ import (
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
var silent = false
|
||||
|
||||
func SetSilentMode(s bool) {
|
||||
silent = s
|
||||
}
|
||||
|
||||
func IsSilent() bool {
|
||||
return silent
|
||||
}
|
||||
|
||||
var FailureDisplay = color.New(color.Bold, color.FgHiRed).FprintfFunc()
|
||||
var WarningDisplay = color.New(color.Bold, color.FgCyan).FprintfFunc()
|
||||
var WarningDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
|
||||
var FailureTextDisplay = color.New(color.Faint, color.FgHiRed).FprintfFunc()
|
||||
var InfoDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
|
||||
var InfoDisplay = color.New(color.Bold, color.FgCyan).FprintfFunc()
|
||||
var InfoTextDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
|
||||
var SimpleDisplay = color.New().FprintfFunc()
|
||||
var SuccessDisplay = color.New(color.Bold, color.FgHiGreen).FprintfFunc()
|
||||
@@ -31,41 +20,8 @@ var DescriptionDisplay = color.New(color.Faint, color.FgWhite).FprintfFunc()
|
||||
|
||||
var Spinner *spinner.Spinner
|
||||
|
||||
func ScanStartDisplay() {
|
||||
if IsSilent() {
|
||||
return
|
||||
}
|
||||
InfoDisplay(os.Stderr, "ARMO security scanner starting\n")
|
||||
}
|
||||
|
||||
func SuccessTextDisplay(str string) {
|
||||
if IsSilent() {
|
||||
return
|
||||
}
|
||||
SuccessDisplay(os.Stderr, "[success] ")
|
||||
SimpleDisplay(os.Stderr, fmt.Sprintf("%s\n", str))
|
||||
|
||||
}
|
||||
|
||||
func ErrorDisplay(str string) {
|
||||
if IsSilent() {
|
||||
return
|
||||
}
|
||||
FailureDisplay(os.Stderr, "[Error] ")
|
||||
SimpleDisplay(os.Stderr, fmt.Sprintf("%s\n", str))
|
||||
|
||||
}
|
||||
|
||||
func ProgressTextDisplay(str string) {
|
||||
if IsSilent() {
|
||||
return
|
||||
}
|
||||
InfoDisplay(os.Stderr, "[progress] ")
|
||||
SimpleDisplay(os.Stderr, fmt.Sprintf("%s\n", str))
|
||||
|
||||
}
|
||||
func StartSpinner() {
|
||||
if !IsSilent() && isatty.IsTerminal(os.Stdout.Fd()) {
|
||||
if isatty.IsTerminal(os.Stdout.Fd()) {
|
||||
Spinner = spinner.New(spinner.CharSets[7], 100*time.Millisecond) // Build our new spinner
|
||||
Spinner.Start()
|
||||
}
|
||||
|
||||
239
cautils/fileutils.go
Normal file
239
cautils/fileutils.go
Normal file
@@ -0,0 +1,239 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
var (
|
||||
YAML_PREFIX = []string{".yaml", ".yml"}
|
||||
JSON_PREFIX = []string{".json"}
|
||||
)
|
||||
|
||||
type FileFormat string
|
||||
|
||||
const (
|
||||
YAML_FILE_FORMAT FileFormat = "yaml"
|
||||
JSON_FILE_FORMAT FileFormat = "json"
|
||||
)
|
||||
|
||||
func LoadResourcesFromFiles(inputPatterns []string) ([]workloadinterface.IMetadata, error) {
|
||||
files, errs := listFiles(inputPatterns)
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("%v", errs))
|
||||
}
|
||||
if len(files) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
workloads, errs := loadFiles(files)
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("%v", errs))
|
||||
}
|
||||
return workloads, nil
|
||||
}
|
||||
|
||||
func loadFiles(filePaths []string) ([]workloadinterface.IMetadata, []error) {
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
errs := []error{}
|
||||
for i := range filePaths {
|
||||
f, err := loadFile(filePaths[i])
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
w, e := ReadFile(f, GetFileFormat(filePaths[i]))
|
||||
errs = append(errs, e...)
|
||||
if w != nil {
|
||||
workloads = append(workloads, w...)
|
||||
}
|
||||
}
|
||||
return workloads, errs
|
||||
}
|
||||
|
||||
func loadFile(filePath string) ([]byte, error) {
|
||||
return os.ReadFile(filePath)
|
||||
}
|
||||
func ReadFile(fileContent []byte, fileFromat FileFormat) ([]workloadinterface.IMetadata, []error) {
|
||||
|
||||
switch fileFromat {
|
||||
case YAML_FILE_FORMAT:
|
||||
return readYamlFile(fileContent)
|
||||
case JSON_FILE_FORMAT:
|
||||
return readJsonFile(fileContent)
|
||||
default:
|
||||
return nil, nil // []error{fmt.Errorf("file extension %s not supported", fileFromat)}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func listFiles(patterns []string) ([]string, []error) {
	files := []string{}
	errs := []error{}
	for i := range patterns {
		if strings.HasPrefix(patterns[i], "http") {
			continue
		}
		if !filepath.IsAbs(patterns[i]) {
			o, _ := os.Getwd()
			patterns[i] = filepath.Join(o, patterns[i])
		}
		if IsFile(patterns[i]) {
			files = append(files, patterns[i])
		} else {
			f, err := glob(filepath.Split(patterns[i])) // filepath.Glob(patterns[i])
			if err != nil {
				errs = append(errs, err)
			} else {
				files = append(files, f...)
			}
		}
	}
	return files, errs
}

func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, []error) {
	errs := []error{}

	r := bytes.NewReader(yamlFile)
	dec := yaml.NewDecoder(r)
	yamlObjs := []workloadinterface.IMetadata{}

	var t interface{}
	for dec.Decode(&t) == nil {
		j := convertYamlToJson(t)
		if j == nil {
			continue
		}
		if obj, ok := j.(map[string]interface{}); ok {
			if o := objectsenvelopes.NewObject(obj); o != nil {
				if o.GetKind() == "List" {
					yamlObjs = append(yamlObjs, handleListObject(o)...)
				} else {
					yamlObjs = append(yamlObjs, o)
				}
			}
		} else {
			errs = append(errs, fmt.Errorf("failed to convert yaml file to map[string]interface, file content: %v", j))
		}
	}

	return yamlObjs, errs
}

func readJsonFile(jsonFile []byte) ([]workloadinterface.IMetadata, []error) {
	workloads := []workloadinterface.IMetadata{}
	var jsonObj interface{}
	if err := json.Unmarshal(jsonFile, &jsonObj); err != nil {
		return workloads, []error{err}
	}

	convertJsonToWorkload(jsonObj, &workloads)

	return workloads, nil
}
func convertJsonToWorkload(jsonObj interface{}, workloads *[]workloadinterface.IMetadata) {

	switch x := jsonObj.(type) {
	case map[string]interface{}:
		if o := objectsenvelopes.NewObject(x); o != nil {
			(*workloads) = append(*workloads, o)
		}
	case []interface{}:
		for i := range x {
			convertJsonToWorkload(x[i], workloads)
		}
	}
}
func convertYamlToJson(i interface{}) interface{} {
	switch x := i.(type) {
	case map[interface{}]interface{}:
		m2 := map[string]interface{}{}
		for k, v := range x {
			if s, ok := k.(string); ok {
				m2[s] = convertYamlToJson(v)
			}
		}
		return m2
	case []interface{}:
		for i, v := range x {
			x[i] = convertYamlToJson(v)
		}
	}
	return i
}
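
// Editor's note (not part of the original diff): a minimal sketch of why convertYamlToJson is
// needed, assuming the yaml package used here is gopkg.in/yaml.v2, whose decoder produces
// map[interface{}]interface{} values that encoding/json cannot marshal directly.
func exampleWhyConvertYamlToJson() {
	var v interface{}
	_ = yaml.Unmarshal([]byte("kind: Pod"), &v)
	if _, err := json.Marshal(v); err != nil {
		fmt.Println(err) // json: unsupported type: map[interface {}]interface {}
	}
	b, _ := json.Marshal(convertYamlToJson(v)) // succeeds once map keys are plain strings
	fmt.Println(string(b))                     // {"kind":"Pod"}
}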

func IsYaml(filePath string) bool {
	return StringInSlice(YAML_PREFIX, filepath.Ext(filePath)) != ValueNotFound
}

func IsJson(filePath string) bool {
	return StringInSlice(JSON_PREFIX, filepath.Ext(filePath)) != ValueNotFound
}

func glob(root, pattern string) ([]string, error) {
	var matches []string

	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		if matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {
			return err
		} else if matched {
			matches = append(matches, path)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return matches, nil
}
func IsFile(name string) bool {
	if fi, err := os.Stat(name); err == nil {
		if fi.Mode().IsRegular() {
			return true
		}
	}
	return false
}

func GetFileFormat(filePath string) FileFormat {
	if IsYaml(filePath) {
		return YAML_FILE_FORMAT
	} else if IsJson(filePath) {
		return JSON_FILE_FORMAT
	} else {
		return FileFormat(filePath)
	}
}

// handleListObject handles a List manifest
func handleListObject(obj workloadinterface.IMetadata) []workloadinterface.IMetadata {
	yamlObjs := []workloadinterface.IMetadata{}
	if i, ok := workloadinterface.InspectMap(obj.GetObject(), "items"); ok && i != nil {
		if items, ok := i.([]interface{}); ok && items != nil {
			for item := range items {
				if m, ok := items[item].(map[string]interface{}); ok && m != nil {
					if o := objectsenvelopes.NewObject(m); o != nil {
						yamlObjs = append(yamlObjs, o)
					}
				}
			}
		}
	}
	return yamlObjs
}
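
// Editor's note (not part of the original diff): a minimal usage sketch for the loaders above.
// The import path and the glob pattern below are assumptions; it presumes these helpers live in
// the cautils package, as the test file in the next hunk suggests.
package main

import (
	"fmt"

	"github.com/armosec/kubescape/cautils"
)

func main() {
	workloads, err := cautils.LoadResourcesFromFiles([]string{"deployments/*.yaml"}) // placeholder pattern
	if err != nil {
		fmt.Println("failed to load resources:", err)
		return
	}
	for _, w := range workloads {
		fmt.Println(w.GetKind(), w.GetID()) // IMetadata accessors, as also used by handleListObject above
	}
}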

@@ -1,11 +1,12 @@
package resourcehandler
package cautils

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

func onlineBoutiquePath() string {
@@ -14,32 +15,26 @@ func onlineBoutiquePath() string {
}

func TestListFiles(t *testing.T) {
	workDir, err := os.Getwd()
	fmt.Printf("\n------------------\n%s,%v\n--------------\n", workDir, err)

	filesPath := onlineBoutiquePath()
	fmt.Printf("\n------------------\n%s\n--------------\n", filesPath)

	files, errs := listFiles([]string{filesPath})
	if len(errs) > 0 {
		t.Error(errs)
	}
	expected := 12
	if len(files) != expected {
		t.Errorf("wrong number of files, expected: %d, found: %d", expected, len(files))
	}
	assert.Equal(t, 0, len(errs))
	assert.Equal(t, 12, len(files))
}

func TestLoadFiles(t *testing.T) {
	files, _ := listFiles([]string{onlineBoutiquePath()})
	loadFiles(files)
	_, err := loadFiles(files)
	assert.Equal(t, 0, len(err))
}

func TestLoadFile(t *testing.T) {
	files, _ := listFiles([]string{strings.Replace(onlineBoutiquePath(), "*", "adservice.yaml", 1)})
	assert.Equal(t, 1, len(files))

	_, err := loadFile(files[0])
	if err != nil {
		t.Errorf("%v", err)
	}
	assert.NoError(t, err)
}
func TestMapResources(t *testing.T) {
	// policyHandler := &PolicyHandler{}

@@ -1,15 +1,18 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// =======================================================================================================================
|
||||
@@ -18,42 +21,58 @@ import (
|
||||
|
||||
var (
|
||||
// ATTENTION!!!
|
||||
// Changes in this URLs variable names, or in the usage is affecting the build process! BE CAREFULL
|
||||
armoERURL = "report.armo.cloud"
|
||||
armoBEURL = "api.armo.cloud"
|
||||
armoFEURL = "portal.armo.cloud"
|
||||
// Changes in this URLs variable names, or in the usage is affecting the build process! BE CAREFUL
|
||||
armoERURL = "report.armo.cloud"
|
||||
armoBEURL = "api.armo.cloud"
|
||||
armoFEURL = "portal.armo.cloud"
|
||||
armoAUTHURL = "auth.armo.cloud"
|
||||
|
||||
armoDevERURL = "report.eudev3.cyberarmorsoft.com"
|
||||
armoDevBEURL = "eggdashbe.eudev3.cyberarmorsoft.com"
|
||||
armoDevFEURL = "armoui-dev.eudev3.cyberarmorsoft.com"
|
||||
armoStageERURL = "report-ks.eustage2.cyberarmorsoft.com"
|
||||
armoStageBEURL = "api-stage.armo.cloud"
|
||||
armoStageFEURL = "armoui.eustage2.cyberarmorsoft.com"
|
||||
armoStageAUTHURL = "eggauth.eustage2.cyberarmorsoft.com"
|
||||
|
||||
armoDevERURL = "report.eudev3.cyberarmorsoft.com"
|
||||
armoDevBEURL = "api-dev.armo.cloud"
|
||||
armoDevFEURL = "armoui-dev.eudev3.cyberarmorsoft.com"
|
||||
armoDevAUTHURL = "eggauth.eudev3.cyberarmorsoft.com"
|
||||
)
|
||||
|
||||
// Armo API for downloading policies
|
||||
type ArmoAPI struct {
|
||||
httpClient *http.Client
|
||||
apiURL string
|
||||
erURL string
|
||||
feURL string
|
||||
customerGUID string
|
||||
httpClient *http.Client
|
||||
apiURL string
|
||||
authURL string
|
||||
erURL string
|
||||
feURL string
|
||||
accountID string
|
||||
clientID string
|
||||
secretKey string
|
||||
feToken FeLoginResponse
|
||||
authCookie string
|
||||
loggedIn bool
|
||||
}
|
||||
|
||||
var globalArmoAPIConnecctor *ArmoAPI
|
||||
var globalArmoAPIConnector *ArmoAPI
|
||||
|
||||
func SetARMOAPIConnector(armoAPI *ArmoAPI) {
|
||||
globalArmoAPIConnecctor = armoAPI
|
||||
logger.L().Debug("Armo URLs", helpers.String("api", armoAPI.apiURL), helpers.String("auth", armoAPI.authURL), helpers.String("report", armoAPI.erURL), helpers.String("UI", armoAPI.feURL))
|
||||
globalArmoAPIConnector = armoAPI
|
||||
}
|
||||
|
||||
func GetArmoAPIConnector() *ArmoAPI {
|
||||
if globalArmoAPIConnecctor == nil {
|
||||
glog.Error("returning nil API connector")
|
||||
if globalArmoAPIConnector == nil {
|
||||
// logger.L().Error("returning nil API connector")
|
||||
SetARMOAPIConnector(NewARMOAPIProd())
|
||||
}
|
||||
return globalArmoAPIConnecctor
|
||||
return globalArmoAPIConnector
|
||||
}
|
||||
|
||||
func NewARMOAPIDev() *ArmoAPI {
|
||||
apiObj := newArmoAPI()
|
||||
|
||||
apiObj.apiURL = armoDevBEURL
|
||||
apiObj.authURL = armoDevAUTHURL
|
||||
apiObj.erURL = armoDevERURL
|
||||
apiObj.feURL = armoDevFEURL
|
||||
|
||||
@@ -66,16 +85,29 @@ func NewARMOAPIProd() *ArmoAPI {
|
||||
apiObj.apiURL = armoBEURL
|
||||
apiObj.erURL = armoERURL
|
||||
apiObj.feURL = armoFEURL
|
||||
apiObj.authURL = armoAUTHURL
|
||||
|
||||
return apiObj
|
||||
}
|
||||
|
||||
func NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL string) *ArmoAPI {
|
||||
func NewARMOAPIStaging() *ArmoAPI {
|
||||
apiObj := newArmoAPI()
|
||||
|
||||
apiObj.apiURL = armoStageBEURL
|
||||
apiObj.erURL = armoStageERURL
|
||||
apiObj.feURL = armoStageFEURL
|
||||
apiObj.authURL = armoStageAUTHURL
|
||||
|
||||
return apiObj
|
||||
}
|
||||
|
||||
func NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL, armoAUTHURL string) *ArmoAPI {
|
||||
apiObj := newArmoAPI()
|
||||
|
||||
apiObj.erURL = armoERURL
|
||||
apiObj.apiURL = armoBEURL
|
||||
apiObj.feURL = armoFEURL
|
||||
apiObj.authURL = armoAUTHURL
|
||||
|
||||
return apiObj
|
||||
}
|
||||
@@ -83,22 +115,46 @@ func NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL string) *ArmoAPI {
|
||||
func newArmoAPI() *ArmoAPI {
|
||||
return &ArmoAPI{
|
||||
httpClient: &http.Client{Timeout: time.Duration(61) * time.Second},
|
||||
loggedIn: false,
|
||||
}
|
||||
}
|
||||
func (armoAPI *ArmoAPI) SetCustomerGUID(customerGUID string) {
|
||||
armoAPI.customerGUID = customerGUID
|
||||
|
||||
}
|
||||
func (armoAPI *ArmoAPI) GetFrontendURL() string {
|
||||
return armoAPI.feURL
|
||||
func (armoAPI *ArmoAPI) Post(fullURL string, headers map[string]string, body []byte) (string, error) {
|
||||
if headers == nil {
|
||||
headers = make(map[string]string)
|
||||
}
|
||||
armoAPI.appendAuthHeaders(headers)
|
||||
return HttpPost(armoAPI.httpClient, fullURL, headers, body)
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetReportReceiverURL() string {
|
||||
return armoAPI.erURL
|
||||
func (armoAPI *ArmoAPI) Delete(fullURL string, headers map[string]string) (string, error) {
|
||||
if headers == nil {
|
||||
headers = make(map[string]string)
|
||||
}
|
||||
armoAPI.appendAuthHeaders(headers)
|
||||
return HttpDelete(armoAPI.httpClient, fullURL, headers)
|
||||
}
|
||||
func (armoAPI *ArmoAPI) Get(fullURL string, headers map[string]string) (string, error) {
|
||||
if headers == nil {
|
||||
headers = make(map[string]string)
|
||||
}
|
||||
armoAPI.appendAuthHeaders(headers)
|
||||
return HttpGetter(armoAPI.httpClient, fullURL, headers)
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetAccountID() string { return armoAPI.accountID }
|
||||
func (armoAPI *ArmoAPI) IsLoggedIn() bool { return armoAPI.loggedIn }
|
||||
func (armoAPI *ArmoAPI) GetClientID() string { return armoAPI.clientID }
|
||||
func (armoAPI *ArmoAPI) GetSecretKey() string { return armoAPI.secretKey }
|
||||
func (armoAPI *ArmoAPI) GetFrontendURL() string { return armoAPI.feURL }
|
||||
func (armoAPI *ArmoAPI) GetAPIURL() string { return armoAPI.apiURL }
|
||||
func (armoAPI *ArmoAPI) GetReportReceiverURL() string { return armoAPI.erURL }
|
||||
func (armoAPI *ArmoAPI) SetAccountID(accountID string) { armoAPI.accountID = accountID }
|
||||
func (armoAPI *ArmoAPI) SetClientID(clientID string) { armoAPI.clientID = clientID }
|
||||
func (armoAPI *ArmoAPI) SetSecretKey(secretKey string) { armoAPI.secretKey = secretKey }
|
||||
|
||||
func (armoAPI *ArmoAPI) GetFramework(name string) (*reporthandling.Framework, error) {
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getFrameworkURL(name), nil)
|
||||
respStr, err := armoAPI.Get(armoAPI.getFrameworkURL(name), nil)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -113,7 +169,7 @@ func (armoAPI *ArmoAPI) GetFramework(name string) (*reporthandling.Framework, er
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetFrameworks() ([]reporthandling.Framework, error) {
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getListFrameworkURL(), nil)
|
||||
respStr, err := armoAPI.Get(armoAPI.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -134,7 +190,7 @@ func (armoAPI *ArmoAPI) GetControl(policyName string) (*reporthandling.Control,
|
||||
func (armoAPI *ArmoAPI) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
|
||||
exceptions := []armotypes.PostureExceptionPolicy{}
|
||||
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getExceptionsURL(clusterName), nil)
|
||||
respStr, err := armoAPI.Get(armoAPI.getExceptionsURL(clusterName), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -146,12 +202,12 @@ func (armoAPI *ArmoAPI) GetExceptions(clusterName string) ([]armotypes.PostureEx
|
||||
return exceptions, nil
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetCustomerGUID() (*TenantResponse, error) {
|
||||
url := armoAPI.getCustomerURL()
|
||||
if armoAPI.customerGUID != "" {
|
||||
url = fmt.Sprintf("%s?customerGUID=%s", url, armoAPI.customerGUID)
|
||||
func (armoAPI *ArmoAPI) GetTenant() (*TenantResponse, error) {
|
||||
url := armoAPI.getAccountURL()
|
||||
if armoAPI.accountID != "" {
|
||||
url = fmt.Sprintf("%s?customerGUID=%s", url, armoAPI.accountID)
|
||||
}
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, url, nil)
|
||||
respStr, err := armoAPI.Get(url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -159,17 +215,19 @@ func (armoAPI *ArmoAPI) GetCustomerGUID() (*TenantResponse, error) {
|
||||
if err = JSONDecoder(respStr).Decode(tenant); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if tenant.TenantID != "" {
|
||||
armoAPI.accountID = tenant.TenantID
|
||||
}
|
||||
return tenant, nil
|
||||
}
|
||||
|
||||
// ControlsInputs // map[<control name>][<input arguments>]
|
||||
func (armoAPI *ArmoAPI) GetAccountConfig(clusterName string) (*armotypes.CustomerConfig, error) {
|
||||
accountConfig := &armotypes.CustomerConfig{}
|
||||
if armoAPI.customerGUID == "" {
|
||||
if armoAPI.accountID == "" {
|
||||
return accountConfig, nil
|
||||
}
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getAccountConfig(clusterName), nil)
|
||||
respStr, err := armoAPI.Get(armoAPI.getAccountConfig(clusterName), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -191,7 +249,7 @@ func (armoAPI *ArmoAPI) GetControlsInputs(clusterName string) (map[string][]stri
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) ListCustomFrameworks() ([]string, error) {
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getListFrameworkURL(), nil)
|
||||
respStr, err := armoAPI.Get(armoAPI.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -211,7 +269,7 @@ func (armoAPI *ArmoAPI) ListCustomFrameworks() ([]string, error) {
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) ListFrameworks() ([]string, error) {
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getListFrameworkURL(), nil)
|
||||
respStr, err := armoAPI.Get(armoAPI.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -236,9 +294,71 @@ func (armoAPI *ArmoAPI) ListControls(l ListType) ([]string, error) {
|
||||
return nil, fmt.Errorf("control api is not public")
|
||||
}
|
||||
|
||||
type TenantResponse struct {
|
||||
TenantID string `json:"tenantId"`
|
||||
Token string `json:"token"`
|
||||
Expires string `json:"expires"`
|
||||
AdminMail string `json:"adminMail,omitempty"`
|
||||
func (armoAPI *ArmoAPI) PostExceptions(exceptions []armotypes.PostureExceptionPolicy) error {
|
||||
|
||||
for i := range exceptions {
|
||||
ex, err := json.Marshal(exceptions[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = armoAPI.Post(armoAPI.exceptionsURL(""), map[string]string{"Content-Type": "application/json"}, ex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) DeleteException(exceptionName string) error {
|
||||
|
||||
_, err := armoAPI.Delete(armoAPI.exceptionsURL(exceptionName), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (armoAPI *ArmoAPI) Login() error {
|
||||
if armoAPI.accountID == "" {
|
||||
return fmt.Errorf("failed to login, missing accountID")
|
||||
}
|
||||
if armoAPI.clientID == "" {
|
||||
return fmt.Errorf("failed to login, missing clientID")
|
||||
}
|
||||
if armoAPI.secretKey == "" {
|
||||
return fmt.Errorf("failed to login, missing secretKey")
|
||||
}
|
||||
|
||||
// init URLs
|
||||
feLoginData := FeLoginData{ClientId: armoAPI.clientID, Secret: armoAPI.secretKey}
|
||||
body, _ := json.Marshal(feLoginData)
|
||||
|
||||
resp, err := http.Post(armoAPI.getApiToken(), "application/json", bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("error authenticating: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
responseBody, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var feLoginResponse FeLoginResponse
|
||||
|
||||
if err = json.Unmarshal(responseBody, &feLoginResponse); err != nil {
|
||||
return err
|
||||
}
|
||||
armoAPI.feToken = feLoginResponse
|
||||
|
||||
/* Now we have JWT */
|
||||
|
||||
armoAPI.authCookie, err = armoAPI.getAuthCookie()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
armoAPI.loggedIn = true
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
@@ -13,7 +17,7 @@ func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/armoFrameworks"
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.customerGUID)
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
if isNativeFramework(frameworkName) {
|
||||
q.Add("frameworkName", strings.ToUpper(frameworkName))
|
||||
} else {
|
||||
@@ -31,7 +35,7 @@ func (armoAPI *ArmoAPI) getListFrameworkURL() string {
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/armoFrameworks"
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.customerGUID)
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
return u.String()
|
||||
@@ -43,7 +47,7 @@ func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
|
||||
u.Path = "api/v1/armoPostureExceptions"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.customerGUID)
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
// if clusterName != "" { // TODO - fix customer name support in Armo BE
|
||||
// q.Add("clusterName", clusterName)
|
||||
// }
|
||||
@@ -52,6 +56,23 @@ func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) exceptionsURL(exceptionsPolicyName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/postureExceptionPolicy"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
if exceptionsPolicyName != "" { // for delete
|
||||
q.Add("policyName", exceptionsPolicyName)
|
||||
}
|
||||
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
@@ -59,7 +80,7 @@ func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
|
||||
u.Path = "api/v1/armoCustomerConfiguration"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", armoAPI.customerGUID)
|
||||
q.Add("customerGUID", armoAPI.getCustomerGUIDFallBack())
|
||||
if clusterName != "" { // TODO - fix customer name support in Armo BE
|
||||
q.Add("clusterName", clusterName)
|
||||
}
|
||||
@@ -68,10 +89,81 @@ func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getCustomerURL() string {
|
||||
func (armoAPI *ArmoAPI) getAccountURL() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/createTenant"
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getApiToken() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.authURL
|
||||
u.Path = "frontegg/identity/resources/auth/v1/api-token"
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getOpenidCustomers() string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/openid_customers"
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getAuthCookie() (string, error) {
|
||||
selectCustomer := ArmoSelectCustomer{SelectedCustomerGuid: armoAPI.accountID}
|
||||
requestBody, _ := json.Marshal(selectCustomer)
|
||||
client := &http.Client{}
|
||||
httpRequest, err := http.NewRequest(http.MethodPost, armoAPI.getOpenidCustomers(), bytes.NewBuffer(requestBody))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
httpRequest.Header.Set("Content-Type", "application/json")
|
||||
httpRequest.Header.Set("Authorization", fmt.Sprintf("Bearer %s", armoAPI.feToken.Token))
|
||||
httpResponse, err := client.Do(httpRequest)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer httpResponse.Body.Close()
|
||||
if httpResponse.StatusCode != http.StatusOK {
|
||||
return "", fmt.Errorf("failed to get cookie from %s: status %d", armoAPI.getOpenidCustomers(), httpResponse.StatusCode)
|
||||
}
|
||||
|
||||
cookies := httpResponse.Header.Get("set-cookie")
|
||||
if len(cookies) == 0 {
|
||||
return "", fmt.Errorf("no cookie field in response from %s", armoAPI.getOpenidCustomers())
|
||||
}
|
||||
|
||||
authCookie := ""
|
||||
for _, cookie := range strings.Split(cookies, ";") {
|
||||
kv := strings.Split(cookie, "=")
|
||||
if kv[0] == "auth" {
|
||||
authCookie = kv[1]
|
||||
}
|
||||
}
|
||||
|
||||
if len(authCookie) == 0 {
|
||||
return "", fmt.Errorf("no auth cookie field in response from %s", armoAPI.getOpenidCustomers())
|
||||
}
|
||||
|
||||
return authCookie, nil
|
||||
}
|
||||
func (armoAPI *ArmoAPI) appendAuthHeaders(headers map[string]string) {
|
||||
|
||||
if armoAPI.feToken.Token != "" {
|
||||
headers["Authorization"] = fmt.Sprintf("Bearer %s", armoAPI.feToken.Token)
|
||||
}
|
||||
if armoAPI.authCookie != "" {
|
||||
headers["Cookie"] = fmt.Sprintf("auth=%s", armoAPI.authCookie)
|
||||
}
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getCustomerGUIDFallBack() string {
|
||||
if armoAPI.accountID != "" {
|
||||
return armoAPI.accountID
|
||||
}
|
||||
return "11111111-1111-1111-1111-111111111111"
|
||||
}
|
||||
|
||||
24
cautils/getter/datastructures.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package getter
|
||||
|
||||
type FeLoginData struct {
|
||||
Secret string `json:"secret"`
|
||||
ClientId string `json:"clientId"`
|
||||
}
|
||||
|
||||
type FeLoginResponse struct {
|
||||
Token string `json:"accessToken"`
|
||||
RefreshToken string `json:"refreshToken"`
|
||||
ExpiresIn int32 `json:"expiresIn"`
|
||||
Expires string `json:"expires"`
|
||||
}
|
||||
|
||||
type ArmoSelectCustomer struct {
|
||||
SelectedCustomerGuid string `json:"selectedCustomer"`
|
||||
}
|
||||
|
||||
type TenantResponse struct {
|
||||
TenantID string `json:"tenantId"`
|
||||
Token string `json:"token"`
|
||||
Expires string `json:"expires"`
|
||||
AdminMail string `json:"adminMail,omitempty"`
|
||||
}
|
||||
@@ -24,8 +24,15 @@ type IExceptionsGetter interface {
|
||||
GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error)
|
||||
}
|
||||
type IBackend interface {
|
||||
GetCustomerGUID() (*TenantResponse, error)
|
||||
SetCustomerGUID(customerGUID string)
|
||||
GetAccountID() string
|
||||
GetClientID() string
|
||||
GetSecretKey() string
|
||||
|
||||
SetAccountID(accountID string)
|
||||
SetClientID(clientID string)
|
||||
SetSecretKey(secretKey string)
|
||||
|
||||
GetTenant() (*TenantResponse, error)
|
||||
}
|
||||
|
||||
type IControlsInputsGetter interface {
|
||||
|
||||
@@ -13,15 +13,11 @@ import (
|
||||
)
|
||||
|
||||
func GetDefaultPath(name string) string {
|
||||
defaultfilePath := filepath.Join(DefaultLocalStore, name)
|
||||
if homeDir, err := os.UserHomeDir(); err == nil {
|
||||
defaultfilePath = filepath.Join(homeDir, defaultfilePath)
|
||||
}
|
||||
return defaultfilePath
|
||||
return filepath.Join(DefaultLocalStore, name)
|
||||
}
|
||||
|
||||
func SaveInFile(policy interface{}, pathStr string) error {
|
||||
encodedData, err := json.Marshal(policy)
|
||||
encodedData, err := json.MarshalIndent(policy, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -51,13 +47,31 @@ func JSONDecoder(origin string) *json.Decoder {
|
||||
return dec
|
||||
}
|
||||
|
||||
func HttpDelete(httpClient *http.Client, fullURL string, headers map[string]string) (string, error) {
|
||||
|
||||
req, err := http.NewRequest("DELETE", fullURL, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
setHeaders(req, headers)
|
||||
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
respStr, err := httpRespToString(resp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return respStr, nil
|
||||
}
|
||||
func HttpGetter(httpClient *http.Client, fullURL string, headers map[string]string) (string, error) {
|
||||
|
||||
req, err := http.NewRequest("GET", fullURL, nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
addHeaders(req, headers)
|
||||
setHeaders(req, headers)
|
||||
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
@@ -76,7 +90,7 @@ func HttpPost(httpClient *http.Client, fullURL string, headers map[string]string
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
addHeaders(req, headers)
|
||||
setHeaders(req, headers)
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -88,10 +102,10 @@ func HttpPost(httpClient *http.Client, fullURL string, headers map[string]string
|
||||
return respStr, nil
|
||||
}
|
||||
|
||||
func addHeaders(req *http.Request, headers map[string]string) {
|
||||
func setHeaders(req *http.Request, headers map[string]string) {
|
||||
if len(headers) >= 0 { // might be nil
|
||||
for k, v := range headers {
|
||||
req.Header.Add(k, v)
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -106,21 +120,22 @@ func httpRespToString(resp *http.Response) (string, error) {
|
||||
if resp.ContentLength > 0 {
|
||||
strBuilder.Grow(int(resp.ContentLength))
|
||||
}
|
||||
bytesNum, err := io.Copy(&strBuilder, resp.Body)
|
||||
_, err := io.Copy(&strBuilder, resp.Body)
|
||||
respStr := strBuilder.String()
|
||||
if err != nil {
|
||||
respStrNewLen := len(respStr)
|
||||
if respStrNewLen > 1024 {
|
||||
respStrNewLen = 1024
|
||||
}
|
||||
return "", fmt.Errorf("HTTP request failed. URL: '%s', Read-ERROR: '%s', HTTP-CODE: '%s', BODY(top): '%s', HTTP-HEADERS: %v, HTTP-BODY-BUFFER-LENGTH: %v", resp.Request.URL.RequestURI(), err, resp.Status, respStr[:respStrNewLen], resp.Header, bytesNum)
|
||||
return "", fmt.Errorf("http-error: '%s', reason: '%s'", resp.Status, respStr[:respStrNewLen])
|
||||
// return "", fmt.Errorf("HTTP request failed. URL: '%s', Read-ERROR: '%s', HTTP-CODE: '%s', BODY(top): '%s', HTTP-HEADERS: %v, HTTP-BODY-BUFFER-LENGTH: %v", resp.Request.URL.RequestURI(), err, resp.Status, respStr[:respStrNewLen], resp.Header, bytesNum)
|
||||
}
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
respStrNewLen := len(respStr)
|
||||
if respStrNewLen > 1024 {
|
||||
respStrNewLen = 1024
|
||||
}
|
||||
err = fmt.Errorf("HTTP request failed. URL: '%s', HTTP-ERROR: '%s', BODY: '%s', HTTP-HEADERS: %v, HTTP-BODY-BUFFER-LENGTH: %v", resp.Request.URL.RequestURI(), resp.Status, respStr[:respStrNewLen], resp.Header, bytesNum)
|
||||
err = fmt.Errorf("http-error: '%s', reason: '%s'", resp.Status, respStr[:respStrNewLen])
|
||||
}
|
||||
|
||||
return respStr, err
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
@@ -13,7 +14,15 @@ import (
|
||||
// =======================================================================================================================
|
||||
// ============================================== LoadPolicy =============================================================
|
||||
// =======================================================================================================================
|
||||
const DefaultLocalStore = ".kubescape"
|
||||
var DefaultLocalStore = getCacheDir()
|
||||
|
||||
func getCacheDir() string {
|
||||
defaultDirPath := ".kubescape"
|
||||
if homeDir, err := os.UserHomeDir(); err == nil {
|
||||
defaultDirPath = filepath.Join(homeDir, defaultDirPath)
|
||||
}
|
||||
return defaultDirPath
|
||||
}
|
||||
|
||||
// Load policies from a local repository
|
||||
type LoadPolicy struct {
|
||||
@@ -85,8 +94,19 @@ func (lp *LoadPolicy) GetFrameworks() ([]reporthandling.Framework, error) {
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) ListFrameworks() ([]string, error) {
|
||||
// TODO - Support
|
||||
return []string{}, fmt.Errorf("loading frameworks list from file is not supported")
|
||||
fwNames := []string{}
|
||||
framework := &reporthandling.Framework{}
|
||||
for _, f := range lp.filePaths {
|
||||
file, err := os.ReadFile(f)
|
||||
if err == nil {
|
||||
if err := json.Unmarshal(file, framework); err == nil {
|
||||
if !contains(fwNames, framework.Name) {
|
||||
fwNames = append(fwNames, framework.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return fwNames, nil
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) ListControls(listType ListType) ([]string, error) {
|
||||
@@ -114,7 +134,7 @@ func (lp *LoadPolicy) GetControlsInputs(clusterName string) (map[string][]string
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(f, &accountConfig); err == nil {
|
||||
if err = json.Unmarshal(f, &accountConfig.Settings.PostureControlInputs); err == nil {
|
||||
return accountConfig.Settings.PostureControlInputs, nil
|
||||
}
|
||||
return nil, err
|
||||
|
||||
31
cautils/logger/helpers/datastructures.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package helpers
|
||||
|
||||
import "time"
|
||||
|
||||
type StringObj struct {
|
||||
key string
|
||||
value string
|
||||
}
|
||||
|
||||
type ErrorObj struct {
|
||||
key string
|
||||
value error
|
||||
}
|
||||
|
||||
type IntObj struct {
|
||||
key string
|
||||
value int
|
||||
}
|
||||
|
||||
type InterfaceObj struct {
|
||||
key string
|
||||
value interface{}
|
||||
}
|
||||
|
||||
func Error(e error) *ErrorObj { return &ErrorObj{key: "error", value: e} }
|
||||
func Int(k string, v int) *IntObj { return &IntObj{key: k, value: v} }
|
||||
func String(k, v string) *StringObj { return &StringObj{key: k, value: v} }
|
||||
func Interface(k string, v interface{}) *InterfaceObj { return &InterfaceObj{key: k, value: v} }
|
||||
func Time() *StringObj {
|
||||
return &StringObj{key: "time", value: time.Now().Format("2006-01-02 15:04:05")}
|
||||
}
|
||||
69
cautils/logger/helpers/level.go
Normal file
@@ -0,0 +1,69 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Level int8
|
||||
|
||||
const (
|
||||
UnknownLevel Level = iota - -1
|
||||
DebugLevel
|
||||
InfoLevel //default
|
||||
SuccessLevel
|
||||
WarningLevel
|
||||
ErrorLevel
|
||||
FatalLevel
|
||||
|
||||
_defaultLevel = InfoLevel
|
||||
_minLevel = DebugLevel
|
||||
_maxLevel = FatalLevel
|
||||
)
|
||||
|
||||
func ToLevel(level string) Level {
|
||||
switch strings.ToLower(level) {
|
||||
case "debug":
|
||||
return DebugLevel
|
||||
case "info":
|
||||
return InfoLevel
|
||||
case "success":
|
||||
return SuccessLevel
|
||||
case "warning", "warn":
|
||||
return WarningLevel
|
||||
case "error":
|
||||
return ErrorLevel
|
||||
case "fatal":
|
||||
return FatalLevel
|
||||
default:
|
||||
return UnknownLevel
|
||||
}
|
||||
}
|
||||
func (l Level) String() string {
|
||||
switch l {
|
||||
case DebugLevel:
|
||||
return "debug"
|
||||
case InfoLevel:
|
||||
return "info"
|
||||
case SuccessLevel:
|
||||
return "success"
|
||||
case WarningLevel:
|
||||
return "warning"
|
||||
case ErrorLevel:
|
||||
return "error"
|
||||
case FatalLevel:
|
||||
return "fatal"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (l Level) Skip(l2 Level) bool {
|
||||
return l < l2
|
||||
}
|
||||
|
||||
func SupportedLevels() []string {
|
||||
levels := []string{}
|
||||
for i := _minLevel; i <= _maxLevel; i++ {
|
||||
levels = append(levels, i.String())
|
||||
}
|
||||
return levels
|
||||
}
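// Editor's note (not part of the original diff): how these levels drive filtering, mirroring
// PrettyLogger.print further below; a message is skipped when its level is below the configured one.
//
//	configured := helpers.ToLevel("warning")
//	helpers.DebugLevel.Skip(configured) // true  -> a debug message is not printed
//	helpers.ErrorLevel.Skip(configured) // false -> an error message is printed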
|
||||
62
cautils/logger/helpers/methods.go
Normal file
@@ -0,0 +1,62 @@
|
||||
package helpers
|
||||
|
||||
type IDetails interface {
|
||||
Key() string
|
||||
Value() interface{}
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// ============================== String ================================================
|
||||
// ======================================================================================
|
||||
|
||||
// Key
|
||||
func (s *StringObj) Key() string {
|
||||
return s.key
|
||||
}
|
||||
|
||||
// Value
|
||||
func (s *StringObj) Value() interface{} {
|
||||
return s.value
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// =============================== Error ================================================
|
||||
// ======================================================================================
|
||||
|
||||
// Key
|
||||
func (s *ErrorObj) Key() string {
|
||||
return s.key
|
||||
}
|
||||
|
||||
// Value
|
||||
func (s *ErrorObj) Value() interface{} {
|
||||
return s.value
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// ================================= Int ================================================
|
||||
// ======================================================================================
|
||||
|
||||
// Key
|
||||
func (s *IntObj) Key() string {
|
||||
return s.key
|
||||
}
|
||||
|
||||
// Value
|
||||
func (s *IntObj) Value() interface{} {
|
||||
return s.value
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// =========================== Interface ================================================
|
||||
// ======================================================================================
|
||||
|
||||
// Key
|
||||
func (s *InterfaceObj) Key() string {
|
||||
return s.key
|
||||
}
|
||||
|
||||
// Value
|
||||
func (s *InterfaceObj) Value() interface{} {
|
||||
return s.value
|
||||
}
|
||||
77
cautils/logger/methods.go
Normal file
@@ -0,0 +1,77 @@
|
||||
package logger
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/cautils/logger/mocklogger"
|
||||
"github.com/armosec/kubescape/cautils/logger/prettylogger"
|
||||
"github.com/armosec/kubescape/cautils/logger/zaplogger"
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
type ILogger interface {
|
||||
Fatal(msg string, details ...helpers.IDetails) // print log and exit 1
|
||||
Error(msg string, details ...helpers.IDetails)
|
||||
Success(msg string, details ...helpers.IDetails)
|
||||
Warning(msg string, details ...helpers.IDetails)
|
||||
Info(msg string, details ...helpers.IDetails)
|
||||
Debug(msg string, details ...helpers.IDetails)
|
||||
|
||||
SetLevel(level string) error
|
||||
GetLevel() string
|
||||
|
||||
SetWriter(w *os.File)
|
||||
GetWriter() *os.File
|
||||
|
||||
DisableColor(flag bool)
|
||||
}
|
||||
|
||||
var l ILogger
|
||||
|
||||
// Return initialized logger. If logger not initialized, will call InitializeLogger() with the default value
|
||||
func L() ILogger {
|
||||
if l == nil {
|
||||
InitializeLogger("")
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
/* InitializeLogger initialize desired logger
|
||||
|
||||
Use:
|
||||
InitializeLogger("<logger name>")
|
||||
|
||||
Supported logger names
|
||||
- "zap": Logger from package "go.uber.org/zap"
|
||||
- "pretty", "colorful": Human friendly colorful logger
|
||||
- "mock", "empty", "ignore": Logger will be totally ignored
|
||||
|
||||
e.g.
|
||||
InitializeLogger("mock") -> will initialize the mock logger
|
||||
|
||||
Default:
|
||||
If isatty.IsTerminal(os.Stdout.Fd()):
|
||||
"pretty"
|
||||
else
|
||||
"zap"
|
||||
|
||||
*/
|
||||
func InitializeLogger(loggerName string) {
|
||||
|
||||
switch strings.ToLower(loggerName) {
|
||||
case "zap":
|
||||
l = zaplogger.NewZapLogger()
|
||||
case "pretty", "colorful":
|
||||
l = prettylogger.NewPrettyLogger()
|
||||
case "mock", "empty", "ignore":
|
||||
l = mocklogger.NewMockLogger()
|
||||
default:
|
||||
if isatty.IsTerminal(os.Stdout.Fd()) {
|
||||
l = prettylogger.NewPrettyLogger()
|
||||
} else {
|
||||
l = zaplogger.NewZapLogger()
|
||||
}
|
||||
}
|
||||
}
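// Editor's note (not part of the original diff): typical usage of the logger package above;
// the message text and field values are placeholders.
//
//	logger.InitializeLogger("pretty") // or "zap", "mock"; an empty name auto-selects by terminal detection
//	logger.L().SetLevel("debug")
//	logger.L().Info("scanning cluster", helpers.String("cluster", "minikube"))
//	logger.L().Error("scan failed", helpers.Error(fmt.Errorf("example error")))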
|
||||
25
cautils/logger/mocklogger/logger.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package mocklogger
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
)
|
||||
|
||||
type MockLogger struct {
|
||||
}
|
||||
|
||||
func NewMockLogger() *MockLogger {
|
||||
return &MockLogger{}
|
||||
}
|
||||
|
||||
func (zl *MockLogger) GetLevel() string { return "" }
|
||||
func (zl *MockLogger) SetWriter(w *os.File) {}
|
||||
func (zl *MockLogger) GetWriter() *os.File { return nil }
|
||||
func (zl *MockLogger) SetLevel(level string) error { return nil }
|
||||
func (zl *MockLogger) Fatal(msg string, details ...helpers.IDetails) {}
|
||||
func (zl *MockLogger) Error(msg string, details ...helpers.IDetails) {}
|
||||
func (zl *MockLogger) Warning(msg string, details ...helpers.IDetails) {}
|
||||
func (zl *MockLogger) Success(msg string, details ...helpers.IDetails) {}
|
||||
func (zl *MockLogger) Info(msg string, details ...helpers.IDetails) {}
|
||||
func (zl *MockLogger) Debug(msg string, details ...helpers.IDetails) {}
|
||||
37
cautils/logger/prettylogger/colors.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package prettylogger
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/fatih/color"
|
||||
)
|
||||
|
||||
var prefixError = color.New(color.Bold, color.FgHiRed).FprintfFunc()
|
||||
var prefixWarning = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
|
||||
var prefixInfo = color.New(color.Bold, color.FgCyan).FprintfFunc()
|
||||
var prefixSuccess = color.New(color.Bold, color.FgHiGreen).FprintfFunc()
|
||||
var prefixDebug = color.New(color.Bold, color.FgWhite).FprintfFunc()
|
||||
var message = color.New().FprintfFunc()
|
||||
|
||||
func prefix(l helpers.Level) func(w io.Writer, format string, a ...interface{}) {
|
||||
switch l {
|
||||
case helpers.DebugLevel:
|
||||
return prefixDebug
|
||||
case helpers.InfoLevel:
|
||||
return prefixInfo
|
||||
case helpers.SuccessLevel:
|
||||
return prefixSuccess
|
||||
case helpers.WarningLevel:
|
||||
return prefixWarning
|
||||
case helpers.ErrorLevel, helpers.FatalLevel:
|
||||
return prefixError
|
||||
}
|
||||
return message
|
||||
}
|
||||
|
||||
func DisableColor(flag bool) {
|
||||
if flag {
|
||||
color.NoColor = true
|
||||
}
|
||||
}
|
||||
82
cautils/logger/prettylogger/logger.go
Normal file
@@ -0,0 +1,82 @@
|
||||
package prettylogger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
)
|
||||
|
||||
type PrettyLogger struct {
|
||||
writer *os.File
|
||||
level helpers.Level
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
func NewPrettyLogger() *PrettyLogger {
|
||||
return &PrettyLogger{
|
||||
writer: os.Stderr, // default to stderr
|
||||
level: helpers.InfoLevel,
|
||||
mutex: sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (pl *PrettyLogger) GetLevel() string { return pl.level.String() }
|
||||
func (pl *PrettyLogger) SetWriter(w *os.File) { pl.writer = w }
|
||||
func (pl *PrettyLogger) GetWriter() *os.File { return pl.writer }
|
||||
|
||||
func (pl *PrettyLogger) DisableColor(flag bool) {
|
||||
DisableColor(flag)
|
||||
}
|
||||
|
||||
func (pl *PrettyLogger) SetLevel(level string) error {
|
||||
pl.level = helpers.ToLevel(level)
|
||||
if pl.level == helpers.UnknownLevel {
|
||||
return fmt.Errorf("level '%s' unknown", level)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (pl *PrettyLogger) Fatal(msg string, details ...helpers.IDetails) {
|
||||
pl.print(helpers.FatalLevel, msg, details...)
|
||||
os.Exit(1)
|
||||
}
|
||||
func (pl *PrettyLogger) Error(msg string, details ...helpers.IDetails) {
|
||||
pl.print(helpers.ErrorLevel, msg, details...)
|
||||
}
|
||||
func (pl *PrettyLogger) Warning(msg string, details ...helpers.IDetails) {
|
||||
pl.print(helpers.WarningLevel, msg, details...)
|
||||
}
|
||||
func (pl *PrettyLogger) Info(msg string, details ...helpers.IDetails) {
|
||||
pl.print(helpers.InfoLevel, msg, details...)
|
||||
}
|
||||
func (pl *PrettyLogger) Debug(msg string, details ...helpers.IDetails) {
|
||||
pl.print(helpers.DebugLevel, msg, details...)
|
||||
}
|
||||
func (pl *PrettyLogger) Success(msg string, details ...helpers.IDetails) {
|
||||
pl.print(helpers.SuccessLevel, msg, details...)
|
||||
}
|
||||
|
||||
func (pl *PrettyLogger) print(level helpers.Level, msg string, details ...helpers.IDetails) {
|
||||
if !level.Skip(pl.level) {
|
||||
pl.mutex.Lock()
|
||||
prefix(level)(pl.writer, "[%s] ", level.String())
|
||||
if d := detailsToString(details); d != "" {
|
||||
msg = fmt.Sprintf("%s. %s", msg, d)
|
||||
}
|
||||
message(pl.writer, fmt.Sprintf("%s\n", msg))
|
||||
pl.mutex.Unlock()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func detailsToString(details []helpers.IDetails) string {
|
||||
s := ""
|
||||
for i := range details {
|
||||
s += fmt.Sprintf("%s: %v", details[i].Key(), details[i].Value())
|
||||
if i < len(details)-1 {
|
||||
s += "; "
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
79
cautils/logger/zaplogger/logger.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package zaplogger
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"go.uber.org/zap"
|
||||
"go.uber.org/zap/zapcore"
|
||||
)
|
||||
|
||||
type ZapLogger struct {
|
||||
zapL *zap.Logger
|
||||
cfg zap.Config
|
||||
}
|
||||
|
||||
func NewZapLogger() *ZapLogger {
|
||||
ec := zap.NewProductionEncoderConfig()
|
||||
ec.EncodeTime = zapcore.RFC3339TimeEncoder
|
||||
cfg := zap.NewProductionConfig()
|
||||
cfg.DisableCaller = true
|
||||
cfg.DisableStacktrace = true
|
||||
cfg.Encoding = "json"
|
||||
cfg.EncoderConfig = ec
|
||||
|
||||
zapLogger, err := cfg.Build()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &ZapLogger{
|
||||
zapL: zapLogger,
|
||||
cfg: cfg,
|
||||
}
|
||||
}
|
||||
|
||||
func (zl *ZapLogger) GetLevel() string { return zl.cfg.Level.Level().String() }
|
||||
func (zl *ZapLogger) SetWriter(w *os.File) {}
|
||||
func (zl *ZapLogger) GetWriter() *os.File { return nil }
|
||||
|
||||
func (zl *ZapLogger) DisableColor(flag bool) {}
|
||||
|
||||
func (zl *ZapLogger) SetLevel(level string) error {
|
||||
l := zapcore.Level(1)
|
||||
err := l.Set(level)
|
||||
if err == nil {
|
||||
zl.cfg.Level.SetLevel(l)
|
||||
}
|
||||
return err
|
||||
}
|
||||
func (zl *ZapLogger) Fatal(msg string, details ...helpers.IDetails) {
|
||||
zl.zapL.Fatal(msg, detailsToZapFields(details)...)
|
||||
}
|
||||
|
||||
func (zl *ZapLogger) Error(msg string, details ...helpers.IDetails) {
|
||||
zl.zapL.Error(msg, detailsToZapFields(details)...)
|
||||
}
|
||||
|
||||
func (zl *ZapLogger) Warning(msg string, details ...helpers.IDetails) {
|
||||
zl.zapL.Warn(msg, detailsToZapFields(details)...)
|
||||
}
|
||||
|
||||
func (zl *ZapLogger) Success(msg string, details ...helpers.IDetails) {
|
||||
zl.zapL.Info(msg, detailsToZapFields(details)...)
|
||||
}
|
||||
|
||||
func (zl *ZapLogger) Info(msg string, details ...helpers.IDetails) {
|
||||
zl.zapL.Info(msg, detailsToZapFields(details)...)
|
||||
}
|
||||
|
||||
func (zl *ZapLogger) Debug(msg string, details ...helpers.IDetails) {
|
||||
zl.zapL.Debug(msg, detailsToZapFields(details)...)
|
||||
}
|
||||
|
||||
func detailsToZapFields(details []helpers.IDetails) []zapcore.Field {
|
||||
zapFields := []zapcore.Field{}
|
||||
for i := range details {
|
||||
zapFields = append(zapFields, zap.Any(details[i].Key(), details[i].Value()))
|
||||
}
|
||||
return zapFields
|
||||
}
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/rbac-utils/rbacscanner"
|
||||
"github.com/armosec/rbac-utils/rbacutils"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
type RBACObjects struct {
|
||||
@@ -21,7 +21,7 @@ func NewRBACObjects(scanner *rbacscanner.RbacScannerFromK8sAPI) *RBACObjects {
|
||||
|
||||
func (rbacObjects *RBACObjects) SetResourcesReport() (*reporthandling.PostureReport, error) {
|
||||
return &reporthandling.PostureReport{
|
||||
ReportID: uuid.NewV4().String(),
|
||||
ReportID: uuid.NewString(),
|
||||
ReportGenerationTime: time.Now().UTC(),
|
||||
CustomerGUID: rbacObjects.scanner.CustomerGUID,
|
||||
ClusterName: rbacObjects.scanner.ClusterName,
|
||||
@@ -51,23 +51,22 @@ func (rbacObjects *RBACObjects) rbacObjectsToResources(resources *rbacutils.Rbac
|
||||
|
||||
Should be investigated
|
||||
************************************************************************************************************************
|
||||
|
||||
// wrap rbac aggregated objects in IMetadata and add to allresources
|
||||
// TODO - DEPRECATE SA2WLIDmap
|
||||
SA2WLIDmapIMeta, err := rbacutils.SA2WLIDmapIMetadataWrapper(resources.SA2WLIDmap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allresources[SA2WLIDmapIMeta.GetID()] = SA2WLIDmapIMeta
|
||||
|
||||
SAID2WLIDmapIMeta, err := rbacutils.SAID2WLIDmapIMetadataWrapper(resources.SAID2WLIDmap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allresources[SAID2WLIDmapIMeta.GetID()] = SAID2WLIDmapIMeta
|
||||
|
||||
*/
|
||||
|
||||
// wrap rbac aggregated objects in IMetadata and add to allresources
|
||||
// TODO - DEPRECATE SA2WLIDmap
|
||||
SA2WLIDmapIMeta, err := rbacutils.SA2WLIDmapIMetadataWrapper(resources.SA2WLIDmap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allresources[SA2WLIDmapIMeta.GetID()] = SA2WLIDmapIMeta
|
||||
|
||||
SAID2WLIDmapIMeta, err := rbacutils.SAID2WLIDmapIMetadataWrapper(resources.SAID2WLIDmap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allresources[SAID2WLIDmapIMeta.GetID()] = SAID2WLIDmapIMeta
|
||||
|
||||
// convert rbac k8s resources to IMetadata and add to allresources
|
||||
for _, cr := range resources.ClusterRoles.Items {
|
||||
crmap, err := convertToMap(cr)
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
helpersv1 "github.com/armosec/opa-utils/reporthandling/helpers/v1"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/armosec/opa-utils/score"
|
||||
)
|
||||
|
||||
func ReportV2ToV1(opaSessionObj *OPASessionObj) {
|
||||
@@ -22,7 +21,6 @@ func ReportV2ToV1(opaSessionObj *OPASessionObj) {
|
||||
fwv1 := reporthandling.FrameworkReport{}
|
||||
fwv1.Name = fwv2.GetName()
|
||||
fwv1.Score = fwv2.GetScore()
|
||||
|
||||
fwv1.ControlReports = append(fwv1.ControlReports, controlReportV2ToV1(opaSessionObj, fwv2.GetName(), fwv2.Controls)...)
|
||||
frameworks = append(frameworks, fwv1)
|
||||
|
||||
@@ -30,10 +28,10 @@ func ReportV2ToV1(opaSessionObj *OPASessionObj) {
|
||||
} else {
|
||||
fwv1 := reporthandling.FrameworkReport{}
|
||||
fwv1.Name = ""
|
||||
fwv1.Score = 0
|
||||
|
||||
fwv1.ControlReports = append(fwv1.ControlReports, controlReportV2ToV1(opaSessionObj, "", opaSessionObj.Report.SummaryDetails.Controls)...)
|
||||
frameworks = append(frameworks, fwv1)
|
||||
fwv1.Score = opaSessionObj.Report.SummaryDetails.Score
|
||||
}
|
||||
|
||||
// // remove unused data
|
||||
@@ -49,36 +47,14 @@ func ReportV2ToV1(opaSessionObj *OPASessionObj) {
|
||||
reporthandling.SetUniqueResourcesCounter(&frameworks[f])
|
||||
|
||||
// set default score
|
||||
reporthandling.SetDefaultScore(&frameworks[f])
|
||||
// reporthandling.SetDefaultScore(&frameworks[f])
|
||||
}
|
||||
|
||||
// update score
|
||||
scoreutil := score.NewScore(opaSessionObj.AllResources)
|
||||
scoreutil.Calculate(frameworks)
|
||||
// // update score
|
||||
// scoreutil := score.NewScore(opaSessionObj.AllResources)
|
||||
// scoreutil.Calculate(frameworks)
|
||||
|
||||
opaSessionObj.PostureReport.FrameworkReports = frameworks
|
||||
|
||||
// opaSessionObj.Report.SummaryDetails.Score = 0
|
||||
// for i := range frameworks {
|
||||
// for j := range frameworks[i].ControlReports {
|
||||
// // frameworks[i].ControlReports[j].Score
|
||||
// for w := range opaSessionObj.Report.SummaryDetails.Frameworks {
|
||||
// if opaSessionObj.Report.SummaryDetails.Frameworks[w].Name == frameworks[i].Name {
|
||||
// opaSessionObj.Report.SummaryDetails.Frameworks[w].Score = frameworks[i].Score
|
||||
// }
|
||||
// if c, ok := opaSessionObj.Report.SummaryDetails.Frameworks[w].Controls[frameworks[i].ControlReports[j].ControlID]; ok {
|
||||
// c.Score = frameworks[i].ControlReports[j].Score
|
||||
// opaSessionObj.Report.SummaryDetails.Frameworks[w].Controls[frameworks[i].ControlReports[j].ControlID] = c
|
||||
// }
|
||||
// }
|
||||
// if c, ok := opaSessionObj.Report.SummaryDetails.Controls[frameworks[i].ControlReports[j].ControlID]; ok {
|
||||
// c.Score = frameworks[i].ControlReports[j].Score
|
||||
// opaSessionObj.Report.SummaryDetails.Controls[frameworks[i].ControlReports[j].ControlID] = c
|
||||
// }
|
||||
// }
|
||||
// opaSessionObj.Report.SummaryDetails.Score += opaSessionObj.PostureReport.FrameworkReports[i].Score
|
||||
// }
|
||||
// opaSessionObj.Report.SummaryDetails.Score /= float32(len(opaSessionObj.Report.SummaryDetails.Frameworks))
|
||||
}
|
||||
|
||||
func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, controls map[string]reportsummary.ControlSummary) []reporthandling.ControlReport {
|
||||
@@ -88,9 +64,9 @@ func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, con
|
||||
crv1.ControlID = controlID
|
||||
crv1.BaseScore = crv2.ScoreFactor
|
||||
crv1.Name = crv2.GetName()
|
||||
crv1.Score = crv2.GetScore()
|
||||
crv1.Control_ID = controlID
|
||||
// crv1.Attributes = crv2.
|
||||
crv1.Score = crv2.GetScore()
|
||||
|
||||
// TODO - add fields
|
||||
crv1.Description = crv2.Description
|
||||
@@ -120,7 +96,12 @@ func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, con
|
||||
ruleResponse := reporthandling.RuleResponse{}
|
||||
ruleResponse.Rulename = rulev2.GetName()
|
||||
for i := range rulev2.Paths {
|
||||
ruleResponse.FailedPaths = append(ruleResponse.FailedPaths, rulev2.Paths[i].FailedPath)
|
||||
if rulev2.Paths[i].FailedPath != "" {
|
||||
ruleResponse.FailedPaths = append(ruleResponse.FailedPaths, rulev2.Paths[i].FailedPath)
|
||||
}
|
||||
if rulev2.Paths[i].FixPath.Path != "" {
|
||||
ruleResponse.FixPaths = append(ruleResponse.FixPaths, rulev2.Paths[i].FixPath)
|
||||
}
|
||||
}
|
||||
ruleResponse.RuleStatus = string(status.Status())
|
||||
if len(rulev2.Exception) > 0 {
|
||||
|
||||
@@ -1,16 +1,24 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
const (
|
||||
ScanCluster string = "cluster"
|
||||
ScanLocalFiles string = "yaml"
|
||||
ScanCluster string = "cluster"
|
||||
ScanLocalFiles string = "yaml"
|
||||
localControlInputsFilename string = "controls-inputs.json"
|
||||
localExceptionsFilename string = "exceptions.json"
|
||||
)
|
||||
|
||||
type BoolPtrFlag struct {
|
||||
@@ -45,6 +53,12 @@ func (bpf *BoolPtrFlag) Set(val string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type RootInfo struct {
|
||||
Logger string // logger level
|
||||
CacheDir string // cached dir
|
||||
DisableColor bool // Disable Color
|
||||
}
|
||||
|
||||
type ScanInfo struct {
|
||||
Getters
|
||||
PolicyIdentifier []reporthandling.PolicyIdentifier
|
||||
@@ -52,22 +66,25 @@ type ScanInfo struct {
|
||||
ControlsInputs string // Load file with inputs for controls
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
|
||||
VerboseMode bool // Display all of the input resources and not only failed resources
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
FormatVersion string // Output object can be differnet between versions, this is for testing and backward compatibility
|
||||
ExcludedNamespaces string // used for host sensor namespace
|
||||
IncludeNamespaces string // DEPRECATED?
|
||||
InputPatterns []string // Yaml files input patterns
|
||||
Silent bool // Silent mode - Do not print progress logs
|
||||
FailThreshold uint16 // Failure score threshold
|
||||
FailThreshold float32 // Failure score threshold
|
||||
Submit bool // Submit results to Armo BE
|
||||
HostSensor BoolPtrFlag // Deploy ARMO K8s host sensor to collect data from certain controls
|
||||
ReportID string // Report id of the current scan
|
||||
HostSensorEnabled BoolPtrFlag // Deploy ARMO K8s host sensor to collect data from certain controls
|
||||
HostSensorYamlPath string // Path to hostsensor file
|
||||
Local bool // Do not submit results
|
||||
Account string // account ID
|
||||
// ClusterName string // cluster name
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
}
|
||||
|
||||
type Getters struct {
|
||||
@@ -79,7 +96,48 @@ type Getters struct {
|
||||
func (scanInfo *ScanInfo) Init() {
scanInfo.setUseFrom()
scanInfo.setOutputFile()
scanInfo.setUseArtifactsFrom()
}

func (scanInfo *ScanInfo) setUseArtifactsFrom() {
if scanInfo.UseArtifactsFrom == "" {
return
}
// UseArtifactsFrom must be a path without a filename
dir, file := filepath.Split(scanInfo.UseArtifactsFrom)
if dir == "" {
scanInfo.UseArtifactsFrom = file
} else if strings.Contains(file, ".json") {
scanInfo.UseArtifactsFrom = dir
}
// set frameworks files
files, err := ioutil.ReadDir(scanInfo.UseArtifactsFrom)
if err != nil {
logger.L().Fatal("failed to read files from directory", helpers.String("dir", scanInfo.UseArtifactsFrom), helpers.Error(err))
}
framework := &reporthandling.Framework{}
for _, f := range files {
filePath := filepath.Join(scanInfo.UseArtifactsFrom, f.Name())
file, err := os.ReadFile(filePath)
if err == nil {
if err := json.Unmarshal(file, framework); err == nil {
scanInfo.UseFrom = append(scanInfo.UseFrom, filepath.Join(scanInfo.UseArtifactsFrom, f.Name()))
}
}
}
// set config-inputs file
scanInfo.ControlsInputs = filepath.Join(scanInfo.UseArtifactsFrom, localControlInputsFilename)
// set exceptions
scanInfo.UseExceptions = filepath.Join(scanInfo.UseArtifactsFrom, localExceptionsFilename)
}
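Aside on the path handling in setUseArtifactsFrom above: filepath.Split keeps the trailing separator on the directory part, which is what lets a file path be reduced to its directory. A minimal sketch using only the Go standard library (values are illustrative):

dir, file := filepath.Split("artifacts/nsa.json") // dir == "artifacts/", file == "nsa.json"
dir, file = filepath.Split("nsa.json")            // dir == "", file == "nsa.json" - the value is kept as-is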
func (scanInfo *ScanInfo) setUseExceptions() {
if scanInfo.UseExceptions != "" {
// load exceptions from file
scanInfo.ExceptionsGetter = getter.NewLoadPolicy([]string{scanInfo.UseExceptions})
} else {
scanInfo.ExceptionsGetter = getter.GetArmoAPIConnector()
}
}

func (scanInfo *ScanInfo) setUseFrom() {
@@ -104,6 +162,11 @@ func (scanInfo *ScanInfo) setOutputFile() {
scanInfo.Output += ".xml"
}
}
if scanInfo.Format == "pdf" {
if filepath.Ext(scanInfo.Output) != ".pdf" {
scanInfo.Output += ".pdf"
}
}
}

func (scanInfo *ScanInfo) GetScanningEnvironment() string {

@@ -7,7 +7,10 @@ import (
"os"

"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/cautils/logger"
"github.com/armosec/kubescape/cautils/logger/helpers"
pkgutils "github.com/armosec/utils-go/utils"
"golang.org/x/mod/semver"
)

const SKIP_VERSION_CHECK = "KUBESCAPE_SKIP_UPDATE_CHECK"
@@ -22,7 +25,7 @@ type IVersionCheckHandler interface {

func NewIVersionCheckHandler() IVersionCheckHandler {
if BuildNumber == "" {
WarningDisplay(os.Stderr, "Warning: unknown build number, this might affect your scan results. Please make sure you are updated to latest version.\n")
logger.L().Warning("unknown build number, this might affect your scan results. Please make sure you are updated to latest version")
}
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && pkgutils.StringToBool(v) {
return NewVersionCheckHandlerMock()
@@ -78,14 +81,14 @@ func NewVersionCheckRequest(buildNumber, frameworkName, frameworkVersion, scanni
}

func (v *VersionCheckHandlerMock) CheckLatestVersion(versionData *VersionCheckRequest) error {
fmt.Println("Skipping version check")
logger.L().Info("Skipping version check")
return nil
}

func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckRequest) error {
defer func() {
if err := recover(); err != nil {
WarningDisplay(os.Stderr, "failed to get latest version\n")
logger.L().Warning("failed to get latest version", helpers.Interface("error", err))
}
}()

@@ -95,8 +98,8 @@ func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckReques
}

if latestVersion.ClientUpdate != "" {
if BuildNumber != "" && BuildNumber < latestVersion.ClientUpdate {
WarningDisplay(os.Stderr, warningMessage(latestVersion.Client, latestVersion.ClientUpdate), "\n")
if BuildNumber != "" && semver.Compare(BuildNumber, latestVersion.ClientUpdate) >= 0 {
logger.L().Warning(warningMessage(latestVersion.ClientUpdate))
}
}

@@ -106,7 +109,7 @@ func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckReques
// }

if latestVersion.Message != "" {
InfoDisplay(os.Stderr, latestVersion.Message, "\n")
logger.L().Info(latestVersion.Message)
}

return nil
@@ -131,6 +134,6 @@ func (v *VersionCheckHandler) getLatestVersion(versionData *VersionCheckRequest)
return vResp, nil
}

func warningMessage(kind, release string) string {
return fmt.Sprintf("Warning: '%s' is not updated to the latest release: '%s'", kind, release)
func warningMessage(release string) string {
return fmt.Sprintf("current version '%s' is not updated to the latest release: '%s'", BuildNumber, release)
}

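For reference, the check above relies on golang.org/x/mod/semver; Compare expects version strings prefixed with "v" and returns -1, 0 or +1. A minimal sketch (version values are placeholders):

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(semver.Compare("v1.0.133", "v1.0.134")) // -1: the build is older than the latest release
	fmt.Println(semver.Compare("v1.0.134", "v1.0.134")) // 0: up to date
}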
38
cautils/versioncheck_test.go
Normal file
@@ -0,0 +1,38 @@
package cautils

import (
"testing"

"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/opa-utils/reporthandling"
"github.com/stretchr/testify/assert"
)

func TestGetKubernetesObjects(t *testing.T) {
}

var rule_v1_0_131 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useUntilKubescapeVersion": "v1.0.132"}}}
var rule_v1_0_132 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.132", "useUntilKubescapeVersion": "v1.0.133"}}}
var rule_v1_0_133 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.133", "useUntilKubescapeVersion": "v1.0.134"}}}
var rule_v1_0_134 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.134"}}}

func TestIsRuleKubescapeVersionCompatible(t *testing.T) {
// local build - no build number
// should use only rules that don't have "until"
buildNumberMock := ""
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))

// should only use rules that version is in range of use
buildNumberMock = "v1.0.133"
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
}
7
clihandler/cliconfigdelete.go
Normal file
@@ -0,0 +1,7 @@
package clihandler

func CliDelete() error {

tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
return tenant.DeleteCachedConfig()
}
35
clihandler/clidelete.go
Normal file
@@ -0,0 +1,35 @@
package clihandler

import (
"fmt"

"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/cautils/logger"
"github.com/armosec/kubescape/cautils/logger/helpers"
)

func DeleteExceptions(accountID string, exceptions []string) error {

// load cached config
getTenantConfig(accountID, "", getKubernetesApi())

// login kubescape SaaS
armoAPI := getter.GetArmoAPIConnector()
if err := armoAPI.Login(); err != nil {
return err
}

for i := range exceptions {
exceptionName := exceptions[i]
if exceptionName == "" {
continue
}
logger.L().Info("Deleting exception", helpers.String("name", exceptionName))
if err := armoAPI.DeleteException(exceptionName); err != nil {
return fmt.Errorf("failed to delete exception '%s', reason: %s", exceptionName, err.Error())
}
logger.L().Success("Exception deleted successfully")
}

return nil
}
@@ -8,6 +8,8 @@ import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
)
|
||||
|
||||
var downloadFunc = map[string]func(*cautils.DownloadInfo) error{
|
||||
@@ -51,7 +53,7 @@ func setPathandFilename(downloadInfo *cautils.DownloadInfo) {
|
||||
dir, file := filepath.Split(downloadInfo.Path)
|
||||
if dir == "" {
|
||||
downloadInfo.Path = file
|
||||
} else {
|
||||
} else if strings.Contains(file, ".json") {
|
||||
downloadInfo.Path = dir
|
||||
downloadInfo.FileName = file
|
||||
}
|
||||
@@ -67,7 +69,7 @@ func downloadArtifacts(downloadInfo *cautils.DownloadInfo) error {
|
||||
}
|
||||
for artifact := range artifacts {
|
||||
if err := downloadArtifact(&cautils.DownloadInfo{Target: artifact, Path: downloadInfo.Path, FileName: fmt.Sprintf("%s.json", artifact)}, artifacts); err != nil {
|
||||
fmt.Printf("error downloading %s, error: %s", artifact, err)
|
||||
logger.L().Error("error downloading", helpers.String("artifact", artifact), helpers.Error(err))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@@ -75,7 +77,8 @@ func downloadArtifacts(downloadInfo *cautils.DownloadInfo) error {
|
||||
|
||||
func downloadConfigInputs(downloadInfo *cautils.DownloadInfo) error {
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetCustomerGUID(), nil)
|
||||
|
||||
controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetAccountID(), nil)
|
||||
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetClusterName())
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -88,16 +91,17 @@ func downloadConfigInputs(downloadInfo *cautils.DownloadInfo) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadExceptions(downloadInfo *cautils.DownloadInfo) error {
|
||||
var err error
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
|
||||
exceptionsGetter := getExceptionsGetter("")
|
||||
exceptions := []armotypes.PostureExceptionPolicy{}
|
||||
if tenant.GetCustomerGUID() != "" {
|
||||
if tenant.GetAccountID() != "" {
|
||||
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetClusterName())
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -111,14 +115,15 @@ func downloadExceptions(downloadInfo *cautils.DownloadInfo) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadFramework(downloadInfo *cautils.DownloadInfo) error {
|
||||
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
g := getPolicyGetter(nil, tenant.GetCustomerGUID(), true, nil)
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetAccountID(), true, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
// if framework name not specified - download all frameworks
|
||||
@@ -127,11 +132,12 @@ func downloadFramework(downloadInfo *cautils.DownloadInfo) error {
|
||||
return err
|
||||
}
|
||||
for _, fw := range frameworks {
|
||||
err = getter.SaveInFile(fw, filepath.Join(downloadInfo.Path, (strings.ToLower(fw.Name)+".json")))
|
||||
downloadTo := filepath.Join(downloadInfo.Path, (strings.ToLower(fw.Name) + ".json"))
|
||||
err = getter.SaveInFile(fw, downloadTo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s': '%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, fw.Name, filepath.Join(downloadInfo.Path, (strings.ToLower(fw.Name)+".json")))
|
||||
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("name", fw.Name), helpers.String("path", downloadTo))
|
||||
}
|
||||
// return fmt.Errorf("missing framework name")
|
||||
} else {
|
||||
@@ -142,11 +148,12 @@ func downloadFramework(downloadInfo *cautils.DownloadInfo) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = getter.SaveInFile(framework, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
downloadTo := filepath.Join(downloadInfo.Path, downloadInfo.FileName)
|
||||
err = getter.SaveInFile(framework, downloadTo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("name", framework.Name), helpers.String("path", downloadTo))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -154,7 +161,8 @@ func downloadFramework(downloadInfo *cautils.DownloadInfo) error {
|
||||
func downloadControl(downloadInfo *cautils.DownloadInfo) error {
|
||||
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
g := getPolicyGetter(nil, tenant.GetCustomerGUID(), false, nil)
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetAccountID(), false, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
// TODO - support
|
||||
@@ -167,10 +175,11 @@ func downloadControl(downloadInfo *cautils.DownloadInfo) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = getter.SaveInFile(controls, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
downloadTo := filepath.Join(downloadInfo.Path, downloadInfo.FileName)
|
||||
err = getter.SaveInFile(controls, downloadTo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("name", downloadInfo.Name), helpers.String("path", downloadTo))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,17 +1,24 @@
|
||||
package clihandler
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/clihandler/cliobjects"
|
||||
)
|
||||
|
||||
var listFunc = map[string]func(*cautils.ListPolicies) ([]string, error){
|
||||
var listFunc = map[string]func(*cliobjects.ListPolicies) ([]string, error){
|
||||
"controls": listControls,
|
||||
"frameworks": listFrameworks,
|
||||
"exceptions": listExceptions,
|
||||
}
|
||||
|
||||
var listFormatFunc = map[string]func(*cliobjects.ListPolicies, []string){
|
||||
"pretty-print": prettyPrintListFormat,
|
||||
"json": jsonListFormat,
|
||||
}
|
||||
|
||||
func ListSupportCommands() []string {
|
||||
@@ -21,7 +28,7 @@ func ListSupportCommands() []string {
|
||||
}
|
||||
return commands
|
||||
}
|
||||
func CliList(listPolicies *cautils.ListPolicies) error {
|
||||
func CliList(listPolicies *cliobjects.ListPolicies) error {
|
||||
if f, ok := listFunc[listPolicies.Target]; ok {
|
||||
policies, err := f(listPolicies)
|
||||
if err != nil {
|
||||
@@ -29,30 +36,53 @@ func CliList(listPolicies *cautils.ListPolicies) error {
|
||||
}
|
||||
sort.Strings(policies)
|
||||
|
||||
sep := "\n * "
|
||||
usageCmd := strings.TrimSuffix(listPolicies.Target, "s")
|
||||
fmt.Printf("Supported %s:%s%s\n", listPolicies.Target, sep, strings.Join(policies, sep))
|
||||
fmt.Printf("\nUseage:\n")
|
||||
fmt.Printf("$ kubescape scan %s \"name\"\n", usageCmd)
|
||||
fmt.Printf("$ kubescape scan %s \"name-0\",\"name-1\"\n\n", usageCmd)
|
||||
listFormatFunc[listPolicies.Format](listPolicies, policies)
|
||||
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unknown command to download")
|
||||
}
|
||||
|
||||
func listFrameworks(listPolicies *cautils.ListPolicies) ([]string, error) {
|
||||
func listFrameworks(listPolicies *cliobjects.ListPolicies) ([]string, error) {
|
||||
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
|
||||
g := getPolicyGetter(nil, tenant.GetCustomerGUID(), true, nil)
|
||||
g := getPolicyGetter(nil, tenant.GetAccountID(), true, nil)
|
||||
|
||||
return listFrameworksNames(g), nil
|
||||
}
|
||||
|
||||
func listControls(listPolicies *cautils.ListPolicies) ([]string, error) {
|
||||
func listControls(listPolicies *cliobjects.ListPolicies) ([]string, error) {
|
||||
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
|
||||
g := getPolicyGetter(nil, tenant.GetCustomerGUID(), false, nil)
|
||||
|
||||
g := getPolicyGetter(nil, tenant.GetAccountID(), false, nil)
|
||||
l := getter.ListName
|
||||
if listPolicies.ListIDs {
|
||||
l = getter.ListID
|
||||
}
|
||||
return g.ListControls(l)
|
||||
}
|
||||
|
||||
func listExceptions(listPolicies *cliobjects.ListPolicies) ([]string, error) {
|
||||
// load tenant config
|
||||
getTenantConfig(listPolicies.Account, "", getKubernetesApi())
|
||||
|
||||
var exceptionsNames []string
|
||||
armoAPI := getExceptionsGetter("")
|
||||
exceptions, err := armoAPI.GetExceptions("")
|
||||
if err != nil {
|
||||
return exceptionsNames, err
|
||||
}
|
||||
for i := range exceptions {
|
||||
exceptionsNames = append(exceptionsNames, exceptions[i].Name)
|
||||
}
|
||||
return exceptionsNames, nil
|
||||
}
|
||||
|
||||
func prettyPrintListFormat(listPolicies *cliobjects.ListPolicies, policies []string) {
|
||||
sep := "\n * "
|
||||
fmt.Printf("Supported %s:%s%s\n", listPolicies.Target, sep, strings.Join(policies, sep))
|
||||
}
|
||||
|
||||
func jsonListFormat(listPolicies *cliobjects.ListPolicies, policies []string) {
|
||||
j, _ := json.MarshalIndent(policies, "", " ")
|
||||
fmt.Printf("%s\n", j)
|
||||
}
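Illustrative usage of the list output formats wired up above (flag names as registered in the list command; format keys match the listFormatFunc map):

kubescape list frameworks --format json
kubescape list controls --id --format pretty-print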

@@ -1,7 +1,8 @@
package cautils
package cliobjects

type ListPolicies struct {
Target string
ListIDs bool
Account string
Format string
}
7
clihandler/cliobjects/set.go
Normal file
@@ -0,0 +1,7 @@
package cliobjects

type SetConfig struct {
Account string
ClientID string
SecretKey string
}
9
clihandler/cliobjects/submit.go
Normal file
@@ -0,0 +1,9 @@
package cliobjects

type Submit struct {
Account string
}

type Delete struct {
Account string
}
|
||||
22
clihandler/cliset.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package clihandler
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/clihandler/cliobjects"
|
||||
)
|
||||
|
||||
func CliSetConfig(setConfig *cliobjects.SetConfig) error {
|
||||
|
||||
tenant := getTenantConfig("", "", getKubernetesApi())
|
||||
|
||||
if setConfig.Account != "" {
|
||||
tenant.GetConfigObj().AccountID = setConfig.Account
|
||||
}
|
||||
if setConfig.SecretKey != "" {
|
||||
tenant.GetConfigObj().SecretKey = setConfig.SecretKey
|
||||
}
|
||||
if setConfig.ClientID != "" {
|
||||
tenant.GetConfigObj().ClientID = setConfig.ClientID
|
||||
}
|
||||
|
||||
return tenant.UpdateCachedConfig()
|
||||
}
|
||||
60
clihandler/clisubmit.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package clihandler
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/clihandler/cliinterfaces"
|
||||
)
|
||||
|
||||
func Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error {
|
||||
|
||||
// list resources
|
||||
postureReport, err := submitInterfaces.SubmitObjects.SetResourcesReport()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
allresources, err := submitInterfaces.SubmitObjects.ListAllResources()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// report
|
||||
if err := submitInterfaces.Reporter.ActionSendReport(&cautils.OPASessionObj{PostureReport: postureReport, AllResources: allresources}); err != nil {
|
||||
return err
|
||||
}
|
||||
logger.L().Success("Data has been submitted successfully")
|
||||
submitInterfaces.Reporter.DisplayReportURL()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func SubmitExceptions(accountID, excPath string) error {
|
||||
logger.L().Info("submitting exceptions", helpers.String("path", excPath))
|
||||
|
||||
// load cached config
|
||||
tenantConfig := getTenantConfig(accountID, "", getKubernetesApi())
|
||||
if err := tenantConfig.SetTenant(); err != nil {
|
||||
logger.L().Error("failed setting account ID", helpers.Error(err))
|
||||
}
|
||||
|
||||
// load exceptions from file
|
||||
loader := getter.NewLoadPolicy([]string{excPath})
|
||||
exceptions, err := loader.GetExceptions("")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// login kubescape SaaS
|
||||
armoAPI := getter.GetArmoAPIConnector()
|
||||
if err := armoAPI.Login(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := armoAPI.PostExceptions(exceptions); err != nil {
|
||||
return err
|
||||
}
|
||||
logger.L().Success("Exceptions submitted successfully")
|
||||
|
||||
return nil
|
||||
}
|
||||
12
clihandler/cliview.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package clihandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
func CliView() error {
|
||||
tenant := getTenantConfig("", "", getKubernetesApi()) // change k8sinterface
|
||||
fmt.Fprintf(os.Stderr, "%s\n", tenant.GetConfigObj().Config())
|
||||
return nil
|
||||
}
|
||||
@@ -6,9 +6,10 @@ import (
|
||||
|
||||
// clusterCmd represents the cluster command
|
||||
var clusterCmd = &cobra.Command{
|
||||
Use: "cluster",
|
||||
Short: "Set configuration for cluster",
|
||||
Long: ``,
|
||||
Use: "cluster",
|
||||
Short: "Set configuration for cluster",
|
||||
Long: ``,
|
||||
Deprecated: "use the 'set' command instead",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
},
|
||||
}
|
||||
|
||||
@@ -7,13 +7,15 @@ import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var getCmd = &cobra.Command{
|
||||
Use: "get <key>",
|
||||
Short: "Get configuration in cluster",
|
||||
Long: ``,
|
||||
Use: "get <key>",
|
||||
Short: "Get configuration in cluster",
|
||||
Long: ``,
|
||||
Deprecated: "use the 'view' command instead",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 || len(args) > 1 {
|
||||
return fmt.Errorf("requires one argument")
|
||||
@@ -34,12 +36,11 @@ var getCmd = &cobra.Command{
|
||||
val, err := clusterConfig.GetValueByKeyFromConfigMap(key)
|
||||
if err != nil {
|
||||
if err.Error() == "value does not exist." {
|
||||
fmt.Printf("Could net get value from configmap, reason: %s\n", err)
|
||||
return nil
|
||||
return fmt.Errorf("failed to get value from configmap, reason: %s", err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
fmt.Println(key + "=" + val)
|
||||
logger.L().Info(key + "=" + val)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -7,13 +7,15 @@ import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var setCmd = &cobra.Command{
|
||||
Use: "set <key>=<value>",
|
||||
Short: "Set configuration in cluster",
|
||||
Long: ``,
|
||||
var setClusterCmd = &cobra.Command{
|
||||
Use: "set <key>=<value>",
|
||||
Short: "Set configuration in cluster",
|
||||
Long: ``,
|
||||
Deprecated: "use the 'set' command instead",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 || len(args) > 1 {
|
||||
return fmt.Errorf("requires one argument: <key>=<value>")
|
||||
@@ -34,11 +36,11 @@ var setCmd = &cobra.Command{
|
||||
if err := clusterConfig.SetKeyValueInConfigmap(key, data); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println("Value added successfully.")
|
||||
logger.L().Info("value added successfully.")
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
clusterCmd.AddCommand(setCmd)
|
||||
clusterCmd.AddCommand(setClusterCmd)
|
||||
}
|
||||
|
||||
45
clihandler/cmd/completion.go
Normal file
@@ -0,0 +1,45 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var completionCmdExamples = `
|
||||
|
||||
# Enable BASH shell autocompletion
|
||||
$ source <(kubescape completion bash)
|
||||
$ echo 'source <(kubescape completion bash)' >> ~/.bashrc
|
||||
|
||||
# Enable ZSH shell autocompletion
|
||||
$ source <(kubescape completion zsh)
$ echo 'source <(kubescape completion zsh)' >> "${fpath[1]}/_kubescape"
|
||||
|
||||
`
|
||||
var completionCmd = &cobra.Command{
|
||||
Use: "completion [bash|zsh|fish|powershell]",
|
||||
Short: "Generate autocompletion script",
|
||||
Long: "To load completions",
|
||||
Example: completionCmdExamples,
|
||||
DisableFlagsInUseLine: true,
|
||||
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
|
||||
Args: cobra.ExactValidArgs(1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
switch strings.ToLower(args[0]) {
|
||||
case "bash":
|
||||
cmd.Root().GenBashCompletion(os.Stdout)
|
||||
case "zsh":
|
||||
cmd.Root().GenZshCompletion(os.Stdout)
|
||||
case "fish":
|
||||
cmd.Root().GenFishCompletion(os.Stdout, true)
|
||||
case "powershell":
|
||||
cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(completionCmd)
|
||||
}
|
||||
@@ -1,18 +1,124 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliobjects"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
configExample = `
|
||||
# View cached configurations
|
||||
kubescape config view
|
||||
|
||||
# Delete cached configurations
|
||||
kubescape config delete
|
||||
|
||||
# Set cached configurations
|
||||
kubescape config set --help
|
||||
`
|
||||
setConfigExample = `
|
||||
# Set account id
|
||||
kubescape config set accountID <account id>
|
||||
|
||||
# Set client id
|
||||
kubescape config set clientID <client id>
|
||||
|
||||
# Set access key
|
||||
kubescape config set secretKey <access key>
|
||||
`
|
||||
)
|
||||
|
||||
// configCmd represents the config command
|
||||
var configCmd = &cobra.Command{
|
||||
Use: "config",
|
||||
Short: "Set configuration",
|
||||
Use: "config",
|
||||
Short: "handle cached configurations",
|
||||
Example: configExample,
|
||||
}
|
||||
|
||||
var setConfig = cliobjects.SetConfig{}
|
||||
|
||||
// configCmd represents the config command
|
||||
var configSetCmd = &cobra.Command{
|
||||
Use: "set",
|
||||
Short: fmt.Sprintf("Set configurations, supported: %s", strings.Join(stringKeysToSlice(supportConfigSet), "/")),
|
||||
Example: setConfigExample,
|
||||
ValidArgs: stringKeysToSlice(supportConfigSet),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if err := parseSetArgs(args); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := clihandler.CliSetConfig(&setConfig); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
var supportConfigSet = map[string]func(*cliobjects.SetConfig, string){
|
||||
"accountID": func(s *cliobjects.SetConfig, account string) { s.Account = account },
|
||||
"clientID": func(s *cliobjects.SetConfig, clientID string) { s.ClientID = clientID },
|
||||
"secretKey": func(s *cliobjects.SetConfig, secretKey string) { s.SecretKey = secretKey },
|
||||
}
|
||||
|
||||
func stringKeysToSlice(m map[string]func(*cliobjects.SetConfig, string)) []string {
|
||||
l := []string{}
|
||||
for i := range m {
|
||||
l = append(l, i)
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func parseSetArgs(args []string) error {
|
||||
var key string
|
||||
var value string
|
||||
if len(args) == 1 {
|
||||
if keyValue := strings.Split(args[0], "="); len(keyValue) == 2 {
|
||||
key = keyValue[0]
|
||||
value = keyValue[1]
|
||||
}
|
||||
} else if len(args) == 2 {
|
||||
key = args[0]
|
||||
value = args[1]
|
||||
}
|
||||
if setConfigFunc, ok := supportConfigSet[key]; ok {
|
||||
setConfigFunc(&setConfig, value)
|
||||
} else {
|
||||
return fmt.Errorf("key '%s' unknown . supported: %s", key, strings.Join(stringKeysToSlice(supportConfigSet), "/"))
|
||||
}
|
||||
return nil
|
||||
}
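Illustrative usage of the parsing above: parseSetArgs accepts either a single key=value argument or a separate key and value (the account ID is a placeholder):

kubescape config set accountID 1234-abcd-5678
kubescape config set accountID=1234-abcd-5678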
|
||||
|
||||
var configDeleteCmd = &cobra.Command{
|
||||
Use: "delete",
|
||||
Short: "Delete cached configurations",
|
||||
Long: ``,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := clihandler.CliDelete(); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// configCmd represents the config command
|
||||
var configViewCmd = &cobra.Command{
|
||||
Use: "view",
|
||||
Short: "View cached configurations",
|
||||
Long: ``,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := clihandler.CliView(); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(configCmd)
|
||||
configCmd.AddCommand(configSetCmd)
|
||||
configCmd.AddCommand(configDeleteCmd)
|
||||
configCmd.AddCommand(configViewCmd)
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -39,8 +40,10 @@ var controlCmd = &cobra.Command{
|
||||
if len(args) > 0 {
|
||||
controls := strings.Split(args[0], ",")
|
||||
if len(controls) > 1 {
|
||||
if controls[1] == "" {
|
||||
return fmt.Errorf("usage: <control-0>,<control-1>")
|
||||
for _, control := range controls {
|
||||
if control == "" {
|
||||
return fmt.Errorf("usage: <control-0>,<control-1>")
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -53,7 +56,6 @@ var controlCmd = &cobra.Command{
|
||||
scanInfo.PolicyIdentifier = []reporthandling.PolicyIdentifier{}
|
||||
|
||||
if len(args) == 0 {
|
||||
// scanInfo.SetPolicyIdentifiers(getter.NativeFrameworks, reporthandling.KindFramework)
|
||||
scanInfo.ScanAll = true
|
||||
} else { // expected control or list of controls separated by ","
|
||||
|
||||
@@ -79,12 +81,14 @@ var controlCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
scanInfo.FrameworkScan = false
|
||||
scanInfo.Init()
|
||||
cautils.SetSilentMode(scanInfo.Silent)
|
||||
err := clihandler.ScanCliSetup(&scanInfo)
|
||||
|
||||
results, err := clihandler.Scan(&scanInfo)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
results.HandleResults()
|
||||
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
|
||||
return fmt.Errorf("scan risk-score %.2f is above permitted threshold %.2f", results.GetRiskScore(), scanInfo.FailThreshold)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
@@ -97,8 +101,7 @@ func init() {
|
||||
|
||||
func flagValidationControl() {
|
||||
if 100 < scanInfo.FailThreshold {
|
||||
fmt.Println("bad argument: out of range threshold")
|
||||
os.Exit(1)
|
||||
logger.L().Fatal("bad argument: out of range threshold")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
57
clihandler/cmd/delete.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliobjects"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var deleteInfo cliobjects.Delete
|
||||
|
||||
var deleteExceptionsExamples = `
|
||||
# Delete single exception
|
||||
kubescape delete exceptions "exception name"
|
||||
|
||||
# Delete multiple exceptions
|
||||
kubescape delete exceptions "first exception;second exception;third exception"
|
||||
`
|
||||
|
||||
var deleteCmd = &cobra.Command{
|
||||
Use: "delete <command>",
|
||||
Short: "Delete configurations in Kubescape SaaS version",
|
||||
Long: ``,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
},
|
||||
}
|
||||
|
||||
var deleteExceptionsCmd = &cobra.Command{
|
||||
Use: "exceptions <exception name>",
|
||||
Short: "Delete exceptions from Kubescape SaaS version. Run 'kubescape list exceptions' for all exceptions names",
|
||||
Example: deleteExceptionsExamples,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 1 {
|
||||
return fmt.Errorf("missing exceptions names")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
exceptionsNames := strings.Split(args[0], ";")
|
||||
if len(exceptionsNames) == 0 {
|
||||
logger.L().Fatal("missing exceptions names")
|
||||
}
|
||||
if err := clihandler.DeleteExceptions(deleteInfo.Account, exceptionsNames); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
deleteCmd.PersistentFlags().StringVarP(&deleteInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
rootCmd.AddCommand(deleteCmd)
|
||||
|
||||
deleteCmd.AddCommand(deleteExceptionsCmd)
|
||||
}
|
||||
@@ -2,10 +2,11 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -21,7 +22,7 @@ var (
|
||||
kubescape download artifacts --output /tmp
|
||||
|
||||
# Download the NSA framework. Run 'kubescape list frameworks' for all frameworks names
|
||||
kubescape download frameworks nsa
|
||||
kubescape download framework nsa
|
||||
|
||||
# Download the "Allowed hostPath" control. Run 'kubescape list controls' for all controls names
|
||||
kubescape download control "Allowed hostPath"
|
||||
@@ -58,18 +59,23 @@ var downloadCmd = &cobra.Command{
|
||||
downloadInfo.Name = args[1]
|
||||
}
|
||||
if err := clihandler.CliDownload(&downloadInfo); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
// cobra.OnInitialize(initConfig)
|
||||
cobra.OnInitialize(initDownload)
|
||||
|
||||
rootCmd.AddCommand(downloadCmd)
|
||||
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If not specified, will save in `~/.kubescape/<policy name>.json`")
|
||||
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If not specified, will save in `~/.kubescape/<policy name>.json`")
|
||||
|
||||
}
|
||||
|
||||
func initDownload() {
|
||||
if filepath.Ext(downloadInfo.Path) == ".json" {
|
||||
downloadInfo.Path, downloadInfo.FileName = filepath.Split(downloadInfo.Path)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -15,7 +16,7 @@ import (
|
||||
var (
|
||||
frameworkExample = `
|
||||
# Scan all frameworks and submit the results
|
||||
kubescape scan --submit
|
||||
kubescape scan framework all --submit
|
||||
|
||||
# Scan the NSA framework
|
||||
kubescape scan framework nsa
|
||||
@@ -29,15 +30,6 @@ var (
|
||||
# Scan kubernetes YAML manifest files
|
||||
kubescape scan framework nsa *.yaml
|
||||
|
||||
# Scan and save the results in the JSON format
|
||||
kubescape scan --format json --output results.json
|
||||
|
||||
# Save scan results in JSON format
|
||||
kubescape scan --format json --output results.json
|
||||
|
||||
# Display all resources
|
||||
kubescape scan --verbose
|
||||
|
||||
Run 'kubescape list frameworks' for the list of supported frameworks
|
||||
`
|
||||
)
|
||||
@@ -46,13 +38,14 @@ var frameworkCmd = &cobra.Command{
|
||||
Short: "The framework you wish to use. Run 'kubescape list frameworks' for the list of supported frameworks",
|
||||
Example: frameworkExample,
|
||||
Long: "Execute a scan on a running Kubernetes cluster or `yaml`/`json` files (use glob) or `-` for stdin",
|
||||
// ValidArgs: getter.NativeFrameworks,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
frameworks := strings.Split(args[0], ",")
|
||||
if len(frameworks) > 1 {
|
||||
if frameworks[1] == "" {
|
||||
return fmt.Errorf("usage: <framework-0>,<framework-1>")
|
||||
for _, framework := range frameworks {
|
||||
if framework == "" {
|
||||
return fmt.Errorf("usage: <framework-0>,<framework-1>")
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -65,7 +58,6 @@ var frameworkCmd = &cobra.Command{
|
||||
var frameworks []string
|
||||
|
||||
if len(args) == 0 { // scan all frameworks
|
||||
// frameworks = getter.NativeFrameworks
|
||||
scanInfo.ScanAll = true
|
||||
} else {
|
||||
// Read frameworks from input args
|
||||
@@ -95,12 +87,13 @@ var frameworkCmd = &cobra.Command{
|
||||
|
||||
scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
|
||||
|
||||
scanInfo.Init()
|
||||
cautils.SetSilentMode(scanInfo.Silent)
|
||||
err := clihandler.ScanCliSetup(&scanInfo)
|
||||
results, err := clihandler.Scan(&scanInfo)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n\n", err)
|
||||
os.Exit(1)
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
results.HandleResults()
|
||||
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
|
||||
return fmt.Errorf("scan risk-score %.2f is above permitted threshold %.2f", results.GetRiskScore(), scanInfo.FailThreshold)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
@@ -122,11 +115,9 @@ func init() {
|
||||
|
||||
func flagValidationFramework() {
|
||||
if scanInfo.Submit && scanInfo.Local {
|
||||
fmt.Println("You can use `keep-local` or `submit`, but not both")
|
||||
os.Exit(1)
|
||||
logger.L().Fatal("you can use `keep-local` or `submit`, but not both")
|
||||
}
|
||||
if 100 < scanInfo.FailThreshold {
|
||||
fmt.Println("bad argument: out of range threshold")
|
||||
os.Exit(1)
|
||||
logger.L().Fatal("bad argument: out of range threshold")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,11 +2,12 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliobjects"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -28,7 +29,7 @@ var (
|
||||
https://hub.armo.cloud/docs/controls
|
||||
`
|
||||
)
|
||||
var listPolicies = cautils.ListPolicies{}
|
||||
var listPolicies = cliobjects.ListPolicies{}
|
||||
|
||||
var listCmd = &cobra.Command{
|
||||
Use: "list <policy> [flags]",
|
||||
@@ -50,8 +51,7 @@ var listCmd = &cobra.Command{
|
||||
listPolicies.Target = args[0]
|
||||
|
||||
if err := clihandler.CliList(&listPolicies); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
@@ -61,6 +61,7 @@ func init() {
|
||||
// cobra.OnInitialize(initConfig)
|
||||
|
||||
rootCmd.AddCommand(listCmd)
|
||||
listCmd.PersistentFlags().StringVarP(&listPolicies.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
listCmd.PersistentFlags().StringVar(&listPolicies.Account, "account", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-print'/'json'")
|
||||
listCmd.PersistentFlags().BoolVarP(&listPolicies.ListIDs, "id", "", false, "List control ID's instead of controls names")
|
||||
}
|
||||
|
||||
@@ -5,9 +5,10 @@ import (
|
||||
)
|
||||
|
||||
var localCmd = &cobra.Command{
|
||||
Use: "local",
|
||||
Short: "Set configuration locally (for config.json)",
|
||||
Long: ``,
|
||||
Use: "local",
|
||||
Short: "Set configuration locally (for config.json)",
|
||||
Long: ``,
|
||||
Deprecated: "use the 'set' command instead",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
},
|
||||
}
|
||||
|
||||
@@ -9,9 +9,10 @@ import (
|
||||
)
|
||||
|
||||
var localGetCmd = &cobra.Command{
|
||||
Use: "get <key>",
|
||||
Short: "Get configuration locally",
|
||||
Long: ``,
|
||||
Use: "get <key>",
|
||||
Short: "Get configuration locally",
|
||||
Long: ``,
|
||||
Deprecated: "use the 'view' command instead",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 || len(args) > 1 {
|
||||
return fmt.Errorf("requires one argument")
|
||||
@@ -30,8 +31,7 @@ var localGetCmd = &cobra.Command{
|
||||
val, err := cautils.GetValueFromConfigJson(key)
|
||||
if err != nil {
|
||||
if err.Error() == "value does not exist." {
|
||||
fmt.Printf("Could net get value from: %s, reason: %s\n", cautils.ConfigFileFullPath(), err)
|
||||
return nil
|
||||
return fmt.Errorf("failed to get value from: %s, reason: %s", cautils.ConfigFileFullPath(), err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -9,9 +9,10 @@ import (
|
||||
)
|
||||
|
||||
var localSetCmd = &cobra.Command{
|
||||
Use: "set <key>=<value>",
|
||||
Short: "Set configuration locally",
|
||||
Long: ``,
|
||||
Use: "set <key>=<value>",
|
||||
Short: "Set configuration locally",
|
||||
Long: ``,
|
||||
Deprecated: "use the 'set' command instead",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 || len(args) > 1 {
|
||||
return fmt.Errorf("requires one argument: <key>=<value>")
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliinterfaces"
|
||||
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
|
||||
@@ -23,13 +23,13 @@ var rabcCmd = &cobra.Command{
|
||||
k8s := k8sinterface.NewKubernetesApi()
|
||||
|
||||
// get config
|
||||
clusterConfig, err := getSubmittedClusterConfig(k8s)
|
||||
if err != nil {
|
||||
return err
|
||||
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
|
||||
if err := clusterConfig.SetTenant(); err != nil {
|
||||
logger.L().Error("failed setting account ID", helpers.Error(err))
|
||||
}
|
||||
|
||||
// list RBAC
|
||||
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetCustomerGUID(), clusterConfig.GetClusterName()))
|
||||
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetAccountID(), clusterConfig.GetClusterName()))
|
||||
|
||||
// submit resources
|
||||
r := reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
|
||||
@@ -41,8 +41,7 @@ var rabcCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
if err := clihandler.Submit(submitInterfaces); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
@@ -51,3 +50,17 @@ var rabcCmd = &cobra.Command{
|
||||
func init() {
|
||||
submitCmd.AddCommand(rabcCmd)
|
||||
}
|
||||
|
||||
// getKubernetesApi
|
||||
func getKubernetesApi() *k8sinterface.KubernetesApi {
|
||||
if !k8sinterface.IsConnectedToCluster() {
|
||||
return nil
|
||||
}
|
||||
return k8sinterface.NewKubernetesApi()
|
||||
}
|
||||
func getTenantConfig(Account, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
|
||||
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
|
||||
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
}
|
||||
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
}
|
||||
|
||||
@@ -6,16 +6,21 @@ import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliinterfaces"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
|
||||
reporterv2 "github.com/armosec/kubescape/resultshandling/reporter/v2"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
"github.com/google/uuid"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var formatVersion string
|
||||
|
||||
type ResultsObject struct {
|
||||
filePath string
|
||||
customerGUID string
|
||||
@@ -36,10 +41,9 @@ func (resultsObject *ResultsObject) SetResourcesReport() (*reporthandling.Postur
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &reporthandling.PostureReport{
|
||||
FrameworkReports: frameworkReports,
|
||||
ReportID: uuid.NewV4().String(),
|
||||
ReportID: uuid.NewString(),
|
||||
ReportGenerationTime: time.Now().UTC(),
|
||||
CustomerGUID: resultsObject.customerGUID,
|
||||
ClusterName: resultsObject.clusterName,
|
||||
@@ -51,7 +55,7 @@ func (resultsObject *ResultsObject) ListAllResources() (map[string]workloadinter
|
||||
}
|
||||
|
||||
var resultsCmd = &cobra.Command{
|
||||
Use: "results <json file>\nExample:\n$ kubescape submit results path/to/results.json",
|
||||
Use: "results <json file>\nExample:\n$ kubescape submit results path/to/results.json --format-version v2",
|
||||
Short: "Submit a pre scanned results file. The file must be in json format",
|
||||
Long: ``,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -59,18 +63,25 @@ var resultsCmd = &cobra.Command{
|
||||
return fmt.Errorf("missing results file")
|
||||
}
|
||||
|
||||
k8s := k8sinterface.NewKubernetesApi()
|
||||
k8s := getKubernetesApi()
|
||||
|
||||
// get config
|
||||
clusterConfig, err := getSubmittedClusterConfig(k8s)
|
||||
if err != nil {
|
||||
return err
|
||||
clusterConfig := getTenantConfig(submitInfo.Account, "", k8s)
|
||||
if err := clusterConfig.SetTenant(); err != nil {
|
||||
logger.L().Error("failed setting account ID", helpers.Error(err))
|
||||
}
|
||||
|
||||
resultsObjects := NewResultsObject(clusterConfig.GetCustomerGUID(), clusterConfig.GetClusterName(), args[0])
|
||||
resultsObjects := NewResultsObject(clusterConfig.GetAccountID(), clusterConfig.GetClusterName(), args[0])
|
||||
|
||||
// submit resources
|
||||
r := reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
|
||||
var r reporter.IReport
|
||||
switch formatVersion {
|
||||
case "v2":
|
||||
r = reporterv2.NewReportEventReceiver(clusterConfig.GetConfigObj(), "")
|
||||
default:
|
||||
logger.L().Warning("Deprecated results version. run with '--format-version' flag", helpers.String("your version", formatVersion), helpers.String("latest version", "v2"))
|
||||
r = reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
|
||||
}
|
||||
|
||||
submitInterfaces := cliinterfaces.SubmitInterfaces{
|
||||
ClusterConfig: clusterConfig,
|
||||
@@ -79,8 +90,7 @@ var resultsCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
if err := clihandler.Submit(submitInterfaces); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
return nil
|
||||
},
|
||||
@@ -88,6 +98,7 @@ var resultsCmd = &cobra.Command{
|
||||
|
||||
func init() {
|
||||
submitCmd.AddCommand(resultsCmd)
|
||||
resultsCmd.PersistentFlags().StringVar(&formatVersion, "format-version", "v1", "Output object can be different between versions, this is for maintaining backward and forward compatibility. Supported:'v1'/'v2'")
|
||||
}
|
||||
|
||||
func loadResultsFromFile(filePath string) ([]reporthandling.FrameworkReport, error) {
|
||||
|
||||
@@ -1,29 +1,43 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/golang/glog"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var cfgFile string
|
||||
var armoBEURLs = ""
|
||||
var armoBEURLsDep = ""
|
||||
var rootInfo cautils.RootInfo
|
||||
|
||||
const envFlagUsage = "Send report results to specific URL. Format:<ReportReceiver>,<Backend>,<Frontend>.\n\t\tExample:report.armo.cloud,api.armo.cloud,portal.armo.cloud"
|
||||
|
||||
var ksExamples = `
|
||||
# Scan command
|
||||
kubescape scan --submit
|
||||
|
||||
# List supported frameworks
|
||||
kubescape list frameworks
|
||||
|
||||
# Download artifacts (air-gapped environment support)
|
||||
kubescape download artifacts
|
||||
|
||||
# View cached configurations
|
||||
kubescape config view
|
||||
`
|
||||
|
||||
var rootCmd = &cobra.Command{
|
||||
Use: "kubescape",
|
||||
Short: "Kubescape is a tool for testing Kubernetes security posture",
|
||||
Long: `Kubescape is a tool for testing Kubernetes security posture based on NSA \ MITRE ATT&CK® specifications.`,
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
flag.Parse()
|
||||
InitArmoBEConnector()
|
||||
return nil
|
||||
},
|
||||
Use: "kubescape",
|
||||
Version: cautils.BuildNumber,
|
||||
Short: "Kubescape is a tool for testing Kubernetes security posture",
|
||||
Long: `Based on NSA \ MITRE ATT&CK® and other frameworks specifications`,
|
||||
Example: ksExamples,
|
||||
}
|
||||
|
||||
func Execute() {
|
||||
@@ -31,34 +45,80 @@ func Execute() {
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.PersistentFlags().StringVarP(&scanInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
flag.CommandLine.StringVar(&armoBEURLs, "environment", "", envFlagUsage)
|
||||
rootCmd.PersistentFlags().StringVar(&armoBEURLs, "environment", "", envFlagUsage)
|
||||
|
||||
cobra.OnInitialize(initLogger, initLoggerLevel, initEnvironment, initCacheDir)
|
||||
|
||||
rootCmd.PersistentFlags().StringVar(&armoBEURLsDep, "environment", "", envFlagUsage)
|
||||
rootCmd.PersistentFlags().StringVar(&armoBEURLs, "env", "", envFlagUsage)
|
||||
rootCmd.PersistentFlags().MarkDeprecated("environment", "use 'env' instead")
|
||||
rootCmd.PersistentFlags().MarkHidden("environment")
|
||||
rootCmd.PersistentFlags().MarkHidden("env")
|
||||
|
||||
rootCmd.PersistentFlags().StringVarP(&rootInfo.Logger, "logger", "l", helpers.InfoLevel.String(), fmt.Sprintf("Logger level. Supported: %s [$KS_LOGGER]", strings.Join(helpers.SupportedLevels(), "/")))
|
||||
rootCmd.PersistentFlags().StringVar(&rootInfo.CacheDir, "cache-dir", getter.DefaultLocalStore, "Cache directory [$KS_CACHE_DIR]")
|
||||
rootCmd.PersistentFlags().BoolVarP(&rootInfo.DisableColor, "disable-color", "", false, "Disable Color output for logging")
|
||||
|
||||
}
|
||||
|
||||
func InitArmoBEConnector() {
|
||||
func initLogger() {
|
||||
if l := os.Getenv("KS_LOGGER_NAME"); l != "" {
|
||||
logger.InitializeLogger(l)
|
||||
}
|
||||
}
|
||||
func initLoggerLevel() {
|
||||
if rootInfo.Logger != helpers.InfoLevel.String() {
|
||||
} else if l := os.Getenv("KS_LOGGER"); l != "" {
|
||||
rootInfo.Logger = l
|
||||
}
|
||||
|
||||
logger.L().DisableColor(rootInfo.DisableColor)
|
||||
|
||||
if err := logger.L().SetLevel(rootInfo.Logger); err != nil {
|
||||
logger.L().Fatal(fmt.Sprintf("supported levels: %s", strings.Join(helpers.SupportedLevels(), "/")), helpers.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
func initCacheDir() {
|
||||
if rootInfo.CacheDir != getter.DefaultLocalStore {
|
||||
getter.DefaultLocalStore = rootInfo.CacheDir
|
||||
} else if cacheDir := os.Getenv("KS_CACHE_DIR"); cacheDir != "" {
|
||||
getter.DefaultLocalStore = cacheDir
|
||||
} else {
|
||||
return // using default cache dir location
|
||||
}
|
||||
|
||||
logger.L().Debug("cache dir updated", helpers.String("path", getter.DefaultLocalStore))
|
||||
}
|
||||
func initEnvironment() {
|
||||
if armoBEURLsDep != "" {
|
||||
armoBEURLs = armoBEURLsDep
|
||||
}
|
||||
urlSlices := strings.Split(armoBEURLs, ",")
|
||||
if len(urlSlices) > 3 {
|
||||
glog.Errorf("Too many URLs")
|
||||
os.Exit(1)
|
||||
if len(urlSlices) != 1 && len(urlSlices) < 3 {
|
||||
logger.L().Fatal("expected at least 3 URLs (report, api, frontend, auth)")
|
||||
}
|
||||
switch len(urlSlices) {
|
||||
case 1:
|
||||
switch urlSlices[0] {
|
||||
case "dev":
|
||||
case "dev", "development":
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPIDev())
|
||||
case "stage", "staging":
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPIStaging())
|
||||
case "":
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPIProd())
|
||||
default:
|
||||
glog.Errorf("--environment flag usage: %s", envFlagUsage)
|
||||
os.Exit(1)
|
||||
logger.L().Fatal("--environment flag usage: " + envFlagUsage)
|
||||
}
|
||||
case 2:
|
||||
glog.Errorf("--environment flag usage: %s", envFlagUsage)
|
||||
os.Exit(1)
|
||||
case 3:
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(urlSlices[0], urlSlices[1], urlSlices[2]))
|
||||
logger.L().Fatal("--environment flag usage: " + envFlagUsage)
|
||||
case 3, 4:
|
||||
var armoAUTHURL string
|
||||
armoERURL := urlSlices[0] // mandatory
|
||||
armoBEURL := urlSlices[1] // mandatory
|
||||
armoFEURL := urlSlices[2] // mandatory
|
||||
if len(urlSlices) == 4 { // the auth URL is optional; only read it when it is present
|
||||
armoAUTHURL = urlSlices[3]
|
||||
}
|
||||
getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL, armoAUTHURL))
|
||||
}
|
||||
}
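Illustrative usage of the environment handling above (host names are placeholders; the fourth, auth URL is optional):

kubescape scan --env dev
kubescape scan --env report.example.com,api.example.com,portal.example.com,auth.example.com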
|
||||
|
||||
@@ -1,9 +1,6 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -11,53 +8,88 @@ import (
|
||||
|
||||
var scanInfo cautils.ScanInfo
|
||||
|
||||
var scanCmdExamples = `
|
||||
Scan command is for scanning an existing cluster or Kubernetes manifest files based on pre-defined frameworks
|
||||
|
||||
# Scan current cluster with all frameworks
|
||||
kubescape scan --submit --enable-host-scan
|
||||
|
||||
# Scan kubernetes YAML manifest files
|
||||
kubescape scan *.yaml
|
||||
|
||||
# Scan and save the results in the JSON format
|
||||
kubescape scan --format json --output results.json
|
||||
|
||||
# Display all resources
|
||||
kubescape scan --verbose
|
||||
|
||||
# Scan different clusters from the kubectl context
|
||||
kubescape scan --kube-context <kubernetes context>
|
||||
|
||||
`
|
||||
|
||||
// scanCmd represents the scan command
|
||||
var scanCmd = &cobra.Command{
|
||||
Use: "scan <command>",
|
||||
Short: "Scan the current running cluster or yaml files",
|
||||
Long: `The action you want to perform`,
|
||||
Use: "scan",
|
||||
Short: "Scan the current running cluster or yaml files",
|
||||
Long: `The action you want to perform`,
|
||||
Example: scanCmdExamples,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
if !strings.EqualFold(args[0], "framework") && !strings.EqualFold(args[0], "control") {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: framework, control", args[0])
|
||||
if args[0] != "framework" && args[0] != "control" {
|
||||
scanInfo.ScanAll = true
|
||||
return frameworkCmd.RunE(cmd, append([]string{"all"}, args...))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
scanInfo.ScanAll = true
|
||||
// frameworks := getter.NativeFrameworks
|
||||
// frameworkArgs := []string{strings.Join(frameworks, ",")}
|
||||
frameworkCmd.RunE(cmd, []string{"all"})
|
||||
return frameworkCmd.RunE(cmd, []string{"all"})
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func frameworkInitConfig() {
|
||||
k8sinterface.SetClusterContextName(scanInfo.KubeContext)
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
||||
cobra.OnInitialize(frameworkInitConfig)
|
||||
|
||||
rootCmd.AddCommand(scanCmd)
|
||||
rootCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "--kube-context", "", "", "Kube context. Default will use the current-context")
|
||||
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Account, "account", "", "", "ARMO portal account ID. Default will load account ID from configMap or config file")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "kube-context", "", "", "Kube context. Default will use the current-context")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to a controls-config obj. If not set will download controls-config from ARMO management portal")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.UseArtifactsFrom, "use-artifacts-from", "", "Load artifacts from local directory. If not used will download them")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system,kube-public")
|
||||
scanCmd.PersistentFlags().Uint16VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer"/"json"/"junit"/"prometheus"`)
|
||||
scanCmd.PersistentFlags().Float32VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer","json","junit","prometheus","pdf"`)
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.IncludeNamespaces, "include-namespaces", "", "scan specific namespaces. e.g: --include-namespaces ns-a,ns-b")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to Armo backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to ARMO backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
|
||||
scanCmd.PersistentFlags().BoolVar(&scanInfo.VerboseMode, "verbose", false, "Display all of the input resources and not only failed resources")
|
||||
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Silent, "silent", "s", false, "Silent progress messages")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to ARMO management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.HostSensorYamlPath, "host-scan-yaml", "", "Override default host sensor DaemonSet. Use this flag cautiously")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.FormatVersion, "format-version", "v1", "Output object can be different between versions; this is for maintaining backward and forward compatibility. Supported: 'v1'/'v2'")
|
||||
|
||||
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensor, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valueable data from cluster nodes for certain controls")
|
||||
// Deprecated flags - remove 1.May.2022
|
||||
scanCmd.PersistentFlags().MarkDeprecated("silent", "use '--logger' flag instead. Flag will be removed at 1.May.2022")
|
||||
|
||||
// hidden flags
|
||||
scanCmd.PersistentFlags().MarkHidden("host-scan-yaml") // this flag should be used very cautiously. We prefer users will not use it at all unless the DaemonSet can not run pods on the nodes
|
||||
scanCmd.PersistentFlags().MarkHidden("silent") // this flag should be deprecated since we added the --logger support
|
||||
// scanCmd.PersistentFlags().MarkHidden("format-version") // meant for testing different output approaches and not for common use
|
||||
|
||||
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. It is deleted right after the data is collected. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://raw.githubusercontent.com/armosec/kubescape/master/hostsensorutils/hostsensor.yaml")
|
||||
hostF.NoOptDefVal = "true"
|
||||
hostF.DefValue = "false, for no TTY in stdin"
|
||||
|
||||
}
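The reworked scan command treats an unrecognized first argument (anything other than framework or control) as an implicit framework scan and re-dispatches to the framework subcommand with "all" prepended. A self-contained, hypothetical cobra sketch of that dispatch pattern; the command wiring and messages are illustrative, not the actual kubescape commands.

```go
package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/cobra"
)

func main() {
	// frameworkCmd stands in for the real "scan framework" subcommand.
	frameworkCmd := &cobra.Command{
		Use: "framework <name>",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("scanning frameworks:", strings.Join(args, " "))
			return nil
		},
	}

	scanCmd := &cobra.Command{
		Use: "scan",
		// Unknown first arguments fall through to a full framework scan,
		// mirroring the Args handler in the hunk above.
		Args: func(cmd *cobra.Command, args []string) error {
			if len(args) > 0 && args[0] != "framework" && args[0] != "control" {
				return frameworkCmd.RunE(cmd, append([]string{"all"}, args...))
			}
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) == 0 {
				// No arguments at all: scan everything.
				return frameworkCmd.RunE(cmd, []string{"all"})
			}
			return nil
		},
	}
	scanCmd.AddCommand(frameworkCmd)

	if err := scanCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```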
|
||||
|
||||
@@ -1,12 +1,19 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliobjects"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var submitInfo cliobjects.Submit
|
||||
|
||||
var submitCmdExamples = `
|
||||
|
||||
`
|
||||
var submitCmd = &cobra.Command{
|
||||
Use: "submit <command>",
|
||||
Short: "Submit an object to the Kubescape SaaS version",
|
||||
@@ -15,17 +22,25 @@ var submitCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(submitCmd)
|
||||
}
|
||||
|
||||
func getSubmittedClusterConfig(k8s *k8sinterface.KubernetesApi) (*cautils.ClusterConfig, error) {
|
||||
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), scanInfo.Account, scanInfo.KubeContext) // TODO - support none cluster env submit
|
||||
if clusterConfig.GetCustomerGUID() != "" {
|
||||
if err := clusterConfig.SetTenant(); err != nil {
|
||||
return clusterConfig, err
|
||||
var submitExceptionsCmd = &cobra.Command{
|
||||
Use:   "exceptions <full path to exceptions file>",
|
||||
Short: "Submit exceptions to the Kubescape SaaS version",
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 1 {
|
||||
return fmt.Errorf("missing full path to exceptions file")
|
||||
}
|
||||
}
|
||||
|
||||
return clusterConfig, nil
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := clihandler.SubmitExceptions(submitInfo.Account, args[0]); err != nil {
|
||||
logger.L().Fatal(err.Error())
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
submitCmd.PersistentFlags().StringVarP(&submitInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
rootCmd.AddCommand(submitCmd)
|
||||
|
||||
submitCmd.AddCommand(submitExceptionsCmd)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -14,7 +15,7 @@ var versionCmd = &cobra.Command{
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, "", "", "version"))
|
||||
fmt.Println("Your current version is: " + cautils.BuildNumber)
|
||||
fmt.Fprintln(os.Stdout, "Your current version is: "+cautils.BuildNumber)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
@@ -5,16 +5,14 @@ import (
|
||||
"io/fs"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
printerv1 "github.com/armosec/kubescape/resultshandling/printer/v1"
|
||||
|
||||
// printerv2 "github.com/armosec/kubescape/resultshandling/printer/v2"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/clihandler/cliinterfaces"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/hostsensorutils"
|
||||
"github.com/armosec/kubescape/opaprocessor"
|
||||
"github.com/armosec/kubescape/policyhandler"
|
||||
@@ -22,6 +20,7 @@ import (
|
||||
"github.com/armosec/kubescape/resultshandling"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/resources"
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
@@ -35,27 +34,39 @@ type componentInterfaces struct {
|
||||
|
||||
func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
|
||||
// ================== setup k8s interface object ======================================
|
||||
var k8s *k8sinterface.KubernetesApi
|
||||
if scanInfo.GetScanningEnvironment() == cautils.ScanCluster {
|
||||
k8s = getKubernetesApi()
|
||||
if k8s == nil {
|
||||
fmt.Println("Failed connecting to Kubernetes cluster")
|
||||
os.Exit(1)
|
||||
logger.L().Fatal("failed connecting to Kubernetes cluster")
|
||||
}
|
||||
}
|
||||
|
||||
// ================== setup tenant object ======================================
|
||||
|
||||
tenantConfig := getTenantConfig(scanInfo.Account, scanInfo.KubeContext, k8s)
|
||||
|
||||
// Set submit behavior AFTER loading tenant config
|
||||
setSubmitBehavior(scanInfo, tenantConfig)
|
||||
|
||||
if scanInfo.Submit {
|
||||
// submit - Create tenant & Submit report
|
||||
if err := tenantConfig.SetTenant(); err != nil {
|
||||
logger.L().Error(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
// ================== version testing ======================================
|
||||
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanInfo.GetScanningEnvironment()))
|
||||
|
||||
// ================== setup host sensor object ======================================
|
||||
|
||||
hostSensorHandler := getHostSensorHandler(scanInfo, k8s)
|
||||
if err := hostSensorHandler.Init(); err != nil {
|
||||
errMsg := "failed to init host sensor"
|
||||
if scanInfo.VerboseMode {
|
||||
errMsg = fmt.Sprintf("%s: %v", errMsg, err)
|
||||
}
|
||||
cautils.ErrorDisplay(errMsg)
|
||||
logger.L().Error("failed to init host sensor", helpers.Error(err))
|
||||
hostSensorHandler = &hostsensorutils.HostSensorHandlerMock{}
|
||||
}
|
||||
// excluding hostsensor namespace
|
||||
@@ -63,19 +74,28 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
scanInfo.ExcludedNamespaces = fmt.Sprintf("%s,%s", scanInfo.ExcludedNamespaces, hostSensorHandler.GetNamespace())
|
||||
}
|
||||
|
||||
resourceHandler := getResourceHandler(scanInfo, tenantConfig, k8s, hostSensorHandler)
|
||||
// ================== setup registry adaptors ======================================
|
||||
|
||||
registryAdaptors, err := resourcehandler.NewRegistryAdaptors()
|
||||
if err != nil {
|
||||
logger.L().Error("failed to initialize registry adaptors", helpers.Error(err))
|
||||
}
|
||||
|
||||
// ================== setup resource collector object ======================================
|
||||
|
||||
resourceHandler := getResourceHandler(scanInfo, tenantConfig, k8s, hostSensorHandler, registryAdaptors)
|
||||
|
||||
// ================== setup reporter & printer objects ======================================
|
||||
|
||||
// reporting behavior - setup reporter
|
||||
reportHandler := getReporter(tenantConfig, scanInfo.Submit)
|
||||
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanInfo.GetScanningEnvironment()))
|
||||
reportHandler := getReporter(tenantConfig, scanInfo.ReportID, scanInfo.Submit, scanInfo.FrameworkScan, len(scanInfo.InputPatterns) == 0)
|
||||
|
||||
// setup printer
|
||||
printerHandler := printerv1.GetPrinter(scanInfo.Format, scanInfo.VerboseMode)
|
||||
// printerHandler = printerv2.GetPrinter(scanInfo.Format, scanInfo.VerboseMode)
|
||||
printerHandler := resultshandling.NewPrinter(scanInfo.Format, scanInfo.FormatVersion, scanInfo.VerboseMode)
|
||||
printerHandler.SetWriter(scanInfo.Output)
|
||||
|
||||
// ================== return interface ======================================
|
||||
|
||||
return componentInterfaces{
|
||||
tenantConfig: tenantConfig,
|
||||
resourceHandler: resourceHandler,
|
||||
@@ -85,25 +105,24 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
}
|
||||
}
|
||||
|
||||
func ScanCliSetup(scanInfo *cautils.ScanInfo) error {
|
||||
cautils.ScanStartDisplay()
|
||||
func Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsHandler, error) {
|
||||
logger.L().Info("ARMO security scanner starting")
|
||||
|
||||
// ===================== Initialization =====================
|
||||
scanInfo.Init() // initialize scan info
|
||||
|
||||
interfaces := getInterfaces(scanInfo)
|
||||
// setPolicyGetter(scanInfo, interfaces.clusterConfig.GetCustomerGUID())
|
||||
|
||||
processNotification := make(chan *cautils.OPASessionObj)
|
||||
reportResults := make(chan *cautils.OPASessionObj)
|
||||
|
||||
cautils.ClusterName = interfaces.tenantConfig.GetClusterName() // TODO - Deprecated
|
||||
cautils.CustomerGUID = interfaces.tenantConfig.GetCustomerGUID() // TODO - Deprecated
|
||||
cautils.ClusterName = interfaces.tenantConfig.GetClusterName() // TODO - Deprecated
|
||||
cautils.CustomerGUID = interfaces.tenantConfig.GetAccountID() // TODO - Deprecated
|
||||
interfaces.report.SetClusterName(interfaces.tenantConfig.GetClusterName())
|
||||
interfaces.report.SetCustomerGUID(interfaces.tenantConfig.GetCustomerGUID())
|
||||
interfaces.report.SetCustomerGUID(interfaces.tenantConfig.GetAccountID())
|
||||
|
||||
downloadReleasedPolicy := getter.NewDownloadReleasedPolicy() // download config inputs from github release
|
||||
|
||||
// set policy getter only after setting the customerGUID
|
||||
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetCustomerGUID(), scanInfo.FrameworkScan, downloadReleasedPolicy)
|
||||
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetCustomerGUID(), downloadReleasedPolicy)
|
||||
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetAccountID(), scanInfo.FrameworkScan, downloadReleasedPolicy)
|
||||
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
|
||||
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions)
|
||||
|
||||
// TODO - list supported frameworks/controls
|
||||
@@ -111,84 +130,60 @@ func ScanCliSetup(scanInfo *cautils.ScanInfo) error {
|
||||
scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), reporthandling.KindFramework)
|
||||
}
|
||||
|
||||
//
|
||||
// remove host scanner components
|
||||
defer func() {
|
||||
if err := interfaces.hostSensorHandler.TearDown(); err != nil {
|
||||
errMsg := "failed to tear down host sensor"
|
||||
if scanInfo.VerboseMode {
|
||||
errMsg = fmt.Sprintf("%s: %v", errMsg, err)
|
||||
}
|
||||
cautils.ErrorDisplay(errMsg)
|
||||
logger.L().Error("failed to tear down host sensor", helpers.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
// cli handler setup
|
||||
go func() {
|
||||
// policy handler setup
|
||||
policyHandler := policyhandler.NewPolicyHandler(&processNotification, interfaces.resourceHandler)
|
||||
resultsHandling := resultshandling.NewResultsHandler(interfaces.report, interfaces.printerHandler)
|
||||
|
||||
if err := Scan(policyHandler, scanInfo); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
|
||||
// processor setup - rego run
|
||||
go func() {
|
||||
opaprocessorObj := opaprocessor.NewOPAProcessorHandler(&processNotification, &reportResults)
|
||||
opaprocessorObj.ProcessRulesListenner()
|
||||
}()
|
||||
|
||||
resultsHandling := resultshandling.NewResultsHandler(&reportResults, interfaces.report, interfaces.printerHandler)
|
||||
score := resultsHandling.HandleResults(scanInfo)
|
||||
|
||||
// print report url
|
||||
interfaces.report.DisplayReportURL()
|
||||
|
||||
if score > float32(scanInfo.FailThreshold) {
|
||||
return fmt.Errorf("scan risk-score %.2f is above permitted threshold %d", score, scanInfo.FailThreshold)
|
||||
// ===================== policies & resources =====================
|
||||
policyHandler := policyhandler.NewPolicyHandler(interfaces.resourceHandler)
|
||||
scanData, err := CollectResources(policyHandler, scanInfo)
|
||||
if err != nil {
|
||||
return resultsHandling, err
|
||||
}
|
||||
|
||||
return nil
|
||||
// ========================= opa testing =====================
|
||||
deps := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), interfaces.tenantConfig.GetClusterName())
|
||||
reportResults := opaprocessor.NewOPAProcessor(scanData, deps)
|
||||
if err := reportResults.ProcessRulesListenner(); err != nil {
|
||||
// TODO - do something
|
||||
return resultsHandling, err
|
||||
}
|
||||
|
||||
// ========================= results handling =====================
|
||||
resultsHandling.SetData(scanData)
|
||||
|
||||
// if resultsHandling.GetRiskScore() > float32(scanInfo.FailThreshold) {
|
||||
// return resultsHandling, fmt.Errorf("scan risk-score %.2f is above permitted threshold %.2f", resultsHandling.GetRiskScore(), scanInfo.FailThreshold)
|
||||
// }
|
||||
|
||||
return resultsHandling, nil
|
||||
}
|
||||
|
||||
func Scan(policyHandler *policyhandler.PolicyHandler, scanInfo *cautils.ScanInfo) error {
|
||||
func CollectResources(policyHandler *policyhandler.PolicyHandler, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
|
||||
policyNotification := &reporthandling.PolicyNotification{
|
||||
NotificationType: reporthandling.TypeExecPostureScan,
|
||||
Rules: scanInfo.PolicyIdentifier,
|
||||
Designators: armotypes.PortalDesignator{},
|
||||
Rules: scanInfo.PolicyIdentifier,
|
||||
KubescapeNotification: reporthandling.KubescapeNotification{
|
||||
Designators: armotypes.PortalDesignator{},
|
||||
NotificationType: reporthandling.TypeExecPostureScan,
|
||||
},
|
||||
}
|
||||
switch policyNotification.NotificationType {
|
||||
switch policyNotification.KubescapeNotification.NotificationType {
|
||||
case reporthandling.TypeExecPostureScan:
|
||||
if err := policyHandler.HandleNotificationRequest(policyNotification, scanInfo); err != nil {
|
||||
return err
|
||||
collectedResources, err := policyHandler.CollectResources(policyNotification, scanInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return collectedResources, nil
|
||||
|
||||
default:
|
||||
return fmt.Errorf("notification type '%s' Unknown", policyNotification.NotificationType)
|
||||
return nil, fmt.Errorf("notification type '%s' Unknown", policyNotification.KubescapeNotification.NotificationType)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error {
|
||||
|
||||
// list resources
|
||||
postureReport, err := submitInterfaces.SubmitObjects.SetResourcesReport()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
allresources, err := submitInterfaces.SubmitObjects.ListAllResources()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// report
|
||||
if err := submitInterfaces.Reporter.ActionSendReport(&cautils.OPASessionObj{PostureReport: postureReport, AllResources: allresources}); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("\nData has been submitted successfully")
|
||||
submitInterfaces.Reporter.DisplayReportURL()
|
||||
|
||||
return nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func askUserForHostSensor() bool {
|
||||
@@ -200,8 +195,8 @@ func askUserForHostSensor() bool {
|
||||
if ssss, err := os.Stdin.Stat(); err == nil {
|
||||
// fmt.Printf("Found stdin type: %s\n", ssss.Mode().Type())
|
||||
if ssss.Mode().Type()&(fs.ModeDevice|fs.ModeCharDevice) > 0 { //has TTY
|
||||
fmt.Printf("Would you like to scan K8s nodes? [y/N]. This is required to collect valuable data for certain controls\n")
|
||||
fmt.Printf("Use --enable-host-scan flag to suppress this message\n")
|
||||
fmt.Fprintf(os.Stderr, "Would you like to scan K8s nodes? [y/N]. This is required to collect valuable data for certain controls\n")
|
||||
fmt.Fprintf(os.Stderr, "Use --enable-host-scan flag to suppress this message\n")
|
||||
var b []byte = make([]byte, 1)
|
||||
if n, err := os.Stdin.Read(b); err == nil {
|
||||
if n > 0 && len(b) > 0 && (b[0] == 'y' || b[0] == 'Y') {
|
||||
|
||||
@@ -7,10 +7,11 @@ import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
"github.com/armosec/kubescape/hostsensorutils"
|
||||
"github.com/armosec/kubescape/resourcehandler"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
|
||||
reporterv2 "github.com/armosec/kubescape/resultshandling/reporter/v2"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
@@ -42,25 +43,37 @@ func getExceptionsGetter(useExceptions string) getter.IExceptionsGetter {
|
||||
|
||||
func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, submit bool) *cautils.RBACObjects {
|
||||
if submit {
|
||||
return cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, tenantConfig.GetCustomerGUID(), tenantConfig.GetClusterName()))
|
||||
return cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, tenantConfig.GetAccountID(), tenantConfig.GetClusterName()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getReporter(tenantConfig cautils.ITenantConfig, submit bool) reporter.IReport {
|
||||
if submit {
|
||||
// return reporterv1.NewReportEventReceiver(tenantConfig.GetConfigObj())
|
||||
return reporterv2.NewReportEventReceiver(tenantConfig.GetConfigObj())
|
||||
func getReporter(tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan, clusterScan bool) reporter.IReport {
|
||||
if submit && clusterScan {
|
||||
return reporterv2.NewReportEventReceiver(tenantConfig.GetConfigObj(), reportID)
|
||||
}
|
||||
return reporterv1.NewReportMock()
|
||||
if tenantConfig.GetAccountID() == "" && fwScan && clusterScan {
|
||||
// Add link only when scanning a cluster using a framework
|
||||
return reporterv2.NewReportMock(reporterv2.NO_SUBMIT_QUERY, "run kubescape with the '--submit' flag")
|
||||
}
|
||||
var message string
|
||||
if !fwScan {
|
||||
message = "Kubescape does not submit scan results when scanning controls"
|
||||
}
|
||||
if !clusterScan {
|
||||
message = "Kubescape will submit scan results only when scanning a cluster (not YAML files)"
|
||||
}
|
||||
return reporterv2.NewReportMock("", message)
|
||||
}
|
||||
|
||||
func getResourceHandler(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor) resourcehandler.IResourceHandler {
|
||||
func getResourceHandler(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor, registryAdaptors *resourcehandler.RegistryAdaptors) resourcehandler.IResourceHandler {
|
||||
if len(scanInfo.InputPatterns) > 0 || k8s == nil {
|
||||
return resourcehandler.NewFileResourceHandler(scanInfo.InputPatterns)
|
||||
// scanInfo.HostSensor.SetBool(false)
|
||||
return resourcehandler.NewFileResourceHandler(scanInfo.InputPatterns, registryAdaptors)
|
||||
}
|
||||
getter.GetArmoAPIConnector()
|
||||
rbacObjects := getRBACHandler(tenantConfig, k8s, scanInfo.Submit)
|
||||
return resourcehandler.NewK8sResourceHandler(k8s, getFieldSelector(scanInfo), hostSensorHandler, rbacObjects)
|
||||
return resourcehandler.NewK8sResourceHandler(k8s, getFieldSelector(scanInfo), hostSensorHandler, rbacObjects, registryAdaptors)
|
||||
}
|
||||
|
||||
func getHostSensorHandler(scanInfo *cautils.ScanInfo, k8s *k8sinterface.KubernetesApi) hostsensorutils.IHostSensor {
|
||||
@@ -70,14 +83,14 @@ func getHostSensorHandler(scanInfo *cautils.ScanInfo, k8s *k8sinterface.Kubernet
|
||||
|
||||
hasHostSensorControls := true
|
||||
// we need to determine which controls need the host sensor
|
||||
if scanInfo.HostSensor.Get() == nil && hasHostSensorControls {
|
||||
scanInfo.HostSensor.SetBool(askUserForHostSensor())
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: Kubernetes cluster nodes scanning is disabled. This is required to collect valuable data for certain controls. You can enable it using the --enable-host-scan flag\n")
|
||||
if scanInfo.HostSensorEnabled.Get() == nil && hasHostSensorControls {
|
||||
scanInfo.HostSensorEnabled.SetBool(askUserForHostSensor())
|
||||
logger.L().Warning("Kubernetes cluster nodes scanning is disabled. This is required to collect valuable data for certain controls. You can enable it using the --enable-host-scan flag")
|
||||
}
|
||||
if hostSensorVal := scanInfo.HostSensor.Get(); hostSensorVal != nil && *hostSensorVal {
|
||||
hostSensorHandler, err := hostsensorutils.NewHostSensorHandler(k8s)
|
||||
if hostSensorVal := scanInfo.HostSensorEnabled.Get(); hostSensorVal != nil && *hostSensorVal {
|
||||
hostSensorHandler, err := hostsensorutils.NewHostSensorHandler(k8s, scanInfo.HostSensorYamlPath)
|
||||
if err != nil {
|
||||
cautils.WarningDisplay(os.Stderr, fmt.Sprintf("Warning: failed to create host sensor: %v\n", err.Error()))
|
||||
logger.L().Warning(fmt.Sprintf("failed to create host sensor: %s", err.Error()))
|
||||
return &hostsensorutils.HostSensorHandlerMock{}
|
||||
}
|
||||
return hostSensorHandler
|
||||
@@ -109,7 +122,7 @@ func policyIdentifierNames(pi []reporthandling.PolicyIdentifier) string {
|
||||
return policiesNames
|
||||
}
|
||||
|
||||
// setSubmitBehavior - Setup the desired cluster behavior regarding submittion to the Armo BE
|
||||
// setSubmitBehavior - Setup the desired cluster behavior regarding submitting to the Armo BE
|
||||
func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig) {
|
||||
|
||||
/*
|
||||
@@ -135,14 +148,8 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
|
||||
// Submit report
|
||||
scanInfo.Submit = true
|
||||
}
|
||||
} else { // config not found in cache (not submitted)
|
||||
if scanInfo.Submit {
|
||||
// submit - Create tenant & Submit report
|
||||
if err := tenantConfig.SetTenant(); err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// setPolicyGetter set the policy getter - local file/github release/ArmoAPI
|
||||
@@ -152,7 +159,6 @@ func getPolicyGetter(loadPoliciesFromFile []string, accountID string, frameworkS
|
||||
}
|
||||
if accountID != "" && frameworkScope {
|
||||
g := getter.GetArmoAPIConnector() // download policy from ARMO backend
|
||||
g.SetCustomerGUID(accountID)
|
||||
return g
|
||||
}
|
||||
if downloadReleasedPolicy == nil {
|
||||
@@ -183,7 +189,6 @@ func getConfigInputsGetter(ControlsInputs string, accountID string, downloadRele
|
||||
}
|
||||
if accountID != "" {
|
||||
g := getter.GetArmoAPIConnector() // download config from ARMO backend
|
||||
g.SetCustomerGUID(accountID)
|
||||
return g
|
||||
}
|
||||
if downloadReleasedPolicy == nil {
|
||||
@@ -197,7 +202,7 @@ func getConfigInputsGetter(ControlsInputs string, accountID string, downloadRele
|
||||
|
||||
func getDownloadReleasedPolicy(downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull policy, fallback to cache
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: failed to get policies from github release, loading policies from cache\n")
|
||||
logger.L().Warning("failed to get policies from github release, loading policies from cache", helpers.Error(err))
|
||||
return getter.NewLoadPolicy(getDefaultFrameworksPaths())
|
||||
} else {
|
||||
return downloadReleasedPolicy
|
||||
@@ -214,8 +219,8 @@ func getDefaultFrameworksPaths() []string {
|
||||
|
||||
func listFrameworksNames(policyGetter getter.IPolicyGetter) []string {
|
||||
fw, err := policyGetter.ListFrameworks()
|
||||
if err != nil {
|
||||
fw = getDefaultFrameworksPaths()
|
||||
if err == nil {
|
||||
return fw
|
||||
}
|
||||
return fw
|
||||
return getter.NativeFrameworks
|
||||
}
|
||||
|
||||
containerscan/containerscan_mock.go (new file, 90 additions)
@@ -0,0 +1,90 @@
|
||||
package containerscan
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/francoispqt/gojay"
|
||||
)
|
||||
|
||||
// GenerateContainerScanReportMock - generate a scan result
|
||||
func GenerateContainerScanReportMock() ScanResultReport {
|
||||
ds := ScanResultReport{
|
||||
WLID: "wlid://cluster-k8s-geriatrix-k8s-demo3/namespace-whisky-app/deployment-whisky4all-shipping",
|
||||
CustomerGUID: "1231bcb1-49ce-4a67-bdd3-5da7a393ae08",
|
||||
ImgTag: "dreg.armo.cloud:443/demoservice:v16",
|
||||
ImgHash: "docker-pullable://dreg.armo.cloud:443/demoservice@sha256:754f3cfca915a07ed10655a301dd7a8dc5526a06f9bd06e7c932f4d4108a8296",
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
}
|
||||
|
||||
ds.Layers = make(LayersList, 0)
|
||||
layer := ScanResultLayer{}
|
||||
GenerateContainerScanLayer(&layer)
|
||||
ds.Layers = append(ds.Layers, layer)
|
||||
return ds
|
||||
}
|
||||
|
||||
// GenerateContainerScanReportNoVulMock - generate a scan result without vulnerabilities
|
||||
func GenerateContainerScanReportNoVulMock() ScanResultReport {
|
||||
ds := ScanResultReport{
|
||||
WLID: "wlid://cluster-k8s-geriatrix-k8s-demo3/namespace-whisky-app/deployment-whisky4all-shipping",
|
||||
CustomerGUID: "1231bcb1-49ce-4a67-bdd3-5da7a393ae08",
|
||||
ImgTag: "dreg.armo.cloud:443/demoservice:v16",
|
||||
ImgHash: "docker-pullable://dreg.armo.cloud:443/demoservice@sha256:754f3cfca915a07ed10655a301dd7a8dc5526a06f9bd06e7c932f4d4108a8296",
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
ContainerName: "shipping",
|
||||
}
|
||||
|
||||
ds.Layers = make(LayersList, 0)
|
||||
layer := ScanResultLayer{LayerHash: "aaa"}
|
||||
ds.Layers = append(ds.Layers, layer)
|
||||
return ds
|
||||
}
|
||||
|
||||
var hash = []rune("abcdef0123456789")
|
||||
var nums = []rune("0123456789")
|
||||
|
||||
func randSeq(n int, bank []rune) string {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
b := make([]rune, n)
|
||||
for i := range b {
|
||||
b[i] = bank[rand.Intn(len(bank))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// GenerateContainerScanLayer - generate a layer with random vuls
|
||||
func GenerateContainerScanLayer(layer *ScanResultLayer) {
|
||||
layer.LayerHash = randSeq(32, hash)
|
||||
layer.Vulnerabilities = make(VulnerabilitiesList, 0)
|
||||
layer.Packages = make(LinuxPkgs, 0)
|
||||
vuls := rand.Intn(10) + 1
|
||||
|
||||
for i := 0; i < vuls; i++ {
|
||||
v := Vulnerability{}
|
||||
GenerateVulnerability(&v)
|
||||
layer.Vulnerabilities = append(layer.Vulnerabilities, v)
|
||||
}
|
||||
|
||||
pkg := LinuxPackage{PackageName: "coreutils"}
|
||||
pkg.Files = make(PkgFiles, 0)
|
||||
pf := PackageFile{Filename: "aa"}
|
||||
pkg.Files = append(pkg.Files, pf)
|
||||
layer.Packages = append(layer.Packages, pkg)
|
||||
}
|
||||
|
||||
// GenerateVulnerability - generate a vul (just diff "cve"'s)
|
||||
func GenerateVulnerability(v *Vulnerability) error {
|
||||
baseVul := " { \"name\": \"CVE-2014-9471\", \"imageTag\": \"debian:8\", \"link\": \"https://security-tracker.debian.org/tracker/CVE-2014-9471\", \"description\": \"The parse_datetime function in GNU coreutils allows remote attackers to cause a denial of service (crash) or possibly execute arbitrary code via a crafted date string, as demonstrated by the sdf\", \"severity\": \"Low\", \"metadata\": { \"NVD\": { \"CVSSv2\": { \"Score\": 7.5, \"Vectors\": \"AV:N/AC:L/Au:N/C:P/I:P\" } } }, \"fixedIn\": [ { \"name\": \"coreutils\", \"imageTag\": \"debian:8\", \"version\": \"8.23-1\" } ] }"
|
||||
b := []byte(baseVul)
|
||||
r := bytes.NewReader(b)
|
||||
er := gojay.NewDecoder(r).DecodeObject(v)
|
||||
v.RelatedPackageName = "coreutils"
|
||||
v.Severity = HighSeverity
|
||||
v.Relevancy = Irelevant
|
||||
v.Name = "CVE-" + randSeq(4, nums) + "-" + randSeq(4, nums)
|
||||
return er
|
||||
|
||||
}
|
||||
containerscan/containerscan_test.go (new file, 90 additions)
@@ -0,0 +1,90 @@
|
||||
package containerscan
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/francoispqt/gojay"
|
||||
)
|
||||
|
||||
func TestDecodeScanWIthDangearousArtifacts(t *testing.T) {
|
||||
rhs := &ScanResultReport{}
|
||||
er := gojay.NewDecoder(strings.NewReader(nginxScanJSON)).DecodeObject(rhs)
|
||||
if er != nil {
|
||||
t.Errorf("decode failed due to: %v", er.Error())
|
||||
}
|
||||
sumObj := rhs.Summarize()
|
||||
if sumObj.Registry != "" {
|
||||
t.Errorf("sumObj.Registry = %v", sumObj.Registry)
|
||||
}
|
||||
if sumObj.VersionImage != "nginx:1.18.0" {
|
||||
t.Errorf("sumObj.VersionImage = %v", sumObj.Registry)
|
||||
}
|
||||
if sumObj.ImgTag != "nginx:1.18.0" {
|
||||
t.Errorf("sumObj.ImgTag = %v", sumObj.ImgTag)
|
||||
}
|
||||
if sumObj.Status != "Success" {
|
||||
t.Errorf("sumObj.Status = %v", sumObj.Status)
|
||||
}
|
||||
if len(sumObj.ListOfDangerousArtifcats) != 3 {
|
||||
t.Errorf("sumObj.ListOfDangerousArtifcats = %v", sumObj.ListOfDangerousArtifcats)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalScanReport(t *testing.T) {
|
||||
ds := GenerateContainerScanReportMock()
|
||||
str1 := ds.AsFNVHash()
|
||||
rhs := &ScanResultReport{}
|
||||
|
||||
bolB, _ := json.Marshal(ds)
|
||||
r := bytes.NewReader(bolB)
|
||||
|
||||
er := gojay.NewDecoder(r).DecodeObject(rhs)
|
||||
if er != nil {
|
||||
t.Errorf("marshalling failed due to: %v", er.Error())
|
||||
}
|
||||
|
||||
if rhs.AsFNVHash() != str1 {
|
||||
t.Errorf("marshalling failed different values after marshal:\nOriginal:\n%v\nParsed:\n%v\n\n===\n", string(bolB), rhs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalScanReport1(t *testing.T) {
|
||||
ds := Vulnerability{}
|
||||
if err := GenerateVulnerability(&ds); err != nil {
|
||||
t.Errorf("%v\n%v\n", ds, err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetByPkgNameSuccess(t *testing.T) {
|
||||
ds := GenerateContainerScanReportMock()
|
||||
a := ds.Layers[0].GetFilesByPackage("coreutils")
|
||||
if a != nil {
|
||||
|
||||
fmt.Printf("%+v\n", *a)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGetByPkgNameMissing(t *testing.T) {
|
||||
ds := GenerateContainerScanReportMock()
|
||||
a := ds.Layers[0].GetFilesByPackage("s")
|
||||
if a != nil && len(*a) > 0 {
|
||||
t.Errorf("expected - no such package should be in that layer %v\n\n; found - %v", ds, a)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestCalculateFixed(t *testing.T) {
|
||||
res := CalculateFixed([]FixedIn{{
|
||||
Name: "",
|
||||
ImgTag: "",
|
||||
Version: "",
|
||||
}})
|
||||
if 0 != res {
|
||||
t.Errorf("wrong fix status: %v", res)
|
||||
}
|
||||
}
|
||||
containerscan/datastructures.go (new file, 37 additions)
@@ -0,0 +1,37 @@
|
||||
package containerscan
|
||||
|
||||
const (
|
||||
//defines Relevancy as enum-like
|
||||
Unknown = "Unknown"
|
||||
Relevant = "Relevant"
|
||||
Irelevant = "Irelevant"
|
||||
NoSP = "No signature profile to compare"
|
||||
|
||||
//Clair Severities
|
||||
UnknownSeverity = "Unknown"
|
||||
NegligibleSeverity = "Negligible"
|
||||
LowSeverity = "Low"
|
||||
MediumSeverity = "Medium"
|
||||
HighSeverity = "High"
|
||||
CriticalSeverity = "Critical"
|
||||
|
||||
ContainerScanRedisPrefix = "_containerscan"
|
||||
)
|
||||
|
||||
var KnownSeverities = map[string]bool{
|
||||
UnknownSeverity: true,
|
||||
NegligibleSeverity: true,
|
||||
LowSeverity: true,
|
||||
MediumSeverity: true,
|
||||
HighSeverity: true,
|
||||
CriticalSeverity: true,
|
||||
}
|
||||
|
||||
func CalculateFixed(Fixes []FixedIn) int {
|
||||
for _, fix := range Fixes {
|
||||
if fix.Version != "None" && fix.Version != "" {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
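CalculateFixed returns 1 as soon as any FixedIn entry carries a concrete version and 0 otherwise; the summary code later uses this as a per-vulnerability "fix available" flag. A small usage sketch follows, with a local FixedIn stand-in assumed to match the fields used above.

```go
package main

import "fmt"

// FixedIn mirrors the fields the containerscan package uses; the real
// definition lives alongside the structures introduced in this diff.
type FixedIn struct {
	Name    string
	ImgTag  string
	Version string
}

// calculateFixed is a local copy of CalculateFixed from datastructures.go:
// any entry with a real version string means a fix exists.
func calculateFixed(fixes []FixedIn) int {
	for _, fix := range fixes {
		if fix.Version != "None" && fix.Version != "" {
			return 1
		}
	}
	return 0
}

func main() {
	fmt.Println(calculateFixed([]FixedIn{{Name: "coreutils", Version: "8.23-1"}})) // 1: fix available
	fmt.Println(calculateFixed([]FixedIn{{Name: "coreutils", Version: "None"}}))   // 0: no fix yet
	fmt.Println(calculateFixed(nil))                                               // 0: nothing reported
}
```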
|
||||
containerscan/datastructuresmethods.go (new file, 51 additions)
@@ -0,0 +1,51 @@
|
||||
package containerscan
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
)
|
||||
|
||||
func (layer *ScanResultLayer) GetFilesByPackage(pkgname string) (files *PkgFiles) {
|
||||
for _, pkg := range layer.Packages {
|
||||
if pkg.PackageName == pkgname {
|
||||
return &pkg.Files
|
||||
}
|
||||
}
|
||||
|
||||
return &PkgFiles{}
|
||||
}
|
||||
|
||||
func (layer *ScanResultLayer) GetPackagesNames() []string {
|
||||
pkgsNames := []string{}
|
||||
for _, pkg := range layer.Packages {
|
||||
pkgsNames = append(pkgsNames, pkg.PackageName)
|
||||
}
|
||||
return pkgsNames
|
||||
}
|
||||
|
||||
func (scanresult *ScanResultReport) GetDesignatorsNContext() (*armotypes.PortalDesignator, []armotypes.ArmoContext) {
|
||||
designatorsObj := armotypes.AttributesDesignatorsFromWLID(scanresult.WLID)
|
||||
designatorsObj.Attributes["containerName"] = scanresult.ContainerName
|
||||
designatorsObj.Attributes["customerGUID"] = scanresult.CustomerGUID
|
||||
contextObj := armotypes.DesignatorToArmoContext(designatorsObj, "designators")
|
||||
return designatorsObj, contextObj
|
||||
}
|
||||
|
||||
func (scanresult *ScanResultReport) Validate() bool {
|
||||
if scanresult.CustomerGUID == "" || (scanresult.ImgHash == "" && scanresult.ImgTag == "") || scanresult.Timestamp <= 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
//TODO validate layers & vuls
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (v *Vulnerability) IsRCE() bool {
|
||||
desc := strings.ToLower(v.Description)
|
||||
|
||||
isRCE := strings.Contains(v.Description, "RCE")
|
||||
|
||||
return isRCE || strings.Contains(desc, "remote code execution") || strings.Contains(desc, "remote command execution") || strings.Contains(desc, "arbitrary code") || strings.Contains(desc, "code execution") || strings.Contains(desc, "code injection") || strings.Contains(desc, "command injection") || strings.Contains(desc, "inject arbitrary commands")
|
||||
}
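IsRCE is a plain keyword heuristic over the CVE description: the literal "RCE" is matched case-sensitively, and a fixed list of phrases is matched against the lower-cased text. A short, self-contained illustration of that behavior, with the phrase list copied from the method:

```go
package main

import (
	"fmt"
	"strings"
)

// isRCE reproduces the keyword heuristic from datastructuresmethods.go:
// "RCE" is matched case-sensitively, everything else on the lower-cased text.
func isRCE(description string) bool {
	if strings.Contains(description, "RCE") {
		return true
	}
	desc := strings.ToLower(description)
	for _, phrase := range []string{
		"remote code execution", "remote command execution", "arbitrary code",
		"code execution", "code injection", "command injection", "inject arbitrary commands",
	} {
		if strings.Contains(desc, phrase) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isRCE("allows remote attackers to execute arbitrary code")) // true
	fmt.Println(isRCE("ECDSA timing attack in libgcrypt20"))                // false
}
```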
|
||||
containerscan/elasticadapters.go (new file, 141 additions)
@@ -0,0 +1,141 @@
|
||||
package containerscan
|
||||
|
||||
import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
cautils "github.com/armosec/utils-k8s-go/armometadata"
|
||||
)
|
||||
|
||||
// ToFlatVulnerabilities - returns the scan result as a flat list of per-vulnerability results
|
||||
func (scanresult *ScanResultReport) ToFlatVulnerabilities() []*ElasticContainerVulnerabilityResult {
|
||||
vuls := make([]*ElasticContainerVulnerabilityResult, 0)
|
||||
vul2indx := make(map[string]int)
|
||||
scanID := scanresult.AsFNVHash()
|
||||
designatorsObj, ctxList := scanresult.GetDesignatorsNContext()
|
||||
for _, layer := range scanresult.Layers {
|
||||
for _, vul := range layer.Vulnerabilities {
|
||||
esLayer := ESLayer{LayerHash: layer.LayerHash, ParentLayerHash: layer.ParentLayerHash}
|
||||
if indx, isOk := vul2indx[vul.Name]; isOk {
|
||||
vuls[indx].Layers = append(vuls[indx].Layers, esLayer)
|
||||
continue
|
||||
}
|
||||
result := &ElasticContainerVulnerabilityResult{WLID: scanresult.WLID,
|
||||
Timestamp: scanresult.Timestamp,
|
||||
Designators: *designatorsObj,
|
||||
Context: ctxList}
|
||||
|
||||
result.Vulnerability = vul
|
||||
result.Layers = make([]ESLayer, 0)
|
||||
result.Layers = append(result.Layers, esLayer)
|
||||
result.ContainerScanID = scanID
|
||||
|
||||
result.IsFixed = CalculateFixed(vul.Fixes)
|
||||
result.RelevantLinks = append(result.RelevantLinks, "https://nvd.nist.gov/vuln/detail/"+vul.Name)
|
||||
result.RelevantLinks = append(result.RelevantLinks, vul.Link)
|
||||
result.Vulnerability.Link = "https://nvd.nist.gov/vuln/detail/" + vul.Name
|
||||
|
||||
result.Categories.IsRCE = result.IsRCE()
|
||||
vuls = append(vuls, result)
|
||||
vul2indx[vul.Name] = len(vuls) - 1
|
||||
|
||||
}
|
||||
}
|
||||
// find first introduced
|
||||
for i, v := range vuls {
|
||||
earlyLayer := ""
|
||||
for _, layer := range v.Layers {
|
||||
if layer.ParentLayerHash == earlyLayer {
|
||||
earlyLayer = layer.LayerHash
|
||||
}
|
||||
}
|
||||
vuls[i].IntroducedInLayer = earlyLayer
|
||||
|
||||
}
|
||||
|
||||
return vuls
|
||||
}
|
||||
|
||||
func (scanresult *ScanResultReport) Summarize() *ElasticContainerScanSummaryResult {
|
||||
designatorsObj, ctxList := scanresult.GetDesignatorsNContext()
|
||||
summary := &ElasticContainerScanSummaryResult{
|
||||
Designators: *designatorsObj,
|
||||
Context: ctxList,
|
||||
CustomerGUID: scanresult.CustomerGUID,
|
||||
ImgTag: scanresult.ImgTag,
|
||||
ImgHash: scanresult.ImgHash,
|
||||
WLID: scanresult.WLID,
|
||||
Timestamp: scanresult.Timestamp,
|
||||
ContainerName: scanresult.ContainerName,
|
||||
ContainerScanID: scanresult.AsFNVHash(),
|
||||
ListOfDangerousArtifcats: scanresult.ListOfDangerousArtifcats,
|
||||
}
|
||||
|
||||
summary.Cluster = designatorsObj.Attributes[armotypes.AttributeCluster]
|
||||
summary.Namespace = designatorsObj.Attributes[armotypes.AttributeNamespace]
|
||||
|
||||
imageInfo, e2 := cautils.ImageTagToImageInfo(scanresult.ImgTag)
|
||||
if e2 == nil {
|
||||
summary.Registry = imageInfo.Registry
|
||||
summary.VersionImage = imageInfo.VersionImage
|
||||
}
|
||||
|
||||
summary.PackagesName = make([]string, 0)
|
||||
|
||||
severitiesStats := map[string]SeverityStats{}
|
||||
|
||||
uniqueVulsMap := make(map[string]bool)
|
||||
for _, layer := range scanresult.Layers {
|
||||
summary.PackagesName = append(summary.PackagesName, (layer.GetPackagesNames())...)
|
||||
for _, vul := range layer.Vulnerabilities {
|
||||
if _, isOk := uniqueVulsMap[vul.Name]; isOk {
|
||||
continue
|
||||
}
|
||||
uniqueVulsMap[vul.Name] = true
|
||||
|
||||
// TODO: maybe add all severities just to have a placeholders
|
||||
if !KnownSeverities[vul.Severity] {
|
||||
vul.Severity = UnknownSeverity
|
||||
}
|
||||
|
||||
vulnSeverityStats, ok := severitiesStats[vul.Severity]
|
||||
if !ok {
|
||||
vulnSeverityStats = SeverityStats{Severity: vul.Severity}
|
||||
}
|
||||
|
||||
vulnSeverityStats.TotalCount++
|
||||
summary.TotalCount++
|
||||
isFixed := CalculateFixed(vul.Fixes) > 0
|
||||
if isFixed {
|
||||
vulnSeverityStats.FixAvailableOfTotalCount++
|
||||
summary.FixAvailableOfTotalCount++
|
||||
}
|
||||
isRCE := vul.IsRCE()
|
||||
if isRCE {
|
||||
vulnSeverityStats.RCECount++
|
||||
summary.RCECount++
|
||||
}
|
||||
if vul.Relevancy == Relevant {
|
||||
vulnSeverityStats.RelevantCount++
|
||||
summary.RelevantCount++
|
||||
if isFixed {
|
||||
vulnSeverityStats.FixAvailableForRelevantCount++
|
||||
summary.FixAvailableForRelevantCount++
|
||||
}
|
||||
|
||||
}
|
||||
severitiesStats[vul.Severity] = vulnSeverityStats
|
||||
}
|
||||
}
|
||||
summary.Status = "Success"
|
||||
|
||||
// if criticalStats, hasCritical := severitiesStats[CriticalSeverity]; hasCritical && criticalStats.TotalCount > 0 {
|
||||
// summary.Status = "Fail"
|
||||
// }
|
||||
// if highStats, hasHigh := severitiesStats[HighSeverity]; hasHigh && highStats.RelevantCount > 0 {
|
||||
// summary.Status = "Fail"
|
||||
// }
|
||||
|
||||
for sever := range severitiesStats {
|
||||
summary.SeveritiesStats = append(summary.SeveritiesStats, severitiesStats[sever])
|
||||
}
|
||||
return summary
|
||||
}
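Summarize walks every layer, de-duplicates vulnerabilities by CVE name, normalizes unknown severities, and accumulates per-severity counters alongside the report-wide totals. The sketch below only illustrates that aggregation shape with cut-down stand-in types; it is not the real ElasticContainerScanSummaryResult construction.

```go
package main

import "fmt"

// severityStats is a cut-down stand-in for SeverityStats above.
type severityStats struct {
	Severity string
	Total    int64
	Fixed    int64
}

// vuln is a minimal stand-in for Vulnerability: only the fields read here.
type vuln struct {
	Name     string
	Severity string
	HasFix   bool
}

// summarize shows the aggregation pattern used by Summarize above:
// de-duplicate by CVE name across layers, then bump per-severity counters.
func summarize(layers [][]vuln) map[string]severityStats {
	seen := map[string]bool{}
	stats := map[string]severityStats{}
	for _, layer := range layers {
		for _, v := range layer {
			if seen[v.Name] {
				continue // the same CVE may appear in several layers
			}
			seen[v.Name] = true
			s := stats[v.Severity]
			s.Severity = v.Severity
			s.Total++
			if v.HasFix {
				s.Fixed++
			}
			stats[v.Severity] = s
		}
	}
	return stats
}

func main() {
	layers := [][]vuln{
		{{Name: "CVE-2021-33560", Severity: "High", HasFix: true}},
		{{Name: "CVE-2021-33560", Severity: "High", HasFix: true}, {Name: "CVE-2019-13627", Severity: "Medium"}},
	}
	fmt.Println(summarize(layers))
}
```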
|
||||
containerscan/elasticdatastructures.go (new file, 89 additions)
@@ -0,0 +1,89 @@
|
||||
package containerscan
|
||||
|
||||
import "github.com/armosec/armoapi-go/armotypes"
|
||||
|
||||
type ElasticContainerVulnerabilityResult struct {
|
||||
Designators armotypes.PortalDesignator `json:"designators"`
|
||||
Context []armotypes.ArmoContext `json:"context"`
|
||||
|
||||
WLID string `json:"wlid"`
|
||||
ContainerScanID string `json:"containersScanID"`
|
||||
Layers []ESLayer `json:"layers"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
IsFixed int `json:"isFixed"`
|
||||
IntroducedInLayer string `json:"layerHash"`
|
||||
RelevantLinks []string `json:"links"` // shitty SE practice
|
||||
|
||||
Vulnerability `json:",inline"`
|
||||
}
|
||||
|
||||
type ESLayer struct {
|
||||
LayerHash string `json:"layerHash"`
|
||||
ParentLayerHash string `json:"parentLayerHash"`
|
||||
}
|
||||
|
||||
type SeverityStats struct {
|
||||
Severity string `json:"severity,omitempty"`
|
||||
TotalCount int64 `json:"total"`
|
||||
FixAvailableOfTotalCount int64 `json:"fixedTotal"`
|
||||
RelevantCount int64 `json:"totalRelevant"`
|
||||
FixAvailableForRelevantCount int64 `json:"fixedRelevant"`
|
||||
RCECount int64 `json:"rceTotal"`
|
||||
UrgentCount int64 `json:"urgent"`
|
||||
NeglectedCount int64 `json:"neglected"`
|
||||
HealthStatus string `json:"healthStatus"`
|
||||
}
|
||||
|
||||
type ElasticContainerScanSeveritySummary struct {
|
||||
Designators armotypes.PortalDesignator `json:"designators"`
|
||||
Context []armotypes.ArmoContext `json:"context"`
|
||||
|
||||
SeverityStats
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
ContainerScanID string `json:"containersScanID"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
WLID string `json:"wlid"`
|
||||
ImgTag string `json:"imageTag"`
|
||||
ImgHash string `json:"imageHash"`
|
||||
Cluster string `json:"cluster"`
|
||||
Namespace string `json:"namespace"`
|
||||
ContainerName string `json:"containerName"`
|
||||
Status string `json:"status"`
|
||||
Registry string `json:"registry"`
|
||||
VersionImage string `json:"versionImage"`
|
||||
Version string `json:"version"`
|
||||
DayDate string `json:"dayDate"`
|
||||
}
|
||||
|
||||
type ElasticContainerScanSummaryResult struct {
|
||||
SeverityStats
|
||||
Designators armotypes.PortalDesignator `json:"designators"`
|
||||
Context []armotypes.ArmoContext `json:"context"`
|
||||
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
ContainerScanID string `json:"containersScanID"`
|
||||
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
WLID string `json:"wlid"`
|
||||
ImgTag string `json:"imageTag"`
|
||||
ImgHash string `json:"imageHash"`
|
||||
Cluster string `json:"cluster"`
|
||||
Namespace string `json:"namespace"`
|
||||
ContainerName string `json:"containerName"`
|
||||
PackagesName []string `json:"packages"`
|
||||
|
||||
ListOfDangerousArtifcats []string `json:"listOfDangerousArtifcats"`
|
||||
|
||||
Status string `json:"status"`
|
||||
|
||||
Registry string `json:"registry"`
|
||||
VersionImage string `json:"versionImage"`
|
||||
|
||||
SeveritiesStats []SeverityStats `json:"severitiesStats"`
|
||||
|
||||
Version string `json:"version"`
|
||||
}
|
||||
|
||||
func (summary *ElasticContainerScanSummaryResult) Validate() bool {
|
||||
return summary.CustomerGUID != "" && summary.ContainerScanID != "" && (summary.ImgTag != "" || summary.ImgHash != "") && summary.Timestamp > 0
|
||||
}
|
||||
containerscan/gojayunmarshaller.go (new file, 246 additions)
@@ -0,0 +1,246 @@
|
||||
package containerscan
|
||||
|
||||
import (
|
||||
"github.com/francoispqt/gojay"
|
||||
)
|
||||
|
||||
/*
|
||||
responsible for fast unmarshaling of various common containerscan structures and substructures
|
||||
|
||||
*/
|
||||
|
||||
// UnmarshalJSONObject - File inside a pkg
|
||||
func (file *PackageFile) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
|
||||
|
||||
switch key {
|
||||
case "name":
|
||||
err = dec.String(&(file.Filename))
|
||||
}
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
func (files *PkgFiles) UnmarshalJSONArray(dec *gojay.Decoder) error {
|
||||
lae := PackageFile{}
|
||||
if err := dec.Object(&lae); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*files = append(*files, lae)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (file *PackageFile) NKeys() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
// UnmarshalJSONObject--- Package
|
||||
func (pkgnx *LinuxPackage) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
|
||||
|
||||
switch key {
|
||||
case "packageName":
|
||||
err = dec.String(&(pkgnx.PackageName))
|
||||
|
||||
case "version":
|
||||
err = dec.String(&(pkgnx.PackageVersion))
|
||||
|
||||
case "files":
|
||||
err = dec.Array(&(pkgnx.Files))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (file *LinuxPackage) NKeys() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (pkgs *LinuxPkgs) UnmarshalJSONArray(dec *gojay.Decoder) error {
|
||||
lae := LinuxPackage{}
|
||||
if err := dec.Object(&lae); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*pkgs = append(*pkgs, lae)
|
||||
return nil
|
||||
}
|
||||
|
||||
//--------Vul fixed in----------------------------------
|
||||
func (fx *FixedIn) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
|
||||
|
||||
switch key {
|
||||
case "name":
|
||||
err = dec.String(&(fx.Name))
|
||||
|
||||
case "imageTag":
|
||||
err = dec.String(&(fx.ImgTag))
|
||||
case "version":
|
||||
err = dec.String(&(fx.Version))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *VulFixes) UnmarshalJSONArray(dec *gojay.Decoder) error {
|
||||
lae := FixedIn{}
|
||||
if err := dec.Object(&lae); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*t = append(*t, lae)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (file *FixedIn) NKeys() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
//------ VULNERABIlITy ---------------------
|
||||
|
||||
// Name string `json:"name"`
|
||||
// ImgHash string `json:"imageHash"`
|
||||
// ImgTag string `json:"imageTag",omitempty`
|
||||
// RelatedPackageName string `json:"packageName"`
|
||||
// PackageVersion string `json:"packageVersion"`
|
||||
// Link string `json:"link"`
|
||||
// Description string `json:"description"`
|
||||
// Severity string `json:"severity"`
|
||||
// Metadata interface{} `json:"metadata",omitempty`
|
||||
// Fixes VulFixes `json:"fixedIn",omitempty`
|
||||
// Relevancy string `json:"relevant"` // use the related enum

func (v *Vulnerability) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
	switch key {
	case "name":
		err = dec.String(&(v.Name))
	case "imageTag":
		err = dec.String(&(v.ImgTag))
	case "imageHash":
		err = dec.String(&(v.ImgHash))
	case "packageName":
		err = dec.String(&(v.RelatedPackageName))
	case "packageVersion":
		err = dec.String(&(v.PackageVersion))
	case "link":
		err = dec.String(&(v.Link))
	case "description":
		err = dec.String(&(v.Description))
	case "severity":
		err = dec.String(&(v.Severity))
	case "relevant":
		err = dec.String(&(v.Relevancy))
	case "fixedIn":
		err = dec.Array(&(v.Fixes))
	case "metadata":
		err = dec.Interface(&(v.Metadata))
	}

	return err
}

func (t *VulnerabilitiesList) UnmarshalJSONArray(dec *gojay.Decoder) error {
	lae := Vulnerability{}
	if err := dec.Object(&lae); err != nil {
		return err
	}

	*t = append(*t, lae)
	return nil
}

func (v *Vulnerability) NKeys() int {
	return 0
}

//---------Layer Object----------------------------------
// type ScanResultLayer struct {
// 	LayerHash       string          `json:layerHash`
// 	Vulnerabilities []Vulnerability `json:vulnerabilities`
// 	Packages        []LinuxPackage  `json:packageToFile`
// }

func (scan *ScanResultLayer) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
	switch key {
	// case "timestamp":
	// 	err = dec.Time(&(reporter.Timestamp), time.RFC3339)
	// 	reporter.Timestamp = reporter.Timestamp.Local()
	case "layerHash":
		err = dec.String(&(scan.LayerHash))
	case "parentLayerHash":
		err = dec.String(&(scan.ParentLayerHash))
	case "vulnerabilities":
		err = dec.Array(&(scan.Vulnerabilities))
	case "packageToFile":
		err = dec.Array(&(scan.Packages))
	}
	return err
}

func (t *LayersList) UnmarshalJSONArray(dec *gojay.Decoder) error {
	lae := ScanResultLayer{}
	if err := dec.Object(&lae); err != nil {
		return err
	}

	*t = append(*t, lae)
	return nil
}

func (scan *ScanResultLayer) NKeys() int {
	return 0
}

//---------------------SCAN RESULT--------------------------------------------------------------------------

// type ScanResultReport struct {
// 	CustomerGUID string            `json:customerGuid`
// 	ImgTag       string            `json:imageTag,omitempty`
// 	ImgHash      string            `json:imageHash`
// 	WLID         string            `json:wlid`
// 	Timestamp    int               `json:customerGuid`
// 	Layers       []ScanResultLayer `json:layers`
// 	ContainerName
// }

func (scan *ScanResultReport) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
	switch key {
	// case "timestamp":
	// 	err = dec.Time(&(reporter.Timestamp), time.RFC3339)
	// 	reporter.Timestamp = reporter.Timestamp.Local()
	case "customerGUID":
		err = dec.String(&(scan.CustomerGUID))
	case "imageTag":
		err = dec.String(&(scan.ImgTag))
	case "imageHash":
		err = dec.String(&(scan.ImgHash))
	case "wlid":
		err = dec.String(&(scan.WLID))
	case "containerName":
		err = dec.String(&(scan.ContainerName))
	case "timestamp":
		err = dec.Int64(&(scan.Timestamp))
	case "layers":
		err = dec.Array(&(scan.Layers))
	case "listOfDangerousArtifcats":
		err = dec.SliceString(&(scan.ListOfDangerousArtifcats))
	}
	return err
}

func (scan *ScanResultReport) NKeys() int {
	return 0
}
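A minimal usage sketch (not part of this diff) of how these gojay unmarshallers are driven. It assumes the snippet lives in the same `containerscan` package and that gojay is imported from `github.com/francoispqt/gojay`; because `NKeys()` returns 0, gojay visits every key present in the object rather than stopping after a fixed count.

```go
// Hypothetical helper, shown only for illustration; not part of the change.
package containerscan

import "github.com/francoispqt/gojay"

// decodeVulnerability decodes a single raw JSON object into a Vulnerability
// using the UnmarshalJSONObject/NKeys implementation above.
func decodeVulnerability(raw []byte) (Vulnerability, error) {
	v := Vulnerability{}
	err := gojay.UnmarshalJSONObject(raw, &v)
	return v, err
}
```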
525
containerscan/jsonrawscan.go
Normal file
@@ -0,0 +1,525 @@
|
||||
package containerscan
|
||||
|
||||
var nginxScanJSON = `
|
||||
{
|
||||
"customerGUID": "1e3a88bf-92ce-44f8-914e-cbe71830d566",
|
||||
"imageTag": "nginx:1.18.0",
|
||||
"imageHash": "",
|
||||
"wlid": "wlid://cluster-test/namespace-test/deployment-davidg",
|
||||
"containerName": "nginx-1",
|
||||
"timestamp": 1628091365,
|
||||
"layers": [
|
||||
{
|
||||
"layerHash": "sha256:f7ec5a41d630a33a2d1db59b95d89d93de7ae5a619a3a8571b78457e48266eba",
|
||||
"parentLayerHash": "",
|
||||
"vulnerabilities": [
|
||||
{
|
||||
"name": "CVE-2009-0854",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "dash",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-0854",
|
||||
"description": "Untrusted search path vulnerability in dash 0.5.4, when used as a login shell, allows local users to execute arbitrary code via a Trojan horse .profile file in the current working directory.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2019-13627",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "libgcrypt20",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-13627",
|
||||
"description": "It was discovered that there was a ECDSA timing attack in the libgcrypt20 cryptographic library. Version affected: 1.8.4-5, 1.7.6-2+deb9u3, and 1.6.3-2+deb8u4. Versions fixed: 1.8.5-2 and 1.6.3-2+deb8u7.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2021-33560",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "libgcrypt20",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-33560",
|
||||
"description": "Libgcrypt before 1.8.8 and 1.9.x before 1.9.3 mishandles ElGamal encryption because it lacks exponent blinding to address a side-channel attack against mpi_powm, and the window size is not chosen appropriately. (There is also an interoperability problem because the selection of the k integer value does not properly consider the differences between basic ElGamal encryption and generalized ElGamal encryption.) This, for example, affects use of ElGamal in OpenPGP.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:1.8.4-5+deb10u1"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2021-3345",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "libgcrypt20",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-3345",
|
||||
"description": "_gcry_md_block_write in cipher/hash-common.c in Libgcrypt version 1.9.0 has a heap-based buffer overflow when the digest final function sets a large count value. It is recommended to upgrade to 1.9.1 or later.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2010-0834",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "base-files",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-0834",
|
||||
"description": "The base-files package before 5.0.0ubuntu7.1 on Ubuntu 9.10 and before 5.0.0ubuntu20.10.04.2 on Ubuntu 10.04 LTS, as shipped on Dell Latitude 2110 netbooks, does not require authentication for package installation, which allows remote archive servers and man-in-the-middle attackers to execute arbitrary code via a crafted package.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2018-6557",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "base-files",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-6557",
|
||||
"description": "The MOTD update script in the base-files package in Ubuntu 18.04 LTS before 10.1ubuntu2.2, and Ubuntu 18.10 before 10.1ubuntu6 incorrectly handled temporary files. A local attacker could use this issue to cause a denial of service, or possibly escalate privileges if kernel symlink restrictions were disabled.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2013-0223",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-0223",
|
||||
"description": "The SUSE coreutils-i18n.patch for GNU coreutils allows context-dependent attackers to cause a denial of service (segmentation fault and crash) via a long string to the join command, when using the -i switch, which triggers a stack-based buffer overflow in the alloca function.",
|
||||
"severity": "Low",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2015-4041",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-4041",
|
||||
"description": "The keycompare_mb function in sort.c in sort in GNU Coreutils through 8.23 on 64-bit platforms performs a size calculation without considering the number of bytes occupied by multibyte characters, which allows attackers to cause a denial of service (heap-based buffer overflow and application crash) or possibly have unspecified other impact via long UTF-8 strings.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2009-4135",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-4135",
|
||||
"description": "The distcheck rule in dist-check.mk in GNU coreutils 5.2.1 through 8.1 allows local users to gain privileges via a symlink attack on a file in a directory tree under /tmp.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2015-4042",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-4042",
|
||||
"description": "Integer overflow in the keycompare_mb function in sort.c in sort in GNU Coreutils through 8.23 might allow attackers to cause a denial of service (application crash) or possibly have unspecified other impact via long strings.",
|
||||
"severity": "Critical",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2013-0221",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-0221",
|
||||
"description": "The SUSE coreutils-i18n.patch for GNU coreutils allows context-dependent attackers to cause a denial of service (segmentation fault and crash) via a long string to the sort command, when using the (1) -d or (2) -M switch, which triggers a stack-based buffer overflow in the alloca function.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2013-0222",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2013-0222",
|
||||
"description": "The SUSE coreutils-i18n.patch for GNU coreutils allows context-dependent attackers to cause a denial of service (segmentation fault and crash) via a long string to the uniq command, which triggers a stack-based buffer overflow in the alloca function.",
|
||||
"severity": "Low",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2016-2781",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-2781",
|
||||
"description": "chroot in GNU coreutils, when used with --userspec, allows local users to escape to the parent session via a crafted TIOCSTI ioctl call, which pushes characters to the terminal's input buffer.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2017-18018",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "coreutils",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-18018",
|
||||
"description": "In GNU Coreutils through 8.29, chown-core.c in chown and chgrp does not prevent replacement of a plain file with a symlink during use of the POSIX \"-R -L\" options, which allows local users to modify the ownership of arbitrary files by leveraging a race condition.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2021-20193",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "tar",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-20193",
|
||||
"description": "A flaw was found in the src/list.c of tar 1.33 and earlier. This flaw allows an attacker who can submit a crafted input file to tar to cause uncontrolled consumption of memory. The highest threat from this vulnerability is to system availability.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2005-2541",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "tar",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2005-2541",
|
||||
"description": "Tar 1.15.1 does not properly warn the user when extracting setuid or setgid files, which may allow local users or remote attackers to gain privileges.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2019-9923",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "tar",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9923",
|
||||
"description": "pax_decode_header in sparse.c in GNU Tar before 1.32 had a NULL pointer dereference when parsing certain archives that have malformed extended headers.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2018-1000654",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "libtasn1-6",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2018-1000654",
|
||||
"description": "GNU Libtasn1-4.13 libtasn1-4.13 version libtasn1-4.13, libtasn1-4.12 contains a DoS, specifically CPU usage will reach 100% when running asn1Paser against the POC due to an issue in _asn1_expand_object_id(p_tree), after a long time, the program will be killed. This attack appears to be exploitable via parsing a crafted file.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2011-3374",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "apt",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2011-3374",
|
||||
"description": "It was found that apt-key in apt, all versions, do not correctly validate gpg keys with the master keyring, leading to a potential man-in-the-middle attack.",
|
||||
"severity": "Medium",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2021-37600",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "util-linux",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-37600",
|
||||
"description": "An integer overflow in util-linux through 2.37.1 can potentially cause a buffer overflow if an attacker were able to use system resources in a way that leads to a large number in the /proc/sysvipc/sem file.",
|
||||
"severity": "Unknown",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2007-0822",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "util-linux",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2007-0822",
|
||||
"description": "umount, when running with the Linux 2.6.15 kernel on Slackware Linux 10.2, allows local users to trigger a NULL dereference and application crash by invoking the program with a pathname for a USB pen drive that was mounted and then physically removed, which might allow the users to obtain sensitive information, including core file contents.",
|
||||
"severity": "Low",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2004-1349",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "gzip",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2004-1349",
|
||||
"description": "gzip before 1.3 in Solaris 8, when called with the -f or -force flags, will change the permissions of files that are hard linked to the target files, which allows local users to view or modify these files.",
|
||||
"severity": "Low",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2004-0603",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "gzip",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2004-0603",
|
||||
"description": "gzexe in gzip 1.3.3 and earlier will execute an argument when the creation of a temp file fails instead of exiting the program, which could allow remote attackers or local users to execute arbitrary commands, a different vulnerability than CVE-1999-1332.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2010-0002",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "bash",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-0002",
|
||||
"description": "The /etc/profile.d/60alias.sh script in the Mandriva bash package for Bash 2.05b, 3.0, 3.2, 3.2.48, and 4.0 enables the --show-control-chars option in LS_OPTIONS, which allows local users to send escape sequences to terminal emulators, or hide the existence of a file, via a crafted filename.",
|
||||
"severity": "Low",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
},
|
||||
{
|
||||
"name": "CVE-2019-18276",
|
||||
"imageHash": "sha256:c2c45d506085d300b72a6d4b10e3dce104228080a2cf095fc38333afe237e2be",
|
||||
"imageTag": "",
|
||||
"packageName": "bash",
|
||||
"packageVersion": "",
|
||||
"link": "https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-18276",
|
||||
"description": "An issue was discovered in disable_priv_mode in shell.c in GNU Bash through 5.0 patch 11. By default, if Bash is run with its effective UID not equal to its real UID, it will drop privileges by setting its effective UID to its real UID. However, it does so incorrectly. On Linux and other systems that support \"saved UID\" functionality, the saved UID is not dropped. An attacker with command execution in the shell can use \"enable -f\" for runtime loading of a new builtin, which can be a shared object that calls setuid() and therefore regains privileges. However, binaries running with an effective UID of 0 are unaffected.",
|
||||
"severity": "High",
|
||||
"metadata": null,
|
||||
"fixedIn": [
|
||||
{
|
||||
"name": "",
|
||||
"imageTag": "",
|
||||
"version": "0:0"
|
||||
}
|
||||
],
|
||||
"relevant": ""
|
||||
}
|
||||
],
|
||||
"packageToFile": null
|
||||
},
|
||||
{
|
||||
"layerHash": "sha256:0b20d28b5eb3007f70c43cdd8efcdb04016aa193192e5911cda5b7590ffaa635",
|
||||
"parentLayerHash": "sha256:f7ec5a41d630a33a2d1db59b95d89d93de7ae5a619a3a8571b78457e48266eba",
|
||||
"vulnerabilities": [],
|
||||
"packageToFile": null
|
||||
},
|
||||
{
|
||||
"layerHash": "sha256:1576642c97761adf346890bf67c43473217160a9a203ef47d0bc6020af652798",
|
||||
"parentLayerHash": "sha256:0b20d28b5eb3007f70c43cdd8efcdb04016aa193192e5911cda5b7590ffaa635",
|
||||
"vulnerabilities": [],
|
||||
"packageToFile": null
|
||||
},
|
||||
{
|
||||
"layerHash": "sha256:c12a848bad84d57e3f5faafab5880484434aee3bf8bdde4d519753b7c81254fd",
|
||||
"parentLayerHash": "sha256:1576642c97761adf346890bf67c43473217160a9a203ef47d0bc6020af652798",
|
||||
"vulnerabilities": [],
|
||||
"packageToFile": null
|
||||
},
|
||||
{
|
||||
"layerHash": "sha256:03f221d9cf00a7077231c6dcac3c95182727c7e7fd44fd2b2e882a01dcda2d70",
|
||||
"parentLayerHash": "sha256:c12a848bad84d57e3f5faafab5880484434aee3bf8bdde4d519753b7c81254fd",
|
||||
"vulnerabilities": [],
|
||||
"packageToFile": null
|
||||
}
|
||||
],
|
||||
"listOfDangerousArtifcats": [
|
||||
"bin/dash",
|
||||
"bin/bash",
|
||||
"usr/bin/curl"
|
||||
]
|
||||
}
|
||||
`
|
||||
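The fixture above lends itself to a round-trip check. The following is a hedged sketch of a test in the same package (the test file and its assertions are illustrative, not part of this change); it decodes `nginxScanJSON` through the gojay unmarshallers and walks the five layers of the report.

```go
package containerscan

import (
	"testing"

	"github.com/francoispqt/gojay"
)

// TestDecodeNginxScan is a hypothetical test exercising the raw scan fixture.
func TestDecodeNginxScan(t *testing.T) {
	report := ScanResultReport{}
	if err := gojay.UnmarshalJSONObject([]byte(nginxScanJSON), &report); err != nil {
		t.Fatal(err)
	}

	// The fixture declares the nginx:1.18.0 image with five layers.
	if report.ImgTag != "nginx:1.18.0" || len(report.Layers) != 5 {
		t.Fatalf("unexpected report: image %q with %d layers", report.ImgTag, len(report.Layers))
	}

	total := 0
	for _, layer := range report.Layers {
		total += len(layer.Vulnerabilities)
	}
	t.Logf("decoded %d vulnerabilities, report hash %s", total, report.AsFNVHash())
}
```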
93
containerscan/rawdatastrucutres.go
Normal file
@@ -0,0 +1,93 @@
|
||||
package containerscan

import (
	"fmt"
	"hash/fnv"
)

//!!!!!!!!!!!!EVERY CHANGE IN THESE STRUCTURES => CHANGE gojayunmarshaller AS WELL!!!!!!!!!!!!!!!!!!!!!!!!

// ScanResultReport - the report given from the scanner to the event receiver
type ScanResultReport struct {
	CustomerGUID             string     `json:"customerGUID"`
	ImgTag                   string     `json:"imageTag"`
	ImgHash                  string     `json:"imageHash"`
	WLID                     string     `json:"wlid"`
	ContainerName            string     `json:"containerName"`
	Timestamp                int64      `json:"timestamp"`
	Layers                   LayersList `json:"layers"`
	ListOfDangerousArtifcats []string   `json:"listOfDangerousArtifcats"`
}

// ScanResultLayer - represents a single layer from a container scan result
type ScanResultLayer struct {
	LayerHash       string              `json:"layerHash"`
	ParentLayerHash string              `json:"parentLayerHash"`
	Vulnerabilities VulnerabilitiesList `json:"vulnerabilities"`
	Packages        LinuxPkgs           `json:"packageToFile"`
}

type VulnerabilityCategory struct {
	IsRCE bool `json:"isRce"`
}

// Vulnerability - a vulnerability object
type Vulnerability struct {
	Name               string                `json:"name"`
	ImgHash            string                `json:"imageHash"`
	ImgTag             string                `json:"imageTag"`
	RelatedPackageName string                `json:"packageName"`
	PackageVersion     string                `json:"packageVersion"`
	Link               string                `json:"link"`
	Description        string                `json:"description"`
	Severity           string                `json:"severity"`
	Metadata           interface{}           `json:"metadata"`
	Fixes              VulFixes              `json:"fixedIn"`
	Relevancy          string                `json:"relevant"` // use the related enum
	UrgentCount        int                   `json:"urgent"`
	NeglectedCount     int                   `json:"neglected"`
	HealthStatus       string                `json:"healthStatus"`
	Categories         VulnerabilityCategory `json:"categories"`
}

// FixedIn - when and in which package version this vulnerability was fixed
type FixedIn struct {
	Name    string `json:"name"`
	ImgTag  string `json:"imageTag"`
	Version string `json:"version"`
}

// LinuxPackage - Linux package representation
type LinuxPackage struct {
	PackageName    string   `json:"packageName"`
	Files          PkgFiles `json:"files"`
	PackageVersion string   `json:"version"`
}

// PackageFile - a single file belonging to a package
type PackageFile struct {
	Filename string `json:"name"`
}

// types to provide unmarshalling:

// LayersList - slice of scan result layers
type LayersList []ScanResultLayer

// VulnerabilitiesList - slice of vulnerabilities
type VulnerabilitiesList []Vulnerability

// LinuxPkgs - slice of Linux packages
type LinuxPkgs []LinuxPackage

// VulFixes - information about when/how a vulnerability was fixed
type VulFixes []FixedIn

// PkgFiles - slice of files belonging to a specific package
type PkgFiles []PackageFile

// AsFNVHash returns a 64-bit FNV-1a hash of the report's printed representation
func (v *ScanResultReport) AsFNVHash() string {
	hasher := fnv.New64a()
	hasher.Write([]byte(fmt.Sprintf("%v", *v)))
	return fmt.Sprintf("%v", hasher.Sum64())
}
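`AsFNVHash` collapses the whole report into a single 64-bit FNV-1a value, which the event receiver can use as a cheap identity or deduplication key. A small illustrative sketch (hypothetical, same package, not part of this change): two reports that differ in any field will, in practice, hash to different values.

```go
package containerscan

import "fmt"

// exampleReportHash is a hypothetical illustration of using AsFNVHash as a
// deduplication key.
func exampleReportHash() {
	a := ScanResultReport{WLID: "wlid://cluster-test/namespace-test/deployment-davidg", Timestamp: 1628091365}
	b := a
	b.Timestamp++ // any field change alters fmt.Sprintf("%v", report) and hence the hash

	fmt.Println(a.AsFNVHash() == b.AsFNVHash()) // prints false (barring an FNV collision)
}
```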
@@ -11,7 +11,7 @@ source #287
|
||||
|
||||
### Relation to this proposal
|
||||
|
||||
There are multiple changes and design decisions needs to be made before Kubescape will support the before outlined controls. However, a focal point the whole picutre is the ability to access vulnerabilty databases of container images. We anticiapte that most container image repositories will support image vulnerabilty scanning, some major players are already do. Since there is no a single API available which all of these data sources support it is important to create an adaption layer within Kubescape so different datasources can serve Kubescape's goals.
|
||||
There are multiple changes and design decisions that need to be made before Kubescape will support the controls outlined above. However, a focal point of the whole picture is the ability to access vulnerability databases of container images. We anticipate that most container image repositories will support image vulnerability scanning; some major players already do. Since there is no single API available which all of these data sources support, it is important to create an adaption layer within Kubescape so different data sources can serve Kubescape's goals.
|
||||
|
||||
## High level design of Kubescape
|
||||
|
||||
@@ -21,7 +21,7 @@ There are multiple changes and design decisions needs to be made before Kubescap
|
||||
* OPA engine: the [OPA](https://github.com/open-policy-agent/opa) rego interpreter
|
||||
* Rules processor: Kubescape component; it enumerates and runs the controls while also preparing all the input data that the controls need to run
|
||||
* Data sources: set of different modules providing data to the Rules processor so it can run the controls with them. Examples: Kubernetes objects, cloud vendor API objects and, added in this proposal, the vulnerability information
|
||||
* Cloud Image Vulnerability adaption interface: the subject of this proposal, it gives a common interface for different registry/vulnerabilty vendors to adapt to.
|
||||
* Cloud Image Vulnerability adaption interface: the subject of this proposal; it gives a common interface for different registry/vulnerability vendors to adapt to.
|
||||
* CIV adaptors: specific implementations of the CIV interface, for example a Harbor adaptor
|
||||
```
|
||||
-----------------------
|
||||
@@ -89,7 +89,7 @@ type ContainerImageScanStatus struct {
|
||||
LastScanDate time.Time
|
||||
}
|
||||
|
||||
type ContainerImageVulnerability struct {
|
||||
type ContainerImageVulnerabilityReport struct {
|
||||
ImageID ContainerImageIdentifier
|
||||
// TBD
|
||||
}
|
||||
@@ -110,7 +110,7 @@ type IContainerImageVulnerabilityAdaptor interface {
|
||||
|
||||
GetImagesScanStatus(imageIDs []ContainerImageIdentifier) ([]ContainerImageScanStatus, error)
|
||||
|
||||
GetImagesVulnerabilties(imageIDs []ContainerImageIdentifier) ([]ContainerImageVulnerability, error)
|
||||
GetImagesVulnerabilties(imageIDs []ContainerImageIdentifier) ([]ContainerImageVulnerabilityReport, error)
|
||||
|
||||
GetImagesInformation(imageIDs []ContainerImageIdentifier) ([]ContainerImageInformation, error)
|
||||
}
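To make the interface shape concrete, here is a hedged stub that satisfies the three methods shown in this excerpt. `ContainerImageIdentifier` and `ContainerImageInformation` are assumed to be defined elsewhere in the proposal, the `time` package is assumed to be imported, and the full `IContainerImageVulnerabilityAdaptor` interface may contain additional methods (for example, authentication against the registry) that this excerpt does not show.

```go
// stubCIVAdaptor is a hypothetical, do-nothing adaptor used only to
// illustrate the interface; a real adaptor (e.g. a Harbor integration)
// would query the registry's API in each method.
type stubCIVAdaptor struct{}

func (s *stubCIVAdaptor) GetImagesScanStatus(imageIDs []ContainerImageIdentifier) ([]ContainerImageScanStatus, error) {
	statuses := make([]ContainerImageScanStatus, 0, len(imageIDs))
	for range imageIDs {
		statuses = append(statuses, ContainerImageScanStatus{LastScanDate: time.Now()})
	}
	return statuses, nil
}

func (s *stubCIVAdaptor) GetImagesVulnerabilties(imageIDs []ContainerImageIdentifier) ([]ContainerImageVulnerabilityReport, error) {
	reports := make([]ContainerImageVulnerabilityReport, 0, len(imageIDs))
	for _, id := range imageIDs {
		reports = append(reports, ContainerImageVulnerabilityReport{ImageID: id})
	}
	return reports, nil
}

func (s *stubCIVAdaptor) GetImagesInformation(imageIDs []ContainerImageIdentifier) ([]ContainerImageInformation, error) {
	return make([]ContainerImageInformation, len(imageIDs)), nil
}
```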
|
||||
|
||||
@@ -1,91 +0,0 @@
|
||||
# Kubescape Release
|
||||
|
||||
|
||||
## Input
|
||||
|
||||
### Scan a running Kubernetes cluster
|
||||
|
||||
* Scan your Kubernetes cluster. Ignore `kube-system` and `kube-public` namespaces
|
||||
```
|
||||
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
|
||||
```
|
||||
|
||||
* Scan your Kubernetes cluster
|
||||
```
|
||||
kubescape scan framework nsa
|
||||
```
|
||||
|
||||
### Scan a local Kubernetes manifest
|
||||
|
||||
* Scan single Kubernetes manifest file <img src="new-feature.svg">
|
||||
```
|
||||
kubescape scan framework nsa <my-workload.yaml>
|
||||
```
|
||||
|
||||
* Scan many Kubernetes manifest files <img src="new-feature.svg">
|
||||
```
|
||||
kubescape scan framework nsa <my-workload-1.yaml> <my-workload-2.yaml>
|
||||
```
|
||||
|
||||
* Scan all Kubernetes manifest files in directory <img src="new-feature.svg">
|
||||
```
|
||||
kubescape scan framework nsa *.yaml
|
||||
```
|
||||
|
||||
* Scan Kubernetes manifest from stdout <img src="new-feature.svg">
|
||||
```
|
||||
cat <my-workload.yaml> | kubescape scan framework nsa -
|
||||
```
|
||||
|
||||
|
||||
* Scan Kubernetes manifest url <img src="new-feature.svg">
|
||||
```
|
||||
kubescape scan framework nsa https://raw.githubusercontent.com/GoogleCloudPlatform/microservices-demo/master/release/kubernetes-manifests.yaml
|
||||
```
|
||||
|
||||
### Scan HELM chart
|
||||
|
||||
* Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout <img src="new-feature.svg">
|
||||
```
|
||||
helm template [CHART] [flags] --generate-name --dry-run | kubescape scan framework nsa -
|
||||
```
|
||||
|
||||
### Scan on-prem (offline)
|
||||
|
||||
* Scan using a framework from the local file system
|
||||
```
|
||||
kubescape scan framework --use-from <path>
|
||||
```
|
||||
|
||||
* Scan using the framework from the default location in file system
|
||||
```
|
||||
kubescape scan framework --use-default
|
||||
```
|
||||
|
||||
## Output formats
|
||||
|
||||
By default, the output is user friendly.
|
||||
|
||||
For the sake of automation, it is possible to receive the result in a `json` or `junit xml` format.
|
||||
|
||||
* Output in `json` format <img src="new-feature.svg">
|
||||
```
|
||||
kubescape scan framework nsa --format json --output results.json
|
||||
```
|
||||
|
||||
* Output in `junit xml` format <img src="new-feature.svg">
|
||||
```
|
||||
kubescape scan framework nsa --format junit --output results.xml
|
||||
```
|
||||
|
||||
## Download
|
||||
|
||||
* Download and save in file <img src="new-feature.svg">
|
||||
```
|
||||
kubescape download framework nsa --output nsa.json
|
||||
```
|
||||
|
||||
* Download and save in default file (`~/.kubescape/<framework name>.json`)
|
||||
```
|
||||
kubescape download framework nsa
|
||||
```
|
||||
@@ -1,22 +1,66 @@
|
||||
# Kubescape project roadmap
|
||||
|
||||
## Planning principles
|
||||
|
||||
## Proposals
|
||||
* [Container registry integration](/docs/proposals/container-image-vulnerability-adaptor.md)
|
||||
Kubescape roadmap items are labeled based on where the feature is used and by their maturity.
|
||||
|
||||
## Planed features
|
||||
* Image vulnerablity scanning based controls
|
||||
* Assited remidiation (telling where/what to fix)
|
||||
* Git integration for pull requests
|
||||
* Integration with container registries
|
||||
* Custom controls and regos
|
||||
* API server configuration validation
|
||||
The features serve different stages of the users' workflow:
|
||||
* **Development phase** (writing Kubernetes manifests) - example: VS Code extension is used while editing YAMLs
|
||||
* **CI phase** (integrating manifests to GIT repo) - example: GitHub action validating HELM charts on PRs
|
||||
* **CD phase** (deploying applications in Kubernetes) - example: running cluster scan after a new deployment
|
||||
* **Monitoring phase** (scanning application in Kubernetes) - example: Prometheus scraping the cluster security risk
|
||||
|
||||
The items in the Kubescape roadmap are split into 3 major groups based on feature planning maturity:
|
||||
|
||||
* [Planning](#planning) - we have tickets open for these issues with a more or less clear vision of the design
|
||||
* [Backlog](#backlog) - features which were discussed at a high level but are not yet ready for development
|
||||
* [Wishlist](#wishlist) - features we are dreaming of 😀 and want to push forward gradually
|
||||
|
||||
|
||||
## Planning 👷
|
||||
* ##### Integration with image registries
|
||||
We want to expand Kubescape to integrate with different image registries and read image vulnerability information from there. This will allow Kubescape to give contextual security information about vulnerabilities. See [Container registry integration](/docs/proposals/container-image-vulnerability-adaptor.md)
|
||||
* ##### Kubescape as a microservice
|
||||
Create a REST API for Kubescape so it can run constantly in a cluster and other components like Prometheus can scrape results
|
||||
* ##### Kubescape CLI control over cluster operations
|
||||
Add functionality to the Kubescape CLI to trigger operations in Kubescape cluster components (for example, triggering image scans)
|
||||
* ##### Produce md/HTML reports
|
||||
Create scan reports for different output formats
|
||||
* ##### Git integration for pull requests
|
||||
Create insightful GitHub actions for Kubescape
|
||||
|
||||
## Backlog 📅
|
||||
* ##### JSON path for HELM charts
|
||||
Today Kubescape can point to issues in the Kubernetes object; we want to develop this feature so Kubescape will be able to point to the misconfigured source file (HELM)
|
||||
* ##### Create Kubescape HELM plugin
|
||||
* ##### Kubescape based admission controller
|
||||
Implement an admission controller API for the Kubescape microservice to enable users to use Kubescape rules as policies
|
||||
|
||||
## Wishlist 💭
|
||||
* ##### Integrate with other Kubernetes CLI tools
|
||||
Use Kubescape as a YAML validator for `kubectl` and others.
|
||||
* ##### Kubernetes audit log integration
|
||||
Connect Kubescape to audit log stream to enable it to produce more contextual security information based on how the API service is used.
|
||||
* ##### TUI for Kubescape
|
||||
Interactive terminal-based user interface that helps analyze and fix issues
|
||||
* ##### Scanning images with GO for vulnerabilities
|
||||
Image scanners cannot determine which packages were used to build Go executables, and we want to scan these executables for vulnerabilities
|
||||
* ##### Scanning Dockerfile-s for security best practices
|
||||
Scan an image or Dockerfile to determine whether it follows security best practices (for example, avoiding root containers)
|
||||
* ##### Custom controls and rules
|
||||
Enable users to define their own Rego-based rules
|
||||
* ##### More CI/CD tool integration
|
||||
Jenkins, etc. 😀
|
||||
|
||||
|
||||
## Completed features 🎓
|
||||
* Kubelet configuration validation
|
||||
|
||||
## Completed features
|
||||
* API server configuration validation
|
||||
* Image vulnerability scanning based controls
|
||||
* Assisted remediation (telling where/what to fix)
|
||||
* Integration with Prometheus
|
||||
* Configuration of controls (customizing rules for a given environment)
|
||||
* Installation in the cluster for continous monitoring
|
||||
* Installation in the cluster for continuous monitoring
|
||||
* Host scanner
|
||||
* Cloud vendor API integration
|
||||
* Custom exceptions
|
||||
|
||||
@@ -1,87 +0,0 @@
|
||||
<img src="kubescape.png" width="300" alt="logo" align="center">
|
||||
|
||||
# More detailed look on command line arguments and options
|
||||
|
||||
## Simple run:
|
||||
```
|
||||
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
|
||||
```
|
||||
|
||||
## Flags
|
||||
|
||||
| flag | default | description | options |
|
||||
| --- | --- | --- | --- |
|
||||
| `-e`/`--exclude-namespaces` | Scan all namespaces | Namespaces to exclude from scanning. Recommended to exclude `kube-system` and `kube-public` namespaces |
|
||||
| `-s`/`--silent` | Display progress messages | Silent progress messages |
|
||||
| `-t`/`--fail-threshold` | `0` (do not fail) | fail command (return exit code 1) if result is below threshold| `0` -> `100` |
|
||||
| `-f`/`--format` | `pretty-printer` | Output format | `pretty-printer`/`json`/`junit` |
|
||||
| `-o`/`--output` | print to stdout | Save scan result in file |
|
||||
| `--use-from` | | Load local framework object from specified path. If not used will download latest |
|
||||
| `--use-default` | `false` | Load local framework object from default path. If not used will download latest | `true`/`false` |
|
||||
| `--exceptions` | | Path to an [exceptions obj](examples/exceptions.json). If not set will download exceptions from Armo management portal |
|
||||
| `--results-locally` | `false` | Kubescape sends scan results to Armo management portal to allow users to control exceptions and maintain chronological scan results. Use this flag if you do not wish to use these features | `true`/`false`|
|
||||
|
||||
## Usage & Examples
|
||||
|
||||
### Examples
|
||||
|
||||
* Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework
|
||||
```
|
||||
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
|
||||
```
|
||||
|
||||
* Scan local `yaml`/`json` files before deploying
|
||||
```
|
||||
kubescape scan framework nsa *.yaml
|
||||
```
|
||||
|
||||
|
||||
* Scan `yaml`/`json` files from url
|
||||
```
|
||||
kubescape scan framework nsa https://raw.githubusercontent.com/GoogleCloudPlatform/microservices-demo/master/release/kubernetes-manifests.yaml
|
||||
```
|
||||
|
||||
* Output in `json` format
|
||||
```
|
||||
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format json --output results.json
|
||||
```
|
||||
|
||||
* Output in `junit xml` format
|
||||
```
|
||||
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format junit --output results.xml
|
||||
```
|
||||
|
||||
* Scan with exceptions, objects with exceptions will be presented as `warning` and not `fail`
|
||||
```
|
||||
kubescape scan framework nsa --exceptions examples/exceptions.json
|
||||
```
|
||||
|
||||
### Helm Support
|
||||
|
||||
* Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout
|
||||
```
|
||||
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan framework nsa -
|
||||
```
|
||||
|
||||
for example:
|
||||
```
|
||||
helm template bitnami/mysql --generate-name --dry-run | kubescape scan framework nsa -
|
||||
```
|
||||
|
||||
### Offline Support <img src="docs/new-feature.svg">
|
||||
|
||||
It is possible to run Kubescape offline!
|
||||
|
||||
First download the framework and then scan with `--use-from` flag
|
||||
|
||||
* Download and save in file, if file name not specified, will store save to `~/.kubescape/<framework name>.json`
|
||||
```
|
||||
kubescape download framework nsa --output nsa.json
|
||||
```
|
||||
|
||||
* Scan using the downloaded framework
|
||||
```
|
||||
kubescape scan framework nsa --use-from nsa.json
|
||||
```
|
||||
|
||||
Kubescape is an open source project, we welcome your feedback and ideas for improvement. We’re also aiming to collaborate with the Kubernetes community to help make the tests themselves more robust and complete as Kubernetes develops.
|
||||
BIN
docs/summary.png
Binary file not shown.
Before Width: | Height: | Size: 60 KiB | After Width: | Height: | Size: 70 KiB
68
examples/cloud-vendor-integration/aws.sh
Executable file
@@ -0,0 +1,68 @@
|
||||
#!/bin/bash
|
||||
|
||||
# AWS
|
||||
# Attach the Kubescape service account to an AWS IAM role with the described cluster permission
|
||||
|
||||
# Prerequisites:
|
||||
# eksctl, awscli v2
|
||||
|
||||
# Set environment variables
|
||||
echo 'Set environment variables'
|
||||
export kubescape_namespace=armo-system
|
||||
export kubescape_serviceaccount=armo-kubescape-service-account
|
||||
|
||||
# Get current context
|
||||
echo 'Get current context'
|
||||
export context=$(kubectl config current-context)
|
||||
|
||||
# Get cluster arn
|
||||
echo 'Get cluster arn'
|
||||
export cluster_arn=$(kubectl config view -o jsonpath="{.contexts[?(@.name == \"$context\")].context.cluster}")
|
||||
|
||||
# Get cluster name
|
||||
echo 'Get cluster name'
|
||||
export cluster_name=$(echo "$cluster_arn" | awk -F'/' '{print $NF}')
|
||||
|
||||
# Get cluster region
|
||||
echo 'Get cluster region'
|
||||
export cluster_region=$(echo "$cluster_arn" | awk -F':' '{print $4}')
|
||||
|
||||
# First step, Create IAM OIDC provider for the cluster (Not required if the third step runs as is):
|
||||
echo 'Create IAM OIDC provider for the cluster'
|
||||
eksctl utils associate-iam-oidc-provider --cluster $cluster_name --approve
|
||||
|
||||
# Second step, Create a policy and service account role:
|
||||
# Create a kubescape policy
|
||||
echo 'Create a kubescape policy'
|
||||
export kubescape_policy_arn=$(aws iam create-policy \
|
||||
--output yaml \
|
||||
--query 'Policy.Arn' \
|
||||
--policy-name kubescape \
|
||||
--policy-document \
|
||||
"$(cat <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": "eks:DescribeCluster",
|
||||
"Resource": "$cluster_arn"
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
)")
|
||||
|
||||
# Create Kubernetes Kubescape service account, and AWS IAM attachment role
|
||||
echo 'Create Kubernetes Kubescape service account, and AWS IAM attachment role'
|
||||
eksctl create iamserviceaccount \
|
||||
--name $kubescape_serviceaccount \
|
||||
--namespace $kubescape_namespace \
|
||||
--cluster $cluster_name \
|
||||
--attach-policy-arn $kubescape_policy_arn \
|
||||
--approve \
|
||||
--override-existing-serviceaccounts
|
||||
|
||||
# Install/Upgrade Kubescape chart
|
||||
echo 'Install/Upgrade Kubescape chart'
|
||||
helm upgrade --install armo armo-components/ -n armo-system --create-namespace --set clusterName=$cluster_name --set cloud_provider_engine=eks --set createKubescapeServiceAccount=false --set cloudRegion=$cluster_region
|
||||
49
examples/cloud-vendor-integration/gcp.sh
Executable file
@@ -0,0 +1,49 @@
|
||||
#!/bin/bash
|
||||
|
||||
# GCP
|
||||
# Attach the Kubescape service account to a GCP service account with the get cluster permission
|
||||
|
||||
# Prerequisites:
|
||||
# gcloud
|
||||
# Workload Identity enabled on the cluster.
|
||||
# Node pool with --workload-metadata=GKE_METADATA where the pod can run.
|
||||
# CLUSTER_NAME and CLUSTER_REGION environment variables.
|
||||
|
||||
[ -z $CLUSTER_NAME ] && >&2 echo "Please set the CLUSTER_NAME environment variable" && exit 1
|
||||
[ -z $CLUSTER_REGION ] && >&2 echo "Please set the CLUSTER_REGION environment variable" && exit 1
|
||||
|
||||
# Create GCP service account
|
||||
gcloud iam service-accounts create kubescape --display-name=kubescape
|
||||
|
||||
# Set environment variables
|
||||
echo 'Set environment variables'
|
||||
export kubescape_namespace=armo-system
|
||||
export kubescape_serviceaccount=armo-kubescape-service-account
|
||||
|
||||
# Get current GCP project
|
||||
echo 'Get current GCP project'
|
||||
export gcp_project=$(gcloud config get-value project)
|
||||
|
||||
sleep 5
|
||||
# Get service account email
|
||||
echo 'Get service account email'
|
||||
export gcp_service_account=$(gcloud iam service-accounts list --filter="email ~ kubescape@" --format="value(email)")
|
||||
|
||||
# Create custom cluster.get role
echo 'Create custom cluster.get role'
|
||||
export custom_role_name=$(gcloud iam roles create kubescape --project=$gcp_project --title='Armo kubernetes' --description='Allow clusters.get to Kubernetes armo service account' --permissions=container.clusters.get --stage=GA --format='value(name)')
|
||||
|
||||
# Attach policies to the service account
|
||||
echo 'Attach policies to the service account'
|
||||
gcloud --quiet projects add-iam-policy-binding $gcp_project --member serviceAccount:$gcp_service_account --role $custom_role_name >/dev/null
|
||||
gcloud --quiet projects add-iam-policy-binding $gcp_project --member serviceAccount:$gcp_service_account --role roles/storage.objectViewer >/dev/null
|
||||
|
||||
# If there are missing permissions, use this role instead
|
||||
# gcloud --quiet projects add-iam-policy-binding $gcp_project --member serviceAccount:$gcp_service_account --role roles/container.clusterViewer
|
||||
|
||||
# Bind the GCP kubescape service account to kubescape kubernetes service account
|
||||
gcloud iam service-accounts add-iam-policy-binding $gcp_service_account --role roles/iam.workloadIdentityUser --member "serviceAccount:${gcp_project}.svc.id.goog[${kubescape_namespace}/${kubescape_serviceaccount}]"
|
||||
|
||||
# Install/Upgrade Kubescape chart
|
||||
echo 'Install/Upgrade Kubescape chart'
|
||||
helm upgrade --install armo armo-components/ -n armo-system --create-namespace --set cloud_provider_engine=gke --set gke_service_account=$gcp_service_account --set cloudRegion=$CLUSTER_REGION --set clusterName=$CLUSTER_NAME --set gkeProject=$gcp_project
|
||||
@@ -1,85 +0,0 @@
|
||||
# Periodically Kubescape Scanning
|
||||
|
||||
You can scan your cluster periodically by adding a `CronJob` that will repeatedly trigger kubescape
|
||||
|
||||
* Setup [scanning & submitting](#scanning-and-submitting)
|
||||
* Setup [scanning without submitting](#scanning-without-submitting)
|
||||
|
||||
## Scanning And Submitting
|
||||
|
||||
If you wish to periodically scan and submit the result to the [Kubescape SaaS version](https://portal.armo.cloud/) where you can benefit the features the SaaS version provides, please follow this instructions ->
|
||||
|
||||
1. Apply kubescape namespace
|
||||
```
|
||||
kubectl apply ks-namespace.yaml
|
||||
```
|
||||
|
||||
2. Apply serviceAccount and roles
|
||||
```
|
||||
kubectl apply ks-serviceAccount.yaml
|
||||
```
|
||||
|
||||
3. Setup and apply configMap
|
||||
|
||||
Before you apply the configMap you need to set the account ID and cluster name in the `ks-configMap.yaml` file.
|
||||
|
||||
* Set cluster name:
|
||||
Run `kubectl config current-context` and set the result in the `data.clusterName` field
|
||||
* Set account ID:
|
||||
1. Navigate to the [Kubescape SaaS version](https://portal.armo.cloud/) and login/sign up for free
|
||||
2. Click the `Add Cluster` button on the top right of the page
|
||||
</br>
|
||||
<img src="screenshots/add-cluster.png" alt="add-cluster">
|
||||
3. Copy the value of `--account` and set it in the `data.customerGUID` field
|
||||
</br>
|
||||
<img src="screenshots/account.png" alt="account">
|
||||
|
||||
Make sure the configMap looks as following;
|
||||
```
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubescape
|
||||
labels:
|
||||
app: kubescape
|
||||
namespace: kubescape
|
||||
data:
|
||||
config.json: |
|
||||
{
|
||||
"customerGUID": "XXXXXXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"clusterName": "my-awesome-cluster-name"
|
||||
}
|
||||
```
|
||||
|
||||
Finally, apply the configMap
|
||||
```
|
||||
kubectl apply ks-configMap.yaml
|
||||
```
|
||||
|
||||
4. Apply CronJob
|
||||
|
||||
Before you apply the cronJob, make sure the scanning frequency suites your needs
|
||||
```
|
||||
kubectl apply ks-cronJob-submit.yaml
|
||||
```
|
||||
|
||||
## Scanning Without Submitting
|
||||
|
||||
If you wish to periodically scan but not submit the scan results, follow this instructions ->
|
||||
|
||||
1. Apply kubescape namespace
|
||||
```
|
||||
kubectl apply ks-namespace.yaml
|
||||
```
|
||||
|
||||
2. Apply serviceAccount and roles
|
||||
```
|
||||
kubectl apply ks-serviceAccount.yaml
|
||||
```
|
||||
|
||||
3. Apply CronJob
|
||||
|
||||
Before you apply the cronJob, make sure the scanning frequency suites your needs
|
||||
```
|
||||
kubectl apply ks-cronJob-non-submit.yaml
|
||||
```
|
||||
@@ -1,14 +0,0 @@
|
||||
# ------------------- Kubescape User/Customer ID ------------------- #
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubescape
|
||||
labels:
|
||||
app: kubescape
|
||||
namespace: kubescape
|
||||
data:
|
||||
config.json: |
|
||||
{
|
||||
"customerGUID": "<ID>",
|
||||
"clusterName": "<cluster name>"
|
||||
}
|
||||
@@ -1,32 +0,0 @@
|
||||
apiVersion: batch/v1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: kubescape
|
||||
labels:
|
||||
app: kubescape
|
||||
namespace: kubescape
|
||||
spec:
|
||||
# ┌────────────────── timezone (optional)
|
||||
# | ┌───────────── minute (0 - 59)
|
||||
# | │ ┌───────────── hour (0 - 23)
|
||||
# | │ │ ┌───────────── day of the month (1 - 31)
|
||||
# | │ │ │ ┌───────────── month (1 - 12)
|
||||
# | │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
|
||||
# | │ │ │ │ │ 7 is also Sunday on some systems)
|
||||
# | │ │ │ │ │
|
||||
# | │ │ │ │ │
|
||||
# CRON_TZ=UTC * * * * *
|
||||
schedule: "0 0 1 * *"
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: kubescape
|
||||
image: quay.io/armosec/kubescape:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
command: ["/bin/sh","-c"]
|
||||
args:
|
||||
- kubescape scan framework nsa
|
||||
restartPolicy: OnFailure
|
||||
serviceAccountName: kubescape-discovery
|
||||
@@ -1,40 +0,0 @@
|
||||
apiVersion: batch/v1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: kubescape
|
||||
labels:
|
||||
app: kubescape
|
||||
namespace: kubescape
|
||||
spec:
|
||||
# ┌────────────────── timezone (optional)
|
||||
# | ┌───────────── minute (0 - 59)
|
||||
# | │ ┌───────────── hour (0 - 23)
|
||||
# | │ │ ┌───────────── day of the month (1 - 31)
|
||||
# | │ │ │ ┌───────────── month (1 - 12)
|
||||
# | │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
|
||||
# | │ │ │ │ │ 7 is also Sunday on some systems)
|
||||
# | │ │ │ │ │
|
||||
# | │ │ │ │ │
|
||||
# CRON_TZ=UTC * * * * *
|
||||
schedule: "0 0 1 * *"
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: kubescape
|
||||
image: quay.io/armosec/kubescape:latest
|
||||
imagePullPolicy: IfNotPresent
|
||||
command: ["/bin/sh","-c"]
|
||||
args:
|
||||
- kubescape scan framework nsa --submit
|
||||
volumeMounts:
|
||||
- name: kubescape-config-volume
|
||||
mountPath: /root/.kubescape/config.json
|
||||
subPath: config.json
|
||||
restartPolicy: OnFailure
|
||||
serviceAccountName: kubescape-discovery
|
||||
volumes:
|
||||
- name: kubescape-config-volume
|
||||
configMap:
|
||||
name: kubescape
|
||||
@@ -1,7 +0,0 @@
|
||||
# ------------------- Kubescape User/Customer ID ------------------- #
|
||||
kind: Namespace
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubescape
|
||||
labels:
|
||||
app: kubescape
|
||||
@@ -1,61 +0,0 @@
|
||||
---
|
||||
# ------------------- Kubescape Service Account ------------------- #
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
app: kubescape
|
||||
name: kubescape-discovery
|
||||
namespace: kubescape
|
||||
|
||||
---
|
||||
# ------------------- Kubescape Role & Role Binding ------------------- #
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: kubescape-discovery-role
|
||||
namespace: kubescape
|
||||
rules:
|
||||
- apiGroups: ["*"]
|
||||
resources: ["*"]
|
||||
verbs: ["get", "list", "describe"]
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: kubescape-discovery-binding
|
||||
namespace: kubescape
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: kubescape-discovery-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubescape-discovery
|
||||
|
||||
---
|
||||
# ------------------- Kubescape Cluster Role & Cluster Role Binding ------------------- #
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: kubescape-discovery-clusterroles
|
||||
# "namespace" omitted since ClusterRoles are not namespaced
|
||||
rules:
|
||||
- apiGroups: ["*"]
|
||||
resources: ["*"]
|
||||
verbs: ["get", "list", "describe"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: kubescape-discovery-role-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: kubescape-discovery-clusterroles
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kubescape-discovery
|
||||
namespace: kubescape
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 41 KiB |
Binary file not shown.
|
Before Width: | Height: | Size: 120 KiB |
@@ -1,130 +0,0 @@
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# This file is DEPRECATED, please navigate to the official docs ->
# https://github.com/armosec/kubescape/tree/master/examples/cronJob-support/README.md
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

---
# ------------------- Kubescape Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: kubescape
  name: kubescape-discovery
  namespace: kubescape

---
# ------------------- Kubescape Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubescape-discovery-role
  namespace: kubescape
rules:
- apiGroups: ["*"]
  resources: ["*"]
  verbs: ["get", "list", "describe"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubescape-discovery-binding
  namespace: kubescape
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubescape-discovery-role
subjects:
- kind: ServiceAccount
  name: kubescape-discovery

---
# ------------------- Kubescape Cluster Role & Cluster Role Binding ------------------- #
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubescape-discovery-clusterroles
  # "namespace" omitted since ClusterRoles are not namespaced
rules:
- apiGroups: ["*"]
  resources: ["*"]
  verbs: ["get", "list", "describe"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubescape-discovery-role-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubescape-discovery-clusterroles
subjects:
- kind: ServiceAccount
  name: kubescape-discovery
  namespace: kubescape

---
# ------------------- Kubescape User/Customer GUID ------------------- #
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubescape-configmap
  labels:
    app: kubescape
  namespace: kubescape
data:
  config.json: |
    {
      "customerGUID": <MyGUID>,
      "clusterName": <MyK8sClusterName>
    }

---
apiVersion: batch/v1
kind: CronJob
metadata:
  name: kubescape
  labels:
    app: kubescape
  namespace: kubescape
spec:
  # ┌────────────────── timezone (optional)
  # | ┌───────────── minute (0 - 59)
  # | │ ┌───────────── hour (0 - 23)
  # | │ │ ┌───────────── day of the month (1 - 31)
  # | │ │ │ ┌───────────── month (1 - 12)
  # | │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
  # | │ │ │ │ │ 7 is also Sunday on some systems)
  # | │ │ │ │ │
  # | │ │ │ │ │
  # CRON_TZ=UTC * * * * *
  schedule: "0 0 1 * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: kubescape
            image: quay.io/armosec/kubescape:latest
            imagePullPolicy: IfNotPresent
            command: ["/bin/sh","-c"]
            args:
            - kubescape scan framework nsa --submit
            volumeMounts:
            - name: kubescape-config-volume
              mountPath: /root/.kubescape/config.json
              subPath: config.json
          restartPolicy: OnFailure
          serviceAccountName: kubescape-discovery
          volumes:
          - name: kubescape-config-volume
            configMap:
              name: kubescape-configmap

---
@@ -1,5 +0,0 @@
#! /bin/bash

echo "Testing Online Boutique yamls (https://github.com/GoogleCloudPlatform/microservices-demo)"

kubescape scan framework nsa online-boutique/*
@@ -19,11 +19,12 @@ e.g. When a `kube-system` resource fails and it is ok, simply add the resource t
* `cluster`: k8s cluster name (usually it is the `current-context`) (case-sensitive, regex supported)
* resource labels as key value (case-sensitive, regex NOT supported)
* `posturePolicies` - An attribute-based declaration {key: value}
-  * `frameworkName` - Framework names can be found [here](https://github.com/armosec/regolibrary/tree/master/frameworks)
-  * `controlName` - Control names can be found [here](https://github.com/armosec/regolibrary/tree/master/controls)
-  * `controlID` - Not yet supported
-  * `ruleName` - Rule names can be found [here](https://github.com/armosec/regolibrary/tree/master/rules)
+  * `frameworkName` - Framework names can be found [here](https://github.com/armosec/regolibrary/tree/master/frameworks) (regex supported)
+  * `controlName` - Control names can be found [here](https://github.com/armosec/regolibrary/tree/master/controls) (regex supported)
+  * `controlID` - Control IDs can be found [here](https://github.com/armosec/regolibrary/tree/master/controls) (regex supported)
+  * `ruleName` - Rule names can be found [here](https://github.com/armosec/regolibrary/tree/master/rules) (regex supported)

You can find [here](https://github.com/armosec/kubescape/tree/master/examples/exceptions) some examples of exceptions files
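For illustration, a complete exceptions file that uses the attributes above might look like the sketch below. The exception name and the `kube-system` namespace designator are placeholder values, and the field layout mirrors the linked examples, so treat it as illustrative rather than authoritative; only the `controlID` value (`C-0060`, "Allowed hostPath") comes from this change:

```json
[
  {
    "name": "exclude-allowed-hostpath-on-kube-system",
    "policyType": "postureExceptionPolicy",
    "actions": [
      "alertOnly"
    ],
    "resources": [
      {
        "designatorType": "Attributes",
        "attributes": {
          "namespace": "kube-system"
        }
      }
    ],
    "posturePolicies": [
      {
        "controlID": "C-0060"
      }
    ]
  }
]
```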

## Usage

@@ -92,7 +93,7 @@ Here are some examples demonstrating the different ways the exceptions file can

### Exclude control

-Exclude the ["Allowed hostPath" control](https://github.com/armosec/regolibrary/blob/master/controls/allowedhostpath.json#L2) by declaring the control in the `"posturePolicies"` section.
+Exclude the [C-0060 control](https://github.com/armosec/regolibrary/blob/master/controls/allowedhostpath.json#L2) by declaring the control ID in the `"posturePolicies"` section.

The resources

@@ -114,7 +115,7 @@ The resources
  ],
  "posturePolicies": [
    {
-      "controlName": "Allowed hostPath"
+      "controlID": "C-0060"
    }
  ]
}
@@ -24,17 +24,6 @@
          "namespace": "kube-node-lease"
        }
      }
    ],
-    "posturePolicies": [
-      {
-        "frameworkName": "NSA"
-      },
-      {
-        "frameworkName": "MITRE"
-      },
-      {
-        "frameworkName": "ArmoBest"
-      }
-    ]
  }
]
@@ -1,8 +1,7 @@
-# kubescape
+# Helm chart - DEPRECATED

  

+[helm chart repo](https://github.com/armosec/armo-helm)

Kubescape is the first open-source tool for testing whether Kubernetes is deployed securely according to multiple frameworks: regulatory guidance, customized company policies, and DevSecOps best practices, such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo) and the [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) frameworks. Kubescape scans K8s clusters, YAML files, and Helm charts, detects misconfigurations and software vulnerabilities at early stages of the CI/CD pipeline, and provides an instant risk score as well as risk trends over time. Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI and GitHub workflows.

## Values

go.mod (55 changes)
@@ -3,22 +3,26 @@ module github.com/armosec/kubescape
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/armosec/armoapi-go v0.0.41
|
||||
github.com/armosec/k8s-interface v0.0.54
|
||||
github.com/armosec/opa-utils v0.0.97
|
||||
github.com/armosec/rbac-utils v0.0.12
|
||||
github.com/armosec/armoapi-go v0.0.57
|
||||
github.com/armosec/k8s-interface v0.0.63
|
||||
github.com/armosec/opa-utils v0.0.110
|
||||
github.com/armosec/rbac-utils v0.0.14
|
||||
github.com/armosec/utils-go v0.0.3
|
||||
github.com/armosec/utils-k8s-go v0.0.1
|
||||
github.com/briandowns/spinner v1.18.0
|
||||
github.com/enescakir/emoji v1.0.0
|
||||
github.com/fatih/color v1.13.0
|
||||
github.com/gofrs/uuid v4.1.0+incompatible
|
||||
github.com/golang/glog v1.0.0
|
||||
github.com/francoispqt/gojay v1.2.13
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/johnfercher/maroto v0.34.0
|
||||
github.com/mattn/go-isatty v0.0.14
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/open-policy-agent/opa v0.33.1
|
||||
github.com/satori/go.uuid v1.2.0
|
||||
github.com/spf13/cobra v1.2.1
|
||||
github.com/stretchr/testify v1.7.0
|
||||
go.uber.org/zap v1.19.1
|
||||
golang.org/x/mod v0.4.2
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
k8s.io/api v0.22.2
|
||||
k8s.io/apimachinery v0.22.2
|
||||
@@ -27,7 +31,8 @@ require (
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.81.0 // indirect
|
||||
cloud.google.com/go v0.97.0 // indirect
|
||||
cloud.google.com/go/container v1.0.0 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.18 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect
|
||||
@@ -35,31 +40,43 @@ require (
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/OneOfOne/xxhash v1.2.8 // indirect
|
||||
github.com/armosec/armo-interfaces v0.0.3 // indirect
|
||||
github.com/armosec/utils-k8s-go v0.0.1 // indirect
|
||||
github.com/aws/aws-sdk-go v1.41.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.12.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.7.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.9.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.1.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/eks v1.17.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.6.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.8.0 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.13.0 // indirect
|
||||
github.com/aws/smithy-go v1.9.1 // indirect
|
||||
github.com/boombuler/barcode v1.0.0 // indirect
|
||||
github.com/coreos/go-oidc v2.2.1+incompatible // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/docker v20.10.9+incompatible // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
|
||||
github.com/francoispqt/gojay v1.2.13 // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/go-gota/gota v0.12.0 // indirect
|
||||
github.com/go-logr/logr v0.4.0 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/glog v1.0.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-cmp v0.5.5 // indirect
|
||||
github.com/google/go-cmp v0.5.6 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/json-iterator/go v1.1.11 // indirect
|
||||
github.com/jung-kurt/gofpdf v1.4.2 // indirect
|
||||
github.com/mattn/go-colorable v0.1.9 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
@@ -70,6 +87,7 @@ require (
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/pquerna/cachecontrol v0.1.0 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect
|
||||
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
@@ -77,19 +95,18 @@ require (
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.19.1 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect
|
||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 // indirect
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf // indirect
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 // indirect
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
|
||||
golang.org/x/text v0.3.6 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||
gonum.org/v1/gonum v0.9.1 // indirect
|
||||
google.golang.org/api v0.44.0 // indirect
|
||||
google.golang.org/api v0.59.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect
|
||||
google.golang.org/grpc v1.38.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20211027162914-98a5263abeca // indirect
|
||||
google.golang.org/grpc v1.40.0 // indirect
|
||||
google.golang.org/protobuf v1.27.1 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
|
||||
go.sum (139 changes)
@@ -19,14 +19,23 @@ cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKP
|
||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
|
||||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
|
||||
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
|
||||
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
|
||||
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
|
||||
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
|
||||
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
|
||||
cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
|
||||
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/container v1.0.0 h1:k3IMcHEEJR4pQmj4/gNeUuA4azhW7i2RPX8xGhyneZw=
|
||||
cloud.google.com/go/container v1.0.0/go.mod h1:EQLhTDFhzVXTX6TmjfNqa3/ikbCzjGlZcGY67exUnlY=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
|
||||
@@ -83,23 +92,22 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armosec/armo-interfaces v0.0.3 h1:kG4mJIPgWBJvQFDDy8JzdqX3ASbyl8t32IuJYqB31Pk=
|
||||
github.com/armosec/armo-interfaces v0.0.3/go.mod h1:7XYefhcBCFYoF5LflCZHWuUHu+JrSJbmzk0zoNv2WlU=
|
||||
github.com/armosec/armoapi-go v0.0.2/go.mod h1:vIK17yoKbJRQyZXWWLe3AqfqCRITxW8qmSkApyq5xFs=
|
||||
github.com/armosec/armoapi-go v0.0.23/go.mod h1:iaVVGyc23QGGzAdv4n+szGQg3Rbpixn9yQTU3qWRpaw=
|
||||
github.com/armosec/armoapi-go v0.0.41 h1:iMkaCsME+zhE6vnCOMaqfqc0cp7pste8QFHojeGKfGg=
|
||||
github.com/armosec/armoapi-go v0.0.41/go.mod h1:exk1O3rK6V+X8SSyxc06lwb0j9ILQuKAoIdz9hs6Ndw=
|
||||
github.com/armosec/armoapi-go v0.0.49/go.mod h1:iaVVGyc23QGGzAdv4n+szGQg3Rbpixn9yQTU3qWRpaw=
|
||||
github.com/armosec/armoapi-go v0.0.57 h1:uAYohbzo+ZXZ4FR9R0i6181susGllWI4auUXQQhKQdQ=
|
||||
github.com/armosec/armoapi-go v0.0.57/go.mod h1:U/Axd+D5N00x9Ekr7t+5HXqLCMO+98NfJSVAggqJftI=
|
||||
github.com/armosec/k8s-interface v0.0.8/go.mod h1:xxS+V5QT3gVQTwZyAMMDrYLWGrfKOpiJ7Jfhfa0w9sM=
|
||||
github.com/armosec/k8s-interface v0.0.37/go.mod h1:vHxGWqD/uh6+GQb9Sqv7OGMs+Rvc2dsFVc0XtgRh1ZU=
|
||||
github.com/armosec/k8s-interface v0.0.50/go.mod h1:vHxGWqD/uh6+GQb9Sqv7OGMs+Rvc2dsFVc0XtgRh1ZU=
|
||||
github.com/armosec/k8s-interface v0.0.54 h1:1sQeoEZA5bgpXVibXhEiTSeLd3GKY5NkTOeewdgR0Bs=
|
||||
github.com/armosec/k8s-interface v0.0.54/go.mod h1:vHxGWqD/uh6+GQb9Sqv7OGMs+Rvc2dsFVc0XtgRh1ZU=
|
||||
github.com/armosec/k8s-interface v0.0.63 h1:RuMqsYcneVUD7HJkMVxjc8N6bpWd6rhJ5K85USMq1Sg=
|
||||
github.com/armosec/k8s-interface v0.0.63/go.mod h1:vwprS8qn/iowd5yf0JHpqDsLA5I8W2muqX9AxKhkb0Q=
|
||||
github.com/armosec/opa-utils v0.0.64/go.mod h1:6tQP8UDq2EvEfSqh8vrUdr/9QVSCG4sJfju1SXQOn4c=
|
||||
github.com/armosec/opa-utils v0.0.97 h1:KPjRZdsAC9EObo17QxiW+s5KWmF6vNFu+VQSOgFv5uk=
|
||||
github.com/armosec/opa-utils v0.0.97/go.mod h1:BNTjeianyXlflJMz3bZM0GimBWqmzirUf1whWR6Os04=
|
||||
github.com/armosec/opa-utils v0.0.110 h1:qncGcbnYjiGULP3yK+4geRNNpRoWqKXQL+Xg+iXc1cM=
|
||||
github.com/armosec/opa-utils v0.0.110/go.mod h1:Wc1P4gkB6UQeGW8I76zCuitGGl15Omp0bKw7N0tR9dk=
|
||||
github.com/armosec/rbac-utils v0.0.1/go.mod h1:pQ8CBiij8kSKV7aeZm9FMvtZN28VgA7LZcYyTWimq40=
|
||||
github.com/armosec/rbac-utils v0.0.12 h1:uJpMGDyLAX129PrKHp6NPNB6lVRhE0OZIwV6ywzSDrs=
|
||||
github.com/armosec/rbac-utils v0.0.12/go.mod h1:Ex/IdGWhGv9HZq6Hs8N/ApzCKSIvpNe/ETqDfnuyah0=
|
||||
github.com/armosec/rbac-utils v0.0.14 h1:CKYKcgqJEXWF2Hen/B1pVGtS3nDAG1wp9dDv6oNtq90=
|
||||
github.com/armosec/rbac-utils v0.0.14/go.mod h1:Ex/IdGWhGv9HZq6Hs8N/ApzCKSIvpNe/ETqDfnuyah0=
|
||||
github.com/armosec/utils-go v0.0.2/go.mod h1:itWmRLzRdsnwjpEOomL0mBWGnVNNIxSjDAdyc+b0iUo=
|
||||
github.com/armosec/utils-go v0.0.3 h1:uyQI676yRciQM0sSN9uPoqHkbspTxHO0kmzXhBeE/xU=
|
||||
github.com/armosec/utils-go v0.0.3/go.mod h1:itWmRLzRdsnwjpEOomL0mBWGnVNNIxSjDAdyc+b0iUo=
|
||||
@@ -109,6 +117,30 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l
|
||||
github.com/aws/aws-sdk-go v1.41.1/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go v1.41.11 h1:QLouWsiYQ8i22kD8k58Dpdhio1A0MpT7bg9ZNXqEjuI=
|
||||
github.com/aws/aws-sdk-go v1.41.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go-v2 v1.12.0 h1:z5bijqy+eXLK/QqF6eQcwCN2qw1k+m9OUDicqCZygu0=
|
||||
github.com/aws/aws-sdk-go-v2 v1.12.0/go.mod h1:tWhQI5N5SiMawto3uMAQJU5OUN/1ivhDDHq7HTsJvZ0=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.12.0 h1:WOhIzj5HdixjlvQ4SLYAOk6OUUsuu88RwcsTzexa9cg=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.12.0/go.mod h1:GQONFVSDdG6RRho1C730SGNyDhS1kSTnxpOE76ptBqo=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.7.0 h1:KFuKwPs7i5SE5a0LxqAxz75qxSjr2HnHnhu0UPGlvpM=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.7.0/go.mod h1:Kmq64kahHJtXfmnEwnvRKeNjLBqkdP++Itln9BmQerE=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.9.0 h1:fPq3oloONbHaA0O8KX/KYUQk7pG9JjKBwYQvQsQDK84=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.9.0/go.mod h1:19SxQ+9zANyJCnNaoF3ovl8bFil4TaqCYEDdqNGKM+A=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.3 h1:YPNiEXnuWdkpNOwBFHhcLwkSmewwQRcPFO9dHmxU0qg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.3/go.mod h1:L72JSFj9OwHwyukeuKFFyTj6uFWE4AjB0IQp97bd9Lc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.1.0 h1:ArRd27pSm66f7cCBDPS77wvxiS4IRjFatpzVBD7Aojc=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.1.0/go.mod h1:KdVvdk4gb7iatuHZgIkIqvJlWHBtjCJLUtD/uO/FkWw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.3 h1:fmGqMNlFTHr9Y48qmYYv2qIo+TAsST3qZa2d1HcwBeo=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.3/go.mod h1:N4dv+zawriMFZBO/6UKg3zt+XO6xWOQo1neAA0lFbo4=
|
||||
github.com/aws/aws-sdk-go-v2/service/eks v1.17.0 h1:lal3erO1VVVSnw3a47pRiCTne+9mGh9IyJDIgwWD02o=
|
||||
github.com/aws/aws-sdk-go-v2/service/eks v1.17.0/go.mod h1:YHVf/zIAi9lGVhG1TakeJp7LaUHFS99yme9e78+r+8A=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.6.0 h1:rwE0kWa5qm0yEoNPwC3zhrt1tFVXTmkWRlUxLayAwyc=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.6.0/go.mod h1:wTgFkG6t7jS/6Y0SILXwfspV3IXowb6ngsAlSajW0Kc=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.8.0 h1:X77LUt6Djy3Z02r6tW7Z+4FNr6GCnEG54EXfskc19M4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.8.0/go.mod h1:AB6v3BedyhVRIbPQbJnUsBmtup2pFiikpp5n3YyB6Ac=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.13.0 h1:n8+dZMOvwkGtmhub8B2wYvRHut45/NB7DeNhNcUnBpg=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.13.0/go.mod h1:jQto17aC9pJ6xRa1g29uXZhbcS6qNT3PSnKfPShq4sY=
|
||||
github.com/aws/smithy-go v1.9.1 h1:5vetTooLk4hPWV8q6ym6+lXKAT1Urnm49YkrRKo2J8o=
|
||||
github.com/aws/smithy-go v1.9.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
|
||||
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
@@ -119,6 +151,7 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/boombuler/barcode v1.0.0 h1:s1TvRnXwL2xJRaccrdcBQMZxq6X7DvsMogtmJeHDdrc=
|
||||
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/briandowns/spinner v1.18.0 h1:SJs0maNOs4FqhBwiJ3Gr7Z1D39/rukIVGQvpNZVHVcM=
|
||||
@@ -141,6 +174,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
|
||||
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
|
||||
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
|
||||
@@ -192,6 +226,7 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
|
||||
github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
|
||||
@@ -253,7 +288,6 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gofrs/uuid v4.1.0+incompatible h1:sIa2eCvUTwgjbqXrPLfNwUf9S3i3mpH1O1atV+iL/Wk=
|
||||
github.com/gofrs/uuid v4.1.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
@@ -279,6 +313,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
|
||||
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
@@ -316,8 +351,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
@@ -326,6 +362,7 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
@@ -337,19 +374,27 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
|
||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
||||
github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU=
|
||||
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
|
||||
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
|
||||
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
|
||||
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
@@ -393,6 +438,8 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/johnfercher/maroto v0.34.0 h1:kmqlO280WbjzeaPn8HtqUE3gooauVwqO/3cmrBtCL4A=
|
||||
github.com/johnfercher/maroto v0.34.0/go.mod h1:UeLY7evCe2Au8KwHFzaSGffKGADEZK+u6O8C74mdudM=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
@@ -408,6 +455,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
github.com/jung-kurt/gofpdf v1.4.2 h1:3u2ojTwxPPu3ysIOc5iTwcECpvkFCAe2RJ/tQrvfLi0=
|
||||
github.com/jung-kurt/gofpdf v1.4.2/go.mod h1:rZsO0wEsunjT/L9stF3fJjYbAHgqNYuQB4B8FWvBck0=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
@@ -550,9 +599,9 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58 h1:nlG4Wa5+minh3S9LVFtNoY+GVRiudA2e3EVfcCi3RCA=
|
||||
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
@@ -613,6 +662,8 @@ github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
@@ -719,6 +770,7 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190507092727-e4e5bf290fec/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
@@ -749,6 +801,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -798,6 +851,7 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
@@ -818,6 +872,9 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE=
|
||||
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
||||
@@ -896,13 +953,19 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359 h1:2B5p2L5IfGiD7+b9BOoRMC6DgObAVZV+Fsp050NqXik=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
|
||||
@@ -914,8 +977,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -984,7 +1048,10 @@ golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4f
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -1024,8 +1091,17 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q
|
||||
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
|
||||
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
|
||||
google.golang.org/api v0.44.0 h1:URs6qR1lAxDsqWITsQXI4ZkGiYJ5dHtRNiCpfs2OeKA=
|
||||
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
|
||||
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
|
||||
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
|
||||
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
|
||||
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
|
||||
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
|
||||
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
|
||||
google.golang.org/api v0.59.0 h1:fPfFO7gttlXYo2ALuD3HxJzh8vaF++4youI0BkFL6GE=
|
||||
google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -1082,8 +1158,26 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D
|
||||
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0=
|
||||
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
|
||||
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
|
||||
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
|
||||
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
|
||||
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210921142501-181ce0d877f6/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/genproto v0.0.0-20211027162914-98a5263abeca h1:+e+aQDO4/c9KaG8PXWHTc6/+Du6kz+BKcXCSnV4SSTE=
|
||||
google.golang.org/genproto v0.0.0-20211027162914-98a5263abeca/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
@@ -1107,8 +1201,13 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
|
||||
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
|
||||
google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
|
||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
|
||||
@@ -2,29 +2,31 @@ apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
labels:
|
||||
app: host-sensor
|
||||
kubernetes.io/metadata.name: armo-kube-host-sensor
|
||||
tier: armo-kube-host-sensor-control-plane
|
||||
name: armo-kube-host-sensor
|
||||
app: kubescape-host-scanner
|
||||
k8s-app: kubescape-host-scanner
|
||||
kubernetes.io/metadata.name: kubescape-host-scanner
|
||||
tier: kubescape-host-scanner-control-plane
|
||||
name: kubescape-host-scanner
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: host-sensor
|
||||
namespace: armo-kube-host-sensor
|
||||
name: host-scanner
|
||||
namespace: kubescape-host-scanner
|
||||
labels:
|
||||
k8s-app: armo-kube-host-sensor
|
||||
app: host-scanner
|
||||
k8s-app: kubescape-host-scanner
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
name: host-sensor
|
||||
name: host-scanner
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: host-sensor
|
||||
name: host-scanner
|
||||
spec:
|
||||
tolerations:
|
||||
# this toleration is to have the daemonset runnable on master nodes
|
||||
# this toleration is to have the DaemonSet runnable on master nodes
|
||||
# remove it if your masters can't run pods
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: Exists
|
||||
@@ -37,12 +39,13 @@ spec:
|
||||
readOnlyRootFilesystem: true
|
||||
procMount: Unmasked
|
||||
ports:
|
||||
- name: http
|
||||
- name: scanner # Do not change port name
|
||||
hostPort: 7888
|
||||
containerPort: 7888
|
||||
protocol: TCP
|
||||
resources:
|
||||
limits:
|
||||
cpu: 1m
|
||||
cpu: 0.1m
|
||||
memory: 200Mi
|
||||
requests:
|
||||
cpu: 1m
|
||||
@@ -50,6 +53,12 @@ spec:
|
||||
volumeMounts:
|
||||
- mountPath: /host_fs
|
||||
name: host-filesystem
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /kernelVersion
|
||||
port: 7888
|
||||
initialDelaySeconds: 1
|
||||
periodSeconds: 1
|
||||
terminationGracePeriodSeconds: 120
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
automountServiceAccountToken: false
|
||||
|
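As a quick sanity check after this DaemonSet change, one way to confirm the host-scanner pods answer on the readiness path defined above is sketched here; the namespace, label, port and `/kernelVersion` path come from the manifest in this diff, while the pod name is whatever `kubectl` reports (this verification step is not part of the change itself):

```sh
# List the host-scanner pods created by the DaemonSet
kubectl -n kubescape-host-scanner get pods -l name=host-scanner -o wide

# Forward the scanner port of one pod and hit the readiness path used by the probe
kubectl -n kubescape-host-scanner port-forward <host-scanner-pod-name> 7888:7888 &
curl http://localhost:7888/kernelVersion
```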
||||
@@ -2,21 +2,21 @@ package hostsensorutils
|
||||
|
||||
import (
|
||||
_ "embed"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/logger"
|
||||
"github.com/armosec/kubescape/cautils/logger/helpers"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
appsapplyv1 "k8s.io/client-go/applyconfigurations/apps/v1"
|
||||
coreapplyv1 "k8s.io/client-go/applyconfigurations/core/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -24,25 +24,36 @@ var (
|
||||
hostSensorYAML string
|
||||
)
|
||||
|
||||
const PortName string = "scanner"
|
||||
|
||||
type HostSensorHandler struct {
|
||||
HostSensorPort int32
|
||||
HostSensorPodNames map[string]string //map from pod names to node names
|
||||
IsReady <-chan bool //readonly chan
|
||||
k8sObj *k8sinterface.KubernetesApi
|
||||
DaemonSet *appsv1.DaemonSet
|
||||
podListLock sync.RWMutex
|
||||
gracePeriod int64
|
||||
HostSensorPort int32
|
||||
HostSensorPodNames map[string]string //map from pod names to node names
|
||||
HostSensorUnscheduledPodNames map[string]string //map from pod names to node names
|
||||
IsReady <-chan bool //readonly chan
|
||||
k8sObj *k8sinterface.KubernetesApi
|
||||
DaemonSet *appsv1.DaemonSet
|
||||
podListLock sync.RWMutex
|
||||
gracePeriod int64
|
||||
}
|
||||
|
||||
func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi) (*HostSensorHandler, error) {
|
||||
func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile string) (*HostSensorHandler, error) {
|
||||
|
||||
if k8sObj == nil {
|
||||
return nil, fmt.Errorf("nil k8s interface received")
|
||||
}
|
||||
if hostSensorYAMLFile != "" {
|
||||
d, err := loadHostSensorFromFile(hostSensorYAMLFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load host-scan yaml file, reason: %s", err.Error())
|
||||
}
|
||||
hostSensorYAML = d
|
||||
}
|
||||
hsh := &HostSensorHandler{
|
||||
k8sObj: k8sObj,
|
||||
HostSensorPodNames: map[string]string{},
|
||||
gracePeriod: int64(15),
|
||||
k8sObj: k8sObj,
|
||||
HostSensorPodNames: map[string]string{},
|
||||
HostSensorUnscheduledPodNames: map[string]string{},
|
||||
gracePeriod: int64(15),
|
||||
}
|
||||
// Don't deploy on a cluster with no nodes. Some cloud providers prevent termination of K8s objects for clusters with no nodes!!!
|
||||
if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
|
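The hunk above changes `NewHostSensorHandler` to take an optional host-sensor YAML override in addition to the Kubernetes interface. A caller-side sketch is shown below; the wrapper function, the import path of `hostsensorutils`, and the error handling are assumptions for illustration and are not part of this change:

```go
package main

import (
	"github.com/armosec/k8s-interface/k8sinterface"
	"github.com/armosec/kubescape/cautils/logger"
	"github.com/armosec/kubescape/cautils/logger/helpers"
	"github.com/armosec/kubescape/hostsensorutils" // assumed import path
)

// deployHostSensor creates the handler and deploys the host-sensor DaemonSet.
// Pass an empty hostSensorYAMLFile to use the embedded YAML, or a file path
// to override it via the new constructor parameter.
func deployHostSensor(k8sObj *k8sinterface.KubernetesApi, hostSensorYAMLFile string) {
	hsh, err := hostsensorutils.NewHostSensorHandler(k8sObj, hostSensorYAMLFile)
	if err != nil {
		logger.L().Error("failed to create host sensor handler", helpers.Error(err))
		return
	}
	if err := hsh.Init(); err != nil {
		logger.L().Error("failed to initialize host sensor", helpers.Error(err))
	}
}
```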
||||
@@ -60,69 +71,99 @@ func (hsh *HostSensorHandler) Init() error {
|
||||
// store namespace + port
|
||||
// store pod names
|
||||
// make sure all pods are running; after X seconds treat them as running anyway, and log an error for the pods that are not yet running
|
||||
cautils.ProgressTextDisplay("Installing host sensor")
|
||||
logger.L().Info("Installing host sensor")
|
||||
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
|
||||
if err := hsh.applyYAML(); err != nil {
|
||||
return fmt.Errorf("in HostSensorHandler init failed to apply YAML: %v", err)
|
||||
return fmt.Errorf("failed to apply host sensor YAML, reason: %v", err)
|
||||
}
|
||||
hsh.populatePodNamesToNodeNames()
|
||||
if err := hsh.checkPodForEachNode(); err != nil {
|
||||
fmt.Printf("failed to validate host-sensor pods status: %v", err)
|
||||
logger.L().Error("failed to validate host-sensor pods status", helpers.Error(err))
|
||||
}
|
||||
return nil
|
||||
}
|

 func (hsh *HostSensorHandler) applyYAML() error {
-	dec := yaml.NewDocumentDecoder(io.NopCloser(strings.NewReader(hostSensorYAML)))
-	// apply namespace
-	singleYAMLBytes := make([]byte, 4096)
-	if readLen, err := dec.Read(singleYAMLBytes); err != nil {
-		return fmt.Errorf("failed to read YAML of namespace: %v", err)
-	} else {
-		singleYAMLBytes = singleYAMLBytes[:readLen]
+	workloads, err := cautils.ReadFile([]byte(hostSensorYAML), cautils.YAML_FILE_FORMAT)
+	if len(err) != 0 {
+		return fmt.Errorf("failed to read YAML files, reason: %v", err)
 	}
-	namespaceAC := &coreapplyv1.NamespaceApplyConfiguration{}
-	if err := yaml.Unmarshal(singleYAMLBytes, namespaceAC); err != nil {
-		return fmt.Errorf("failed to Unmarshal YAML of namespace: %v", err)
-	}
-	namespaceName := ""
-
-	if ns, err := hsh.k8sObj.KubernetesClient.CoreV1().Namespaces().Apply(hsh.k8sObj.Context, namespaceAC, metav1.ApplyOptions{
-		FieldManager: "kubescape",
-	}); err != nil {
-		return fmt.Errorf("failed to apply YAML of namespace: %v", err)
-	} else {
-		namespaceName = ns.Name
-	}
-	// apply DaemonSet
-	daemonAC := &appsapplyv1.DaemonSetApplyConfiguration{}
-	singleYAMLBytes = make([]byte, 4096)
-	if readLen, err := dec.Read(singleYAMLBytes); err != nil {
-		if erra := hsh.tearDownNamesapce(namespaceName); erra != nil {
-			err = fmt.Errorf("%v; In addidtion %v", err, erra)
+	// Get namespace name
+	namespaceName := ""
+	for i := range workloads {
+		if workloads[i].GetKind() == "Namespace" {
+			namespaceName = workloads[i].GetName()
+			break
+		}
-		return fmt.Errorf("failed to read YAML of DaemonSet: %v", err)
-	} else {
-		singleYAMLBytes = singleYAMLBytes[:readLen]
 	}
-	if err := yaml.Unmarshal(singleYAMLBytes, daemonAC); err != nil {
-		if erra := hsh.tearDownNamesapce(namespaceName); erra != nil {
-			err = fmt.Errorf("%v; In addidtion %v", err, erra)
+
+	// Update workload data before applying
+	for i := range workloads {
+		w := workloadinterface.NewWorkloadObj(workloads[i].GetObject())
+		if w == nil {
+			return fmt.Errorf("invalid workload: %v", workloads[i].GetObject())
+		}
-		return fmt.Errorf("failed to Unmarshal YAML of DaemonSet: %v", err)
-	}
-	daemonAC.Namespace = &namespaceName
-	if ds, err := hsh.k8sObj.KubernetesClient.AppsV1().DaemonSets(namespaceName).Apply(hsh.k8sObj.Context, daemonAC, metav1.ApplyOptions{
-		FieldManager: "kubescape",
-	}); err != nil {
-		if erra := hsh.tearDownNamesapce(namespaceName); erra != nil {
-			err = fmt.Errorf("%v; In addidtion %v", err, erra)
+		// set namespace in all objects
+		if w.GetKind() != "Namespace" {
+			w.SetNamespace(namespaceName)
+		}
+		// Get container port
+		if w.GetKind() == "DaemonSet" {
+			containers, err := w.GetContainers()
+			if err != nil {
+				if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
+					logger.L().Warning("failed to tear down namespace", helpers.Error(erra))
+				}
+				return fmt.Errorf("container not found in DaemonSet: %v", err)
+			}
+			for j := range containers {
+				for k := range containers[j].Ports {
+					if containers[j].Ports[k].Name == PortName {
+						hsh.HostSensorPort = containers[j].Ports[k].ContainerPort
+					}
+				}
+			}
+
+		}
+
+		// Apply workload
+		var newWorkload k8sinterface.IWorkload
+		var e error
+
+		if g, err := hsh.k8sObj.GetWorkload(w.GetNamespace(), w.GetKind(), w.GetName()); err == nil && g != nil {
+			newWorkload, e = hsh.k8sObj.UpdateWorkload(w)
+		} else {
+			newWorkload, e = hsh.k8sObj.CreateWorkload(w)
+		}
+		if e != nil {
+			if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
+				logger.L().Warning("failed to tear down namespace", helpers.Error(erra))
+			}
+			return fmt.Errorf("failed to create/update YAML, reason: %v", e)
+		}
+
+		// Save DaemonSet
+		if newWorkload.GetKind() == "DaemonSet" {
+			b, err := json.Marshal(newWorkload.GetObject())
+			if err != nil {
+				if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
+					logger.L().Warning("failed to tear down namespace", helpers.Error(erra))
+				}
+				return fmt.Errorf("failed to Marshal YAML of DaemonSet, reason: %v", err)
+			}
+			var ds appsv1.DaemonSet
+			if err := json.Unmarshal(b, &ds); err != nil {
+				if erra := hsh.tearDownNamespace(namespaceName); erra != nil {
+					logger.L().Warning("failed to tear down namespace", helpers.Error(erra))
+				}
+				return fmt.Errorf("failed to Unmarshal YAML of DaemonSet, reason: %v", err)
+			}
+			hsh.DaemonSet = &ds
+		}
-		return fmt.Errorf("failed to apply YAML of DaemonSet: %v", err)
-	} else {
-		hsh.HostSensorPort = ds.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort
-		hsh.DaemonSet = ds
 	}
 	return nil
 }
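The new applyYAML above replaces the server-side apply configurations with a get-then-create/update loop over the parsed workloads. As a point of comparison only, a minimal sketch of the same "create, or update if it already exists" idea with plain client-go follows; this is not kubescape's code, and `createOrUpdateDaemonSet` is a hypothetical helper that bypasses the k8sObj wrapper used in the diff.

```go
package example

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createOrUpdateDaemonSet tries to create the DaemonSet first and falls back to
// Update when the object already exists, mirroring the GetWorkload ->
// CreateWorkload/UpdateWorkload flow in the diff above.
func createOrUpdateDaemonSet(ctx context.Context, client kubernetes.Interface, ds *appsv1.DaemonSet) (*appsv1.DaemonSet, error) {
	created, err := client.AppsV1().DaemonSets(ds.Namespace).Create(ctx, ds, metav1.CreateOptions{})
	if err == nil {
		return created, nil
	}
	if !apierrors.IsAlreadyExists(err) {
		return nil, fmt.Errorf("failed to create DaemonSet %s/%s: %w", ds.Namespace, ds.Name, err)
	}
	// The object is already there: push the desired spec over it.
	return client.AppsV1().DaemonSets(ds.Namespace).Update(ctx, ds, metav1.UpdateOptions{})
}
```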
@@ -136,12 +177,17 @@ func (hsh *HostSensorHandler) checkPodForEachNode() error {
 		}
 		hsh.podListLock.RLock()
 		podsNum := len(hsh.HostSensorPodNames)
+		unschedPodNum := len(hsh.HostSensorUnscheduledPodNames)
 		hsh.podListLock.RUnlock()
-		if len(nodesList.Items) == podsNum {
+		if len(nodesList.Items) <= podsNum+unschedPodNum {
 			break
 		}
 		if time.Now().After(deadline) {
-			return fmt.Errorf("host-sensor pods number (%d) differ than nodes number (%d) after deadline exceded", podsNum, len(nodesList.Items))
+			hsh.podListLock.RLock()
+			podsMap := hsh.HostSensorPodNames
+			hsh.podListLock.RUnlock()
+			return fmt.Errorf("host-sensor pods number (%d) differ than nodes number (%d) after deadline exceeded. Kubescape will take data only from the pods below: %v",
+				podsNum, len(nodesList.Items), podsMap)
 		}
 		time.Sleep(100 * time.Millisecond)
 	}
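The hunk above changes the readiness check from an exact pod/node match to a poll that also counts pods known to be unschedulable, so that tainted or cordoned nodes no longer block the scan. A minimal sketch of the underlying poll-until-deadline pattern, with `countReadyPods` and `countNodes` as hypothetical callbacks standing in for the real bookkeeping:

```go
package example

import (
	"fmt"
	"time"
)

// waitForPods polls until every node is covered by a ready (or known-unschedulable)
// pod, or until the deadline passes, in which case it reports how far it got.
func waitForPods(countReadyPods, countNodes func() int, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		if countNodes() <= countReadyPods() {
			return nil // every node is accounted for
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("only %d of %d host-sensor pods are ready after %s", countReadyPods(), countNodes(), timeout)
		}
		time.Sleep(100 * time.Millisecond)
	}
}
```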
@@ -152,12 +198,17 @@ func (hsh *HostSensorHandler) checkPodForEachNode() error {
 func (hsh *HostSensorHandler) populatePodNamesToNodeNames() {

 	go func() {
-		watchRes, err := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).Watch(hsh.k8sObj.Context, metav1.ListOptions{
+		var watchRes watch.Interface
+		var err error
+		watchRes, err = hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).Watch(hsh.k8sObj.Context, metav1.ListOptions{
 			Watch:         true,
 			LabelSelector: fmt.Sprintf("name=%s", hsh.DaemonSet.Spec.Template.Labels["name"]),
 		})
 		if err != nil {
-			fmt.Printf("Failed to watch over daemonset pods")
+			logger.L().Error("failed to watch over daemonset pods - are we missing watch pods permissions?", helpers.Error(err))
 		}
+		if watchRes == nil {
+			return
+		}
 		for eve := range watchRes.ResultChan() {
 			pod, ok := eve.Object.(*corev1.Pod)
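The change above also guards against a nil watch before ranging over its result channel. For readers less familiar with client-go watches, a self-contained sketch of the same pod-watch pattern; `watchSensorPods`, its clientset, namespace and selector are illustrative placeholders, not kubescape's API.

```go
package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
)

// watchSensorPods opens a watch on pods matching a label selector and reacts to
// add/modify/delete events until the channel is closed.
func watchSensorPods(ctx context.Context, client kubernetes.Interface, namespace, selector string) error {
	w, err := client.CoreV1().Pods(namespace).Watch(ctx, metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return fmt.Errorf("failed to watch pods: %w", err)
	}
	defer w.Stop()
	for eve := range w.ResultChan() {
		pod, ok := eve.Object.(*corev1.Pod)
		if !ok {
			continue // not a pod event (e.g. a watch error object)
		}
		switch eve.Type {
		case watch.Added, watch.Modified:
			fmt.Printf("pod %s is on node %s (phase %s)\n", pod.Name, pod.Spec.NodeName, pod.Status.Phase)
		case watch.Deleted:
			fmt.Printf("pod %s removed\n", pod.Name)
		}
	}
	return nil
}
```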
@@ -175,17 +226,38 @@ func (hsh *HostSensorHandler) updatePodInListAtomic(eventType watch.EventType, p

 	switch eventType {
 	case watch.Added, watch.Modified:
-		if podObj.Status.Phase == corev1.PodRunning {
+		if podObj.Status.Phase == corev1.PodRunning && len(podObj.Status.ContainerStatuses) > 0 &&
+			podObj.Status.ContainerStatuses[0].Ready {
 			hsh.HostSensorPodNames[podObj.ObjectMeta.Name] = podObj.Spec.NodeName
+			delete(hsh.HostSensorUnscheduledPodNames, podObj.ObjectMeta.Name)
 		} else {
-			delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
+			if podObj.Status.Phase == corev1.PodPending && len(podObj.Status.Conditions) > 0 &&
+				podObj.Status.Conditions[0].Reason == corev1.PodReasonUnschedulable {
+				nodeName := ""
+				if podObj.Spec.Affinity != nil && podObj.Spec.Affinity.NodeAffinity != nil &&
+					podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil &&
+					len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) > 0 &&
+					len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields) > 0 &&
+					len(podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values) > 0 {
+					nodeName = podObj.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
+				}
+				logger.L().Warning("One host-sensor pod is unable to schedule on node. We will fail to collect the data from this node",
+					helpers.String("message", podObj.Status.Conditions[0].Message),
+					helpers.String("nodeName", nodeName),
+					helpers.String("podName", podObj.ObjectMeta.Name))
+				if nodeName != "" {
+					hsh.HostSensorUnscheduledPodNames[podObj.ObjectMeta.Name] = nodeName
+				}
+			} else {
+				delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
+			}
 		}
 	default:
 		delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
 	}
 }

-func (hsh *HostSensorHandler) tearDownNamesapce(namespace string) error {
+func (hsh *HostSensorHandler) tearDownNamespace(namespace string) error {

 	if err := hsh.k8sObj.KubernetesClient.CoreV1().Namespaces().Delete(hsh.k8sObj.Context, namespace, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
 		return fmt.Errorf("failed to delete host-sensor namespace: %v", err)
@@ -198,7 +270,7 @@ func (hsh *HostSensorHandler) TearDown() error {
 	if err := hsh.k8sObj.KubernetesClient.AppsV1().DaemonSets(hsh.GetNamespace()).Delete(hsh.k8sObj.Context, hsh.DaemonSet.Name, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
 		return fmt.Errorf("failed to delete host-sensor daemonset: %v", err)
 	}
-	if err := hsh.tearDownNamesapce(namespace); err != nil {
+	if err := hsh.tearDownNamespace(namespace); err != nil {
 		return fmt.Errorf("failed to delete host-sensor daemonset: %v", err)
 	}
 	// TODO: wait for termination? may take up to 120 seconds!!!
@@ -212,3 +284,12 @@ func (hsh *HostSensorHandler) GetNamespace() string {
 	}
 	return hsh.DaemonSet.Namespace
 }
+
+func loadHostSensorFromFile(hostSensorYAMLFile string) (string, error) {
+	dat, err := os.ReadFile(hostSensorYAMLFile)
+	if err != nil {
+		return "", err
+	}
+	// TODO - Add file validation
+	return string(dat), err
+}
@@ -7,6 +7,8 @@ import (

 	"github.com/armosec/k8s-interface/k8sinterface"
 	"github.com/armosec/kubescape/cautils"
+	"github.com/armosec/kubescape/cautils/logger"
+	"github.com/armosec/kubescape/cautils/logger/helpers"
 	"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
 	"sigs.k8s.io/yaml"
 )
@@ -71,7 +73,7 @@ func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(path, requestKind string
 			defer wg.Done()
 			resBytes, err := hsh.HTTPGetToPod(podName, path)
 			if err != nil {
-				fmt.Printf("In sendAllPodsHTTPGETRequest failed to get data '%s' from pod '%s': %v", path, podName, err)
+				logger.L().Error("failed to get data", helpers.String("path", path), helpers.String("podName", podName), helpers.Error(err))
 			} else {
 				resLock.Lock()
 				defer resLock.Unlock()
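sendAllPodsHTTPGETRequest queries every host-sensor pod concurrently and merges the results under a mutex; the hunk above only swaps its error reporting to the structured logger. A minimal sketch of that fan-out pattern, with `collectFromPods` and `fetch` as hypothetical stand-ins for the real pod list and HTTPGetToPod:

```go
package example

import "sync"

// collectFromPods queries each pod in its own goroutine, guards the shared result
// slice with a mutex, and waits for all goroutines before returning.
func collectFromPods(podNames []string, fetch func(pod string) ([]byte, error)) [][]byte {
	var (
		wg      sync.WaitGroup
		resLock sync.Mutex
		res     [][]byte
	)
	for _, pod := range podNames {
		wg.Add(1)
		go func(pod string) {
			defer wg.Done()
			data, err := fetch(pod)
			if err != nil {
				return // the real code logs the failure and keeps the other results
			}
			resLock.Lock()
			defer resLock.Unlock()
			res = append(res, data)
		}(pod)
	}
	wg.Wait()
	return res
}
```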
@@ -89,6 +91,12 @@ func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(path, requestKind string
 	return res, nil
 }

+// return list of LinuxKernelVariables
+func (hsh *HostSensorHandler) GetKernelVariables() ([]hostsensor.HostSensorDataEnvelope, error) {
+	// loop over pods and port-forward it to each of them
+	return hsh.sendAllPodsHTTPGETRequest("/LinuxKernelVariables", "LinuxKernelVariables")
+}
+
 // return list of OpenPortsList
 func (hsh *HostSensorHandler) GetOpenPortsList() ([]hostsensor.HostSensorDataEnvelope, error) {
 	// loop over pods and port-forward it to each of them
@@ -141,7 +149,7 @@ func (hsh *HostSensorHandler) GetKubeletConfigurations() ([]hostsensor.HostSenso
 	for resIdx := range res {
 		jsonBytes, err := yaml.YAMLToJSON(res[resIdx].Data)
 		if err != nil {
-			fmt.Printf("In GetKubeletConfigurations failed to YAMLToJSON: %v;\n%v", err, res[resIdx])
+			logger.L().Error("failed to convert kubelet configurations from yaml to json", helpers.Error(err))
 			continue
 		}
 		res[resIdx].SetData(jsonBytes)
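GetKubeletConfigurations converts each pod's kubelet configuration from YAML to JSON with the sigs.k8s.io/yaml package already imported above. A minimal illustration of that conversion on plain byte slices (the envelope types are omitted here); `kubeletConfigToJSON` is an illustrative name, not kubescape's function:

```go
package example

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// kubeletConfigToJSON converts a YAML kubelet configuration into its JSON form so it
// can be consumed by JSON-based tooling downstream.
func kubeletConfigToJSON(yamlData []byte) ([]byte, error) {
	jsonBytes, err := yaml.YAMLToJSON(yamlData)
	if err != nil {
		return nil, fmt.Errorf("failed to convert kubelet configuration to JSON: %w", err)
	}
	return jsonBytes, nil
}
```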
@@ -154,7 +162,8 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
 	if hsh.DaemonSet == nil {
 		return res, nil
 	}
-	cautils.ProgressTextDisplay("Accessing host sensor")
+
+	logger.L().Debug("Accessing host sensor")
 	cautils.StartSpinner()
 	defer cautils.StopSpinner()
 	kcData, err := hsh.GetKubeletConfigurations()
@@ -192,7 +201,14 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
 		return kcData, err
 	}
 	res = append(res, kcData...)
+	// GetKernelVariables
+	kcData, err = hsh.GetKernelVariables()
+	if err != nil {
+		return kcData, err
+	}
+	res = append(res, kcData...)
+	// finish
-	cautils.SuccessTextDisplay("Read host information from host sensor")

+	logger.L().Debug("Done reading information from host sensor")
 	return res, nil
 }