Mirror of https://github.com/kubescape/kubescape.git, synced 2026-02-24 23:04:04 +00:00

Compare commits (129 commits)
5634903aa0, ce81a9cb22, cb704cb1e7, 362ea83549, 4cb7b999ad, 81482b7421, 2b7807f300, ef23d022ee,
02d7fdc4f9, ccb3351607, 72f9c6d81b, bba70b4c46, 46073e0a6c, 93a44f494d, 5c96f877ed, 23ea7e0511,
137b3d7b5d, 13ffd92210, 4725f8b3ca, 6d65a90de9, faf928527d, 18c6e80c3c, 83045c743a, 4940912784,
a7fd2bd058, aa1f61a4f8, b103e817ed, 55045badce, e951e23bc4, d467f159ad, bb2a7b8d6c, 23bb8ec482,
6c50fe1011, 4268cb31c3, 3b37d56427, f239075c26, b0c8c42c85, ea777b67ec, 2db2f55d16, cf9f34c0be,
4d4bec95f2, f3a5ce75d5, b38ce5e812, e4733fa02c, 39ea443f81, d03806aea2, 576c281150, dfabcd691a,
e2698e71a3, 6901628b5a, fc3912ca7d, c83cb4496d, a76228c1e1, 9447f2933a, 26d4664cc5, 05fa9d887d,
acdad028a3, 890ababe0a, de78615038, db35670432, 1c215c36af, 2e8f64b20a, 9c764c90e3, 95a4c19dc6,
e3352f90e1, 677a9da80a, 83e53c09eb, c7e1e251ba, aff7af5159, 7bd77d666d, 9a7eb4b9a5, 903b5f39df,
55f0ca3e9e, 3387e677ba, 5774acfc81, 0eee2d1d0a, 0c624cc576, aade1008c4, c58b099230, 786092bdaf,
80adf03926, 4b9c35d53b, f3623dccf6, b0db1e3d40, b936c3f857, 600b9a6fb0, 3bec2ef0b7, 3d8344f23c,
d87836d0a9, 42908ceb6f, 70288c94c3, 2bc63c2ab6, 609cbff2da, 3cf0931fb8, a42d2452fd, 7dd79874cc,
d1a75f076e, 08fa833f82, 45e869e0d6, 46cfc882c2, 10583a4b9b, da2adf3059, da24c9164a, 8ac41533b6,
76958f285c, 93f6f3aecf, 971f0c06e7, bd4e0483d4, 1ee1c11700, daa6db164a, eb33542e4a, a03b0c94c4,
402aea1493, 77f3806abf, 051ec71263, 38325c5af4, 589d0545cb, 32b74608bf, 98c0be147b, 9454924b9f,
7233f00c32, 905db42625, bfdf24afb4, 588269f1a0, a45283b128, 05d5ad47f2, 5ccb858d7f, c49c808730,
23d44aef7e
.github/ISSUE_TEMPLATE/feature_request.md (vendored, 17 changed lines)
@@ -6,15 +6,14 @@ labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**</br>
> A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like.**</br>
> A clear and concise description of what you want to happen.

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered.**</br>
> A clear and concise description of any alternative solutions or features you've considered.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.

**Additional context.**</br>
> Add any other context or screenshots about the feature request here.
.github/PULL_REQUEST_TEMPLATE.md (vendored, 8 changed lines)
@@ -2,11 +2,15 @@

## Screenshots - If Any (Optional)

## Issue ticket number and link
## This PR fixes:

* Resolved #

## Checklist before requesting a review
<!-- put an [x] in the box to get it checked -->

- [ ] My code follows the style guidelines of this project
- [ ] I have commented my code, particularly in hard-to-understand areas
- [ ] I have commented on my code, particularly in hard-to-understand areas
- [ ] I have performed a self-review of my code
- [ ] If it is a core feature, I have added thorough tests.
- [ ] New and existing unit tests pass locally with my changes
.github/workflows/close-typos-issues.yaml (vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
on:
  issues:
    types: [opened, labeled]

jobs:
  open_PR_message:
    if: github.event.label.name == 'typo'
    runs-on: ubuntu-latest
    steps:
      - uses: ben-z/actions-comment-on-issue@1.0.2
        with:
          message: "Hello! :wave:\n\nThis issue is being automatically closed, Please open a PR with a relevant fix."
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  auto_close_issues:
    runs-on: ubuntu-latest
    steps:
      - uses: lee-dohm/close-matching-issues@v2
        with:
          query: 'label:typo'
          token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/community.yml (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
on:
  fork:
  issues:
    types: [opened]
  issue_comment:
    types: [created]
  pull_request_target:
    types: [opened]
  pull_request_review_comment:
    types: [created]

jobs:
  welcome:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v1
      - uses: EddieHubCommunity/gh-action-community/src/welcome@main
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          issue-message: '<h3>Hey, Welcome to this repo, Congratulations on opening your issue. Keep Contributing to Kubescape</h3>'
          pr-message: '<h3>Hey, Welcome to this repo, Congratulations on opening your Pull Request. Keep Contributing to Kubescape</h3>'
          footer: '<h4>We''ll try to review and add you work as soon as possible and a maintainer will get back to you soon!</h4>'
CONTRIBUTING.md
@@ -3,13 +3,13 @@
First, it is awesome that you are considering contributing to Kubescape! Contributing is important and fun and we welcome your efforts.

When contributing, we categorize contributions into two:
* Small code changes or fixes, whose scope are limited to a single or two files
* Complex features and improvements, whose are not limited
* Small code changes or fixes, whose scope is limited to a single or two files
* Complex features and improvements, that are not limited

If you have a small change, feel free to fire up a Pull Request.

When planning a bigger change, please first discuss the change you wish to make via issue,
email, or any other method with the owners of this repository before making a change. Most likely your changes or features are great, but sometimes we might already going to this direction (or the exact opposite ;-) ) and we don't want to waste your time.
email, or any other method with the owners of this repository before making a change. Most likely your changes or features are great, but sometimes we might be already going in this direction (or the exact opposite ;-) ) and we don't want to waste your time.

Please note we have a code of conduct, please follow it in all your interactions with the project.

@@ -20,14 +20,14 @@ Please note we have a code of conduct, please follow it in all your interactions
2. Update the README.md with details of changes to the interface, this includes new environment
variables, exposed ports, useful file locations and container parameters.
3. Open Pull Request to `dev` branch - we test the component before merging into the `master` branch
4. We will merge the Pull Request in once you have the sign-off.
4. We will merge the Pull Request once you have the sign-off.

## Code of Conduct

### Our Pledge

In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
contributors and maintainers pledge to make participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
nationality, personal appearance, race, religion, or sexual identity and

@@ -55,12 +55,12 @@ advances
* Other conduct which could reasonably be considered inappropriate in a
professional setting

We will distance those who are constantly adhere to unacceptable behavior.
We will distance those who constantly adhere to unacceptable behavior.

### Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
behavior and are expected to take appropriate and fair corrective actions in
response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or

@@ -97,4 +97,4 @@ This Code of Conduct is adapted from the [Contributor Covenant][homepage], versi
available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
[version]: http://contributor-covenant.org/version/1/4/
README.md (31 changed lines)
@@ -1,11 +1,17 @@
|
||||
<img src="docs/kubescape.png" width="300" alt="logo" align="center">
|
||||
<div align="center">
|
||||
<img src="docs/kubescape.png" width="300" alt="logo">
|
||||
</div>
|
||||
|
||||
---
|
||||
|
||||
[](https://github.com/kubescape/kubescape/actions/workflows/build.yaml)
|
||||
[](https://goreportcard.com/report/github.com/kubescape/kubescape)
|
||||
[](https://gitpod.io/#https://github.com/kubescape/kubescape)
|
||||
|
||||
:sunglasses: [Want to contribute?](#being-a-part-of-the-team) :innocent:
|
||||
|
||||
|
||||
|
||||
Kubescape is a K8s open-source tool providing a multi-cloud K8s single pane of glass, including risk analysis, security compliance, RBAC visualizer, and image vulnerabilities scanning.
|
||||
Kubescape is a K8s open-source tool providing a Kubernetes single pane of glass, including risk analysis, security compliance, RBAC visualizer, and image vulnerabilities scanning.
|
||||
Kubescape scans K8s clusters, YAML files, and HELM charts, detecting misconfigurations according to multiple frameworks (such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo/?utm_source=github&utm_medium=repository), [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/)), software vulnerabilities, and RBAC (role-based-access-control) violations at early stages of the CI/CD pipeline, calculates risk score instantly and shows risk trends over time.
|
||||
|
||||
It has become one of the fastest-growing Kubernetes tools among developers due to its easy-to-use CLI interface, flexible output formats, and automated scanning capabilities, saving Kubernetes users and admins precious time, effort, and resources.
|
||||
@@ -45,19 +51,18 @@ kubescape scan --submit --enable-host-scan --verbose
|
||||
|
||||
</br>
|
||||
|
||||
### Click [👍](https://github.com/kubescape/kubescape/stargazers) if you want us to continue to develop and improve Kubescape 😀
|
||||
### Please [star ⭐](https://github.com/kubescape/kubescape/stargazers) the repo if you want us to continue developing and improving Kubescape 😀
|
||||
|
||||
</br>
|
||||
|
||||
|
||||
# Being a part of the team
|
||||
|
||||
We invite you to our team! We are excited about this project and want to return the love we get.
|
||||
|
||||
Want to contribute? Want to discuss something? Have an issue?
|
||||
[Want to contribute?](https://github.com/kubescape/kubescape/blob/master/CONTRIBUTING.md) Want to discuss something? Have an issue? Please make sure that you follow our [Code Of Conduct](https://github.com/kubescape/kubescape/blob/master/CODE_OF_CONDUCT.md) .
|
||||
|
||||
* Feel free to pick a task from the [roadmap](docs/roadmap.md) or suggest a feature of your own. [Contact us](MAINTAINERS.md) directly for more information :)
|
||||
* Open an issue, we are trying to respond within 48 hours
|
||||
* Feel free to pick a task from the [issues](https://github.com/kubescape/kubescape/issues?q=is%3Aissue+is%3Aopen+label%3A%22open+for+contribution%22), [roadmap](docs/roadmap.md) or suggest a feature of your own. [Contact us](MAINTAINERS.md) directly for more information :)
|
||||
* [Open an issue](https://github.com/kubescape/kubescape/issues/new/choose) , we are trying to respond within 48 hours
|
||||
* [Join us](https://discord.com/invite/WKZRaCtBxN) in the discussion on our discord server!
|
||||
|
||||
[<img src="docs/discord-banner.png" width="100" alt="logo" align="center">](https://discord.com/invite/WKZRaCtBxN)
|
||||
@@ -79,7 +84,9 @@ Want to contribute? Want to discuss something? Have an issue?
|
||||
* [Scan Kubescape on an air-gapped environment (offline support)](https://youtu.be/IGXL9s37smM)
|
||||
* [Managing exceptions in the Kubescape SaaS version](https://youtu.be/OzpvxGmCR80)
|
||||
* [Configure and run customized frameworks](https://youtu.be/12Sanq_rEhs)
|
||||
* Customize control configurations. [Kubescape CLI](https://youtu.be/955psg6TVu4), [Kubescape SaaS](https://youtu.be/lIMVSVhH33o)
|
||||
* Customize control configurations:
|
||||
- [Kubescape CLI](https://youtu.be/955psg6TVu4)
|
||||
- [Kubescape SaaS](https://youtu.be/lIMVSVhH33o)
|
||||
|
||||
## Install on Windows
|
||||
|
||||
@@ -186,12 +193,12 @@ kubescape scan --include-namespaces development,staging,production
|
||||
kubescape scan --exclude-namespaces kube-system,kube-public
|
||||
```
|
||||
|
||||
#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI) Submit the results in case the directory is a git repo. [docs](https://hub.armosec.io/docs/repository-scanning?utm_source=github&utm_medium=repository)
|
||||
#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI). Submit the results in case the directory is a git repo. [docs](https://hub.armosec.io/docs/repository-scanning?utm_source=github&utm_medium=repository)
|
||||
```
|
||||
kubescape scan *.yaml --submit
|
||||
```
|
||||
|
||||
#### Scan kubernetes manifest files from a git repository [and submit the results](https://hub.armosec.io/docs/repository-scanning?utm_source=github&utm_medium=repository)
|
||||
#### Scan Kubernetes manifest files from a git repository [and submit the results](https://hub.armosec.io/docs/repository-scanning?utm_source=github&utm_medium=repository)
|
||||
```
|
||||
kubescape scan https://github.com/kubescape/kubescape --submit
|
||||
```
|
||||
@@ -260,7 +267,7 @@ kubescape scan --use-artifacts-from path/to/local/dir
|
||||
|
||||
You can also download a single artifact and scan with the `--use-from` flag
|
||||
|
||||
1. Download and save in file, if file name is not specified, will save in `~/.kubescape/<framework name>.json`
|
||||
1. Download and save in a file, if the file name is not specified, will save in `~/.kubescape/<framework name>.json`
|
||||
```
|
||||
kubescape download framework nsa --output /path/nsa.json
|
||||
```
|
||||
|
||||
build.py (1 changed line)
@@ -25,7 +25,6 @@ def get_build_dir():
|
||||
|
||||
def get_package_name():
|
||||
package_name = "kubescape"
|
||||
# if platform.system() == "Windows": package_name += ".exe"
|
||||
|
||||
return package_name
|
||||
|
||||
|
||||
@@ -20,7 +20,6 @@ WORKDIR /work
|
||||
ADD . .
|
||||
|
||||
# install libgit2
|
||||
WORKDIR /work
|
||||
RUN rm -rf git2go && make libgit2
|
||||
|
||||
# build kubescape server
|
||||
@@ -34,16 +33,16 @@ RUN python build.py
|
||||
|
||||
RUN /work/build/ubuntu-latest/kubescape download artifacts -o /work/artifacts
|
||||
|
||||
FROM alpine
|
||||
FROM alpine:3.16.2
|
||||
|
||||
RUN addgroup -S armo && adduser -S armo -G armo
|
||||
|
||||
RUN mkdir /home/armo/.kubescape
|
||||
COPY --from=builder /work/artifacts/ /home/armo/.kubescape
|
||||
|
||||
RUN chown -R armo:armo /home/armo/.kubescape
|
||||
|
||||
USER armo
|
||||
|
||||
WORKDIR /home/armo
|
||||
|
||||
COPY --from=builder /work/httphandler/build/ubuntu-latest/kubescape /usr/bin/ksserver
|
||||
|
||||
@@ -43,7 +43,3 @@ func GetCompletionCmd() *cobra.Command {
|
||||
}
|
||||
return completionCmd
|
||||
}
|
||||
|
||||
// func init() {
|
||||
// rootCmd.AddCommand(completionCmd)
|
||||
// }
|
||||
|
||||
@@ -63,6 +63,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
|
||||
rootCmd.PersistentFlags().StringVarP(&rootInfo.Logger, "logger", "l", helpers.InfoLevel.String(), fmt.Sprintf("Logger level. Supported: %s [$KS_LOGGER]", strings.Join(helpers.SupportedLevels(), "/")))
|
||||
rootCmd.PersistentFlags().StringVar(&rootInfo.CacheDir, "cache-dir", getter.DefaultLocalStore, "Cache directory [$KS_CACHE_DIR]")
|
||||
rootCmd.PersistentFlags().BoolVarP(&rootInfo.DisableColor, "disable-color", "", false, "Disable Color output for logging")
|
||||
rootCmd.PersistentFlags().BoolVarP(&rootInfo.EnableColor, "enable-color", "", false, "Force enable Color output for logging")
|
||||
|
||||
cobra.OnInitialize(initLogger, initLoggerLevel, initEnvironment, initCacheDir)
|
||||
|
||||
|
||||
@@ -16,6 +16,7 @@ const envFlagUsage = "Send report results to specific URL. Format:<ReportReceive
|
||||
|
||||
func initLogger() {
|
||||
logger.DisableColor(rootInfo.DisableColor)
|
||||
logger.EnableColor(rootInfo.EnableColor)
|
||||
|
||||
if rootInfo.LoggerName == "" {
|
||||
if l := os.Getenv("KS_LOGGER_NAME"); l != "" {
|
||||
|
||||
@@ -70,7 +70,7 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
|
||||
|
||||
if len(args) > 1 {
|
||||
if len(args[1:]) == 0 || args[1] != "-" {
|
||||
scanInfo.InputPatterns = []string{args[1]}
|
||||
scanInfo.InputPatterns = args[1:]
|
||||
} else { // store stdin to file - do NOT move to separate function !!
|
||||
tempFile, err := os.CreateTemp(".", "tmp-kubescape*.yaml")
|
||||
if err != nil {
|
||||
|
||||
@@ -80,7 +80,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
|
||||
}
|
||||
if len(args) > 1 {
|
||||
if len(args[1:]) == 0 || args[1] != "-" {
|
||||
scanInfo.InputPatterns = []string{args[1]}
|
||||
scanInfo.InputPatterns = args[1:]
|
||||
} else { // store stdin to file - do NOT move to separate function !!
|
||||
tempFile, err := os.CreateTemp(".", "tmp-kubescape*.yaml")
|
||||
if err != nil {
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -182,11 +183,11 @@ TODO - supprot:
|
||||
KS_CACHE // path to cached files
|
||||
*/
|
||||
type ClusterConfig struct {
|
||||
backendAPI getter.IBackend
|
||||
k8s *k8sinterface.KubernetesApi
|
||||
configObj *ConfigObj
|
||||
configMapName string
|
||||
configMapNamespace string
|
||||
backendAPI getter.IBackend
|
||||
configObj *ConfigObj
|
||||
}
|
||||
|
||||
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, credentials *Credentials, clusterName string) *ClusterConfig {
|
||||
@@ -468,7 +469,11 @@ func DeleteConfigFile() error {
|
||||
}
|
||||
|
||||
func AdoptClusterName(clusterName string) string {
|
||||
return strings.ReplaceAll(clusterName, "/", "-")
|
||||
re, err := regexp.Compile(`[^\w]+`)
|
||||
if err != nil {
|
||||
return clusterName
|
||||
}
|
||||
return re.ReplaceAllString(clusterName, "-")
|
||||
}
|
||||
|
||||
func getConfigMapName() string {
|
||||
|
||||
@@ -191,3 +191,34 @@ func TestLoadConfigFromData(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestAdoptClusterName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
clusterName string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "replace 1",
|
||||
clusterName: "my-name__is--ks",
|
||||
want: "my-name__is-ks",
|
||||
},
|
||||
{
|
||||
name: "replace 2",
|
||||
clusterName: "my-name1",
|
||||
want: "my-name1",
|
||||
},
|
||||
{
|
||||
name: "replace 3",
|
||||
clusterName: "my:name",
|
||||
want: "my-name",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := AdoptClusterName(tt.clusterName); got != tt.want {
|
||||
t.Errorf("AdoptClusterName() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
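
For context, here is a minimal standalone sketch (an illustration, not code from the diff) of what the new `AdoptClusterName` pattern does: `[^\w]+` collapses every run of non-word characters into a single dash, while underscores count as word characters and are kept, which is why the test above expects `my-name__is--ks` to keep its double underscore but lose the double dash.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern the new AdoptClusterName uses: any run of characters
	// outside [0-9A-Za-z_] is replaced with a single "-".
	re := regexp.MustCompile(`[^\w]+`)

	fmt.Println(re.ReplaceAllString("my-name__is--ks", "-")) // my-name__is-ks
	fmt.Println(re.ReplaceAllString("my:name", "-"))         // my-name
	// Hypothetical input, e.g. an ARN-style cluster name:
	fmt.Println(re.ReplaceAllString("arn:aws:eks:eu-west-1/prod", "-")) // arn-aws-eks-eu-west-1-prod
}
```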
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
apis "github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/prioritization"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
)
|
||||
@@ -14,15 +15,16 @@ type K8SResources map[string][]string
|
||||
type KSResources map[string][]string
|
||||
|
||||
type OPASessionObj struct {
|
||||
K8SResources *K8SResources // input k8s objects
|
||||
ArmoResource *KSResources // input ARMO objects
|
||||
Policies []reporthandling.Framework // list of frameworks to scan
|
||||
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<rtesource ID>]<resource>
|
||||
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<rtesource ID>]<resource result>
|
||||
ResourceSource map[string]reporthandling.Source // resources sources, map[<rtesource ID>]<resource result>
|
||||
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
|
||||
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
|
||||
RegoInputData RegoInputData // input passed to rgo for scanning. map[<control name>][<input arguments>]
|
||||
K8SResources *K8SResources // input k8s objects
|
||||
ArmoResource *KSResources // input ARMO objects
|
||||
Policies []reporthandling.Framework // list of frameworks to scan
|
||||
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<resource ID>]<resource>
|
||||
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<resource ID>]<resource result>
|
||||
ResourceSource map[string]reporthandling.Source // resources sources, map[<resource ID>]<resource result>
|
||||
ResourcesPrioritized map[string]prioritization.PrioritizedResource // resources prioritization information, map[<resource ID>]<prioritized resource>
|
||||
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
|
||||
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
|
||||
RegoInputData RegoInputData // input passed to rgo for scanning. map[<control name>][<input arguments>]
|
||||
Metadata *reporthandlingv2.Metadata
|
||||
InfoMap map[string]apis.StatusInfo // Map errors of resources to StatusInfo
|
||||
ResourceToControlsMap map[string][]string // map[<apigroup/apiversion/resource>] = [<control_IDs>]
|
||||
@@ -36,6 +38,7 @@ func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SRe
|
||||
K8SResources: k8sResources,
|
||||
AllResources: make(map[string]workloadinterface.IMetadata),
|
||||
ResourcesResult: make(map[string]resourcesresults.Result),
|
||||
ResourcesPrioritized: make(map[string]prioritization.PrioritizedResource),
|
||||
InfoMap: make(map[string]apis.StatusInfo),
|
||||
ResourceToControlsMap: make(map[string][]string),
|
||||
ResourceSource: make(map[string]reporthandling.Source),
|
||||
@@ -63,11 +66,12 @@ func (sessionObj *OPASessionObj) SetNumberOfWorkerNodes(n int) {
|
||||
|
||||
func NewOPASessionObjMock() *OPASessionObj {
|
||||
return &OPASessionObj{
|
||||
Policies: nil,
|
||||
K8SResources: nil,
|
||||
AllResources: make(map[string]workloadinterface.IMetadata),
|
||||
ResourcesResult: make(map[string]resourcesresults.Result),
|
||||
Report: &reporthandlingv2.PostureReport{},
|
||||
Policies: nil,
|
||||
K8SResources: nil,
|
||||
AllResources: make(map[string]workloadinterface.IMetadata),
|
||||
ResourcesResult: make(map[string]resourcesresults.Result),
|
||||
ResourcesPrioritized: make(map[string]prioritization.PrioritizedResource),
|
||||
Report: &reporthandlingv2.PostureReport{},
|
||||
Metadata: &reporthandlingv2.Metadata{
|
||||
ScanMetadata: reporthandlingv2.ScanMetadata{
|
||||
ScanningTarget: 0,
|
||||
@@ -94,6 +98,6 @@ type RegoInputData struct {
|
||||
}
|
||||
|
||||
type Policies struct {
|
||||
Frameworks []string
|
||||
Controls map[string]reporthandling.Control // map[<control ID>]<control>
|
||||
Frameworks []string
|
||||
}
|
||||
|
||||
@@ -47,7 +47,7 @@ func LoadResourcesFromHelmCharts(basePath string) (map[string][]workloadinterfac
|
||||
if err == nil {
|
||||
wls, errs := chart.GetWorkloadsWithDefaultValues()
|
||||
if len(errs) > 0 {
|
||||
logger.L().Error(fmt.Sprintf("Rendering of Helm chart template failed: %v", errs))
|
||||
logger.L().Error(fmt.Sprintf("Rendering of Helm chart template '%s', failed: %v", chart.GetName(), errs))
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -190,8 +190,10 @@ func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, error) {
|
||||
}
|
||||
if obj, ok := j.(map[string]interface{}); ok {
|
||||
if o := objectsenvelopes.NewObject(obj); o != nil {
|
||||
if o.GetKind() == "List" {
|
||||
yamlObjs = append(yamlObjs, handleListObject(o)...)
|
||||
if o.GetObjectType() == workloadinterface.TypeListWorkloads {
|
||||
if list := workloadinterface.NewListWorkloadsObj(o.GetObject()); list != nil {
|
||||
yamlObjs = append(yamlObjs, list.GetItems()...)
|
||||
}
|
||||
} else {
|
||||
yamlObjs = append(yamlObjs, o)
|
||||
}
|
||||
@@ -323,20 +325,3 @@ func GetFileFormat(filePath string) FileFormat {
|
||||
return FileFormat(filePath)
|
||||
}
|
||||
}
|
||||
|
||||
// handleListObject handle a List manifest
|
||||
func handleListObject(obj workloadinterface.IMetadata) []workloadinterface.IMetadata {
|
||||
yamlObjs := []workloadinterface.IMetadata{}
|
||||
if i, ok := workloadinterface.InspectMap(obj.GetObject(), "items"); ok && i != nil {
|
||||
if items, ok := i.([]interface{}); ok && items != nil {
|
||||
for item := range items {
|
||||
if m, ok := items[item].(map[string]interface{}); ok && m != nil {
|
||||
if o := objectsenvelopes.NewObject(m); o != nil {
|
||||
yamlObjs = append(yamlObjs, o)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return yamlObjs
|
||||
}
|
||||
|
||||
@@ -8,8 +8,8 @@ type FeLoginData struct {
|
||||
type FeLoginResponse struct {
|
||||
Token string `json:"accessToken"`
|
||||
RefreshToken string `json:"refreshToken"`
|
||||
ExpiresIn int32 `json:"expiresIn"`
|
||||
Expires string `json:"expires"`
|
||||
ExpiresIn int32 `json:"expiresIn"`
|
||||
}
|
||||
|
||||
type KSCloudSelectCustomer struct {
|
||||
|
||||
@@ -42,8 +42,8 @@ type KSCloudAPI struct {
|
||||
accountID string
|
||||
clientID string
|
||||
secretKey string
|
||||
feToken FeLoginResponse
|
||||
authCookie string
|
||||
feToken FeLoginResponse
|
||||
loggedIn bool
|
||||
}
|
||||
|
||||
|
||||
@@ -56,7 +56,9 @@ func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, con
|
||||
|
||||
rulesv1 := map[string]reporthandling.RuleReport{}
|
||||
|
||||
for _, resourceID := range crv2.ListResourcesIDs().All() {
|
||||
iter := crv2.ListResourcesIDs().All()
|
||||
for iter.HasNext() {
|
||||
resourceID := iter.Next()
|
||||
if result, ok := opaSessionObj.ResourcesResult[resourceID]; ok {
|
||||
for _, rulev2 := range result.ListRulesOfControl(crv2.GetID(), "") {
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ type RootInfo struct {
|
||||
LoggerName string // logger name ("pretty"/"zap"/"none")
|
||||
CacheDir string // cached dir
|
||||
DisableColor bool // Disable Color
|
||||
EnableColor bool // Force enable Color
|
||||
|
||||
KSCloudBEURLs string // Kubescape Cloud URL
|
||||
KSCloudBEURLsDep string // Kubescape Cloud URL
|
||||
|
||||
@@ -152,6 +152,16 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
return resultsHandling, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
// ======================== prioritization ===================
|
||||
priotizationHandler := resourcesprioritization.NewResourcesPrioritizationHandler(true)
|
||||
if err := priotizationHandler.PrioritizeResources(scanData); err != nil {
|
||||
return resultsHandling, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
// ========================= results handling =====================
|
||||
resultsHandling.SetData(scanData)
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ spec:
|
||||
effect: NoSchedule
|
||||
containers:
|
||||
- name: host-sensor
|
||||
image: quay.io/armosec/kube-host-sensor:latest
|
||||
image: quay.io/kubescape/host-scanner:latest
|
||||
securityContext:
|
||||
privileged: true
|
||||
readOnlyRootFilesystem: true
|
||||
|
||||
@@ -122,7 +122,7 @@ func (opap *OPAProcessor) processControl(control *reporthandling.Control) (map[s
|
||||
|
||||
// ruleResults := make(map[string][]resourcesresults.ResourceAssociatedRule)
|
||||
for i := range control.Rules {
|
||||
resourceAssociatedRule, err := opap.processRule(&control.Rules[i])
|
||||
resourceAssociatedRule, err := opap.processRule(&control.Rules[i], control.FixedInput)
|
||||
if err != nil {
|
||||
logger.L().Error(err.Error())
|
||||
continue
|
||||
@@ -150,10 +150,15 @@ func (opap *OPAProcessor) processControl(control *reporthandling.Control) (map[s
|
||||
return resourcesAssociatedControl, errs
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (map[string]*resourcesresults.ResourceAssociatedRule, error) {
|
||||
func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule, fixedControlInputs map[string][]string) (map[string]*resourcesresults.ResourceAssociatedRule, error) {
|
||||
|
||||
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs) // get store
|
||||
|
||||
// Merge configurable control input and fixed control input
|
||||
for k, v := range fixedControlInputs {
|
||||
postureControlInputs[k] = v
|
||||
}
|
||||
|
||||
inputResources, err := reporthandling.RegoResourcesAggregator(rule, getAllSupportedObjects(opap.K8SResources, opap.ArmoResource, opap.AllResources, rule))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error getting aggregated k8sObjects: %s", err.Error())
|
||||
|
||||
@@ -42,7 +42,7 @@ func TestProcessResourcesResult(t *testing.T) {
|
||||
|
||||
assert.Equal(t, 1, len(opaSessionObj.ResourcesResult))
|
||||
res := opaSessionObj.ResourcesResult[deployment.GetID()]
|
||||
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
|
||||
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
|
||||
assert.True(t, res.GetStatus(nil).IsFailed())
|
||||
@@ -51,8 +51,8 @@ func TestProcessResourcesResult(t *testing.T) {
|
||||
|
||||
opap.updateResults()
|
||||
res = opaSessionObj.ResourcesResult[deployment.GetID()]
|
||||
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
|
||||
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
|
||||
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
|
||||
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
|
||||
assert.True(t, res.GetStatus(nil).IsFailed())
|
||||
@@ -67,13 +67,13 @@ func TestProcessResourcesResult(t *testing.T) {
|
||||
assert.Equal(t, 0, summaryDetails.NumberOfResources().Passed())
|
||||
|
||||
// test resource listing
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().All()))
|
||||
assert.Equal(t, 1, summaryDetails.ListResourcesIDs().All().Len())
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Excluded()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
|
||||
|
||||
// test control listing
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).All()), summaryDetails.NumberOfControls().All())
|
||||
assert.Equal(t, res.ListControlsIDs(nil).All().Len(), summaryDetails.NumberOfControls().All())
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Passed()), summaryDetails.NumberOfControls().Passed())
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Failed()), summaryDetails.NumberOfControls().Failed())
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Excluded()), summaryDetails.NumberOfControls().Excluded())
|
||||
@@ -83,7 +83,7 @@ func TestProcessResourcesResult(t *testing.T) {
|
||||
opap.updateResults()
|
||||
|
||||
res = opaSessionObj.ResourcesResult[deployment.GetID()]
|
||||
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
|
||||
assert.Equal(t, 2, res.ListControlsIDs(nil).All().Len())
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Excluded()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
|
||||
assert.True(t, res.GetStatus(nil).IsExcluded())
|
||||
@@ -93,7 +93,7 @@ func TestProcessResourcesResult(t *testing.T) {
|
||||
|
||||
// test resource listing
|
||||
summaryDetails = opaSessionObj.Report.SummaryDetails
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().All()))
|
||||
assert.Equal(t, 1, summaryDetails.ListResourcesIDs().All().Len())
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Excluded()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
|
||||
|
||||
@@ -37,19 +37,62 @@ func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASess
|
||||
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
|
||||
k8sResources := setK8sResourceMap(sessionObj.Policies)
|
||||
allResources := map[string]workloadinterface.IMetadata{}
|
||||
workloadIDToSource := make(map[string]reporthandling.Source, 0)
|
||||
ksResources := &cautils.KSResources{}
|
||||
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
|
||||
if len(fileHandler.inputPatterns) == 0 {
|
||||
return nil, nil, nil, fmt.Errorf("missing input")
|
||||
}
|
||||
path := fileHandler.inputPatterns[0]
|
||||
|
||||
logger.L().Info("Accessing local objects")
|
||||
cautils.StartSpinner()
|
||||
|
||||
for path := range fileHandler.inputPatterns {
|
||||
workloadIDToSource, workloads, err := getResourcesFromPath(fileHandler.inputPatterns[path])
|
||||
if err != nil {
|
||||
return nil, allResources, nil, err
|
||||
}
|
||||
if len(workloads) == 0 {
|
||||
logger.L().Debug("path ignored because contains only a non-kubernetes file", helpers.String("path", fileHandler.inputPatterns[path]))
|
||||
}
|
||||
|
||||
for k, v := range workloadIDToSource {
|
||||
sessionObj.ResourceSource[k] = v
|
||||
}
|
||||
|
||||
// map all resources: map["/apiVersion/version/kind"][]<k8s workloads>
|
||||
mappedResources := mapResources(workloads)
|
||||
|
||||
// save only relevant resources
|
||||
for i := range mappedResources {
|
||||
if _, ok := (*k8sResources)[i]; ok {
|
||||
ids := []string{}
|
||||
for j := range mappedResources[i] {
|
||||
ids = append(ids, mappedResources[i][j].GetID())
|
||||
allResources[mappedResources[i][j].GetID()] = mappedResources[i][j]
|
||||
}
|
||||
(*k8sResources)[i] = append((*k8sResources)[i], ids...)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if err := fileHandler.registryAdaptors.collectImagesVulnerabilities(k8sResources, allResources, ksResources); err != nil {
|
||||
logger.L().Warning("failed to collect images vulnerabilities", helpers.Error(err))
|
||||
}
|
||||
|
||||
cautils.StopSpinner()
|
||||
logger.L().Success("Done accessing local objects")
|
||||
|
||||
return k8sResources, allResources, ksResources, nil
|
||||
}
|
||||
|
||||
func getResourcesFromPath(path string) (map[string]reporthandling.Source, []workloadinterface.IMetadata, error) {
|
||||
workloadIDToSource := make(map[string]reporthandling.Source, 0)
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
|
||||
clonedRepo, err := cloneGitRepo(&path)
|
||||
if err != nil {
|
||||
return nil, allResources, nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
if clonedRepo != "" {
|
||||
defer os.RemoveAll(clonedRepo)
|
||||
@@ -63,9 +106,6 @@ func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASess
|
||||
}
|
||||
|
||||
// load resource from local file system
|
||||
logger.L().Info("Accessing local objects")
|
||||
cautils.StartSpinner()
|
||||
|
||||
sourceToWorkloads := cautils.LoadResourcesFromFiles(path, repoRoot)
|
||||
|
||||
// update workloads and workloadIDToSource
|
||||
@@ -156,37 +196,7 @@ func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASess
|
||||
logger.L().Debug("helm templates found in local storage", helpers.Int("helmTemplates", len(helmSourceToWorkloads)), helpers.Int("workloads", len(workloads)))
|
||||
}
|
||||
|
||||
// addCommitData(fileHandler.inputPatterns[0], workloadIDToSource)
|
||||
|
||||
if len(workloads) == 0 {
|
||||
return nil, allResources, nil, fmt.Errorf("empty list of workloads - no workloads found")
|
||||
}
|
||||
|
||||
sessionObj.ResourceSource = workloadIDToSource
|
||||
|
||||
// map all resources: map["/apiVersion/version/kind"][]<k8s workloads>
|
||||
mappedResources := mapResources(workloads)
|
||||
|
||||
// save only relevant resources
|
||||
for i := range mappedResources {
|
||||
if _, ok := (*k8sResources)[i]; ok {
|
||||
ids := []string{}
|
||||
for j := range mappedResources[i] {
|
||||
ids = append(ids, mappedResources[i][j].GetID())
|
||||
allResources[mappedResources[i][j].GetID()] = mappedResources[i][j]
|
||||
}
|
||||
(*k8sResources)[i] = ids
|
||||
}
|
||||
}
|
||||
|
||||
if err := fileHandler.registryAdaptors.collectImagesVulnerabilities(k8sResources, allResources, ksResources); err != nil {
|
||||
logger.L().Warning("failed to collect images vulnerabilities", helpers.Error(err))
|
||||
}
|
||||
|
||||
cautils.StopSpinner()
|
||||
logger.L().Success("Accessed to local objects")
|
||||
|
||||
return k8sResources, allResources, ksResources, nil
|
||||
return workloadIDToSource, workloads, nil
|
||||
}
|
||||
|
||||
func (fileHandler *FileResourceHandler) GetClusterAPIServerInfo() *version.Info {
|
||||
|
||||
@@ -18,7 +18,7 @@ func cloneGitRepo(path *string) (string, error) {
|
||||
var clonedDir string
|
||||
|
||||
// Clone git repository if needed
|
||||
gitURL, err := giturl.NewGitURL(*path)
|
||||
gitURL, err := giturl.NewGitAPI(*path)
|
||||
if err == nil {
|
||||
logger.L().Info("cloning", helpers.String("repository url", gitURL.GetURL().String()))
|
||||
cautils.StartSpinner()
|
||||
|
||||
@@ -104,7 +104,9 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio
|
||||
// using hostSensor mock
|
||||
cautils.SetInfoMapForResources("failed to init host scanner", hostResources, sessionObj.InfoMap)
|
||||
} else {
|
||||
sessionObj.InfoMap = infoMap
|
||||
if len(infoMap) > 0 {
|
||||
sessionObj.InfoMap = infoMap
|
||||
}
|
||||
}
|
||||
} else {
|
||||
cautils.SetInfoMapForResources("enable-host-scan flag not used. For more information: https://hub.armosec.io/docs/host-sensor", hostResources, sessionObj.InfoMap)
|
||||
|
||||
@@ -238,8 +238,8 @@ func TestIsMasterNodeTaints(t *testing.T) {
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/kube-host-sensor@sha256:82139d2561039726be060df2878ef023c59df7c536fbd7f6d766af5a99569fee",
|
||||
"quay.io/armosec/kube-host-sensor:latest"
|
||||
"quay.io/kubescape/host-scanner@sha256:82139d2561039726be060df2878ef023c59df7c536fbd7f6d766af5a99569fee",
|
||||
"quay.io/kubescape/host-scanner:latest"
|
||||
],
|
||||
"sizeBytes": 11796788
|
||||
},
|
||||
@@ -503,8 +503,8 @@ func TestIsMasterNodeTaints(t *testing.T) {
|
||||
},
|
||||
{
|
||||
"names": [
|
||||
"quay.io/armosec/kube-host-sensor@sha256:82139d2561039726be060df2878ef023c59df7c536fbd7f6d766af5a99569fee",
|
||||
"quay.io/armosec/kube-host-sensor:latest"
|
||||
"quay.io/kubescape/host-scanner@sha256:82139d2561039726be060df2878ef023c59df7c536fbd7f6d766af5a99569fee",
|
||||
"quay.io/kubescape/host-scanner:latest"
|
||||
],
|
||||
"sizeBytes": 11796788
|
||||
},
|
||||
|
||||
@@ -1,16 +1,45 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
nethttp "net/http"
|
||||
"os"
|
||||
|
||||
giturl "github.com/armosec/go-git-url"
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/go-git/go-git/v5/plumbing/transport"
|
||||
"github.com/go-git/go-git/v5/plumbing/transport/http"
|
||||
)
|
||||
|
||||
// To Check if the given repository is Public(No Authentication needed), send a HTTP GET request to the URL
|
||||
// If response code is 200, the repository is Public.
|
||||
func isGitRepoPublic(URL string) bool {
|
||||
resp, err := nethttp.Get(URL)
|
||||
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
// if the status code is 200, our get request is successful.
|
||||
// It only happens when the repository is public.
|
||||
if resp.StatusCode == 200 {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if the GITHUB_TOKEN is present
|
||||
func isGitTokenPresent(gitURL giturl.IGitAPI) bool {
|
||||
if token := gitURL.GetToken(); token == "" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// cloneRepo clones a repository to a local temporary directory and returns the directory
|
||||
func cloneRepo(gitURL giturl.IGitURL) (string, error) {
|
||||
func cloneRepo(gitURL giturl.IGitAPI) (string, error) {
|
||||
|
||||
// Create temp directory
|
||||
tmpDir, err := os.MkdirTemp("", "")
|
||||
@@ -18,9 +47,31 @@ func cloneRepo(gitURL giturl.IGitURL) (string, error) {
|
||||
return "", fmt.Errorf("failed to create temporary directory: %w", err)
|
||||
}
|
||||
|
||||
// Clone option
|
||||
// Get the URL to clone
|
||||
cloneURL := gitURL.GetHttpCloneURL()
|
||||
cloneOpts := git.CloneOptions{URL: cloneURL}
|
||||
|
||||
isGitRepoPublic := isGitRepoPublic(cloneURL)
|
||||
|
||||
// Declare the authentication variable required for cloneOptions
|
||||
var auth transport.AuthMethod
|
||||
|
||||
if isGitRepoPublic {
|
||||
// No authentication needed if repository is public
|
||||
auth = nil
|
||||
} else {
|
||||
|
||||
// Return Error if the GITHUB_TOKEN is not present
|
||||
if isGitTokenPresent := isGitTokenPresent(gitURL); !isGitTokenPresent {
|
||||
return "", fmt.Errorf("%w", errors.New("GITHUB_TOKEN is not present"))
|
||||
}
|
||||
auth = &http.BasicAuth{
|
||||
Username: "anything Except Empty String",
|
||||
Password: gitURL.GetToken(),
|
||||
}
|
||||
}
|
||||
|
||||
// Clone option
|
||||
cloneOpts := git.CloneOptions{URL: cloneURL, Auth: auth}
|
||||
if gitURL.GetBranchName() != "" {
|
||||
cloneOpts.ReferenceName = plumbing.NewBranchReferenceName(gitURL.GetBranchName())
|
||||
cloneOpts.SingleBranch = true
|
||||
|
||||
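
To make the authentication flow above easier to follow, here is a simplified, self-contained sketch of the same decision (an illustration under stated assumptions, not the code added by the diff): probe the clone URL with an unauthenticated GET, clone anonymously when it answers 200, and otherwise require a token and pass it as HTTP basic-auth credentials. Reading the token from the GITHUB_TOKEN environment variable is an assumption made for the sketch; the real code obtains it through gitURL.GetToken().

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"os"

	"github.com/go-git/go-git/v5"
	githttp "github.com/go-git/go-git/v5/plumbing/transport/http"
)

// isPublic mirrors isGitRepoPublic: a 200 response to an unauthenticated GET
// means the repository can be cloned without credentials.
func isPublic(url string) bool {
	resp, err := http.Get(url)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK
}

func clone(cloneURL, dir string) error {
	opts := git.CloneOptions{URL: cloneURL}
	if !isPublic(cloneURL) {
		token := os.Getenv("GITHUB_TOKEN") // assumption: token taken from the environment for this sketch
		if token == "" {
			return errors.New("GITHUB_TOKEN is not present")
		}
		// With token auth over HTTPS the username only has to be non-empty.
		opts.Auth = &githttp.BasicAuth{Username: "token", Password: token}
	}
	_, err := git.PlainClone(dir, false, &opts)
	return err
}

func main() {
	if err := clone("https://github.com/kubescape/kubescape", os.TempDir()+"/ks-clone"); err != nil {
		fmt.Println("clone failed:", err)
	}
}
```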
core/pkg/resourcesprioritization/prioritizationhandler.go (new file, 91 lines)
@@ -0,0 +1,91 @@
|
||||
package resourcesprioritization
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/prioritization"
|
||||
)
|
||||
|
||||
type ResourcesPrioritizationHandler struct {
|
||||
skipZeroScores bool
|
||||
}
|
||||
|
||||
func NewResourcesPrioritizationHandler(skipZeroScore bool) *ResourcesPrioritizationHandler {
|
||||
return &ResourcesPrioritizationHandler{
|
||||
skipZeroScores: skipZeroScore,
|
||||
}
|
||||
}
|
||||
|
||||
func (handler *ResourcesPrioritizationHandler) PrioritizeResources(sessionObj *cautils.OPASessionObj) error {
|
||||
for resourceId, result := range sessionObj.ResourcesResult {
|
||||
resourcePriorityVector := []prioritization.ControlsVector{}
|
||||
resource, exist := sessionObj.AllResources[resourceId]
|
||||
if !exist {
|
||||
return fmt.Errorf("expected to find resource id '%s' in scanned resources map", resourceId)
|
||||
}
|
||||
|
||||
workload := workloadinterface.NewWorkloadObj(resource.GetObject())
|
||||
|
||||
if workload != nil && handler.isSupportedKind(workload) {
|
||||
for _, resourceAssociatedControl := range result.ListControls() {
|
||||
if !resourceAssociatedControl.GetStatus(nil).IsFailed() {
|
||||
continue
|
||||
}
|
||||
|
||||
controlSummary := sessionObj.Report.SummaryDetails.Controls.GetControl("ID", resourceAssociatedControl.ControlID)
|
||||
if controlSummary == nil {
|
||||
return fmt.Errorf("expected to find control id '%s' in summary details", resourceAssociatedControl.ControlID)
|
||||
}
|
||||
|
||||
controlScoreFactor := controlSummary.GetScoreFactor()
|
||||
replicaCount := float64(workload.GetReplicas())
|
||||
|
||||
cVector := prioritization.NewControlsVector()
|
||||
cVector.AddControl(prioritization.PriorityVectorControl{
|
||||
ControlID: resourceAssociatedControl.ControlID,
|
||||
Category: "",
|
||||
})
|
||||
|
||||
cVector.SetSeverity(apis.ControlSeverityToInt(controlScoreFactor))
|
||||
cVector.SetScore(float64(controlScoreFactor) + (replicaCount / 10))
|
||||
resourcePriorityVector = append(resourcePriorityVector, *cVector)
|
||||
}
|
||||
}
|
||||
|
||||
prioritizedResource := prioritization.PrioritizedResource{
|
||||
ResourceID: resourceId,
|
||||
PriorityVector: resourcePriorityVector,
|
||||
}
|
||||
|
||||
prioritizedResource.SetSeverity(prioritizedResource.CalculateSeverity())
|
||||
prioritizedResource.SetScore(prioritizedResource.CalculateScore())
|
||||
|
||||
if handler.skipZeroScores && prioritizedResource.GetScore() == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
sessionObj.ResourcesPrioritized[resourceId] = prioritizedResource
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (handler *ResourcesPrioritizationHandler) isSupportedKind(obj workloadinterface.IMetadata) bool {
|
||||
if obj != nil {
|
||||
switch obj.GetKind() {
|
||||
case "Deployment",
|
||||
"Pod",
|
||||
"ReplicaSet",
|
||||
"Node",
|
||||
"DaemonSet",
|
||||
"StatefulSet",
|
||||
"Job",
|
||||
"CronJob":
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
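
As a quick sanity check on the scoring above (an informal walkthrough, not code from the diff): each failed control contributes its score factor plus replicas/10 to the priority vector, and judging by the expectations in the test file that follows, the resource score is the sum of those per-control contributions.

```go
package main

import "fmt"

func main() {
	// resource1 in the test below: a Deployment with 20 replicas that fails
	// C-001 (score factor 3) and C-002 (score factor 4).
	replicas := 20.0
	fmt.Println((3 + replicas/10) + (4 + replicas/10)) // 11, matching expectedScores["resource1"]

	// resource3: 1 replica, only C-003 (score factor 10) fails.
	fmt.Println(10 + 1.0/10) // 10.1, matching expectedScores["resource3"]
}
```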
core/pkg/resourcesprioritization/prioritizationhandler_test.go (new file, 150 lines)
@@ -0,0 +1,150 @@
|
||||
package resourcesprioritization
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/k8s-interface/workloadinterface"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func OPASessionObjMock(mockResults map[string]resourcesresults.Result, mockControlsSummary map[string]reportsummary.ControlSummary, mockAllResources map[string]workloadinterface.IMetadata) *cautils.OPASessionObj {
|
||||
mock := cautils.NewOPASessionObjMock()
|
||||
mock.Report.SummaryDetails.Controls = mockControlsSummary
|
||||
mock.ResourcesResult = mockResults
|
||||
mock.AllResources = mockAllResources
|
||||
return mock
|
||||
}
|
||||
|
||||
func WorkloadMockWithKind(kind string) workloadinterface.IMetadata {
|
||||
mock := workloadinterface.NewWorkloadMock(nil)
|
||||
mock.SetKind(kind)
|
||||
return mock
|
||||
}
|
||||
|
||||
func DeploymentWorkloadMock(replicas int) workloadinterface.IMetadata {
|
||||
var deploymentMock = fmt.Sprintf(`{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"privileged-deployment","labels":{"app":"nginx"}},"spec":{"replicas":%v,"selector":{"matchLabels":{"app":"nginx"}},"template":{"metadata":{"labels":{"app":"nginx"}},"spec":{"containers":[{"name":"nginx","image":"nginx:1.18.0","ports":[{"containerPort":80}],"securityContext":{"privileged":true}}]}}}}`, replicas)
|
||||
w, _ := workloadinterface.NewWorkload([]byte(deploymentMock))
|
||||
return w
|
||||
}
|
||||
|
||||
func ResourceAssociatedControlMock(controlID string, status apis.ScanningStatus) resourcesresults.ResourceAssociatedControl {
|
||||
return resourcesresults.ResourceAssociatedControl{
|
||||
ControlID: controlID,
|
||||
ResourceAssociatedRules: []resourcesresults.ResourceAssociatedRule{
|
||||
{Name: "Test", Status: status},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourcesPrioritizationHandler_PrioritizeResources(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
results map[string]resourcesresults.Result
|
||||
controls map[string]reportsummary.ControlSummary
|
||||
resources map[string]workloadinterface.IMetadata
|
||||
expectedScores map[string]float64
|
||||
expectedSeverity map[string]int
|
||||
expectedControlsInVector map[string][]string
|
||||
}{
|
||||
{
|
||||
name: "non-empty report",
|
||||
results: map[string]resourcesresults.Result{
|
||||
"resource1": {
|
||||
AssociatedControls: []resourcesresults.ResourceAssociatedControl{
|
||||
ResourceAssociatedControlMock("C-001", apis.StatusFailed),
|
||||
ResourceAssociatedControlMock("C-002", apis.StatusFailed),
|
||||
},
|
||||
},
|
||||
"resource2": {
|
||||
AssociatedControls: []resourcesresults.ResourceAssociatedControl{
|
||||
ResourceAssociatedControlMock("C-001", apis.StatusFailed),
|
||||
ResourceAssociatedControlMock("C-002", apis.StatusFailed),
|
||||
ResourceAssociatedControlMock("C-003", apis.StatusPassed),
|
||||
},
|
||||
},
|
||||
"resource3": {
|
||||
AssociatedControls: []resourcesresults.ResourceAssociatedControl{
|
||||
ResourceAssociatedControlMock("C-001", apis.StatusPassed),
|
||||
ResourceAssociatedControlMock("C-002", apis.StatusPassed),
|
||||
ResourceAssociatedControlMock("C-003", apis.StatusFailed),
|
||||
},
|
||||
},
|
||||
},
|
||||
controls: map[string]reportsummary.ControlSummary{
|
||||
"C-001": {
|
||||
ControlID: "C-001",
|
||||
ScoreFactor: 3,
|
||||
},
|
||||
"C-002": {
|
||||
ControlID: "C-002",
|
||||
ScoreFactor: 4,
|
||||
},
|
||||
"C-003": {
|
||||
ControlID: "C-003",
|
||||
ScoreFactor: 10,
|
||||
},
|
||||
},
|
||||
resources: map[string]workloadinterface.IMetadata{
|
||||
"resource1": DeploymentWorkloadMock(20),
|
||||
"resource2": DeploymentWorkloadMock(1),
|
||||
"resource3": DeploymentWorkloadMock(1),
|
||||
},
|
||||
expectedScores: map[string]float64{
|
||||
"resource1": float64(11),
|
||||
"resource2": float64(7.199999999999999),
|
||||
"resource3": float64(10.1),
|
||||
},
|
||||
expectedSeverity: map[string]int{
|
||||
"resource1": apis.SeverityMedium,
|
||||
"resource2": apis.SeverityMedium,
|
||||
"resource3": apis.SeverityCritical,
|
||||
},
|
||||
expectedControlsInVector: map[string][]string{
|
||||
"resource1": {"C-001", "C-002"},
|
||||
"resource2": {"C-001", "C-002"},
|
||||
"resource3": {"C-003"},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
handler := &ResourcesPrioritizationHandler{
|
||||
skipZeroScores: false,
|
||||
}
|
||||
sessionObj := OPASessionObjMock(tt.results, tt.controls, tt.resources)
|
||||
err := handler.PrioritizeResources(sessionObj)
|
||||
assert.NoError(t, err, "expected to have no errors in PrioritizeResources()")
|
||||
|
||||
assert.Equalf(t, len(tt.results), len(sessionObj.ResourcesPrioritized), "expected prioritized resources to be not empty")
|
||||
for rId, resource := range sessionObj.ResourcesPrioritized {
|
||||
expectedScore := tt.expectedScores[rId]
|
||||
assert.Equalf(t, expectedScore, resource.GetScore(), "expected score of resourceID '%s' to be '%v', got '%v'", rId, expectedScore, resource.GetScore())
|
||||
|
||||
expectedSeverity := tt.expectedSeverity[rId]
|
||||
assert.Equalf(t, expectedSeverity, resource.GetSeverity(), "expected severity of resourceID '%s' to be '%v', got '%v'", rId, expectedSeverity, resource.GetSeverity())
|
||||
|
||||
expectedControlIDs := tt.expectedControlsInVector[rId]
|
||||
assert.ElementsMatchf(t, expectedControlIDs, resource.ListControlsIDs(), "expected controls of resourceID '%s' to be '%v', got '%v'", rId, expectedControlIDs, resource.ListControlsIDs())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourcesPrioritizationHandler_isSupportedKind(t *testing.T) {
|
||||
handler := &ResourcesPrioritizationHandler{}
|
||||
assert.True(t, handler.isSupportedKind(WorkloadMockWithKind("Deployment")))
|
||||
assert.True(t, handler.isSupportedKind(WorkloadMockWithKind("Pod")))
|
||||
assert.True(t, handler.isSupportedKind(WorkloadMockWithKind("Node")))
|
||||
assert.True(t, handler.isSupportedKind(WorkloadMockWithKind("DaemonSet")))
|
||||
assert.True(t, handler.isSupportedKind(WorkloadMockWithKind("StatefulSet")))
|
||||
assert.True(t, handler.isSupportedKind(WorkloadMockWithKind("Job")))
|
||||
assert.True(t, handler.isSupportedKind(WorkloadMockWithKind("CronJob")))
|
||||
assert.False(t, handler.isSupportedKind(nil))
|
||||
assert.False(t, handler.isSupportedKind(WorkloadMockWithKind("ConfigMap")))
|
||||
assert.False(t, handler.isSupportedKind(WorkloadMockWithKind("ServiceAccount")))
|
||||
}
|
||||
@@ -131,7 +131,7 @@ func buildResourceControlResult(resourceControl resourcesresults.ResourceAssocia
|
||||
ctlSeverity := apis.ControlSeverityToString(control.GetScoreFactor())
|
||||
ctlName := resourceControl.GetName()
|
||||
ctlURL := resourceControl.GetID()
|
||||
failedPaths := failedPathsToString(&resourceControl)
|
||||
failedPaths := append(failedPathsToString(&resourceControl), fixPathsToString(&resourceControl)...)
|
||||
|
||||
return ResourceControlResult{ctlSeverity, ctlName, ctlURL, failedPaths}
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
"encoding/xml"
"fmt"
"os"
"sort"
"strings"

logger "github.com/kubescape/go-logger"
@@ -12,8 +11,9 @@ import (
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
"github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/kubescape/opa-utils/shared"
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
)

/*
@@ -55,6 +55,7 @@ type JUnitTestSuite struct {
Skipped string `xml:"skipped,attr"` // The total number of skipped tests
Time string `xml:"time,attr"` // Time taken (in seconds) to execute the tests in the suite
Timestamp string `xml:"timestamp,attr"` // when the test was executed in ISO 8601 format (2014-01-21T16:17:18)
File string `xml:"file,attr"` // The file be tested
Properties []JUnitProperty `xml:"properties>property,omitempty"`
TestCases []JUnitTestCase `xml:"testcase"`
}
@@ -88,6 +89,11 @@ type JUnitFailure struct {
Contents string `xml:",chardata"`
}

const (
lineSeparator = "\n===================================================================================================================\n\n"
testCaseTypeResources = "Resources"
)

func NewJunitPrinter(verbose bool) *JunitPrinter {
return &JunitPrinter{
verbose: verbose,
@@ -118,94 +124,119 @@ func (junitPrinter *JunitPrinter) ActionPrint(opaSessionObj *cautils.OPASessionO
func testsSuites(results *cautils.OPASessionObj) *JUnitTestSuites {
return &JUnitTestSuites{
Suites: listTestsSuite(results),
Tests: results.Report.SummaryDetails.NumberOfControls().All(),
Tests: results.Report.SummaryDetails.NumberOfResources().All(),
Name: "Kubescape Scanning",
Failures: results.Report.SummaryDetails.NumberOfControls().Failed(),
Failures: results.Report.SummaryDetails.NumberOfResources().Failed(),
}
}

// aggregate resources source to a list of resources results
func sourceToResourcesResults(results *cautils.OPASessionObj) map[string][]resourcesresults.Result {
resourceResults := make(map[string][]resourcesresults.Result)
for i := range results.ResourceSource {
if r, ok := results.ResourcesResult[i]; ok {
if _, ok := resourceResults[results.ResourceSource[i].RelativePath]; !ok {
resourceResults[results.ResourceSource[i].RelativePath] = []resourcesresults.Result{}
}
resourceResults[results.ResourceSource[i].RelativePath] = append(resourceResults[results.ResourceSource[i].RelativePath], r)
}
}
return resourceResults
}

// listTestsSuite returns a list of testsuites
func listTestsSuite(results *cautils.OPASessionObj) []JUnitTestSuite {
var testSuites []JUnitTestSuite

resourceResults := sourceToResourcesResults(results)
counter := 0
// control scan
if len(results.Report.SummaryDetails.ListFrameworks()) == 0 {
for path, resourcesResult := range resourceResults {
testSuite := JUnitTestSuite{}
testSuite.Failures = results.Report.SummaryDetails.NumberOfControls().Failed()
testSuite.Timestamp = results.Report.ReportGenerationTime.String()
testSuite.ID = 0
testSuite.Name = "kubescape"
testSuite.Properties = properties(results.Report.SummaryDetails.Score)
testSuite.TestCases = testsCases(results, &results.Report.SummaryDetails.Controls, "Kubescape")
testSuites = append(testSuites, testSuite)
return testSuites
}

for i, f := range results.Report.SummaryDetails.Frameworks {
testSuite := JUnitTestSuite{}
testSuite.Failures = f.NumberOfControls().Failed()
testSuite.Timestamp = results.Report.ReportGenerationTime.String()
testSuite.ID = i
testSuite.Name = f.Name
testSuite.Properties = properties(f.Score)
testSuite.TestCases = testsCases(results, f.GetControls(), f.GetName())
testSuites = append(testSuites, testSuite)
testSuite.ID = counter
counter++
testSuite.File = path
testSuite.TestCases = testsCases(results, resourcesResult)
if len(testSuite.TestCases) > 0 {
testSuites = append(testSuites, testSuite)
}
}

return testSuites
}
func testsCases(results *cautils.OPASessionObj, controls reportsummary.IControlsSummaries, classname string) []JUnitTestCase {
var testCases []JUnitTestCase

for _, cID := range controls.ListControlsIDs().All() {
testCase := JUnitTestCase{}
control := results.Report.SummaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, cID)

testCase.Name = control.GetName()
testCase.Classname = classname
testCase.Status = string(control.GetStatus().Status())

if control.GetStatus().IsFailed() {
resources := map[string]interface{}{}
resourceIDs := control.ListResourcesIDs().Failed()
for j := range resourceIDs {
resource := results.AllResources[resourceIDs[j]]
resources[resourceToString(resource)] = nil
func failedControlsToFailureMessage(results *cautils.OPASessionObj, controls []resourcesresults.ResourceAssociatedControl, severityCounter []int) string {
msg := ""
for _, c := range controls {
control := results.Report.SummaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, c.GetID())
if c.GetStatus(nil).IsFailed() {
msg += fmt.Sprintf("Test: %s\n", control.GetName())
msg += fmt.Sprintf("Severity: %s\n", apis.ControlSeverityToString(control.GetScoreFactor()))
msg += fmt.Sprintf("Remediation: %s\n", control.GetRemediation())
msg += fmt.Sprintf("Link: %s\n", getControlLink(control.GetID()))
if failedPaths := failedPathsToString(&c); len(failedPaths) > 0 {
msg += fmt.Sprintf("Failed paths: \n - %s\n", strings.Join(failedPaths, "\n - "))
}
resourcesStr := shared.MapStringToSlice(resources)
sort.Strings(resourcesStr)
testCaseFailure := JUnitFailure{}
testCaseFailure.Type = "Control"
// testCaseFailure.Contents =
testCaseFailure.Message = fmt.Sprintf("Remediation: %s\nMore details: %s\n\n%s", control.GetRemediation(), getControlLink(control.GetID()), strings.Join(resourcesStr, "\n"))

testCase.Failure = &testCaseFailure
} else if control.GetStatus().IsSkipped() {
testCase.SkipMessage = &JUnitSkipMessage{
Message: "", // TODO - fill after statusInfo is supported
if fixPaths := fixPathsToString(&c); len(fixPaths) > 0 {
msg += fmt.Sprintf("Available fix: \n - %s\n", strings.Join(fixPaths, "\n - "))
}
msg += "\n"

severityCounter[apis.ControlSeverityToInt(control.GetScoreFactor())] += 1
}
}
return msg
}

// Every testCase includes a file (even if the file contains several resources)
func testsCases(results *cautils.OPASessionObj, resourcesResult []resourcesresults.Result) []JUnitTestCase {
var testCases []JUnitTestCase
testCase := JUnitTestCase{}
testCaseFailure := JUnitFailure{}
testCaseFailure.Type = testCaseTypeResources
message := ""

// severityCounter represents the severities, 0: Unknown, 1: Low, 2: Medium, 3: High, 4: Critical
severityCounter := make([]int, apis.NumberOfSeverities, apis.NumberOfSeverities)

for i := range resourcesResult {
message += lineSeparator
if failedControls := failedControlsToFailureMessage(results, resourcesResult[i].ListControls(), severityCounter); failedControls != "" {
message += fmt.Sprintf("Resource: %s\n\n%s", resourceNameToString(results.AllResources[resourcesResult[i].GetResourceID()]), failedControls)
}
}
testCaseFailure.Message += fmt.Sprintf("%s\n%s", getSummaryMessage(severityCounter), message)

testCase.Failure = &testCaseFailure
if testCase.Failure.Message != "" {
testCases = append(testCases, testCase)
}

return testCases
}

func resourceToString(resource workloadinterface.IMetadata) string {
sep := "; "
s := ""
s += fmt.Sprintf("apiVersion: %s", resource.GetApiVersion()) + sep
s += fmt.Sprintf("kind: %s", resource.GetKind()) + sep
if resource.GetNamespace() != "" {
s += fmt.Sprintf("namespace: %s", resource.GetNamespace()) + sep
func getSummaryMessage(severityCounter []int) string {
total := 0
severities := ""
for i, count := range severityCounter {
if apis.SeverityNumberToString(i) == apis.SeverityNumberToString(apis.SeverityUnknown) {
continue
}
severities += fmt.Sprintf("%s: %d, ", apis.SeverityNumberToString(i), count)
total += count
}
s += fmt.Sprintf("name: %s", resource.GetName())
return s
if len(severities) == 0 {
return ""
}
return fmt.Sprintf("Total: %d (%s)", total, severities[:len(severities)-2])
}

func properties(riskScore float32) []JUnitProperty {
return []JUnitProperty{
{
Name: "riskScore",
Value: fmt.Sprintf("%.2f", riskScore),
},
func resourceNameToString(resource workloadinterface.IMetadata) string {
s := ""
s += fmt.Sprintf("kind=%s/", resource.GetKind())
if resource.GetNamespace() != "" {
s += fmt.Sprintf("namespace=%s/", resource.GetNamespace())
}
s += fmt.Sprintf("name=%s", resource.GetName())
return s
}

@@ -261,7 +261,7 @@ func controlCountersForSummary(counters reportsummary.ICounters) string {
}

func controlCountersForResource(l *helpersv1.AllLists) string {
return fmt.Sprintf("Controls: %d (Failed: %d, Excluded: %d)", len(l.All()), len(l.Failed()), len(l.Excluded()))
return fmt.Sprintf("Controls: %d (Failed: %d, Excluded: %d)", l.All().Len(), len(l.Failed()), len(l.Excluded()))
}
func getSeparator(sep string) string {
s := ""

@@ -74,7 +74,7 @@ func generateResourceRows(controls []resourcesresults.ResourceAssociatedControl,
}

row[resourceColumnURL] = fmt.Sprintf("https://hub.armosec.io/docs/%s", strings.ToLower(controls[i].GetID()))
row[resourceColumnPath] = strings.Join(failedPathsToString(&controls[i]), "\n")
row[resourceColumnPath] = strings.Join(append(failedPathsToString(&controls[i]), fixPathsToString(&controls[i])...), "\n")
row[resourceColumnName] = controls[i].GetName()

if c := summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, controls[i].GetName()); c != nil {
@@ -120,6 +120,16 @@ func failedPathsToString(control *resourcesresults.ResourceAssociatedControl) []
if p := control.ResourceAssociatedRules[j].Paths[k].FailedPath; p != "" {
paths = append(paths, p)
}
}
}
return paths
}

func fixPathsToString(control *resourcesresults.ResourceAssociatedControl) []string {
var paths []string

for j := range control.ResourceAssociatedRules {
for k := range control.ResourceAssociatedRules[j].Paths {
if p := control.ResourceAssociatedRules[j].Paths[k].FixPath.Path; p != "" {
v := control.ResourceAssociatedRules[j].Paths[k].FixPath.Value
paths = append(paths, fmt.Sprintf("%s=%s", p, v))

@@ -6,6 +6,7 @@ import (
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/results/v1/prioritization"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
@@ -24,16 +25,21 @@ func FinalizeResults(data *cautils.OPASessionObj) *reporthandlingv2.PostureRepor
}

report.Results = make([]resourcesresults.Result, len(data.ResourcesResult))
finalizeResults(report.Results, data.ResourcesResult)
finalizeResults(report.Results, data.ResourcesResult, data.ResourcesPrioritized)

report.Resources = finalizeResources(report.Results, data.AllResources, data.ResourceSource)

return &report
}
func finalizeResults(results []resourcesresults.Result, resourcesResult map[string]resourcesresults.Result) {
func finalizeResults(results []resourcesresults.Result, resourcesResult map[string]resourcesresults.Result, prioritizedResources map[string]prioritization.PrioritizedResource) {
index := 0
for resourceID := range resourcesResult {
results[index] = resourcesResult[resourceID]

// Add prioritization information to the result
if v, exist := prioritizedResources[resourceID]; exist {
results[index].PrioritizedResource = &v
}
index++
}
}

@@ -14,6 +14,7 @@ import (
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/results/v1/prioritization"
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
)
@@ -124,17 +125,39 @@ func (report *ReportEventReceiver) sendResources(host string, opaSessionObj *cau

counter := 0
reportCounter := 0
if err := report.setResources(splittedPostureReport, opaSessionObj.AllResources, opaSessionObj.ResourceSource, &counter, &reportCounter, host); err != nil {

if err := report.setResources(splittedPostureReport, opaSessionObj.AllResources, opaSessionObj.ResourceSource, opaSessionObj.ResourcesResult, &counter, &reportCounter, host); err != nil {
return err
}
if err := report.setResults(splittedPostureReport, opaSessionObj.ResourcesResult, &counter, &reportCounter, host); err != nil {

if err := report.setResults(splittedPostureReport, opaSessionObj.ResourcesResult, opaSessionObj.AllResources, opaSessionObj.ResourceSource, opaSessionObj.ResourcesPrioritized, &counter, &reportCounter, host); err != nil {
return err
}

return report.sendReport(host, splittedPostureReport, reportCounter, true)
}
func (report *ReportEventReceiver) setResults(reportObj *reporthandlingv2.PostureReport, results map[string]resourcesresults.Result, counter, reportCounter *int, host string) error {

func (report *ReportEventReceiver) setResults(reportObj *reporthandlingv2.PostureReport, results map[string]resourcesresults.Result, allResources map[string]workloadinterface.IMetadata, resourcesSource map[string]reporthandling.Source, prioritizedResources map[string]prioritization.PrioritizedResource, counter, reportCounter *int, host string) error {
for _, v := range results {
/*

// set result.RawResource
resourceID := v.GetResourceID()
if _, ok := allResources[resourceID]; !ok {
return fmt.Errorf("expected to find raw resource object for '%s'", resourceID)
}
resource := reporthandling.NewResourceIMetadata(allResources[resourceID])
if r, ok := resourcesSource[resourceID]; ok {
resource.SetSource(&r)
}
v.RawResource = resource

// set result.PrioritizedResource
if resource, ok := prioritizedResources[resourceID]; ok {
v.PrioritizedResource = &resource
}
*/

r, err := json.Marshal(v)
if err != nil {
return fmt.Errorf("failed to unmarshal resource '%s', reason: %v", v.GetResourceID(), err)
@@ -162,8 +185,17 @@ func (report *ReportEventReceiver) setResults(reportObj *reporthandlingv2.Postur
return nil
}

func (report *ReportEventReceiver) setResources(reportObj *reporthandlingv2.PostureReport, allResources map[string]workloadinterface.IMetadata, resourcesSource map[string]reporthandling.Source, counter, reportCounter *int, host string) error {
func (report *ReportEventReceiver) setResources(reportObj *reporthandlingv2.PostureReport, allResources map[string]workloadinterface.IMetadata, resourcesSource map[string]reporthandling.Source, results map[string]resourcesresults.Result, counter, reportCounter *int, host string) error {
for resourceID, v := range allResources {
/*

// process only resources which have no result because these resources will be sent on the result object
if _, hasResult := results[resourceID]; hasResult {
continue
}

*/

resource := reporthandling.NewResourceIMetadata(v)
if r, ok := resourcesSource[resourceID]; ok {
resource.SetSource(&r)

@@ -15,10 +15,10 @@
</style>

<body style="text-align: center;">
<img src="kubescape.png" alt="Kubescap logo" style="width:20%">
<img src="kubescape.png" alt="Kubescape logo" style="width:20%">
<iframe src="https://discordapp.com/widget?id=893048809884643379&theme=dark" width="350" height="500"
allowtransparency="true" frameborder="0"
sandbox="allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts"></iframe>
</body>

</html>
</html>

Binary file not shown. (Image size before: 53 KiB, after: 59 KiB)

@@ -6,23 +6,23 @@ source #287

### Big picture

* Kubescape team is planning to create controls which take into account image vulnerabilities, example: looking for public internet facing workloads with critical vulnerabilities. These are seriously effecting the security health of a cluster and therefore we think it is important to cover it. We think that most container registries are/will support image scanning like Harbor and therefore the ability to get information from them is important.
* There are information in the image repository which is important for existing controls as well. They are incomplete without it, example see this issue: Non-root containers check is broken #19 . These are not necessarily image vulnerability related. Can be information in the image manifest (like the issue before), but it can be the image BOM related.
* Kubescape team is planning to create controls which take into account image vulnerabilities, example: looking for public internet facing workloads with critical vulnerabilities. These are seriously affecting the security health of a cluster and therefore we think it is important to cover it. We think that most container registries are/will support image scanning like Harbor and therefore, the ability to get information from them is important.
* There is information in the image repository which is important for the existing controls as well. They are incomplete without it, example see this issue: Non-root containers check is broken #19 . These are not necessarily image vulnerability related. Can be information in the image manifest (like the issue before), but it can be the image BOM related.

### Relation to this proposal

There are multiple changes and design decisions needs to be made before Kubescape will support the before outlined controls. However, a focal point the whole picutre is the ability to access vulnerability databases of container images. We anticipate that most container image repositories will support image vulnerability scanning, some major players are already do. Since there is no a single API available which all of these data sources support it is important to create an adaption layer within Kubescape so different datasources can serve Kubescape's goals.
Multiple changes and design decisions need to be made before Kubescape will support the above outlined controls. However, a focal point in the whole picture is the ability to access vulnerability of databases of container images. We anticipate that most container image repositories will support image vulnerability scanning, like some major players already do. Since there is no single API available which all of these data sources support, it is important to create an adaption layer within Kubescape so that different datasources can serve Kubescape's goals.

## High level design of Kubescape

### Layers

* Controls and Rules: that actual control logic implementation, the "tests" themselves. Implemented in rego
* Controls and Rules: That actual control logic implementation, the "tests" themselves. Implemented in rego
* OPA engine: the [OPA](https://github.com/open-policy-agent/opa) rego interpreter
* Rules processor: Kubescape component, it enumerates and runs the controls while also preparing the all the input data that the controls need for running
* Data sources: set of different modules providing data to the Rules processor so it can run the controls with them. Examples: Kubernetes objects, cloud vendor API objects and adding in this proposal the vulnerability infomration
* Cloud Image Vulnerability adaption interface: the subject of this proposal, it gives a common interface for different registry/vulnerability vendors to adapt to.
* CIV adaptors: specific implementation of the CIV interface, example Harbor adaption
* Rules processor: Kubescape component, it enumerates and runs the controls while also preparing all the input data that the controls need for running
* Data sources: Set of different modules providing data to the Rules processor so that it can run the controls with them. Examples: Kubernetes objects, cloud vendor API objects and adding the vulnerability information in this proposal
* Cloud Image Vulnerability adaption interface: The subject of this proposal, it gives a common interface for different registry/vulnerability vendors to adapt to.
* CIV adaptors: Specific implementation of the CIV interface, example Harbor adaption
```
-----------------------
| Controls/Rules (rego) |
@@ -62,7 +62,7 @@ The interface needs to cover the following functionalities:
* Getting vulnerability information for a given image
* Getting image information
* Image manifests
* Image BOMs (bill of material)
* Image BOMs (Bill Of Material)

## Go API proposal

@@ -102,7 +102,7 @@ type ContainerImageInformation struct {

type IContainerImageVulnerabilityAdaptor interface {
// Credentials are coming from user input (CLI or configuration file) and they are abstracted at string to string map level
// so and example use would be like registry: "simpledockerregistry:80" and credentials like {"username":"joedoe","password":"abcd1234"}
// so an example use would be like registry: "simpledockerregistry:80" and credentials like {"username":"joedoe","password":"abcd1234"}
Login(registry string, credentials map[string]string) error

// For "help" purposes
@@ -132,7 +132,7 @@ The objects received from the interface will be converted to an Imetadata compat
"name": "nginx:latest"
},
"data": {
// returned by the adaptor API (structure like our backend gives for an image
// returned by the adaptor API (structure like our backend gives for an image)
}
}
```
@@ -168,9 +168,9 @@ The rego results will be a combination of the k8s artifact and the list of relev
},
"data": {

// returned by the adaptor API (structure like our backend gives for an image
// returned by the adaptor API (structure like our backend gives for an image)
}
}
]
}
```
```

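To make the adaption-layer idea in this proposal more concrete, here is a minimal, hypothetical sketch of an adaptor built only against the `Login` method quoted above from the proposed `IContainerImageVulnerabilityAdaptor` interface. The `simpleRegistryAdaptor` type and its fields are illustrative and not part of Kubescape; the remaining interface methods are omitted because they are not shown in this excerpt.

```
package civsketch

import "fmt"

// simpleRegistryAdaptor is an illustrative adaptor for a plain Docker
// registry; it only stores the credentials it receives.
type simpleRegistryAdaptor struct {
	registry string
	username string
	password string
}

// Login follows the credential convention described in the proposal: a
// registry address plus a free-form string-to-string credentials map, e.g.
// registry "simpledockerregistry:80" and {"username":"joedoe","password":"abcd1234"}.
func (a *simpleRegistryAdaptor) Login(registry string, credentials map[string]string) error {
	user, okUser := credentials["username"]
	pass, okPass := credentials["password"]
	if !okUser || !okPass {
		return fmt.Errorf("missing username/password for registry %q", registry)
	}
	a.registry = registry
	a.username = user
	a.password = pass
	// A real adaptor would authenticate against the registry or
	// vulnerability-scanning API here.
	return nil
}
```
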
@@ -12,27 +12,35 @@ The features serve different stages of the workflow of the users:

The items in the Kubescape roadmap are split into 3 major groups based on the feature planning maturity:

* [Planning](#planning) - we have tickets open for these issues with a more or less clear vision of design.
* [Backlog](#backlog) - features that were discussed at a high level but are not ready for development
* [Wishlist](#wishlist) - features we are dreaming of in 😀 and want to push them gradually forward
* [Planning](#planning-) - we have tickets open for these issues with a more or less clear vision of design.
* [Backlog](#backlog-) - features that were discussed at a high level but are not ready for development
* [Wishlist](#wishlist-) - features we are dreaming of in 😀 and want to push them gradually forward

## Planning 👷

* ##### Storing scan results in cluster
We want Kubescape scan results (both cluster and image scan) to be stored in the cluster locally as `CRD`s. This will enable easier integration with results by other projects as well as with scripting via `kubectl`. This will also make image scan based controls to avoid accessing external resources for image vulnerability scan results.

* ##### Vulnerability prioritization based on workload file activity
Implementing an eBPF agent (based on Falco or Tracee) which tracks file activity in workloads to prioritize container image vulnerabilities.

* ##### Prioritization engine using MITRE Attack matrix based attack chains
Create a security issue prioritization engine that scores resources based on control based attack chains. All Kubescape controls can be arranged into attack categories of the MITRE Attack matrix. The Attack matrix categories can be connected to each other based on a theoretical attack (ie. you can't have privilege escalation without initial access). Each of the Kubescape controls is to be categorized in these system and Kubescape will calculate a priority score based on the interconnections between failed controls.

* ##### Integration with image registries
We want to expand Kubescape to integrate with different image registries and read image vulnerability information from there. This will allow Kubescape to give contextual security information about vulnerabilities. Container registry integration
* ##### Kubescape as a microservice
Create a REST API for Kubescape so it can constantly run in a cluster, and other components like Prometheus can scrape results.
* ##### Kubescape CLI control over cluster operations
Add functionality to Kubescape CLI to trigger operations in Kubescape cluster components (example: trigger image scans, etc.)
* ##### Produce md/HTML reports
Create scan reports for different output formats.
* ##### Git integration for pull requests
Create insightful GitHub actions for Kubescape

## Backlog 📅
* ##### JSON path for HELM charts
Today, Kubescape can point to issues in the Kubernetes object. We want to develop this feature so Kubescape will be able to point to the misconfigured source file (HELM).
* ##### Create Kubescape HELM plugin
Producing scan results in the context of HELM
* ##### Kubescape based admission controller
Implement admission controller API for Kubescape microservice to enable users to use Kubescape rules as policies

@@ -40,7 +48,7 @@ The items in the Kubescape roadmap are split into 3 major groups based on the fe
* ##### Integrate with other Kubernetes CLI tools
Use Kubescape as a YAML validator for `kubectl` and others.
* ##### Kubernetes audit log integration
Connect Kubescape to audit log stream to enable it to produce more contextual security information based on how the API service is used.
Connect Kubescape to the audit log stream to enable it to produce more contextual security information based on how the API service is used.
* ##### TUI for Kubescape
Interactive terminal based user interface which helps to analyze and fix issues
* ##### Scanning images with GO for vulnerabilities

@@ -27,7 +27,7 @@ export cluster_name=$(echo "$cluster_arn" | awk -F'/' '{print $NF}')
echo 'Get cluster region'
export cluster_region=$(echo "$cluster_arn" | awk -F':' '{print $4}')

# First step, Create IAM OIDC provider for the cluster (Not required if the third step runs as is):
# First step, Create IAM OIDC provider for the cluster (Not required if the third step runs as it is):
echo 'Create IAM OIDC provider for the cluster'
eksctl utils associate-iam-oidc-provider --cluster $cluster_name --approve

@@ -1,6 +1,6 @@
# Kubescape Exceptions

Kubescape Exceptions is the proper way of excluding failed resources from effecting the risk score.
Kubescape Exceptions is the proper way of excluding failed resources from affecting the risk score.

e.g. When a `kube-system` resource fails and it is ok, simply add the resource to the exceptions configurations.

@@ -9,7 +9,7 @@ e.g. When a `kube-system` resource fails and it is ok, simply add the resource t

* `name`- Exception name - unique name representing the exception
* `policyType`- Do not change
* `actions`- List of available actions. Currently alertOnly is supported
* `actions`- List of available actions. Currently, alertOnly is supported
* `resources`- List of resources to apply this exception on
* `designatorType: Attributes`- An attribute-based declaration {key: value}
Supported keys:
@@ -19,19 +19,19 @@ e.g. When a `kube-system` resource fails and it is ok, simply add the resource t
* `cluster`: k8s cluster name (usually it is the `current-context`) (case-sensitive, regex supported)
* resource labels as key value (case-sensitive, regex NOT supported)
* `posturePolicies`- An attribute-based declaration {key: value}
* `frameworkName` - Framework names can be find [here](https://github.com/armosec/regolibrary/tree/master/frameworks) (regex supported)
* `controlName` - Control names can be find [here](https://github.com/armosec/regolibrary/tree/master/controls) (regex supported)
* `controlID` - Control ID can be find [here](https://github.com/armosec/regolibrary/tree/master/controls) (regex supported)
* `ruleName` - Rule names can be find [here](https://github.com/armosec/regolibrary/tree/master/rules) (regex supported)
* `frameworkName` - Framework names can be found [here](https://github.com/armosec/regolibrary/tree/master/frameworks) (regex supported)
* `controlName` - Control names can be found [here](https://github.com/armosec/regolibrary/tree/master/controls) (regex supported)
* `controlID` - Control ID can be found [here](https://github.com/armosec/regolibrary/tree/master/controls) (regex supported)
* `ruleName` - Rule names can be found [here](https://github.com/armosec/regolibrary/tree/master/rules) (regex supported)

You can find [here](https://github.com/kubescape/kubescape/tree/master/examples/exceptions) some examples of exceptions files

## Usage

The `resources` list and `posturePolicies` list are design to be a combination of the resources and policies to exclude
The `resources` list and `posturePolicies` list are designed to be a combination of the resources and policies to exclude
> You must declare at least one resource and one policy

e.g. If you wish to exclude all namespaces with the label `"environment": "dev"`, the resource list should look as following:
e.g. If you wish to exclude all namespaces with the label `"environment": "dev"`, the resource list should look as follows:
```
"resources": [
{
@@ -44,7 +44,7 @@ e.g. If you wish to exclude all namespaces with the label `"environment": "dev"`
]
```

But if you wish to exclude all namespaces **OR** any resource with the label `"environment": "dev"`, the resource list should look as following:
But if you wish to exclude all namespaces **OR** any resource with the label `"environment": "dev"`, the resource list should look as follows:
```
"resources": [
{
@@ -64,7 +64,7 @@ But if you wish to exclude all namespaces **OR** any resource with the label `"e

Same works with the `posturePolicies` list ->

e.g. If you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **AND** failed the `Allowed hostPath` control, the `posturePolicies` list should look as following:
e.g. If you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **AND** failed the `Allowed hostPath` control, the `posturePolicies` list should look as follows:
```
"posturePolicies": [
{
@@ -74,7 +74,7 @@ e.g. If you wish to exclude the resources declared in the `resources` list that
]
```

But if you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **OR** failed the `Allowed hostPath` control, the `posturePolicies` list should look as following:
But if you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **OR** failed the `Allowed hostPath` control, the `posturePolicies` list should look as follows:
```
"posturePolicies": [
{
@@ -177,4 +177,4 @@ The resources
]
}
]
```
```

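Putting the pieces of the exceptions format together, a complete exceptions file might look like the hypothetical sketch below, which excludes `kube-system` resources that fail any control of the `NSA` framework. The exception name, the attribute chosen, and the `postureExceptionPolicy` value for `policyType` are assumptions made for illustration; compare with the example files linked above before relying on them.

```
[
    {
        "name": "exclude-kube-system-from-nsa",
        "policyType": "postureExceptionPolicy",
        "actions": [
            "alertOnly"
        ],
        "resources": [
            {
                "designatorType": "Attributes",
                "attributes": {
                    "namespace": "kube-system"
                }
            }
        ],
        "posturePolicies": [
            {
                "frameworkName": "NSA"
            }
        ]
    }
]
```
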
go.mod
@@ -14,9 +14,9 @@ require (
github.com/go-git/go-git/v5 v5.4.2
github.com/google/uuid v1.3.0
github.com/johnfercher/maroto v0.37.0
github.com/kubescape/go-logger v0.0.5
github.com/kubescape/k8s-interface v0.0.82
github.com/kubescape/opa-utils v0.0.172
github.com/kubescape/go-logger v0.0.6
github.com/kubescape/k8s-interface v0.0.83
github.com/kubescape/opa-utils v0.0.186
github.com/kubescape/rbac-utils v0.0.17
github.com/libgit2/git2go/v33 v33.0.9
github.com/mattn/go-isatty v0.0.14
@@ -141,9 +141,9 @@ require (
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.22.0 // indirect
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 // indirect
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d // indirect
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect

go.sum
@@ -835,12 +835,12 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubescape/go-logger v0.0.5 h1:lA25XvI4LT4JqwnNkIDUtwdYS4KVmnbWrVB7SRte2Es=
github.com/kubescape/go-logger v0.0.5/go.mod h1:DnVWEvC90LFY1nNMaNo6nBVOcqkLMK3S0qzXP1fzRvI=
github.com/kubescape/k8s-interface v0.0.82 h1:Gu09lQDG+Mw81OxA9MK1BSaqxV/FBRaMYX9jzVqOzzw=
github.com/kubescape/k8s-interface v0.0.82/go.mod h1:ihX96yqar+xogHl45mFE8zT9DLI06iy7XQPAP+j5KJE=
github.com/kubescape/opa-utils v0.0.172 h1:9BPN9phA9zQxhlYS+Nf4nQ0Y/gRRHfsKjMJnIwTLLLE=
github.com/kubescape/opa-utils v0.0.172/go.mod h1:aU4s1y+BWWxNwYw447TmHmNaKHxOWtYsFhZe5GyvOuE=
github.com/kubescape/go-logger v0.0.6 h1:ynhAmwrz0O7Jtqq1CdmCZUrKveji25hVP+B/FAb3QrA=
github.com/kubescape/go-logger v0.0.6/go.mod h1:DnVWEvC90LFY1nNMaNo6nBVOcqkLMK3S0qzXP1fzRvI=
github.com/kubescape/k8s-interface v0.0.83 h1:yQ1kWNZmKfBim/+NmxpPI/j7L9ASDq2h3mCNdmYgzqY=
github.com/kubescape/k8s-interface v0.0.83/go.mod h1:ihX96yqar+xogHl45mFE8zT9DLI06iy7XQPAP+j5KJE=
github.com/kubescape/opa-utils v0.0.186 h1:3T0lD3x1/hweXY/HeNM2I4h8Ugh9SacXPo1AaOPBrAs=
github.com/kubescape/opa-utils v0.0.186/go.mod h1:jC5QrhS6WiFj/tXP2/YSDBnFlGsYUQokDGbKwMBgMpw=
github.com/kubescape/rbac-utils v0.0.17 h1:B78kjlTKqjYK/PXwmi4GPysHsFxIwVz1KFb4+IGT29w=
github.com/kubescape/rbac-utils v0.0.17/go.mod h1:pBwjpcrVeuH/no+DiCZWvlhYtCDzd3U0o/hEZKi+eM8=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
@@ -1458,8 +1458,8 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1623,8 +1623,9 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d h1:Zu/JngovGLVi6t2J3nmAf3AoTDwuzw85YZ3b9o4yU7s=
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 h1:v6hYoSR9T5oet+pMXwUWkbiVqx/63mlHjefrHmxwfeY=
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=

@@ -155,7 +155,7 @@ curl --header "Content-Type: application/json" \

#### Data profiling
Analyze profiled data using [pprof](https://github.com/google/pprof/blob/main/doc/README.md).
[How ro use](https://pkg.go.dev/net/http/pprof)
[How to use](https://pkg.go.dev/net/http/pprof)

example:
```bash

@@ -5,14 +5,14 @@ go 1.18
replace github.com/kubescape/kubescape/v2 => ../

require (
github.com/kubescape/kubescape/v2 v2.0.0-00010101000000-000000000000
github.com/armosec/utils-go v0.0.12
github.com/go-openapi/runtime v0.24.1
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
github.com/gorilla/schema v1.2.0
github.com/kubescape/go-logger v0.0.5
github.com/kubescape/opa-utils v0.0.172
github.com/kubescape/go-logger v0.0.6
github.com/kubescape/kubescape/v2 v2.0.0-00010101000000-000000000000
github.com/kubescape/opa-utils v0.0.186
github.com/stretchr/testify v1.8.0
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c
)
@@ -108,7 +108,7 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/jung-kurt/gofpdf v1.16.2 // indirect
github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect
github.com/kubescape/k8s-interface v0.0.82 // indirect
github.com/kubescape/k8s-interface v0.0.83 // indirect
github.com/kubescape/rbac-utils v0.0.17 // indirect
github.com/libgit2/git2go/v33 v33.0.9 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@@ -150,9 +150,9 @@ require (
go.uber.org/zap v1.22.0 // indirect
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 // indirect
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d // indirect
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect

@@ -891,12 +891,12 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubescape/go-logger v0.0.5 h1:lA25XvI4LT4JqwnNkIDUtwdYS4KVmnbWrVB7SRte2Es=
github.com/kubescape/go-logger v0.0.5/go.mod h1:DnVWEvC90LFY1nNMaNo6nBVOcqkLMK3S0qzXP1fzRvI=
github.com/kubescape/k8s-interface v0.0.82 h1:Gu09lQDG+Mw81OxA9MK1BSaqxV/FBRaMYX9jzVqOzzw=
github.com/kubescape/k8s-interface v0.0.82/go.mod h1:ihX96yqar+xogHl45mFE8zT9DLI06iy7XQPAP+j5KJE=
github.com/kubescape/opa-utils v0.0.172 h1:9BPN9phA9zQxhlYS+Nf4nQ0Y/gRRHfsKjMJnIwTLLLE=
github.com/kubescape/opa-utils v0.0.172/go.mod h1:aU4s1y+BWWxNwYw447TmHmNaKHxOWtYsFhZe5GyvOuE=
github.com/kubescape/go-logger v0.0.6 h1:ynhAmwrz0O7Jtqq1CdmCZUrKveji25hVP+B/FAb3QrA=
github.com/kubescape/go-logger v0.0.6/go.mod h1:DnVWEvC90LFY1nNMaNo6nBVOcqkLMK3S0qzXP1fzRvI=
github.com/kubescape/k8s-interface v0.0.83 h1:yQ1kWNZmKfBim/+NmxpPI/j7L9ASDq2h3mCNdmYgzqY=
github.com/kubescape/k8s-interface v0.0.83/go.mod h1:ihX96yqar+xogHl45mFE8zT9DLI06iy7XQPAP+j5KJE=
github.com/kubescape/opa-utils v0.0.186 h1:3T0lD3x1/hweXY/HeNM2I4h8Ugh9SacXPo1AaOPBrAs=
github.com/kubescape/opa-utils v0.0.186/go.mod h1:jC5QrhS6WiFj/tXP2/YSDBnFlGsYUQokDGbKwMBgMpw=
github.com/kubescape/rbac-utils v0.0.17 h1:B78kjlTKqjYK/PXwmi4GPysHsFxIwVz1KFb4+IGT29w=
github.com/kubescape/rbac-utils v0.0.17/go.mod h1:pBwjpcrVeuH/no+DiCZWvlhYtCDzd3U0o/hEZKi+eM8=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
@@ -1540,8 +1540,8 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e h1:TsQ7F31D3bUCLeqPT0u+yjp1guoArKaNKmCr22PYgTQ=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1710,8 +1710,9 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d h1:Zu/JngovGLVi6t2J3nmAf3AoTDwuzw85YZ3b9o4yU7s=
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 h1:v6hYoSR9T5oet+pMXwUWkbiVqx/63mlHjefrHmxwfeY=
golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=

@@ -19,7 +19,7 @@ const (
scanPath = "/v1/scan"
statusPath = "/v1/status"
resultsPath = "/v1/results"
prometheusMmeticsPath = "/v1/metrics"
prometheusMetricsPath = "/v1/metrics"
livePath = "/livez"
readyPath = "/readyz"
)
@@ -45,7 +45,7 @@ func SetupHTTPListener() error {
// listen
httpHandler := handlerequestsv1.NewHTTPHandler()

rtr.HandleFunc(prometheusMmeticsPath, httpHandler.Metrics)
rtr.HandleFunc(prometheusMetricsPath, httpHandler.Metrics)
rtr.HandleFunc(scanPath, httpHandler.Scan)
rtr.HandleFunc(statusPath, httpHandler.Status)
rtr.HandleFunc(resultsPath, httpHandler.Results)
@@ -75,7 +75,7 @@ func loadTLSKey(certFile, keyFile string) (*tls.Certificate, error) {

pair, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return nil, fmt.Errorf("filed to load key pair: %v", err)
return nil, fmt.Errorf("failed to load key pair: %v", err)
}
return &pair, nil
}

@@ -1,9 +1,9 @@
<html>
<head>
<title>
Kubscape Website
Kubescape Website
</title>
<h1>Kubscape Website</h1>
<h1>Kubescape Website</h1>
</head>
<body>
<h2>
@@ -11,4 +11,4 @@
<iframe src="https://discordapp.com/widget?id=893048809884643379&theme=dark" width="350" height="500" allowtransparency="true" frameborder="0" sandbox="allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts"></iframe>
</h2>
</body>
</html>
</html>
