diff --git a/.github/workflows/01-golang-lint.yaml b/.github/workflows/01-golang-lint.yaml
new file mode 100644
index 00000000..89af89d5
--- /dev/null
+++ b/.github/workflows/01-golang-lint.yaml
@@ -0,0 +1,54 @@
+name: golangci-lint
+on:
+ push:
+ branches:
+ - dev
+ pull_request:
+ types: [ edited, opened, synchronize, reopened ]
+ branches: [ master, dev ]
+ paths-ignore:
+ - '**.yaml'
+ - '**.md'
+permissions:
+ contents: read
+ # Optional: allow read access to pull request. Use with `only-new-issues` option.
+ pull-requests: read
+jobs:
+ golangci:
+ name: lint
+ runs-on: ubuntu-20.04
+ steps:
+ - uses: actions/setup-go@v3
+ with:
+ go-version: 1.18
+ - uses: actions/checkout@v3
+ with:
+ submodules: recursive
+ - name: Install libgit2
+ run: make libgit2
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v3
+ with:
+ # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
+ version: latest
+
+ # Optional: working directory, useful for monorepos
+ # working-directory: somedir
+
+ # Optional: golangci-lint command line arguments.
+ # args: --issues-exit-code=0
+ args: --timeout 10m --build-tags=static
+ #--new-from-rev dev
+
+ # Optional: show only new issues if it's a pull request. The default value is `false`.
+ only-new-issues: true
+
+ # Optional: if set to true then the all caching functionality will be complete disabled,
+ # takes precedence over all other caching options.
+ # skip-cache: true
+
+ # Optional: if set to true then the action don't cache or restore ~/go/pkg.
+ # skip-pkg-cache: true
+
+ # Optional: if set to true then the action don't cache or restore ~/.cache/go-build.
+ # skip-build-cache: true
diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml
index b3e0ec05..9866022c 100644
--- a/.github/workflows/build.yaml
+++ b/.github/workflows/build.yaml
@@ -37,7 +37,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: 1.18
+ go-version: 1.19
- name: Install MSYS2 & libgit2 (Windows)
shell: cmd
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index ee3cc92b..76d1e74a 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -38,4 +38,4 @@ jobs:
release_name: ${{ inputs.release_name }}
draft: ${{ inputs.draft }}
prerelease: false
-
\ No newline at end of file
+
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
index 2d17c3a4..9129adea 100644
--- a/.github/workflows/test.yaml
+++ b/.github/workflows/test.yaml
@@ -61,7 +61,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
- go-version: 1.18
+ go-version: 1.19
- name: Install MSYS2 & libgit2 (Windows)
shell: cmd
@@ -73,10 +73,10 @@ jobs:
if: matrix.os != 'windows-latest'
- name: Test core pkg
- run: go test -tags=static -v ./...
+ run: go test "-tags=static,gitenabled" -v ./...
- name: Test httphandler pkg
- run: cd httphandler && go test -tags=static -v ./...
+ run: cd httphandler && go test "-tags=static,gitenabled" -v ./...
- name: Build
env:
diff --git a/.gitignore b/.gitignore
index 676bc584..827afa23 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,4 +5,5 @@
*.pyc*
.idea
.history
-ca.srl
\ No newline at end of file
+ca.srl
+*.out
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 00000000..7503c25a
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,57 @@
+linters-settings:
+ govet:
+ check-shadowing: true
+ dupl:
+ threshold: 200
+ goconst:
+ min-len: 3
+ min-occurrences: 2
+ gocognit:
+ min-complexity: 65
+
+linters:
+ enable:
+ - gosec
+ - staticcheck
+ - nolintlint
+ - gofmt
+ - unused
+ - govet
+ - bodyclose
+ - typecheck
+ - goimports
+ - ineffassign
+ - gosimple
+ disable:
+ # temporarily disabled
+ - varcheck
+ - errcheck
+ - dupl
+ - gocritic
+ - gocognit
+ - nakedret
+ - revive
+ - stylecheck
+ - unconvert
+ - unparam
+ #- forbidigo # <- see later
+ # should remain disabled
+ - deadcode # deprecated linter
+ - maligned
+ - lll
+ - gochecknoinits
+ - gochecknoglobals
+issues:
+ exclude-rules:
+ - linters:
+ - revive
+ text: "var-naming"
+ - linters:
+ - revive
+ text: "type name will be used as (.+?) by other packages, and that stutters"
+ - linters:
+ - stylecheck
+ text: "ST1003"
+run:
+ skip-dirs:
+ - git2go
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 2dfc05ba..2b6c96e7 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1,127 +1,3 @@
-# Contributor Covenant Code of Conduct
+## Code of Conduct
-## Our Pledge
-
-We as members, contributors, and leaders pledge to make participation in our
-community a harassment-free experience for everyone, regardless of age, body
-size, visible or invisible disability, ethnicity, sex characteristics, gender
-identity and expression, level of experience, education, socio-economic status,
-nationality, personal appearance, race, religion, or sexual identity
-and orientation.
-
-We pledge to act and interact in ways that contribute to an open, welcoming,
-diverse, inclusive, and healthy community.
-
-## Our Standards
-
-Examples of behavior that contributes to a positive environment for our
-community include:
-
-* Demonstrating empathy and kindness toward other people
-* Being respectful of differing opinions, viewpoints, and experiences
-* Giving and gracefully accepting constructive feedback
-* Accepting responsibility and apologizing to those affected by our mistakes,
- and learning from the experience
-* Focusing on what is best not just for us as individuals, but for the
- overall community
-
-Examples of unacceptable behavior include:
-
-* The use of sexualized language or imagery, and sexual attention or
- advances of any kind
-* Trolling, insulting or derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or email
- address, without their explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
- professional setting
-
-## Enforcement Responsibilities
-
-Community leaders are responsible for clarifying and enforcing our standards of
-acceptable behavior and will take appropriate and fair corrective action in
-response to any behavior that they deem inappropriate, threatening, offensive,
-or harmful.
-
-Community leaders have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions that are
-not aligned to this Code of Conduct, and will communicate reasons for moderation
-decisions when appropriate.
-
-## Scope
-
-This Code of Conduct applies within all community spaces, and also applies when
-an individual is officially representing the community in public spaces.
-Examples of representing our community include using an official e-mail address,
-posting via an official social media account, or acting as an appointed
-representative at an online or offline event.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported to the community leaders responsible for enforcement [here](mailto:ben@armosec.io).
-All complaints will be reviewed and investigated promptly and fairly.
-
-All community leaders are obligated to respect the privacy and security of the
-reporter of any incident.
-
-## Enforcement Guidelines
-
-Community leaders will follow these Community Impact Guidelines in determining
-the consequences for any action they deem in violation of this Code of Conduct:
-
-### 1. Correction
-
-**Community Impact**: Use of inappropriate language or other behavior deemed
-unprofessional or unwelcome in the community.
-
-**Consequence**: A private, written warning from community leaders, providing
-clarity around the nature of the violation and an explanation of why the
-behavior was inappropriate. A public apology may be requested.
-
-### 2. Warning
-
-**Community Impact**: A violation through a single incident or series
-of actions.
-
-**Consequence**: A warning with consequences for continued behavior. No
-interaction with the people involved, including unsolicited interaction with
-those enforcing the Code of Conduct, for a specified period of time. This
-includes avoiding interactions in community spaces as well as external channels
-like social media. Violating these terms may lead to a temporary or
-permanent ban.
-
-### 3. Temporary Ban
-
-**Community Impact**: A serious violation of community standards, including
-sustained inappropriate behavior.
-
-**Consequence**: A temporary ban from any sort of interaction or public
-communication with the community for a specified period of time. No public or
-private interaction with the people involved, including unsolicited interaction
-with those enforcing the Code of Conduct, is allowed during this period.
-Violating these terms may lead to a permanent ban.
-
-### 4. Permanent Ban
-
-**Community Impact**: Demonstrating a pattern of violation of community
-standards, including sustained inappropriate behavior, harassment of an
-individual, or aggression toward or disparagement of classes of individuals.
-
-**Consequence**: A permanent ban from any sort of public interaction within
-the community.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage],
-version 2.0, available at
-https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
-
-Community Impact Guidelines were inspired by [Mozilla's code of conduct
-enforcement ladder](https://github.com/mozilla/diversity).
-
-[homepage]: https://www.contributor-covenant.org
-
-For answers to common questions about this code of conduct, see the FAQ at
-https://www.contributor-covenant.org/faq. Translations are available at
-https://www.contributor-covenant.org/translations.
+The Kubescape project follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 9b21e6d8..54a679da 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -1,10 +1,11 @@
# Maintainers
-The following table lists Kubescape project maintainers
+The following table lists the Kubescape project maintainers:
-| Name | GitHub | Email | Organization | Role | Added/Renewed On |
-| --- | --- | --- | --- | --- | --- |
-| [Ben Hirschberg](https://www.linkedin.com/in/benyamin-ben-hirschberg-66141890) | [@slashben](https://github.com/slashben) | ben@armosec.io | [ARMO](https://www.armosec.io/) | VP R&D | 2021-09-01 |
-| [Rotem Refael](https://www.linkedin.com/in/rotem-refael) | [@rotemamsa](https://github.com/rotemamsa) | rrefael@armosec.io | [ARMO](https://www.armosec.io/) | Team Leader | 2021-10-11 |
-| [David Wertenteil](https://www.linkedin.com/in/david-wertenteil-0ba277b9) | [@dwertent](https://github.com/dwertent) | dwertent@armosec.io | [ARMO](https://www.armosec.io/) | Kubescape CLI Developer | 2021-09-01 |
-| [Bezalel Brandwine](https://www.linkedin.com/in/bezalel-brandwine) | [@Bezbran](https://github.com/Bezbran) | bbrandwine@armosec.io | [ARMO](https://www.armosec.io/) | Kubescape SaaS Developer | 2021-09-01 |
+| Name | GitHub | Organization | Added/Renewed On |
+| --- | --- | --- | --- |
+| [Ben Hirschberg](https://www.linkedin.com/in/benyamin-ben-hirschberg-66141890) | [@slashben](https://github.com/slashben) | [ARMO](https://www.armosec.io/) | 2021-09-01 |
+| [Rotem Refael](https://www.linkedin.com/in/rotem-refael) | [@rotemamsa](https://github.com/rotemamsa) | [ARMO](https://www.armosec.io/) | 2021-10-11 |
+| [David Wertenteil](https://www.linkedin.com/in/david-wertenteil-0ba277b9) | [@dwertent](https://github.com/dwertent) | [ARMO](https://www.armosec.io/) | 2021-09-01 |
+| [Bezalel Brandwine](https://www.linkedin.com/in/bezalel-brandwine) | [@Bezbran](https://github.com/Bezbran) | [ARMO](https://www.armosec.io/) | 2021-09-01 |
+| [Craig Box](https://www.linkedin.com/in/crbnz/) | [@craigbox](https://github.com/craigbox) | [ARMO](https://www.armosec.io/) | 2022-10-31 |
diff --git a/Makefile b/Makefile
index f45b5953..4801e37d 100644
--- a/Makefile
+++ b/Makefile
@@ -11,7 +11,7 @@ libgit2:
cd git2go; make install-static
# go build tags
-TAGS = "static"
+TAGS = "gitenabled,static"
build:
go build -v -tags=$(TAGS) .
diff --git a/README.md b/README.md
index e7f87d89..b3c2e68b 100644
--- a/README.md
+++ b/README.md
@@ -11,11 +11,11 @@
:sunglasses: [Want to contribute?](#being-a-part-of-the-team) :innocent:
-Kubescape is a K8s open-source tool providing a Kubernetes single pane of glass, including risk analysis, security compliance, RBAC visualizer, and image vulnerability scanning.
-Kubescape scans K8s clusters, YAML files, and HELM charts, detecting misconfigurations according to multiple frameworks (such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo/?utm_source=github&utm_medium=repository), [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/)), software vulnerabilities, and RBAC (role-based-access-control) violations at early stages of the CI/CD pipeline, calculates risk score instantly and shows risk trends over time.
+Kubescape is an open-source Kubernetes security platform. A single pane of glass access to view risk analysis, security compliance, RBAC visualization, and image vulnerability scanning.
+Kubescape scans Kubernetes clusters, YAML files, and Helm charts. It detects misconfigurations according to multiple frameworks (such as [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo/?utm_source=github&utm_medium=repository), [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) and [CIS Benchmark](https://www.armosec.io/blog/cis-kubernetes-benchmark-framework-scanning-tools-comparison/?utm_source=github&utm_medium=repository)). Kubescape also helps you find software vulnerabilities, and RBAC (role-based-access-control) violations at early stages of the CI/CD pipeline. It calculates your risk score instantly and shows risk trends over time.
-It has become one of the fastest-growing Kubernetes tools among developers due to its easy-to-use CLI interface, flexible output formats, and automated scanning capabilities, saving Kubernetes users and admins precious time, effort, and resources.
-Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI, Github workflows, Prometheus, and Slack, and supports multi-cloud K8s deployments like EKS, GKE, and AKS.
+Kubescape is one of the fastest-growing Kubernetes security tools among developers. It saves Kubernetes users and admins precious time, effort, and resources with its easy-to-use CLI interface, flexible output formats, and automated scanning capabilities.
+Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI, Github workflows, Prometheus, and Slack. It supports multi-cloud Kubernetes deployments like EKS, GKE, and AKS.
@@ -72,12 +72,14 @@ kubescape scan --enable-host-scan --verbose
# Being a part of the team
## Community
-We invite you to our community! We are excited about this project and want to return the love we get.
+You are invited to our community! We are excited about this project and want to return the love we get.
-We hold community meetings in [Zoom](https://us02web.zoom.us/j/84020231442) on the first Tuesday of every month at 14:00 GMT! :sunglasses:
+We hold community meetings on [Zoom](https://us02web.zoom.us/j/84020231442) on the first Tuesday of every month at 14:00 GMT! :sunglasses:
+
+Please make sure that you follow our [Code Of Conduct](https://github.com/kubescape/kubescape/blob/master/CODE_OF_CONDUCT.md).
## Contributions
-[Want to contribute?](https://github.com/kubescape/kubescape/blob/master/CONTRIBUTING.md) Want to discuss something? Have an issue? Please make sure that you follow our [Code Of Conduct](https://github.com/kubescape/kubescape/blob/master/CODE_OF_CONDUCT.md) .
+Want to discuss something? Have an issue? [Want to contribute?](https://github.com/kubescape/kubescape/blob/master/CONTRIBUTING.md)
* Feel free to pick a task from the [issues](https://github.com/kubescape/kubescape/issues?q=is%3Aissue+is%3Aopen+label%3A%22open+for+contribution%22), [roadmap](docs/roadmap.md) or suggest a feature of your own. [Contact us](MAINTAINERS.md) directly for more information :)
* [Open an issue](https://github.com/kubescape/kubescape/issues/new/choose) , we are trying to respond within 48 hours
@@ -264,7 +266,7 @@ kubescape scan --format prometheus
kubescape scan --format html --output results.html
```
-#### Scan with exceptions, objects with exceptions will be presented as `exclude` and not `fail`
+#### Scan with exceptions. Objects with exceptions will be presented as `exclude` and not `fail`
[Full documentation](examples/exceptions/README.md)
```
kubescape scan --exceptions examples/exceptions/exclude-kube-namespaces.json
@@ -276,13 +278,13 @@ kubescape scan
```
> Kubescape will load the default value file
-#### Scan Kustomize Directory
+#### Scan a Kustomize Directory
```
kubescape scan
```
-> Kubescape will generate Kubernetes Yaml Objects using 'Kustomize' file and scans them for security.
+> Kubescape will generate Kubernetes YAML objects using a 'Kustomize' file and scan them for security.
-### Offline/Air-gaped Environment Support
+### Offline/Air-gapped Environment Support
[Video tutorial](https://youtu.be/IGXL9s37smM)
@@ -326,7 +328,7 @@ kubescape scan framework nsa --use-from /path/nsa.json
 
-Scan the YAML files while writing them using the [vs code extension](https://github.com/armosec/vscode-kubescape/blob/master/README.md)
+Scan the YAML files while writing them using the [VS Code extension](https://github.com/armosec/vscode-kubescape/blob/master/README.md)
## Lens Extension
@@ -408,15 +410,15 @@ View Kubescape scan results directly in [Lens IDE](https://k8slens.dev/) using k
Instructions to use the playground
-* Apply changes you wish to make to the kubescape directory using text editors like `Vim`.
+* Apply changes you wish to make to the Kubescape directory using text editors like `Vim`.
* [Build on Linux](https://github.com/kubescape/kubescape#build-on-linuxmacos)
-* Now, you can use Kubescape just like a normal user. Instead of using `kubescape`, use `./kubescape`. (Make sure you are inside kubescape directory because the command will execute the binary named `kubescape` in `kubescape directory`)
+* Now, you can use Kubescape like a regular user. Instead of using `kubescape`, use `./kubescape`. (Make sure you are in the Kubescape directory, because the command will execute the binary named `kubescape` in the `kubescape` directory.)
-## VS code configuration samples
+## VS Code configuration samples
-You can use the sample files below to setup your VS code environment for building and debugging purposes.
+You can use the sample files below to set up your VS Code environment for building and debugging purposes.
.vscode/settings.json
@@ -463,11 +465,11 @@ You can use the sample files below to setup your VS code environment for buildin
## Technology
Kubescape is based on the [OPA engine](https://github.com/open-policy-agent/opa) and ARMO's posture controls.
-The tools retrieve Kubernetes objects from the API server and run a set of [rego's snippets](https://www.openpolicyagent.org/docs/latest/policy-language/) developed by [ARMO](https://www.armosec.io?utm_source=github&utm_medium=repository).
+The tools retrieve Kubernetes objects from the API server and run a set of [Rego snippets](https://www.openpolicyagent.org/docs/latest/policy-language/) developed by [ARMO](https://www.armosec.io?utm_source=github&utm_medium=repository).
-The results by default are printed in a pretty "console friendly" manner, but they can be retrieved in JSON format for further processing.
+The results by default are printed in a "console friendly" manner, but they can be retrieved in JSON format for further processing.
-Kubescape is an open source project, we welcome your feedback and ideas for improvement. We’re also aiming to collaborate with the Kubernetes community to help make the tests more robust and complete as Kubernetes develops.
+Kubescape is an open source project; we welcome your feedback and ideas for improvement. We are part of the Kubernetes community and aim to make the tests more robust and complete as Kubernetes develops.
## Thanks to all the contributors ❤️
diff --git a/build.py b/build.py
index 9ddd46bc..f2f0ade5 100644
--- a/build.py
+++ b/build.py
@@ -40,7 +40,7 @@ def main():
client_var = "github.com/kubescape/kubescape/v2/core/cautils.Client"
client_name = os.getenv("CLIENT")
-
+
# Create build directory
build_dir = get_build_dir()
@@ -56,15 +56,15 @@ def main():
ldflags += " -X {}={}".format(build_url, release_version)
if client_name:
ldflags += " -X {}={}".format(client_var, client_name)
-
- build_command = ["go", "build", "-buildmode=pie", "-tags=static", "-o", ks_file, "-ldflags" ,ldflags]
+
+ build_command = ["go", "build", "-buildmode=pie", "-tags=static,gitenabled", "-o", ks_file, "-ldflags" ,ldflags]
print("Building kubescape and saving here: {}".format(ks_file))
print("Build command: {}".format(" ".join(build_command)))
status = subprocess.call(build_command)
check_status(status, "Failed to build kubescape")
-
+
sha256 = hashlib.sha256()
with open(ks_file, "rb") as kube:
sha256.update(kube.read())
@@ -74,7 +74,7 @@ def main():
kube_sha.write(sha256.hexdigest())
print("Build Done")
-
-
+
+
if __name__ == "__main__":
main()
diff --git a/build/Dockerfile b/build/Dockerfile
index 360efa76..db5ac948 100644
--- a/build/Dockerfile
+++ b/build/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.18-alpine as builder
+FROM golang:1.19-alpine as builder
ARG image_version
ARG client
diff --git a/cmd/completion/completion.go b/cmd/completion/completion.go
index 3a7ac1a5..453cb12b 100644
--- a/cmd/completion/completion.go
+++ b/cmd/completion/completion.go
@@ -9,11 +9,11 @@ import (
var completionCmdExamples = `
- # Enable BASH shell autocompletion
- $ source <(kubescape completion bash)
+ # Enable BASH shell autocompletion
+ $ source <(kubescape completion bash)
$ echo 'source <(kubescape completion bash)' >> ~/.bashrc
- # Enable ZSH shell autocompletion
+ # Enable ZSH shell autocompletion
$ source <(kubectl completion zsh)
$ echo 'source <(kubectl completion zsh)' >> "${fpath[1]}/_kubectl"
@@ -27,7 +27,7 @@ func GetCompletionCmd() *cobra.Command {
Example: completionCmdExamples,
DisableFlagsInUseLine: true,
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
- Args: cobra.ExactValidArgs(1),
+ Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
Run: func(cmd *cobra.Command, args []string) {
switch strings.ToLower(args[0]) {
case "bash":
diff --git a/cmd/download/download.go b/cmd/download/download.go
index ad730154..3c1519ec 100644
--- a/cmd/download/download.go
+++ b/cmd/download/download.go
@@ -24,8 +24,8 @@ var (
# Download the NSA framework. Run 'kubescape list frameworks' for all frameworks names
kubescape download framework nsa
- # Download the "HostPath mount" control. Run 'kubescape list controls' for all controls names
- kubescape download control "HostPath mount"
+ # Download the "C-0001" control. Run 'kubescape list controls --id' for all controls ids
+ kubescape download control "C-0001"
# Download the "C-0001" control. Run 'kubescape list controls --id' for all controls ids
kubescape download control C-0001
@@ -70,7 +70,9 @@ func GeDownloadCmd(ks meta.IKubescape) *cobra.Command {
}
downloadInfo.Target = args[0]
if len(args) >= 2 {
- downloadInfo.Name = args[1]
+
+ downloadInfo.Identifier = args[1]
+
}
if err := ks.Download(&downloadInfo); err != nil {
logger.L().Fatal(err.Error())
diff --git a/cmd/scan/framework.go b/cmd/scan/framework.go
index edb3bb55..ba11a3ff 100644
--- a/cmd/scan/framework.go
+++ b/cmd/scan/framework.go
@@ -16,7 +16,6 @@ import (
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/meta"
- "github.com/enescakir/emoji"
"github.com/spf13/cobra"
)
@@ -113,7 +112,7 @@ func getFrameworkCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comm
logger.L().Fatal(err.Error())
}
if !scanInfo.VerboseMode {
- cautils.SimpleDisplay(os.Stderr, "%s Run with '--verbose'/'-v' flag for detailed resources view\n\n", emoji.Detective)
+ cautils.SimpleDisplay(os.Stderr, "Run with '--verbose'/'-v' flag for detailed resources view\n\n")
}
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
logger.L().Fatal("scan risk-score is above permitted threshold", helpers.String("risk-score", fmt.Sprintf("%.2f", results.GetRiskScore())), helpers.String("fail-threshold", fmt.Sprintf("%.2f", scanInfo.FailThreshold)))
diff --git a/cmd/scan/scan.go b/cmd/scan/scan.go
index b7146b8e..ac87f2e7 100644
--- a/cmd/scan/scan.go
+++ b/cmd/scan/scan.go
@@ -58,6 +58,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
},
PreRun: func(cmd *cobra.Command, args []string) {
k8sinterface.SetClusterContextName(scanInfo.KubeContext)
+
},
PostRun: func(cmd *cobra.Command, args []string) {
// TODO - revert context
@@ -65,6 +66,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
}
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
+ // scanCmd.PersistentFlags().BoolVar(&scanInfo.CreateAccount, "create-account", false, "Create a Kubescape SaaS account ID account ID is not found in cache. After creating the account, the account ID will be saved in cache. In addition, the scanning results will be uploaded to the Kubescape SaaS")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
scanCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "kube-context", "", "", "Kube context. Default will use the current-context")
@@ -76,7 +78,7 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
scanCmd.PersistentFlags().Float32VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
scanCmd.PersistentFlags().StringVar(&scanInfo.FailThresholdSeverity, "severity-threshold", "", "Severity threshold is the severity of failed controls at which the command fails and returns exit code 1")
- scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer", "json", "junit", "prometheus", "pdf", "html", "sarif"`)
+ scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "", `Output file format. Supported formats: "pretty-printer", "json", "junit", "prometheus", "pdf", "html", "sarif"`)
scanCmd.PersistentFlags().StringVar(&scanInfo.IncludeNamespaces, "include-namespaces", "", "scan specific namespaces. e.g: --include-namespaces ns-a,ns-b")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to configured backend.")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
@@ -89,12 +91,14 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
scanCmd.PersistentFlags().StringVar(&scanInfo.CustomClusterName, "cluster-name", "", "Set the custom name of the cluster. Not same as the kube-context flag")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Submit the scan results to Kubescape SaaS where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.OmitRawResources, "omit-raw-resources", "", false, "Omit raw resources from the output. By default the raw resources are included in the output")
+ scanCmd.PersistentFlags().BoolVarP(&scanInfo.PrintAttackTree, "print-attack-tree", "", false, "Print attack tree")
scanCmd.PersistentFlags().MarkDeprecated("silent", "use '--logger' flag instead. Flag will be removed at 1.May.2022")
// hidden flags
scanCmd.PersistentFlags().MarkHidden("host-scan-yaml") // this flag should be used very cautiously. We prefer users will not use it at all unless the DaemonSet can not run pods on the nodes
scanCmd.PersistentFlags().MarkHidden("omit-raw-resources")
+ scanCmd.PersistentFlags().MarkHidden("print-attack-tree")
// Retrieve --kubeconfig flag from https://github.com/kubernetes/kubectl/blob/master/pkg/cmd/cmd.go
scanCmd.PersistentFlags().AddGoFlag(flag.Lookup("kubeconfig"))
diff --git a/cmd/scan/validators_test.go b/cmd/scan/validators_test.go
index 52423173..b0b5f682 100644
--- a/cmd/scan/validators_test.go
+++ b/cmd/scan/validators_test.go
@@ -1,8 +1,9 @@
package scan
import (
- "github.com/kubescape/kubescape/v2/core/cautils"
"testing"
+
+ "github.com/kubescape/kubescape/v2/core/cautils"
)
// Test_validateControlScanInfo tests how scan info is validated for the `scan control` command
diff --git a/cmd/version/git_native_disabled.go b/cmd/version/git_native_disabled.go
new file mode 100644
index 00000000..c0c9bd29
--- /dev/null
+++ b/cmd/version/git_native_disabled.go
@@ -0,0 +1,7 @@
+//go:build !gitenabled
+
+package version
+
+func isGitEnabled() bool {
+ return false
+}
diff --git a/cmd/version/git_native_enabled.go b/cmd/version/git_native_enabled.go
new file mode 100644
index 00000000..518404f9
--- /dev/null
+++ b/cmd/version/git_native_enabled.go
@@ -0,0 +1,7 @@
+//go:build gitenabled
+
+package version
+
+func isGitEnabled() bool {
+ return true
+}
diff --git a/cmd/version/version.go b/cmd/version/version.go
index e5cc8b13..e19f1372 100644
--- a/cmd/version/version.go
+++ b/cmd/version/version.go
@@ -16,7 +16,11 @@ func GetVersionCmd() *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
v := cautils.NewIVersionCheckHandler()
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, "", "", "version"))
- fmt.Fprintln(os.Stdout, "Your current version is: "+cautils.BuildNumber)
+ fmt.Fprintf(os.Stdout,
+ "Your current version is: %s [git enabled in build: %t]\n",
+ cautils.BuildNumber,
+ isGitEnabled(),
+ )
return nil
},
}
diff --git a/core/cautils/customerloader.go b/core/cautils/customerloader.go
index 733fb216..f45c9d8f 100644
--- a/core/cautils/customerloader.go
+++ b/core/cautils/customerloader.go
@@ -470,10 +470,7 @@ func (c *ClusterConfig) updateConfigMap() error {
}
func updateConfigFile(configObj *ConfigObj) error {
- if err := os.WriteFile(ConfigFileFullPath(), configObj.Config(), 0664); err != nil {
- return err
- }
- return nil
+ return os.WriteFile(ConfigFileFullPath(), configObj.Config(), 0664) //nolint:gosec
}
func (c *ClusterConfig) updateConfigData(configMap *corev1.ConfigMap) {
diff --git a/core/cautils/datastructures.go b/core/cautils/datastructures.go
index 06a1c072..16aa1af0 100644
--- a/core/cautils/datastructures.go
+++ b/core/cautils/datastructures.go
@@ -5,6 +5,7 @@ import (
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/opa-utils/reporthandling"
apis "github.com/kubescape/opa-utils/reporthandling/apis"
+ "github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
"github.com/kubescape/opa-utils/reporthandling/results/v1/prioritization"
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
@@ -22,8 +23,10 @@ type OPASessionObj struct {
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[]
ResourceSource map[string]reporthandling.Source // resources sources, map[]
ResourcesPrioritized map[string]prioritization.PrioritizedResource // resources prioritization information, map[]
- Report *reporthandlingv2.PostureReport // scan results v2 - Remove
- RegoInputData RegoInputData // input passed to rego for scanning. map[][]
+ ResourceAttackTracks map[string]v1alpha1.IAttackTrack // resources attack tracks, map[]
+ AttackTracks map[string]v1alpha1.IAttackTrack
+ Report *reporthandlingv2.PostureReport // scan results v2 - Remove
+ RegoInputData RegoInputData // input passed to rego for scanning. map[][]
Metadata *reporthandlingv2.Metadata
InfoMap map[string]apis.StatusInfo // Map errors of resources to StatusInfo
ResourceToControlsMap map[string][]string // map[] = []
diff --git a/core/cautils/getter/downloadreleasedpolicy.go b/core/cautils/getter/downloadreleasedpolicy.go
index d70fbb95..e8e445c1 100644
--- a/core/cautils/getter/downloadreleasedpolicy.go
+++ b/core/cautils/getter/downloadreleasedpolicy.go
@@ -25,11 +25,11 @@ func NewDownloadReleasedPolicy() *DownloadReleasedPolicy {
}
}
-func (drp *DownloadReleasedPolicy) GetControl(policyName string) (*reporthandling.Control, error) {
+func (drp *DownloadReleasedPolicy) GetControl(ID string) (*reporthandling.Control, error) {
var control *reporthandling.Control
var err error
- control, err = drp.gs.GetOPAControl(policyName)
+ control, err = drp.gs.GetOPAControlByID(ID)
if err != nil {
return nil, err
}
diff --git a/core/cautils/getter/getpolicies.go b/core/cautils/getter/getpolicies.go
index 263ae8af..082349ec 100644
--- a/core/cautils/getter/getpolicies.go
+++ b/core/cautils/getter/getpolicies.go
@@ -9,7 +9,7 @@ import (
type IPolicyGetter interface {
GetFramework(name string) (*reporthandling.Framework, error)
GetFrameworks() ([]reporthandling.Framework, error)
- GetControl(name string) (*reporthandling.Control, error)
+ GetControl(ID string) (*reporthandling.Control, error)
ListFrameworks() ([]string, error)
ListControls() ([]string, error)
diff --git a/core/cautils/getter/getpoliciesutils.go b/core/cautils/getter/getpoliciesutils.go
index 7d822eb3..0069cbcf 100644
--- a/core/cautils/getter/getpoliciesutils.go
+++ b/core/cautils/getter/getpoliciesutils.go
@@ -21,18 +21,19 @@ func SaveInFile(policy interface{}, pathStr string) error {
if err != nil {
return err
}
- err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
+ err = os.WriteFile(pathStr, encodedData, 0644) //nolint:gosec
if err != nil {
if os.IsNotExist(err) {
pathDir := path.Dir(pathStr)
- if err := os.Mkdir(pathDir, 0744); err != nil {
- return err
+ // pathDir could contain subdirectories
+ if erm := os.MkdirAll(pathDir, 0755); erm != nil {
+ return erm
}
} else {
return err
}
- err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
+ err = os.WriteFile(pathStr, encodedData, 0644) //nolint:gosec
if err != nil {
return err
}
diff --git a/core/cautils/getter/kscloudapi.go b/core/cautils/getter/kscloudapi.go
index c3672b3f..b01ab640 100644
--- a/core/cautils/getter/kscloudapi.go
+++ b/core/cautils/getter/kscloudapi.go
@@ -192,7 +192,7 @@ func (api *KSCloudAPI) GetFrameworks() ([]reporthandling.Framework, error) {
return frameworks, err
}
-func (api *KSCloudAPI) GetControl(policyName string) (*reporthandling.Control, error) {
+func (api *KSCloudAPI) GetControl(ID string) (*reporthandling.Control, error) {
return nil, fmt.Errorf("control api is not public")
}
diff --git a/core/cautils/getter/loadpolicy.go b/core/cautils/getter/loadpolicy.go
index 66700ab6..975dfc33 100644
--- a/core/cautils/getter/loadpolicy.go
+++ b/core/cautils/getter/loadpolicy.go
@@ -36,11 +36,11 @@ func NewLoadPolicy(filePaths []string) *LoadPolicy {
}
}
-// Return control from file
-func (lp *LoadPolicy) GetControl(controlName string) (*reporthandling.Control, error) {
-
+// GetControl returns a control from the policy file.
+func (lp *LoadPolicy) GetControl(controlID string) (*reporthandling.Control, error) {
control := &reporthandling.Control{}
filePath := lp.filePath()
+
f, err := os.ReadFile(filePath)
if err != nil {
return nil, err
@@ -49,43 +49,51 @@ func (lp *LoadPolicy) GetControl(controlName string) (*reporthandling.Control, e
if err = json.Unmarshal(f, control); err != nil {
return control, err
}
- if controlName != "" && !strings.EqualFold(controlName, control.Name) && !strings.EqualFold(controlName, control.ControlID) {
- framework, err := lp.GetFramework(control.Name)
- if err != nil {
- return nil, fmt.Errorf("control from file not matching")
- } else {
- for _, ctrl := range framework.Controls {
- if strings.EqualFold(ctrl.Name, controlName) || strings.EqualFold(ctrl.ControlID, controlName) {
- control = &ctrl
- break
- }
- }
+
+ if controlID == "" || strings.EqualFold(controlID, control.ControlID) {
+ return control, nil
+ }
+
+ framework, err := lp.GetFramework(control.Name)
+ if err != nil {
+ return nil, fmt.Errorf("control from file not matching")
+ }
+
+ for _, toPin := range framework.Controls {
+ ctrl := toPin
+ if strings.EqualFold(ctrl.ControlID, controlID) {
+ control = &ctrl
+
+ break
}
}
- return control, err
+
+ return control, nil
}
+// GetFramework retrieves a framework configuration from the policy.
func (lp *LoadPolicy) GetFramework(frameworkName string) (*reporthandling.Framework, error) {
- var framework reporthandling.Framework
- var err error
+ if frameworkName == "" {
+ return &reporthandling.Framework{}, nil
+ }
+
for _, filePath := range lp.filePaths {
- framework = reporthandling.Framework{}
f, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}
- if err = json.Unmarshal(f, &framework); err != nil {
+
+ var fw reporthandling.Framework
+ if err = json.Unmarshal(f, &fw); err != nil {
return nil, err
}
- if strings.EqualFold(frameworkName, framework.Name) {
- break
+
+ if strings.EqualFold(frameworkName, fw.Name) {
+ return &fw, nil
}
}
- if frameworkName != "" && !strings.EqualFold(frameworkName, framework.Name) {
- return nil, fmt.Errorf("framework from file not matching")
- }
- return &framework, err
+ return nil, fmt.Errorf("framework from file not matching")
}
func (lp *LoadPolicy) GetFrameworks() ([]reporthandling.Framework, error) {
@@ -97,6 +105,7 @@ func (lp *LoadPolicy) GetFrameworks() ([]reporthandling.Framework, error) {
func (lp *LoadPolicy) ListFrameworks() ([]string, error) {
fwNames := []string{}
framework := &reporthandling.Framework{}
+
for _, f := range lp.filePaths {
file, err := os.ReadFile(f)
if err == nil {
@@ -107,6 +116,7 @@ func (lp *LoadPolicy) ListFrameworks() ([]string, error) {
}
}
}
+
return fwNames, nil
}
diff --git a/core/cautils/getter/loadpolicy_test.go b/core/cautils/getter/loadpolicy_test.go
index a29eda7d..af31e74a 100644
--- a/core/cautils/getter/loadpolicy_test.go
+++ b/core/cautils/getter/loadpolicy_test.go
@@ -1,13 +1,176 @@
package getter
import (
+ "fmt"
"path/filepath"
-)
+ "testing"
-var mockFrameworkBasePath = filepath.Join("examples", "mocks", "frameworks")
+ "github.com/stretchr/testify/require"
+)
func MockNewLoadPolicy() *LoadPolicy {
return &LoadPolicy{
filePaths: []string{""},
}
}
+
+func testFrameworkFile(framework string) string {
+ return filepath.Join(".", "testdata", fmt.Sprintf("%s.json", framework))
+}
+
+func TestLoadPolicy(t *testing.T) {
+ t.Parallel()
+
+ const testFramework = "MITRE"
+
+ t.Run("with GetFramework", func(t *testing.T) {
+ t.Run("should retrieve named framework", func(t *testing.T) {
+ t.Parallel()
+
+ p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
+ fw, err := p.GetFramework(testFramework)
+ require.NoError(t, err)
+ require.NotNil(t, fw)
+
+ require.Equal(t, testFramework, fw.Name)
+ })
+
+ t.Run("should fail to retrieve framework", func(t *testing.T) {
+ t.Parallel()
+
+ p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
+ fw, err := p.GetFramework("wrong")
+ require.Error(t, err)
+ require.Nil(t, fw)
+ })
+
+ t.Run("edge case: should return empty framework", func(t *testing.T) {
+ // NOTE(fredbi): this edge case corresponds to the original working of GetFramework.
+ // IMHO, this is a bad request call and it should return an error.
+ t.Parallel()
+
+ p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
+ fw, err := p.GetFramework("")
+ require.NoError(t, err)
+ require.NotNil(t, fw)
+ require.Empty(t, *fw)
+ })
+
+ t.Run("edge case: corrupted json", func(t *testing.T) {
+ t.Parallel()
+
+ const invalidFramework = "invalid-fw"
+ p := NewLoadPolicy([]string{testFrameworkFile(invalidFramework)})
+ fw, err := p.GetFramework(invalidFramework)
+ require.Error(t, err)
+ require.Nil(t, fw)
+ })
+
+ t.Run("edge case: missing json", func(t *testing.T) {
+ t.Parallel()
+
+ const invalidFramework = "nowheretobefound"
+ p := NewLoadPolicy([]string{testFrameworkFile(invalidFramework)})
+ _, err := p.GetFramework(invalidFramework)
+ require.Error(t, err)
+ })
+ })
+
+ t.Run("with GetControl", func(t *testing.T) {
+ t.Run("should retrieve named control", func(t *testing.T) {
+ t.Parallel()
+
+ const (
+ testControl = "C-0053"
+ expectedControlName = "Access container service account"
+ )
+ p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
+ ctrl, err := p.GetControl(testControl)
+ require.NoError(t, err)
+ require.NotNil(t, ctrl)
+
+ require.Equal(t, testControl, ctrl.ControlID)
+ require.Equal(t, expectedControlName, ctrl.Name)
+ })
+
+ t.Run("should fail to retrieve named control", func(t *testing.T) {
+ // NOTE(fredbi): IMHO, this case should bubble up an error
+ t.Parallel()
+
+ const testControl = "wrong"
+ p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
+ ctrl, err := p.GetControl(testControl)
+ require.NoError(t, err)
+ require.NotNil(t, ctrl) // no error, but we still don't get the requested control...
+ })
+
+ t.Run("edge case: corrupted json", func(t *testing.T) {
+ t.Parallel()
+
+ const invalidControl = "invalid-fw"
+ p := NewLoadPolicy([]string{testFrameworkFile(invalidControl)})
+ _, err := p.GetControl(invalidControl)
+ require.Error(t, err)
+ })
+
+ t.Run("edge case: missing json", func(t *testing.T) {
+ t.Parallel()
+
+ const invalidControl = "nowheretobefound"
+ p := NewLoadPolicy([]string{testFrameworkFile(invalidControl)})
+ _, err := p.GetControl(invalidControl)
+ require.Error(t, err)
+ })
+
+ t.Run("edge case: should return empty control", func(t *testing.T) {
+ // NOTE(fredbi): this edge case corresponds to the original working of GetControl.
+ // IMHO, this is a bad request call and it should return an error.
+ t.Parallel()
+
+ p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
+ ctrl, err := p.GetControl("")
+ require.NoError(t, err)
+ require.NotNil(t, ctrl)
+ })
+ })
+
+ t.Run("ListFrameworks should return all frameworks in the policy path", func(t *testing.T) {
+ t.Parallel()
+
+ const extraFramework = "NSA"
+ p := NewLoadPolicy([]string{
+ testFrameworkFile(testFramework),
+ testFrameworkFile(extraFramework),
+ })
+ fws, err := p.ListFrameworks()
+ require.NoError(t, err)
+ require.Len(t, fws, 2)
+
+ require.Equal(t, testFramework, fws[0])
+ require.Equal(t, extraFramework, fws[1])
+ })
+
+ t.Run("edge case: policy without path", func(t *testing.T) {
+ t.Parallel()
+
+ p := NewLoadPolicy([]string{})
+ require.Empty(t, p.filePath())
+ })
+
+ t.Run("GetFrameworks is currently stubbed", func(t *testing.T) {
+ t.Parallel()
+
+ p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
+ fws, err := p.GetFrameworks()
+ require.NoError(t, err)
+ require.Empty(t, fws)
+ })
+
+ t.Run("ListControls is currently unsupported", func(t *testing.T) {
+ t.Parallel()
+
+ p := NewLoadPolicy([]string{testFrameworkFile(testFramework)})
+ _, err := p.ListControls()
+ require.Error(t, err)
+ })
+}
diff --git a/core/cautils/getter/testdata/MITRE.json b/core/cautils/getter/testdata/MITRE.json
new file mode 100644
index 00000000..50a6ed31
--- /dev/null
+++ b/core/cautils/getter/testdata/MITRE.json
@@ -0,0 +1,2832 @@
+{
+ "guid": "",
+ "name": "MITRE",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "description": "Testing MITRE for Kubernetes as suggested by microsoft in https://www.microsoft.com/security/blog/wp-content/uploads/2020/04/k8s-matrix.png",
+ "controls": [
+ {
+ "guid": "",
+ "name": "Access container service account",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Credential access",
+ "Impact - K8s API access"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "compliance",
+ "security-impact"
+ ],
+ "microsoftMitreColumns": [
+ "Credential access"
+ ],
+ "rbacQuery": "Container service account mapping"
+ },
+ "id": "C-0053",
+ "controlID": "C-0053",
+ "creationTime": "",
+ "description": "Attackers who obtain access to a pod can use its SA token to communicate with KubeAPI server. All PODs with SA token mounted (if such token has a Role or a ClusterRole binding) are considerred potentially dangerous.",
+ "remediation": "Verify that RBAC is enabled. Follow the least privilege principle and ensure that only necessary PODs have SA token mounted into them.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "access-container-service-account",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account",
+ "useUntilKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# Returns for each Pod, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n \n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [\"\"],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := 
{\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n# Returns for each Pod, what are the permission of its service account\n\n deny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n pods := [pod | pod=input[_]; pod.kind ==\"Pod\"]\n pod := pods[_]\n pod.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, pod)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has the following permissions in the cluster: %v\", [pod.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, pod]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n \n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n 
wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n 
\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Workloads, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\n wl.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has the following permissions in the cluster: %v\", [wl.kind, wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n\n### ---------------- #####\n\n\n# Returns for each Cronjob, what are the permission of its service account\n\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == 
\"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n\n# Returns for each Cronjob, what are the permission of its service account\ndeny[msga] {\n serviceAccounts := [serviceaccount | serviceaccount= input[_]; serviceaccount.kind == \"ServiceAccount\"]\n 
serviceaccount := serviceAccounts[_]\n serviceAccountName := serviceaccount.metadata.name\n\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\twl.spec.jobTemplate.spec.template.spec.serviceAccountName == serviceAccountName\n\n not isNotAutoMount(serviceaccount, wl.spec.jobTemplate.spec.template)\n \n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == serviceAccountName\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Cronjob: %v has the following permissions in the cluster: %v\", [wl.metadata.name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [rolebinding, role, wl]\n\t\t}\n\t}\n}\n\n# ===============================================================\n\nisNotAutoMount(serviceaccount, pod) {\n pod.spec.automountServiceAccountToken == false\n}\nisNotAutoMount(serviceaccount, pod) {\n serviceaccount.automountServiceAccountToken == false\n not pod.spec[\"automountServiceAccountToken\"]\n}\n\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod",
+ "ServiceAccount"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ },
+ {
+ "apiGroups": [
+ "rbac.authorization.k8s.io"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "RoleBinding",
+ "ClusterRoleBinding",
+ "Role",
+ "ClusterRole"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which service accounts can be used to access other resources in the cluster",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "access-container-service-account-v1",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Credential Access::Access container service account, Lateral Movement::Container service account",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n service_account_name := service_account.metadata.name\n\n not saTokenNotAutoMount(service_account)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == service_account_name\n rolesubject.namespace == service_account.metadata.namespace\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n savector = {\"name\": service_account.metadata.name,\n\t\t\t\t\"namespace\": service_account.metadata.namespace,\n\t\t\t\t\"kind\": service_account.kind,\n\t\t\t\t\"relatedObjects\": [role, rolebinding]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster: %v\", [service_account_name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": savector\n\t\t}\n\t}\n}\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n service_account_name := service_account.metadata.name\n\n not saTokenNotAutoMount(service_account)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == service_account_name\n rolesubject.namespace == service_account.metadata.namespace\n\n roles := [role | role = input[_]; role.kind 
== \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n savector = {\"name\": service_account.metadata.name,\n\t\t\t\t\"namespace\": service_account.metadata.namespace,\n\t\t\t\t\"kind\": service_account.kind,\n\t\t\t\t\"relatedObjects\": [role, rolebinding]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster: %v\", [service_account_name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": savector\n\t\t}\n\t}\n}\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n service_account_name := service_account.metadata.name\n\n not saTokenNotAutoMount(service_account)\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == service_account_name\n rolesubject.namespace == service_account.metadata.namespace\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n savector = {\"name\": service_account.metadata.name,\n\t\t\t\t\"namespace\": service_account.metadata.namespace,\n\t\t\t\t\"kind\": service_account.kind,\n\t\t\t\t\"relatedObjects\": [role, rolebinding]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster: %v\", [service_account_name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [],\n \"fixPaths\":[],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": 
savector\n\t\t}\n\t}\n}\n\n# ===============================================================\n\nsaTokenNotAutoMount(service_account) {\n service_account.automountServiceAccountToken == false\n}\n\n",
+ "resourceEnumerator": "package armo_builtins\n\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n service_account_name := service_account.metadata.name\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == service_account_name\n rolesubject.namespace == service_account.metadata.namespace\n\n roles := [role | role = input[_]; role.kind == \"Role\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n savector = {\"name\": service_account.metadata.name,\n\t\t\t\t\"namespace\": service_account.metadata.namespace,\n\t\t\t\t\"kind\": service_account.kind,\n\t\t\t\t\"relatedObjects\": [role, rolebinding]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster: %v\", [service_account_name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [\"\"],\n\t\t\"alertScore\": 7,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": savector\n\t\t}\n\t}\n}\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n service_account_name := service_account.metadata.name\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == service_account_name\n rolesubject.namespace == service_account.metadata.namespace\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n 
savector = {\"name\": service_account.metadata.name,\n\t\t\t\t\"namespace\": service_account.metadata.namespace,\n\t\t\t\t\"kind\": service_account.kind,\n\t\t\t\t\"relatedObjects\": [role, rolebinding]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster: %v\", [service_account_name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": savector\n\t\t}\n\t}\n}\n\n# Returns the rbac permission of each service account\ndeny[msga] {\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n service_account_name := service_account.metadata.name\n\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n\trolebinding := rolebindings[_]\n rolesubject := rolebinding.subjects[_]\n rolesubject.name == service_account_name\n rolesubject.namespace == service_account.metadata.namespace\n\n roles := [role | role = input[_]; role.kind == \"ClusterRole\"]\n role := roles[_]\n role.metadata.name == rolebinding.roleRef.name\n\n savector = {\"name\": service_account.metadata.name,\n\t\t\t\t\"namespace\": service_account.metadata.namespace,\n\t\t\t\t\"kind\": service_account.kind,\n\t\t\t\t\"relatedObjects\": [role, rolebinding]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service account: %v has the following permissions in the cluster: %v\", [service_account_name, rolebinding.roleRef.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n \"failedPaths\": [\"\"],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": savector\n\t\t}\n\t}\n}",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod",
+ "ServiceAccount"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ },
+ {
+ "apiGroups": [
+ "rbac.authorization.k8s.io"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "RoleBinding",
+ "ClusterRoleBinding",
+ "Role",
+ "ClusterRole"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which service accounts can be used to access other resources in the cluster",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 6
+ },
+ {
+ "guid": "",
+ "name": "Access Kubernetes dashboard",
+ "attributes": {
+ "armoBuiltin": true,
+ "controlTypeTags": [
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Discovery",
+ "Lateral Movement"
+ ],
+ "rbacQuery": "Access k8s Dashboard"
+ },
+ "id": "C-0014",
+ "controlID": "C-0014",
+ "creationTime": "",
+ "description": "Attackers who gain access to the dashboard service account or have its RBAC permissions can use its network access to retrieve information about resources in the cluster or change them. This control checks if a subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not the dashboard pod is associated with dashboard service account.",
+ "remediation": "Make sure that the “Kubernetes Dashboard” service account is only bound to the Kubernetes dashboard following the least privilege principle.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-access-dashboard",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard",
+ "useUntilKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n# input: roleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"RoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [\"\"],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: clusterRoleBinding\n# apiversion: v1\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\troleBinding := input[_]\n roleBinding.kind == \"ClusterRoleBinding\"\n roleBinding.roleRef.name == \"kubernetes-dashboard\"\n subject := roleBinding.subjects[_]\n subject.name != \"kubernetes-dashboard\"\n subject.kind != \"ServiceAccount\"\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following subjects: %s are bound to dashboard role/clusterrole\", [subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [\"\"],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [roleBinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", 
[pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceaccountname == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\tpath := \"spec.jobTemplate.spec.template.spec.serviceaccountname\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "RoleBinding",
+ "ClusterRoleBinding"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not dashboard pod is associated with its service account.",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "rule-access-dashboard-subject-v1",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard",
+ "resourcesAggregator": "subject-role-rolebinding",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n# input: regoResponseVectorObject\n# fails if a subject that is not dashboard service account is bound to dashboard role\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(subjectVector.relatedObjects[i].kind, \"Role\")\n\tendswith(subjectVector.relatedObjects[j].kind, \"Binding\")\n\n\trole.metadata.name == \"kubernetes-dashboard\"\n\tsubjectVector.name != \"kubernetes-dashboard\"\n\n\tsubject := rolebinding.subjects[k]\n path := [sprintf(\"relatedObjects[%v].subjects[%v]\", [format_int(j, 10), format_int(k, 10)])]\n\tfinalpath := array.concat(path, [sprintf(\"relatedObjects[%v].roleRef.name\", [format_int(j, 10)])])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %v-%v is bound to dashboard role/clusterrole\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector\n\t\t}\n\t}\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Role",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "RoleBinding"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not dashboard pod is associated with its service account.",
+ "remediation": "",
+ "ruleQuery": "",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "rule-access-dashboard-wl-v1",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Lateral Movement::Access Kubernetes dashboard, Discovery::Access Kubernetes dashboard",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n# input: \n# apiversion: \n# fails if pod that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n pod := input[_]\n pod.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(pod.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following pods: %s are associated with dashboard service account\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [\"spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if workload that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n wl.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is associated with dashboard service account\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"spec.template.spec.serviceaccountname\"],\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# input: \n# apiversion: \n# fails if CronJob that is not dashboard is associated to dashboard service account\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n wl.spec.jobTemplate.spec.template.spec.serviceAccountName == \"kubernetes-dashboard\"\n not startswith(wl.metadata.name, \"kubernetes-dashboard\")\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjob: %s is associated with dashboard service account\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": 
[],\n\t\t\"failedPaths\": [\"spec.jobTemplate.spec.template.spec.serviceaccountname\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if subject that is not dashboard service account is bound to dashboard role/clusterrole, or - if anyone that is not dashboard pod is associated with its service account.",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ "",
+ ""
+ ],
+ "baseScore": 2
+ },
+ {
+ "guid": "",
+ "name": "Applications credentials in configuration files",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Credential access"
+ ]
+ },
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Credential access"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance",
+ "security-impact"
+ ],
+ "microsoftMitreColumns": [
+ "Credential access",
+ "Lateral Movement"
+ ]
+ },
+ "id": "C-0012",
+ "controlID": "C-0012",
+ "creationTime": "",
+ "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.",
+ "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-credentials-in-env-var",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files"
+ },
+ "creationTime": "",
+ "rule": "\tpackage armo_builtins\n\t# import data.cautils as cautils\n\t# import data.kubernetes.api.client as client\n\timport data\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value) \n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value) \n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\t\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": 
[],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value) \n\t\t\n\t\tis_not_reference(env)\n\t\t\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": [
+ "settings.postureControlInputs.sensitiveKeyNames",
+ "settings.postureControlInputs.sensitiveValuesAllowed"
+ ],
+ "controlConfigInputs": [
+ {
+ "path": "settings.postureControlInputs.sensitiveKeyNames",
+ "name": "Keys",
+ "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Here you can find some examples of popular key phrases that Kubescape is searching for"
+ },
+ {
+ "path": "settings.postureControlInputs.sensitiveValuesAllowed",
+ "name": "AllowedValues",
+ "description": "Allowed values"
+ }
+ ],
+ "description": "fails if Pods have sensitive information in configuration",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "rule-credentials-configmap",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n# import data.cautils as cautils\n# import data.kubernetes.api.client as client\nimport data\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n \n contains(lower(map_key), lower(key_name))\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n \n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := 
sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n \n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n regex.match(value , decoded_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "ConfigMap"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": [
+ "settings.postureControlInputs.sensitiveValues",
+ "settings.postureControlInputs.sensitiveKeyNames",
+ "settings.postureControlInputs.sensitiveValuesAllowed"
+ ],
+ "controlConfigInputs": [
+ {
+ "path": "settings.postureControlInputs.sensitiveValues",
+ "name": "Values",
+ "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Below you can find some examples of popular value phrases that Kubescape is searching for"
+ },
+ {
+ "path": "settings.postureControlInputs.sensitiveKeyNames",
+ "name": "Keys",
+ "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Here you can find some examples of popular key phrases that Kubescape is searching for"
+ },
+ {
+ "path": "settings.postureControlInputs.sensitiveValuesAllowed",
+ "name": "AllowedValues",
+ "description": "Allowed values"
+ }
+ ],
+ "description": "fails if ConfigMaps have sensitive information in configuration",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 8
+ },
+ {
+ "guid": "",
+ "name": "Cluster-admin binding",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Impact - data destruction",
+ "Impact - service injection"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Privilege escalation"
+ ],
+ "rbacQuery": "Show cluster_admin"
+ },
+ "id": "C-0035",
+ "controlID": "C-0035",
+ "creationTime": "",
+ "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.",
+ "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-list-all-cluster-admins",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding",
+ "useUntilKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.cautils as cautils\n\n# input: roles\n# apiversion: v1\n# does: returns roles+ related subjects in rolebinding\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does: returns clusterroles+ related subjects in rolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n \n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does:\treturns clusterroles+ related subjects in clusterrolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n canCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanCreate(rule, i) {\n\tverb := rule.verbs[j]\n\tverb == \"*\"\n}\n\ncanCreateResources(rule, i){\n\tis_api_group(rule)\n\tresource := rule.resources[j]\n\tresource == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Role",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "RoleBinding"
+ ]
+ }
+ ],
+ "ruleDependencies": [
+ {
+ "packageName": "cautils"
+ }
+ ],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users have cluster admin permissions",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "rule-list-all-cluster-admins-v1",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding",
+ "resourcesAggregator": "subject-role-rolebinding",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) \u003e 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) \u003e 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) \u003e 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) 
{\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Role",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "RoleBinding"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users have cluster admin permissions",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 6
+ },
+ {
+ "guid": "",
+ "name": "Cluster internal networking",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Discovery",
+ "Lateral movement"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Lateral movement"
+ ]
+ },
+ "id": "C-0054",
+ "controlID": "C-0054",
+ "creationTime": "",
+ "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.",
+ "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "internal-networking",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}",
+ "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Namespace"
+ ]
+ },
+ {
+ "apiGroups": [
+ "networking.k8s.io"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "NetworkPolicy"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "lists namespaces in which no network policies are defined",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 4
+ },
+ {
+ "guid": "",
+ "name": "Exec into container",
+ "attributes": {
+ "armoBuiltin": true,
+ "controlTypeTags": [
+ "compliance",
+ "security-impact"
+ ],
+ "microsoftMitreColumns": [
+ "Execution"
+ ],
+ "rbacQuery": "Show who can access into pods"
+ },
+ "id": "C-0002",
+ "controlID": "C-0002",
+ "creationTime": "",
+ "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using “kubectl exec” command. This control determines which subjects have permissions to use this command.",
+ "remediation": "It is recommended to prohibit “kubectl exec” command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "exec-into-container",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Privilege Escalation::Exec into container",
+ "useUntilKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "\npackage armo_builtins\nimport data.cautils as cautils\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1 \n# returns subjects that can exec into container\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n \tsubject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1 \n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1 \n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/exec\")\n\t\n}\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/*\")\n}\ncan_exec_to_pod_resource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "rbac.authorization.k8s.io"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "RoleBinding",
+ "ClusterRoleBinding",
+ "Role",
+ "ClusterRole"
+ ]
+ }
+ ],
+ "ruleDependencies": [
+ {
+ "packageName": "cautils"
+ }
+ ],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users have permissions to exec into pods",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "exec-into-container-v1",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Privilege Escalation::Exec into container",
+ "resourcesAggregator": "subject-role-rolebinding",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) \u003e 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) \u003e 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) \u003e 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) 
{\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "rbac.authorization.k8s.io"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "RoleBinding",
+ "ClusterRoleBinding",
+ "Role",
+ "ClusterRole"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users have permissions to exec into pods",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 5
+ },
+ {
+ "guid": "",
+ "name": "Exposed sensitive interfaces",
+ "attributes": {
+ "armoBuiltin": true,
+ "controlTypeTags": [
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Initial access"
+ ]
+ },
+ "id": "C-0021",
+ "controlID": "C-0021",
+ "creationTime": "",
+ "description": "Exposing a sensitive interface to the internet poses a security risk. It might enable attackers to run malicious code or deploy containers in the cluster. This control checks if known components (e.g. Kubeflow, Argo Workflows, etc.) are deployed and exposed services externally.",
+ "remediation": "Consider blocking external interfaces or protect them with appropriate security tools.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "exposed-sensitive-interfaces",
+ "attributes": {
+ "armoBuiltin": true,
+ "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces",
+ "useUntilKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.kubernetes.api.client as client\nimport data\n\n# loadbalancer\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\tresult := wl_connectedto_service(wl, service)\n \n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n \n # see default-config-inputs.json for list values\n services_names := data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n \n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\n\tresult := wl_connectedto_service(pod, service)\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod, service]\n\t\t}\n\t}\n} \n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n \n # see default-config-inputs.json for list values\n services_names := 
data.postureControlInputs.servicesNames\n\tservices_names[service.metadata.name]\n \n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n\n\tresult := wl_connectedto_service(wl, service)\n\n\tpods_resource := client.query_all(\"pods\")\n\tpod := pods_resource.body.items[_]\n\tmy_pods := [pod | startswith(pod.metadata.name, wl.metadata.name)]\n\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl, service]\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod",
+ "Service"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [
+ {
+ "packageName": "kubernetes.api.client"
+ }
+ ],
+ "configInputs": [
+ "settings.postureControlInputs.servicesNames"
+ ],
+ "controlConfigInputs": [
+ {
+ "path": "settings.postureControlInputs.servicesNames",
+ "name": "Service names",
+ "description": "Kubescape will look for the following services that exposes sensitive interfaces of common K8s projects/applications"
+ }
+ ],
+ "description": "fails if known interfaces have exposed services",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "exposed-sensitive-interfaces-v1",
+ "attributes": {
+ "armoBuiltin": true,
+ "microsoftK8sThreatMatrix": "Initial access::Exposed sensitive interfaces",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.kubernetes.api.client as client\nimport data\n\n# loadbalancer\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"LoadBalancer\"\n\n\tresult := wl_connectedto_service(wl, service)\n \n # externalIP := service.spec.externalIPs[_]\n\texternalIP := service.status.loadBalancer.ingress[0].ip\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n\n# nodePort\n# get a pod connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"Pod\"\n \n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n \n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 
7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n} \n\n# nodePort\n# get a workload connected to that service, get nodeIP (hostIP?)\n# use ip + nodeport\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"CronJob\"}\n\tspec_template_spec_patterns[wl.kind]\n \n # see default-config-inputs.json for list values\n wl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n \n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.spec.type == \"NodePort\"\n\n\tresult := wl_connectedto_service(wl, service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": [service]}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"service: %v is exposed\", [service.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\n# ====================================================================================\n\nwl_connectedto_service(wl, service) = paths{\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}\n\nwl_connectedto_service(wl, service) = paths {\n\twl.spec.selector.matchLabels == service.spec.selector\n\tpaths = [\"spec.selector.matchLabels\", \"service.spec.selector\"]\n}",
+ "resourceEnumerator": "package armo_builtins\nimport data.kubernetes.api.client as client\nimport data\n\ndeny[msga] {\n\twl := input[_]\n\tworkload_types = {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\", \"Pod\", \"CronJob\"}\n\tworkload_types[wl.kind]\n\n\t# see default-config-inputs.json for list values\n\twl_names := data.postureControlInputs.sensitiveInterfaces\n\twl_name := wl_names[_]\n\tcontains(wl.metadata.name, wl_name)\n\n\tsrvc := get_wl_connectedto_service(wl)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": srvc}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"wl: %v is in the cluster\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n\t}\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\twl_connectedto_service(wl, service)\n\ts = [service]\n}\n\nget_wl_connectedto_service(wl) = s {\n\tservices := [service | service = input[_]; service.kind == \"Service\"]\n\tcount({i | services[i]; wl_connectedto_service(wl, services[i])}) == 0\n\ts = []\n}\n\nwl_connectedto_service(wl, service){\n\tcount({x | service.spec.selector[x] == wl.metadata.labels[x]}) == count(service.spec.selector)\n}",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod",
+ "Service"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [
+ {
+ "packageName": "kubernetes.api.client"
+ }
+ ],
+ "configInputs": [
+ "settings.postureControlInputs.sensitiveInterfaces"
+ ],
+ "controlConfigInputs": [
+ {
+ "path": "settings.postureControlInputs.sensitiveInterfaces",
+ "name": "Sensitive interfaces",
+ "description": "The following interfaces were seen exploited. Kubescape checks it they are externally exposed."
+ }
+ ],
+ "description": "fails if known interfaces have exposed services",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 6
+ },
+ {
+ "guid": "",
+ "name": "HostPath mount",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Impact - Data access in container"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Privilege escalation"
+ ]
+ },
+ "id": "C-0048",
+ "controlID": "C-0048",
+ "creationTime": "",
+ "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host. This control identifies all the PODs using hostPath mount.",
+ "remediation": "Remove hostPath mounts unless they are absolutely necessary and use exception mechanism to remove notifications.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "alert-any-hostpath",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Privilege Escalation::hostPath mount"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.\"\n\tresult := is_dangerous_host_path(volume, beggining_of_path, i)\n podname := pod.metadata.name\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_host_path(volume, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n 
startswith(volume.hostPath.path, \"/etc\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_dangerous_host_path(volume, beggining_of_path, i) = path {\n startswith(volume.hostPath.path, \"/var\")\n\tpath = sprintf(\"%vvolumes[%v].hostPath.path\", [beggining_of_path, format_int(i, 10)])\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines if any workload contains a hostPath volume",
+ "remediation": "Try to refrain from using hostPath mounts",
+ "ruleQuery": "",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 7
+ },
+ {
+ "guid": "",
+ "name": "Instance Metadata API",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Credential access",
+ "Discovery",
+ "Impact - service access"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Discovery"
+ ]
+ },
+ "id": "C-0052",
+ "controlID": "C-0052",
+ "creationTime": "",
+ "description": "Attackers who gain access to a container, may query the metadata API service for getting information about the underlying node. This control checks if there is access from the nodes to cloud providers instance metadata services.",
+ "remediation": "Disable metadata services for pods in cloud provider settings.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "instance-metadata-api-access",
+ "attributes": {
+ "armoBuiltin": true,
+ "hostSensorRule": "true",
+ "m$K8sThreatMatrix": "Credential Access::Instance Metadata API"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\ndeny[msg] {\n\tobj = input[_]\n\tis_cloud_provider_info(obj)\n\n\tobj.data.providerMetaDataAPIAccess == true\n\n\n\tmsg := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of cloud provider.\", [obj.metadata.name]),\n\t\t\"alert\": true,\n\t\t\"alertScore\": 1,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"externalObjects\": obj\n\t\t},\n\t\t\"packagename\": \"armo_builtins\"\n\t}\n\n}\n\n\n\nis_cloud_provider_info(obj) {\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\tobj.kind == \"cloudProviderInfo\"\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "hostdata.kubescape.cloud"
+ ],
+ "apiVersions": [
+ "v1beta0"
+ ],
+ "resources": [
+ "cloudProviderInfo"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "Checks if there is access from the nodes to cloud prividers instance metadata services",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 7
+ },
+ {
+ "guid": "",
+ "name": "Kubernetes CronJob",
+ "attributes": {
+ "armoBuiltin": true,
+ "controlTypeTags": [
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Persistence"
+ ]
+ },
+ "id": "C-0026",
+ "controlID": "C-0026",
+ "creationTime": "",
+ "description": "Attackers may use Kubernetes CronJob for scheduling execution of malicious code that would run as a POD in the cluster. This control lists all the CronJobs that exist in the cluster for the user to approve.",
+ "remediation": "Watch Kubernetes CronJobs and make sure they are legitimate.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-deny-cronjobs",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Persistence::Kubernetes Cronjob"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n# alert cronjobs\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"failedPaths\": [\"\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines if it's cronjob",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 1
+ },
+ {
+ "guid": "",
+ "name": "List Kubernetes secrets",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Credential access"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security-impact",
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Credential access"
+ ],
+ "rbacQuery": "Show who can access secrets"
+ },
+ "id": "C-0015",
+ "controlID": "C-0015",
+ "creationTime": "",
+ "description": "Attackers who have permissions to access secrets can access sensitive information that might include credentials to various services. This control determines which user, group or service account can list/get secrets.",
+ "remediation": "Monitor and approve list of users, groups and service accounts that can access secrets. Use exception mechanism to prevent repetitive the notifications.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-can-list-get-secrets",
+ "attributes": {
+ "armoBuiltin": true,
+ "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server",
+ "useUntilKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.cautils as cautils\n\n\n# fails if user can list/get secrets \n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can list/get secrets \n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# fails if user can list/get secrets \n# ClusterRoleBinding to 
ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canViewSecretsResource(rule)\n canViewSecretsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can read secrets\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n \"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"get\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"list\")\n}\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"watch\")\n}\n\n\ncanViewSecretsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}\n\n\ncanViewSecretsResource(rule) {\n cautils.list_contains(rule.resources,\"secrets\")\n}\n\ncanViewSecretsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Role",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "RoleBinding"
+ ]
+ }
+ ],
+ "ruleDependencies": [
+ {
+ "packageName": "cautils"
+ }
+ ],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users can list/get secrets",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "rule-can-list-get-secrets-v1",
+ "attributes": {
+ "armoBuiltin": true,
+ "microsoftK8sThreatMatrix": "Discovery::Access the K8s API server",
+ "resourcesAggregator": "subject-role-rolebinding",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can list/get secrets \ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"get\", \"list\", \"watch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) \u003e 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) \u003e 0\n\n\tresources := [\"secrets\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) \u003e 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can read secrets\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) 
{\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Role",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "RoleBinding"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users can list/get secrets",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 7
+ },
+ {
+ "guid": "",
+ "name": "Mount service principal",
+ "attributes": {
+ "armoBuiltin": true,
+ "controlTypeTags": [
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Credential Access"
+ ]
+ },
+ "id": "C-0020",
+ "controlID": "C-0020",
+ "creationTime": "",
+ "description": "When a cluster is deployed in the cloud, in some cases attackers can leverage their access to a container in the cluster to gain cloud credentials. This control determines if any workload contains a volume with potential access to cloud credential.",
+ "remediation": "Refrain from using path mount to known cloud credentials folders or files.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "alert-mount-potential-credentials-paths",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport future.keywords.if\n\n\ndeny[msga] {\n\tprovider := data.dataControlInputs.cloudProvider\n\tprovider != \"\"\n\tresources := input[_]\n\tvolumes_data := get_volumes(resources)\n volumes := volumes_data[\"volumes\"]\n volume := volumes[i]\n\tbeggining_of_path := volumes_data[\"beggining_of_path\"]\n result := is_unsafe_paths(volume, beggining_of_path, provider,i)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as volume with potential credentials access.\", [resources.kind, resources.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [resources]\n\t\t}\n\t}\t\n}\n\n\t\n# get_volume - get resource volumes paths for {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\nget_volumes(resources) := result {\n\tresources_kinds := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tresources_kinds[resources.kind]\n\tresult = {\"volumes\": resources.spec.template.spec.volumes, \"beggining_of_path\": \"spec.template.spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"Pod\"\nget_volumes(resources) := result {\n\tresources.kind == \"Pod\"\n\tresult = {\"volumes\": resources.spec.volumes, \"beggining_of_path\": \"spec.\"}\n}\n\n# get_volume - get resource volumes paths for \"CronJob\"\nget_volumes(resources) := result {\n\tresources.kind == \"CronJob\"\n\tresult = {\"volumes\": resources.spec.jobTemplate.spec.template.spec.volumes, \"beggining_of_path\": \"spec.jobTemplate.spec.template.spec.\"}\n}\n\n\n# is_unsafe_paths - looking for cloud provider (eks/gke/aks) paths that have the potential of accessing credentials\nis_unsafe_paths(volume, beggining_of_path, provider, i) = result {\n\tunsafe := unsafe_paths(provider)\n\tunsafe[_] == fix_path(volume.hostPath.path)\n\tresult= sprintf(\"%vvolumes[%d].hostPath.path\", 
[beggining_of_path, i])\n}\n\n\n# fix_path - adding \"/\" at the end of the path if doesn't exist and if not a file path.\nfix_path(path) := result if {\n\n\t# filter file path\n not regex.match(`[\\\\w-]+\\\\.`, path)\n\n\t# filter path that doesn't end with \"/\"\n not endswith(path, \"/\")\n\n\t# adding \"/\" to the end of the path\n result = sprintf(\"%v/\", [path])\n} else := path\n\n\n\n# eks unsafe paths\nunsafe_paths(x) := [\"/.aws/\", \n\t\t\t\t\t\"/.aws/config/\", \n\t\t\t\t\t\"/.aws/credentials/\"] if {x==\"eks\"}\n\n# aks unsafe paths\nunsafe_paths(x) := [\"/etc/\",\n\t\t\t\t\t\"/etc/kubernetes/\",\n\t\t\t\t\t\"/etc/kubernetes/azure.json\", \n\t\t\t\t\t\"/.azure/\",\n\t\t\t\t\t\"/.azure/credentials/\", \n\t\t\t\t\t\"/etc/kubernetes/azure.json\"] if {x==\"aks\"}\n\n# gke unsafe paths\nunsafe_paths(x) := [\"/.config/gcloud/\", \n\t\t\t\t\t\"/.config/\", \n\t\t\t\t\t\"/gcloud/\", \n\t\t\t\t\t\"/.config/gcloud/application_default_credentials.json\",\n\t\t\t\t\t\"/gcloud/application_default_credentials.json\"] if {x==\"gke\"}\n\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines if any workload contains a hostPath volume",
+ "remediation": "Try to refrain from using hostPath mounts",
+ "ruleQuery": "",
+ "relevantCloudProviders": [
+ "EKS",
+ "GKE",
+ "AKS"
+ ]
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 4
+ },
+ {
+ "guid": "",
+ "name": "Privileged container",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Privilege escalation"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security"
+ ],
+ "microsoftMitreColumns": [
+ "Privilege escalation"
+ ]
+ },
+ "id": "C-0057",
+ "controlID": "C-0057",
+ "creationTime": "",
+ "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.",
+ "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-privilege-escalation",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Privilege Escalation::privileged container",
+ "mitre": "Privilege Escalation",
+ "mitreCode": "TA0004"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot 
container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) \u003e 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) \u003c 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite \u0026\u0026 securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) \u003e 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines if pods/deployments defined as privileged true",
+ "remediation": "avoid defining pods as privileged",
+ "ruleQuery": "",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 8
+ },
+ {
+ "guid": "",
+ "name": "SSH server running inside container",
+ "attributes": {
+ "armoBuiltin": true,
+ "controlTypeTags": [
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Execution"
+ ]
+ },
+ "id": "C-0042",
+ "controlID": "C-0042",
+ "creationTime": "",
+ "description": "An SSH server that is running inside a container may be used by attackers to get remote access to the container. This control checks if pods have an open SSH port (22/2222).",
+ "remediation": "Remove SSH from the container image or limit the access to the SSH server using network policies.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-can-ssh-to-pod",
+ "attributes": {
+ "armoBuiltin": true,
+ "microsoftK8sThreatMatrix": "Execution::SSH server running inside container",
+ "useUntilKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": 
sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl,service]\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod",
+ "Service"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "denies pods with SSH ports opened (22/2222)",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "rule-can-ssh-to-pod-v1",
+ "attributes": {
+ "armoBuiltin": true,
+ "microsoftK8sThreatMatrix": "Execution::SSH server running inside container",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := 
input[_]\n\twl.kind == \"CronJob\"\n\tlabels := wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\thasSSHPorts(service)\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n",
+ "resourceEnumerator": "package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does:\treturns the external facing services of that pod\n\ndeny[msga] {\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tpodns := pod.metadata.namespace\n\tpodname := pod.metadata.name\n\tlabels := pod.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n path := \"metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == podns\n\tservice.spec.selector == filtered_labels\n\n\n\twlvector = {\"name\": pod.metadata.name,\n\t\t\t\t\"namespace\": pod.metadata.namespace,\n\t\t\t\t\"kind\": pod.kind,\n\t\t\t\t\"relatedObjects\": service}\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\", [podns, podname, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tlabels := wl.spec.template.metadata.labels\n path := \"spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tlabels := 
wl.spec.jobTemplate.spec.template.metadata.labels\n path := \"spec.jobTemplate.spec.template.metadata.labels\"\n\tservice := \tinput[_]\n\tservice.kind == \"Service\"\n\tservice.metadata.namespace == wl.metadata.namespace\n\tservice.spec.selector == labels\n\n\n\twlvector = {\"name\": wl.metadata.name,\n\t\t\t\t\"namespace\": wl.metadata.namespace,\n\t\t\t\t\"kind\": wl.kind,\n\t\t\t\t\"relatedObjects\": service}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is exposed by SSH services: %v\", [wl.kind, wl.metadata.name, service]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": wlvector\n\t\t}\n }\n}\n",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod",
+ "Service"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "denies pods with SSH ports opened (22/2222)",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 3
+ },
+ {
+ "guid": "",
+ "name": "Writable hostPath mount",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Persistence",
+ "Impact - Data access in container"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance",
+ "devops",
+ "security-impact"
+ ],
+ "microsoftMitreColumns": [
+ "Persistence",
+ "Lateral Movement"
+ ]
+ },
+ "id": "C-0045",
+ "controlID": "C-0045",
+ "creationTime": "",
+ "description": "Mounting host directory to the container can be used by attackers to get access to the underlying host and gain persistence.",
+ "remediation": "Refrain from using the hostPath mount or use the exception mechanism to remove unnecessary notifications.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "alert-rw-hostpath",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n# Fails if container has a hostPath volume which is not readOnly\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n 
volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolume_mount := container.volumeMounts[k]\n\tvolume_mount.name == volume.name\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_rw_mount(volume_mount, beggining_of_path, i, k) \n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"fixPaths\": fixed_path,\n\t\"failedPaths\": failed_path,\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n\tnot mount.readOnly == true\n \tnot mount.readOnly == false\n\tfailed_path = \"\"\n fix_path = {\"path\": sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]), \"value\":\"true\"}\n}\n\nis_rw_mount(mount, beggining_of_path, i, k) = [failed_path, fix_path] {\n \tmount.readOnly == false\n \tfailed_path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [beggining_of_path, format_int(i, 10), format_int(k, 10)])\n fix_path = \"\"\n} ",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [
+ {
+ "packageName": "cautils"
+ },
+ {
+ "packageName": "kubernetes.api.client"
+ }
+ ],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines if any workload contains a hostPath volume with rw permissions",
+ "remediation": "Set the readOnly field of the mount to true",
+ "ruleQuery": "",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 8
+ },
+ {
+ "guid": "",
+ "name": "Malicious admission controller (mutating)",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Impact - service injection"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Persistence"
+ ]
+ },
+ "id": "C-0039",
+ "controlID": "C-0039",
+ "creationTime": "",
+ "description": "Attackers may use mutating webhooks to intercept and modify all the resources in the cluster. This control lists all mutating webhook configurations that must be verified.",
+ "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repetitive notifications.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "list-all-mutating-webhooks",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Persistence::Malicious admission controller"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\ndeny [msga] {\n mutatingwebhooks := [mutatingwebhook | mutatingwebhook = input[_]; mutatingwebhook.kind == \"MutatingWebhookConfiguration\"]\n mutatingwebhook := mutatingwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following mutating webhook configuration should be checked %v.\", [mutatingwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"failedPaths\": [\"\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [mutatingwebhook]\n\t\t}\n\t}\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "admissionregistration.k8s.io"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "MutatingWebhookConfiguration"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "Returns mutating webhook configurations to be verified",
+ "remediation": "Analyze webhook for malicious behavior",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 4
+ },
+ {
+ "guid": "",
+ "name": "Malicious admission controller (validating)",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Impact - data destruction",
+ "Impact - service injection"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Credential access"
+ ]
+ },
+ "id": "C-0036",
+ "controlID": "C-0036",
+ "creationTime": "",
+ "description": "Attackers can use validating webhooks to intercept and discover all the resources in the cluster. This control lists all the validating webhook configurations that must be verified.",
+ "remediation": "Ensure all the webhooks are necessary. Use exception mechanism to prevent repetitive notifications.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "list-all-validating-webhooks",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Credential Access::Malicious admission controller"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\ndeny [msga] {\n admissionwebhooks := [admissionwebhook | admissionwebhook = input[_]; admissionwebhook.kind == \"ValidatingWebhookConfiguration\"]\n admissionwebhook := admissionwebhooks[_]\n\n \tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following validating webhook configuration should be checked %v.\", [admissionwebhook.metadata.name]),\n\t\t\"alertScore\": 6,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"\"],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [admissionwebhook]\n\t\t}\n\t}\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "admissionregistration.k8s.io"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "ValidatingWebhookConfiguration"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "Returns validating webhook configurations to be verified",
+ "remediation": "Analyze webhook for malicious behavior",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 3
+ },
+ {
+ "guid": "",
+ "name": "Delete Kubernetes events",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Defense evasion"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Defense evasion"
+ ],
+ "rbacQuery": "Show who can delete k8s events"
+ },
+ "id": "C-0031",
+ "controlID": "C-0031",
+ "creationTime": "",
+ "description": "Attackers may delete Kubernetes events to avoid detection of their activity in the cluster. This control identifies all the subjects that can delete Kubernetes events.",
+ "remediation": "You should follow the least privilege principle. Minimize the number of subjects who can delete Kubernetes events. Avoid using these subjects in the daily operations.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-can-delete-k8s-events",
+ "attributes": {
+ "armoBuiltin": true,
+ "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events",
+ "useUntilKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.cautils as cautils\n\n# fails if user can delete events\n#RoleBinding to Role\ndeny [msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can delete events\n# ClusterRoleBinding to 
ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteEventsResource(rule)\n canDeleteEventsVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete events\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteEventsResource(rule) {\n cautils.list_contains(rule.resources,\"events\")\n}\ncanDeleteEventsResource(rule) {\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"delete\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"deletecollection\")\n}\n\ncanDeleteEventsVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Role",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "RoleBinding"
+ ]
+ }
+ ],
+ "ruleDependencies": [
+ {
+ "packageName": "cautils"
+ }
+ ],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users can delete events",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "rule-can-delete-k8s-events-v1",
+ "attributes": {
+ "armoBuiltin": true,
+ "microsoftK8sThreatMatrix": "Defense Evasion::Delete K8S events",
+ "resourcesAggregator": "subject-role-rolebinding",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can delete events\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) \u003e 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) \u003e 0\n\n\tresources := [\"events\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) \u003e 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete events\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == 
subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Role",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "RoleBinding"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users can delete events",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 4
+ },
+ {
+ "guid": "",
+ "name": "CoreDNS poisoning",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Impact - service injection"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Lateral Movement"
+ ]
+ },
+ "id": "C-0037",
+ "controlID": "C-0037",
+ "creationTime": "",
+ "description": "If attackers have permissions to modify the coredns ConfigMap they can change the behavior of the cluster’s DNS, poison it, and override the network identity of other services. This control identifies all subjects allowed to update the 'coredns' configmap.",
+ "remediation": "You should follow the least privilege principle. Monitor and approve all the subjects allowed to modify the 'coredns' configmap. It is also recommended to remove this permission from the users/service accounts used in the daily operations.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-can-update-configmap",
+ "attributes": {
+ "armoBuiltin": true,
+ "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning",
+ "useUntilKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.cautils as cautils\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n#RoleBinding to Role\ndeny [msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n rolebinding.metadata.namespace == \"kube-system\"\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n# RoleBinding to ClusterRole\ndeny[msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == 
role.metadata.name\n rolebinding.metadata.namespace == \"kube-system\"\n\n\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n\n}\n\n\n# Fails if user can modify all configmaps, or if he can modify the 'coredns' configmap (default for coredns)\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n configmap := configmaps[_]\n configmap.metadata.name == \"coredns\"\n\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canModifyConfigMapResource(rule)\n canModifyConfigMapVerb(rule)\n\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n \tmsga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can modify 'coredns' configmap\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 6,\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n\n\n\n canModifyConfigMapResource(rule) {\n not rule.resourceNames\n cautils.list_contains(rule.resources,\"configmaps\")\n }\n\n canModifyConfigMapResource(rule) {\n not 
rule.resourceNames\n is_api_group(rule)\n cautils.list_contains(rule.resources,\"*\")\n }\n\n canModifyConfigMapResource(rule) {\n cautils.list_contains(rule.resources,\"configmaps\")\n cautils.list_contains(rule.resourceNames,\"coredns\")\n }\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"update\")\n }\n\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"patch\")\n }\n\n canModifyConfigMapVerb(rule) {\n cautils.list_contains(rule.verbs,\"*\")\n }\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Role",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "RoleBinding",
+ "ConfigMap"
+ ]
+ }
+ ],
+ "ruleDependencies": [
+ {
+ "packageName": "cautils"
+ }
+ ],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users can update/patch the 'coredns' configmap",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "rule-can-update-configmap-v1",
+ "attributes": {
+ "armoBuiltin": true,
+ "microsoftK8sThreatMatrix": "Lateral Movement::CoreDNS poisoning",
+ "resourcesAggregator": "subject-role-rolebinding",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if user can modify all configmaps\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) \u003e 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) \u003e 0\n\n\tresources := [\"configmaps\", \"*\"]\n\tnot rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) \u003e 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# Fails if user can modify the 'coredns' configmap (default for coredns)\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, 
\"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"update\", \"patch\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) \u003e 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) \u003e 0\n\n\tresources := [\"configmaps\", \"*\"]\n\t\"coredns\" in rule.resourceNames\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) \u003e 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can modify 'coredns' configmap\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Role",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "RoleBinding"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users can update/patch the 'coredns' configmap",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 4
+ },
+ {
+ "guid": "",
+ "name": "Data Destruction",
+ "attributes": {
+ "armoBuiltin": true,
+ "controlTypeTags": [
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Impact"
+ ],
+ "rbacQuery": "Data destruction"
+ },
+ "id": "C-0007",
+ "controlID": "C-0007",
+ "creationTime": "",
+ "description": "Attackers may attempt to destroy data and resources in the cluster. This includes deleting deployments, configurations, storage, and compute resources. This control identifies all subjects that can delete resources.",
+ "remediation": "You should follow the least privilege principle and minimize the number of subjects that can delete resources.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-excessive-delete-rights",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Impact::Data Destruction",
+ "useUntilKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.cautils as cautils\n\n\n# fails if user can can delete important resources\n#RoleBinding to Role\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"Role\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\n# fails if user can can delete important resources\n#RoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n rolebinding.roleRef.kind == \"ClusterRole\"\n rolebinding.roleRef.name == role.metadata.name\n\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# 
fails if user can can delete important resources\n# ClusterRoleBinding to ClusterRole\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n clusterrolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n clusterrolebinding := clusterrolebindings[_]\n\n rule:= role.rules[_]\n canDeleteResource(rule)\n canDeleteVerb(rule)\n\n clusterrolebinding.roleRef.kind == \"ClusterRole\"\n clusterrolebinding.roleRef.name == role.metadata.name\n\n\n subject := clusterrolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t \"alertMessage\": sprintf(\"The following %v: %v can delete important resources\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n \"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,clusterrolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"delete\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"deletecollection\")\n}\n\ncanDeleteVerb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"secrets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"pods\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"services\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"deployments\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"replicasets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"daemonsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"statefulsets\")\n}\ncanDeleteResource(rule) {\n\tcautils.list_contains(rule.resources, \"jobs\")\n}\ncanDeleteResource(rule) 
{\n\tcautils.list_contains(rule.resources, \"cronjobs\")\n}\ncanDeleteResource(rule) {\n is_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"apps\"\n}\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"batch\"\n}\n\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Role",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "RoleBinding"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if user can delete important resources",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "rule-excessive-delete-rights-v1",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Impact::Data Destruction",
+ "resourcesAggregator": "subject-role-rolebinding",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# fails if user can can delete important resources\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nrule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"delete\", \"deletecollection\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) \u003e 0\n\n\tapi_groups := [\"\", \"*\", \"apps\", \"batch\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) \u003e 0\n\n\tresources := [\"secrets\", \"pods\", \"services\", \"deployments\", \"replicasets\", \"daemonsets\", \"statefulsets\", \"jobs\", \"cronjobs\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) \u003e 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can delete important resources\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == 
subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Role",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "RoleBinding"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if user can delete important resources",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 5
+ },
+ {
+ "guid": "",
+ "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Persistence",
+ "Impact - Data access in container"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0058",
+ "controlID": "C-0058",
+ "creationTime": "",
+ "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files \u0026 directories anywhere on the host filesystem. Following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741",
+ "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) \u003e 0\n}\n\nis_vulnerable_version(version) {\n version \u003c= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version \u003e= \"v1.22.0\"\n version \u003c= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version \u003e= \"v1.21.0\"\n version \u003c= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version \u003e= \"v1.20.0\"\n version \u003c= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n",
+ "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [\"\"],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [\"\"],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [\"\"],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version \u003c= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version \u003e= \"v1.22.0\"\n version \u003c= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version \u003e= \"v1.21.0\"\n version \u003c= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version \u003e= \"v1.20.0\"\n version \u003c= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod",
+ "Node"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "A user may be able to create a container with subPath volume mounts to access files \u0026 directories outside of the volume, including on the host filesystem. This was affected at the following versions: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. ",
+ "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 6
+ },
+ {
+ "guid": "",
+ "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Initial access",
+ "Execution"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0059",
+ "controlID": "C-0059",
+ "creationTime": "",
+ "description": "Security issue in ingress-nginx where a user that can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)",
+ "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (\u003e= v0.49.1 or \u003e= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx",
+ "rules": [
+ {
+ "guid": "",
+ "name": "nginx-ingress-snippet-annotation-vulnerability",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable \n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) \u003c 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] \u003c 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 
0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\t\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}",
+ "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. %v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag \u003c= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag \u003c= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag \u003c= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == 
\"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag \u003c= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) \u003c 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Deployment",
+ "ConfigMap"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 8
+ },
+ {
+ "guid": "",
+ "name": "Audit logs enabled",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Defense evasion - KubeAPI"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0067",
+ "controlID": "C-0067",
+ "creationTime": "",
+ "description": "Audit logging is an important security feature in Kubernetes, it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events happened in Kubernetes",
+ "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details",
+ "rules": [
+ {
+ "guid": "",
+ "name": "k8s-audit-logs-enabled-cloud",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.cautils as cautils\n\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\t\n # If enableComponents is empty, it will disable logging\n # https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\":\"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig := cluster_config.data\n # logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n # types - available cluster control plane log types\n # https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n goodTypes := [logSetup | logSetup = config.Cluster.Logging.ClusterLogging[_]; isAuditLogs(logSetup)]\n count(goodTypes) == 0\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\":\"aws eks update-cluster-config --region \u003cregion_code\u003e --name \u003ccluster_name\u003e --logging '{'clusterLogging':[{'types':['\u003capi/audit/authenticator\u003e'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
cluster_config\n\t\t}\n\t}\n}\n\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\nisAuditLogs(logSetup) {\n logSetup.Enabled == true\n cautils.list_contains(logSetup.Types, \"api\")\n}\n\nisAuditLogs(logSetup) {\n logSetup.Enabled == true\n cautils.list_contains(logSetup.Types, \"audit\")\n}\n\nisAuditLogs(logSetup) {\n logSetup.enabled == true\n cautils.list_contains(logSetup.Types, \"authenticator\")\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [],
+ "apiVersions": [],
+ "resources": []
+ }
+ ],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "container.googleapis.com",
+ "eks.amazonaws.com"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "ClusterDescribe"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": [
+ "EKS",
+ "GKE"
+ ]
+ },
+ {
+ "guid": "",
+ "name": "k8s-audit-logs-enabled-native",
+ "attributes": {
+ "armoBuiltin": true,
+ "resourcesAggregator": "apiserver-pod",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.cautils as cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) \u003c 1\n\tpath := \"spec.containers[0].command\"\t\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 5
+ },
+ {
+ "guid": "",
+ "name": "Secret/ETCD encryption enabled",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "node",
+ "categories": [
+ "Impact"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0066",
+ "controlID": "C-0066",
+ "creationTime": "",
+ "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.",
+ "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "secret-etcd-encryption-cloud",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster=\u003ccluster\u003e --key-arn=arn:aws:kms:\u003ccluster_region\u003e:\u003caccount\u003e:key/\u003ckey\u003e --region=\u003cregion\u003e\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update \u003ccluster_name\u003e --region=\u003ccompute_region\u003e --database-encryption-key=\u003ckey_project_id\u003e/locations/\u003clocation\u003e/keyRings/\u003cring_name\u003e/cryptoKeys/\u003ckey_name\u003e --project=\u003ccluster_project_id\u003e\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == 
\"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [],
+ "apiVersions": [],
+ "resources": []
+ }
+ ],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "container.googleapis.com",
+ "eks.amazonaws.com"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "ClusterDescribe"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": [
+ "EKS",
+ "GKE"
+ ]
+ },
+ {
+ "guid": "",
+ "name": "etcd-encryption-native",
+ "attributes": {
+ "armoBuiltin": true,
+ "resourcesAggregator": "apiserver-pod",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\nimport data.cautils as cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) \u003c 1\n\tpath := \"spec.containers[0].command\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 6
+ },
+ {
+ "guid": "",
+ "name": "PSP enabled",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Impact - service injection"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0068",
+ "controlID": "C-0068",
+ "creationTime": "",
+ "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it",
+ "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans",
+ "rules": [
+ {
+ "guid": "",
+ "name": "psp-enabled-cloud",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update \u003ccluster_name\u003e --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [],
+ "apiVersions": [],
+ "resources": []
+ }
+ ],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "container.googleapis.com",
+ "eks.amazonaws.com"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "ClusterDescribe"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": [
+ "EKS",
+ "GKE"
+ ]
+ },
+ {
+ "guid": "",
+ "name": "psp-enabled-native",
+ "attributes": {
+ "armoBuiltin": true,
+ "resourcesAggregator": "apiserver-pod",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 1
+ },
+ {
+ "guid": "",
+ "name": "Disable anonymous access to Kubelet service",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Initial access"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0069",
+ "controlID": "C-0069",
+ "creationTime": "",
+ "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.",
+ "remediation": "Start the kubelet with the --anonymous-auth=false flag.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "anonymous-requests-to-kubelet-service-updated",
+ "attributes": {
+ "armoBuiltin": true,
+ "hostSensorRule": "true"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": 
decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [],
+ "apiVersions": [],
+ "resources": []
+ }
+ ],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "hostdata.kubescape.cloud"
+ ],
+ "apiVersions": [
+ "v1beta0"
+ ],
+ "resources": [
+ "KubeletInfo"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "Determines if anonymous requests to the kubelet service are allowed.",
+ "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.",
+ "ruleQuery": "",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 10
+ },
+ {
+ "guid": "",
+ "name": "Enforce Kubelet client TLS authentication",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "node",
+ "categories": [
+ "Initial access"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0070",
+ "controlID": "C-0070",
+ "creationTime": "",
+ "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.",
+ "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "enforce-kubelet-client-tls-authentication",
+ "attributes": {
+ "armoBuiltin": true,
+ "hostSensorRule": "true"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.kubernetes.api.client as client\n\n# Both config and cli present\ndeny[msga] {\n\t\tkubelet_config := input[_]\n\t\tkubelet_config.kind == \"KubeletConfiguration\"\n\t\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\t\tkubelet_cli := input[_] \n\t\tkubelet_cli.kind == \"KubeletCommandLine\"\n\t\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\t\tkubelet_cli_data := kubelet_cli.data\n\n\t\tresult := is_client_tls_disabled_both(kubelet_config, kubelet_cli_data)\n\t\texternal_obj := result.obj\n\t\tfailed_paths := result.failedPaths\n\t\tfixPaths := result.fixPaths\n\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"failedPaths\": failed_paths,\n\t\t\t\"fixPaths\": fixPaths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [kubelet_config, kubelet_cli]\n\t\t\t},\n\t\t}\n\t}\n\n\n# Only of them present\ndeny[msga] {\n\t\tresult := is_client_tls_disabled_single(input)\n\t\texternal_obj := result.obj\n\t\tfailed_paths := result.failedPaths\n\t\tfixPaths := result.fixPaths\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"failedPaths\": failed_paths,\n\t\t\t\"fixPaths\": fixPaths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [external_obj]\n\t\t\t},\n\t\t}\n\t}\n\n# CLI overrides config\nis_client_tls_disabled_both(kubelet_config, kubelet_cli_data) = {\"obj\": obj,\"failedPaths\": [], \"fixPaths\": [{\"path\": \"data.authentication.x509.clientCAFile\", \"value\": \"YOUR_VALUE\"}]} {\n\tnot contains(kubelet_cli_data[\"fullCommand\"], \"client-ca-file\")\n not kubelet_config.data.authentication.x509.clientCAFile\n\tobj = kubelet_config\n}\n\n# Only cli\nis_client_tls_disabled_single(resources) = {\"obj\": obj,\"failedPaths\": [], 
\"fixPaths\": []} {\n\tkubelet_cli := resources[_] \n\tkubelet_cli.kind == \"KubeletCommandLine\"\n\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_config := [config | config = resources[_]; config.kind == \"KubeletConfiguration\"]\n\tcount(kubelet_config) == 0\n\n\tobj = isClientTlsDisabledCli(kubelet_cli)\n\t\n}\n\n# Only config\nis_client_tls_disabled_single(resources) = {\"obj\": obj,\"failedPaths\": [], \"fixPaths\": [{\"path\": \"data.authentication.x509.clientCAFile\", \"value\": \"YOUR_VALUE\"}]} {\n\tkubelet_config := resources[_] \n\tkubelet_config.kind == \"KubeletConfiguration\"\n\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_cmd := [cmd | cmd = resources[_]; cmd.kind == \"KubeletCommandLine\"]\n\tcount(kubelet_cmd) == 0\n\n\tobj = is_Client_tls_disabled_config(kubelet_config)\n}\n\n\nis_Client_tls_disabled_config(kubelet_config) = obj {\n\tnot kubelet_config.data.authentication.x509.clientCAFile\n\tobj = kubelet_config\n}\n\nisClientTlsDisabledCli(kubelet_cli) = obj {\n\tkubelet_cli_data = kubelet_cli.data\n\tnot contains(kubelet_cli_data[\"fullCommand\"], \"client-ca-file\")\n\tobj = kubelet_cli\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [],
+ "apiVersions": [],
+ "resources": []
+ }
+ ],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "hostdata.kubescape.cloud"
+ ],
+ "apiVersions": [
+ "v1beta0"
+ ],
+ "resources": [
+ "KubeletConfiguration",
+ "KubeletCommandLine"
+ ]
+ }
+ ],
+ "ruleDependencies": [
+ {
+ "packageName": "cautils"
+ },
+ {
+ "packageName": "kubernetes.api.client"
+ }
+ ],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "Determines if kubelet client tls authentication is enabled.",
+ "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.",
+ "ruleQuery": "",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 9
+ }
+ ],
+ "controlsIDs": [
+ "C-0053",
+ "C-0014",
+ "C-0012",
+ "C-0035",
+ "C-0054",
+ "C-0002",
+ "C-0021",
+ "C-0048",
+ "C-0052",
+ "C-0026",
+ "C-0015",
+ "C-0020",
+ "C-0057",
+ "C-0042",
+ "C-0045",
+ "C-0039",
+ "C-0036",
+ "C-0031",
+ "C-0037",
+ "C-0007",
+ "C-0058",
+ "C-0059",
+ "C-0067",
+ "C-0066",
+ "C-0068",
+ "C-0069",
+ "C-0070"
+ ]
+}
\ No newline at end of file
diff --git a/core/cautils/getter/testdata/NSA.json b/core/cautils/getter/testdata/NSA.json
new file mode 100644
index 00000000..bdc84e0e
--- /dev/null
+++ b/core/cautils/getter/testdata/NSA.json
@@ -0,0 +1,2249 @@
+{
+ "guid": "",
+ "name": "NSA",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "description": "Implement NSA security advices for K8s ",
+ "controls": [
+ {
+ "guid": "",
+ "name": "API server insecure port is enabled",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Initial access"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0005",
+ "controlID": "C-0005",
+ "creationTime": "",
+ "description": "Kubernetes control plane API is running with non-secure port enabled which allows attackers to gain unprotected access to the cluster.",
+ "remediation": "Set the insecure-port flag of the API server to zero.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "insecure-port-flag",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.cautils as cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[i]\n\tpath = is_insecure_port_flag(container, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\t\nis_insecure_port_flag(container, i) = path {\n\tcommand := container.command[j]\n\tcontains(command, \"--insecure-port=1\")\n\tpath := sprintf(\"spec.containers[%v].command[%v]\", [format_int(i, 10), format_int(j, 10)])\n}",
+ "resourceEnumerator": "package armo_builtins\nimport data.cautils as cautils\n\n# Fails if pod has insecure-port flag enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontains(pod.metadata.name, \"kube-apiserver\")\n container := pod.spec.containers[_]\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The API server container: %v has insecure-port flag enabled\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if the api server has insecure-port enabled",
+ "remediation": "Make sure that the insecure-port flag of the api server is set to 0",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 9
+ },
+ {
+ "guid": "",
+ "name": "Host PID/IPC privileges",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Privilege escalation"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0038",
+ "controlID": "C-0038",
+ "creationTime": "",
+ "description": "Containers should be isolated from the host machine as much as possible. The hostPID and hostIPC fields in deployment yaml may allow cross-container influence and may expose the host itself to potentially malicious or destructive actions. This control identifies all PODs using hostPID or hostIPC privileges.",
+ "remediation": "Remove hostPID and hostIPC from the yaml file(s) privileges unless they are absolutely necessary.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "host-pid-ipc-privileges",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# Fails if pod has hostPID enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_pid(pod.spec)\n\tpath := \"spec.hostPID\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostPID enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has hostIPC enabled\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tis_host_ipc(pod.spec)\n\tpath := \"spec.hostIPC\"\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v has hostIPC enabled\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostPID enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_pid(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostPID enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has hostIPC enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_ipc(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod with hostIPC enabled\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostPID enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_pid(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostPID\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostPID enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has hostIPC enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_ipc(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostIPC\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod with hostIPC enabled\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Check that hostPID and hostIPC are set to false. Default is false. Only in pod spec\n\n\nis_host_pid(podspec){\n podspec.hostPID == true\n}\n\nis_host_ipc(podspec){\n podspec.hostIPC == true\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "Containers should be as isolated as possible from the host machine. The hostPID and hostIPC fields in Kubernetes may excessively expose the host to potentially malicious actions.",
+ "remediation": "Make sure that the fields hostIPC and hostPID in the pod spec are not set to true (set to false or not present)",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 7
+ },
+ {
+ "guid": "",
+ "name": "Immutable container filesystem",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Execution",
+ "Persistence"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0017",
+ "controlID": "C-0017",
+ "creationTime": "",
+ "description": "Mutable container filesystem can be abused to inject malicious code or data into containers. Use immutable (read-only) filesystem to limit potential attacks.",
+ "remediation": "Set the filesystem of the container to read-only when possible (POD securityContext, readOnlyRootFilesystem: true). If containers application needs to write into the filesystem, it is recommended to mount secondary filesystems for specific directories where application require write access.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "immutable-container-filesystem",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# Fails if pods has container with mutable filesystem\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v has mutable filesystem\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with mutable filesystem \ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has container with mutable filesystem \ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_mutable_filesystem(container, beggining_of_path, i)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in 
%v: %v has mutable filesystem\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Default of readOnlyRootFilesystem is false. This field is only in container spec and not pod spec\nis_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tcontainer.securityContext.readOnlyRootFilesystem == false\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)])\n\tfixPath = \"\"\n }\n\n is_mutable_filesystem(container, beggining_of_path, i) = [failed_path, fixPath] {\n\tnot container.securityContext.readOnlyRootFilesystem == false\n not container.securityContext.readOnlyRootFilesystem == true\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.readOnlyRootFilesystem\", [beggining_of_path, format_int(i, 10)]), \"value\": \"true\"}\n\tfailed_path = \"\"\n }\n\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if container has mutable filesystem",
+ "remediation": "Make sure that the securityContext.readOnlyRootFilesystem field in the container/pod spec is set to true",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 3
+ },
+ {
+ "guid": "",
+ "name": "Non-root containers",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Privilege escalation"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0013",
+ "controlID": "C-0013",
+ "creationTime": "",
+ "description": "Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This control identifies all the Pods running as root or can escalate to root.",
+ "remediation": "If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on allowPrivlegeEscalation bit and make sure runAsNonRoot is true.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "non-root-containers",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n################################################################################\n# Rules\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\n\tbeggining_of_path := \"spec\"\n\talertInfo := evaluate_workload_non_root_container(container, pod, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := get_failed_path(alertInfo, i) \n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec\"\n\talertInfo := evaluate_workload_non_root_container(container, wl.spec.jobTemplate.spec.template, beggining_of_path)\n\tfixPath := get_fixed_path(alertInfo, i)\n failed_path := 
get_failed_path(alertInfo, i) \n\t\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n \"fixPaths\": fixPath,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nget_failed_path(alertInfo, i) = [replace(alertInfo.failed_path,\"container_ndx\",format_int(i,10))] {\n\talertInfo.failed_path != \"\"\n} else = []\n\n\nget_fixed_path(alertInfo, i) = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}, {\"path\":replace(alertInfo.fixPath[1].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[1].value}]{\n\tcount(alertInfo.fixPath) == 2\n} else = [{\"path\":replace(alertInfo.fixPath[0].path,\"container_ndx\",format_int(i,10)), \"value\":alertInfo.fixPath[0].value}] {\n\tcount(alertInfo.fixPath) == 1\n} else = []\n\n#################################################################################\n# Workload evaluation \n\nevaluate_workload_non_root_container(container, pod, beggining_of_path) = alertInfo {\n\trunAsNonRootValue := get_run_as_non_root_value(container, pod, beggining_of_path)\n\trunAsNonRootValue.value == false\n\t\n\trunAsUserValue := get_run_as_user_value(container, pod, beggining_of_path)\n\trunAsUserValue.value == 0\n\n\talertInfo := choose_first_if_defined(runAsUserValue, runAsNonRootValue)\n} else = alertInfo {\n allowPrivilegeEscalationValue := get_allow_privilege_escalation(container, pod, beggining_of_path)\n allowPrivilegeEscalationValue.value == true\n\n alertInfo := allowPrivilegeEscalationValue\n}\n\n\n#################################################################################\n# Value resolution functions\n\n\nget_run_as_non_root_value(container, pod, beggining_of_path) = runAsNonRoot {\n failed_path := 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : container.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [] ,\"defined\" : true}\n} else = runAsNonRoot {\n\tfailed_path := sprintf(\"%v.securityContext.runAsNonRoot\", [beggining_of_path]) \n runAsNonRoot := {\"value\" : pod.spec.securityContext.runAsNonRoot, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false} {\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : false, \"failed_path\" : \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]) , \"value\":\"true\"}, {\"path\":sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nget_run_as_user_value(container, pod, beggining_of_path) = runAsUser {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : container.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [], \"defined\" : true}\n} else = runAsUser {\n\tfailed_path := sprintf(\"%v.securityContext.runAsUser\", [beggining_of_path]) \n runAsUser := {\"value\" : pod.spec.securityContext.runAsUser, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}],\"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": 
sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n\t\"defined\" : false}\n\nget_run_as_group_value(container, pod, beggining_of_path) = runAsGroup {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : container.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = runAsGroup {\n\tfailed_path := sprintf(\"%v.securityContext.runAsGroup\", [beggining_of_path])\n runAsGroup := {\"value\" : pod.spec.securityContext.runAsGroup, \"failed_path\" : failed_path, \"fixPath\":[], \"defined\" : true}\n} else = {\"value\" : 0, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"}], \"defined\" : false}{\n\tis_allow_privilege_escalation_field(container, pod)\n} else = {\"value\" : 0, \"failed_path\": \"\", \n\t\"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.runAsNonRoot\", [beggining_of_path]), \"value\":\"true\"},{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}],\n \t\"defined\" : false\n}\n\nget_allow_privilege_escalation(container, pod, beggining_of_path) = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := {\"value\" : container.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = allowPrivilegeEscalation {\n\tfailed_path := sprintf(\"%v.securityContext.allowPrivilegeEscalation\", [beggining_of_path])\n allowPrivilegeEscalation := 
{\"value\" : pod.spec.securityContext.allowPrivilegeEscalation, \"failed_path\" : failed_path, \"fixPath\": [],\"defined\" : true}\n} else = {\"value\" : true, \"failed_path\": \"\", \"fixPath\": [{\"path\": sprintf(\"%v.containers[container_ndx].securityContext.allowPrivilegeEscalation\", [beggining_of_path]), \"value\":\"false\"}], \"defined\" : false}\n\nchoose_first_if_defined(l1, l2) = c {\n l1.defined\n c := l1\n} else = l2\n\n\nis_allow_privilege_escalation_field(container, pod) {\n\tcontainer.securityContext.allowPrivilegeEscalation == false\n}\n\nis_allow_privilege_escalation_field(container, pod) {\n\tpod.spec.securityContext.allowPrivilegeEscalation == false\n}\n\n\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if container can run as root",
+ "remediation": "Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. Also make sure that the allowPrivilegeEscalation field is set to false",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 6
+ },
+ {
+ "guid": "",
+ "name": "Privileged container",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Privilege escalation"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security"
+ ],
+ "microsoftMitreColumns": [
+ "Privilege escalation"
+ ]
+ },
+ "id": "C-0057",
+ "controlID": "C-0057",
+ "creationTime": "",
+ "description": "Potential attackers may gain access to privileged containers and inherit access to the host resources. Therefore, it is not recommended to deploy privileged containers unless it is absolutely necessary. This control identifies all the privileged Pods.",
+ "remediation": "Remove privileged capabilities by setting the securityContext.privileged to false. If you must deploy a Pod as privileged, add other restriction to it, such as network policy, Seccomp etc and still remove all unnecessary capabilities. Use the exception mechanism to remove unnecessary notifications.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-privilege-escalation",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Privilege Escalation::privileged container",
+ "mitre": "Privilege Escalation",
+ "mitreCode": "TA0004"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n\tpod := input[_]\n\tpod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n }\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v is defined as privileged:\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n#handles cronjob\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tpath := isPrivilegedContainer(container, i, beggining_of_path)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": path,\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n }\n}\n\n\n# Only SYS_ADMIN capabilite\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tnot 
container.securityContext.privileged == true\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path) \u003e 0\n}\n\n# Only securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tcontainer.securityContext.privileged == true\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) \u003c 1\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])]\n}\n\n# SYS_ADMIN capabilite \u0026\u0026 securityContext.privileged == true\nisPrivilegedContainer(container, i, beggining_of_path) = path {\n\tpath1 = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capabilite = container.securityContext.capabilities.add[k]; capabilite == \"SYS_ADMIN\"]\n\tcount(path1) \u003e 0\n\tcontainer.securityContext.privileged == true\n\tpath = array.concat(path1, [sprintf(\"%vcontainers[%v].securityContext.privileged\", [beggining_of_path, format_int(i, 10)])])\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines if pods/deployments are defined with privileged set to true",
+ "remediation": "avoid defining pods as privileged",
+ "ruleQuery": "",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 8
+ },
+ {
+ "guid": "",
+ "name": "Automatic mapping of service account",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Credential access",
+ "Impact - K8s API access"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0034",
+ "controlID": "C-0034",
+ "creationTime": "",
+ "description": "Potential attacker may gain access to a POD and steal its service account token. Therefore, it is recommended to disable automatic mapping of the service account tokens in service account configuration and enable it only for PODs that need to use them.",
+ "remediation": "Disable automatic mounting of service account tokens to PODs either at the service account level or at the individual POD level, by specifying the automountServiceAccountToken: false. Note that POD level takes precedence.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "automount-service-account",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n# Fails if user account mount tokens in pod by default\ndeny [msga]{\n service_accounts := [service_account | service_account= input[_]; service_account.kind == \"ServiceAccount\"]\n service_account := service_accounts[_]\n result := is_auto_mount(service_account)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"the following service account: %v in the following namespace: %v mounts service account tokens in pods by default\", [service_account.metadata.name, service_account.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [service_account]\n\t\t}\n\t}\n} \n\n\n # -- ---- For workloads -- ---- \n# Fails if pod mount tokens by default (either by its config or by its SA config)\n\n # POD \ndeny [msga]{\n pod := input[_]\n\tpod.kind == \"Pod\"\n\n\tbeggining_of_path := \"spec.\"\n\twl_namespace := pod.metadata.namespace\n\tresult := is_sa_auto_mounted(pod.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t \"alertMessage\": sprintf(\"Pod: %v in the following namespace: %v mounts service account tokens by default\", [pod.metadata.name, pod.metadata.namespace]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n} \n\n# WORKLOADS\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tbeggining_of_path := \"spec.template.spec.\"\n\n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.template.spec, beggining_of_path, 
wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# CRONJOB\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n \n\twl_namespace := wl.metadata.namespace\n\tresult := is_sa_auto_mounted(wl.spec.jobTemplate.spec.template.spec, beggining_of_path, wl_namespace)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v in the following namespace: %v mounts service account tokens by default\", [wl.kind, wl.metadata.name, wl.metadata.namespace]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"failedPaths\": failed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n # -- ---- For workloads -- ---- \nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken not in pod spec\n\tnot spec.automountServiceAccountToken == false\n\tnot spec.automountServiceAccountToken == true\n\n\t# check if SA automount by default\n\tsa := input[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\t# path is pod spec\n\tfix_path = { \"path\": sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path]), \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nget_failed_path(paths) = [paths[0]] {\n\tpaths[0] != 
\"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# SA automount by default\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) \u003e 0\n\tsa := service_accounts[_]\n\tis_same_sa(spec, sa.metadata.name)\n\tis_same_namespace(sa.metadata.namespace , wl_namespace)\n\tnot sa.automountServiceAccountToken == false\n\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\nis_sa_auto_mounted(spec, beggining_of_path, wl_namespace) = [failed_path, fix_path] {\n\t# automountServiceAccountToken set to true in pod spec\n\tspec.automountServiceAccountToken == true\n\t\n\t# No SA (yaml scan)\n\tservice_accounts := [service_account | service_account = input[_]; service_account.kind == \"ServiceAccount\"]\n\tcount(service_accounts) == 0\n\tfailed_path = sprintf(\"%vautomountServiceAccountToken\", [beggining_of_path])\n\tfix_path = \"\"\n}\n\n\n\n # -- ---- For SAs -- ---- \nis_auto_mount(service_account) = [failed_path, fix_path] {\n\tservice_account.automountServiceAccountToken == true\n\tfailed_path = \"automountServiceAccountToken\"\n\tfix_path = \"\"\n}\n\nis_auto_mount(service_account)= [failed_path, fix_path] {\n\tnot service_account.automountServiceAccountToken == false\n\tnot service_account.automountServiceAccountToken == true\n\tfix_path = {\"path\": \"automountServiceAccountToken\", \"value\": \"false\"}\n\tfailed_path = \"\"\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tspec.serviceAccountName == serviceAccountName\n}\n\nis_same_sa(spec, serviceAccountName) {\n\tnot spec.serviceAccountName \n\tserviceAccountName == \"default\"\n}\n\n\nis_same_namespace(metadata1, metadata2) 
{\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod",
+ "ServiceAccount"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if service account and workloads mount service account token by default",
+ "remediation": "Make sure that the automountServiceAccountToken field on the service account spec is set to false",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 6
+ },
+ {
+ "guid": "",
+ "name": "HostNetwork access",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Discovery",
+ "Lateral movement",
+ "Impact - service access"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0041",
+ "controlID": "C-0041",
+ "creationTime": "",
+ "description": "Potential attackers may gain access to a POD and inherit access to the entire host network. For example, in AWS case, they will have access to the entire VPC. This control identifies all the PODs with host network access enabled.",
+ "remediation": "Only connect PODs to host network when it is necessary. If not, set the hostNetwork field of the pod spec to false, or completely remove it (false is the default). Whitelist only those PODs that must have access to host network by design.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "host-network-access",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n# Fails if pod has hostNetwork enabled\ndeny[msga] {\n pods := [ pod | pod = input[_] ; pod.kind == \"Pod\"]\n pod := pods[_]\n\n\tis_host_network(pod.spec)\n\tpath := \"spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"Pod: %v is connected to the host network\", [pod.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has hostNetwork enabled\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tis_host_network(wl.spec.template.spec)\n\tpath := \"spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"%v: %v has a pod connected to the host network\", [wl.kind, wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has hostNetwork enabled\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tis_host_network(wl.spec.jobTemplate.spec.template.spec)\n\tpath := \"spec.jobTemplate.spec.template.spec.hostNetwork\"\n msga := {\n\t\"alertMessage\": sprintf(\"CronJob: %v has a pod connected to the host network\", [wl.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\":[],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_host_network(podspec) {\n podspec.hostNetwork == true\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if pod has hostNetwork enabled",
+ "remediation": "Make sure that the hostNetwork field of the pod spec is not set to true (set to false or not present)",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 7
+ },
+ {
+ "guid": "",
+ "name": "Resource limits",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Impact - service destruction"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security"
+ ]
+ },
+ "id": "C-0009",
+ "controlID": "C-0009",
+ "creationTime": "",
+ "description": "CPU and memory resources should have a limit set for every container or a namespace to prevent resource exhaustion. This control identifies all the Pods without resource limit definitions by checking their yaml definition file as well as their namespace LimitRange objects. It is also recommended to use ResourceQuota object to restrict overall namespace resources, but this is not verified by this control.",
+ "remediation": "Define LimitRange and Resource Limits in the namespace or in the deployment/POD yamls.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "resource-policies",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# Check if container has limits\ndeny[msga] {\n \tpods := [pod | pod = input[_]; pod.kind == \"Pod\"]\n pod := pods[_]\n\tcontainer := pod.spec.containers[i]\n\t\n\t\n\tbeggining_of_path := \"spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Check if container has limits - for workloads\n# If there is no limits specified in the workload, we check the namespace, since if limits are only specified for namespace\n# and not in workload, it won't be on the yaml\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path\t:= \"spec.template.spec.\"\n\tfixPath := is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\t\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n\t\n}\n\n# Check if container has limits - for cronjobs\n# If there is no limits specified in the cronjob, we check the namespace, since if limits are only specified for namespace\n# and not in cronjob, it won't be on the yaml\ndeny [msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tfixPath 
:= is_no_cpu_and_memory_limits_defined(container, beggining_of_path, i)\n\t\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"there are no cpu and memory limits defined for container : %v\", [container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"fixPaths\": fixPath,\n\t\t\"failedPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# no limits at all\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tnot container.resources.limits\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only memory limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.cpu\n\tcontainer.resources.limits.memory\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}\n\n# only cpu limit\nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) =fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tcontainer.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n\tfailed_path = \"\"\n}\n# limits but without capu and memory \nis_no_cpu_and_memory_limits_defined(container, beggining_of_path, i) = fixPath {\n\tcontainer.resources.limits\n\tnot container.resources.limits.memory\n\tnot container.resources.limits.cpu\n\tfixPath = [{\"path\": sprintf(\"%vcontainers[%v].resources.limits.cpu\", [beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}, {\"path\": sprintf(\"%vcontainers[%v].resources.limits.memory\", 
[beggining_of_path, format_int(i, 10)]), \"value\":\"YOUR_VALUE\"}]\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if namespace has no resource policies defined",
+ "remediation": "Make sure that you define resource policies (LimitRange or ResourceQuota) which limit the usage of resources for all the namespaces",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 7
+ },
+ {
+ "guid": "",
+ "name": "Allow privilege escalation",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Privilege escalation"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0016",
+ "controlID": "C-0016",
+ "creationTime": "",
+ "description": "Attackers may gain access to a container and uplift its privilege to enable excessive capabilities.",
+ "remediation": "If your application does not need it, make sure the allowPrivilegeEscalation field of the securityContext is set to false.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-allow-privilege-escalation",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# Fails if pod has container that allow privilege escalation\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v allow privilege escalation\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n# Fails if workload has a container that allow privilege escalation\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path := get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container that allow privilege escalation\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := is_allow_privilege_escalation_container(container, i, beggining_of_path)\n\tfailed_path := get_failed_path(result)\n fixed_path 
:= get_fixed_path(result)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v allow privilege escalation\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": failed_path,\n\t\t\"fixPaths\": fixed_path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n not container.securityContext.allowPrivilegeEscalation == false\n\tnot container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) \u003e 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfailed_path = \"\"\n\tfixPath = {\"path\": sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)]), \"value\":\"false\"} \n}\n\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path) = [failed_path, fixPath] {\n container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) == 0\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\nis_allow_privilege_escalation_container(container, i, beggining_of_path)= [failed_path, fixPath] {\n 
container.securityContext.allowPrivilegeEscalation == true\n\tpsps := [psp | psp= input[_]; psp.kind == \"PodSecurityPolicy\"]\n\tcount(psps) \u003e 0\n\tpsp := psps[_]\n\tnot psp.spec.allowPrivilegeEscalation == false\n\tfixPath = \"\"\n\tfailed_path = sprintf(\"%vcontainers[%v].securityContext.allowPrivilegeEscalation\", [beggining_of_path, format_int(i, 10)])\n}\n\n get_failed_path(paths) = [paths[0]] {\n\tpaths[0] != \"\"\n} else = []\n\n\nget_fixed_path(paths) = [paths[1]] {\n\tpaths[1] != \"\"\n} else = []\n\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ },
+ {
+ "apiGroups": [
+ "policy"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "PodSecurityPolicy"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if container allows privilege escalation",
+ "remediation": "Make sure that the allowPrivilegeEscalation field in the securityContext of pod/container is set to false",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 6
+ },
+ {
+ "guid": "",
+ "name": "Applications credentials in configuration files",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Credential access"
+ ]
+ },
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Credential access"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance",
+ "security-impact"
+ ],
+ "microsoftMitreColumns": [
+ "Credential access",
+ "Lateral Movement"
+ ]
+ },
+ "id": "C-0012",
+ "controlID": "C-0012",
+ "creationTime": "",
+ "description": "Attackers who have access to configuration files can steal the stored secrets and use them. This control checks if ConfigMaps or pod specifications have sensitive information in their configuration.",
+ "remediation": "Use Kubernetes secrets or Key Management Systems to store credentials.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-credentials-in-env-var",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files"
+ },
+ "creationTime": "",
+ "rule": "\tpackage armo_builtins\n\t# import data.cautils as cautils\n\t# import data.kubernetes.api.client as client\n\timport data\n\n\tdeny[msga] {\n\t\tpod := input[_]\n\t\tpod.kind == \"Pod\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := pod.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value) \n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Pod: %v has sensitive information in environment variables\", [pod.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [pod]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\t\tspec_template_spec_patterns[wl.kind]\n\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value) \n\n\t\tis_not_reference(env)\n\n\t\tpath := sprintf(\"spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\t\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"%v: %v has sensitive information in environment variables\", [wl.kind, wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": 
[],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\tdeny[msga] {\n\t\twl := input[_]\n\t\twl.kind == \"CronJob\"\n\t\t# see default-config-inputs.json for list values\n\t\tsensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n\t\tkey_name := sensitive_key_names[_]\n\t\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\t\tenv := container.env[j]\n\n\t\tcontains(lower(env.name), key_name)\n\n\t\tenv.value != \"\"\n\t\t# check that value wasn't allowed by user\n\t\tnot is_allowed_value(env.value) \n\t\t\n\t\tis_not_reference(env)\n\t\t\n\t\tpath := sprintf(\"spec.jobTemplate.spec.template.spec.containers[%v].env[%v].name\", [format_int(i, 10), format_int(j, 10)])\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"Cronjob: %v has sensitive information in environment variables\", [wl.metadata.name]),\n\t\t\t\"alertScore\": 9,\n\t\t\t\"fixPaths\": [],\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [wl]\n\t\t\t}\n\t\t}\n\t}\n\n\n\nis_not_reference(env)\n{\n\tnot env.valueFrom.secretKeyRef\n\tnot env.valueFrom.configMapKeyRef\n}\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": [
+ "settings.postureControlInputs.sensitiveKeyNames",
+ "settings.postureControlInputs.sensitiveValuesAllowed"
+ ],
+ "controlConfigInputs": [
+ {
+ "path": "settings.postureControlInputs.sensitiveKeyNames",
+ "name": "Keys",
+ "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Here you can find some examples of popular key phrases that Kubescape is searching for"
+ },
+ {
+ "path": "settings.postureControlInputs.sensitiveValuesAllowed",
+ "name": "AllowedValues",
+ "description": "Allowed values"
+ }
+ ],
+ "description": "fails if Pods have sensitive information in configuration",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "rule-credentials-configmap",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Credential access::Applications credentials in configuration files, Lateral Movement::Applications credentials in configuration files"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n# import data.cautils as cautils\n# import data.kubernetes.api.client as client\nimport data\n\n# fails if config map has keys with suspicious name\ndeny[msga] {\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n # see default-config-inputs.json for list values\n sensitive_key_names := data.postureControlInputs.sensitiveKeyNames\n key_name := sensitive_key_names[_]\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n \n contains(lower(map_key), lower(key_name))\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n \n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - not base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n regex.match(value , map_secret)\n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n# fails if config map has values with suspicious content - base 64\ndeny[msga] {\n # see default-config-inputs.json for list values\n sensitive_values := data.postureControlInputs.sensitiveValues\n value := 
sensitive_values[_]\n\n\tconfigmap := input[_]\n configmap.kind == \"ConfigMap\"\n map_secret := configmap.data[map_key]\n map_secret != \"\"\n\n decoded_secret := base64.decode(map_secret)\n \n # check that value wasn't allowed by user\n not is_allowed_value(map_secret)\n\n regex.match(value , decoded_secret)\n\n path := sprintf(\"data[%v]\", [map_key])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"this configmap has sensitive information: %v\", [configmap.metadata.name]),\n\t\t\"alertScore\": 9,\n \"failedPaths\": [path],\n \"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [configmap]\n\t\t}\n }\n}\n\n\nis_allowed_value(value) {\n allow_val := data.postureControlInputs.sensitiveValuesAllowed[_]\n value == allow_val\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "ConfigMap"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": [
+ "settings.postureControlInputs.sensitiveValues",
+ "settings.postureControlInputs.sensitiveKeyNames",
+ "settings.postureControlInputs.sensitiveValuesAllowed"
+ ],
+ "controlConfigInputs": [
+ {
+ "path": "settings.postureControlInputs.sensitiveValues",
+ "name": "Values",
+ "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Below you can find some examples of popular value phrases that Kubescape is searching for"
+ },
+ {
+ "path": "settings.postureControlInputs.sensitiveKeyNames",
+ "name": "Keys",
+ "description": "Secrets are stored as a key/value pair. The names of the keys/values may change from one company to the other. Here you can find some examples of popular key phrases that Kubescape is searching for"
+ },
+ {
+ "path": "settings.postureControlInputs.sensitiveValuesAllowed",
+ "name": "AllowedValues",
+ "description": "Allowed values"
+ }
+ ],
+ "description": "fails if ConfigMaps have sensitive information in configuration",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 8
+ },
+ {
+ "guid": "",
+ "name": "Cluster-admin binding",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Impact - data destruction",
+ "Impact - service injection"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Privilege escalation"
+ ],
+ "rbacQuery": "Show cluster_admin"
+ },
+ "id": "C-0035",
+ "controlID": "C-0035",
+ "creationTime": "",
+ "description": "Attackers who have cluster admin permissions (can perform any action on any resource), can take advantage of their privileges for malicious activities. This control determines which subjects have cluster admin permissions.",
+ "remediation": "You should apply least privilege principle. Make sure cluster admin permissions are granted only when it is absolutely necessary. Don't use subjects with such high permissions for daily operations.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "rule-list-all-cluster-admins",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding",
+ "useUntilKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.cautils as cautils\n\n# input: roles\n# apiversion: v1\n# does: returns roles+ related subjects in rolebinding\n\ndeny[msga] {\n\troles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does: returns clusterroles+ related subjects in rolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n\tcanCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n \n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \"alertObject\": {\n\t\t\t\"k8sApiObjects\": 
[role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n }\n}\n\n# input: ClusterRole\n# apiversion: v1\n# does:\treturns clusterroles+ related subjects in clusterrolebinding\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[i]\n\tcanCreate(rule, i)\n canCreateResources(rule, i)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"The following %v: %v have high privileges, such as cluster-admin\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role,rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\ncanCreate(rule, i) {\n\tverb := rule.verbs[j]\n\tverb == \"*\"\n}\n\ncanCreateResources(rule, i){\n\tis_api_group(rule)\n\tresource := rule.resources[j]\n\tresource == \"*\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Role",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "RoleBinding"
+ ]
+ }
+ ],
+ "ruleDependencies": [
+ {
+ "packageName": "cautils"
+ }
+ ],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users have cluster admin permissions",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "rule-list-all-cluster-admins-v1",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Privilege Escalation::Cluster-admin binding",
+ "resourcesAggregator": "subject-role-rolebinding",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# returns subjects with cluster admin permissions\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\nis_same_subjects(subjectVector, subject)\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) \u003e 0\n\n\tapi_groups := [\"*\", \"\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) \u003e 0\n\n\tresources := [\"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) \u003e 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s have high privileges, such as cluster-admin\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 3,\n\t\t\"fixPaths\": [],\n\t\t\"failedPaths\": finalpath,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) 
{\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Role",
+ "ClusterRole",
+ "ClusterRoleBinding",
+ "RoleBinding"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users have cluster admin permissions",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 6
+ },
+ {
+ "guid": "",
+ "name": "Exec into container",
+ "attributes": {
+ "armoBuiltin": true,
+ "controlTypeTags": [
+ "compliance",
+ "security-impact"
+ ],
+ "microsoftMitreColumns": [
+ "Execution"
+ ],
+ "rbacQuery": "Show who can access into pods"
+ },
+ "id": "C-0002",
+ "controlID": "C-0002",
+ "creationTime": "",
+ "description": "Attackers with relevant permissions can run malicious commands in the context of legitimate containers in the cluster using “kubectl exec” command. This control determines which subjects have permissions to use this command.",
+ "remediation": "It is recommended to prohibit “kubectl exec” command in production environments. It is also recommended not to use subjects with this permission for daily cluster operations.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "exec-into-container",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Privilege Escalation::Exec into container",
+ "useUntilKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "\npackage armo_builtins\nimport data.cautils as cautils\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1 \n# returns subjects that can exec into container\n\ndeny[msga] {\n\t roles := [role | role= input[_]; role.kind == \"Role\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n \tsubject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1 \n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"RoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": 
{\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\n# input: clusterrolebindings + rolebindings\n# apiversion: rbac.authorization.k8s.io/v1 \n# returns subjects that can exec into container\n\ndeny[msga] {\n roles := [role | role= input[_]; role.kind == \"ClusterRole\"]\n rolebindings := [rolebinding | rolebinding = input[_]; rolebinding.kind == \"ClusterRoleBinding\"]\n role:= roles[_]\n rolebinding := rolebindings[_]\n\n rule:= role.rules[_]\n\tcan_exec_to_pod_resource(rule)\n\tcan_exec_to_pod_verb(rule)\n\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n subject := rolebinding.subjects[i]\n path := sprintf(\"subjects[%v]\", [format_int(i, 10)])\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following %v: %v, can exec into containers\", [subject.kind, subject.name]),\n\t\t\"alertScore\": 9,\n\t\t\"failedPaths\": [path],\n\t\t\"packagename\": \"armo_builtins\",\n \t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [role, rolebinding],\n\t\t\t\"externalObjects\": {\n\t\t\t\t\"subject\" : [subject]\n\t\t\t}\n\t\t}\n\t}\n}\n\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"create\")\n}\ncan_exec_to_pod_verb(rule) {\n\tcautils.list_contains(rule.verbs, \"*\")\n}\n\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/exec\")\n\t\n}\ncan_exec_to_pod_resource(rule) {\n\tcautils.list_contains(rule.resources, \"pods/*\")\n}\ncan_exec_to_pod_resource(rule) {\n\tis_api_group(rule)\n\tcautils.list_contains(rule.resources, \"*\")\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"\"\n}\n\nis_api_group(rule) {\n\tapiGroup := rule.apiGroups[_]\n\tapiGroup == \"*\"\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "rbac.authorization.k8s.io"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "RoleBinding",
+ "ClusterRoleBinding",
+ "Role",
+ "ClusterRole"
+ ]
+ }
+ ],
+ "ruleDependencies": [
+ {
+ "packageName": "cautils"
+ }
+ ],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users have permissions to exec into pods",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ },
+ {
+ "guid": "",
+ "name": "exec-into-container-v1",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Privilege Escalation::Exec into container",
+ "resourcesAggregator": "subject-role-rolebinding",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# input: regoResponseVectorObject\n# returns subjects that can exec into container\n\ndeny[msga] {\n\tsubjectVector := input[_]\n\trole := subjectVector.relatedObjects[i]\n\trolebinding := subjectVector.relatedObjects[j]\n\tendswith(role.kind, \"Role\")\n\tendswith(rolebinding.kind, \"Binding\")\n\n\trule := role.rules[p]\n\n\tsubject := rolebinding.subjects[k]\n\tis_same_subjects(subjectVector, subject)\n\n\trule_path := sprintf(\"relatedObjects[%d].rules[%d]\", [i, p])\n\n\tverbs := [\"create\", \"*\"]\n\tverb_path := [sprintf(\"%s.verbs[%d]\", [rule_path, l]) | verb = rule.verbs[l]; verb in verbs]\n\tcount(verb_path) \u003e 0\n\n\tapi_groups := [\"\", \"*\"]\n\tapi_groups_path := [sprintf(\"%s.apiGroups[%d]\", [rule_path, a]) | apiGroup = rule.apiGroups[a]; apiGroup in api_groups]\n\tcount(api_groups_path) \u003e 0\n\n\tresources := [\"pods/exec\", \"pods/*\", \"*\"]\n\tresources_path := [sprintf(\"%s.resources[%d]\", [rule_path, l]) | resource = rule.resources[l]; resource in resources]\n\tcount(resources_path) \u003e 0\n\n\tpath := array.concat(resources_path, verb_path)\n\tpath2 := array.concat(path, api_groups_path)\n\tfinalpath := array.concat(path2, [\n\t\tsprintf(\"relatedObjects[%d].subjects[%d]\", [j, k]),\n\t\tsprintf(\"relatedObjects[%d].roleRef.name\", [j]),\n\t])\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Subject: %s-%s can exec into containers\", [subjectVector.kind, subjectVector.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": finalpath,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": subjectVector,\n\t\t},\n\t}\n}\n\n# for service accounts\nis_same_subjects(subjectVector, subject) {\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.namespace == subject.namespace\n}\n\n# for users/ groups\nis_same_subjects(subjectVector, subject) 
{\n\tsubjectVector.kind == subject.kind\n\tsubjectVector.name == subject.name\n\tsubjectVector.apiGroup == subject.apiGroup\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "rbac.authorization.k8s.io"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "RoleBinding",
+ "ClusterRoleBinding",
+ "Role",
+ "ClusterRole"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "determines which users have permissions to exec into pods",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 5
+ },
+ {
+ "guid": "",
+ "name": "Insecure capabilities",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Privilege escalation"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0046",
+ "controlID": "C-0046",
+ "creationTime": "",
+ "description": "Giving insecure or excessive capabilities to a container can increase the impact of the container compromise. This control identifies all the PODs with dangerous capabilities (see documentation pages for details).",
+ "remediation": "Remove all insecure capabilities which are not necessary for the container.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "insecure-capabilities",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data\nimport data.cautils as cautils\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v have dangerous capabilities\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainer := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in workload: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer := wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n result := is_dangerous_capabilities(container, beggining_of_path, i)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in cronjob: %v have dangerous capabilities\", [container.name, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": result,\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_dangerous_capabilities(container, beggining_of_path, i) = path {\n\t# see default-config-inputs.json for list 
values\n insecureCapabilities := data.postureControlInputs.insecureCapabilities\n\tpath = [sprintf(\"%vcontainers[%v].securityContext.capabilities.add[%v]\", [beggining_of_path, format_int(i, 10), format_int(k, 10)]) | capability = container.securityContext.capabilities.add[k]; cautils.list_contains(insecureCapabilities, capability)]\n\tcount(path) \u003e 0\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": [
+ "settings.postureControlInputs.insecureCapabilities"
+ ],
+ "controlConfigInputs": [
+ {
+ "path": "settings.postureControlInputs.insecureCapabilities",
+ "name": "Insecure capabilities",
+ "description": "You can see the list of capabilities in https://man7.org/linux/man-pages/man7/capabilities.7.html. Kubescape looks for the following capabilities in containers which might lead to attackers getting high privileges in your system."
+ }
+ ],
+ "description": "fails if container has insecure capabilities",
+ "remediation": "Remove all insecure capabilities which aren’t necessary for the container.",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 7
+ },
+ {
+ "guid": "",
+ "name": "Linux hardening",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Privilege escalation"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0055",
+ "controlID": "C-0055",
+ "creationTime": "",
+ "description": "Containers may be given more privileges than they actually need. This can increase the potential impact of a container compromise.",
+ "remediation": "You can use AppArmor, Seccomp, SELinux and Linux Capabilities mechanisms to restrict containers abilities to utilize unwanted privileges.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "linux-hardening",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\nimport future.keywords.in\n\n# Fails if pod does not define linux security hardening \ndeny[msga] {\n\tobj := input[_]\n\tfix_paths := is_unsafe_obj(obj)\n\tcount(fix_paths) \u003e 0\n\n\t# final_fix_pathes := array.concat(fix_paths) # -\u003e produce only one failed result\n\tfinal_fix_pathes := fix_paths[_] # -\u003e produce failed result for each container\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%s: %s does not define any linux security hardening\", [obj.kind, obj.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": final_fix_pathes,\n\t\t\"alertObject\": {\"k8sApiObjects\": [obj]},\n\t}\n}\n\nis_unsafe_obj(obj) := fix_paths {\n\tobj.kind == \"Pod\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\"], [\"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind == \"CronJob\"\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"spec\"], [\"spec\", \"jobTemplate\", \"spec\", \"template\", \"metadata\", \"annotations\"])\n} else := fix_paths {\n\tobj.kind in [\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Job\"]\n\tfix_paths := are_unsafe_specs(obj, [\"spec\", \"template\", \"spec\"], [\"spec\", \"template\", \"metadata\", \"annotations\"])\n}\n\nare_unsafe_specs(obj, specs_path, anotation_path) := paths {\n\t# spec\n\tspecs := object.get(obj, specs_path, null)\n\tspecs != null\n\tare_seccomp_and_selinux_disabled(specs)\n\n\t# annotation\n\tannotations := object.get(obj, anotation_path, [])\n\tapp_armor_annotations := [annotations[i] | annotation = i; startswith(i, \"container.apparmor.security.beta.kubernetes.io\")]\n\tcount(app_armor_annotations) == 0\n\n\t# container\n\tcontainers_path := array.concat(specs_path, [\"containers\"])\n\tcontainers := object.get(obj, containers_path, [])\n\n\t# Psuedo code explanation:\n\t# for i, container in containers\n\t# \t\tif 
is_unsafe_container:\n\t# \t\t\tfix_paths += [(containers_path[i] + field) for j, field in fix_fields]\n\t# \n\t# At the end we get [[\u003ccontainer1_path1\u003e, \u003ccontainer1_path2\u003e, ...], ...]\n\tcontainers_fix_path := concat(\".\", containers_path)\n\tfix_fields := [\"seccompProfile\", \"seLinuxOptions\", \"capabilities.drop[0]\"]\n\tpaths := [[{\n\t\t\"path\": sprintf(\"%s[%d].securityContext.%s\", [containers_fix_path, i, field]),\n\t\t\"value\": \"YOUR_VALUE\",\n\t} |\n\t\tfield := fix_fields[j]\n\t] |\n\t\tcontainer = containers[i]\n\t\tis_unsafe_container(container)\n\t]\n\n\tcount(paths) \u003e 0\n}\n\nare_seccomp_and_selinux_disabled(obj) {\n\tnot obj.securityContext.seccompProfile\n\tnot obj.securityContext.seLinuxOptions\n}\n\nis_unsafe_container(container) {\n\tare_seccomp_and_selinux_disabled(container)\n\tnot container.securityContext.capabilities.drop\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if container does not define any linux security hardening",
+ "remediation": "Make sure you define at least one linux security hardening property out of Seccomp, SELinux or Capabilities.",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 4
+ },
+ {
+ "guid": "",
+ "name": "Ingress and Egress blocked",
+ "attributes": {
+ "armoBuiltin": true,
+ "controlTypeTags": [
+ "compliance"
+ ]
+ },
+ "id": "C-0030",
+ "controlID": "C-0030",
+ "creationTime": "",
+ "description": "Disable Ingress and Egress traffic on all pods wherever possible. It is recommended to define restrictive network policy on all new PODs, and then enable sources/destinations that this POD must communicate with.",
+ "remediation": "Define a network policy that restricts ingress and egress connections.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "ingress-and-egress-blocked",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) \u003e 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\t\tcount(goodPolicies) \u003c 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For pods\ndeny[msga] {\n \t\tpods := [pod | pod= input[_]; pod.kind == \"Pod\"]\n\t\tnetworkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\t\tpod := pods[_]\n\t\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; pod_connected_to_network_policy(pod, networkpolicie)]\n\t\tcount(network_policies_connected_to_pod) \u003c 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"Pod: %v does not have ingress/egress defined\", [pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | 
networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) \u003e 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) \u003c 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For workloads\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; wlConnectedToNetworkPolicy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) \u003c 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) \u003e 0\n goodPolicies := [goodpolicie | goodpolicie= network_policies_connected_to_pod[_]; is_ingerss_egress_policy(goodpolicie)]\n\tcount(goodPolicies) \u003c 1\n\n msga := 
{\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# For Cronjobs\ndeny[msga] {\n wl := input[_]\n\twl.kind == \"CronJob\"\n networkpolicies := [networkpolicie | networkpolicie= input[_]; networkpolicie.kind == \"NetworkPolicy\"]\n\tnetwork_policies_connected_to_pod := [networkpolicie | networkpolicie= networkpolicies[_]; cronjob_connected_to_network_policy(wl, networkpolicie)]\n\tcount(network_policies_connected_to_pod) \u003c 1\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has Pods which don't have ingress/egress defined\", [wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tmetadata1.namespace == metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tnot metadata2.namespace\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata2.namespace\n\tmetadata1.namespace == \"default\"\n}\n\nis_same_namespace(metadata1, metadata2) {\n\tnot metadata1.namespace\n\tmetadata2.namespace == \"default\"\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata, pod.metadata)\n count(networkpolicie.spec.podSelector) \u003e 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == pod.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\npod_connected_to_network_policy(pod, networkpolicie){\n\tis_same_namespace(networkpolicie.metadata ,pod.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata 
, networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\n\nwlConnectedToNetworkPolicy(wl, networkpolicie){\n\tis_same_namespace(wl.metadata, networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) \u003e 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == wl.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n count(networkpolicie.spec.podSelector) == 0\n}\n\ncronjob_connected_to_network_policy(cj, networkpolicie){\n\tis_same_namespace(cj.metadata , networkpolicie.metadata)\n\tcount(networkpolicie.spec.podSelector) \u003e 0\n count({x | networkpolicie.spec.podSelector.matchLabels[x] == cj.spec.jobTemplate.spec.template.metadata.labels[x]}) == count(networkpolicie.spec.podSelector.matchLabels)\n}\n\nis_ingerss_egress_policy(networkpolicie) {\n list_contains(networkpolicie.spec.policyTypes, \"Ingress\")\n list_contains(networkpolicie.spec.policyTypes, \"Egress\")\n }\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ },
+ {
+ "apiGroups": [
+ "networking.k8s.io"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "NetworkPolicy"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if there are no ingress and egress policies defined for the pod",
+ "remediation": "Make sure you define ingress and egress policies for all your Pods",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 6
+ },
+ {
+ "guid": "",
+ "name": "Container hostPort",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Initial access"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance",
+ "devops"
+ ]
+ },
+ "id": "C-0044",
+ "controlID": "C-0044",
+ "creationTime": "",
+ "description": "Configuring hostPort requires a particular port number. If two objects specify the same HostPort, they cannot be deployed to the same node. It may prevent the second object from starting, even if Kubernetes tries to reschedule it on another node, provided there are available nodes with a sufficient amount of resources. Also, if the number of replicas of such a workload is higher than the number of nodes, the deployment will consistently fail.",
+ "remediation": "Avoid usage of hostPort unless it is absolutely necessary, in which case define appropriate exception. Use NodePort / ClusterIP instead.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "container-hostPort",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n\tpath := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n path := is_host_port(container, i, beggining_of_path)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 4,\n\t\t\"failedPaths\": path,\n\t\t\"fixPaths\":[],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nis_host_port(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", 
[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) \u003e 0\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "fails if container has hostPort",
+ "remediation": "Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 4
+ },
+ {
+ "guid": "",
+ "name": "Cluster internal networking",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Discovery",
+ "Lateral movement"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ],
+ "microsoftMitreColumns": [
+ "Lateral movement"
+ ]
+ },
+ "id": "C-0054",
+ "controlID": "C-0054",
+ "creationTime": "",
+ "description": "If no network policy is defined, attackers who gain access to a container may use it to move laterally in the cluster. This control lists namespaces in which no network policy is defined.",
+ "remediation": "Define Kubernetes network policies or use alternative products to protect cluster network.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "internal-networking",
+ "attributes": {
+ "armoBuiltin": true,
+ "m$K8sThreatMatrix": "Lateral Movement::Container internal networking, Discovery::Network mapping"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n# input: network policies\n# apiversion: networking.k8s.io/v1\n# fails if no network policies are defined in a certain namespace\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\tpolicy_names := [policy.metadata.namespace | policy = input[_]; policy.kind == \"NetworkPolicy\"]\n\tnot list_contains(policy_names, namespace.metadata.name)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}\n\nlist_contains(list, element) {\n some i\n list[i] == element\n}",
+ "resourceEnumerator": "package armo_builtins\n\n# input: network policies + namespaces\n# apiversion: networking.k8s.io/v1\n# returns all namespaces\n\ndeny[msga] {\n\tnamespaces := [namespace | namespace = input[_]; namespace.kind == \"Namespace\"]\n\tnamespace := namespaces[_]\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"no policy is defined for namespace %v\", [namespace.metadata.name]),\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"\"],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [namespace]\n\t\t}\n\t}\n}",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Namespace"
+ ]
+ },
+ {
+ "apiGroups": [
+ "networking.k8s.io"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "NetworkPolicy"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "lists namespaces in which no network policies are defined",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 4
+ },
+ {
+ "guid": "",
+ "name": "CVE-2021-25741 - Using symlink for arbitrary host file system access.",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Persistence",
+ "Impact - Data access in container"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0058",
+ "controlID": "C-0058",
+ "creationTime": "",
+ "description": "A user may be able to create a container with subPath or subPathExpr volume mounts to access files \u0026 directories anywhere on the host filesystem. The following Kubernetes versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower. This control checks the vulnerable versions and the actual usage of the subPath feature in all Pods in the cluster. If you want to learn more about the CVE, please refer to the CVE link: https://nvd.nist.gov/vuln/detail/CVE-2021-25741",
+ "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, or remove any existing Pods using subPath or subPathExpr feature.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "Symlink-Exchange-Can-Allow-Host-Filesystem-Access",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbeggining_of_path := \"spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in pod : %v with subPath/subPathExpr\", [container.name, pod.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. 
You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n is_vulnerable_version(current_version)\n wl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbeggining_of_path := \"spec.jobTemplate.spec.template.spec.\"\n final_path := is_sub_path_container(container, i, beggining_of_path)\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25741. You have a Node with a vulnerable version and the following container : %v in %v : %v with subPath/subPathExpr\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": final_path,\n\t\t\t\"fixPaths\": [],\n\t\t}\n}\n\n\n\nis_sub_path_container(container, i, beggining_of_path) = path {\n\tpath = [sprintf(\"%vcontainers[%v].volumeMounts[%v].subPath\" ,[beggining_of_path, format_int(i, 10), format_int(j, 10)]) | volume_mount = container.volumeMounts[j]; volume_mount.subPath]\n\tcount(path) \u003e 0\n}\n\nis_vulnerable_version(version) {\n version \u003c= \"v1.19.14\"\n}\n\nis_vulnerable_version(version){\n version \u003e= \"v1.22.0\"\n version \u003c= \"v1.22.1\"\n}\n\n\nis_vulnerable_version(version){\n version \u003e= \"v1.21.0\"\n version \u003c= \"v1.21.4\"\n}\n\n\nis_vulnerable_version(version){\n version \u003e= \"v1.20.0\"\n version \u003c= \"v1.20.9\"\n}\n\nis_vulnerable_version(version){\n\tversion == \"v1.20.10\"\n}\n\n\n",
+ "resourceEnumerator": "package armo_builtins\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n pod := input[_]\n pod.kind == \"Pod\"\n\n\tmsga := {\n\t\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [pod]},\n\t\t\t\"failedPaths\": [\"\"],\n\t}\n}\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n \n\tmsga := {\n\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [\"\"],\n\t}\n}\n\n\n\ndeny[msga] {\n\tnodes := input[_]\n\tcurrent_version := nodes.status.nodeInfo.kubeletVersion\n isVulnerableVersion(current_version)\n\tversionPath = \"status.nodeInfo.kubeletVersion\"\n wl := input[_]\n\twl.kind == \"CronJob\"\n \n\tmsga := {\n\t\t\"alertMessage\": \"\",\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [wl]},\n\t\t\t\"failedPaths\": [\"\"],\n\t}\n}\n\n\nisVulnerableVersion(version) {\n version \u003c= \"v1.19.14\"\n}\n\nisVulnerableVersion(version){\n version \u003e= \"v1.22.0\"\n version \u003c= \"v1.22.1\"\n}\n\n\nisVulnerableVersion(version){\n version \u003e= \"v1.21.0\"\n version \u003c= \"v1.21.4\"\n}\n\n\nisVulnerableVersion(version){\n version \u003e= \"v1.20.0\"\n version \u003c= \"v1.20.9\"\n}\n\nisVulnerableVersion(version){\n\tversion == \"v1.20.10\"\n}",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod",
+ "Node"
+ ]
+ },
+ {
+ "apiGroups": [
+ "apps"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Deployment",
+ "ReplicaSet",
+ "DaemonSet",
+ "StatefulSet"
+ ]
+ },
+ {
+ "apiGroups": [
+ "batch"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Job",
+ "CronJob"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "A user may be able to create a container with subPath volume mounts to access files \u0026 directories outside of the volume, including on the host filesystem. The following versions are affected: v1.22.0 - v1.22.1, v1.21.0 - v1.21.4, v1.20.0 - v1.20.10, version v1.19.14 and lower.",
+ "remediation": "To mitigate this vulnerability without upgrading kubelet, you can disable the VolumeSubpath feature gate on kubelet and kube-apiserver, and remove any existing Pods making use of the feature.",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 6
+ },
+ {
+ "guid": "",
+ "name": "CVE-2021-25742-nginx-ingress-snippet-annotation-vulnerability",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Initial access",
+ "Execution"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0059",
+ "controlID": "C-0059",
+ "creationTime": "",
+ "description": "Security issue in ingress-nginx where a user who can create or update ingress objects can use the custom snippets feature to obtain all secrets in the cluster (see more at https://github.com/kubernetes/ingress-nginx/issues/7837)",
+ "remediation": "To mitigate this vulnerability: 1. Upgrade to a version that allows mitigation (\u003e= v0.49.1 or \u003e= v1.0.1), 2. Set allow-snippet-annotations to false in your ingress-nginx ConfigMap based on how you deploy ingress-nginx",
+ "rules": [
+ {
+ "guid": "",
+ "name": "nginx-ingress-snippet-annotation-vulnerability",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tis_nginx_image(image)\n\tis_tag_image(image)\n\n\t# Extracting version from image tag\n\ttag_version_match := regex.find_all_string_submatch_n(\"[0-9]+\\\\.[0-9]+\\\\.[0-9]+\", image, -1)[0][0]\n image_version_str_arr := split(tag_version_match,\".\")\n\timage_version_arr := [to_number(image_version_str_arr[0]),to_number(image_version_str_arr[1]),to_number(image_version_str_arr[2])]\n\n\t# Check if vulnerable \n\tis_vulnerable(image_version_arr, deployment.metadata.namespace)\n\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. Deployment %v\", [deployment.metadata.name]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"fixPaths\":[],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nis_nginx_image(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nis_nginx_image(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nis_allow_snippet_annotation_on(namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmap_on_ingress_namespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfig_maps_with_snippet := [configmap | configmap= configmap_on_ingress_namespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(config_maps_with_snippet) \u003c 1\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] \u003c 49\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 0\n\timage_version[1] == 49\n\timage_version[2] == 
0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\t\nis_vulnerable(image_version, namespace) {\n\timage_version[0] == 1\n\timage_version[1] == 0\n\timage_version[2] == 0\n\tis_allow_snippet_annotation_on(namespace)\n}\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}",
+ "resourceEnumerator": "package armo_builtins\n\ndeny[msga] {\n\tdeployment := input[_]\n\tdeployment.kind == \"Deployment\"\n\timage := deployment.spec.template.spec.containers[i].image\n\tisNginxImage(image)\n\tis_tag_image(image)\n\tisVulnerable(image, deployment.metadata.namespace)\n\tpath := sprintf(\"spec.template.spec.containers[%v].image\", [format_int(i, 10)])\n\tmsga := {\n\t\t\t\"alertMessage\": sprintf(\"You may be vulnerable to CVE-2021-25742. %v\", [deployment]),\n\t\t\t\"failedPaths\": [path],\n\t\t\t\"alertObject\": {\"k8SApiObjects\": [deployment]},\n\t\t}\n}\n\n\t\nisNginxImage(image) {\n\tcontains(image, \"nginx-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-controller\")\n}\n\nisNginxImage(image) {\n\tcontains(image, \"ingress-nginx\")\n}\n\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag \u003c= \"v0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n startswith(tag, \"v\")\n tag == \"v1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag \u003c= \"v0.49\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n startswith(tag, \"v\")\n\ttag == \"v1.0.0\"\n}\n\n###### without 'v'\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag \u003c= \"0.49\"\n}\n\t\nisVulnerable(image, namespace) {\n\tcontains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := split(version[count(version)-2], \"@\")[0]\n not startswith(tag, \"v\")\n tag == 
\"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag \u003c= \"0.49\"\n}\nisVulnerable(image, namespace) {\n\tnot contains(image, \"@\")\n\tversion := split(image, \":\")\n\ttag := version[count(version)-1]\n not startswith(tag, \"v\")\n\ttag == \"1.0.0\"\n}\n\nisVulnerable(image, namespace) {\n configmaps := [configmap | configmap = input[_]; configmap.kind == \"ConfigMap\"]\n\tconfigmapOnIngressNamespace := [configmap | configmap= configmaps[_]; configmap.metadata.namespace == namespace]\n\tconfigMapsWithSnippet := [configmap | configmap= configmapOnIngressNamespace[_]; configmap.data[\"allow-snippet-annotations\"] == \"false\"]\n\tcount(configMapsWithSnippet) \u003c 1\n}\n\n\nis_tag_image(image) {\n reg := \":[\\\\w][\\\\w.-]{0,127}(\\/)?\"\n version := regex.find_all_string_submatch_n(reg, image, -1)\n v := version[_]\n img := v[_]\n not endswith(img, \"/\")\n}",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ "*"
+ ],
+ "apiVersions": [
+ "*"
+ ],
+ "resources": [
+ "Deployment",
+ "ConfigMap"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 8
+ },
+ {
+ "guid": "",
+ "name": "Audit logs enabled",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "container",
+ "categories": [
+ "Defense evasion - KubeAPI"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0067",
+ "controlID": "C-0067",
+ "creationTime": "",
+ "description": "Audit logging is an important security feature in Kubernetes; it enables the operator to track requests to the cluster. It is important to use it so the operator has a record of events that happened in Kubernetes",
+ "remediation": "Turn on audit logging for your cluster. Look at the vendor guidelines for more details",
+ "rules": [
+ {
+ "guid": "",
+ "name": "k8s-audit-logs-enabled-cloud",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.cautils as cautils\n\n# Check if audit logs is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\t\n # If enableComponents is empty, it will disable logging\n # https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#loggingcomponentconfig\n\tis_logging_disabled(config)\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\":\"\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n# Check if audit logs is enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig := cluster_config.data\n # logSetup is an object representing the enabled or disabled Kubernetes control plane logs for your cluster.\n # types - available cluster control plane log types\n # https://docs.aws.amazon.com/eks/latest/APIReference/API_LogSetup.html\n goodTypes := [logSetup | logSetup = config.Cluster.Logging.ClusterLogging[_]; isAuditLogs(logSetup)]\n count(goodTypes) == 0\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is disabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixCommand\":\"aws eks update-cluster-config --region \u003cregion_code\u003e --name \u003ccluster_name\u003e --logging '{'clusterLogging':[{'types':['\u003capi/audit/authenticator\u003e'],'enabled':true}]}'\",\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n\t\t\t\"externalObjects\": 
cluster_config\n\t\t}\n\t}\n}\n\n\nis_logging_disabled(cluster_config) {\n\tnot cluster_config.logging_config.component_config.enable_components\n}\nis_logging_disabled(cluster_config) {\n\tcluster_config.logging_config.component_config.enable_components\n\tcount(cluster_config.logging_config.component_config.enable_components) == 0\n}\n\nisAuditLogs(logSetup) {\n logSetup.Enabled == true\n cautils.list_contains(logSetup.Types, \"api\")\n}\n\nisAuditLogs(logSetup) {\n logSetup.Enabled == true\n cautils.list_contains(logSetup.Types, \"audit\")\n}\n\nisAuditLogs(logSetup) {\n logSetup.enabled == true\n cautils.list_contains(logSetup.Types, \"authenticator\")\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [],
+ "apiVersions": [],
+ "resources": []
+ }
+ ],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "container.googleapis.com",
+ "eks.amazonaws.com"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "ClusterDescribe"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": [
+ "EKS",
+ "GKE"
+ ]
+ },
+ {
+ "guid": "",
+ "name": "k8s-audit-logs-enabled-native",
+ "attributes": {
+ "armoBuiltin": true,
+ "resourcesAggregator": "apiserver-pod",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.cautils as cautils\n\n# Check if audit logs is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command\n\taudit_policy := [ command |command := cmd[_] ; contains(command, \"--audit-policy-file=\")]\n count(audit_policy) \u003c 1\n\tpath := \"spec.containers[0].command\"\t\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"audit logs is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 5
+ },
+ {
+ "guid": "",
+ "name": "Secret/ETCD encryption enabled",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "node",
+ "categories": [
+ "Impact"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0066",
+ "controlID": "C-0066",
+ "creationTime": "",
+ "description": "All Kubernetes Secrets are stored primarily in etcd therefore it is important to encrypt it.",
+ "remediation": "Turn on the etcd encryption in your cluster, for more see the vendor documentation.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "secret-etcd-encryption-cloud",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# Check if encryption in etcd in enabled for EKS\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"eks.amazonaws.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"eks\"\t\n\tconfig = cluster_config.data\n\n\tis_not_encrypted_EKS(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"eksctl utils enable-secrets-encryption --cluster=\u003ccluster\u003e --key-arn=arn:aws:kms:\u003ccluster_region\u003e:\u003caccount\u003e:key/\u003ckey\u003e --region=\u003cregion\u003e\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\n\n\n# Check if encryption in etcd in enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n\n\tnot is_encrypted_GKE(config)\n \n\t\n\tmsga := {\n\t\t\"alertMessage\": \"etcd/secret encryption is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [\"data.database_encryption.state\"],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud container clusters update \u003ccluster_name\u003e --region=\u003ccompute_region\u003e --database-encryption-key=\u003ckey_project_id\u003e/locations/\u003clocation\u003e/keyRings/\u003cring_name\u003e/cryptoKeys/\u003ckey_name\u003e --project=\u003ccluster_project_id\u003e\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}\n\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == \"1\"\n}\nis_encrypted_GKE(config) {\n\t config.database_encryption.state == 
\"ENCRYPTED\"\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n goodResources := [resource | resource = cluster_config.Cluster.EncryptionConfig.Resources[_]; resource == \"secrets\"]\n\tcount(goodResources) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcluster_config.Cluster.EncryptionConfig == null\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tcount(cluster_config.Cluster.EncryptionConfig) == 0\n}\n\nis_not_encrypted_EKS(cluster_config) {\n\tencryptionConfig := cluster_config.Cluster.EncryptionConfig[_]\n count(encryptionConfig.Resources) == 0\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [],
+ "apiVersions": [],
+ "resources": []
+ }
+ ],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "container.googleapis.com",
+ "eks.amazonaws.com"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "ClusterDescribe"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": [
+ "EKS",
+ "GKE"
+ ]
+ },
+ {
+ "guid": "",
+ "name": "etcd-encryption-native",
+ "attributes": {
+ "armoBuiltin": true,
+ "resourcesAggregator": "apiserver-pod",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\nimport data.cautils as cautils\n\n# Check if encryption in etcd is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n\tcmd := apiserverpod.spec.containers[0].command\n\tenc_command := [command | command := cmd[_]; contains(command, \"--encryption-provider-config=\")]\n\tcount(enc_command) \u003c 1\n\tpath := \"spec.containers[0].command\"\n\n\tmsga := {\n\t\t\"alertMessage\": \"etcd encryption is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\"k8sApiObjects\": [apiserverpod]},\n\t}\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 6
+ },
+ {
+ "guid": "",
+ "name": "PSP enabled",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Impact - service injection"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0068",
+ "controlID": "C-0068",
+ "creationTime": "",
+ "description": "PSP enable fine-grained authorization of pod creation and it is important to enable it",
+ "remediation": "Turn Pod Security Policies on in your cluster, if you use other admission controllers to control the behavior that PSP controls, exclude this control from your scans",
+ "rules": [
+ {
+ "guid": "",
+ "name": "psp-enabled-cloud",
+ "attributes": {
+ "armoBuiltin": true
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# Check if PSP is enabled for GKE\ndeny[msga] {\n\tcluster_config := input[_]\n\tcluster_config.apiVersion == \"container.googleapis.com/v1\"\n\tcluster_config.kind == \"ClusterDescribe\"\n cluster_config.metadata.provider == \"gke\"\t\n\tconfig := cluster_config.data\n not config.pod_security_policy_config.enabled == true\n\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"pod security policy configuration is not enabled\",\n\t\t\"alertScore\": 3,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"fixCommand\": \"gcloud beta container clusters update \u003ccluster_name\u003e --enable-pod-security-policy\",\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [],\n \"externalObjects\": cluster_config\n\t\t}\n\t}\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [],
+ "apiVersions": [],
+ "resources": []
+ }
+ ],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "container.googleapis.com",
+ "eks.amazonaws.com"
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "ClusterDescribe"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": [
+ "EKS",
+ "GKE"
+ ]
+ },
+ {
+ "guid": "",
+ "name": "psp-enabled-native",
+ "attributes": {
+ "armoBuiltin": true,
+ "resourcesAggregator": "apiserver-pod",
+ "useFromKubescapeVersion": "v1.0.133"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n\n# Check if psp is enabled for native k8s\ndeny[msga] {\n\tapiserverpod := input[_]\n cmd := apiserverpod.spec.containers[0].command[j]\n contains(cmd, \"--enable-admission-plugins=\")\n output := split(cmd, \"=\")\n not contains(output[1], \"PodSecurityPolicy\")\n\tpath := sprintf(\"spec.containers[0].command[%v]\", [format_int(j, 10)])\t\n\t\n\tmsga := {\n\t\t\"alertMessage\": \"PodSecurityPolicy is not enabled\",\n\t\t\"alertScore\": 9,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"failedPaths\": [path],\n\t\t\"fixPaths\": [],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [apiserverpod],\n\t\t\n\t\t}\n\t}\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [
+ ""
+ ],
+ "apiVersions": [
+ "v1"
+ ],
+ "resources": [
+ "Pod"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "",
+ "remediation": "",
+ "ruleQuery": "armo_builtins",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ "",
+ ""
+ ],
+ "baseScore": 1
+ },
+ {
+ "guid": "",
+ "name": "Disable anonymous access to Kubelet service",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "kubeapi",
+ "categories": [
+ "Initial access"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0069",
+ "controlID": "C-0069",
+ "creationTime": "",
+ "description": "By default, requests to the kubelet's HTTPS endpoint that are not rejected by other configured authentication methods are treated as anonymous requests, and given a username of system:anonymous and a group of system:unauthenticated.",
+ "remediation": "Start the kubelet with the --anonymous-auth=false flag.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "anonymous-requests-to-kubelet-service-updated",
+ "attributes": {
+ "armoBuiltin": true,
+ "hostSensorRule": "true"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\n\n#CIS 4.2.1 https://workbench.cisecurity.org/sections/1126668/recommendations/1838638\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tcontains(command, \"--anonymous-auth\")\n\tcontains(command, \"--anonymous-auth=true\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tnot contains(command, \"--config\")\n\n\texternal_obj := json.filter(obj, [\"apiVersion\", \"data/cmdLine\", \"kind\", \"metadata\"])\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": external_obj},\n\t}\n}\n\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tdecodedConfigContent := base64.decode(obj.data.configFile.content)\n\tyamlConfig := yaml.unmarshal(decodedConfigContent)\n\tnot yamlConfig.authentication.anonymous.enabled == false\n\n\tmsga := {\n\t\t\"alertMessage\": \"Anonymous requests is enabled.\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [\"authentication.anonymous.enabled\"],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"metadata\": obj.metadata,\n\t\t\t\"data\": {\"configFile\": {\"content\": 
decodedConfigContent}},\n\t\t}},\n\t}\n}\n\n## Host sensor failed to get config file content\ndeny[msga] {\n\tobj := input[_]\n\tis_kubelet_info(obj)\n\n\tcommand := obj.data.cmdLine\n\n\tnot contains(command, \"--anonymous-auth\")\n\tcontains(command, \"--config\")\n\n\tnot obj.data.configFile.content\n\n\tmsga := {\n\t\t\"alertMessage\": \"Failed to analyze config file\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [],\n\t\t\"fixPaths\": [],\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"externalObjects\": {\n\t\t\t\"apiVersion\": obj.apiVersion,\n\t\t\t\"kind\": obj.kind,\n\t\t\t\"data\": obj.data,\n\t\t}},\n\t}\n}\n\nis_kubelet_info(obj) {\n\tobj.kind == \"KubeletInfo\"\n\tobj.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n}\n",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [],
+ "apiVersions": [],
+ "resources": []
+ }
+ ],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "hostdata.kubescape.cloud"
+ ],
+ "apiVersions": [
+ "v1beta0"
+ ],
+ "resources": [
+ "KubeletInfo"
+ ]
+ }
+ ],
+ "ruleDependencies": [],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "Determines if anonymous requests to the kubelet service are allowed.",
+ "remediation": "Disable anonymous requests by setting the anonymous-auth flag to false, or using the kubelet configuration file.",
+ "ruleQuery": "",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 10
+ },
+ {
+ "guid": "",
+ "name": "Enforce Kubelet client TLS authentication",
+ "attributes": {
+ "armoBuiltin": true,
+ "attackTracks": [
+ {
+ "attackTrack": "node",
+ "categories": [
+ "Initial access"
+ ]
+ }
+ ],
+ "controlTypeTags": [
+ "security",
+ "compliance"
+ ]
+ },
+ "id": "C-0070",
+ "controlID": "C-0070",
+ "creationTime": "",
+ "description": "Kubelets are the node level orchestrator in Kubernetes control plane. They are publishing service port 10250 where they accept commands from API server. Operator must make sure that only API server is allowed to submit commands to Kubelet. This is done through client certificate verification, must configure Kubelet with client CA file to use for this purpose.",
+ "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.",
+ "rules": [
+ {
+ "guid": "",
+ "name": "enforce-kubelet-client-tls-authentication",
+ "attributes": {
+ "armoBuiltin": true,
+ "hostSensorRule": "true"
+ },
+ "creationTime": "",
+ "rule": "package armo_builtins\nimport data.kubernetes.api.client as client\n\n# Both config and cli present\ndeny[msga] {\n\t\tkubelet_config := input[_]\n\t\tkubelet_config.kind == \"KubeletConfiguration\"\n\t\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\t\tkubelet_cli := input[_] \n\t\tkubelet_cli.kind == \"KubeletCommandLine\"\n\t\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\t\tkubelet_cli_data := kubelet_cli.data\n\n\t\tresult := is_client_tls_disabled_both(kubelet_config, kubelet_cli_data)\n\t\texternal_obj := result.obj\n\t\tfailed_paths := result.failedPaths\n\t\tfixPaths := result.fixPaths\n\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"failedPaths\": failed_paths,\n\t\t\t\"fixPaths\": fixPaths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [kubelet_config, kubelet_cli]\n\t\t\t},\n\t\t}\n\t}\n\n\n# Only of them present\ndeny[msga] {\n\t\tresult := is_client_tls_disabled_single(input)\n\t\texternal_obj := result.obj\n\t\tfailed_paths := result.failedPaths\n\t\tfixPaths := result.fixPaths\n\n\t\tmsga := {\n\t\t\t\"alertMessage\": \"kubelet client TLS authentication is not enabled\",\n\t\t\t\"alertScore\": 2,\n\t\t\t\"failedPaths\": failed_paths,\n\t\t\t\"fixPaths\": fixPaths,\n\t\t\t\"packagename\": \"armo_builtins\",\n\t\t\t\"alertObject\": {\n\t\t\t\t\"k8sApiObjects\": [external_obj]\n\t\t\t},\n\t\t}\n\t}\n\n# CLI overrides config\nis_client_tls_disabled_both(kubelet_config, kubelet_cli_data) = {\"obj\": obj,\"failedPaths\": [], \"fixPaths\": [{\"path\": \"data.authentication.x509.clientCAFile\", \"value\": \"YOUR_VALUE\"}]} {\n\tnot contains(kubelet_cli_data[\"fullCommand\"], \"client-ca-file\")\n not kubelet_config.data.authentication.x509.clientCAFile\n\tobj = kubelet_config\n}\n\n# Only cli\nis_client_tls_disabled_single(resources) = {\"obj\": obj,\"failedPaths\": [], 
\"fixPaths\": []} {\n\tkubelet_cli := resources[_] \n\tkubelet_cli.kind == \"KubeletCommandLine\"\n\tkubelet_cli.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_config := [config | config = resources[_]; config.kind == \"KubeletConfiguration\"]\n\tcount(kubelet_config) == 0\n\n\tobj = isClientTlsDisabledCli(kubelet_cli)\n\t\n}\n\n# Only config\nis_client_tls_disabled_single(resources) = {\"obj\": obj,\"failedPaths\": [], \"fixPaths\": [{\"path\": \"data.authentication.x509.clientCAFile\", \"value\": \"YOUR_VALUE\"}]} {\n\tkubelet_config := resources[_] \n\tkubelet_config.kind == \"KubeletConfiguration\"\n\tkubelet_config.apiVersion == \"hostdata.kubescape.cloud/v1beta0\"\n\n\tkubelet_cmd := [cmd | cmd = resources[_]; cmd.kind == \"KubeletCommandLine\"]\n\tcount(kubelet_cmd) == 0\n\n\tobj = is_Client_tls_disabled_config(kubelet_config)\n}\n\n\nis_Client_tls_disabled_config(kubelet_config) = obj {\n\tnot kubelet_config.data.authentication.x509.clientCAFile\n\tobj = kubelet_config\n}\n\nisClientTlsDisabledCli(kubelet_cli) = obj {\n\tkubelet_cli_data = kubelet_cli.data\n\tnot contains(kubelet_cli_data[\"fullCommand\"], \"client-ca-file\")\n\tobj = kubelet_cli\n}",
+ "resourceEnumerator": "",
+ "ruleLanguage": "Rego",
+ "match": [
+ {
+ "apiGroups": [],
+ "apiVersions": [],
+ "resources": []
+ }
+ ],
+ "dynamicMatch": [
+ {
+ "apiGroups": [
+ "hostdata.kubescape.cloud"
+ ],
+ "apiVersions": [
+ "v1beta0"
+ ],
+ "resources": [
+ "KubeletConfiguration",
+ "KubeletCommandLine"
+ ]
+ }
+ ],
+ "ruleDependencies": [
+ {
+ "packageName": "cautils"
+ },
+ {
+ "packageName": "kubernetes.api.client"
+ }
+ ],
+ "configInputs": null,
+ "controlConfigInputs": null,
+ "description": "Determines if kubelet client tls authentication is enabled.",
+ "remediation": "Start the kubelet with the --client-ca-file flag, providing a CA bundle to verify client certificates with.",
+ "ruleQuery": "",
+ "relevantCloudProviders": null
+ }
+ ],
+ "rulesIDs": [
+ ""
+ ],
+ "baseScore": 9
+ }
+ ],
+ "controlsIDs": [
+ "C-0005",
+ "C-0038",
+ "C-0017",
+ "C-0013",
+ "C-0057",
+ "C-0034",
+ "C-0041",
+ "C-0009",
+ "C-0016",
+ "C-0012",
+ "C-0035",
+ "C-0002",
+ "C-0046",
+ "C-0055",
+ "C-0030",
+ "C-0044",
+ "C-0054",
+ "C-0058",
+ "C-0059",
+ "C-0067",
+ "C-0066",
+ "C-0068",
+ "C-0069",
+ "C-0070"
+ ]
+}
\ No newline at end of file
diff --git a/core/cautils/getter/testdata/invalid-fw.json b/core/cautils/getter/testdata/invalid-fw.json
new file mode 100644
index 00000000..c5d55df9
--- /dev/null
+++ b/core/cautils/getter/testdata/invalid-fw.json
@@ -0,0 +1,3 @@
+{
+ "guid": "",
+}
diff --git a/core/cautils/git_native_disabled.go b/core/cautils/git_native_disabled.go
new file mode 100644
index 00000000..c8c58403
--- /dev/null
+++ b/core/cautils/git_native_disabled.go
@@ -0,0 +1,22 @@
+//go:build !gitenabled
+
+package cautils
+
+import (
+ "errors"
+
+ "github.com/kubescape/go-git-url/apis"
+)
+
+var ErrFatalNotSupportedByBuild = errors.New(`git scan not supported by this build. Build with tag "gitenabled" to enable the git scan feature`)
+
+type gitRepository struct {
+}
+
+func newGitRepository(root string) (*gitRepository, error) {
+ return &gitRepository{}, ErrWarnNotSupportedByBuild
+}
+
+func (g *gitRepository) GetFileLastCommit(filePath string) (*apis.Commit, error) {
+ return nil, ErrFatalNotSupportedByBuild
+}
diff --git a/core/cautils/git_native_disabled_test.go b/core/cautils/git_native_disabled_test.go
new file mode 100644
index 00000000..ccc23346
--- /dev/null
+++ b/core/cautils/git_native_disabled_test.go
@@ -0,0 +1,11 @@
+//go:build !gitenabled
+
+package cautils
+
+func (s *LocalGitRepositoryTestSuite) TestGetLastCommit() {
+ s.T().Log("warn: skipped testing native git functionality [GetLastCommit]")
+}
+
+func (s *LocalGitRepositoryTestSuite) TestGetFileLastCommit() {
+ s.T().Log("warn: skipped testing native git functionality [GetFileLastCommit]")
+}
diff --git a/core/cautils/git_native_enabled.go b/core/cautils/git_native_enabled.go
new file mode 100644
index 00000000..09cdaf71
--- /dev/null
+++ b/core/cautils/git_native_enabled.go
@@ -0,0 +1,142 @@
+//go:build gitenabled
+
+package cautils
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/kubescape/go-git-url/apis"
+ git2go "github.com/libgit2/git2go/v33"
+)
+
+type gitRepository struct {
+ git2GoRepo *git2go.Repository
+ fileToLastCommit map[string]*git2go.Commit
+}
+
+func newGitRepository(root string) (*gitRepository, error) {
+ git2GoRepo, err := git2go.OpenRepository(root)
+ if err != nil {
+ return nil, err
+ }
+
+ return &gitRepository{
+ git2GoRepo: git2GoRepo,
+ }, nil
+}
+
+func (g *gitRepository) GetFileLastCommit(filePath string) (*apis.Commit, error) {
+ if len(g.fileToLastCommit) == 0 {
+ filePathToCommitTime := map[string]time.Time{}
+ filePathToCommit := map[string]*git2go.Commit{}
+ allCommits, _ := g.getAllCommits()
+
+ // builds a map of all files to their last commit
+ for _, commit := range allCommits {
+ // Ignore merge commits (2+ parents)
+ if commit.ParentCount() <= 1 {
+ tree, err := commit.Tree()
+ if err != nil {
+ continue
+ }
+
+ // ParentCount can be either 1 or 0 (initial commit)
+ // In case it's the initial commit, prevTree is nil
+ var prevTree *git2go.Tree
+ if commit.ParentCount() == 1 {
+ prevCommit := commit.Parent(0)
+ prevTree, err = prevCommit.Tree()
+ if err != nil {
+ continue
+ }
+ }
+
+ diff, err := g.git2GoRepo.DiffTreeToTree(prevTree, tree, nil)
+ if err != nil {
+ continue
+ }
+
+ numDeltas, err := diff.NumDeltas()
+ if err != nil {
+ continue
+ }
+
+ for i := 0; i < numDeltas; i++ {
+ delta, err := diff.Delta(i)
+ if err != nil {
+ continue
+ }
+
+ deltaFilePath := delta.NewFile.Path
+ commitTime := commit.Author().When
+
+ // In case we have the commit information for the file which is not the latest - we override it
+ if currentCommitTime, exists := filePathToCommitTime[deltaFilePath]; exists {
+ if currentCommitTime.Before(commitTime) {
+ filePathToCommitTime[deltaFilePath] = commitTime
+ filePathToCommit[deltaFilePath] = commit
+ }
+ } else {
+ filePathToCommitTime[deltaFilePath] = commitTime
+ filePathToCommit[deltaFilePath] = commit
+ }
+ }
+ }
+ }
+
+ g.fileToLastCommit = filePathToCommit
+ }
+
+ if relevantCommit, exists := g.fileToLastCommit[filePath]; exists {
+ return g.getCommit(relevantCommit), nil
+ }
+
+ return nil, fmt.Errorf("failed to get commit information for file: %s", filePath)
+}
+
+func (g *gitRepository) getAllCommits() ([]*git2go.Commit, error) {
+ logItr, itrErr := g.git2GoRepo.Walk()
+ if itrErr != nil {
+
+ return nil, itrErr
+ }
+
+ pushErr := logItr.PushHead()
+ if pushErr != nil {
+ return nil, pushErr
+ }
+
+ var allCommits []*git2go.Commit
+ err := logItr.Iterate(func(commit *git2go.Commit) bool {
+ if commit != nil {
+ allCommits = append(allCommits, commit)
+ return true
+ }
+ return false
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return allCommits, nil
+}
+
+func (g *gitRepository) getCommit(commit *git2go.Commit) *apis.Commit {
+ return &apis.Commit{
+ SHA: commit.Id().String(),
+ Author: apis.Committer{
+ Name: commit.Author().Name,
+ Email: commit.Author().Email,
+ Date: commit.Author().When,
+ },
+ Message: commit.Message(),
+ Committer: apis.Committer{},
+ Files: []apis.Files{},
+ }
+}
diff --git a/core/cautils/git_native_enabled_test.go b/core/cautils/git_native_enabled_test.go
new file mode 100644
index 00000000..09ae5fbb
--- /dev/null
+++ b/core/cautils/git_native_enabled_test.go
@@ -0,0 +1,45 @@
+//go:build gitenabled
+
+package cautils
+
+func (s *LocalGitRepositoryTestSuite) TestGetLastCommit() {
+ if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
+ if commit, err := localRepo.GetLastCommit(); s.NoError(err) {
+ s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
+ s.Equal("Amir Malka", commit.Author.Name)
+ s.Equal("amirm@armosec.io", commit.Author.Email)
+ s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
+ s.Equal("added file B\n", commit.Message)
+ }
+ }
+}
+
+func (s *LocalGitRepositoryTestSuite) TestGetFileLastCommit() {
+ s.Run("fileA", func() {
+ if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
+
+ if commit, err := localRepo.GetFileLastCommit("fileA"); s.NoError(err) {
+ s.Equal("9fae4be19624297947d2b605cefbff516628612d", commit.SHA)
+ s.Equal("Amir Malka", commit.Author.Name)
+ s.Equal("amirm@armosec.io", commit.Author.Email)
+ s.Equal("2022-05-22 18:55:48 +0300 +0300", commit.Author.Date.String())
+ s.Equal("added file A\n", commit.Message)
+ }
+
+ }
+ })
+
+ s.Run("fileB", func() {
+ if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
+
+ if commit, err := localRepo.GetFileLastCommit("dirA/fileB"); s.NoError(err) {
+ s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
+ s.Equal("Amir Malka", commit.Author.Name)
+ s.Equal("amirm@armosec.io", commit.Author.Email)
+ s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
+ s.Equal("added file B\n", commit.Message)
+ }
+
+ }
+ })
+}
diff --git a/core/cautils/localgitrepository.go b/core/cautils/localgitrepository.go
index c34cec7b..23080e29 100644
--- a/core/cautils/localgitrepository.go
+++ b/core/cautils/localgitrepository.go
@@ -1,26 +1,26 @@
package cautils
import (
+ "errors"
"fmt"
"path"
"strings"
- "time"
gitv5 "github.com/go-git/go-git/v5"
configv5 "github.com/go-git/go-git/v5/config"
plumbingv5 "github.com/go-git/go-git/v5/plumbing"
"github.com/kubescape/go-git-url/apis"
- git2go "github.com/libgit2/git2go/v33"
)
type LocalGitRepository struct {
- goGitRepo *gitv5.Repository
- git2GoRepo *git2go.Repository
- head *plumbingv5.Reference
- config *configv5.Config
- fileToLastCommit map[string]*git2go.Commit
+ *gitRepository
+ goGitRepo *gitv5.Repository
+ head *plumbingv5.Reference
+ config *configv5.Config
}
+var ErrWarnNotSupportedByBuild = errors.New(`git commits retrieval not supported by this build. Build with tag "gitenabled" to enable the full git scan feature`)
+
func NewLocalGitRepository(path string) (*LocalGitRepository, error) {
goGitRepo, err := gitv5.PlainOpenWithOptions(path, &gitv5.PlainOpenOptions{DetectDotGit: true})
if err != nil {
@@ -52,11 +52,12 @@ func NewLocalGitRepository(path string) (*LocalGitRepository, error) {
}
if repoRoot, err := l.GetRootDir(); err == nil {
- git2GoRepo, err := git2go.OpenRepository(repoRoot)
- if err != nil {
+ gitRepository, err := newGitRepository(repoRoot)
+ if err != nil && !errors.Is(err, ErrWarnNotSupportedByBuild) {
return l, err
}
- l.git2GoRepo = git2GoRepo
+
+ l.gitRepository = gitRepository
}
return l, nil
@@ -72,6 +73,10 @@ func (g *LocalGitRepository) GetRemoteUrl() (string, error) {
branchName := g.GetBranchName()
if branchRef, branchFound := g.config.Branches[branchName]; branchFound {
remoteName := branchRef.Remote
+ // branchRef.Remote can be a reference to a config.Remotes entry or directly a gitUrl
+ if _, found := g.config.Remotes[remoteName]; !found {
+ return remoteName, nil
+ }
if len(g.config.Remotes[remoteName].URLs) == 0 {
return "", fmt.Errorf("expected to find URLs for remote '%s', branch '%s'", remoteName, branchName)
}
@@ -79,10 +84,13 @@ func (g *LocalGitRepository) GetRemoteUrl() (string, error) {
}
const defaultRemoteName string = "origin"
- if len(g.config.Remotes[defaultRemoteName].URLs) == 0 {
+ defaultRemote, ok := g.config.Remotes[defaultRemoteName]
+ if !ok {
+ return "", fmt.Errorf("did not find a default remote with name '%s'", defaultRemoteName)
+ } else if len(defaultRemote.URLs) == 0 {
return "", fmt.Errorf("expected to find URLs for remote '%s'", defaultRemoteName)
}
- return g.config.Remotes[defaultRemoteName].URLs[0], nil
+ return defaultRemote.URLs[0], nil
}
// GetName get origin name without the .git suffix
@@ -122,120 +130,6 @@ func (g *LocalGitRepository) GetLastCommit() (*apis.Commit, error) {
}, nil
}
-func (g *LocalGitRepository) getAllCommits() ([]*git2go.Commit, error) {
- logItr, itrErr := g.git2GoRepo.Walk()
- if itrErr != nil {
-
- return nil, itrErr
- }
-
- pushErr := logItr.PushHead()
- if pushErr != nil {
- return nil, pushErr
- }
-
- var allCommits []*git2go.Commit
- err := logItr.Iterate(func(commit *git2go.Commit) bool {
- if commit != nil {
- allCommits = append(allCommits, commit)
- return true
- }
- return false
- })
-
- if err != nil {
- return nil, err
- }
-
- if err != nil {
- return nil, err
- }
-
- return allCommits, nil
-}
-
-func (g *LocalGitRepository) GetFileLastCommit(filePath string) (*apis.Commit, error) {
- if len(g.fileToLastCommit) == 0 {
- filePathToCommitTime := map[string]time.Time{}
- filePathToCommit := map[string]*git2go.Commit{}
- allCommits, _ := g.getAllCommits()
-
- // builds a map of all files to their last commit
- for _, commit := range allCommits {
- // Ignore merge commits (2+ parents)
- if commit.ParentCount() <= 1 {
- tree, err := commit.Tree()
- if err != nil {
- continue
- }
-
- // ParentCount can be either 1 or 0 (initial commit)
- // In case it's the initial commit, prevTree is nil
- var prevTree *git2go.Tree
- if commit.ParentCount() == 1 {
- prevCommit := commit.Parent(0)
- prevTree, err = prevCommit.Tree()
- if err != nil {
- continue
- }
- }
-
- diff, err := g.git2GoRepo.DiffTreeToTree(prevTree, tree, nil)
- if err != nil {
- continue
- }
-
- numDeltas, err := diff.NumDeltas()
- if err != nil {
- continue
- }
-
- for i := 0; i < numDeltas; i++ {
- delta, err := diff.Delta(i)
- if err != nil {
- continue
- }
-
- deltaFilePath := delta.NewFile.Path
- commitTime := commit.Author().When
-
- // In case we have the commit information for the file which is not the latest - we override it
- if currentCommitTime, exists := filePathToCommitTime[deltaFilePath]; exists {
- if currentCommitTime.Before(commitTime) {
- filePathToCommitTime[deltaFilePath] = commitTime
- filePathToCommit[deltaFilePath] = commit
- }
- } else {
- filePathToCommitTime[deltaFilePath] = commitTime
- filePathToCommit[deltaFilePath] = commit
- }
- }
- }
- }
- g.fileToLastCommit = filePathToCommit
- }
-
- if relevantCommit, exists := g.fileToLastCommit[filePath]; exists {
- return g.getCommit(relevantCommit), nil
- }
-
- return nil, fmt.Errorf("failed to get commit information for file: %s", filePath)
-}
-
-func (g *LocalGitRepository) getCommit(commit *git2go.Commit) *apis.Commit {
- return &apis.Commit{
- SHA: commit.Id().String(),
- Author: apis.Committer{
- Name: commit.Author().Name,
- Email: commit.Author().Email,
- Date: commit.Author().When,
- },
- Message: commit.Message(),
- Committer: apis.Committer{},
- Files: []apis.Files{},
- }
-}
-
func (g *LocalGitRepository) GetRootDir() (string, error) {
wt, err := g.goGitRepo.Worktree()
if err != nil {
diff --git a/core/cautils/localgitrepository_test.go b/core/cautils/localgitrepository_test.go
index 933aa10d..057c01e6 100644
--- a/core/cautils/localgitrepository_test.go
+++ b/core/cautils/localgitrepository_test.go
@@ -9,6 +9,8 @@ import (
"strings"
"testing"
+ configv5 "github.com/go-git/go-git/v5/config"
+ plumbingv5 "github.com/go-git/go-git/v5/plumbing"
"github.com/stretchr/testify/suite"
)
@@ -26,40 +28,58 @@ func unzipFile(zipPath, destinationFolder string) (*zip.ReadCloser, error) {
if err != nil {
return nil, err
}
+
for _, f := range archive.File {
- filePath := filepath.Join(destinationFolder, f.Name)
+ filePath := filepath.Join(destinationFolder, f.Name) //nolint:gosec
if !strings.HasPrefix(filePath, filepath.Clean(destinationFolder)+string(os.PathSeparator)) {
return nil, fmt.Errorf("invalid file path")
}
+
if f.FileInfo().IsDir() {
os.MkdirAll(filePath, os.ModePerm)
continue
}
- if err := os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
- return nil, err
+ if erc := copyFileInFolder(filePath, f); erc != nil {
+ return nil, erc
}
-
- dstFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
- if err != nil {
- return nil, err
- }
-
- fileInArchive, err := f.Open()
- if err != nil {
- return nil, err
- }
-
- if _, err := io.Copy(dstFile, fileInArchive); err != nil {
- return nil, err
- }
-
- dstFile.Close()
- fileInArchive.Close()
}
return archive, err
+}
+func copyFileInFolder(filePath string, f *zip.File) (err error) {
+ if err = os.MkdirAll(filepath.Dir(filePath), os.ModePerm); err != nil {
+ return err
+ }
+
+ dstFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
+ if err != nil {
+ return err
+ }
+ defer func() {
+ _ = dstFile.Close()
+ }()
+
+ fileInArchive, err := f.Open()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ _ = fileInArchive.Close()
+ }()
+
+ _, err = io.Copy(dstFile, fileInArchive) //nolint:gosec
+
+ if err = dstFile.Close(); err != nil {
+ return err
+ }
+
+ if err = fileInArchive.Close(); err != nil {
+ return err
+ }
+
+ return err
}
func (s *LocalGitRepositoryTestSuite) SetupSuite() {
@@ -132,44 +152,49 @@ func (s *LocalGitRepositoryTestSuite) TestGetOriginUrl() {
}
}
-func (s *LocalGitRepositoryTestSuite) TestGetLastCommit() {
- if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
- if commit, err := localRepo.GetLastCommit(); s.NoError(err) {
- s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
- s.Equal("Amir Malka", commit.Author.Name)
- s.Equal("amirm@armosec.io", commit.Author.Email)
- s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
- s.Equal("added file B\n", commit.Message)
- }
+func TestGetRemoteUrl(t *testing.T) {
+ testCases := []struct {
+ Name string
+ LocalRepo LocalGitRepository
+ Want string
+ WantErr error
+ }{
+ {
+ Name: "Branch with missing upstream and missing 'origin' fallback should return an error",
+ LocalRepo: LocalGitRepository{
+ config: &configv5.Config{
+ Branches: make(map[string]*configv5.Branch),
+ Remotes: make(map[string]*configv5.RemoteConfig),
+ },
+ head: plumbingv5.NewReferenceFromStrings("HEAD", "ref: refs/heads/v4"),
+ },
+ Want: "",
+ WantErr: fmt.Errorf("did not find a default remote with name 'origin'"),
+ },
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ localRepo := LocalGitRepository{
+ config: &configv5.Config{
+ Branches: make(map[string]*configv5.Branch),
+ Remotes: make(map[string]*configv5.RemoteConfig),
+ },
+ head: plumbingv5.NewReferenceFromStrings("HEAD", "ref: refs/heads/v4"),
+ }
+
+ want := tc.Want
+ wantErr := tc.WantErr
+ got, gotErr := localRepo.GetRemoteUrl()
+
+ if got != want {
+ t.Errorf("Remote URLs don’t match: got '%s', want '%s'", got, want)
+ }
+
+ if gotErr.Error() != wantErr.Error() {
+ t.Errorf("Errors don’t match: got '%v', want '%v'", gotErr, wantErr)
+ }
+ },
+ )
}
}
-
-func (s *LocalGitRepositoryTestSuite) TestGetFileLastCommit() {
- s.Run("fileA", func() {
- if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
-
- if commit, err := localRepo.GetFileLastCommit("fileA"); s.NoError(err) {
- s.Equal("9fae4be19624297947d2b605cefbff516628612d", commit.SHA)
- s.Equal("Amir Malka", commit.Author.Name)
- s.Equal("amirm@armosec.io", commit.Author.Email)
- s.Equal("2022-05-22 18:55:48 +0300 +0300", commit.Author.Date.String())
- s.Equal("added file A\n", commit.Message)
- }
-
- }
- })
-
- s.Run("fileB", func() {
- if localRepo, err := NewLocalGitRepository(s.gitRepositoryPaths["localrepo"]); s.NoError(err) {
-
- if commit, err := localRepo.GetFileLastCommit("dirA/fileB"); s.NoError(err) {
- s.Equal("7e09312b8017695fadcd606882e3779f10a5c832", commit.SHA)
- s.Equal("Amir Malka", commit.Author.Name)
- s.Equal("amirm@armosec.io", commit.Author.Email)
- s.Equal("2022-05-22 19:11:57 +0300 +0300", commit.Author.Date.String())
- s.Equal("added file B\n", commit.Message)
- }
-
- }
- })
-}
diff --git a/core/cautils/scaninfo.go b/core/cautils/scaninfo.go
index 2c5631e2..7fd846ad 100644
--- a/core/cautils/scaninfo.go
+++ b/core/cautils/scaninfo.go
@@ -11,7 +11,7 @@ import (
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
giturl "github.com/kubescape/go-git-url"
- logger "github.com/kubescape/go-logger"
+ "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
@@ -94,7 +94,7 @@ const (
)
type PolicyIdentifier struct {
- Name string // policy name e.g. nsa,mitre,c-0012
+ Identifier string // policy Identifier e.g. c-0012 for control, nsa,mitre for frameworks
Kind apisv1.NotificationPolicyKind // policy kind e.g. Framework,Control,Rule
Designators armotypes.PortalDesignator
}
@@ -104,6 +104,7 @@ type ScanInfo struct {
PolicyIdentifier []PolicyIdentifier // TODO - remove from object
UseExceptions string // Load file with exceptions configuration
ControlsInputs string // Load file with inputs for controls
+ AttackTracks string // Load file with attack tracks
UseFrom []string // Load framework from local file (instead of download). Use when running offline
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
UseArtifactsFrom string // Load artifacts from local path. Use when running offline
@@ -120,6 +121,7 @@ type ScanInfo struct {
FailThreshold float32 // Failure score threshold
FailThresholdSeverity string // Severity at and above which the command should fail
Submit bool // Submit results to Kubescape Cloud BE
+ CreateAccount bool // Create account in Kubescape Cloud BE if no account found in local cache
ScanID string // Report id of the current scan
HostSensorEnabled BoolPtrFlag // Deploy Kubescape K8s host scanner to collect data from certain controls
HostSensorYamlPath string // Path to hostsensor file
@@ -129,6 +131,7 @@ type ScanInfo struct {
FrameworkScan bool // false if scanning control
ScanAll bool // true if scan all frameworks
OmitRawResources bool // true if omit raw resources from the output
+ PrintAttackTree bool // true if print attack tree
}
type Getters struct {
@@ -140,7 +143,6 @@ type Getters struct {
func (scanInfo *ScanInfo) Init() {
scanInfo.setUseFrom()
- scanInfo.setOutputFile()
scanInfo.setUseArtifactsFrom()
if scanInfo.ScanID == "" {
scanInfo.ScanID = uuid.NewString()
@@ -178,34 +180,26 @@ func (scanInfo *ScanInfo) setUseArtifactsFrom() {
scanInfo.ControlsInputs = filepath.Join(scanInfo.UseArtifactsFrom, localControlInputsFilename)
// set exceptions
scanInfo.UseExceptions = filepath.Join(scanInfo.UseArtifactsFrom, LocalExceptionsFilename)
+
+ // set attack tracks
+ scanInfo.AttackTracks = filepath.Join(scanInfo.UseArtifactsFrom, LocalAttackTracksFilename)
}
func (scanInfo *ScanInfo) setUseFrom() {
if scanInfo.UseDefault {
for _, policy := range scanInfo.PolicyIdentifier {
- scanInfo.UseFrom = append(scanInfo.UseFrom, getter.GetDefaultPath(policy.Name+".json"))
+ scanInfo.UseFrom = append(scanInfo.UseFrom, getter.GetDefaultPath(policy.Identifier+".json"))
}
}
}
-func (scanInfo *ScanInfo) setOutputFile() {
- if scanInfo.Output == "" {
- return
- }
- if scanInfo.Format == "json" {
- if filepath.Ext(scanInfo.Output) != ".json" {
- scanInfo.Output += ".json"
- }
- }
- if scanInfo.Format == "junit" {
- if filepath.Ext(scanInfo.Output) != ".xml" {
- scanInfo.Output += ".xml"
- }
- }
- if scanInfo.Format == "pdf" {
- if filepath.Ext(scanInfo.Output) != ".pdf" {
- scanInfo.Output += ".pdf"
- }
+// Formats returns a slice of output formats that have been requested for a given scan
+func (scanInfo *ScanInfo) Formats() []string {
+ formatString := scanInfo.Format
+ if formatString != "" {
+ return strings.Split(scanInfo.Format, ",")
+ } else {
+ return []string{}
}
}
@@ -214,7 +208,7 @@ func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind apisv1.No
if !scanInfo.contains(policy) {
newPolicy := PolicyIdentifier{}
newPolicy.Kind = kind
- newPolicy.Name = policy
+ newPolicy.Identifier = policy
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
}
}
@@ -222,7 +216,7 @@ func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind apisv1.No
func (scanInfo *ScanInfo) contains(policyName string) bool {
for _, policy := range scanInfo.PolicyIdentifier {
- if policy.Name == policyName {
+ if policy.Identifier == policyName {
return true
}
}
@@ -250,7 +244,7 @@ func scanInfoToScanMetadata(scanInfo *ScanInfo) *reporthandlingv2.Metadata {
}
// append frameworks
for _, policy := range scanInfo.PolicyIdentifier {
- metadata.ScanMetadata.TargetNames = append(metadata.ScanMetadata.TargetNames, policy.Name)
+ metadata.ScanMetadata.TargetNames = append(metadata.ScanMetadata.TargetNames, policy.Identifier)
}
metadata.ScanMetadata.KubescapeVersion = BuildNumber
diff --git a/core/cautils/scaninfo_test.go b/core/cautils/scaninfo_test.go
index a825fcb8..6059837b 100644
--- a/core/cautils/scaninfo_test.go
+++ b/core/cautils/scaninfo_test.go
@@ -43,3 +43,30 @@ func TestGetScanningContext(t *testing.T) {
assert.Equal(t, ContextCluster, GetScanningContext(""))
assert.Equal(t, ContextGitURL, GetScanningContext("https://github.com/kubescape/kubescape"))
}
+
+func TestScanInfoFormats(t *testing.T) {
+ testCases := []struct {
+ Input string
+ Want []string
+ }{
+ {"", []string{}},
+ {"json", []string{"json"}},
+ {"pdf", []string{"pdf"}},
+ {"html", []string{"html"}},
+ {"sarif", []string{"sarif"}},
+ {"html,pdf,sarif", []string{"html", "pdf", "sarif"}},
+ {"pretty-printer,pdf,sarif", []string{"pretty-printer", "pdf", "sarif"}},
+ }
+
+ for _, tc := range testCases {
+ t.Run(tc.Input, func(t *testing.T) {
+ input := tc.Input
+ want := tc.Want
+ scanInfo := &ScanInfo{Format: input}
+
+ got := scanInfo.Formats()
+
+ assert.Equal(t, want, got)
+ })
+ }
+}
diff --git a/core/core/download.go b/core/core/download.go
index 5c2ab233..54be4fc6 100644
--- a/core/core/download.go
+++ b/core/core/download.go
@@ -6,20 +6,28 @@ import (
"path/filepath"
"strings"
- "github.com/armosec/armoapi-go/armotypes"
- logger "github.com/kubescape/go-logger"
+ "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
)
+const (
+ TargetControlsInputs = "controls-inputs"
+ TargetExceptions = "exceptions"
+ TargetControl = "control"
+ TargetFramework = "framework"
+ TargetArtifacts = "artifacts"
+ TargetAttackTracks = "attack-tracks"
+)
+
var downloadFunc = map[string]func(*metav1.DownloadInfo) error{
- "controls-inputs": downloadConfigInputs,
- "exceptions": downloadExceptions,
- "control": downloadControl,
- "framework": downloadFramework,
- "artifacts": downloadArtifacts,
- "attack-tracks": downloadAttackTracks,
+ TargetControlsInputs: downloadConfigInputs,
+ TargetExceptions: downloadExceptions,
+ TargetControl: downloadControl,
+ TargetFramework: downloadFramework,
+ TargetArtifacts: downloadArtifacts,
+ TargetAttackTracks: downloadAttackTracks,
}
func DownloadSupportCommands() []string {
@@ -84,7 +92,7 @@ func downloadArtifacts(downloadInfo *metav1.DownloadInfo) error {
func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())
- controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetAccountID(), nil)
+ controlsInputsGetter := getConfigInputsGetter(downloadInfo.Identifier, tenant.GetAccountID(), nil)
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetContextName())
if err != nil {
return err
@@ -105,13 +113,10 @@ func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
}
func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
- var err error
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())
-
exceptionsGetter := getExceptionsGetter("", tenant.GetAccountID(), nil)
- exceptions := []armotypes.PostureExceptionPolicy{}
- exceptions, err = exceptionsGetter.GetExceptions(tenant.GetContextName())
+ exceptions, err := exceptionsGetter.GetExceptions(tenant.GetContextName())
if err != nil {
return err
}
@@ -132,7 +137,7 @@ func downloadAttackTracks(downloadInfo *metav1.DownloadInfo) error {
var err error
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())
- attackTracksGetter := getAttackTracksGetter(tenant.GetAccountID(), nil)
+ attackTracksGetter := getAttackTracksGetter("", tenant.GetAccountID(), nil)
attackTracks, err := attackTracksGetter.GetAttackTracks()
if err != nil {
@@ -158,7 +163,7 @@ func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
- if downloadInfo.Name == "" {
+ if downloadInfo.Identifier == "" {
// if framework name not specified - download all frameworks
frameworks, err := g.GetFrameworks()
if err != nil {
@@ -175,9 +180,9 @@ func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
// return fmt.Errorf("missing framework name")
} else {
if downloadInfo.FileName == "" {
- downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Name)
+ downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Identifier)
}
- framework, err := g.GetFramework(downloadInfo.Name)
+ framework, err := g.GetFramework(downloadInfo.Identifier)
if err != nil {
return err
}
@@ -200,25 +205,25 @@ func downloadControl(downloadInfo *metav1.DownloadInfo) error {
g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
- if downloadInfo.Name == "" {
+ if downloadInfo.Identifier == "" {
// TODO - support
- return fmt.Errorf("missing control name")
+ return fmt.Errorf("missing control ID")
}
if downloadInfo.FileName == "" {
- downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Name)
+ downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Identifier)
}
- controls, err := g.GetControl(downloadInfo.Name)
+ controls, err := g.GetControl(downloadInfo.Identifier)
if err != nil {
- return err
+ return fmt.Errorf("failed to download control id '%s', %s", downloadInfo.Identifier, err.Error())
}
if controls == nil {
- return fmt.Errorf("failed to download control - received an empty objects")
+ return fmt.Errorf("failed to download control id '%s' - received an empty objects", downloadInfo.Identifier)
}
downloadTo := filepath.Join(downloadInfo.Path, downloadInfo.FileName)
err = getter.SaveInFile(controls, downloadTo)
if err != nil {
return err
}
- logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("name", downloadInfo.Name), helpers.String("path", downloadTo))
+ logger.L().Success("Downloaded", helpers.String("artifact", downloadInfo.Target), helpers.String("ID", downloadInfo.Identifier), helpers.String("path", downloadTo))
return nil
}
diff --git a/core/core/initutils.go b/core/core/initutils.go
index 66ae6272..11cb88ba 100644
--- a/core/core/initutils.go
+++ b/core/core/initutils.go
@@ -2,14 +2,17 @@ package core
import (
"fmt"
+ "os"
- logger "github.com/kubescape/go-logger"
+ "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
"github.com/kubescape/kubescape/v2/core/pkg/hostsensorutils"
"github.com/kubescape/kubescape/v2/core/pkg/resourcehandler"
+ "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
+ printerv2 "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/reporter"
reporterv2 "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/reporter/v2"
@@ -99,7 +102,7 @@ func getHostSensorHandler(scanInfo *cautils.ScanInfo, k8s *k8sinterface.Kubernet
// we need to determined which controls needs host scanner
if scanInfo.HostSensorEnabled.Get() == nil && hasHostSensorControls {
scanInfo.HostSensorEnabled.SetBool(false) // default - do not run host scanner
- logger.L().Warning("Kubernetes cluster nodes scanning is disabled. This is required to collect valuable data for certain controls. You can enable it using the --enable-host-scan flag")
+ logger.L().Warning("Kubernetes cluster nodes scanning is disabled. This is required to collect valuable data for certain controls. You can enable it using the --enable-host-scan flag")
}
if hostSensorVal := scanInfo.HostSensorEnabled.Get(); hostSensorVal != nil && *hostSensorVal {
hostSensorHandler, err := hostsensorutils.NewHostSensorHandler(k8s, scanInfo.HostSensorYamlPath)
@@ -122,18 +125,18 @@ func getFieldSelector(scanInfo *cautils.ScanInfo) resourcehandler.IFieldSelector
return &resourcehandler.EmptySelector{}
}
-func policyIdentifierNames(pi []cautils.PolicyIdentifier) string {
- policiesNames := ""
+func policyIdentifierIdentities(pi []cautils.PolicyIdentifier) string {
+ policiesIdentities := ""
for i := range pi {
- policiesNames += pi[i].Name
+ policiesIdentities += pi[i].Identifier
if i+1 < len(pi) {
- policiesNames += ","
+ policiesIdentities += ","
}
}
- if policiesNames == "" {
- policiesNames = "all"
+ if policiesIdentities == "" {
+ policiesIdentities = "all"
}
- return policiesNames
+ return policiesIdentities
}
// setSubmitBehavior - Setup the desired cluster behavior regarding submitting to the Kubescape Cloud BE
@@ -179,6 +182,10 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
scanInfo.Submit = true
}
+ if scanInfo.CreateAccount {
+ scanInfo.Submit = true
+ }
+
}
// setPolicyGetter set the policy getter - local file/github release/Kubescape Cloud API
@@ -240,7 +247,10 @@ func listFrameworksNames(policyGetter getter.IPolicyGetter) []string {
return getter.NativeFrameworks
}
-func getAttackTracksGetter(accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IAttackTracksGetter {
+func getAttackTracksGetter(attackTracks, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IAttackTracksGetter {
+ if len(attackTracks) > 0 {
+ return getter.NewLoadPolicy([]string{attackTracks})
+ }
if accountID != "" {
g := getter.GetKSCloudAPIConnector() // download attack tracks from Kubescape Cloud backend
return g
@@ -248,9 +258,20 @@ func getAttackTracksGetter(accountID string, downloadReleasedPolicy *getter.Down
if downloadReleasedPolicy == nil {
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
}
+
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull attack tracks, fallback to cache
logger.L().Warning("failed to get attack tracks from github release, loading attack tracks from cache", helpers.Error(err))
return getter.NewLoadPolicy([]string{getter.GetDefaultPath(cautils.LocalAttackTracksFilename)})
}
return downloadReleasedPolicy
}
+
+// getUIPrinter returns a printer that will be used to print to the program’s UI (terminal)
+func getUIPrinter(verboseMode bool, formatVersion string, attackTree bool, viewType cautils.ViewTypes) printer.IPrinter {
+ p := printerv2.NewPrettyPrinter(verboseMode, formatVersion, attackTree, viewType)
+
+ // Since the UI of the program is a CLI (Stdout), it means that it should always print to Stdout
+ p.SetWriter(os.Stdout.Name())
+
+ return p
+}
diff --git a/core/core/initutils_test.go b/core/core/initutils_test.go
new file mode 100644
index 00000000..8f518125
--- /dev/null
+++ b/core/core/initutils_test.go
@@ -0,0 +1,39 @@
+package core
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/kubescape/kubescape/v2/core/cautils"
+)
+
+func Test_getUIPrinter(t *testing.T) {
+ scanInfo := &cautils.ScanInfo{
+ FormatVersion: "v2",
+ VerboseMode: true,
+ View: "control",
+ }
+ wantFormatVersion := scanInfo.FormatVersion
+ wantVerboseMode := scanInfo.VerboseMode
+ wantViewType := cautils.ViewTypes(scanInfo.View)
+
+ got := getUIPrinter(scanInfo.VerboseMode, scanInfo.FormatVersion, scanInfo.PrintAttackTree, cautils.ViewTypes(scanInfo.View))
+
+ gotValue := reflect.ValueOf(got).Elem()
+ gotFormatVersion := gotValue.FieldByName("formatVersion").String()
+ gotVerboseMode := gotValue.FieldByName("verboseMode").Bool()
+ gotViewType := cautils.ViewTypes(gotValue.FieldByName("viewType").String())
+
+ if gotFormatVersion != wantFormatVersion {
+ t.Errorf("Got: %s, want: %s", gotFormatVersion, wantFormatVersion)
+ }
+
+ if gotVerboseMode != wantVerboseMode {
+ t.Errorf("Got: %t, want: %t", gotVerboseMode, wantVerboseMode)
+ }
+
+ if gotViewType != wantViewType {
+ t.Errorf("Got: %v, want: %v", gotViewType, wantViewType)
+ }
+
+}
diff --git a/core/core/scan.go b/core/core/scan.go
index f17405c0..c643a0dc 100644
--- a/core/core/scan.go
+++ b/core/core/scan.go
@@ -7,7 +7,7 @@ import (
"github.com/kubescape/k8s-interface/k8sinterface"
- logger "github.com/kubescape/go-logger"
+ "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
@@ -27,7 +27,8 @@ type componentInterfaces struct {
tenantConfig cautils.ITenantConfig
resourceHandler resourcehandler.IResourceHandler
report reporter.IReport
- printerHandler printer.IPrinter
+ outputPrinters []printer.IPrinter
+ uiPrinter printer.IPrinter
hostSensorHandler hostsensorutils.IHostSensor
}
@@ -63,7 +64,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
// ================== version testing ======================================
v := cautils.NewIVersionCheckHandler()
- v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", cautils.ScanningContextToScanningScope(scanInfo.GetScanningContext())))
+ v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierIdentities(scanInfo.PolicyIdentifier), "", cautils.ScanningContextToScanningScope(scanInfo.GetScanningContext())))
// ================== setup host scanner object ======================================
@@ -93,9 +94,17 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
// reporting behavior - setup reporter
reportHandler := getReporter(tenantConfig, scanInfo.ScanID, scanInfo.Submit, scanInfo.FrameworkScan, scanInfo.GetScanningContext())
- // setup printer
- printerHandler := resultshandling.NewPrinter(scanInfo.Format, scanInfo.FormatVersion, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
- printerHandler.SetWriter(scanInfo.Output)
+ // setup printers
+ formats := scanInfo.Formats()
+
+ outputPrinters := make([]printer.IPrinter, 0)
+ for _, format := range formats {
+ printerHandler := resultshandling.NewPrinter(format, scanInfo.FormatVersion, scanInfo.PrintAttackTree, scanInfo.VerboseMode, cautils.ViewTypes(scanInfo.View))
+ printerHandler.SetWriter(scanInfo.Output)
+ outputPrinters = append(outputPrinters, printerHandler)
+ }
+
+ uiPrinter := getUIPrinter(scanInfo.VerboseMode, scanInfo.FormatVersion, scanInfo.PrintAttackTree, cautils.ViewTypes(scanInfo.View))
// ================== return interface ======================================
@@ -103,7 +112,8 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
tenantConfig: tenantConfig,
resourceHandler: resourceHandler,
report: reportHandler,
- printerHandler: printerHandler,
+ outputPrinters: outputPrinters,
+ uiPrinter: uiPrinter,
hostSensorHandler: hostSensorHandler,
}
}
@@ -127,7 +137,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetTenantEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
- scanInfo.Getters.AttackTracksGetter = getAttackTracksGetter(interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
+ scanInfo.Getters.AttackTracksGetter = getAttackTracksGetter(scanInfo.AttackTracks, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
// TODO - list supported frameworks/controls
if scanInfo.ScanAll {
@@ -141,7 +151,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
}
}()
- resultsHandling := resultshandling.NewResultsHandler(interfaces.report, interfaces.printerHandler)
+ resultsHandling := resultshandling.NewResultsHandler(interfaces.report, interfaces.outputPrinters, interfaces.uiPrinter)
// ===================== policies & resources =====================
policyHandler := policyhandler.NewPolicyHandler(interfaces.resourceHandler)
@@ -160,7 +170,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
// ======================== prioritization ===================
- if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(scanInfo.Getters.AttackTracksGetter); err != nil {
+ if priotizationHandler, err := resourcesprioritization.NewResourcesPrioritizationHandler(scanInfo.Getters.AttackTracksGetter, scanInfo.PrintAttackTree); err != nil {
logger.L().Warning("failed to get attack tracks, this may affect the scanning results", helpers.Error(err))
} else if err := priotizationHandler.PrioritizeResources(scanData); err != nil {
return resultsHandling, fmt.Errorf("%w", err)
diff --git a/core/meta/datastructures/v1/download.go b/core/meta/datastructures/v1/download.go
index 6b44c323..b51a8342 100644
--- a/core/meta/datastructures/v1/download.go
+++ b/core/meta/datastructures/v1/download.go
@@ -6,6 +6,6 @@ type DownloadInfo struct {
Path string // directory to save artifact. Default is "~/.kubescape/"
FileName string // can be empty
Target string // type of artifact to download
- Name string // name of artifact to download
+ Identifier string // identifier of artifact to download
Credentials cautils.Credentials
}
diff --git a/core/pkg/containerscan/containerscan_mock.go b/core/pkg/containerscan/containerscan_mock.go
index cac3471e..ba08017f 100644
--- a/core/pkg/containerscan/containerscan_mock.go
+++ b/core/pkg/containerscan/containerscan_mock.go
@@ -50,7 +50,7 @@ func randSeq(n int, bank []rune) string {
b := make([]rune, n)
for i := range b {
- b[i] = bank[rand.Intn(len(bank))]
+ b[i] = bank[rand.Intn(len(bank))] //nolint:gosec
}
return string(b)
}
@@ -60,7 +60,7 @@ func GenerateContainerScanLayer(layer *ScanResultLayer) {
layer.LayerHash = randSeq(32, hash)
layer.Vulnerabilities = make(VulnerabilitiesList, 0)
layer.Packages = make(LinuxPkgs, 0)
- vuls := rand.Intn(10) + 1
+ vuls := rand.Intn(10) + 1 //nolint:gosec
for i := 0; i < vuls; i++ {
v := Vulnerability{}
diff --git a/core/pkg/containerscan/gojayunmarshaller.go b/core/pkg/containerscan/gojayunmarshaller.go
index a79f5c08..4d7d1184 100644
--- a/core/pkg/containerscan/gojayunmarshaller.go
+++ b/core/pkg/containerscan/gojayunmarshaller.go
@@ -64,7 +64,7 @@ func (pkgs *LinuxPkgs) UnmarshalJSONArray(dec *gojay.Decoder) error {
return nil
}
-//--------Vul fixed in----------------------------------
+// --------Vul fixed in----------------------------------
func (fx *FixedIn) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
switch key {
diff --git a/core/pkg/containerscan/rawdatastrucutres.go b/core/pkg/containerscan/rawdatastrucutres.go
index fd72ed71..3dc21256 100644
--- a/core/pkg/containerscan/rawdatastrucutres.go
+++ b/core/pkg/containerscan/rawdatastrucutres.go
@@ -71,19 +71,19 @@ type PackageFile struct {
// types to provide unmarshalling:
-//VulnerabilitiesList -s.e
+// VulnerabilitiesList -s.e
type LayersList []ScanResultLayer
-//VulnerabilitiesList -s.e
+// VulnerabilitiesList -s.e
type VulnerabilitiesList []Vulnerability
-//LinuxPkgs - slice of linux pkgs
+// LinuxPkgs - slice of linux pkgs
type LinuxPkgs []LinuxPackage
-//VulFixes - information bout when/how this vul was fixed
+// VulFixes - information about when/how this vulnerability was fixed
type VulFixes []FixedIn
-//PkgFiles - slice of files belong to specific pkg
+// PkgFiles - slice of files belonging to a specific pkg
type PkgFiles []PackageFile
func (v *ScanResultReport) AsFNVHash() string {
diff --git a/core/pkg/hostsensorutils/hostsensor.yaml b/core/pkg/hostsensorutils/hostsensor.yaml
index d59596ae..4ad7e84d 100644
--- a/core/pkg/hostsensorutils/hostsensor.yaml
+++ b/core/pkg/hostsensorutils/hostsensor.yaml
@@ -38,6 +38,7 @@ spec:
- name: host-sensor
image: quay.io/kubescape/host-scanner:v1.0.39
securityContext:
+ allowPrivilegeEscalation: true
privileged: true
readOnlyRootFilesystem: true
procMount: Unmasked
diff --git a/core/pkg/hostsensorutils/hostsensorgetfrompod.go b/core/pkg/hostsensorutils/hostsensorgetfrompod.go
index 46d6308d..b76a65d2 100644
--- a/core/pkg/hostsensorutils/hostsensorgetfrompod.go
+++ b/core/pkg/hostsensorutils/hostsensorgetfrompod.go
@@ -212,9 +212,9 @@ func (hsh *HostSensorHandler) GetKubeletConfigurations() ([]hostsensor.HostSenso
// loop over pods and port-forward it to each of them
res, err := hsh.sendAllPodsHTTPGETRequest("/kubeletConfigurations", "KubeletConfiguration") // empty kind, will be overridden
for resIdx := range res {
- jsonBytes, err := yaml.YAMLToJSON(res[resIdx].Data)
- if err != nil {
- logger.L().Error("failed to convert kubelet configurations from yaml to json", helpers.Error(err))
+ jsonBytes, ery := yaml.YAMLToJSON(res[resIdx].Data)
+ if ery != nil {
+ logger.L().Error("failed to convert kubelet configurations from yaml to json", helpers.Error(ery))
continue
}
res[resIdx].SetData(jsonBytes)
diff --git a/core/pkg/hostsensorutils/hostsensorworkerpool.go b/core/pkg/hostsensorutils/hostsensorworkerpool.go
index ded788e5..5a5438e1 100644
--- a/core/pkg/hostsensorutils/hostsensorworkerpool.go
+++ b/core/pkg/hostsensorutils/hostsensorworkerpool.go
@@ -32,7 +32,7 @@ func NewWorkerPool() workerPool {
}
func (wp *workerPool) init(noOfPods ...int) {
- if noOfPods != nil && len(noOfPods) > 0 && noOfPods[0] < noOfWorkers {
+ if len(noOfPods) > 0 && noOfPods[0] < noOfWorkers {
wp.noOfWorkers = noOfPods[0]
}
// init the channels
@@ -82,13 +82,13 @@ func (wp *workerPool) hostSensorGetResults(result *[]hostsensor.HostSensorDataEn
func (wp *workerPool) hostSensorApplyJobs(podList map[string]string, path, requestKind string) {
go func() {
for podName, nodeName := range podList {
- job := job{
+ thisJob := job{
podName: podName,
nodeName: nodeName,
requestKind: requestKind,
path: path,
}
- wp.jobs <- job
+ wp.jobs <- thisJob
}
close(wp.jobs)
diff --git a/core/pkg/opaprocessor/processorhandler.go b/core/pkg/opaprocessor/processorhandler.go
index 9c32d9f5..83a4fd17 100644
--- a/core/pkg/opaprocessor/processorhandler.go
+++ b/core/pkg/opaprocessor/processorhandler.go
@@ -69,23 +69,26 @@ func (opap *OPAProcessor) Process(policies *cautils.Policies) error {
cautils.StartSpinner()
- var errs error
- for _, control := range policies.Controls {
+ for _, toPin := range policies.Controls {
+ control := toPin
resourcesAssociatedControl, err := opap.processControl(&control)
if err != nil {
logger.L().Error(err.Error())
}
+
+ if len(resourcesAssociatedControl) == 0 {
+ continue
+ }
+
// update resources with latest results
- if len(resourcesAssociatedControl) != 0 {
- for resourceID, controlResult := range resourcesAssociatedControl {
- if _, ok := opap.ResourcesResult[resourceID]; !ok {
- opap.ResourcesResult[resourceID] = resourcesresults.Result{ResourceID: resourceID}
- }
- t := opap.ResourcesResult[resourceID]
- t.AssociatedControls = append(t.AssociatedControls, controlResult)
- opap.ResourcesResult[resourceID] = t
+ for resourceID, controlResult := range resourcesAssociatedControl {
+ if _, ok := opap.ResourcesResult[resourceID]; !ok {
+ opap.ResourcesResult[resourceID] = resourcesresults.Result{ResourceID: resourceID}
}
+ t := opap.ResourcesResult[resourceID]
+ t.AssociatedControls = append(t.AssociatedControls, controlResult)
+ opap.ResourcesResult[resourceID] = t
}
}
@@ -95,7 +98,7 @@ func (opap *OPAProcessor) Process(policies *cautils.Policies) error {
opap.loggerDoneScanning()
- return errs
+ return nil
}
func (opap *OPAProcessor) loggerStartScanning() {
diff --git a/core/pkg/opaprocessor/processorhandlerutils.go b/core/pkg/opaprocessor/processorhandlerutils.go
index b468e668..a28ac7c0 100644
--- a/core/pkg/opaprocessor/processorhandlerutils.go
+++ b/core/pkg/opaprocessor/processorhandlerutils.go
@@ -15,9 +15,9 @@ import (
// updateResults updates the results objects and report objects. This is a critical function - DO NOT CHANGE
//
// The function:
-// - removes sensible data
-// - adds exceptions
-// - summarizes results
+// - removes sensitive data
+// - adds exceptions
+// - summarizes results
func (opap *OPAProcessor) updateResults() {
// remove data from all objects
@@ -91,9 +91,6 @@ func getKSObjects(k8sResources *cautils.KSResources, allResources map[string]wor
groupResources := k8sinterface.ResourceGroupToString(groups, version, resource)
for _, groupResource := range groupResources {
if k8sObj, ok := (*k8sResources)[groupResource]; ok {
- // if k8sObj == nil {
- // logger.L().Debug(fmt.Sprintf("resource '%s' is nil, probably failed to pull the resource", groupResource))
- // }
for i := range k8sObj {
k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
}
@@ -117,9 +114,11 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[s
groupResources := k8sinterface.ResourceGroupToString(groups, version, resource)
for _, groupResource := range groupResources {
if k8sObj, ok := (*k8sResources)[groupResource]; ok {
- if k8sObj == nil {
- // logger.L().Debug("skipping", helpers.String("resource", groupResource))
- }
+ /*
+ if k8sObj == nil {
+ // logger.L().Debug("skipping", helpers.String("resource", groupResource))
+ }
+ */
for i := range k8sObj {
k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
}
diff --git a/core/pkg/policyhandler/handlenotification.go b/core/pkg/policyhandler/handlenotification.go
index 797fe881..ddf83c9b 100644
--- a/core/pkg/policyhandler/handlenotification.go
+++ b/core/pkg/policyhandler/handlenotification.go
@@ -2,12 +2,20 @@ package policyhandler
import (
"fmt"
+ "strings"
+
+ helpersv1 "github.com/kubescape/opa-utils/reporthandling/helpers/v1"
+
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+
+ cloudsupportv1 "github.com/kubescape/k8s-interface/cloudsupport/v1"
+ reportv2 "github.com/kubescape/opa-utils/reporthandling/v2"
- "github.com/armosec/armoapi-go/armotypes"
"github.com/kubescape/k8s-interface/cloudsupport"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/pkg/resourcehandler"
+ "github.com/kubescape/opa-utils/reporthandling/apis"
)
// PolicyHandler -
@@ -51,16 +59,7 @@ func (policyHandler *PolicyHandler) CollectResources(policyIdentifier []cautils.
func (policyHandler *PolicyHandler) getResources(policyIdentifier []cautils.PolicyIdentifier, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
opaSessionObj.Report.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
- // attempting to get cloud provider from API server git version
- if opaSessionObj.Report.ClusterAPIServerInfo != nil {
- opaSessionObj.Report.ClusterCloudProvider = cloudsupport.GetCloudProvider(opaSessionObj.Report.ClusterAPIServerInfo.GitVersion)
- }
-
- // if didn't succeed getting cloud provider from API server git version, try from context.
- if opaSessionObj.Report.ClusterCloudProvider == "" {
- clusterName := k8sinterface.GetContextName()
- opaSessionObj.Report.ClusterCloudProvider = cloudsupport.GetCloudProvider(clusterName)
- }
+ setCloudMetadata(opaSessionObj)
resourcesMap, allResources, ksResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, &policyIdentifier[0].Designators)
if err != nil {
@@ -74,9 +73,78 @@ func (policyHandler *PolicyHandler) getResources(policyIdentifier []cautils.Poli
return nil
}
+/* unused for now
func getDesignator(policyIdentifier []cautils.PolicyIdentifier) *armotypes.PortalDesignator {
if len(policyIdentifier) > 0 {
return &policyIdentifier[0].Designators
}
return &armotypes.PortalDesignator{}
}
+*/
+
+func setCloudMetadata(opaSessionObj *cautils.OPASessionObj) {
+ iCloudMetadata := getCloudMetadata(opaSessionObj, k8sinterface.GetConfig())
+ if iCloudMetadata == nil {
+ return
+ }
+ cloudMetadata := reportv2.NewCloudMetadata(iCloudMetadata)
+ opaSessionObj.Metadata.ContextMetadata.ClusterContextMetadata.CloudMetadata = cloudMetadata
+ opaSessionObj.Metadata.ClusterMetadata.CloudMetadata = cloudMetadata // deprecated - fallback
+ opaSessionObj.Report.ClusterCloudProvider = iCloudMetadata.Provider().ToString() // deprecated - fallback
+}
+
+// getCloudMetadata - get cloud metadata from kubeconfig or API server
+// There are 3 options:
+// 1. Get cloud provider from API server git version (EKS, GKE)
+// 2. Get cloud provider from kubeconfig by parsing the cluster context (EKS, GKE)
+// 3. Get cloud provider from kubeconfig by parsing the server URL (AKS)
+func getCloudMetadata(opaSessionObj *cautils.OPASessionObj, config *clientcmdapi.Config) apis.ICloudParser {
+
+ if config == nil {
+ return nil
+ }
+
+ var provider string
+
+ // attempting to get cloud provider from API server git version
+ if opaSessionObj.Report.ClusterAPIServerInfo != nil {
+ provider = cloudsupport.GetCloudProvider(opaSessionObj.Report.ClusterAPIServerInfo.GitVersion)
+ }
+
+ if provider == cloudsupportv1.AKS || isAKS(config) {
+ return helpersv1.NewAKSMetadata(k8sinterface.GetContextName())
+ }
+ if provider == cloudsupportv1.EKS || isEKS(config) {
+ return helpersv1.NewEKSMetadata(k8sinterface.GetContextName())
+ }
+ if provider == cloudsupportv1.GKE || isGKE(config) {
+ return helpersv1.NewGKEMetadata(k8sinterface.GetContextName())
+ }
+
+ return nil
+}
+
+// check if the server is AKS. e.g. https://XXX.XX.XXX.azmk8s.io:443
+func isAKS(config *clientcmdapi.Config) bool {
+ const serverIdentifierAKS = "azmk8s.io"
+ if cluster, ok := config.Clusters[config.CurrentContext]; ok {
+ return strings.Contains(cluster.Server, serverIdentifierAKS)
+ }
+ return false
+}
+
+// check if the server is EKS. e.g. arn:aws:eks:eu-west-1:xxx:cluster/xxxx
+func isEKS(config *clientcmdapi.Config) bool {
+ if context, ok := config.Contexts[config.CurrentContext]; ok {
+ return strings.Contains(context.Cluster, cloudsupportv1.EKS)
+ }
+ return false
+}
+
+// check if the server is GKE. e.g. gke_xxx-xx-0000_us-central1-c_xxxx-1
+func isGKE(config *clientcmdapi.Config) bool {
+ if context, ok := config.Contexts[config.CurrentContext]; ok {
+ return strings.Contains(context.Cluster, cloudsupportv1.GKE)
+ }
+ return false
+}
diff --git a/core/pkg/policyhandler/handlenotification_test.go b/core/pkg/policyhandler/handlenotification_test.go
new file mode 100644
index 00000000..265e62f9
--- /dev/null
+++ b/core/pkg/policyhandler/handlenotification_test.go
@@ -0,0 +1,201 @@
+package policyhandler
+
+import (
+ _ "embed"
+ "encoding/json"
+ "testing"
+
+ "github.com/kubescape/kubescape/v2/core/cautils"
+ "github.com/kubescape/opa-utils/reporthandling/apis"
+ helpersv1 "github.com/kubescape/opa-utils/reporthandling/helpers/v1"
+ reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
+ "k8s.io/apimachinery/pkg/version"
+ clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+)
+
+var (
+ //go:embed kubeconfig_mock.json
+ kubeConfigMock string
+)
+
+func getKubeConfigMock() *clientcmdapi.Config {
+ kubeConfig := clientcmdapi.Config{}
+ if err := json.Unmarshal([]byte(kubeConfigMock), &kubeConfig); err != nil {
+ panic(err)
+ }
+ return &kubeConfig
+}
+func Test_getCloudMetadata(t *testing.T) {
+ type args struct {
+ context string
+ opaSessionObj *cautils.OPASessionObj
+ kubeConfig *clientcmdapi.Config
+ }
+ kubeConfig := getKubeConfigMock()
+ tests := []struct {
+ want apis.ICloudParser
+ args args
+ name string
+ }{
+ {
+ name: "Test_getCloudMetadata - GitVersion: GKE",
+ args: args{
+ opaSessionObj: &cautils.OPASessionObj{
+ Report: &reporthandlingv2.PostureReport{
+ ClusterAPIServerInfo: &version.Info{
+ GitVersion: "v1.25.4-gke.1600",
+ },
+ },
+ },
+ context: "",
+ kubeConfig: kubeConfig,
+ },
+ want: helpersv1.NewGKEMetadata(""),
+ },
+ {
+ name: "Test_getCloudMetadata_context_GKE",
+ args: args{
+ opaSessionObj: &cautils.OPASessionObj{
+ Report: &reporthandlingv2.PostureReport{
+ ClusterAPIServerInfo: nil,
+ },
+ },
+ kubeConfig: kubeConfig,
+ context: "gke_xxx-xx-0000_us-central1-c_xxxx-1",
+ },
+ want: helpersv1.NewGKEMetadata(""),
+ },
+ {
+ name: "Test_getCloudMetadata_context_EKS",
+ args: args{
+ opaSessionObj: &cautils.OPASessionObj{
+ Report: &reporthandlingv2.PostureReport{
+ ClusterAPIServerInfo: nil,
+ },
+ },
+ kubeConfig: kubeConfig,
+ context: "arn:aws:eks:eu-west-1:xxx:cluster/xxxx",
+ },
+ want: helpersv1.NewEKSMetadata(""),
+ },
+ {
+ name: "Test_getCloudMetadata_context_AKS",
+ args: args{
+ opaSessionObj: &cautils.OPASessionObj{
+ Report: &reporthandlingv2.PostureReport{
+ ClusterAPIServerInfo: nil,
+ },
+ },
+ kubeConfig: kubeConfig,
+ context: "xxxx-2",
+ },
+ want: helpersv1.NewAKSMetadata(""),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ tt.args.kubeConfig.CurrentContext = tt.args.context
+ got := getCloudMetadata(tt.args.opaSessionObj, tt.args.kubeConfig)
+ if got == nil {
+ t.Errorf("getCloudMetadata() = %v, want %v", got, tt.want.Provider())
+ return
+ }
+ if got.Provider() != tt.want.Provider() {
+ t.Errorf("getCloudMetadata() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_isGKE(t *testing.T) {
+ type args struct {
+ config *clientcmdapi.Config
+ context string
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "Test_isGKE",
+ args: args{
+ config: getKubeConfigMock(),
+ context: "gke_xxx-xx-0000_us-central1-c_xxxx-1",
+ },
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+
+ t.Run(tt.name, func(t *testing.T) {
+ // set context
+ tt.args.config.CurrentContext = tt.args.context
+ if got := isGKE(tt.args.config); got != tt.want {
+ t.Errorf("isGKE() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_isEKS(t *testing.T) {
+ type args struct {
+ config *clientcmdapi.Config
+ context string
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "Test_isEKS",
+ args: args{
+ config: getKubeConfigMock(),
+ context: "arn:aws:eks:eu-west-1:xxx:cluster/xxxx",
+ },
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+
+ t.Run(tt.name, func(t *testing.T) {
+ // set context
+ tt.args.config.CurrentContext = tt.args.context
+ if got := isEKS(tt.args.config); got != tt.want {
+ t.Errorf("isEKS() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_isAKS(t *testing.T) {
+ type args struct {
+ config *clientcmdapi.Config
+ context string
+ }
+ tests := []struct {
+ name string
+ args args
+ want bool
+ }{
+ {
+ name: "Test_isAKS",
+ args: args{
+ config: getKubeConfigMock(),
+ context: "xxxx-2",
+ },
+ want: true,
+ },
+ }
+ for _, tt := range tests {
+
+ t.Run(tt.name, func(t *testing.T) {
+ // set context
+ tt.args.config.CurrentContext = tt.args.context
+ if got := isAKS(tt.args.config); got != tt.want {
+ t.Errorf("isAKS() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/core/pkg/policyhandler/handlepullpolicies.go b/core/pkg/policyhandler/handlepullpolicies.go
index 9915e05f..2d12af99 100644
--- a/core/pkg/policyhandler/handlepullpolicies.go
+++ b/core/pkg/policyhandler/handlepullpolicies.go
@@ -56,14 +56,16 @@ func (policyHandler *PolicyHandler) getScanPolicies(policyIdentifier []cautils.P
switch getScanKind(policyIdentifier) {
case apisv1.KindFramework: // Download frameworks
for _, rule := range policyIdentifier {
- receivedFramework, err := policyHandler.getters.PolicyGetter.GetFramework(rule.Name)
+ receivedFramework, err := policyHandler.getters.PolicyGetter.GetFramework(rule.Identifier)
if err != nil {
return frameworks, policyDownloadError(err)
}
+ if err := validateFramework(receivedFramework); err != nil {
+ return frameworks, err
+ }
if receivedFramework != nil {
frameworks = append(frameworks, *receivedFramework)
-
- cache := getter.GetDefaultPath(rule.Name + ".json")
+ cache := getter.GetDefaultPath(rule.Identifier + ".json")
if err := getter.SaveInFile(receivedFramework, cache); err != nil {
logger.L().Warning("failed to cache file", helpers.String("file", cache), helpers.Error(err))
}
@@ -73,15 +75,15 @@ func (policyHandler *PolicyHandler) getScanPolicies(policyIdentifier []cautils.P
f := reporthandling.Framework{}
var receivedControl *reporthandling.Control
var err error
- for _, rule := range policyIdentifier {
- receivedControl, err = policyHandler.getters.PolicyGetter.GetControl(rule.Name)
+ for _, policy := range policyIdentifier {
+ receivedControl, err = policyHandler.getters.PolicyGetter.GetControl(policy.Identifier)
if err != nil {
return frameworks, policyDownloadError(err)
}
if receivedControl != nil {
f.Controls = append(f.Controls, *receivedControl)
- cache := getter.GetDefaultPath(rule.Name + ".json")
+ cache := getter.GetDefaultPath(policy.Identifier + ".json")
if err := getter.SaveInFile(receivedControl, cache); err != nil {
logger.L().Warning("failed to cache file", helpers.String("file", cache), helpers.Error(err))
}
@@ -98,7 +100,7 @@ func (policyHandler *PolicyHandler) getScanPolicies(policyIdentifier []cautils.P
func policyIdentifierToSlice(rules []cautils.PolicyIdentifier) []string {
s := []string{}
for i := range rules {
- s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Name))
+ s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Identifier))
}
return s
}
diff --git a/core/pkg/policyhandler/handlepullpolicies_test.go b/core/pkg/policyhandler/handlepullpolicies_test.go
deleted file mode 100644
index d1118182..00000000
--- a/core/pkg/policyhandler/handlepullpolicies_test.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package policyhandler
-
-// func TestGetPoliciesFromBackend(t *testing.T) {
-// notification := reporthandling.PolicyNotification{
-// Rules: []reporthandling.PolicyIdentifier{
-// {
-// Kind: reporthandling.KindFramework,
-// Name: "mitretest",
-// },
-// },
-// }
-// // os.Setenv(cacli., "")
-// ph := PolicyHandler{
-// cacli: &cacli.Cacli{},
-// }
-// f, err := ph.GetPoliciesFromBackend(&notification)
-// if err != nil {
-// t.Error(err)
-// }
-// if len(f) == 0 {
-// t.Errorf("empty")
-// }
-// }
diff --git a/core/pkg/policyhandler/handlepullpoliciesutils.go b/core/pkg/policyhandler/handlepullpoliciesutils.go
index 4e109e8a..551f19d5 100644
--- a/core/pkg/policyhandler/handlepullpoliciesutils.go
+++ b/core/pkg/policyhandler/handlepullpoliciesutils.go
@@ -5,6 +5,7 @@ import (
"strings"
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
+ "github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/kubescape/v2/core/cautils"
)
@@ -21,3 +22,16 @@ func policyDownloadError(err error) error {
}
return err
}
+
+// validate the framework
+func validateFramework(framework *reporthandling.Framework) error {
+ if framework == nil {
+ return fmt.Errorf("received empty framework")
+ }
+
+ // validate the controls are not empty
+ if len(framework.Controls) == 0 {
+ return fmt.Errorf("failed to load controls for framework: %s: empty list of controls", framework.Name)
+ }
+ return nil
+}
diff --git a/core/pkg/policyhandler/handlepullpoliciesutils_test.go b/core/pkg/policyhandler/handlepullpoliciesutils_test.go
new file mode 100644
index 00000000..453ee5ac
--- /dev/null
+++ b/core/pkg/policyhandler/handlepullpoliciesutils_test.go
@@ -0,0 +1,48 @@
+package policyhandler
+
+import (
+ "testing"
+
+ "github.com/kubescape/opa-utils/reporthandling"
+)
+
+func Test_validateFramework(t *testing.T) {
+ type args struct {
+ framework *reporthandling.Framework
+ }
+ tests := []struct {
+ name string
+ args args
+ wantErr bool
+ }{
+ {
+ name: "empty framework",
+ args: args{
+ framework: &reporthandling.Framework{
+ Controls: []reporthandling.Control{},
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "non-empty framework",
+ args: args{
+ framework: &reporthandling.Framework{
+ Controls: []reporthandling.Control{
+ {
+ ControlID: "c-0001",
+ },
+ },
+ },
+ },
+ wantErr: false,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if err := validateFramework(tt.args.framework); (err != nil) != tt.wantErr {
+ t.Errorf("validateFramework() error = %v, wantErr %v", err, tt.wantErr)
+ }
+ })
+ }
+}
diff --git a/core/pkg/policyhandler/kubeconfig_mock.json b/core/pkg/policyhandler/kubeconfig_mock.json
new file mode 100644
index 00000000..4c62f82d
--- /dev/null
+++ b/core/pkg/policyhandler/kubeconfig_mock.json
@@ -0,0 +1,82 @@
+{
+ "preferences": {},
+ "clusters": {
+ "xxxx-2": {
+ "server": "https://XXX.XX.XXX.azmk8s.io:443"
+ },
+ "arn:aws:eks:eu-west-1:xxx:cluster/xxxx": {
+ "server": "https://XX.XX.eu-west-1.eks.amazonaws.com"
+ },
+ "xxxx-0": {
+ "server": "https://kubernetes.docker.XXX:6443"
+ },
+ "xxxx-1": {
+ "server": "https://127.0.0.1:49498"
+ },
+ "gke_xxx-xx-0000_us-central1-c_xxxx-1": {
+ "server": "https://0.0.0.0"
+ },
+ "microk8s-cluster": {
+ "server": "https://0.0.0.0:16443"
+ }
+ },
+ "users": {
+ "arn:aws:eks:eu-west-1:xxx:cluster/xxxx": {
+ "exec": {
+ "command": "aws",
+ "args": [
+ "--region",
+ "eu-west-1",
+ "eks",
+ "get-token",
+ "--cluster-name",
+ "xxx"
+ ],
+ "env": null,
+ "apiVersion": "client.authentication.k8s.io/v1beta1",
+ "provideClusterInfo": false,
+ "Config": null,
+ "InteractiveMode": "IfAvailable",
+ "StdinUnavailable": false,
+ "StdinUnavailableMessage": ""
+ }
+ },
+ "gke_elated-pottery-xxx_us-central1-c_xxxx-1": {
+ "auth-provider": {
+ "name": "gcp",
+ "config": {
+ "cmd-args": "config config-helper --format=json",
+ "cmd-path": "/opt/homebrew/Caskroom/google-cloud-sdk/latest/google-cloud-sdk/bin/gcloud"
+ }
+ }
+ }
+ },
+ "contexts": {
+ "xxxx-2": {
+ "cluster": "xxxx-2",
+ "user": "clusterUser_MyResourceGroup_xxxx-2"
+ },
+ "arn:aws:eks:eu-west-1:xxx:cluster/xxxx": {
+ "cluster": "arn:aws:eks:eu-west-1:xxx:cluster/xxxx",
+ "user": "arn:aws:eks:eu-west-1:xxx:cluster/xxxx"
+ },
+ "docker-desktop": {
+ "cluster": "docker-desktop",
+ "user": "docker-desktop"
+ },
+ "xxxx-0": {
+ "cluster": "xxxx-0",
+ "user": "xxxx-0",
+ "namespace": "default"
+ },
+ "gke_xxx-xx-0000_us-central1-c_xxxx-1": {
+ "cluster": "gke_xxx-xx-0000_us-central1-c_xxxx-1",
+ "user": "gke_xxx-xx-0000_us-central1-c_xxxx-1"
+ },
+ "microk8s": {
+ "cluster": "microk8s-cluster",
+ "user": "admin"
+ }
+ },
+ "current-context": "xxxx-0"
+}
\ No newline at end of file
diff --git a/core/pkg/registryadaptors/armosec/v1/civksadaptor.go b/core/pkg/registryadaptors/armosec/v1/civksadaptor.go
index 8fe4b48b..a72a1a67 100644
--- a/core/pkg/registryadaptors/armosec/v1/civksadaptor.go
+++ b/core/pkg/registryadaptors/armosec/v1/civksadaptor.go
@@ -25,14 +25,17 @@ func (ksCivAdaptor *KSCivAdaptor) Login() error {
}
func (ksCivAdaptor *KSCivAdaptor) GetImagesVulnerabilities(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageVulnerabilityReport, error) {
resultList := make([]registryvulnerabilities.ContainerImageVulnerabilityReport, 0)
- for _, imageID := range imageIDs {
+ for _, toPin := range imageIDs {
+ imageID := toPin
result, err := ksCivAdaptor.GetImageVulnerability(&imageID)
- if err == nil {
- resultList = append(resultList, *result)
- } else {
+ if err != nil {
logger.L().Debug("failed to get image vulnerabilities", helpers.String("image", imageID.Tag), helpers.Error(err))
+ continue
}
+
+ resultList = append(resultList, *result)
}
+
return resultList, nil
}
diff --git a/core/pkg/registryadaptors/gcp/v1/gcpadaptor.go b/core/pkg/registryadaptors/gcp/v1/gcpadaptor.go
index 253a58b8..cd8a99db 100644
--- a/core/pkg/registryadaptors/gcp/v1/gcpadaptor.go
+++ b/core/pkg/registryadaptors/gcp/v1/gcpadaptor.go
@@ -30,14 +30,17 @@ func (GCPAdaptor *GCPAdaptor) Login() error {
func (GCPAdaptor *GCPAdaptor) GetImagesVulnerabilities(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageVulnerabilityReport, error) {
resultList := make([]registryvulnerabilities.ContainerImageVulnerabilityReport, 0)
- for _, imageID := range imageIDs {
+ for _, toPin := range imageIDs {
+ imageID := toPin
result, err := GCPAdaptor.GetImageVulnerability(&imageID)
- if err == nil {
- resultList = append(resultList, *result)
- } else {
+ if err != nil {
logger.L().Debug("failed to get image vulnerabilities", helpers.String("image", imageID.Tag), helpers.Error(err))
+ continue
}
+
+ resultList = append(resultList, *result)
}
+
return resultList, nil
}
diff --git a/core/pkg/registryadaptors/gcp/v1/gcpadaptormock.go b/core/pkg/registryadaptors/gcp/v1/gcpadaptormock.go
index a99496b0..f0c496d4 100644
--- a/core/pkg/registryadaptors/gcp/v1/gcpadaptormock.go
+++ b/core/pkg/registryadaptors/gcp/v1/gcpadaptormock.go
@@ -20,15 +20,16 @@ func (GCPAdaptorMock *GCPAdaptorMock) Login() error {
func (GCPAdaptorMock *GCPAdaptorMock) GetImagesVulnerabilities(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageVulnerabilityReport, error) {
resultList := make([]registryvulnerabilities.ContainerImageVulnerabilityReport, 0)
- for _, imageID := range imageIDs {
+ for _, toPin := range imageIDs {
+ imageID := toPin
result, err := GCPAdaptorMock.GetImageVulnerability(&imageID)
- if err == nil {
- resultList = append(resultList, *result)
- } else {
+ if err != nil {
return nil, err
}
- return resultList, nil
+ resultList = append(resultList, *result)
+
+ return resultList, nil //nolint:staticcheck // we return at once and shorten the mocked result
}
GCPAdaptorMock.resultList = resultList
@@ -40,7 +41,7 @@ func (GCPAdaptorMock *GCPAdaptorMock) GetImageVulnerability(imageID *registryvul
occurrence := []*grafeaspb.Occurrence{}
arr := GetMockData()
- for i, _ := range arr {
+ for i := range arr {
if imageID.Tag == "gcr.io/myproject/nginx@sha256:2XXXXX" && i == 4 {
break
}
diff --git a/core/pkg/registryadaptors/registryvulnerabilities/datastructures.go b/core/pkg/registryadaptors/registryvulnerabilities/datastructures.go
index 10f5e2e7..081ff8dc 100644
--- a/core/pkg/registryadaptors/registryvulnerabilities/datastructures.go
+++ b/core/pkg/registryadaptors/registryvulnerabilities/datastructures.go
@@ -43,7 +43,7 @@ type Vulnerability struct {
Categories Categories `json:"categories"`
NoteName string `json:",omitempty"`
CreateTime time.Time `json:",omitempty"`
- UpdateTime time.Time `json:",omitempty"` // Vulnerablity started
+ UpdateTime time.Time `json:",omitempty"` // Vulnerability started
CVSS float32 `json:",omitempty"` // other cvss versions are available
AffectedCPEURI string `json:",omitempty"` // Package issue
AffectedPackage string `json:",omitempty"`
diff --git a/core/pkg/resourcehandler/fieldselector.go b/core/pkg/resourcehandler/fieldselector.go
index cd2cda90..f4054c67 100644
--- a/core/pkg/resourcehandler/fieldselector.go
+++ b/core/pkg/resourcehandler/fieldselector.go
@@ -34,10 +34,7 @@ func NewExcludeSelector(ns string) *ExcludeSelector {
func (es *ExcludeSelector) GetClusterScope(resource *schema.GroupVersionResource) bool {
// for selector, 'namespace' is in Namespaced scope
- if resource.Resource == "namespaces" {
- return true
- }
- return false
+ return resource.Resource == "namespaces"
}
type IncludeSelector struct {
@@ -50,10 +47,7 @@ func NewIncludeSelector(ns string) *IncludeSelector {
func (is *IncludeSelector) GetClusterScope(resource *schema.GroupVersionResource) bool {
// for selector, 'namespace' is in Namespaced scope
- if resource.Resource == "namespaces" {
- return true
- }
- return false
+ return resource.Resource == "namespaces"
}
func (es *ExcludeSelector) GetNamespacesSelectors(resource *schema.GroupVersionResource) []string {
diff --git a/core/pkg/resourcehandler/filesloader.go b/core/pkg/resourcehandler/filesloader.go
index 411b86ff..e59765ff 100644
--- a/core/pkg/resourcehandler/filesloader.go
+++ b/core/pkg/resourcehandler/filesloader.go
@@ -112,6 +112,7 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
sourceToWorkloads := cautils.LoadResourcesFromFiles(path, repoRoot)
// update workloads and workloadIDToSource
+ var warnIssued bool
for source, ws := range sourceToWorkloads {
workloads = append(workloads, ws...)
@@ -131,7 +132,12 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
var lastCommit reporthandling.LastCommit
if gitRepo != nil {
- commitInfo, _ := gitRepo.GetFileLastCommit(source)
+ commitInfo, err := gitRepo.GetFileLastCommit(source)
+ if err != nil && !warnIssued {
+ logger.L().Warning("git scan skipped", helpers.Error(err))
+ warnIssued = true // croak only once
+ }
+
if commitInfo != nil {
lastCommit = reporthandling.LastCommit{
Hash: commitInfo.SHA,
diff --git a/core/pkg/resourcehandler/filesloaderutils.go b/core/pkg/resourcehandler/filesloaderutils.go
index 016c714e..e5b724ca 100644
--- a/core/pkg/resourcehandler/filesloaderutils.go
+++ b/core/pkg/resourcehandler/filesloaderutils.go
@@ -10,7 +10,6 @@ import (
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
- "github.com/kubescape/opa-utils/reporthandling"
)
// Clone git repository
@@ -63,6 +62,7 @@ func mapResources(workloads []workloadinterface.IMetadata) map[string][]workload
}
+/* unused for now
func addCommitData(input string, workloadIDToSource map[string]reporthandling.Source) {
giRepo, err := cautils.NewLocalGitRepository(input)
if err != nil || giRepo == nil {
@@ -84,3 +84,4 @@ func addCommitData(input string, workloadIDToSource map[string]reporthandling.So
workloadIDToSource[k] = sourceObj
}
}
+*/
diff --git a/core/pkg/resourcehandler/k8sresources.go b/core/pkg/resourcehandler/k8sresources.go
index e1c42eab..f1de0d20 100644
--- a/core/pkg/resourcehandler/k8sresources.go
+++ b/core/pkg/resourcehandler/k8sresources.go
@@ -88,7 +88,6 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio
logger.L().Info("Requesting images vulnerabilities results")
cautils.StartSpinner()
if err := k8sHandler.registryAdaptors.collectImagesVulnerabilities(k8sResourcesMap, allResources, ksResourceMap); err != nil {
- logger.L().Warning("failed to collect image vulnerabilities", helpers.Error(err), helpers.String("Read more here", "https://hub.armosec.io/docs/configuration-of-image-vulnerabilities"))
cautils.SetInfoMapForResources(fmt.Sprintf("failed to pull image scanning data: %s. for more information: https://hub.armosec.io/docs/configuration-of-image-vulnerabilities", err.Error()), imgVulnResources, sessionObj.InfoMap)
} else {
if isEmptyImgVulns(*ksResourceMap) {
@@ -248,7 +247,7 @@ func (k8sHandler *K8sResourceHandler) pullSingleResource(resource *schema.GroupV
clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource)
} else if k8sinterface.IsNamespaceScope(resource) {
clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource).Namespace(namespace)
- } else if k8sHandler.fieldSelector.GetClusterScope(*&resource) {
+ } else if k8sHandler.fieldSelector.GetClusterScope(resource) {
clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource)
} else {
continue
diff --git a/core/pkg/resourcehandler/remotegitutils.go b/core/pkg/resourcehandler/remotegitutils.go
index 06fc9ed8..fb67f2a0 100644
--- a/core/pkg/resourcehandler/remotegitutils.go
+++ b/core/pkg/resourcehandler/remotegitutils.go
@@ -8,6 +8,7 @@ import (
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/http"
giturl "github.com/kubescape/go-git-url"
@@ -15,19 +16,18 @@ import (
// To Check if the given repository is Public(No Authentication needed), send a HTTP GET request to the URL
// If response code is 200, the repository is Public.
-func isGitRepoPublic(URL string) bool {
- resp, err := nethttp.Get(URL)
-
+func isGitRepoPublic(u string) bool {
+ resp, err := nethttp.Get(u) //nolint:gosec
if err != nil {
return false
}
+ defer func() {
+ _ = resp.Body.Close()
+ }()
+
// if the status code is 200, our get request is successful.
// It only happens when the repository is public.
- if resp.StatusCode == 200 {
- return true
- }
-
- return false
+ return resp.StatusCode == nethttp.StatusOK
}
// Check if the GITHUB_TOKEN is present
@@ -38,6 +38,19 @@ func isGitTokenPresent(gitURL giturl.IGitAPI) bool {
return true
}
+// Get the error message according to the provider
+func getProviderError(gitURL giturl.IGitAPI) error {
+ switch gitURL.GetProvider() {
+ case "github":
+ return fmt.Errorf("%w", errors.New("GITHUB_TOKEN is not present"))
+ case "gitlab":
+ return fmt.Errorf("%w", errors.New("GITLAB_TOKEN is not present"))
+ case "azure":
+ return fmt.Errorf("%w", errors.New("AZURE_TOKEN is not present"))
+ }
+ return fmt.Errorf("%w", errors.New("unable to find the host name"))
+}
+
// cloneRepo clones a repository to a local temporary directory and returns the directory
func cloneRepo(gitURL giturl.IGitAPI) (string, error) {
@@ -60,9 +73,9 @@ func cloneRepo(gitURL giturl.IGitAPI) (string, error) {
auth = nil
} else {
- // Return Error if the GITHUB_TOKEN is not present
+ // Return Error if the AUTH_TOKEN is not present
if isGitTokenPresent := isGitTokenPresent(gitURL); !isGitTokenPresent {
- return "", fmt.Errorf("%w", errors.New("GITHUB_TOKEN is not present"))
+ return "", getProviderError(gitURL)
}
auth = &http.BasicAuth{
Username: "anything Except Empty String",
@@ -70,6 +83,11 @@ func cloneRepo(gitURL giturl.IGitAPI) (string, error) {
}
}
+ // For Azure repo cloning
+ transport.UnsupportedCapabilities = []capability.Capability{
+ capability.ThinPack,
+ }
+
// Clone option
cloneOpts := git.CloneOptions{URL: cloneURL, Auth: auth}
if gitURL.GetBranchName() != "" {
diff --git a/core/pkg/resourcehandler/repositoryscanner.go b/core/pkg/resourcehandler/repositoryscanner.go
index 4e92cb28..3477cbc1 100644
--- a/core/pkg/resourcehandler/repositoryscanner.go
+++ b/core/pkg/resourcehandler/repositoryscanner.go
@@ -204,13 +204,13 @@ func (g *GitHubRepository) setTree() error {
}
// press all tree to json
- var tree tree
- err = json.Unmarshal([]byte(body), &tree)
+ var thisTree tree
+ err = json.Unmarshal([]byte(body), &thisTree)
if err != nil {
return fmt.Errorf("failed to unmarshal response body from '%s', reason: %s", g.treeAPI(), err.Error())
// return nil
}
- g.tree = tree
+ g.tree = thisTree
return nil
}
diff --git a/core/pkg/resourcehandler/repositoryscanner_test.go b/core/pkg/resourcehandler/repositoryscanner_test.go
index f9ce633b..7a8e8313 100644
--- a/core/pkg/resourcehandler/repositoryscanner_test.go
+++ b/core/pkg/resourcehandler/repositoryscanner_test.go
@@ -10,7 +10,7 @@ var (
urlA = "https://github.com/kubescape/kubescape"
urlB = "https://github.com/kubescape/kubescape/blob/master/examples/online-boutique/adservice.yaml"
urlC = "https://github.com/kubescape/kubescape/tree/master/examples/online-boutique"
- urlD = "https://raw.githubusercontent.com/kubescape/kubescape/master/examples/online-boutique/adservice.yaml"
+ // urlD = "https://raw.githubusercontent.com/kubescape/kubescape/master/examples/online-boutique/adservice.yaml"
)
/*
diff --git a/core/pkg/resourcehandler/urlloader.go b/core/pkg/resourcehandler/urlloader.go
index f7d1826e..3d3c465e 100644
--- a/core/pkg/resourcehandler/urlloader.go
+++ b/core/pkg/resourcehandler/urlloader.go
@@ -1,13 +1,6 @@
package resourcehandler
-import (
- giturl "github.com/kubescape/go-git-url"
- logger "github.com/kubescape/go-logger"
- "github.com/kubescape/go-logger/helpers"
- "github.com/kubescape/k8s-interface/workloadinterface"
- "github.com/kubescape/kubescape/v2/core/cautils"
-)
-
+/* unused for now
func loadResourcesFromUrl(inputPatterns []string) (map[string][]workloadinterface.IMetadata, error) {
if len(inputPatterns) == 0 {
return nil, nil
@@ -46,3 +39,4 @@ func loadResourcesFromUrl(inputPatterns []string) (map[string][]workloadinterfac
return workloads, nil
}
+*/
diff --git a/core/pkg/resourcesprioritization/prioritizationhandler.go b/core/pkg/resourcesprioritization/prioritizationhandler.go
index 8ab39b6e..9d6987ec 100644
--- a/core/pkg/resourcesprioritization/prioritizationhandler.go
+++ b/core/pkg/resourcesprioritization/prioritizationhandler.go
@@ -1,6 +1,7 @@
package resourcesprioritization
import (
+ "encoding/json"
"fmt"
logger "github.com/kubescape/go-logger"
@@ -13,12 +14,16 @@ import (
)
type ResourcesPrioritizationHandler struct {
- attackTracks []v1alpha1.IAttackTrack
+ resourceToAttackTracks map[string]v1alpha1.IAttackTrack
+ attackTracks []v1alpha1.IAttackTrack
+ buildResourcesMap bool
}
-func NewResourcesPrioritizationHandler(attackTracksGetter getter.IAttackTracksGetter) (*ResourcesPrioritizationHandler, error) {
+func NewResourcesPrioritizationHandler(attackTracksGetter getter.IAttackTracksGetter, buildResourcesMap bool) (*ResourcesPrioritizationHandler, error) {
handler := &ResourcesPrioritizationHandler{
- attackTracks: make([]v1alpha1.IAttackTrack, 0),
+ attackTracks: make([]v1alpha1.IAttackTrack, 0),
+ resourceToAttackTracks: make(map[string]v1alpha1.IAttackTrack),
+ buildResourcesMap: buildResourcesMap,
}
tracks, err := attackTracksGetter.GetAttackTracks()
@@ -64,7 +69,6 @@ func (handler *ResourcesPrioritizationHandler) PrioritizeResources(sessionObj *c
resourcePriorityVector := []prioritization.ControlsVector{}
resource, exist := sessionObj.AllResources[resourceId]
if !exist {
- logger.L().Error("resource not found in resources map", helpers.String("resource ID", resourceId))
continue
}
@@ -86,6 +90,12 @@ func (handler *ResourcesPrioritizationHandler) PrioritizeResources(sessionObj *c
// Load the failed controls into the attack track
allPathsHandler := v1alpha1.NewAttackTrackAllPathsHandler(attackTrack, &controlsLookup)
+ // only build the map if the user requested it
+ if handler.buildResourcesMap {
+ // Store the attack track for returning to the caller
+ handler.resourceToAttackTracks[resourceId] = handler.copyAttackTrack(attackTrack, &controlsLookup)
+ }
+
// Calculate all the paths for the attack track
allAttackPaths := allPathsHandler.CalculateAllPaths()
@@ -128,6 +138,8 @@ func (handler *ResourcesPrioritizationHandler) PrioritizeResources(sessionObj *c
sessionObj.ResourcesPrioritized[resourceId] = prioritizedResource
}
+ sessionObj.ResourceAttackTracks = handler.resourceToAttackTracks
+
return nil
}
@@ -147,3 +159,18 @@ func (handler *ResourcesPrioritizationHandler) isSupportedKind(obj workloadinter
}
return false
}
+
+func (handler *ResourcesPrioritizationHandler) copyAttackTrack(attackTrack v1alpha1.IAttackTrack, lookup v1alpha1.IAttackTrackControlsLookup) v1alpha1.IAttackTrack {
+ copyBytes, _ := json.Marshal(attackTrack)
+ var copyObj v1alpha1.AttackTrack
+ json.Unmarshal(copyBytes, &copyObj)
+
+ iter := copyObj.Iterator()
+ for iter.HasNext() {
+ step := iter.Next()
+ failedControls := lookup.GetAssociatedControls(copyObj.GetName(), step.GetName())
+ step.SetControls(failedControls)
+ }
+
+ return &copyObj
+}
diff --git a/core/pkg/resourcesprioritization/prioritizationhandler_test.go b/core/pkg/resourcesprioritization/prioritizationhandler_test.go
index 3742bf97..63d1cf6a 100644
--- a/core/pkg/resourcesprioritization/prioritizationhandler_test.go
+++ b/core/pkg/resourcesprioritization/prioritizationhandler_test.go
@@ -97,7 +97,7 @@ func ResourceAssociatedControlMock(controlID string, status apis.ScanningStatus)
}
func TestNewResourcesPrioritizationHandler(t *testing.T) {
- handler, err := NewResourcesPrioritizationHandler(&AttackTracksGetterMock{})
+ handler, err := NewResourcesPrioritizationHandler(&AttackTracksGetterMock{}, false)
assert.NoError(t, err)
assert.Len(t, handler.attackTracks, 2)
assert.Equal(t, handler.attackTracks[0].GetName(), "TestAttackTrack")
@@ -182,7 +182,7 @@ func TestResourcesPrioritizationHandler_PrioritizeResources(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- handler, _ := NewResourcesPrioritizationHandler(&AttackTracksGetterMock{})
+ handler, _ := NewResourcesPrioritizationHandler(&AttackTracksGetterMock{}, false)
sessionObj := OPASessionObjMock(tt.allPoliciesControls, tt.results, tt.controls, tt.resources)
err := handler.PrioritizeResources(sessionObj)
assert.NoError(t, err, "expected to have no errors in PrioritizeResources()")
diff --git a/core/pkg/resultshandling/gotree/gotree.go b/core/pkg/resultshandling/gotree/gotree.go
new file mode 100644
index 00000000..5bfba207
--- /dev/null
+++ b/core/pkg/resultshandling/gotree/gotree.go
@@ -0,0 +1,128 @@
+package gotree
+
+import (
+ "strings"
+)
+
+const (
+ newLine = "\n"
+ emptySpace = " "
+ middleItem = "├── "
+ continueItem = "│ "
+ lastItem = "└── "
+)
+
+type (
+ tree struct {
+ text string
+ items []Tree
+ }
+
+ // Tree is tree interface
+ Tree interface {
+ Add(text string) Tree
+ AddTree(tree Tree)
+ Items() []Tree
+ Text() string
+ Print() string
+ }
+
+ printer struct {
+ }
+
+ // Printer is printer interface
+ Printer interface {
+ Print(Tree) string
+ }
+)
+
+// New returns a new GoTree.Tree
+func New(text string) Tree {
+ return &tree{
+ text: text,
+ items: []Tree{},
+ }
+}
+
+// Add adds a node to the tree
+func (t *tree) Add(text string) Tree {
+ n := New(text)
+ t.items = append(t.items, n)
+ return n
+}
+
+// AddTree adds a tree as an item
+func (t *tree) AddTree(tree Tree) {
+ t.items = append(t.items, tree)
+}
+
+// Text returns the node's value
+func (t *tree) Text() string {
+ return t.text
+}
+
+// Items returns all items in the tree
+func (t *tree) Items() []Tree {
+ return t.items
+}
+
+// Print returns an visual representation of the tree
+func (t *tree) Print() string {
+ return newPrinter().Print(t)
+}
+
+func newPrinter() Printer {
+ return &printer{}
+}
+
+// Print prints a tree to a string
+func (p *printer) Print(t Tree) string {
+ return t.Text() + newLine + p.printItems(t.Items(), []bool{})
+}
+
+func (p *printer) printText(text string, spaces []bool, last bool) string {
+ var result string
+ for _, space := range spaces {
+ if space {
+ result += emptySpace
+ } else {
+ result += continueItem
+ }
+ }
+
+ indicator := middleItem
+ if last {
+ indicator = lastItem
+ }
+
+ var out string
+ lines := strings.Split(text, "\n")
+ for i := range lines {
+ text := lines[i]
+ if i == 0 {
+ out += result + indicator + text + newLine
+ continue
+ }
+ if last {
+ indicator = emptySpace
+ } else {
+ indicator = continueItem
+ }
+ out += result + indicator + text + newLine
+ }
+
+ return out
+}
+
+func (p *printer) printItems(t []Tree, spaces []bool) string {
+ var result string
+ for i, f := range t {
+ last := i == len(t)-1
+ result += p.printText(f.Text(), spaces, last)
+ if len(f.Items()) > 0 {
+ spacesChild := append(spaces, last)
+ result += p.printItems(f.Items(), spacesChild)
+ }
+ }
+ return result
+}
diff --git a/core/pkg/resultshandling/printer/printresults.go b/core/pkg/resultshandling/printer/printresults.go
index 14f13e2e..48475e19 100644
--- a/core/pkg/resultshandling/printer/printresults.go
+++ b/core/pkg/resultshandling/printer/printresults.go
@@ -6,6 +6,7 @@ import (
"path/filepath"
logger "github.com/kubescape/go-logger"
+ "github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v2/core/cautils"
)
@@ -43,3 +44,9 @@ func GetWriter(outputFile string) *os.File {
return os.Stdout
}
+
+func LogOutputFile(fileName string) {
+ if fileName != os.Stdout.Name() && fileName != os.Stderr.Name() {
+ logger.L().Success("Scan results saved", helpers.String("filename", fileName))
+ }
+}
diff --git a/core/pkg/resultshandling/printer/v1/jsonprinter.go b/core/pkg/resultshandling/printer/v1/jsonprinter.go
index aa2d58fe..331ae19c 100644
--- a/core/pkg/resultshandling/printer/v1/jsonprinter.go
+++ b/core/pkg/resultshandling/printer/v1/jsonprinter.go
@@ -4,12 +4,19 @@ import (
"encoding/json"
"fmt"
"os"
+ "path/filepath"
+ "strings"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
)
+const (
+ jsonOutputFile = "report"
+ jsonOutputExt = ".json"
+)
+
type JsonPrinter struct {
writer *os.File
}
@@ -19,6 +26,12 @@ func NewJsonPrinter() *JsonPrinter {
}
func (jsonPrinter *JsonPrinter) SetWriter(outputFile string) {
+ if strings.TrimSpace(outputFile) == "" {
+ outputFile = jsonOutputFile
+ }
+ if filepath.Ext(strings.TrimSpace(outputFile)) != jsonOutputExt {
+ outputFile = outputFile + jsonOutputExt
+ }
jsonPrinter.writer = printer.GetWriter(outputFile)
}
@@ -41,5 +54,12 @@ func (jsonPrinter *JsonPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj
if err != nil {
logger.L().Fatal("failed to convert posture report object")
}
- jsonPrinter.writer.Write(postureReportStr)
+
+ _, err = jsonPrinter.writer.Write(postureReportStr)
+
+ if err != nil {
+ logger.L().Fatal("failed to Write posture report object into JSON output")
+ } else {
+ printer.LogOutputFile(jsonPrinter.writer.Name())
+ }
}
diff --git a/core/pkg/resultshandling/printer/v1/prometheusprinter.go b/core/pkg/resultshandling/printer/v1/prometheusprinter.go
index e06cf246..e28b3327 100644
--- a/core/pkg/resultshandling/printer/v1/prometheusprinter.go
+++ b/core/pkg/resultshandling/printer/v1/prometheusprinter.go
@@ -22,23 +22,23 @@ func NewPrometheusPrinter(verboseMode bool) *PrometheusPrinter {
}
}
-func (prometheusPrinter *PrometheusPrinter) SetWriter(outputFile string) {
- prometheusPrinter.writer = printer.GetWriter(outputFile)
+func (p *PrometheusPrinter) SetWriter(outputFile string) {
+ p.writer = printer.GetWriter(outputFile)
}
-func (prometheusPrinter *PrometheusPrinter) Score(score float32) {
+func (p *PrometheusPrinter) Score(score float32) {
fmt.Printf("\n# Overall risk-score (0- Excellent, 100- All failed)\nkubescape_score %d\n", cautils.Float32ToInt(score))
}
-func (printer *PrometheusPrinter) printResources(allResources map[string]workloadinterface.IMetadata, resourcesIDs *reporthandling.ResourcesIDs, frameworkName, controlName string) {
- printer.printDetails(allResources, resourcesIDs.GetFailedResources(), frameworkName, controlName, "failed")
- printer.printDetails(allResources, resourcesIDs.GetWarningResources(), frameworkName, controlName, "excluded")
- if printer.verboseMode {
- printer.printDetails(allResources, resourcesIDs.GetPassedResources(), frameworkName, controlName, "passed")
+func (p *PrometheusPrinter) printResources(allResources map[string]workloadinterface.IMetadata, resourcesIDs *reporthandling.ResourcesIDs, frameworkName, controlName string) {
+ p.printDetails(allResources, resourcesIDs.GetFailedResources(), frameworkName, controlName, "failed")
+ p.printDetails(allResources, resourcesIDs.GetWarningResources(), frameworkName, controlName, "excluded")
+ if p.verboseMode {
+ p.printDetails(allResources, resourcesIDs.GetPassedResources(), frameworkName, controlName, "passed")
}
}
-func (printer *PrometheusPrinter) printDetails(allResources map[string]workloadinterface.IMetadata, resourcesIDs []string, frameworkName, controlName, status string) {
+func (p *PrometheusPrinter) printDetails(allResources map[string]workloadinterface.IMetadata, resourcesIDs []string, frameworkName, controlName, status string) {
objs := make(map[string]map[string]map[string]int)
for _, resourceID := range resourcesIDs {
resource := allResources[resourceID]
@@ -56,18 +56,18 @@ func (printer *PrometheusPrinter) printDetails(allResources map[string]workloadi
for gvk, namespaces := range objs {
for namespace, names := range namespaces {
for name, value := range names {
- fmt.Fprintf(printer.writer, "# Failed object from \"%s\" control \"%s\"\n", frameworkName, controlName)
+ fmt.Fprintf(p.writer, "# Failed object from \"%s\" control \"%s\"\n", frameworkName, controlName)
if namespace != "" {
- fmt.Fprintf(printer.writer, "kubescape_object_failed_count{framework=\"%s\",control=\"%s\",namespace=\"%s\",name=\"%s\",groupVersionKind=\"%s\"} %d\n", frameworkName, controlName, namespace, name, gvk, value)
+ fmt.Fprintf(p.writer, "kubescape_object_failed_count{framework=\"%s\",control=\"%s\",namespace=\"%s\",name=\"%s\",groupVersionKind=\"%s\"} %d\n", frameworkName, controlName, namespace, name, gvk, value)
} else {
- fmt.Fprintf(printer.writer, "kubescape_object_failed_count{framework=\"%s\",control=\"%s\",name=\"%s\",groupVersionKind=\"%s\"} %d\n", frameworkName, controlName, name, gvk, value)
+ fmt.Fprintf(p.writer, "kubescape_object_failed_count{framework=\"%s\",control=\"%s\",name=\"%s\",groupVersionKind=\"%s\"} %d\n", frameworkName, controlName, name, gvk, value)
}
}
}
}
}
-func (printer *PrometheusPrinter) printReports(allResources map[string]workloadinterface.IMetadata, frameworks []reporthandling.FrameworkReport) error {
+func (p *PrometheusPrinter) printReports(allResources map[string]workloadinterface.IMetadata, frameworks []reporthandling.FrameworkReport) error {
for _, frameworkReport := range frameworks {
for _, controlReport := range frameworkReport.ControlReports {
if controlReport.GetNumberOfResources() == 0 {
@@ -76,21 +76,24 @@ func (printer *PrometheusPrinter) printReports(allResources map[string]workloadi
if controlReport.Passed() {
continue // control passed, do not print results
}
- fmt.Fprintf(printer.writer, "# Number of resources found as part of %s control %s\nkubescape_resources_found_count{framework=\"%s\",control=\"%s\"} %d\n", frameworkReport.Name, controlReport.Name, frameworkReport.Name, controlReport.Name, controlReport.GetNumberOfResources())
- fmt.Fprintf(printer.writer, "# Number of resources excluded as part of %s control %s\nkubescape_resources_excluded_count{framework=\"%s\",control=\"%s\"} %d\n", frameworkReport.Name, controlReport.Name, frameworkReport.Name, controlReport.Name, controlReport.GetNumberOfWarningResources())
- fmt.Fprintf(printer.writer, "# Number of resources failed as part of %s control %s\nkubescape_resources_failed_count{framework=\"%s\",control=\"%s\"} %d\n", frameworkReport.Name, controlReport.Name, frameworkReport.Name, controlReport.Name, controlReport.GetNumberOfFailedResources())
+ fmt.Fprintf(p.writer, "# Number of resources found as part of %s control %s\nkubescape_resources_found_count{framework=\"%s\",control=\"%s\"} %d\n", frameworkReport.Name, controlReport.Name, frameworkReport.Name, controlReport.Name, controlReport.GetNumberOfResources())
+ fmt.Fprintf(p.writer, "# Number of resources excluded as part of %s control %s\nkubescape_resources_excluded_count{framework=\"%s\",control=\"%s\"} %d\n", frameworkReport.Name, controlReport.Name, frameworkReport.Name, controlReport.Name, controlReport.GetNumberOfWarningResources())
+ fmt.Fprintf(p.writer, "# Number of resources failed as part of %s control %s\nkubescape_resources_failed_count{framework=\"%s\",control=\"%s\"} %d\n", frameworkReport.Name, controlReport.Name, frameworkReport.Name, controlReport.Name, controlReport.GetNumberOfFailedResources())
- printer.printResources(allResources, controlReport.ListResourcesIDs(), frameworkReport.Name, controlReport.Name)
+ p.printResources(allResources, controlReport.ListResourcesIDs(), frameworkReport.Name, controlReport.Name)
}
}
return nil
}
-func (printer *PrometheusPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
+func (p *PrometheusPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
report := cautils.ReportV2ToV1(opaSessionObj)
- err := printer.printReports(opaSessionObj.AllResources, report.FrameworkReports)
+ err := p.printReports(opaSessionObj.AllResources, report.FrameworkReports)
if err != nil {
logger.L().Fatal(err.Error())
+ } else {
+ printer.LogOutputFile(p.writer.Name())
}
+
}
diff --git a/core/pkg/resultshandling/printer/v2/attacktracks.go b/core/pkg/resultshandling/printer/v2/attacktracks.go
new file mode 100644
index 00000000..fee6b94e
--- /dev/null
+++ b/core/pkg/resultshandling/printer/v2/attacktracks.go
@@ -0,0 +1,140 @@
+package printer
+
+import (
+ "fmt"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/fatih/color"
+ "github.com/kubescape/kubescape/v2/core/cautils"
+ "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/gotree"
+ "github.com/kubescape/opa-utils/reporthandling/apis"
+ "github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
+ "github.com/kubescape/opa-utils/reporthandling/results/v1/prioritization"
+)
+
+const TOP_RESOURCE_COUNT = 15
+const TOP_VECTOR_COUNT = 10
+
+/* unused for now
+func (prettyPrinter *PrettyPrinter) printAttackTreeNode(node v1alpha1.IAttackTrackStep, depth int) {
+ prefix := strings.Repeat("\t", depth)
+ text := prefix + node.GetName() + "\n"
+ if len(node.GetControls()) > 0 {
+ color.Red(text)
+ } else {
+ color.Green(text)
+ }
+
+ for i := 0; i < node.Length(); i++ {
+ prettyPrinter.printAttackTreeNode(node.SubStepAt(i), depth+1)
+ }
+}
+*/
+
+func (prettyPrinter *PrettyPrinter) createFailedControlList(node v1alpha1.IAttackTrackStep) string {
+ var r string
+ for i, control := range node.GetControls() {
+ if i == 0 {
+ r = control.GetControlId()
+ } else {
+ r = fmt.Sprintf("%s, %s", r, control.GetControlId())
+ }
+ }
+ return r
+}
+
+func (prettyPrinter *PrettyPrinter) buildTreeFromAttackTrackStep(tree gotree.Tree, node v1alpha1.IAttackTrackStep) gotree.Tree {
+ nodeName := node.GetName()
+ if len(node.GetControls()) > 0 {
+ red := color.New(color.Bold, color.FgRed).SprintFunc()
+ nodeName = red(nodeName)
+ }
+
+ controlText := prettyPrinter.createFailedControlList(node)
+ if len(controlText) > 0 {
+ controlStyle := color.New(color.FgWhite, color.Faint).SprintFunc()
+ controlText = controlStyle(fmt.Sprintf(" (%s)", controlText))
+ }
+
+ subTree := gotree.New(nodeName + controlText)
+ for i := 0; i < node.Length(); i++ {
+ subTree.AddTree(prettyPrinter.buildTreeFromAttackTrackStep(tree, node.SubStepAt(i)))
+ }
+
+ if tree == nil {
+ return subTree
+ }
+
+ tree.AddTree(subTree)
+ return tree
+}
+
+func (prettyPrinter *PrettyPrinter) printResourceAttackGraph(attackTrack v1alpha1.IAttackTrack) {
+ tree := prettyPrinter.buildTreeFromAttackTrackStep(nil, attackTrack.GetData())
+ fmt.Fprintln(prettyPrinter.writer, tree.Print())
+}
+
+func getNumericValueFromEnvVar(envVar string, defaultValue int) int {
+ value := os.Getenv(envVar)
+ if value != "" {
+ if value, err := strconv.Atoi(value); err == nil {
+ return value
+ }
+ }
+ return defaultValue
+}
+func (prettyPrinter *PrettyPrinter) printAttackTracks(opaSessionObj *cautils.OPASessionObj) {
+ if !prettyPrinter.printAttackTree || opaSessionObj.ResourceAttackTracks == nil {
+ return
+ }
+
+ // check if counters are set in env vars and use them, otherwise use default values
+ topResourceCount := getNumericValueFromEnvVar("ATTACK_TREE_TOP_RESOURCES", TOP_RESOURCE_COUNT)
+ topVectorCount := getNumericValueFromEnvVar("ATTACK_TREE_TOP_VECTORS", TOP_VECTOR_COUNT)
+
+ prioritizedResources := opaSessionObj.ResourcesPrioritized
+ resourceToAttackTrack := opaSessionObj.ResourceAttackTracks
+
+ resources := make([]prioritization.PrioritizedResource, 0, len(prioritizedResources))
+ for _, value := range prioritizedResources {
+ resources = append(resources, value)
+ }
+
+ sort.Slice(resources, func(i, j int) bool {
+ return resources[i].Score > resources[j].Score
+ })
+
+ for i := 0; i < topResourceCount && i < len(resources); i++ {
+ fmt.Fprintf(prettyPrinter.writer, "\n"+getSeparator("^")+"\n")
+ resource := resources[i]
+ resourceObj := opaSessionObj.AllResources[resource.ResourceID]
+
+ fmt.Fprintf(prettyPrinter.writer, "Name: %s\n", resourceObj.GetName())
+ fmt.Fprintf(prettyPrinter.writer, "Kind: %s\n", resourceObj.GetKind())
+ fmt.Fprintf(prettyPrinter.writer, "Namespace: %s\n\n", resourceObj.GetNamespace())
+
+ fmt.Fprintf(prettyPrinter.writer, "Score: %.2f\n", resource.Score)
+ fmt.Fprintf(prettyPrinter.writer, "Severity: %s\n", apis.SeverityNumberToString(resource.Severity))
+ fmt.Fprintf(prettyPrinter.writer, "Total vectors: %v\n\n", len(resources[i].PriorityVector))
+
+ prettyPrinter.printResourceAttackGraph(resourceToAttackTrack[resource.ResourceID])
+
+ sort.Slice(resource.PriorityVector, func(x, y int) bool {
+ return resource.PriorityVector[x].Score > resource.PriorityVector[y].Score
+ })
+
+ for j := 0; j < topVectorCount && j < len(resources[i].PriorityVector); j++ {
+ priorityVector := resource.PriorityVector[j]
+
+ vectorStrings := []string{}
+ for _, controlId := range priorityVector.ListControls() {
+ vectorStrings = append(vectorStrings, fmt.Sprintf("%s (%s)", controlId.Category, controlId.ControlID))
+ }
+
+ fmt.Fprintf(prettyPrinter.writer, "%v) [%.2f] [Severity: %v] [Attack Track: %v]: %v \n", j+1, priorityVector.Score, apis.SeverityNumberToString(priorityVector.Severity), priorityVector.AttackTrackName, strings.Join(vectorStrings, " -> "))
+ }
+ }
+}
diff --git a/core/pkg/resultshandling/printer/v2/controltable.go b/core/pkg/resultshandling/printer/v2/controltable.go
index 9042d2c4..f445a432 100644
--- a/core/pkg/resultshandling/printer/v2/controltable.go
+++ b/core/pkg/resultshandling/printer/v2/controltable.go
@@ -91,6 +91,7 @@ func getSortedControlsIDs(controls reportsummary.ControlSummaries) [][]string {
return controlIDs
}
+/* unused for now
func getSortedControlsNames(controls reportsummary.ControlSummaries) [][]string {
controlNames := make([][]string, 5)
for k := range controls {
@@ -103,6 +104,7 @@ func getSortedControlsNames(controls reportsummary.ControlSummaries) [][]string
}
return controlNames
}
+*/
func getControlTableHeaders() []string {
headers := make([]string, _rowLen)
diff --git a/core/pkg/resultshandling/printer/v2/htmlprinter.go b/core/pkg/resultshandling/printer/v2/htmlprinter.go
index 80d6cd90..5a119e3e 100644
--- a/core/pkg/resultshandling/printer/v2/htmlprinter.go
+++ b/core/pkg/resultshandling/printer/v2/htmlprinter.go
@@ -38,17 +38,17 @@ func NewHtmlPrinter() *HtmlPrinter {
return &HtmlPrinter{}
}
-func (htmlPrinter *HtmlPrinter) SetWriter(outputFile string) {
- if outputFile == "" {
+func (hp *HtmlPrinter) SetWriter(outputFile string) {
+ if strings.TrimSpace(outputFile) == "" {
outputFile = htmlOutputFile
}
if filepath.Ext(strings.TrimSpace(outputFile)) != htmlOutputExt {
outputFile = outputFile + htmlOutputExt
}
- htmlPrinter.writer = printer.GetWriter(outputFile)
+ hp.writer = printer.GetWriter(outputFile)
}
-func (htmlPrinter *HtmlPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
+func (hp *HtmlPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
tplFuncMap := template.FuncMap{
"sum": func(nums ...int) int {
total := 0
@@ -104,14 +104,16 @@ func (htmlPrinter *HtmlPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj
resourceTableView := buildResourceTableView(opaSessionObj)
reportingCtx := HTMLReportingCtx{opaSessionObj, resourceTableView}
- err := tpl.Execute(htmlPrinter.writer, reportingCtx)
+ err := tpl.Execute(hp.writer, reportingCtx)
if err != nil {
logger.L().Error("failed to render template", helpers.Error(err))
+ } else {
+ printer.LogOutputFile(hp.writer.Name())
}
+
}
-func (htmlPrinter *HtmlPrinter) Score(score float32) {
- return
+func (hp *HtmlPrinter) Score(score float32) {
}
func buildResourceTableView(opaSessionObj *cautils.OPASessionObj) ResourceTableView {
diff --git a/core/pkg/resultshandling/printer/v2/jsonprinter.go b/core/pkg/resultshandling/printer/v2/jsonprinter.go
index 57d8fe9f..752e6933 100644
--- a/core/pkg/resultshandling/printer/v2/jsonprinter.go
+++ b/core/pkg/resultshandling/printer/v2/jsonprinter.go
@@ -4,6 +4,8 @@ import (
"encoding/json"
"fmt"
"os"
+ "path/filepath"
+ "strings"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
@@ -11,6 +13,11 @@ import (
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
)
+const (
+ jsonOutputFile = "report"
+ jsonOutputExt = ".json"
+)
+
type JsonPrinter struct {
writer *os.File
}
@@ -19,22 +26,29 @@ func NewJsonPrinter() *JsonPrinter {
return &JsonPrinter{}
}
-func (jsonPrinter *JsonPrinter) SetWriter(outputFile string) {
- jsonPrinter.writer = printer.GetWriter(outputFile)
+func (jp *JsonPrinter) SetWriter(outputFile string) {
+ if strings.TrimSpace(outputFile) == "" {
+ outputFile = jsonOutputFile
+ }
+ if filepath.Ext(strings.TrimSpace(outputFile)) != jsonOutputExt {
+ outputFile = outputFile + jsonOutputExt
+ }
+ jp.writer = printer.GetWriter(outputFile)
}
-func (jsonPrinter *JsonPrinter) Score(score float32) {
+func (jp *JsonPrinter) Score(score float32) {
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", cautils.Float32ToInt(score))
}
-func (jsonPrinter *JsonPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
+func (jp *JsonPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
r, err := json.Marshal(FinalizeResults(opaSessionObj))
if err != nil {
logger.L().Fatal("failed to Marshal posture report object")
}
- logOUtputFile(jsonPrinter.writer.Name())
- if _, err := jsonPrinter.writer.Write(r); err != nil {
+ if _, err := jp.writer.Write(r); err != nil {
logger.L().Error("failed to write results", helpers.Error(err))
+ } else {
+ printer.LogOutputFile(jp.writer.Name())
}
}
diff --git a/core/pkg/resultshandling/printer/v2/junit.go b/core/pkg/resultshandling/printer/v2/junit.go
index f36f6b85..354b97ca 100644
--- a/core/pkg/resultshandling/printer/v2/junit.go
+++ b/core/pkg/resultshandling/printer/v2/junit.go
@@ -4,6 +4,7 @@ import (
"encoding/xml"
"fmt"
"os"
+ "path/filepath"
"sort"
"strings"
@@ -16,6 +17,11 @@ import (
"github.com/kubescape/opa-utils/shared"
)
+const (
+ junitOutputFile = "report"
+ junitOutputExt = ".xml"
+)
+
/*
riskScore
status
@@ -92,24 +98,31 @@ func NewJunitPrinter(verbose bool) *JunitPrinter {
}
}
-func (junitPrinter *JunitPrinter) SetWriter(outputFile string) {
- junitPrinter.writer = printer.GetWriter(outputFile)
+func (jp *JunitPrinter) SetWriter(outputFile string) {
+ if strings.TrimSpace(outputFile) == "" {
+ outputFile = junitOutputFile
+ }
+ if filepath.Ext(strings.TrimSpace(outputFile)) != junitOutputExt {
+ outputFile = outputFile + junitOutputExt
+ }
+ jp.writer = printer.GetWriter(outputFile)
}
-func (junitPrinter *JunitPrinter) Score(score float32) {
+func (jp *JunitPrinter) Score(score float32) {
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", cautils.Float32ToInt(score))
}
-func (junitPrinter *JunitPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
+func (jp *JunitPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
junitResult := testsSuites(opaSessionObj)
postureReportStr, err := xml.Marshal(junitResult)
if err != nil {
logger.L().Fatal("failed to Marshal xml result object", helpers.Error(err))
}
- logOUtputFile(junitPrinter.writer.Name())
- if _, err := junitPrinter.writer.Write(postureReportStr); err != nil {
+ if _, err := jp.writer.Write(postureReportStr); err != nil {
logger.L().Error("failed to write results", helpers.Error(err))
+ } else {
+ printer.LogOutputFile(jp.writer.Name())
}
}
diff --git a/core/pkg/resultshandling/printer/v2/pdf.go b/core/pkg/resultshandling/printer/v2/pdf.go
index 9955a785..a4bbdf25 100644
--- a/core/pkg/resultshandling/printer/v2/pdf.go
+++ b/core/pkg/resultshandling/printer/v2/pdf.go
@@ -39,22 +39,22 @@ func NewPdfPrinter() *PdfPrinter {
return &PdfPrinter{}
}
-func (pdfPrinter *PdfPrinter) SetWriter(outputFile string) {
+func (pp *PdfPrinter) SetWriter(outputFile string) {
// Ensure to have an available output file, otherwise create it.
- if outputFile == "" {
+ if strings.TrimSpace(outputFile) == "" {
outputFile = pdfOutputFile
}
// Ensure to have the right file extension.
if filepath.Ext(strings.TrimSpace(outputFile)) != pdfOutputExt {
outputFile = outputFile + pdfOutputExt
}
- pdfPrinter.writer = printer.GetWriter(outputFile)
+ pp.writer = printer.GetWriter(outputFile)
}
-func (pdfPrinter *PdfPrinter) Score(score float32) {
+func (pp *PdfPrinter) Score(score float32) {
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", cautils.Float32ToInt(score))
}
-func (pdfPrinter *PdfPrinter) printInfo(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails, infoMap []infoStars) {
+func (pp *PdfPrinter) printInfo(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails, infoMap []infoStars) {
emptyRowCounter := 1
for i := range infoMap {
if infoMap[i].info != "" {
@@ -75,16 +75,16 @@ func (pdfPrinter *PdfPrinter) printInfo(m pdf.Maroto, summaryDetails *reportsumm
}
-func (pdfPrinter *PdfPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
+func (pp *PdfPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
sortedControlIDs := getSortedControlsIDs(opaSessionObj.Report.SummaryDetails.Controls)
infoToPrintInfo := mapInfoToPrintInfo(opaSessionObj.Report.SummaryDetails.Controls)
m := pdf.NewMaroto(consts.Portrait, consts.A4)
- pdfPrinter.printHeader(m)
- pdfPrinter.printFramework(m, opaSessionObj.Report.SummaryDetails.ListFrameworks())
- pdfPrinter.printTable(m, &opaSessionObj.Report.SummaryDetails, sortedControlIDs)
- pdfPrinter.printFinalResult(m, &opaSessionObj.Report.SummaryDetails)
- pdfPrinter.printInfo(m, &opaSessionObj.Report.SummaryDetails, infoToPrintInfo)
+ pp.printHeader(m)
+ pp.printFramework(m, opaSessionObj.Report.SummaryDetails.ListFrameworks())
+ pp.printTable(m, &opaSessionObj.Report.SummaryDetails, sortedControlIDs)
+ pp.printFinalResult(m, &opaSessionObj.Report.SummaryDetails)
+ pp.printInfo(m, &opaSessionObj.Report.SummaryDetails, infoToPrintInfo)
// Extrat output buffer.
outBuff, err := m.Output()
@@ -93,14 +93,15 @@ func (pdfPrinter *PdfPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj)
return
}
- logOUtputFile(pdfPrinter.writer.Name())
- if _, err := pdfPrinter.writer.Write(outBuff.Bytes()); err != nil {
+ if _, err := pp.writer.Write(outBuff.Bytes()); err != nil {
logger.L().Error("failed to write results", helpers.Error(err))
+ } else {
+ printer.LogOutputFile(pp.writer.Name())
}
}
-// Print Kubescape logo and report date.
-func (pdfPrinter *PdfPrinter) printHeader(m pdf.Maroto) {
+// printHeader prints the Kubescape logo and report date
+func (pp *PdfPrinter) printHeader(m pdf.Maroto) {
// Retrieve current time (we need it for the report timestamp).
t := time.Now()
// Enconde PNG into Base64 to embed it into the pdf.
@@ -136,8 +137,8 @@ func (pdfPrinter *PdfPrinter) printHeader(m pdf.Maroto) {
m.Line(1)
}
-// Print pdf frameworks after pdf header.
-func (pdfPrinter *PdfPrinter) printFramework(m pdf.Maroto, frameworks []reportsummary.IFrameworkSummary) {
+// printFramework prints the PDF frameworks after the PDF header
+func (pp *PdfPrinter) printFramework(m pdf.Maroto, frameworks []reportsummary.IFrameworkSummary) {
m.Row(10, func() {
m.Text(frameworksScoresToString(frameworks), props.Text{
Align: consts.Center,
@@ -148,8 +149,8 @@ func (pdfPrinter *PdfPrinter) printFramework(m pdf.Maroto, frameworks []reportsu
})
}
-// Create pdf table
-func (pdfPrinter *PdfPrinter) printTable(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
+// printTable creates the PDF table
+func (pp *PdfPrinter) printTable(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
headers := getControlTableHeaders()
infoToPrintInfoMap := mapInfoToPrintInfo(summaryDetails.Controls)
controls := make([][]string, len(sortedControlIDs))
@@ -186,8 +187,8 @@ func (pdfPrinter *PdfPrinter) printTable(m pdf.Maroto, summaryDetails *reportsum
m.Row(2, func() {})
}
-// Add final results.
-func (pdfPrinter *PdfPrinter) printFinalResult(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails) {
+// printFinalResult adds the final results
+func (pp *PdfPrinter) printFinalResult(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails) {
m.Row(_rowLen, func() {
m.Col(3, func() {
m.Text("Resource summary", props.Text{
diff --git a/core/pkg/resultshandling/printer/v2/prettyprinter.go b/core/pkg/resultshandling/printer/v2/prettyprinter.go
index afe12462..9d6ade23 100644
--- a/core/pkg/resultshandling/printer/v2/prettyprinter.go
+++ b/core/pkg/resultshandling/printer/v2/prettyprinter.go
@@ -3,7 +3,9 @@ package printer
import (
"fmt"
"os"
+ "path/filepath"
"sort"
+ "strings"
"github.com/enescakir/emoji"
"github.com/kubescape/k8s-interface/workloadinterface"
@@ -16,94 +18,123 @@ import (
"github.com/olekukonko/tablewriter"
)
+const (
+ prettyPrinterOutputFile = "report"
+ prettyPrinterOutputExt = ".txt"
+)
+
type PrettyPrinter struct {
- formatVersion string
- viewType cautils.ViewTypes
- writer *os.File
- verboseMode bool
+ writer *os.File
+ formatVersion string
+ viewType cautils.ViewTypes
+ verboseMode bool
+ printAttackTree bool
}
-func NewPrettyPrinter(verboseMode bool, formatVersion string, viewType cautils.ViewTypes) *PrettyPrinter {
+func NewPrettyPrinter(verboseMode bool, formatVersion string, attackTree bool, viewType cautils.ViewTypes) *PrettyPrinter {
return &PrettyPrinter{
- verboseMode: verboseMode,
- formatVersion: formatVersion,
- viewType: viewType,
+ verboseMode: verboseMode,
+ formatVersion: formatVersion,
+ viewType: viewType,
+ printAttackTree: attackTree,
}
}
-func (prettyPrinter *PrettyPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
- fmt.Fprintf(prettyPrinter.writer, "\n"+getSeparator("^")+"\n")
+func (pp *PrettyPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
+ fmt.Fprintf(pp.writer, "\n"+getSeparator("^")+"\n")
sortedControlIDs := getSortedControlsIDs(opaSessionObj.Report.SummaryDetails.Controls) // ListControls().All())
- switch prettyPrinter.viewType {
+ switch pp.viewType {
case cautils.ControlViewType:
- prettyPrinter.printResults(&opaSessionObj.Report.SummaryDetails.Controls, opaSessionObj.AllResources, sortedControlIDs)
+ pp.printResults(&opaSessionObj.Report.SummaryDetails.Controls, opaSessionObj.AllResources, sortedControlIDs)
case cautils.ResourceViewType:
- if prettyPrinter.verboseMode {
- prettyPrinter.resourceTable(opaSessionObj)
+ if pp.verboseMode {
+ pp.resourceTable(opaSessionObj)
}
}
- prettyPrinter.printSummaryTable(&opaSessionObj.Report.SummaryDetails, sortedControlIDs)
+ pp.printSummaryTable(&opaSessionObj.Report.SummaryDetails, sortedControlIDs)
+ // When writing to Stdout, we aren’t really writing to an output file,
+ // so no need to print that we are
+ if pp.writer.Name() != os.Stdout.Name() {
+ printer.LogOutputFile(pp.writer.Name())
+ }
+
+ pp.printAttackTracks(opaSessionObj)
}
-func (prettyPrinter *PrettyPrinter) SetWriter(outputFile string) {
- prettyPrinter.writer = printer.GetWriter(outputFile)
+func (pp *PrettyPrinter) SetWriter(outputFile string) {
+ // PrettyPrinter should accept Stdout at least by its full name (path)
+ // and follow the common behavior of outputting to a default filename
+ // otherwise
+ if outputFile == os.Stdout.Name() {
+ pp.writer = printer.GetWriter("")
+ return
+ }
+
+ if strings.TrimSpace(outputFile) == "" {
+ outputFile = prettyPrinterOutputFile
+ }
+ if filepath.Ext(strings.TrimSpace(outputFile)) != prettyPrinterOutputExt {
+ outputFile = outputFile + prettyPrinterOutputExt
+ }
+
+ pp.writer = printer.GetWriter(outputFile)
}
-func (prettyPrinter *PrettyPrinter) Score(score float32) {
+func (pp *PrettyPrinter) Score(score float32) {
}
-func (prettyPrinter *PrettyPrinter) printResults(controls *reportsummary.ControlSummaries, allResources map[string]workloadinterface.IMetadata, sortedControlIDs [][]string) {
+func (pp *PrettyPrinter) printResults(controls *reportsummary.ControlSummaries, allResources map[string]workloadinterface.IMetadata, sortedControlIDs [][]string) {
for i := len(sortedControlIDs) - 1; i >= 0; i-- {
for _, c := range sortedControlIDs[i] {
controlSummary := controls.GetControl(reportsummary.EControlCriteriaID, c) // summaryDetails.Controls ListControls().All() Controls.GetControl(ca)
- prettyPrinter.printTitle(controlSummary)
- prettyPrinter.printResources(controlSummary, allResources)
- prettyPrinter.printSummary(c, controlSummary)
+ pp.printTitle(controlSummary)
+ pp.printResources(controlSummary, allResources)
+ pp.printSummary(c, controlSummary)
}
}
}
-func (prettyPrinter *PrettyPrinter) printSummary(controlName string, controlSummary reportsummary.IControlSummary) {
+func (pp *PrettyPrinter) printSummary(controlName string, controlSummary reportsummary.IControlSummary) {
if controlSummary.GetStatus().IsSkipped() {
return
}
- cautils.SimpleDisplay(prettyPrinter.writer, "Summary - ")
- cautils.SuccessDisplay(prettyPrinter.writer, "Passed:%v ", controlSummary.NumberOfResources().Passed())
- cautils.WarningDisplay(prettyPrinter.writer, "Excluded:%v ", controlSummary.NumberOfResources().Excluded())
- cautils.FailureDisplay(prettyPrinter.writer, "Failed:%v ", controlSummary.NumberOfResources().Failed())
- cautils.InfoDisplay(prettyPrinter.writer, "Total:%v\n", controlSummary.NumberOfResources().All())
+ cautils.SimpleDisplay(pp.writer, "Summary - ")
+ cautils.SuccessDisplay(pp.writer, "Passed:%v ", controlSummary.NumberOfResources().Passed())
+ cautils.WarningDisplay(pp.writer, "Excluded:%v ", controlSummary.NumberOfResources().Excluded())
+ cautils.FailureDisplay(pp.writer, "Failed:%v ", controlSummary.NumberOfResources().Failed())
+ cautils.InfoDisplay(pp.writer, "Total:%v\n", controlSummary.NumberOfResources().All())
if controlSummary.GetStatus().IsFailed() {
- cautils.DescriptionDisplay(prettyPrinter.writer, "Remediation: %v\n", controlSummary.GetRemediation())
+ cautils.DescriptionDisplay(pp.writer, "Remediation: %v\n", controlSummary.GetRemediation())
}
- cautils.DescriptionDisplay(prettyPrinter.writer, "\n")
+ cautils.DescriptionDisplay(pp.writer, "\n")
}
-func (prettyPrinter *PrettyPrinter) printTitle(controlSummary reportsummary.IControlSummary) {
- cautils.InfoDisplay(prettyPrinter.writer, "[control: %s - %s] ", controlSummary.GetName(), cautils.GetControlLink(controlSummary.GetID()))
+func (pp *PrettyPrinter) printTitle(controlSummary reportsummary.IControlSummary) {
+ cautils.InfoDisplay(pp.writer, "[control: %s - %s] ", controlSummary.GetName(), cautils.GetControlLink(controlSummary.GetID()))
switch controlSummary.GetStatus().Status() {
case apis.StatusSkipped:
- cautils.InfoDisplay(prettyPrinter.writer, "skipped %v\n", emoji.ConfusedFace)
+ cautils.InfoDisplay(pp.writer, "skipped %v\n", emoji.ConfusedFace)
case apis.StatusFailed:
- cautils.FailureDisplay(prettyPrinter.writer, "failed %v\n", emoji.SadButRelievedFace)
+ cautils.FailureDisplay(pp.writer, "failed %v\n", emoji.SadButRelievedFace)
case apis.StatusExcluded:
- cautils.WarningDisplay(prettyPrinter.writer, "excluded %v\n", emoji.NeutralFace)
+ cautils.WarningDisplay(pp.writer, "excluded %v\n", emoji.NeutralFace)
case apis.StatusIrrelevant:
- cautils.SuccessDisplay(prettyPrinter.writer, "irrelevant %v\n", emoji.ConfusedFace)
+ cautils.SuccessDisplay(pp.writer, "irrelevant %v\n", emoji.ConfusedFace)
case apis.StatusError:
- cautils.WarningDisplay(prettyPrinter.writer, "error %v\n", emoji.ConfusedFace)
+ cautils.WarningDisplay(pp.writer, "error %v\n", emoji.ConfusedFace)
default:
- cautils.SuccessDisplay(prettyPrinter.writer, "passed %v\n", emoji.ThumbsUp)
+ cautils.SuccessDisplay(pp.writer, "passed %v\n", emoji.ThumbsUp)
}
- cautils.DescriptionDisplay(prettyPrinter.writer, "Description: %s\n", controlSummary.GetDescription())
+ cautils.DescriptionDisplay(pp.writer, "Description: %s\n", controlSummary.GetDescription())
if controlSummary.GetStatus().Info() != "" {
- cautils.WarningDisplay(prettyPrinter.writer, "Reason: %v\n", controlSummary.GetStatus().Info())
+ cautils.WarningDisplay(pp.writer, "Reason: %v\n", controlSummary.GetStatus().Info())
}
}
-func (prettyPrinter *PrettyPrinter) printResources(controlSummary reportsummary.IControlSummary, allResources map[string]workloadinterface.IMetadata) {
+func (pp *PrettyPrinter) printResources(controlSummary reportsummary.IControlSummary, allResources map[string]workloadinterface.IMetadata) {
workloadsSummary := listResultSummary(controlSummary, allResources)
@@ -111,35 +142,34 @@ func (prettyPrinter *PrettyPrinter) printResources(controlSummary reportsummary.
excludedWorkloads := groupByNamespaceOrKind(workloadsSummary, workloadSummaryExclude)
var passedWorkloads map[string][]WorkloadSummary
- if prettyPrinter.verboseMode {
+ if pp.verboseMode {
passedWorkloads = groupByNamespaceOrKind(workloadsSummary, workloadSummaryPassed)
}
if len(failedWorkloads) > 0 {
- cautils.FailureDisplay(prettyPrinter.writer, "Failed:\n")
- prettyPrinter.printGroupedResources(failedWorkloads)
+ cautils.FailureDisplay(pp.writer, "Failed:\n")
+ pp.printGroupedResources(failedWorkloads)
}
if len(excludedWorkloads) > 0 {
- cautils.WarningDisplay(prettyPrinter.writer, "Excluded:\n")
- prettyPrinter.printGroupedResources(excludedWorkloads)
+ cautils.WarningDisplay(pp.writer, "Excluded:\n")
+ pp.printGroupedResources(excludedWorkloads)
}
if len(passedWorkloads) > 0 {
- cautils.SuccessDisplay(prettyPrinter.writer, "Passed:\n")
- prettyPrinter.printGroupedResources(passedWorkloads)
+ cautils.SuccessDisplay(pp.writer, "Passed:\n")
+ pp.printGroupedResources(passedWorkloads)
}
}
-func (prettyPrinter *PrettyPrinter) printGroupedResources(workloads map[string][]WorkloadSummary) {
+func (pp *PrettyPrinter) printGroupedResources(workloads map[string][]WorkloadSummary) {
indent := " "
for title, rsc := range workloads {
- prettyPrinter.printGroupedResource(indent, title, rsc)
+ pp.printGroupedResource(indent, title, rsc)
}
}
-func (prettyPrinter *PrettyPrinter) printGroupedResource(indent string, title string, rsc []WorkloadSummary) {
- preIndent := indent
+func (pp *PrettyPrinter) printGroupedResource(indent string, title string, rsc []WorkloadSummary) {
if title != "" {
- cautils.SimpleDisplay(prettyPrinter.writer, "%s%s\n", indent, title)
+ cautils.SimpleDisplay(pp.writer, "%s%s\n", indent, title)
indent += indent
}
@@ -151,10 +181,8 @@ func (prettyPrinter *PrettyPrinter) printGroupedResource(indent string, title st
sort.Strings(resources)
for i := range resources {
- cautils.SimpleDisplay(prettyPrinter.writer, resources[i]+"\n")
+ cautils.SimpleDisplay(pp.writer, resources[i]+"\n")
}
-
- indent = preIndent
}
func generateRelatedObjectsStr(workload WorkloadSummary) string {
@@ -185,24 +213,24 @@ func generateFooter(summaryDetails *reportsummary.SummaryDetails) []string {
return row
}
-func (prettyPrinter *PrettyPrinter) printSummaryTable(summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
+func (pp *PrettyPrinter) printSummaryTable(summaryDetails *reportsummary.SummaryDetails, sortedControlIDs [][]string) {
if summaryDetails.NumberOfControls().All() == 0 {
- fmt.Fprintf(prettyPrinter.writer, "\nKubescape did not scan any of the resources, make sure you are scanning valid kubernetes manifests (Deployments, Pods, etc.)\n")
+ fmt.Fprintf(pp.writer, "\nKubescape did not scan any of the resources, make sure you are scanning valid kubernetes manifests (Deployments, Pods, etc.)\n")
return
}
- cautils.InfoTextDisplay(prettyPrinter.writer, "\n"+controlCountersForSummary(summaryDetails.NumberOfControls())+"\n")
- cautils.InfoTextDisplay(prettyPrinter.writer, renderSeverityCountersSummary(summaryDetails.GetResourcesSeverityCounters())+"\n\n")
+ cautils.InfoTextDisplay(pp.writer, "\n"+controlCountersForSummary(summaryDetails.NumberOfControls())+"\n")
+ cautils.InfoTextDisplay(pp.writer, renderSeverityCountersSummary(summaryDetails.GetResourcesSeverityCounters())+"\n\n")
// cautils.InfoTextDisplay(prettyPrinter.writer, "\n"+"Severities: SOME OTHER"+"\n\n")
- summaryTable := tablewriter.NewWriter(prettyPrinter.writer)
+ summaryTable := tablewriter.NewWriter(pp.writer)
summaryTable.SetAutoWrapText(false)
summaryTable.SetHeader(getControlTableHeaders())
summaryTable.SetHeaderLine(true)
summaryTable.SetColumnAlignment(getColumnsAlignments())
- printAll := prettyPrinter.verboseMode
+ printAll := pp.verboseMode
if summaryDetails.NumberOfResources().Failed() == 0 {
// if there are no failed controls, print the resource table and detailed information
printAll = true
@@ -223,16 +251,16 @@ func (prettyPrinter *PrettyPrinter) printSummaryTable(summaryDetails *reportsumm
summaryTable.Render()
// When scanning controls the framework list will be empty
- cautils.InfoTextDisplay(prettyPrinter.writer, frameworksScoresToString(summaryDetails.ListFrameworks()))
+ cautils.InfoTextDisplay(pp.writer, frameworksScoresToString(summaryDetails.ListFrameworks()))
- prettyPrinter.printInfo(infoToPrintInfo)
+ pp.printInfo(infoToPrintInfo)
}
-func (prettyPrinter *PrettyPrinter) printInfo(infoToPrintInfo []infoStars) {
+func (pp *PrettyPrinter) printInfo(infoToPrintInfo []infoStars) {
fmt.Println()
for i := range infoToPrintInfo {
- cautils.InfoDisplay(prettyPrinter.writer, fmt.Sprintf("%s %s\n", infoToPrintInfo[i].stars, infoToPrintInfo[i].info))
+ cautils.InfoDisplay(pp.writer, fmt.Sprintf("%s %s\n", infoToPrintInfo[i].stars, infoToPrintInfo[i].info))
}
}
diff --git a/core/pkg/resultshandling/printer/v2/prometheus.go b/core/pkg/resultshandling/printer/v2/prometheus.go
index 0c57b7d7..ad85dfcb 100644
--- a/core/pkg/resultshandling/printer/v2/prometheus.go
+++ b/core/pkg/resultshandling/printer/v2/prometheus.go
@@ -24,15 +24,15 @@ func NewPrometheusPrinter(verboseMode bool) *PrometheusPrinter {
}
}
-func (prometheusPrinter *PrometheusPrinter) SetWriter(outputFile string) {
- prometheusPrinter.writer = printer.GetWriter(outputFile)
+func (pp *PrometheusPrinter) SetWriter(outputFile string) {
+ pp.writer = printer.GetWriter(outputFile)
}
-func (prometheusPrinter *PrometheusPrinter) Score(score float32) {
+func (pp *PrometheusPrinter) Score(score float32) {
fmt.Printf("\n# Overall risk-score (0- Excellent, 100- All failed)\nkubescape_score %d\n", cautils.Float32ToInt(score))
}
-func (printer *PrometheusPrinter) generatePrometheusFormat(
+func (pp *PrometheusPrinter) generatePrometheusFormat(
resources map[string]workloadinterface.IMetadata,
results map[string]resourcesresults.Result,
summaryDetails *reportsummary.SummaryDetails) *Metrics {
@@ -44,12 +44,13 @@ func (printer *PrometheusPrinter) generatePrometheusFormat(
return m
}
-func (printer *PrometheusPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
+func (pp *PrometheusPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
- metrics := printer.generatePrometheusFormat(opaSessionObj.AllResources, opaSessionObj.ResourcesResult, &opaSessionObj.Report.SummaryDetails)
+ metrics := pp.generatePrometheusFormat(opaSessionObj.AllResources, opaSessionObj.ResourcesResult, &opaSessionObj.Report.SummaryDetails)
- logOUtputFile(printer.writer.Name())
- if _, err := printer.writer.Write([]byte(metrics.String())); err != nil {
+ if _, err := pp.writer.Write([]byte(metrics.String())); err != nil {
logger.L().Error("failed to write results", helpers.Error(err))
+ } else {
+ printer.LogOutputFile(pp.writer.Name())
}
}
diff --git a/core/pkg/resultshandling/printer/v2/prometheusutils.go b/core/pkg/resultshandling/printer/v2/prometheusutils.go
index 89f351e2..319a6a2f 100644
--- a/core/pkg/resultshandling/printer/v2/prometheusutils.go
+++ b/core/pkg/resultshandling/printer/v2/prometheusutils.go
@@ -4,11 +4,9 @@ import (
"fmt"
"strings"
- "github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
- "github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
)
type metricsName string
@@ -231,11 +229,11 @@ type mFrameworkRiskScore struct {
}
type mResources struct {
- name string
- namespace string
- apiVersion string
- kind string
- controlsCountPassed int
+ name string
+ namespace string
+ apiVersion string
+ kind string
+ // controlsCountPassed int // unused
controlsCountFailed int
controlsCountExcluded int
}
@@ -298,6 +296,7 @@ func (m *Metrics) setRiskScores(summaryDetails *reportsummary.SummaryDetails) {
}
}
+/* unused for now
// return -> (passed, exceluded, failed)
func resourceControlStatusCounters(result *resourcesresults.Result) (int, int, int) {
failed := 0
@@ -315,15 +314,18 @@ func resourceControlStatusCounters(result *resourcesresults.Result) (int, int, i
}
return passed, excluded, failed
}
+
func (m *Metrics) setResourcesCounters(
resources map[string]workloadinterface.IMetadata,
results map[string]resourcesresults.Result) {
- for resourceID, result := range results {
+ for resourceID, toPin := range results {
r, ok := resources[resourceID]
if !ok {
continue
}
+ result := toPin
+
passed, excluded, failed := resourceControlStatusCounters(&result)
mrc := mResources{}
@@ -339,5 +341,5 @@ func (m *Metrics) setResourcesCounters(
m.listResources = append(m.listResources, mrc)
}
-
}
+*/
diff --git a/core/pkg/resultshandling/printer/v2/sarifprinter.go b/core/pkg/resultshandling/printer/v2/sarifprinter.go
index c926b34c..612e353f 100644
--- a/core/pkg/resultshandling/printer/v2/sarifprinter.go
+++ b/core/pkg/resultshandling/printer/v2/sarifprinter.go
@@ -62,11 +62,10 @@ func NewSARIFPrinter() *SARIFPrinter {
}
func (sp *SARIFPrinter) Score(score float32) {
- return
}
func (sp *SARIFPrinter) SetWriter(outputFile string) {
- if outputFile == "" {
+ if strings.TrimSpace(outputFile) == "" {
outputFile = sarifOutputFile
}
if filepath.Ext(strings.TrimSpace(outputFile)) != sarifOutputExt {
@@ -129,7 +128,9 @@ func (sp *SARIFPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
logger.L().Debug("failed to create location resolver", helpers.Error(err))
}
- for _, ac := range result.AssociatedControls {
+ for _, toPin := range result.AssociatedControls {
+ ac := toPin
+
if ac.GetStatus(nil).IsFailed() {
ctl := opaSessionObj.Report.SummaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, ac.GetID())
location := sp.resolveFixLocation(opaSessionObj, locationResolver, &ac, resourceID)
@@ -144,6 +145,8 @@ func (sp *SARIFPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
report.AddRun(run)
report.PrettyWrite(sp.writer)
+
+ printer.LogOutputFile(sp.writer.Name())
}
func (sp *SARIFPrinter) resolveFixLocation(opaSessionObj *cautils.OPASessionObj, locationResolver *locationresolver.FixPathLocationResolver, ac *resourcesresults.ResourceAssociatedControl, resourceID string) locationresolver.Location {
diff --git a/core/pkg/resultshandling/printer/v2/sarifprinter_test.go b/core/pkg/resultshandling/printer/v2/sarifprinter_test.go
index 2d78c5bf..40986fce 100644
--- a/core/pkg/resultshandling/printer/v2/sarifprinter_test.go
+++ b/core/pkg/resultshandling/printer/v2/sarifprinter_test.go
@@ -5,7 +5,7 @@ import "testing"
func Test_scoreToSeverityLevel(t *testing.T) {
tc := []struct {
Name string
- ScoreFactor float32
+ ScoreFactor float32
ExpectedSARIFLevel sarifSeverityLevel
}{
{"Score factor 1.0 should map to 'note' SARIF level", 1.0, sarifSeverityLevelNote},
diff --git a/core/pkg/resultshandling/printer/v2/utils.go b/core/pkg/resultshandling/printer/v2/utils.go
index f436aec3..b6ba3ac7 100644
--- a/core/pkg/resultshandling/printer/v2/utils.go
+++ b/core/pkg/resultshandling/printer/v2/utils.go
@@ -1,8 +1,6 @@
package printer
import (
- logger "github.com/kubescape/go-logger"
- "github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/opa-utils/reporthandling"
@@ -83,10 +81,3 @@ func finalizeResources(results []resourcesresults.Result, allResources map[strin
}
return resources
}
-
-func logOUtputFile(fileName string) {
- if fileName != "/dev/stdout" && fileName != "/dev/stderr" {
- logger.L().Success("Scan results saved", helpers.String("filename", fileName))
- }
-
-}
diff --git a/core/pkg/resultshandling/reporter/v2/mockreporter.go b/core/pkg/resultshandling/reporter/v2/mockreporter.go
index e97107b0..4d265b5d 100644
--- a/core/pkg/resultshandling/reporter/v2/mockreporter.go
+++ b/core/pkg/resultshandling/reporter/v2/mockreporter.go
@@ -2,14 +2,12 @@ package reporter
import (
"fmt"
+ "net/url"
"os"
"github.com/kubescape/kubescape/v2/core/cautils"
- "github.com/kubescape/kubescape/v2/core/cautils/getter"
)
-const NO_SUBMIT_QUERY = "utm_source=GitHub&utm_medium=CLI&utm_campaign=no_submit"
-
type ReportMock struct {
query string
message string
@@ -32,11 +30,19 @@ func (reportMock *ReportMock) SetClusterName(clusterName string) {
}
func (reportMock *ReportMock) GetURL() string {
- u := fmt.Sprintf("https://%s/account/sign-up", getter.GetKSCloudAPIConnector().GetCloudUIURL())
- if reportMock.query != "" {
- u += fmt.Sprintf("?%s", reportMock.query)
+ u, err := url.Parse(reportMock.query)
+ if err != nil || u.String() == "" {
+ return ""
}
- return u
+
+ q := u.Query()
+ q.Add("utm_source", "GitHub")
+ q.Add("utm_medium", "CLI")
+ q.Add("utm_campaign", "Submit")
+
+ u.RawQuery = q.Encode()
+
+ return u.String()
}
func (reportMock *ReportMock) DisplayReportURL() {
@@ -44,8 +50,8 @@ func (reportMock *ReportMock) DisplayReportURL() {
sep := "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
message := sep + "\n"
message += "Scan results have not been submitted: " + reportMock.message + "\n"
- if reportMock.query != "" {
- message += "For more details: " + reportMock.query + "\n"
+ if link := reportMock.GetURL(); link != "" {
+ message += "For more details: " + link + "\n"
}
message += sep + "\n"
cautils.InfoTextDisplay(os.Stderr, fmt.Sprintf("\n%s\n", message))
diff --git a/core/pkg/resultshandling/reporter/v2/mockreporter_test.go b/core/pkg/resultshandling/reporter/v2/mockreporter_test.go
new file mode 100644
index 00000000..2a699a54
--- /dev/null
+++ b/core/pkg/resultshandling/reporter/v2/mockreporter_test.go
@@ -0,0 +1,43 @@
+package reporter
+
+import "testing"
+
+func TestReportMock_GetURL(t *testing.T) {
+ type fields struct {
+ query string
+ }
+ tests := []struct {
+ name string
+ fields fields
+ want string
+ }{
+ {
+ name: "TestReportMock_GetURL",
+ fields: struct {
+ query string
+ }{
+ query: "https://kubescape.io",
+ },
+ want: "https://kubescape.io?utm_campaign=Submit&utm_medium=CLI&utm_source=GitHub",
+ },
+ {
+ name: "TestReportMock_GetURL_empty",
+ fields: struct {
+ query string
+ }{
+ query: "",
+ },
+ want: "",
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ reportMock := &ReportMock{
+ query: tt.fields.query,
+ }
+ if got := reportMock.GetURL(); got != tt.want {
+ t.Errorf("ReportMock.GetURL() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/core/pkg/resultshandling/reporter/v2/reporteventreceiver.go b/core/pkg/resultshandling/reporter/v2/reporteventreceiver.go
index fc807904..56d632f2 100644
--- a/core/pkg/resultshandling/reporter/v2/reporteventreceiver.go
+++ b/core/pkg/resultshandling/reporter/v2/reporteventreceiver.go
@@ -85,10 +85,18 @@ func (report *ReportEventReceiver) SetClusterName(clusterName string) {
}
func (report *ReportEventReceiver) prepareReport(opaSessionObj *cautils.OPASessionObj) error {
- // All scans whose target is not a cluster, currently their target is a file, which is what the backend expects
- // (e.g. local-git, directory, etc)
+ // The backend for Kubescape expects scanning targets to be either
+ // Clusters or Files, not other types we support (GitLocal, Directory
+ // etc). So, to submit a compatible report to the backend, we have to
+ // override the scanning target, submit the report and then restore the
+ // original value.
+ originalScanningTarget := opaSessionObj.Metadata.ScanMetadata.ScanningTarget
+
if opaSessionObj.Metadata.ScanMetadata.ScanningTarget != reporthandlingv2.Cluster {
opaSessionObj.Metadata.ScanMetadata.ScanningTarget = reporthandlingv2.File
+ defer func() {
+ opaSessionObj.Metadata.ScanMetadata.ScanningTarget = originalScanningTarget
+ }()
}
report.initEventReceiverURL()
@@ -142,7 +150,7 @@ func (report *ReportEventReceiver) setResults(reportObj *reporthandlingv2.Postur
// set result.RawResource
resourceID := v.GetResourceID()
if _, ok := allResources[resourceID]; !ok {
- return fmt.Errorf("expected to find raw resource object for '%s'", resourceID)
+ continue
}
resource := reporthandling.NewResourceIMetadata(allResources[resourceID])
if r, ok := resourcesSource[resourceID]; ok {
@@ -260,7 +268,7 @@ func (report *ReportEventReceiver) addPathURL(urlObj *url.URL) {
if report.customerAdminEMail != "" || report.token == "" { // data has been submitted
switch report.submitContext {
case SubmitContextScan:
- urlObj.Path = fmt.Sprintf("configuration-scanning/%s", report.clusterName)
+ urlObj.Path = fmt.Sprintf("config-scanning/%s", report.clusterName)
case SubmitContextRBAC:
urlObj.Path = "rbac-visualizer"
case SubmitContextRepository:
diff --git a/core/pkg/resultshandling/reporter/v2/reporteventreceiver_test.go b/core/pkg/resultshandling/reporter/v2/reporteventreceiver_test.go
index 4f932fa4..b15ab166 100644
--- a/core/pkg/resultshandling/reporter/v2/reporteventreceiver_test.go
+++ b/core/pkg/resultshandling/reporter/v2/reporteventreceiver_test.go
@@ -5,6 +5,7 @@ import (
"testing"
"github.com/kubescape/kubescape/v2/core/cautils"
+ reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
"github.com/stretchr/testify/assert"
)
@@ -32,7 +33,7 @@ func TestReportEventReceiver_addPathURL(t *testing.T) {
want: &url.URL{
Scheme: "https",
Host: "localhost:8080",
- Path: "configuration-scanning/test",
+ Path: "config-scanning/test",
},
},
}
@@ -58,7 +59,7 @@ func TestGetURL(t *testing.T) {
"",
SubmitContextScan,
)
- assert.Equal(t, "https://cloud.armosec.io/configuration-scanning/test?utm_campaign=Submit&utm_medium=CLI&utm_source=GitHub", reporter.GetURL())
+ assert.Equal(t, "https://cloud.armosec.io/config-scanning/test?utm_campaign=Submit&utm_medium=CLI&utm_source=GitHub", reporter.GetURL())
}
// Test rbac submit and registered url
@@ -106,3 +107,49 @@ func TestGetURL(t *testing.T) {
assert.Equal(t, "https://cloud.armosec.io/account/sign-up?customerGUID=1234&invitationToken=token&utm_campaign=Submit&utm_medium=CLI&utm_source=GitHub", reporter.GetURL())
}
}
+
+func Test_prepareReportKeepsOriginalScanningTarget(t *testing.T) {
+
+ // prepareReport should keep the original scanning target it received, and not mutate it
+ testCases := []struct {
+ Name string
+ Want reporthandlingv2.ScanningTarget
+ }{
+ {"Cluster", reporthandlingv2.Cluster},
+ {"File", reporthandlingv2.File},
+ {"Repo", reporthandlingv2.Repo},
+ {"GitLocal", reporthandlingv2.GitLocal},
+ {"Directory", reporthandlingv2.Directory},
+ }
+
+ reporter := NewReportEventReceiver(
+ &cautils.ConfigObj{
+ AccountID: "1e3ae7c4-a8bb-4d7c-9bdf-eb86bc25e6bb",
+ Token: "token",
+ ClusterName: "test",
+ },
+ "",
+ SubmitContextScan,
+ )
+
+ for _, tc := range testCases {
+ t.Run(tc.Name, func(t *testing.T) {
+ want := tc.Want
+
+ opaSessionObj := &cautils.OPASessionObj{
+ Report: &reporthandlingv2.PostureReport{},
+ Metadata: &reporthandlingv2.Metadata{
+ ScanMetadata: reporthandlingv2.ScanMetadata{ScanningTarget: want},
+ },
+ }
+
+ reporter.prepareReport(opaSessionObj)
+
+ got := opaSessionObj.Metadata.ScanMetadata.ScanningTarget
+ if got != want {
+ t.Errorf("Scanning targets don’t match after preparing report. Got: %v, want %v", got, want)
+ }
+ },
+ )
+ }
+}
diff --git a/core/pkg/resultshandling/reporter/v2/reporteventreceiverutils.go b/core/pkg/resultshandling/reporter/v2/reporteventreceiverutils.go
index 902de9d5..e96baeee 100644
--- a/core/pkg/resultshandling/reporter/v2/reporteventreceiverutils.go
+++ b/core/pkg/resultshandling/reporter/v2/reporteventreceiverutils.go
@@ -45,8 +45,7 @@ func (report *ReportEventReceiver) setSubReport(opaSessionObj *cautils.OPASessio
if opaSessionObj.Metadata != nil {
reportObj.Metadata = *opaSessionObj.Metadata
if opaSessionObj.Metadata.ContextMetadata.ClusterContextMetadata != nil {
- reportObj.ClusterCloudProvider = opaSessionObj.Metadata.ContextMetadata.ClusterContextMetadata.CloudProvider // DEPRECATED
- reportObj.Metadata.ClusterMetadata = *opaSessionObj.Metadata.ContextMetadata.ClusterContextMetadata
+ reportObj.ClusterCloudProvider = opaSessionObj.Metadata.ContextMetadata.ClusterContextMetadata.CloudProvider // DEPRECATED - left here as fallback
}
}
return reportObj
diff --git a/core/pkg/resultshandling/reporter/v2/utils.go b/core/pkg/resultshandling/reporter/v2/utils.go
index 3ea50ae6..352eb50b 100644
--- a/core/pkg/resultshandling/reporter/v2/utils.go
+++ b/core/pkg/resultshandling/reporter/v2/utils.go
@@ -5,6 +5,7 @@ import (
"strings"
)
+/* unused for now
func maskID(id string) string {
sep := "-"
splitted := strings.Split(id, sep)
@@ -22,6 +23,7 @@ func maskID(id string) string {
return strings.TrimSuffix(str, sep)
}
+*/
func parseHost(urlObj *url.URL) {
if strings.Contains(urlObj.Host, "http://") {
diff --git a/core/pkg/resultshandling/results.go b/core/pkg/resultshandling/results.go
index 94e3b913..83ffc8f8 100644
--- a/core/pkg/resultshandling/results.go
+++ b/core/pkg/resultshandling/results.go
@@ -16,70 +16,83 @@ import (
type ResultsHandler struct {
reporterObj reporter.IReport
- printerObj printer.IPrinter
+ printerObjs []printer.IPrinter
+ uiPrinter printer.IPrinter
scanData *cautils.OPASessionObj
}
-func NewResultsHandler(reporterObj reporter.IReport, printerObj printer.IPrinter) *ResultsHandler {
+func NewResultsHandler(reporterObj reporter.IReport, printerObjs []printer.IPrinter, uiPrinter printer.IPrinter) *ResultsHandler {
return &ResultsHandler{
reporterObj: reporterObj,
- printerObj: printerObj,
+ printerObjs: printerObjs,
+ uiPrinter: uiPrinter,
}
}
-// GetScore return scan risk-score
-func (resultsHandler *ResultsHandler) GetRiskScore() float32 {
- return resultsHandler.scanData.Report.SummaryDetails.Score
+// GetRiskScore returns the result’s risk score
+func (rh *ResultsHandler) GetRiskScore() float32 {
+ return rh.scanData.Report.SummaryDetails.Score
}
-// GetData get scan/action related data (policies, resources, results, etc.). Call ToJson function if you wish the json representation of the data
-func (resultsHandler *ResultsHandler) GetData() *cautils.OPASessionObj {
- return resultsHandler.scanData
+// GetData returns scan/action related data (policies, resources, results, etc.)
+//
+// Call the ToJson() method if you want the JSON representation of the data
+func (rh *ResultsHandler) GetData() *cautils.OPASessionObj {
+ return rh.scanData
}
-// SetData set scan/action related data
-func (resultsHandler *ResultsHandler) SetData(data *cautils.OPASessionObj) {
- resultsHandler.scanData = data
+// SetData sets the scan/action related data
+func (rh *ResultsHandler) SetData(data *cautils.OPASessionObj) {
+ rh.scanData = data
}
-// GetPrinter get printer object
-func (resultsHandler *ResultsHandler) GetPrinter() printer.IPrinter {
- return resultsHandler.printerObj
+// GetPrinters returns all printers
+func (rh *ResultsHandler) GetPrinters() []printer.IPrinter {
+ return rh.printerObjs
}
-// GetReporter get reporter object
-func (resultsHandler *ResultsHandler) GetReporter() reporter.IReport {
- return resultsHandler.reporterObj
+// GetReporter returns the reporter object
+func (rh *ResultsHandler) GetReporter() reporter.IReport {
+ return rh.reporterObj
}
-// ToJson return results in json format
-func (resultsHandler *ResultsHandler) ToJson() ([]byte, error) {
- return json.Marshal(printerv2.FinalizeResults(resultsHandler.scanData))
+// ToJson returns the results in the JSON format
+func (rh *ResultsHandler) ToJson() ([]byte, error) {
+ return json.Marshal(printerv2.FinalizeResults(rh.scanData))
}
-// GetResults return results
-func (resultsHandler *ResultsHandler) GetResults() *reporthandlingv2.PostureReport {
- return printerv2.FinalizeResults(resultsHandler.scanData)
+// GetResults returns the results
+func (rh *ResultsHandler) GetResults() *reporthandlingv2.PostureReport {
+ return printerv2.FinalizeResults(rh.scanData)
}
-// HandleResults handle the scan results according to the pre defined interfaces
-func (resultsHandler *ResultsHandler) HandleResults() error {
+// HandleResults handles all necessary actions for the scan results
+func (rh *ResultsHandler) HandleResults() error {
+ // Display scan results in the UI first to give immediate value.
+ // First we output the results and then the score, so the
+ // score — a summary of the results — can always be seen at the end
+ // of output
+ rh.uiPrinter.ActionPrint(rh.scanData)
+ rh.uiPrinter.Score(rh.GetRiskScore())
- resultsHandler.printerObj.ActionPrint(resultsHandler.scanData)
+ // Then print to output files
+ for _, printer := range rh.printerObjs {
+ printer.ActionPrint(rh.scanData)
+ printer.Score(rh.GetRiskScore())
+ }
- if err := resultsHandler.reporterObj.Submit(resultsHandler.scanData); err != nil {
+ // We should submit only after printing results, so a user can see
+ // results at all times, even if submission fails
+ if err := rh.reporterObj.Submit(rh.scanData); err != nil {
return err
}
-
- resultsHandler.printerObj.Score(resultsHandler.GetRiskScore())
-
- resultsHandler.reporterObj.DisplayReportURL()
+ rh.reporterObj.DisplayReportURL()
return nil
}
-// NewPrinter defined output format
-func NewPrinter(printFormat, formatVersion string, verboseMode bool, viewType cautils.ViewTypes) printer.IPrinter {
+// NewPrinter returns a new printer for a given format and configuration options
+func NewPrinter(printFormat, formatVersion string, verboseMode bool, attackTree bool, viewType cautils.ViewTypes) printer.IPrinter {
switch printFormat {
case printer.JsonFormat:
@@ -104,6 +117,6 @@ func NewPrinter(printFormat, formatVersion string, verboseMode bool, viewType ca
if printFormat != printer.PrettyFormat {
logger.L().Error(fmt.Sprintf("Invalid format \"%s\", default format \"pretty-printer\" is applied", printFormat))
}
- return printerv2.NewPrettyPrinter(verboseMode, formatVersion, viewType)
+ return printerv2.NewPrettyPrinter(verboseMode, formatVersion, attackTree, viewType)
}
}
diff --git a/core/pkg/resultshandling/results_test.go b/core/pkg/resultshandling/results_test.go
index 56cc56c2..36b5cd4c 100644
--- a/core/pkg/resultshandling/results_test.go
+++ b/core/pkg/resultshandling/results_test.go
@@ -1,11 +1,79 @@
package resultshandling
-var mockFramework_0044 = `{"guid":"","name":"fw-0044","attributes":{"armoBuiltin":true},"creationTime":"","description":"Implement NSA security advices for K8s ","controls":[{"guid":"","name":"Container hostPort","attributes":{"armoBuiltin":true},"id":"C-0044","controlID":"C-0044","creationTime":"","description":"Configuring hostPort limits you to a particular port, and if any two workloads that specify the same HostPort they cannot be deployed to the same node. Therefore, if the number of replica of such workload is higher than the number of nodes, the deployment will fail.","remediation":"Avoid usage of hostPort unless it is absolutely necessary. Use NodePort / ClusterIP instead.","rules":[{"guid":"","name":"container-hostPort","attributes":{"armoBuiltin":true},"creationTime":"","rule":"package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbegginingOfPath := \"spec.\"\n\tpath := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if 
cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nisHostPort(container, i, begginingOfPath) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [begginingOfPath, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod","CronJob"]}],"ruleDependencies":[],"configInputs":null,"controlConfigInputs":null,"description":"fails if container has hostPort","remediation":"Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP","ruleQuery":"armo_builtins"}],"rulesIDs":[""],"baseScore":4}]}`
-var mockFramework_0006_0013 = `{"guid":"","name":"fw-0006-0013","attributes":{"armoBuiltin":true},"creationTime":"","description":"Implement NSA security advices for K8s ","controls":[{"guid":"","name":"HostPath mount","attributes":{"armoBuiltin":true},"id":"C-0048","controlID":"C-0048","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, 
k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6},{"guid":"","name":"Non-root 
containers","attributes":{"armoBuiltin":true},"id":"C-0013","controlID":"C-0013","creationTime":"","description":"Potential attackers may gain access to a container and leverage its existing privileges to conduct an attack. Therefore, it is not recommended to deploy containers with root privileges unless it is absolutely necessary. This contol identifies all the Pods running as root or can escalate to root.","remediation":"If your application does not need root privileges, make sure to define the runAsUser or runAsGroup under the PodSecurityContext and use user ID 1000 or higher. Do not turn on allowPrivlegeEscalation bit and make sure runAsNonRoot is true.","rules":[{"guid":"","name":"non-root-containers","attributes":{"armoBuiltin":true},"creationTime":"","rule":"package armo_builtins\n\n\n# Fails if pod has container configured to run as root\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbegginingOfPath := \"spec.\"\n result := isRootContainer(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if pod has container configured to run as root\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n\tcontainer := pod.spec.containers[i]\n\tbegginingOfPath =\"spec.\"\n result := isRootPod(pod, container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"container: %v in pod: %v may run as root\", [container.name, pod.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n\n\n# Fails if workload has container configured to run as root\ndeny[msga] {\n wl := 
input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.template.spec.\"\n result := isRootContainer(container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if workload has container configured to run as root\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.template.spec.\"\n result := isRootPod(wl.spec.template, container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n# Fails if cronjob has a container configured to run as root\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRootContainer(container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\n# Fails if workload has container configured to run as 
root\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n result := isRootPod(wl.spec.jobTemplate.spec.template, container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"container :%v in %v: %v may run as root\", [container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\nisRootPod(pod, container, i, begginingOfPath) = path {\n\tpath = \"\"\n not container.securityContext.runAsUser\n pod.spec.securityContext.runAsUser == 0\n\tpath = \"spec.securityContext.runAsUser\"\n}\n\nisRootPod(pod, container, i, begginingOfPath) = path {\n\tpath = \"\"\n not container.securityContext.runAsUser\n\tnot container.securityContext.runAsGroup\n\tnot container.securityContext.runAsNonRoot\n not pod.spec.securityContext.runAsUser\n\tnot pod.spec.securityContext.runAsGroup\n pod.spec.securityContext.runAsNonRoot == false\n\tpath = \"spec.securityContext.runAsNonRoot\"\n}\n\nisRootPod(pod, container, i, begginingOfPath) = path {\n\tpath = \"\"\n not container.securityContext.runAsGroup\n pod.spec.securityContext.runAsGroup == 0\n\tpath = sprintf(\"%vsecurityContext.runAsGroup\", [begginingOfPath])\n}\n\nisRootPod(pod, container, i, begginingOfPath)= path {\n\tpath = \"\"\n\tnot pod.spec.securityContext.runAsGroup\n\tnot pod.spec.securityContext.runAsUser\n \tcontainer.securityContext.runAsNonRoot == false\n\tpath = sprintf(\"%vcontainers[%v].securityContext.runAsNonRoot\", [begginingOfPath, format_int(i, 10)])\n}\n\nisRootContainer(container, i, begginingOfPath) = path {\n\tpath = \"\"\n container.securityContext.runAsUser == 0\n\tpath = sprintf(\"%vcontainers[%v].securityContext.runAsUser\", [begginingOfPath, format_int(i, 10)])\n}\n\nisRootContainer(container, i, 
begginingOfPath) = path {\n\tpath = \"\"\n container.securityContext.runAsGroup == 0\n\t path = sprintf(\"%vcontainers[%v].securityContext.runAsGroup\", [begginingOfPath, format_int(i, 10)])\n}","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod","CronJob"]}],"ruleDependencies":[],"configInputs":null,"controlConfigInputs":null,"description":"fails if container can run as root","remediation":"Make sure that the user/group in the securityContext of pod/container is set to an id less than 1000, or the runAsNonRoot flag is set to true. Also make sure that the allowPrivilegeEscalation field is set to false","ruleQuery":"armo_builtins"}],"rulesIDs":[""],"baseScore":6}]}`
+import (
+ "testing"
-// func TestReportV2ToV1(t *testing.T) {
-// opaSessionObj := cautils.OPASessionObj{}
-// opaSessionObj.AllResources = map[string]workloadinterface.IMetadata{}
-// opaSessionObj.PostureReport
-// // opaSessionObj.Exceptions
-// }
+ "github.com/kubescape/kubescape/v2/core/cautils"
+ "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
+ "github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
+ reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
+)
+
+type DummyReporter struct{}
+
+func (dr *DummyReporter) Submit(opaSessionObj *cautils.OPASessionObj) error { return nil }
+func (dr *DummyReporter) SetCustomerGUID(customerGUID string) {}
+func (dr *DummyReporter) SetClusterName(clusterName string) {}
+func (dr *DummyReporter) DisplayReportURL() {}
+func (dr *DummyReporter) GetURL() string { return "" }
+
+type SpyPrinter struct {
+ ActionPrintCalls int
+ ScoreCalls int
+}
+
+func (sp *SpyPrinter) SetWriter(outputFile string) {}
+func (sp *SpyPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
+ sp.ActionPrintCalls += 1
+}
+func (sp *SpyPrinter) Score(score float32) {
+ sp.ScoreCalls += 1
+}
+
+func TestResultsHandlerHandleResultsPrintsResultsToUI(t *testing.T) {
+ reporter := &DummyReporter{}
+ printers := []printer.IPrinter{}
+ uiPrinter := &SpyPrinter{}
+ fakeScanData := &cautils.OPASessionObj{
+ Report: &reporthandlingv2.PostureReport{
+ SummaryDetails: reportsummary.SummaryDetails{
+ Score: 0.0,
+ },
+ },
+ }
+
+ rh := NewResultsHandler(reporter, printers, uiPrinter)
+ rh.SetData(fakeScanData)
+
+ rh.HandleResults()
+
+ want := 1
+ got := uiPrinter.ActionPrintCalls
+ if got != want {
+ t.Errorf("UI Printer was not called to print. Got calls: %d, want calls: %d", got, want)
+ }
+}
+
+func TestResultsHandlerHandleResultsPrintsScoreToUI(t *testing.T) {
+ reporter := &DummyReporter{}
+ printers := []printer.IPrinter{}
+ uiPrinter := &SpyPrinter{}
+ fakeScanData := &cautils.OPASessionObj{
+ Report: &reporthandlingv2.PostureReport{
+ SummaryDetails: reportsummary.SummaryDetails{
+ Score: 0.0,
+ },
+ },
+ }
+
+ rh := NewResultsHandler(reporter, printers, uiPrinter)
+ rh.SetData(fakeScanData)
+
+ rh.HandleResults()
+
+ want := 1
+ got := uiPrinter.ScoreCalls
+ if got != want {
+		t.Errorf("UI Printer was not called to print score. Got calls: %d, want calls: %d", got, want)
+ }
+}
diff --git a/go.mod b/go.mod
index 838770d7..865b307c 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module github.com/kubescape/kubescape/v2
-go 1.18
+go 1.19
require (
cloud.google.com/go/containeranalysis v0.4.0
@@ -11,13 +11,13 @@ require (
github.com/enescakir/emoji v1.0.0
github.com/fatih/color v1.13.0
github.com/francoispqt/gojay v1.2.13
- github.com/go-git/go-git/v5 v5.4.2
+ github.com/go-git/go-git/v5 v5.5.2
github.com/google/uuid v1.3.0
github.com/johnfercher/maroto v0.37.0
- github.com/kubescape/go-git-url v0.0.17
+ github.com/kubescape/go-git-url v0.0.21
github.com/kubescape/go-logger v0.0.6
- github.com/kubescape/k8s-interface v0.0.89
- github.com/kubescape/opa-utils v0.0.204
+ github.com/kubescape/k8s-interface v0.0.94-0.20221228202834-4b64f2440950
+ github.com/kubescape/opa-utils v0.0.223
github.com/kubescape/rbac-utils v0.0.19
github.com/libgit2/git2go/v33 v33.0.9
github.com/mattn/go-isatty v0.0.14
@@ -28,7 +28,7 @@ require (
github.com/spf13/cobra v1.6.1
github.com/stretchr/testify v1.8.0
github.com/whilp/git-urls v1.0.0
- golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3
+ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
google.golang.org/api v0.85.0
google.golang.org/genproto v0.0.0-20220708155623-50e5f4832e73
google.golang.org/protobuf v1.28.1
@@ -51,26 +51,25 @@ require (
cloud.google.com/go/compute v1.7.0 // indirect
cloud.google.com/go/container v1.2.0 // indirect
cloud.google.com/go/grafeas v0.2.0 // indirect
- github.com/Azure/azure-sdk-for-go v66.0.0+incompatible // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.0.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.2.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
- github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect
- github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
- github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
- github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect
github.com/BurntSushi/toml v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.1.1 // indirect
github.com/Masterminds/sprig/v3 v3.2.2 // indirect
- github.com/Microsoft/go-winio v0.5.1 // indirect
+ github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/OneOfOne/xxhash v1.2.8 // indirect
- github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 // indirect
- github.com/PuerkitoBio/purell v1.1.1 // indirect
- github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
+ github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 // indirect
github.com/a8m/envsubst v1.3.0 // indirect
github.com/acomagu/bufpipe v1.0.3 // indirect
github.com/agnivade/levenshtein v1.1.1 // indirect
@@ -89,6 +88,7 @@ require (
github.com/aws/aws-sdk-go-v2/service/sts v1.16.9 // indirect
github.com/aws/smithy-go v1.12.0 // indirect
github.com/boombuler/barcode v1.0.1 // indirect
+ github.com/cloudflare/circl v1.1.0 // indirect
github.com/coreos/go-oidc v2.2.1+incompatible // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
@@ -97,34 +97,34 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/elliotchance/orderedmap v1.5.0 // indirect
- github.com/emicklei/go-restful/v3 v3.8.0 // indirect
- github.com/emirpasic/gods v1.12.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.9.0 // indirect
+ github.com/emirpasic/gods v1.18.1 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-errors/errors v1.0.1 // indirect
github.com/go-git/gcfg v1.5.0 // indirect
- github.com/go-git/go-billy/v5 v5.3.1 // indirect
+ github.com/go-git/go-billy/v5 v5.4.0 // indirect
github.com/go-gota/gota v0.12.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
- github.com/go-openapi/jsonreference v0.19.5 // indirect
+ github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gobwas/glob v0.2.3 // indirect
github.com/goccy/go-json v0.9.11 // indirect
github.com/goccy/go-yaml v1.9.6 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
+ github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
- github.com/google/go-cmp v0.5.8 // indirect
+ github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
github.com/googleapis/gax-go/v2 v2.4.0 // indirect
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
- github.com/imdario/mergo v0.3.12 // indirect
+ github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jinzhu/copier v0.3.5 // indirect
@@ -132,14 +132,13 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/jung-kurt/gofpdf v1.16.2 // indirect
- github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect
- github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
+ github.com/kevinburke/ssh_config v1.2.0 // indirect
+ github.com/kylelemons/godebug v1.1.0 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
- github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/spdystream v0.2.0 // indirect
@@ -149,7 +148,8 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
- github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
+ github.com/pjbgf/sha1cd v0.2.3 // indirect
+ github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
@@ -157,10 +157,11 @@ require (
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58 // indirect
github.com/sergi/go-diff v1.1.0 // indirect
github.com/shopspring/decimal v1.2.0 // indirect
+ github.com/skeema/knownhosts v1.1.0 // indirect
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
- github.com/xanzy/ssh-agent v0.3.0 // indirect
+ github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
@@ -171,12 +172,12 @@ require (
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.22.0 // indirect
- golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
- golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
+ golang.org/x/crypto v0.3.0 // indirect
+ golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 // indirect
- golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 // indirect
- golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
- golang.org/x/text v0.3.7 // indirect
+ golang.org/x/sys v0.3.0 // indirect
+ golang.org/x/term v0.3.0 // indirect
+ golang.org/x/text v0.5.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
gonum.org/v1/gonum v0.9.1 // indirect
@@ -188,7 +189,7 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiextensions-apiserver v0.24.2 // indirect
k8s.io/klog/v2 v2.80.1 // indirect
- k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
+ k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
sigs.k8s.io/controller-runtime v0.12.3 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
diff --git a/go.sum b/go.sum
index 90b2b415..6fab841f 100644
--- a/go.sum
+++ b/go.sum
@@ -72,36 +72,37 @@ dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-github.com/Azure/azure-sdk-for-go v66.0.0+incompatible h1:bmmC38SlE8/E81nNADlgmVGurPWMHDX2YNXVQMrBpEE=
-github.com/Azure/azure-sdk-for-go v66.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 h1:Ut0ZGdOwJDw0npYEg+TLlPls3Pq6JiZaP2/aGKir7Zw=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 h1:t/W5MYAuQy81cvM8VUNfRLzhtKpXhVUAN7Cd7KVbTyc=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0/go.mod h1:NBanQUfSWiWn3QEpWDTCU0IjBECKOYvl2R8xdRtMtiM=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.0.0 h1:WJd2y/3vp3sgG1u1KfDaEyGiM9oC11cBa9rbmsSv5rQ=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.0.0/go.mod h1:XlGHa0e9Mg7RNOshDEuc0HptPdtN/SI0HCu+02rdnOA=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.2.0 h1:3L+gX5ssCABAToH0VQ64/oNz7rr+ShW+2sB+sonzIlY=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.2.0/go.mod h1:4gUds0dEPFIld6DwHfbo0cLBljyIyI5E5ciPb5MLi3Q=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
-github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=
github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg=
github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
-github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
-github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
-github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
-github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM=
+github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
@@ -112,20 +113,16 @@ github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030I
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8=
github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
-github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
-github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ=
-github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
-github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
+github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 h1:ra2OtmuW0AE5csawV4YXMNGNQQXvLRps3z2Z59OPO+I=
+github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/a8m/envsubst v1.3.0 h1:GmXKmVssap0YtlU3E230W98RWtWCyIZzjtf1apWWyAg=
github.com/a8m/envsubst v1.3.0/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGtlsbY=
@@ -134,7 +131,7 @@ github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
-github.com/alecthomas/assert/v2 v2.0.3 h1:WKqJODfOiQG0nEJKFKzDIG3E29CN2/4zR9XGJzKIkbg=
+github.com/alecthomas/assert/v2 v2.2.0 h1:f6L/b7KE2bfA+9O4FL3CM/xJccDEwPVYd5fALBiuwvw=
github.com/alecthomas/participle/v2 v2.0.0-beta.5 h1:y6dsSYVb1G5eK6mgmy+BgI3Mw35a3WghArZ/Hbebrjo=
github.com/alecthomas/participle/v2 v2.0.0-beta.5/go.mod h1:RC764t6n4L8D8ITAJv0qdokritYSNR3wV5cVwmIEaMM=
github.com/alecthomas/repr v0.1.1 h1:87P60cSmareLAxMc4Hro0r2RBY4ROm0dYwkJNpS4pPs=
@@ -143,8 +140,9 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
@@ -206,6 +204,7 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT
github.com/briandowns/spinner v1.18.1 h1:yhQmQtM1zsqFsouh09Bk/jCjd50pC3EOGsh28gLVvwY=
github.com/briandowns/spinner v1.18.1/go.mod h1:mQak9GHqbspjC/5iUx3qMlIho8xBS/ppAL/hX5SmPJU=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bytecodealliance/wasmtime-go v1.0.0 h1:9u9gqaUiaJeN5IoD1L7egD8atOnTGyJcNp8BhkL9cUU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
@@ -219,6 +218,8 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY=
+github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -259,6 +260,7 @@ github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -274,10 +276,10 @@ github.com/elliotchance/orderedmap v1.5.0 h1:1IsExUsjv5XNBD3ZdC7jkAAqLWOOKdbPTmk
github.com/elliotchance/orderedmap v1.5.0/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
-github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
-github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
+github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog=
github.com/enescakir/emoji v1.0.0/go.mod h1:Bt1EKuLnKDTYpLALApstIkAjdDrS/8IAgTkKp+WKFD0=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -315,8 +317,8 @@ github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49P
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
-github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
+github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
@@ -325,13 +327,13 @@ github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2H
github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
-github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8=
-github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
-github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4=
-github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
+github.com/go-git/go-billy/v5 v5.4.0 h1:Vaw7LaSTRJOUric7pe4vnzBSgyuf2KrLsu2Y4ZpQBDE=
+github.com/go-git/go-billy/v5 v5.4.0/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg=
+github.com/go-git/go-git-fixtures/v4 v4.3.1 h1:y5z6dd3qi8Hl+stezc8p3JxDkoTRqMAlKnXHuzrfjTQ=
+github.com/go-git/go-git-fixtures/v4 v4.3.1/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo=
+github.com/go-git/go-git/v5 v5.5.2 h1:v8lgZa5k9ylUw+OR/roJHTxR4QItsNFI5nKtAXFuynw=
+github.com/go-git/go-git/v5 v5.5.2/go.mod h1:BE5hUJ5yaV2YMxhmaP4l6RBQ08kMxKSPD4BlxtH7OjI=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -355,8 +357,9 @@ github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
+github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
@@ -378,8 +381,9 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
+github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
@@ -442,8 +446,9 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -530,8 +535,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -566,8 +571,8 @@ github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jung-kurt/gofpdf v1.16.2 h1:jgbatWHfRlPYiK85qgevsZTHviWXKwB1TTiKdz5PtRc=
github.com/jung-kurt/gofpdf v1.16.2/go.mod h1:1hl7y57EsiPAkLbOwzpzqgx1A30nQCk/YmFV8S2vmK0=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -586,16 +591,18 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kubescape/go-git-url v0.0.17 h1:yBPmQzxIVa3vbFVjwTlrnxjGf1kTAWDeMh+kvMd/RZA=
-github.com/kubescape/go-git-url v0.0.17/go.mod h1:a1rDC6M1VBMwTaDfrSwbVq84Zu71U+1qKqQmI1cA0lE=
+github.com/kubescape/go-git-url v0.0.21 h1:YSOCI2c5o6KRn7p6dkU5UU8Zdlee9bqigTiMEuQK7hM=
+github.com/kubescape/go-git-url v0.0.21/go.mod h1:IbVT7Wsxlghsa+YxI5KOx4k9VQJaa3z0kTaQz5D3nKM=
github.com/kubescape/go-logger v0.0.6 h1:ynhAmwrz0O7Jtqq1CdmCZUrKveji25hVP+B/FAb3QrA=
github.com/kubescape/go-logger v0.0.6/go.mod h1:DnVWEvC90LFY1nNMaNo6nBVOcqkLMK3S0qzXP1fzRvI=
-github.com/kubescape/k8s-interface v0.0.89 h1:OtlvZosHpjlbHfsilfQk2wRbuBnxwF0e+WZX6GbkfLU=
-github.com/kubescape/k8s-interface v0.0.89/go.mod h1:pgFRs20mHiavf6+fFWY7h/f8HuKlwuZwirvjxiKJlu0=
-github.com/kubescape/opa-utils v0.0.204 h1:9O9drjyzjOhI7Xi2S4Px0WKa66U5GFPQqeOLvhDqHnw=
-github.com/kubescape/opa-utils v0.0.204/go.mod h1:rDC3PANuk8gU5lSDO/WPFTluypBQ+/6qiuZLye+slYg=
+github.com/kubescape/k8s-interface v0.0.94-0.20221228202834-4b64f2440950 h1:r9QwpzU4FG0B4XRbKx0J0VsmwO00+a6c2a8Utq5ae0k=
+github.com/kubescape/k8s-interface v0.0.94-0.20221228202834-4b64f2440950/go.mod h1:cFE6PoBm+31LjynY2XkzX19mRmaE9CRk2UQv/rEF3ZY=
+github.com/kubescape/opa-utils v0.0.223 h1:t39+P5eW1nsmt55Sx7NZU3Tv9IDBo0ljEYbOi9nlgVc=
+github.com/kubescape/opa-utils v0.0.223/go.mod h1:cKWsKl2t2XP7Mc3t1c3hNdf8Kg0sxikUcqATfq09vzU=
github.com/kubescape/rbac-utils v0.0.19 h1:7iydgVxlMLW15MgHORfMBMqNj9jHtFGACd744fdtrFs=
github.com/kubescape/rbac-utils v0.0.19/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
@@ -638,7 +645,6 @@ github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFW
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
@@ -706,6 +712,10 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR
github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
github.com/phpdave11/gofpdi v1.0.7/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pjbgf/sha1cd v0.2.3 h1:uKQP/7QOzNtKYH7UTohZLcjF5/55EnTw0jO/Ru4jZwI=
+github.com/pjbgf/sha1cd v0.2.3/go.mod h1:HOK9QrgzdHpbc2Kzip0Q1yi3M2MFGPADtR6HjG65m5M=
+github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
+github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -789,12 +799,13 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0=
+github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -849,8 +860,8 @@ github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+
github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
github.com/whilp/git-urls v1.0.0 h1:95f6UMWN5FKW71ECsXRUd3FVYiXdrE7aX4NZKcPmIjU=
github.com/whilp/git-urls v1.0.0/go.mod h1:J16SAmobsqc3Qcy98brfl5f5+e0clUvg1krgwk/qCfE=
-github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI=
-github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
+github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
+github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -869,6 +880,7 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
@@ -923,7 +935,6 @@ golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -934,13 +945,14 @@ golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -989,8 +1001,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1036,7 +1049,6 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
@@ -1051,8 +1063,11 @@ golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI=
-golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc=
+golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1091,6 +1106,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1111,7 +1127,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1151,11 +1166,9 @@ golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1168,6 +1181,7 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1184,12 +1198,20 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 h1:v6hYoSR9T5oet+pMXwUWkbiVqx/63mlHjefrHmxwfeY=
-golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1198,8 +1220,10 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1273,6 +1297,7 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1523,6 +1548,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
@@ -1568,8 +1594,8 @@ k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
-k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
-k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
+k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
+k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
diff --git a/httphandler/go.mod b/httphandler/go.mod
index fea78cd7..511250ad 100644
--- a/httphandler/go.mod
+++ b/httphandler/go.mod
@@ -1,6 +1,6 @@
module github.com/kubescape/kubescape/v2/httphandler
-go 1.18
+go 1.19
replace github.com/kubescape/kubescape/v2 => ../
@@ -12,7 +12,7 @@ require (
github.com/gorilla/schema v1.2.0
github.com/kubescape/go-logger v0.0.6
github.com/kubescape/kubescape/v2 v2.0.0-00010101000000-000000000000
- github.com/kubescape/opa-utils v0.0.204
+ github.com/kubescape/opa-utils v0.0.223
github.com/stretchr/testify v1.8.0
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
)
@@ -22,26 +22,19 @@ require (
cloud.google.com/go/container v1.2.0 // indirect
cloud.google.com/go/containeranalysis v0.4.0 // indirect
cloud.google.com/go/grafeas v0.2.0 // indirect
- github.com/Azure/azure-sdk-for-go v66.0.0+incompatible // indirect
- github.com/Azure/go-autorest v14.2.0+incompatible // indirect
- github.com/Azure/go-autorest/autorest v0.11.27 // indirect
- github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
- github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect
- github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
- github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
- github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
- github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
- github.com/Azure/go-autorest/logger v0.2.1 // indirect
- github.com/Azure/go-autorest/tracing v0.6.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.0.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.2.0 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 // indirect
github.com/BurntSushi/toml v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.1.1 // indirect
github.com/Masterminds/sprig/v3 v3.2.2 // indirect
- github.com/Microsoft/go-winio v0.5.1 // indirect
+ github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/OneOfOne/xxhash v1.2.8 // indirect
- github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 // indirect
- github.com/PuerkitoBio/purell v1.1.1 // indirect
- github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
+ github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 // indirect
github.com/a8m/envsubst v1.3.0 // indirect
github.com/acomagu/bufpipe v1.0.3 // indirect
github.com/agnivade/levenshtein v1.1.1 // indirect
@@ -64,6 +57,7 @@ require (
github.com/aws/smithy-go v1.12.0 // indirect
github.com/boombuler/barcode v1.0.1 // indirect
github.com/briandowns/spinner v1.18.1 // indirect
+ github.com/cloudflare/circl v1.1.0 // indirect
github.com/coreos/go-oidc v2.2.1+incompatible // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
@@ -72,8 +66,8 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/elliotchance/orderedmap v1.5.0 // indirect
- github.com/emicklei/go-restful/v3 v3.8.0 // indirect
- github.com/emirpasic/gods v1.12.0 // indirect
+ github.com/emicklei/go-restful/v3 v3.9.0 // indirect
+ github.com/emirpasic/gods v1.18.1 // indirect
github.com/enescakir/emoji v1.0.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/fatih/color v1.13.0 // indirect
@@ -81,14 +75,14 @@ require (
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-errors/errors v1.0.1 // indirect
github.com/go-git/gcfg v1.5.0 // indirect
- github.com/go-git/go-billy/v5 v5.3.1 // indirect
- github.com/go-git/go-git/v5 v5.4.2 // indirect
+ github.com/go-git/go-billy/v5 v5.4.0 // indirect
+ github.com/go-git/go-git/v5 v5.5.2 // indirect
github.com/go-gota/gota v0.12.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-openapi/analysis v0.21.2 // indirect
github.com/go-openapi/errors v0.20.2 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
- github.com/go-openapi/jsonreference v0.19.6 // indirect
+ github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/loads v0.21.1 // indirect
github.com/go-openapi/spec v0.20.4 // indirect
github.com/go-openapi/strfmt v0.21.2 // indirect
@@ -99,17 +93,17 @@ require (
github.com/goccy/go-json v0.9.11 // indirect
github.com/goccy/go-yaml v1.9.6 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
+ github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
- github.com/google/go-cmp v0.5.8 // indirect
+ github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
github.com/googleapis/gax-go/v2 v2.4.0 // indirect
github.com/huandu/xstrings v1.3.2 // indirect
- github.com/imdario/mergo v0.3.12 // indirect
+ github.com/imdario/mergo v0.3.13 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/jinzhu/copier v0.3.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
@@ -117,10 +111,11 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/jung-kurt/gofpdf v1.16.2 // indirect
- github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect
- github.com/kubescape/go-git-url v0.0.17 // indirect
- github.com/kubescape/k8s-interface v0.0.89 // indirect
+ github.com/kevinburke/ssh_config v1.2.0 // indirect
+ github.com/kubescape/go-git-url v0.0.21 // indirect
+ github.com/kubescape/k8s-interface v0.0.94-0.20221228202834-4b64f2440950 // indirect
github.com/kubescape/rbac-utils v0.0.19 // indirect
+ github.com/kylelemons/godebug v1.1.0 // indirect
github.com/libgit2/git2go/v33 v33.0.9 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@@ -129,7 +124,6 @@ require (
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mikefarah/yq/v4 v4.29.1 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
- github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -142,6 +136,8 @@ require (
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
github.com/owenrumney/go-sarif/v2 v2.1.2 // indirect
+ github.com/pjbgf/sha1cd v0.2.3 // indirect
+ github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
@@ -149,11 +145,12 @@ require (
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58 // indirect
github.com/sergi/go-diff v1.1.0 // indirect
github.com/shopspring/decimal v1.2.0 // indirect
+ github.com/skeema/knownhosts v1.1.0 // indirect
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
github.com/whilp/git-urls v1.0.0 // indirect
- github.com/xanzy/ssh-agent v0.3.0 // indirect
+ github.com/xanzy/ssh-agent v0.3.3 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
@@ -165,13 +162,13 @@ require (
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.22.0 // indirect
- golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
- golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
- golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
+ golang.org/x/crypto v0.3.0 // indirect
+ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
+ golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 // indirect
golang.org/x/oauth2 v0.0.0-20220630143837-2104d58473e0 // indirect
- golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 // indirect
- golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
- golang.org/x/text v0.3.7 // indirect
+ golang.org/x/sys v0.3.0 // indirect
+ golang.org/x/term v0.3.0 // indirect
+ golang.org/x/text v0.5.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
gonum.org/v1/gonum v0.9.1 // indirect
@@ -187,12 +184,12 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
helm.sh/helm/v3 v3.9.0 // indirect
- k8s.io/api v0.25.3 // indirect
+ k8s.io/api v0.26.0 // indirect
k8s.io/apiextensions-apiserver v0.24.2 // indirect
- k8s.io/apimachinery v0.25.3 // indirect
- k8s.io/client-go v0.25.3 // indirect
+ k8s.io/apimachinery v0.26.0 // indirect
+ k8s.io/client-go v0.26.0 // indirect
k8s.io/klog/v2 v2.80.1 // indirect
- k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
+ k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
sigs.k8s.io/controller-runtime v0.12.3 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/kustomize/api v0.11.4 // indirect
diff --git a/httphandler/go.sum b/httphandler/go.sum
index 30eb0f0d..d79d5f14 100644
--- a/httphandler/go.sum
+++ b/httphandler/go.sum
@@ -72,36 +72,26 @@ dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-github.com/Azure/azure-sdk-for-go v66.0.0+incompatible h1:bmmC38SlE8/E81nNADlgmVGurPWMHDX2YNXVQMrBpEE=
-github.com/Azure/azure-sdk-for-go v66.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0 h1:Ut0ZGdOwJDw0npYEg+TLlPls3Pq6JiZaP2/aGKir7Zw=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0 h1:t/W5MYAuQy81cvM8VUNfRLzhtKpXhVUAN7Cd7KVbTyc=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.0/go.mod h1:NBanQUfSWiWn3QEpWDTCU0IjBECKOYvl2R8xdRtMtiM=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.0.0 h1:WJd2y/3vp3sgG1u1KfDaEyGiM9oC11cBa9rbmsSv5rQ=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/authorization/armauthorization/v2 v2.0.0/go.mod h1:XlGHa0e9Mg7RNOshDEuc0HptPdtN/SI0HCu+02rdnOA=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.2.0 h1:3L+gX5ssCABAToH0VQ64/oNz7rr+ShW+2sB+sonzIlY=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v2 v2.2.0/go.mod h1:4gUds0dEPFIld6DwHfbo0cLBljyIyI5E5ciPb5MLi3Q=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
-github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
-github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=
-github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg=
-github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA=
-github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4=
-github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
-github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
-github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
-github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
-github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
-github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
-github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E=
-github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0 h1:VgSJlZH5u0k2qxSpqyghcFQKmvYckj46uymKK5XzkBM=
+github.com/AzureAD/microsoft-authentication-library-for-go v0.7.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
@@ -112,20 +102,16 @@ github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030I
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8=
github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk=
-github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
-github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
-github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
-github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.8 h1:31czK/TI9sNkxIKfaUfGlU47BAxQ0ztGgd9vPyqimf8=
github.com/OneOfOne/xxhash v1.2.8/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
-github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ=
-github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
-github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
+github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 h1:ra2OtmuW0AE5csawV4YXMNGNQQXvLRps3z2Z59OPO+I=
+github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/a8m/envsubst v1.3.0 h1:GmXKmVssap0YtlU3E230W98RWtWCyIZzjtf1apWWyAg=
github.com/a8m/envsubst v1.3.0/go.mod h1:MVUTQNGQ3tsjOOtKCNd+fl8RzhsXcDvvAEzkhGtlsbY=
@@ -134,7 +120,7 @@ github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
-github.com/alecthomas/assert/v2 v2.0.3 h1:WKqJODfOiQG0nEJKFKzDIG3E29CN2/4zR9XGJzKIkbg=
+github.com/alecthomas/assert/v2 v2.2.0 h1:f6L/b7KE2bfA+9O4FL3CM/xJccDEwPVYd5fALBiuwvw=
github.com/alecthomas/participle/v2 v2.0.0-beta.5 h1:y6dsSYVb1G5eK6mgmy+BgI3Mw35a3WghArZ/Hbebrjo=
github.com/alecthomas/participle/v2 v2.0.0-beta.5/go.mod h1:RC764t6n4L8D8ITAJv0qdokritYSNR3wV5cVwmIEaMM=
github.com/alecthomas/repr v0.1.1 h1:87P60cSmareLAxMc4Hro0r2RBY4ROm0dYwkJNpS4pPs=
@@ -143,8 +129,9 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
+github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
@@ -209,6 +196,7 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT
github.com/briandowns/spinner v1.18.1 h1:yhQmQtM1zsqFsouh09Bk/jCjd50pC3EOGsh28gLVvwY=
github.com/briandowns/spinner v1.18.1/go.mod h1:mQak9GHqbspjC/5iUx3qMlIho8xBS/ppAL/hX5SmPJU=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bytecodealliance/wasmtime-go v1.0.0 h1:9u9gqaUiaJeN5IoD1L7egD8atOnTGyJcNp8BhkL9cUU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
@@ -222,6 +210,8 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/circl v1.1.0 h1:bZgT/A+cikZnKIwn7xL2OBj012Bmvho/o6RpRvv3GKY=
+github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -261,6 +251,7 @@ github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
@@ -275,10 +266,10 @@ github.com/elliotchance/orderedmap v1.5.0 h1:1IsExUsjv5XNBD3ZdC7jkAAqLWOOKdbPTmk
github.com/elliotchance/orderedmap v1.5.0/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
-github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
-github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
+github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
+github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog=
github.com/enescakir/emoji v1.0.0/go.mod h1:Bt1EKuLnKDTYpLALApstIkAjdDrS/8IAgTkKp+WKFD0=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -316,8 +307,8 @@ github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49P
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
-github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
-github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
+github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
@@ -326,13 +317,13 @@ github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2H
github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
-github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8=
-github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
-github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4=
-github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
+github.com/go-git/go-billy/v5 v5.4.0 h1:Vaw7LaSTRJOUric7pe4vnzBSgyuf2KrLsu2Y4ZpQBDE=
+github.com/go-git/go-billy/v5 v5.4.0/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg=
+github.com/go-git/go-git-fixtures/v4 v4.3.1 h1:y5z6dd3qi8Hl+stezc8p3JxDkoTRqMAlKnXHuzrfjTQ=
+github.com/go-git/go-git-fixtures/v4 v4.3.1/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo=
+github.com/go-git/go-git/v5 v5.5.2 h1:v8lgZa5k9ylUw+OR/roJHTxR4QItsNFI5nKtAXFuynw=
+github.com/go-git/go-git/v5 v5.5.2/go.mod h1:BE5hUJ5yaV2YMxhmaP4l6RBQ08kMxKSPD4BlxtH7OjI=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -363,8 +354,9 @@ github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUe
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
-github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
+github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
+github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0=
github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
github.com/go-openapi/runtime v0.24.1 h1:Sml5cgQKGYQHF+M7yYSHaH1eOjvTykrddTE/KtQVjqo=
@@ -425,9 +417,8 @@ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zV
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
-github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU=
-github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
+github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
@@ -490,8 +481,9 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
-github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -580,8 +572,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
+github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
@@ -617,8 +609,8 @@ github.com/jung-kurt/gofpdf v1.16.2 h1:jgbatWHfRlPYiK85qgevsZTHviWXKwB1TTiKdz5Pt
github.com/jung-kurt/gofpdf v1.16.2/go.mod h1:1hl7y57EsiPAkLbOwzpzqgx1A30nQCk/YmFV8S2vmK0=
github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
@@ -639,16 +631,18 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/kubescape/go-git-url v0.0.17 h1:yBPmQzxIVa3vbFVjwTlrnxjGf1kTAWDeMh+kvMd/RZA=
-github.com/kubescape/go-git-url v0.0.17/go.mod h1:a1rDC6M1VBMwTaDfrSwbVq84Zu71U+1qKqQmI1cA0lE=
+github.com/kubescape/go-git-url v0.0.21 h1:YSOCI2c5o6KRn7p6dkU5UU8Zdlee9bqigTiMEuQK7hM=
+github.com/kubescape/go-git-url v0.0.21/go.mod h1:IbVT7Wsxlghsa+YxI5KOx4k9VQJaa3z0kTaQz5D3nKM=
github.com/kubescape/go-logger v0.0.6 h1:ynhAmwrz0O7Jtqq1CdmCZUrKveji25hVP+B/FAb3QrA=
github.com/kubescape/go-logger v0.0.6/go.mod h1:DnVWEvC90LFY1nNMaNo6nBVOcqkLMK3S0qzXP1fzRvI=
-github.com/kubescape/k8s-interface v0.0.89 h1:OtlvZosHpjlbHfsilfQk2wRbuBnxwF0e+WZX6GbkfLU=
-github.com/kubescape/k8s-interface v0.0.89/go.mod h1:pgFRs20mHiavf6+fFWY7h/f8HuKlwuZwirvjxiKJlu0=
-github.com/kubescape/opa-utils v0.0.204 h1:9O9drjyzjOhI7Xi2S4Px0WKa66U5GFPQqeOLvhDqHnw=
-github.com/kubescape/opa-utils v0.0.204/go.mod h1:rDC3PANuk8gU5lSDO/WPFTluypBQ+/6qiuZLye+slYg=
+github.com/kubescape/k8s-interface v0.0.94-0.20221228202834-4b64f2440950 h1:r9QwpzU4FG0B4XRbKx0J0VsmwO00+a6c2a8Utq5ae0k=
+github.com/kubescape/k8s-interface v0.0.94-0.20221228202834-4b64f2440950/go.mod h1:cFE6PoBm+31LjynY2XkzX19mRmaE9CRk2UQv/rEF3ZY=
+github.com/kubescape/opa-utils v0.0.223 h1:t39+P5eW1nsmt55Sx7NZU3Tv9IDBo0ljEYbOi9nlgVc=
+github.com/kubescape/opa-utils v0.0.223/go.mod h1:cKWsKl2t2XP7Mc3t1c3hNdf8Kg0sxikUcqATfq09vzU=
github.com/kubescape/rbac-utils v0.0.19 h1:7iydgVxlMLW15MgHORfMBMqNj9jHtFGACd744fdtrFs=
github.com/kubescape/rbac-utils v0.0.19/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -692,7 +686,6 @@ github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFW
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
@@ -739,11 +732,11 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
-github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU=
+github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
+github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
github.com/open-policy-agent/opa v0.45.0 h1:P5nuhVRtR+e58fk3CMMbiqr6ZFyWQPNOC3otsorGsFs=
github.com/open-policy-agent/opa v0.45.0/go.mod h1:/OnsYljNEWJ6DXeFOOnoGn8CvwZGMUS4iRqzYdJvmBI=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -764,6 +757,10 @@ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR
github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
github.com/phpdave11/gofpdi v1.0.7/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pjbgf/sha1cd v0.2.3 h1:uKQP/7QOzNtKYH7UTohZLcjF5/55EnTw0jO/Ru4jZwI=
+github.com/pjbgf/sha1cd v0.2.3/go.mod h1:HOK9QrgzdHpbc2Kzip0Q1yi3M2MFGPADtR6HjG65m5M=
+github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4 h1:Qj1ukM4GlMWXNdMBuXcXfz/Kw9s1qm0CLY32QxuSImI=
+github.com/pkg/browser v0.0.0-20210115035449-ce105d075bb4/go.mod h1:N6UoU20jOqggOuDwUaBQpluzLNDqif3kq9z2wpdYEfQ=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -856,6 +853,8 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0=
+github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -911,8 +910,8 @@ github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+
github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
github.com/whilp/git-urls v1.0.0 h1:95f6UMWN5FKW71ECsXRUd3FVYiXdrE7aX4NZKcPmIjU=
github.com/whilp/git-urls v1.0.0/go.mod h1:J16SAmobsqc3Qcy98brfl5f5+e0clUvg1krgwk/qCfE=
-github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI=
-github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
+github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
+github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
@@ -935,6 +934,7 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
@@ -993,7 +993,6 @@ golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
@@ -1007,13 +1006,13 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1062,8 +1061,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1109,7 +1109,6 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
@@ -1125,8 +1124,11 @@ golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI=
-golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10 h1:Frnccbp+ok2GkUS2tC84yAq/U9Vg+0sIO7aRL3T4Xnc=
+golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1166,6 +1168,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1189,7 +1192,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1229,12 +1231,10 @@ golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1247,6 +1247,7 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1263,12 +1264,20 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220829200755-d48e67d00261 h1:v6hYoSR9T5oet+pMXwUWkbiVqx/63mlHjefrHmxwfeY=
-golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1277,8 +1286,10 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1356,6 +1367,7 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1607,6 +1619,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
@@ -1624,17 +1637,17 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg=
-k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ=
-k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI=
+k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I=
+k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg=
k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k=
k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ=
k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
-k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc=
-k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo=
+k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg=
+k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI=
k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30=
-k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0=
-k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA=
+k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8=
+k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg=
k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -1647,8 +1660,8 @@ k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
-k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
-k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
+k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
+k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y=
diff --git a/httphandler/handlerequests/v1/datastructuremethods.go b/httphandler/handlerequests/v1/datastructuremethods.go
index 83c01132..d55a6d59 100644
--- a/httphandler/handlerequests/v1/datastructuremethods.go
+++ b/httphandler/handlerequests/v1/datastructuremethods.go
@@ -32,7 +32,7 @@ func ToScanInfo(scanRequest *utilsmetav1.PostScanRequest) *cautils.ScanInfo {
// UseCachedArtifacts
if scanRequest.UseCachedArtifacts != nil {
- if useCachedArtifacts := cautils.NewBoolPtr(scanRequest.UseCachedArtifacts); useCachedArtifacts.Get() != nil && !*useCachedArtifacts.Get() {
+ if useCachedArtifacts := cautils.NewBoolPtr(scanRequest.UseCachedArtifacts); useCachedArtifacts.Get() != nil && *useCachedArtifacts.Get() {
scanInfo.UseArtifactsFrom = getter.DefaultLocalStore // Load files from cache (this will prevent kubescape fom downloading the artifacts every time)
}
}
diff --git a/httphandler/handlerequests/v1/datastructuremethods_test.go b/httphandler/handlerequests/v1/datastructuremethods_test.go
index 05386650..4365f7fe 100644
--- a/httphandler/handlerequests/v1/datastructuremethods_test.go
+++ b/httphandler/handlerequests/v1/datastructuremethods_test.go
@@ -32,9 +32,9 @@ func TestToScanInfo(t *testing.T) {
assert.False(t, s.Submit)
assert.False(t, s.ScanAll)
assert.True(t, s.FrameworkScan)
- assert.Equal(t, "nsa", s.PolicyIdentifier[0].Name)
+ assert.Equal(t, "nsa", s.PolicyIdentifier[0].Identifier)
assert.Equal(t, apisv1.KindFramework, s.PolicyIdentifier[0].Kind)
- assert.Equal(t, "mitre", s.PolicyIdentifier[1].Name)
+ assert.Equal(t, "mitre", s.PolicyIdentifier[1].Identifier)
assert.Equal(t, apisv1.KindFramework, s.PolicyIdentifier[1].Kind)
}
{
@@ -49,7 +49,7 @@ func TestToScanInfo(t *testing.T) {
assert.Equal(t, "kube-system,kube-public", s.IncludeNamespaces)
assert.Equal(t, "", s.ExcludedNamespaces)
assert.Equal(t, 1, len(s.PolicyIdentifier))
- assert.Equal(t, "c-0001", s.PolicyIdentifier[0].Name)
+ assert.Equal(t, "c-0001", s.PolicyIdentifier[0].Identifier)
assert.Equal(t, apisv1.KindControl, s.PolicyIdentifier[0].Kind)
}
{
diff --git a/httphandler/httphandler b/httphandler/httphandler
deleted file mode 100755
index e907f510..00000000
Binary files a/httphandler/httphandler and /dev/null differ
diff --git a/install.ps1 b/install.ps1
index 08434c74..342a671d 100644
--- a/install.ps1
+++ b/install.ps1
@@ -12,7 +12,17 @@ $fullUrl = $url + $packageName
New-Item -Path $BASE_DIR -ItemType "directory" -ErrorAction SilentlyContinue
# Download the binary
-Invoke-WebRequest -Uri $fullUrl -OutFile $BASE_DIR\kubescape.exe
+$useBitTransfer = $null -ne (Get-Module -Name BitsTransfer -ListAvailable) -and ($PSVersionTable.PSVersion.Major -le 5)
+if ($useBitTransfer)
+ {
+ Write-Information -MessageData 'Using a fallback BitTransfer method since you are running Windows PowerShell'
+ Start-BitsTransfer -Source $fullUrl -Destination $BASE_DIR\kubescape.exe
+
+ }
+ else
+ {
+ Invoke-WebRequest -Uri $fullUrl -OutFile $BASE_DIR\kubescape.exe
+ }
# Update user PATH if needed
$currentPath = [Environment]::GetEnvironmentVariable("Path", "User")
diff --git a/install.sh b/install.sh
index 213860c9..9b180dd7 100755
--- a/install.sh
+++ b/install.sh
@@ -66,6 +66,6 @@ echo -e "\033[0m"
$KUBESCAPE_EXEC version
echo
-echo -e "\033[35mUsage: $ $KUBESCAPE_EXEC scan --enable-host-scan --verbose"
+echo -e "\033[35mUsage: $ $KUBESCAPE_EXEC scan --enable-host-scan"
echo -e "\033[0m"
diff --git a/main.go b/main.go
index 1db965e6..1049aaab 100644
--- a/main.go
+++ b/main.go
@@ -3,7 +3,6 @@ package main
import (
logger "github.com/kubescape/go-logger"
"github.com/kubescape/kubescape/v2/cmd"
-
)
func main() {
diff --git a/smoke_testing/test_scan.py b/smoke_testing/test_scan.py
index f890d8f8..aba956e0 100644
--- a/smoke_testing/test_scan.py
+++ b/smoke_testing/test_scan.py
@@ -21,7 +21,7 @@ def scan_control_id(kubescape_exec: str):
def scan_controls(kubescape_exec: str):
- return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'HostPath mount,Allow privilege escalation', all_files, "--enable-host-scan=false"])
+ return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'C-0048,C-0016', all_files, "--enable-host-scan=false"])
def scan_framework(kubescape_exec: str):
@@ -48,10 +48,6 @@ def run(kubescape_exec: str):
# msg = scan_all(kubescape_exec=kubescape_exec)
# smoke_utils.assertion(msg)
- print("Testing scan control name")
- msg = scan_control_name(kubescape_exec=kubescape_exec)
- smoke_utils.assertion(msg)
-
print("Testing scan control id")
msg = scan_control_id(kubescape_exec=kubescape_exec)
smoke_utils.assertion(msg)