Mirror of https://github.com/kubescape/kubescape.git (synced 2026-02-15 18:40:14 +00:00)

Compare commits: v2.0.362...fix-comman (144 commits)
Commits (SHA1):

b0f65cce1d, 0e8b2f976d, f1d646ac97, 5f668037a7, c448c97463, da3bc8e8ea, 2fce139a9a, 85d2f5c250,
c3771eec7e, 76d2154152, fa5e7fef23, 38d2696058, e9a8ffbda9, 0d76fffa48, 218c77f3ae, 89fd7eb439,
8079f9ae7d, f9a26b7a95, 663401d908, 926790f49d, 566b7c29c1, af5cdefc5f, 36b7b8e2ac, 17c52bd0ae,
e02086e90c, baf62887b9, 99fa81e411, f64200f42f, f72cb215d7, fa03a9dae3, 48516b891f, 252a564552,
30e5b9b57d, 7fcfa27d9a, 4b898b0075, f3665866af, a7989bbe76, 5ce69a750d, 2b61989073, be33054973,
4b9bd5f3ae, fb1c728b12, 6964ca0d18, 691fa61362, 0c1eda0d08, 767eac2fa6, 6f651fa2d0, e3362c2e3d,
08b8b68f9a, daf9ca9e7f, d1024359c9, ed6070aff9, e4dbfa3534, ddd2b707c0, cd4f1077c2, b472d1cb9d,
922e2548f4, 45caa7c120, 670ae45d62, 05bcf018d1, 0af5d2e0bb, eaf05fe9be, e97b23f345, 83a00ded3d,
78f81cc968, 5d3347b4fe, 64d2ef8170, 7c1e360b9a, 575d36dcde, 8dba8f7491, cc39e5b905, 0be7e6018f,
7697e3f0c4, 379800c49f, 79e2515807, 342f5743e2, 0e81870b85, 4277331ee2, 53561a728f, d0fd8c4fe4,
398989510b, f8e3ad5685, fbea7ef874, dc2c6f8a21, 5ee08583b6, bfbd278e7c, 4c6e5903e3, a7cd5672c1,
22521b7159, e5fb14138e, 1b2242330c, 356958cc55, 8f1da32001, 686352a397, ef79c42ebc, c8fc5378c1,
c296666d8e, f193e260b0, 82981a9a54, 3be54ca484, 2f2c177674, 1f47223918, eb646696a3, 7cfe5160d5,
95135c4379, 7e604d6a5b, 64ac2666f9, 05b3459342, 92ad5f2407, e3c60e3202, 7b5bcb05b1, 154f94a0af,
063d3ee313, 79859d05c0, acd3a94c46, 13f09315e7, 890528bf14, e4aafcf81e, 81c3c34ab8, b7b83b26b5,
4b98490ff9, 6ea18ec75b, 3b9c454245, 10fa3cb27d, d8f95edff5, 37ffe86d8b, 87fdbfdcc5, 424a218860,
12f81353e0, d6427f0fc8, f33a6d7634, 5a01a1a30a, ba588b9eef, f48b848eb6, f81fd74aa3, ad608b08e0,
f9e80b709a, f75b62e62c, 1c24a55d4b, 03418299b8, f5bd86593c, 2af78eaab2, 67cd003afe, 0bc542f851
.github/workflows/build.yaml (vendored, 6 lines changed)

@@ -25,6 +25,8 @@ jobs:
name: Build and publish artifacts
needs: create-release
runs-on: ${{ matrix.os }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]

@@ -77,13 +79,13 @@ jobs:
asset_content_type: application/octet-stream

publish-image:
# if: ${{ github.repository == 'kubescape/kubescape' }} # TODO
if: ${{ github.repository == 'kubescape/kubescape' }} # TODO
uses: ./.github/workflows/build-image.yaml
needs: create-release
with:
client: "image-release"
image_name: "quay.io/${{ github.repository_owner }}/kubescape"
image_tag: "v2.0.${{ github.run_number }}"
support_platforms: true
support_platforms: false
cosign: true
secrets: inherit

.github/workflows/build_dev.yaml (vendored, 4 lines changed)

@@ -2,7 +2,7 @@ name: build-dev

on:
push:
branches: [ gh-testing ]
branches: [ dev ]
paths-ignore:
# Do not run the pipeline if only Markdown files changed
- '**.md'

@@ -14,7 +14,7 @@ jobs:
client: test

publish-dev-image:
# if: ${{ github.repository == 'kubescape/kubescape' }} # TODO
if: ${{ github.repository == 'kubescape/kubescape' }} # TODO
uses: ./.github/workflows/build-image.yaml
needs: test
with:

.github/workflows/pr_checks.yaml (vendored, 4 lines changed)

@@ -8,10 +8,8 @@ on:
# Do not run the pipeline if only Markdown files changed
- '**.yaml'
- '**.md'
push:
branches: [ workflow-call-testing ]
jobs:
trigger-test-workflow:
test:
uses: ./.github/workflows/test.yaml
with:
release: "v2.0.${{ github.run_number }}"

.github/workflows/test.yaml (vendored, 2 lines changed)

@@ -15,6 +15,8 @@ jobs:
build:
name: Create cross-platform build
runs-on: ${{ matrix.os }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]

README.md (45 lines changed)

@@ -40,7 +40,7 @@ curl -s https://raw.githubusercontent.com/kubescape/kubescape/master/install.sh

## Run:
```sh
kubescape scan --submit --enable-host-scan --verbose
kubescape scan --enable-host-scan --verbose
```

<img src="docs/summary.png">

@@ -52,11 +52,15 @@ kubescape scan --submit --enable-host-scan --verbose
</br>

## Architecture in short
### CLI
<img src="docs/ks-cli-arch.png" width="300">
### [CLI](#kubescape-cli)
<div align="center">
<img src="docs/ks-cli-arch.png" width="300" alt="cli-diagram">
</div>

### Operator
<img src="docs/ks-operator-arch.png" width="300">
### [Operator](https://github.com/kubescape/helm-charts#readme)
<div align="center">
<img src="docs/ks-operator-arch.png" width="300" alt="operator-diagram">
</div>

### Please [star ⭐](https://github.com/kubescape/kubescape/stargazers) the repo if you want us to continue developing and improving Kubescape 😀

@@ -92,6 +96,7 @@ We hold community meetings in [Zoom](https://us02web.zoom.us/j/84020231442) on t
* [Overview](https://youtu.be/wdBkt_0Qhbg)
* [How To Secure Kubernetes Clusters With Kubescape And Armo](https://youtu.be/ZATGiDIDBQk)
* [Scan Kubernetes YAML files](https://youtu.be/Ox6DaR7_4ZI)
* [Scan container image registry](https://youtu.be/iQ_k8EnK-3s)
* [Scan Kubescape on an air-gapped environment (offline support)](https://youtu.be/IGXL9s37smM)
* [Managing exceptions in the Kubescape SaaS version](https://youtu.be/OzpvxGmCR80)
* [Configure and run customized frameworks](https://youtu.be/12Sanq_rEhs)

@@ -170,22 +175,22 @@ Or to your profile (not preferred): `nix-env --install -A nixpkgs.kubescape`
### Examples

#### Scan a running Kubernetes cluster and submit results to the [Kubescape SaaS version](https://cloud.armosec.io?utm_source=github&utm_medium=repository)
#### Scan a running Kubernetes cluster
```
kubescape scan --submit --enable-host-scan --verbose
kubescape scan --enable-host-scan --verbose
```

> Read [here](https://hub.armosec.io/docs/host-sensor?utm_source=github&utm_medium=repository) more about the `enable-host-scan` flag

#### Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework and submit results to the [Kubescape SaaS version](https://cloud.armosec.io?utm_source=github&utm_medium=repository)
#### Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework
```
kubescape scan framework nsa --submit
kubescape scan framework nsa
```

#### Scan a running Kubernetes cluster with [`MITRE ATT&CK®`](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) framework and submit results to the [Kubescape SaaS version](https://cloud.armosec.io?utm_source=github&utm_medium=repository)
#### Scan a running Kubernetes cluster with [`MITRE ATT&CK®`](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) framework
```
kubescape scan framework mitre --submit
kubescape scan framework mitre
```

@@ -194,6 +199,11 @@ kubescape scan framework mitre --submit
kubescape scan control "Privileged container"
```

#### Scan using an alternative kubeconfig file
```
kubescape scan --kubeconfig cluster.conf
```

#### Scan specific namespaces
```
kubescape scan --include-namespaces development,staging,production

@@ -204,14 +214,13 @@ kubescape scan --include-namespaces development,staging,production
kubescape scan --exclude-namespaces kube-system,kube-public
```

#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI). Submit the results in case the directory is a git repo. [docs](https://hub.armosec.io/docs/repository-scanning?utm_source=github&utm_medium=repository)
#### Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI).
```
kubescape scan *.yaml --submit
kubescape scan *.yaml
```

#### Scan Kubernetes manifest files from a git repository [and submit the results](https://hub.armosec.io/docs/repository-scanning?utm_source=github&utm_medium=repository)
```
kubescape scan https://github.com/kubescape/kubescape --submit
#### Scan Kubernetes manifest files from a git repository
kubescape scan https://github.com/kubescape/kubescape
```

#### Display all scanned resources (including the resources which passed)

@@ -258,13 +267,13 @@ kubescape scan --exceptions examples/exceptions/exclude-kube-namespaces.json

#### Scan Helm charts
```
kubescape scan </path/to/directory> --submit
kubescape scan </path/to/directory>
```
> Kubescape will load the default value file

#### Scan Kustomize Directory
```
kubescape scan </path/to/directory> --submit
kubescape scan </path/to/directory>
```
> Kubescape will generate Kubernetes Yaml Objects using 'Kustomize' file and scans them for security.

build.py (11 lines changed)

@@ -14,14 +14,15 @@ def check_status(status, msg):

def get_build_dir():
current_platform = platform.system()
build_dir = "./build/"
build_dir = ""

if current_platform == "Windows": build_dir += "windows-latest"
elif current_platform == "Linux": build_dir += "ubuntu-latest"
elif current_platform == "Darwin": build_dir += "macos-latest"
if current_platform == "Windows": build_dir = "windows-latest"
elif current_platform == "Linux": build_dir = "ubuntu-latest"
elif current_platform == "Darwin": build_dir = "macos-latest"
else: raise OSError("Platform %s is not supported!" % (current_platform))

return build_dir
return os.path.join("build", build_dir)

def get_package_name():
package_name = "kubescape"

@@ -9,7 +9,6 @@ ENV CLIENT=$client
ENV GO111MODULE=

ENV CGO_ENABLED=1
ENV PATH=$PATH:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin

# Install required python/pip
ENV PYTHONUNBUFFERED=1

@@ -25,6 +25,9 @@ var (
|
||||
|
||||
# Set access key
|
||||
kubescape config set secretKey <access key>
|
||||
|
||||
# Set cloudAPIURL
|
||||
kubescape config set cloudAPIURL <cloud API URL>
|
||||
`
|
||||
)
|
||||
|
||||
|
||||
@@ -33,9 +33,13 @@ func getSetCmd(ks meta.IKubescape) *cobra.Command {
|
||||
}
|
||||
|
||||
var supportConfigSet = map[string]func(*metav1.SetConfig, string){
|
||||
"accountID": func(s *metav1.SetConfig, account string) { s.Account = account },
|
||||
"clientID": func(s *metav1.SetConfig, clientID string) { s.ClientID = clientID },
|
||||
"secretKey": func(s *metav1.SetConfig, secretKey string) { s.SecretKey = secretKey },
|
||||
"accountID": func(s *metav1.SetConfig, account string) { s.Account = account },
|
||||
"clientID": func(s *metav1.SetConfig, clientID string) { s.ClientID = clientID },
|
||||
"secretKey": func(s *metav1.SetConfig, secretKey string) { s.SecretKey = secretKey },
|
||||
"cloudAPIURL": func(s *metav1.SetConfig, cloudAPIURL string) { s.CloudAPIURL = cloudAPIURL },
|
||||
"cloudAuthURL": func(s *metav1.SetConfig, cloudAuthURL string) { s.CloudAuthURL = cloudAuthURL },
|
||||
"cloudReportURL": func(s *metav1.SetConfig, cloudReportURL string) { s.CloudReportURL = cloudReportURL },
|
||||
"cloudUIURL": func(s *metav1.SetConfig, cloudUIURL string) { s.CloudUIURL = cloudUIURL },
|
||||
}
|
||||
|
||||
func stringKeysToSlice(m map[string]func(*metav1.SetConfig, string)) []string {
|
||||
|
||||
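The hunk above extends supportConfigSet, a map from config key to a setter on metav1.SetConfig; this is what lets `kubescape config set <key> <value>` dispatch on the key without a switch statement. A minimal, self-contained sketch of that pattern, with a simplified stand-in struct and illustrative key/value (not the real Kubescape types):

```go
package main

import "fmt"

// SetConfig is a simplified stand-in for metav1.SetConfig.
type SetConfig struct {
	Account     string
	CloudAPIURL string
}

// setters mirrors the supportConfigSet pattern: config key -> setter function.
var setters = map[string]func(*SetConfig, string){
	"accountID":   func(s *SetConfig, v string) { s.Account = v },
	"cloudAPIURL": func(s *SetConfig, v string) { s.CloudAPIURL = v },
}

func main() {
	cfg := &SetConfig{}
	key, value := "cloudAPIURL", "https://api.example.com" // illustrative input
	if set, ok := setters[key]; ok {
		set(cfg, value) // dispatch on the key, no switch statement needed
	} else {
		fmt.Printf("unsupported key %q\n", key)
	}
	fmt.Printf("%+v\n", cfg)
}
```

Adding a new supported key then only requires a new map entry, which is exactly what the diff above does for the cloud URL keys.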
cmd/fix/fix.go (new file, 45 lines)

@@ -0,0 +1,45 @@
package fix

import (
"errors"

"github.com/kubescape/kubescape/v2/core/meta"
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"

"github.com/spf13/cobra"
)

var fixCmdExamples = `
Fix command is for fixing kubernetes manifest files based on a scan command output.
Use with caution, this command will change your files in-place.

# Fix kubernetes YAML manifest files based on a scan command output (output.json)
1) kubescape scan --format json --format-version v2 --output output.json
2) kubescape fix output.json

`

func GetFixCmd(ks meta.IKubescape) *cobra.Command {
var fixInfo metav1.FixInfo

fixCmd := &cobra.Command{
Use: "fix <report output file>",
Short: "Fix misconfiguration in files",
Long: ``,
Example: fixCmdExamples,
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return errors.New("report output file is required")
}
fixInfo.ReportFile = args[0]

return ks.Fix(&fixInfo)
},
}

fixCmd.PersistentFlags().BoolVar(&fixInfo.NoConfirm, "no-confirm", false, "No confirmation will be given to the user before applying the fix (default false)")
fixCmd.PersistentFlags().BoolVar(&fixInfo.DryRun, "dry-run", false, "No changes will be applied (default false)")
fixCmd.PersistentFlags().BoolVar(&fixInfo.SkipUserValues, "skip-user-values", true, "Changes which involve user-defined values will be skipped")

return fixCmd
}

@@ -10,9 +10,11 @@ import (
"github.com/kubescape/kubescape/v2/cmd/config"
"github.com/kubescape/kubescape/v2/cmd/delete"
"github.com/kubescape/kubescape/v2/cmd/download"
"github.com/kubescape/kubescape/v2/cmd/fix"
"github.com/kubescape/kubescape/v2/cmd/list"
"github.com/kubescape/kubescape/v2/cmd/scan"
"github.com/kubescape/kubescape/v2/cmd/submit"
"github.com/kubescape/kubescape/v2/cmd/update"
"github.com/kubescape/kubescape/v2/cmd/version"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/cautils/getter"

@@ -76,6 +78,8 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
rootCmd.AddCommand(completion.GetCompletionCmd())
rootCmd.AddCommand(version.GetVersionCmd())
rootCmd.AddCommand(config.GetConfigCmd(ks))
rootCmd.AddCommand(update.GetUpdateCmd())
rootCmd.AddCommand(fix.GetFixCmd(ks))

return rootCmd
}

@@ -1,6 +1,7 @@
package scan

import (
"flag"
"fmt"

"github.com/kubescape/k8s-interface/k8sinterface"

@@ -13,7 +14,7 @@ var scanCmdExamples = `
Scan command is for scanning an existing cluster or kubernetes manifest files based on pre-defined frameworks

# Scan current cluster with all frameworks
kubescape scan --submit --enable-host-scan --verbose
kubescape scan --enable-host-scan --verbose

# Scan kubernetes YAML manifest files
kubescape scan *.yaml

@@ -70,34 +71,33 @@ func GetScanCommand(ks meta.IKubescape) *cobra.Command {
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to an controls-config obj. If not set will download controls-config from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseArtifactsFrom, "use-artifacts-from", "", "Load artifacts from local directory. If not used will download them")
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system,kube-public")
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Notice, when running with `exclude-namespace` kubescape does not scan cluster-scoped objects.")

scanCmd.PersistentFlags().Float32VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")

scanCmd.PersistentFlags().StringVar(&scanInfo.FailThresholdSeverity, "severity-threshold", "", "Severity threshold is the severity of failed controls at which the command fails and returns exit code 1")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer", "json", "junit", "prometheus", "pdf", "html"`)
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer", "json", "junit", "prometheus", "pdf", "html", "sarif"`)
scanCmd.PersistentFlags().StringVar(&scanInfo.IncludeNamespaces, "include-namespaces", "", "scan specific namespaces. e.g: --include-namespaces ns-a,ns-b")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to ARMO backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to configured backend.")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.VerboseMode, "verbose", "v", false, "Display all of the input resources and not only failed resources")
scanCmd.PersistentFlags().StringVar(&scanInfo.View, "view", string(cautils.ResourceViewType), fmt.Sprintf("View results based on the %s/%s. default is --view=%s", cautils.ResourceViewType, cautils.ControlViewType, cautils.ResourceViewType))
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to ARMO management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
scanCmd.PersistentFlags().StringVar(&scanInfo.HostSensorYamlPath, "host-scan-yaml", "", "Override default host scanner DaemonSet. Use this flag cautiously")
scanCmd.PersistentFlags().StringVar(&scanInfo.FormatVersion, "format-version", "v1", "Output object can be different between versions, this is for maintaining backward and forward compatibility. Supported:'v1'/'v2'")
scanCmd.PersistentFlags().StringVar(&scanInfo.CustomClusterName, "cluster-name", "", "Set the custom name of the cluster. Not same as the kube-context flag")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Submit the scan results to Kubescape SaaS where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")

// Deprecated flags - remove 1.May.2022
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Silent, "silent", "s", false, "Silent progress messages")
scanCmd.PersistentFlags().MarkDeprecated("silent", "use '--logger' flag instead. Flag will be removed at 1.May.2022")

// hidden flags
scanCmd.PersistentFlags().MarkHidden("host-scan-yaml") // this flag should be used very cautiously. We prefer users will not use it at all unless the DaemonSet can not run pods on the nodes
scanCmd.PersistentFlags().MarkHidden("silent") // this flag should be deprecated since we added the --logger support
// scanCmd.PersistentFlags().MarkHidden("format-version") // meant for testing different output approaches and not for common use

hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://github.com/kubescape/kubescape/blob/master/core/pkg/hostsensorutils/hostsensor.yaml")
// Retrieve --kubeconfig flag from https://github.com/kubernetes/kubectl/blob/master/pkg/cmd/cmd.go
scanCmd.PersistentFlags().AddGoFlag(flag.Lookup("kubeconfig"))

hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy Kubescape host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://github.com/kubescape/kubescape/blob/master/core/pkg/hostsensorutils/hostsensor.yaml")
hostF.NoOptDefVal = "true"
hostF.DefValue = "false, for no TTY in stdin"

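The `hostF.NoOptDefVal = "true"` assignment above is what allows a bare `--enable-host-scan` (no value) for a flag registered through VarPF with a custom pflag.Value. A small, self-contained sketch of that pflag behavior; the boolValue type and flag set are illustrative stand-ins, not the Kubescape scanInfo types:

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

// boolValue is a minimal pflag.Value, standing in for the HostSensorEnabled field.
type boolValue bool

func (b *boolValue) String() string { return fmt.Sprintf("%v", bool(*b)) }
func (b *boolValue) Type() string   { return "bool" }
func (b *boolValue) Set(s string) error {
	*b = boolValue(s == "true") // simplified parsing for the sketch
	return nil
}

func main() {
	var enabled boolValue
	fs := pflag.NewFlagSet("scan", pflag.ContinueOnError)
	f := fs.VarPF(&enabled, "enable-host-scan", "", "deploy the host-sensor DaemonSet")
	// Without NoOptDefVal a custom Value flag needs an explicit value
	// (--enable-host-scan=true); with it, a bare --enable-host-scan means "true".
	f.NoOptDefVal = "true"
	_ = fs.Parse([]string{"--enable-host-scan"})
	fmt.Println(bool(enabled)) // true
}
```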
cmd/update/update.go (new file, 59 lines)

@@ -0,0 +1,59 @@
package update

//This update command updates to the latest kubescape release.
//Example:-
// kubescape update

import (
"os/exec"
"runtime"

logger "github.com/kubescape/go-logger"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/spf13/cobra"
)

func GetUpdateCmd() *cobra.Command {
updateCmd := &cobra.Command{
Use: "update",
Short: "Update your version",
Long: ``,
RunE: func(_ *cobra.Command, args []string) error {
//Checking the user's version of kubescape to the latest release
if cautils.BuildNumber == cautils.LatestReleaseVersion {
//your version == latest version
logger.L().Info(("You are in the latest version"))
} else {

const OSTYPE string = runtime.GOOS
var ShellToUse string
switch OSTYPE {

case "windows":
cautils.StartSpinner()
//run the installation command for windows
ShellToUse = "powershell"
_, err := exec.Command(ShellToUse, "-c", "iwr -useb https://raw.githubusercontent.com/kubescape/kubescape/master/install.ps1 | iex").Output()

if err != nil {
logger.L().Fatal(err.Error())
}
cautils.StopSpinner()

default:
ShellToUse = "bash"
cautils.StartSpinner()
//run the installation command for linux and macOS
_, err := exec.Command(ShellToUse, "-c", "curl -s https://raw.githubusercontent.com/kubescape/kubescape/master/install.sh | /bin/bash").Output()
if err != nil {
logger.L().Fatal(err.Error())
}

cautils.StopSpinner()
}
}
return nil
},
}
return updateCmd
}

@@ -11,6 +11,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
corev1 "k8s.io/api/core/v1"

@@ -32,6 +33,10 @@ type ConfigObj struct {
Token string `json:"invitationParam,omitempty"`
CustomerAdminEMail string `json:"adminMail,omitempty"`
ClusterName string `json:"clusterName,omitempty"`
CloudReportURL string `json:"cloudReportURL,omitempty"`
CloudAPIURL string `json:"cloudAPIURL,omitempty"`
CloudUIURL string `json:"cloudUIURL,omitempty"`
CloudAuthURL string `json:"cloudAuthURL,omitempty"`
}

// Config - convert ConfigObj to config file

@@ -75,6 +80,10 @@ type ITenantConfig interface {
GetClientID() string
GetSecretKey() string
GetConfigObj() *ConfigObj
GetCloudReportURL() string
GetCloudAPIURL() string
GetCloudUIURL() string
GetCloudAuthURL() string
// GetBackendAPI() getter.IBackend
// GenerateURL()

@@ -103,10 +112,11 @@ func NewLocalConfig(
}

updateCredentials(lc.configObj, credentials)
updateCloudURLs(lc.configObj)

// If a custom cluster name is provided then set that name, else use the cluster's original name
if customClusterName != "" {
lc.configObj.ClusterName = AdoptCustomClusterName(customClusterName)
lc.configObj.ClusterName = AdoptClusterName(customClusterName)
} else if clusterName != "" {
lc.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
}

@@ -114,18 +124,43 @@ func NewLocalConfig(
lc.backendAPI.SetAccountID(lc.configObj.AccountID)
lc.backendAPI.SetClientID(lc.configObj.ClientID)
lc.backendAPI.SetSecretKey(lc.configObj.SecretKey)
if lc.configObj.CloudAPIURL != "" {
lc.backendAPI.SetCloudAPIURL(lc.configObj.CloudAPIURL)
} else {
lc.configObj.CloudAPIURL = lc.backendAPI.GetCloudAPIURL()
}
if lc.configObj.CloudAuthURL != "" {
lc.backendAPI.SetCloudAuthURL(lc.configObj.CloudAuthURL)
} else {
lc.configObj.CloudAuthURL = lc.backendAPI.GetCloudAuthURL()
}
if lc.configObj.CloudReportURL != "" {
lc.backendAPI.SetCloudReportURL(lc.configObj.CloudReportURL)
} else {
lc.configObj.CloudReportURL = lc.backendAPI.GetCloudReportURL()
}
if lc.configObj.CloudUIURL != "" {
lc.backendAPI.SetCloudUIURL(lc.configObj.CloudUIURL)
} else {
lc.configObj.CloudUIURL = lc.backendAPI.GetCloudUIURL()
}
logger.L().Debug("Kubescape Cloud URLs", helpers.String("api", lc.backendAPI.GetCloudAPIURL()), helpers.String("auth", lc.backendAPI.GetCloudAuthURL()), helpers.String("report", lc.backendAPI.GetCloudReportURL()), helpers.String("UI", lc.backendAPI.GetCloudUIURL()))

return lc
}

func (lc *LocalConfig) GetConfigObj() *ConfigObj { return lc.configObj }
func (lc *LocalConfig) GetTenantEmail() string { return lc.configObj.CustomerAdminEMail }
func (lc *LocalConfig) GetAccountID() string { return lc.configObj.AccountID }
func (lc *LocalConfig) GetClientID() string { return lc.configObj.ClientID }
func (lc *LocalConfig) GetSecretKey() string { return lc.configObj.SecretKey }
func (lc *LocalConfig) GetContextName() string { return lc.configObj.ClusterName }
func (lc *LocalConfig) GetToken() string { return lc.configObj.Token }
func (lc *LocalConfig) IsConfigFound() bool { return existsConfigFile() }
func (lc *LocalConfig) GetConfigObj() *ConfigObj { return lc.configObj }
func (lc *LocalConfig) GetTenantEmail() string { return lc.configObj.CustomerAdminEMail }
func (lc *LocalConfig) GetAccountID() string { return lc.configObj.AccountID }
func (lc *LocalConfig) GetClientID() string { return lc.configObj.ClientID }
func (lc *LocalConfig) GetSecretKey() string { return lc.configObj.SecretKey }
func (lc *LocalConfig) GetContextName() string { return lc.configObj.ClusterName }
func (lc *LocalConfig) GetToken() string { return lc.configObj.Token }
func (lc *LocalConfig) GetCloudReportURL() string { return lc.configObj.CloudReportURL }
func (lc *LocalConfig) GetCloudAPIURL() string { return lc.configObj.CloudAPIURL }
func (lc *LocalConfig) GetCloudUIURL() string { return lc.configObj.CloudUIURL }
func (lc *LocalConfig) GetCloudAuthURL() string { return lc.configObj.CloudAuthURL }
func (lc *LocalConfig) IsConfigFound() bool { return existsConfigFile() }
func (lc *LocalConfig) SetTenant() error {

// Kubescape Cloud tenant GUID

@@ -213,10 +248,11 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
loadConfigFromFile(c.configObj)
}
updateCredentials(c.configObj, credentials)
updateCloudURLs(c.configObj)

// If a custom cluster name is provided then set that name, else use the cluster's original name
if customClusterName != "" {
c.configObj.ClusterName = AdoptCustomClusterName(customClusterName)
c.configObj.ClusterName = AdoptClusterName(customClusterName)
} else if clusterName != "" {
c.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
}

@@ -230,18 +266,44 @@ func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBacken
c.backendAPI.SetAccountID(c.configObj.AccountID)
c.backendAPI.SetClientID(c.configObj.ClientID)
c.backendAPI.SetSecretKey(c.configObj.SecretKey)
if c.configObj.CloudAPIURL != "" {
c.backendAPI.SetCloudAPIURL(c.configObj.CloudAPIURL)
} else {
c.configObj.CloudAPIURL = c.backendAPI.GetCloudAPIURL()
}
if c.configObj.CloudAuthURL != "" {
c.backendAPI.SetCloudAuthURL(c.configObj.CloudAuthURL)
} else {
c.configObj.CloudAuthURL = c.backendAPI.GetCloudAuthURL()
}
if c.configObj.CloudReportURL != "" {
c.backendAPI.SetCloudReportURL(c.configObj.CloudReportURL)
} else {
c.configObj.CloudReportURL = c.backendAPI.GetCloudReportURL()
}
if c.configObj.CloudUIURL != "" {
c.backendAPI.SetCloudUIURL(c.configObj.CloudUIURL)
} else {
c.configObj.CloudUIURL = c.backendAPI.GetCloudUIURL()
}
logger.L().Debug("Kubescape Cloud URLs", helpers.String("api", c.backendAPI.GetCloudAPIURL()), helpers.String("auth", c.backendAPI.GetCloudAuthURL()), helpers.String("report", c.backendAPI.GetCloudReportURL()), helpers.String("UI", c.backendAPI.GetCloudUIURL()))

return c
}

func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
func (c *ClusterConfig) GetDefaultNS() string { return c.configMapNamespace }
func (c *ClusterConfig) GetAccountID() string { return c.configObj.AccountID }
func (c *ClusterConfig) GetClientID() string { return c.configObj.ClientID }
func (c *ClusterConfig) GetSecretKey() string { return c.configObj.SecretKey }
func (c *ClusterConfig) GetTenantEmail() string { return c.configObj.CustomerAdminEMail }
func (c *ClusterConfig) GetToken() string { return c.configObj.Token }
func (c *ClusterConfig) IsConfigFound() bool { return existsConfigFile() || c.existsConfigMap() }
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
func (c *ClusterConfig) GetDefaultNS() string { return c.configMapNamespace }
func (c *ClusterConfig) GetAccountID() string { return c.configObj.AccountID }
func (c *ClusterConfig) GetClientID() string { return c.configObj.ClientID }
func (c *ClusterConfig) GetSecretKey() string { return c.configObj.SecretKey }
func (c *ClusterConfig) GetTenantEmail() string { return c.configObj.CustomerAdminEMail }
func (c *ClusterConfig) GetToken() string { return c.configObj.Token }
func (c *ClusterConfig) GetCloudReportURL() string { return c.configObj.CloudReportURL }
func (c *ClusterConfig) GetCloudAPIURL() string { return c.configObj.CloudAPIURL }
func (c *ClusterConfig) GetCloudUIURL() string { return c.configObj.CloudUIURL }
func (c *ClusterConfig) GetCloudAuthURL() string { return c.configObj.CloudAuthURL }

func (c *ClusterConfig) IsConfigFound() bool { return existsConfigFile() || c.existsConfigMap() }

func (c *ClusterConfig) SetTenant() error {

@@ -474,19 +536,6 @@ func DeleteConfigFile() error {
return os.Remove(ConfigFileFullPath())
}

// To check if the custom cluster name is valid:
func AdoptCustomClusterName(customClusterName string) string {
is_alphanumeric := regexp.MustCompile(`^[a-zA-Z0-9]*$`).MatchString(customClusterName)

// Check it does not contain special-characters
if is_alphanumeric == false {
logger.L().Fatal("custom cluster name cannot contain special characters")
} else if len(customClusterName) >= 256 { // Check it contains less than 256 characters
logger.L().Fatal("custom cluster name cannot contain more than 255 characters")
}
return customClusterName
}

func AdoptClusterName(clusterName string) string {
re, err := regexp.Compile(`[^\w]+`)
if err != nil {

@@ -540,3 +589,39 @@ func updateCredentials(configObj *ConfigObj, credentials *Credentials) {
}

}

func getCloudURLsFromEnv(cloudURLs *CloudURLs) {
// load from env
if cloudAPIURL := os.Getenv("KS_CLOUD_API_URL"); cloudAPIURL != "" {
cloudURLs.CloudAPIURL = cloudAPIURL
}
if cloudAuthURL := os.Getenv("KS_CLOUD_AUTH_URL"); cloudAuthURL != "" {
cloudURLs.CloudAuthURL = cloudAuthURL
}
if cloudReportURL := os.Getenv("KS_CLOUD_REPORT_URL"); cloudReportURL != "" {
cloudURLs.CloudReportURL = cloudReportURL
}
if cloudUIURL := os.Getenv("KS_CLOUD_UI_URL"); cloudUIURL != "" {
cloudURLs.CloudUIURL = cloudUIURL
}
}

func updateCloudURLs(configObj *ConfigObj) {
cloudURLs := &CloudURLs{}

getCloudURLsFromEnv(cloudURLs)

if cloudURLs.CloudAPIURL != "" {
configObj.CloudAPIURL = cloudURLs.CloudAPIURL // override config CloudAPIURL
}
if cloudURLs.CloudAuthURL != "" {
configObj.CloudAuthURL = cloudURLs.CloudAuthURL // override config CloudAuthURL
}
if cloudURLs.CloudReportURL != "" {
configObj.CloudReportURL = cloudURLs.CloudReportURL // override config CloudReportURL
}
if cloudURLs.CloudUIURL != "" {
configObj.CloudUIURL = cloudURLs.CloudUIURL // override config CloudUIURL
}

}

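getCloudURLsFromEnv and updateCloudURLs above give the KS_CLOUD_*_URL environment variables precedence over whatever is already in the config object, and NewLocalConfig/NewClusterConfig then fall back to the backend defaults for any URL that is still empty. A minimal sketch of the override step, using simplified stand-ins for ConfigObj and CloudURLs rather than the actual cautils types (the override value below is illustrative):

```go
package main

import (
	"fmt"
	"os"
)

// Simplified stand-ins for cautils.ConfigObj and cautils.CloudURLs.
type ConfigObj struct{ CloudAPIURL string }
type CloudURLs struct{ CloudAPIURL string }

// updateCloudURLs mirrors the pattern above: a KS_CLOUD_* env var, when set, wins.
func updateCloudURLs(configObj *ConfigObj) {
	cloudURLs := &CloudURLs{}
	if v := os.Getenv("KS_CLOUD_API_URL"); v != "" {
		cloudURLs.CloudAPIURL = v
	}
	if cloudURLs.CloudAPIURL != "" {
		configObj.CloudAPIURL = cloudURLs.CloudAPIURL // override config CloudAPIURL
	}
}

func main() {
	configObj := &ConfigObj{CloudAPIURL: "api.armosec.io"} // value read from the config file
	os.Setenv("KS_CLOUD_API_URL", "api.example.internal")  // illustrative override
	updateCloudURLs(configObj)
	fmt.Println(configObj.CloudAPIURL) // api.example.internal
}
```

The TestUpdateCloudURLs case in the test file below exercises exactly this behavior.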
@@ -2,6 +2,7 @@ package cautils

import (
"encoding/json"
"os"
"testing"

"github.com/stretchr/testify/assert"

@@ -16,6 +17,10 @@ func mockConfigObj() *ConfigObj {
ClusterName: "ddd",
CustomerAdminEMail: "ab@cd",
Token: "eee",
CloudReportURL: "report.armo.cloud",
CloudAPIURL: "api.armosec.io",
CloudUIURL: "cloud.armosec.io",
CloudAuthURL: "auth.armosec.io",
}
}
func mockLocalConfig() *LocalConfig {

@@ -39,6 +44,10 @@ func TestConfig(t *testing.T) {
assert.Equal(t, co.AccountID, cop.AccountID)
assert.Equal(t, co.ClientID, cop.ClientID)
assert.Equal(t, co.SecretKey, cop.SecretKey)
assert.Equal(t, co.CloudReportURL, cop.CloudReportURL)
assert.Equal(t, co.CloudAPIURL, cop.CloudAPIURL)
assert.Equal(t, co.CloudUIURL, cop.CloudUIURL)
assert.Equal(t, co.CloudAuthURL, cop.CloudAuthURL)
assert.Equal(t, "", cop.ClusterName) // Not copied to bytes
assert.Equal(t, "", cop.CustomerAdminEMail) // Not copied to bytes
assert.Equal(t, "", cop.Token) // Not copied to bytes

@@ -60,6 +69,10 @@ func TestITenantConfig(t *testing.T) {
assert.Equal(t, co.ClusterName, lc.GetContextName())
assert.Equal(t, co.CustomerAdminEMail, lc.GetTenantEmail())
assert.Equal(t, co.Token, lc.GetToken())
assert.Equal(t, co.CloudReportURL, lc.GetCloudReportURL())
assert.Equal(t, co.CloudAPIURL, lc.GetCloudAPIURL())
assert.Equal(t, co.CloudUIURL, lc.GetCloudUIURL())
assert.Equal(t, co.CloudAuthURL, lc.GetCloudAuthURL())

// test ClusterConfig methods
assert.Equal(t, co.AccountID, c.GetAccountID())

@@ -68,6 +81,10 @@ func TestITenantConfig(t *testing.T) {
assert.Equal(t, co.ClusterName, c.GetContextName())
assert.Equal(t, co.CustomerAdminEMail, c.GetTenantEmail())
assert.Equal(t, co.Token, c.GetToken())
assert.Equal(t, co.CloudReportURL, c.GetCloudReportURL())
assert.Equal(t, co.CloudAPIURL, c.GetCloudAPIURL())
assert.Equal(t, co.CloudUIURL, c.GetCloudUIURL())
assert.Equal(t, co.CloudAuthURL, c.GetCloudAuthURL())
}

func TestUpdateConfigData(t *testing.T) {

@@ -80,6 +97,10 @@ func TestUpdateConfigData(t *testing.T) {
assert.Equal(t, c.GetAccountID(), configMap.Data["accountID"])
assert.Equal(t, c.GetClientID(), configMap.Data["clientID"])
assert.Equal(t, c.GetSecretKey(), configMap.Data["secretKey"])
assert.Equal(t, c.GetCloudReportURL(), configMap.Data["cloudReportURL"])
assert.Equal(t, c.GetCloudAPIURL(), configMap.Data["cloudAPIURL"])
assert.Equal(t, c.GetCloudUIURL(), configMap.Data["cloudUIURL"])
assert.Equal(t, c.GetCloudAuthURL(), configMap.Data["cloudAuthURL"])
}

func TestReadConfig(t *testing.T) {

@@ -97,6 +118,10 @@ func TestReadConfig(t *testing.T) {
assert.Equal(t, com.ClusterName, co.ClusterName)
assert.Equal(t, com.CustomerAdminEMail, co.CustomerAdminEMail)
assert.Equal(t, com.Token, co.Token)
assert.Equal(t, com.CloudReportURL, co.CloudReportURL)
assert.Equal(t, com.CloudAPIURL, co.CloudAPIURL)
assert.Equal(t, com.CloudUIURL, co.CloudUIURL)
assert.Equal(t, com.CloudAuthURL, co.CloudAuthURL)
}

func TestLoadConfigFromData(t *testing.T) {

@@ -120,6 +145,10 @@ func TestLoadConfigFromData(t *testing.T) {
assert.Equal(t, c.GetContextName(), co.ClusterName)
assert.Equal(t, c.GetTenantEmail(), co.CustomerAdminEMail)
assert.Equal(t, c.GetToken(), co.Token)
assert.Equal(t, c.GetCloudReportURL(), co.CloudReportURL)
assert.Equal(t, c.GetCloudAPIURL(), co.CloudAPIURL)
assert.Equal(t, c.GetCloudUIURL(), co.CloudUIURL)
assert.Equal(t, c.GetCloudAuthURL(), co.CloudAuthURL)
}

// use case: all data is in config.json

@@ -139,6 +168,10 @@ func TestLoadConfigFromData(t *testing.T) {
assert.Equal(t, c.GetAccountID(), co.AccountID)
assert.Equal(t, c.GetClientID(), co.ClientID)
assert.Equal(t, c.GetSecretKey(), co.SecretKey)
assert.Equal(t, c.GetCloudReportURL(), co.CloudReportURL)
assert.Equal(t, c.GetCloudAPIURL(), co.CloudAPIURL)
assert.Equal(t, c.GetCloudUIURL(), co.CloudUIURL)
assert.Equal(t, c.GetCloudAuthURL(), co.CloudAuthURL)
}

// use case: some data is in config.json

@@ -151,10 +184,12 @@ func TestLoadConfigFromData(t *testing.T) {
// add to map
configMap.Data["clientID"] = c.configObj.ClientID
configMap.Data["secretKey"] = c.configObj.SecretKey
configMap.Data["cloudReportURL"] = c.configObj.CloudReportURL

// delete the content
c.configObj.ClientID = ""
c.configObj.SecretKey = ""
c.configObj.CloudReportURL = ""

configMap.Data["config.json"] = string(c.GetConfigObj().Config())
loadConfigFromData(c.configObj, configMap.Data)

@@ -162,6 +197,7 @@ func TestLoadConfigFromData(t *testing.T) {
assert.NotEmpty(t, c.GetAccountID())
assert.NotEmpty(t, c.GetClientID())
assert.NotEmpty(t, c.GetSecretKey())
assert.NotEmpty(t, c.GetCloudReportURL())
}

// use case: some data is in config.json

@@ -222,3 +258,13 @@ func TestAdoptClusterName(t *testing.T) {
})
}
}

func TestUpdateCloudURLs(t *testing.T) {
co := mockConfigObj()
mockCloudAPIURL := "1-2-3-4.com"
os.Setenv("KS_CLOUD_API_URL", mockCloudAPIURL)

assert.NotEqual(t, co.CloudAPIURL, mockCloudAPIURL)
updateCloudURLs(co)
assert.Equal(t, co.CloudAPIURL, mockCloudAPIURL)
}

@@ -15,7 +15,7 @@ import (
"github.com/kubescape/opa-utils/objectsenvelopes"
"github.com/kubescape/opa-utils/objectsenvelopes/localworkload"

"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
)

var (

@@ -139,9 +139,9 @@ func loadFiles(rootPath string, filePaths []string) (map[string][]workloadinterf
for j := range w {
lw := localworkload.NewLocalWorkload(w[j].GetObject())
if relPath, err := filepath.Rel(rootPath, path); err == nil {
lw.SetPath(relPath)
lw.SetPath(fmt.Sprintf("%s:%d", relPath, j))
} else {
lw.SetPath(path)
lw.SetPath(fmt.Sprintf("%s:%d", path, j))
}
wSlice = append(wSlice, lw)
}

@@ -3,6 +3,7 @@ package getter

import (
"strings"

"github.com/armosec/armoapi-go/armotypes"
"github.com/kubescape/opa-utils/gitregostore"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"

@@ -99,3 +100,11 @@ func contains(s []string, str string) bool {
}
return false
}

func (drp *DownloadReleasedPolicy) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
exceptions, err := drp.gs.GetSystemPostureExceptionPolicies()
if err != nil {
return nil, err
}
return exceptions, nil
}

core/cautils/getter/gcpcloudapi.go (new file, 42 lines)

@@ -0,0 +1,42 @@
package getter

import (
"context"
"os"

containeranalysis "cloud.google.com/go/containeranalysis/apiv1"
)

type GCPCloudAPI struct {
credentialsPath string
context context.Context
client *containeranalysis.Client
projectID string
credentialsCheck bool
}

func GetGlobalGCPCloudAPIConnector() *GCPCloudAPI {

if os.Getenv("KS_GCP_CREDENTIALS_PATH") == "" || os.Getenv("KS_GCP_PROJECT_ID") == "" {
return &GCPCloudAPI{
credentialsCheck: false,
}
} else {
return &GCPCloudAPI{
context: context.Background(),
credentialsPath: os.Getenv("KS_GCP_CREDENTIALS_PATH"),
projectID: os.Getenv("KS_GCP_PROJECT_ID"),
credentialsCheck: true,
}
}
}

func (api *GCPCloudAPI) SetClient(client *containeranalysis.Client) {
api.client = client
}

func (api *GCPCloudAPI) GetCredentialsPath() string { return api.credentialsPath }
func (api *GCPCloudAPI) GetClient() *containeranalysis.Client { return api.client }
func (api *GCPCloudAPI) GetProjectID() string { return api.projectID }
func (api *GCPCloudAPI) GetCredentialsCheck() bool { return api.credentialsCheck }
func (api *GCPCloudAPI) GetContext() context.Context { return api.context }

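A possible usage sketch for the new GCPCloudAPI connector: both environment variables checked in GetGlobalGCPCloudAPIConnector must be set, otherwise GetCredentialsCheck() reports false. The import path follows the module path used elsewhere in this diff; the main function, file path, and project ID are illustrative assumptions, not part of the change:

```go
package main

import (
	"fmt"
	"os"

	"github.com/kubescape/kubescape/v2/core/cautils/getter"
)

func main() {
	// Both variables are required; otherwise GetCredentialsCheck() reports false.
	os.Setenv("KS_GCP_CREDENTIALS_PATH", "/path/to/service-account.json") // illustrative path
	os.Setenv("KS_GCP_PROJECT_ID", "my-gcp-project")                      // illustrative project ID

	api := getter.GetGlobalGCPCloudAPIConnector()
	if !api.GetCredentialsCheck() {
		fmt.Println("GCP credentials not configured, skipping container analysis")
		return
	}
	fmt.Println("GCP project:", api.GetProjectID())
}
```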
@@ -28,10 +28,18 @@ type IBackend interface {
GetAccountID() string
GetClientID() string
GetSecretKey() string
GetCloudReportURL() string
GetCloudAPIURL() string
GetCloudUIURL() string
GetCloudAuthURL() string

SetAccountID(accountID string)
SetClientID(clientID string)
SetSecretKey(secretKey string)
SetCloudReportURL(cloudReportURL string)
SetCloudAPIURL(cloudAPIURL string)
SetCloudUIURL(cloudUIURL string)
SetCloudAuthURL(cloudAuthURL string)

GetTenant() (*TenantResponse, error)
}

@@ -10,8 +10,6 @@ import (
"time"

"github.com/armosec/armoapi-go/armotypes"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
)

@@ -35,23 +33,22 @@ var (

// KSCloudAPI allows accessing the API of the Kubescape Cloud offering
type KSCloudAPI struct {
httpClient *http.Client
apiURL string
authURL string
erURL string
feURL string
accountID string
clientID string
secretKey string
authCookie string
feToken FeLoginResponse
loggedIn bool
httpClient *http.Client
cloudAPIURL string
cloudAuthURL string
cloudReportURL string
cloudUIURL string
accountID string
clientID string
secretKey string
authCookie string
feToken FeLoginResponse
loggedIn bool
}

var globalKSCloudAPIConnector *KSCloudAPI

func SetKSCloudAPIConnector(ksCloudAPI *KSCloudAPI) {
logger.L().Debug("Kubescape Cloud URLs", helpers.String("api", ksCloudAPI.apiURL), helpers.String("auth", ksCloudAPI.authURL), helpers.String("report", ksCloudAPI.erURL), helpers.String("UI", ksCloudAPI.feURL))
globalKSCloudAPIConnector = ksCloudAPI
}

@@ -65,10 +62,10 @@ func GetKSCloudAPIConnector() *KSCloudAPI {
func NewKSCloudAPIDev() *KSCloudAPI {
apiObj := newKSCloudAPI()

apiObj.apiURL = ksCloudDevBEURL
apiObj.authURL = ksCloudDevAUTHURL
apiObj.erURL = ksCloudDevERURL
apiObj.feURL = ksCloudDevFEURL
apiObj.cloudAPIURL = ksCloudDevBEURL
apiObj.cloudAuthURL = ksCloudDevAUTHURL
apiObj.cloudReportURL = ksCloudDevERURL
apiObj.cloudUIURL = ksCloudDevFEURL

return apiObj
}

@@ -76,10 +73,10 @@ func NewKSCloudAPIProd() *KSCloudAPI {
func NewKSCloudAPIProd() *KSCloudAPI {
apiObj := newKSCloudAPI()

apiObj.apiURL = ksCloudBEURL
apiObj.erURL = ksCloudERURL
apiObj.feURL = ksCloudFEURL
apiObj.authURL = ksCloudAUTHURL
apiObj.cloudAPIURL = ksCloudBEURL
apiObj.cloudReportURL = ksCloudERURL
apiObj.cloudUIURL = ksCloudFEURL
apiObj.cloudAuthURL = ksCloudAUTHURL

return apiObj
}

@@ -87,10 +84,10 @@ func NewKSCloudAPIStaging() *KSCloudAPI {
func NewKSCloudAPIStaging() *KSCloudAPI {
apiObj := newKSCloudAPI()

apiObj.apiURL = ksCloudStageBEURL
apiObj.erURL = ksCloudStageERURL
apiObj.feURL = ksCloudStageFEURL
apiObj.authURL = ksCloudStageAUTHURL
apiObj.cloudAPIURL = ksCloudStageBEURL
apiObj.cloudReportURL = ksCloudStageERURL
apiObj.cloudUIURL = ksCloudStageFEURL
apiObj.cloudAuthURL = ksCloudStageAUTHURL

return apiObj
}

@@ -98,10 +95,10 @@ func NewKSCloudAPICustomized(ksCloudERURL, ksCloudBEURL, ksCloudFEURL, ksCloudAUTHURL string) *KSCloudAPI {
func NewKSCloudAPICustomized(ksCloudERURL, ksCloudBEURL, ksCloudFEURL, ksCloudAUTHURL string) *KSCloudAPI {
apiObj := newKSCloudAPI()

apiObj.erURL = ksCloudERURL
apiObj.apiURL = ksCloudBEURL
apiObj.feURL = ksCloudFEURL
apiObj.authURL = ksCloudAUTHURL
apiObj.cloudReportURL = ksCloudERURL
apiObj.cloudAPIURL = ksCloudBEURL
apiObj.cloudUIURL = ksCloudFEURL
apiObj.cloudAuthURL = ksCloudAUTHURL

return apiObj
}

@@ -136,17 +133,22 @@ func (api *KSCloudAPI) Get(fullURL string, headers map[string]string) (string, e
return HttpGetter(api.httpClient, fullURL, headers)
}

func (api *KSCloudAPI) GetAccountID() string { return api.accountID }
func (api *KSCloudAPI) IsLoggedIn() bool { return api.loggedIn }
func (api *KSCloudAPI) GetClientID() string { return api.clientID }
func (api *KSCloudAPI) GetSecretKey() string { return api.secretKey }
func (api *KSCloudAPI) GetFrontendURL() string { return api.feURL }
func (api *KSCloudAPI) GetApiURL() string { return api.apiURL }
func (api *KSCloudAPI) GetAuthURL() string { return api.authURL }
func (api *KSCloudAPI) GetReportReceiverURL() string { return api.erURL }
func (api *KSCloudAPI) SetAccountID(accountID string) { api.accountID = accountID }
func (api *KSCloudAPI) SetClientID(clientID string) { api.clientID = clientID }
func (api *KSCloudAPI) SetSecretKey(secretKey string) { api.secretKey = secretKey }
func (api *KSCloudAPI) GetAccountID() string { return api.accountID }
func (api *KSCloudAPI) IsLoggedIn() bool { return api.loggedIn }
func (api *KSCloudAPI) GetClientID() string { return api.clientID }
func (api *KSCloudAPI) GetSecretKey() string { return api.secretKey }
func (api *KSCloudAPI) GetCloudReportURL() string { return api.cloudReportURL }
func (api *KSCloudAPI) GetCloudAPIURL() string { return api.cloudAPIURL }
func (api *KSCloudAPI) GetCloudUIURL() string { return api.cloudUIURL }
func (api *KSCloudAPI) GetCloudAuthURL() string { return api.cloudAuthURL }

func (api *KSCloudAPI) SetAccountID(accountID string) { api.accountID = accountID }
func (api *KSCloudAPI) SetClientID(clientID string) { api.clientID = clientID }
func (api *KSCloudAPI) SetSecretKey(secretKey string) { api.secretKey = secretKey }
func (api *KSCloudAPI) SetCloudReportURL(cloudReportURL string) { api.cloudReportURL = cloudReportURL }
func (api *KSCloudAPI) SetCloudAPIURL(cloudAPIURL string) { api.cloudAPIURL = cloudAPIURL }
func (api *KSCloudAPI) SetCloudUIURL(cloudUIURL string) { api.cloudUIURL = cloudUIURL }
func (api *KSCloudAPI) SetCloudAuthURL(cloudAuthURL string) { api.cloudAuthURL = cloudAuthURL }

func (api *KSCloudAPI) GetAttackTracks() ([]v1alpha1.AttackTrack, error) {
respStr, err := api.Get(api.getAttackTracksURL(), nil)

@@ -13,7 +13,7 @@ var NativeFrameworks = []string{"nsa", "mitre", "armobest", "devopsbest"}

func (api *KSCloudAPI) getFrameworkURL(frameworkName string) string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetApiURL())
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/armoFrameworks"
q := u.Query()
q.Add("customerGUID", api.getCustomerGUIDFallBack())

@@ -30,7 +30,7 @@ func (api *KSCloudAPI) getFrameworkURL(frameworkName string) string {

func (api *KSCloudAPI) getAttackTracksURL() string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetApiURL())
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/attackTracks"
q := u.Query()
q.Add("customerGUID", api.getCustomerGUIDFallBack())

@@ -41,7 +41,7 @@ func (api *KSCloudAPI) getAttackTracksURL() string {

func (api *KSCloudAPI) getListFrameworkURL() string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetApiURL())
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/armoFrameworks"
q := u.Query()
q.Add("customerGUID", api.getCustomerGUIDFallBack())

@@ -51,7 +51,7 @@
}
func (api *KSCloudAPI) getExceptionsURL(clusterName string) string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetApiURL())
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/armoPostureExceptions"

q := u.Query()

@@ -66,7 +66,7 @@ func (api *KSCloudAPI) getExceptionsURL(clusterName string) string {

func (api *KSCloudAPI) exceptionsURL(exceptionsPolicyName string) string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetApiURL())
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/postureExceptionPolicy"

q := u.Query()

@@ -88,7 +88,7 @@ func (api *KSCloudAPI) getAccountConfigDefault(clusterName string) string {

func (api *KSCloudAPI) getAccountConfig(clusterName string) string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetApiURL())
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/armoCustomerConfiguration"

q := u.Query()

@@ -103,21 +103,21 @@ func (api *KSCloudAPI) getAccountConfig(clusterName string) string {

func (api *KSCloudAPI) getAccountURL() string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetApiURL())
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/createTenant"
return u.String()
}

func (api *KSCloudAPI) getApiToken() string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetAuthURL())
u.Scheme, u.Host = parseHost(api.GetCloudAuthURL())
u.Path = "identity/resources/auth/v1/api-token"
return u.String()
}

func (api *KSCloudAPI) getOpenidCustomers() string {
u := url.URL{}
u.Scheme, u.Host = parseHost(api.GetApiURL())
u.Scheme, u.Host = parseHost(api.GetCloudAPIURL())
u.Path = "api/v1/openid_customers"
return u.String()
}

@@ -65,16 +65,16 @@ func (lp *LoadPolicy) GetControl(controlName string) (*reporthandling.Control, e
}

func (lp *LoadPolicy) GetFramework(frameworkName string) (*reporthandling.Framework, error) {
framework := &reporthandling.Framework{}
var framework reporthandling.Framework
var err error
for _, filePath := range lp.filePaths {
framework = reporthandling.Framework{}
f, err := os.ReadFile(filePath)
if err != nil {
return nil, err
}

if err = json.Unmarshal(f, framework); err != nil {
return framework, err
if err = json.Unmarshal(f, &framework); err != nil {
return nil, err
}
if strings.EqualFold(frameworkName, framework.Name) {
break

@@ -84,7 +84,7 @@ func (lp *LoadPolicy) GetFramework(frameworkName string) (*reporthandling.Framew

return nil, fmt.Errorf("framework from file not matching")
}
return framework, err
return &framework, err
}

func (lp *LoadPolicy) GetFrameworks() ([]reporthandling.Framework, error) {

@@ -130,14 +130,19 @@ func (lp *LoadPolicy) GetControlsInputs(clusterName string) (map[string][]string
filePath := lp.filePath()
accountConfig := &armotypes.CustomerConfig{}
f, err := os.ReadFile(filePath)
fileName := filepath.Base(filePath)
if err != nil {
return nil, err
formattedError := fmt.Errorf("Error opening %s file, \"controls-config\" will be downloaded from ARMO management portal", fileName)
return nil, formattedError
}

if err = json.Unmarshal(f, &accountConfig.Settings.PostureControlInputs); err == nil {
return accountConfig.Settings.PostureControlInputs, nil
}
return nil, err

formattedError := fmt.Errorf("Error reading %s file, %s, \"controls-config\" will be downloaded from ARMO management portal", fileName, err.Error())

return nil, formattedError
}

// temporary support for a list of files

@@ -15,7 +15,12 @@ type RootInfo struct {
|
||||
|
||||
KSCloudBEURLs string // Kubescape Cloud URL
|
||||
KSCloudBEURLsDep string // Kubescape Cloud URL
|
||||
|
||||
}
|
||||
type CloudURLs struct {
|
||||
CloudReportURL string
|
||||
CloudAPIURL string
|
||||
CloudUIURL string
|
||||
CloudAuthURL string
|
||||
}
|
||||
|
||||
type Credentials struct {
|
||||
|
||||
@@ -419,6 +419,7 @@ func metadataGitLocal(input string) (*reporthandlingv2.RepoContextMetadata, erro
|
||||
Date: commit.Committer.Date,
|
||||
CommitterName: commit.Committer.Name,
|
||||
}
|
||||
context.LocalRootPath, _ = gitParser.GetRootDir()
|
||||
|
||||
return context, nil
|
||||
}
|
||||
|
||||
@@ -18,7 +18,8 @@ func TestSetContextMetadata(t *testing.T) {
|
||||
assert.Nil(t, ctx.HelmContextMetadata)
|
||||
assert.Nil(t, ctx.RepoContextMetadata)
|
||||
}
|
||||
{
|
||||
// TODO: tests were commented out due to actual http calls ; http calls should be mocked.
|
||||
/*{
|
||||
ctx := reporthandlingv2.ContextMetadata{}
|
||||
setContextMetadata(&ctx, "https://github.com/kubescape/kubescape")
|
||||
|
||||
@@ -31,7 +32,7 @@ func TestSetContextMetadata(t *testing.T) {
|
||||
assert.Equal(t, "kubescape", ctx.RepoContextMetadata.Repo)
|
||||
assert.Equal(t, "kubescape", ctx.RepoContextMetadata.Owner)
|
||||
assert.Equal(t, "master", ctx.RepoContextMetadata.Branch)
|
||||
}
|
||||
}*/
|
||||
}
|
||||
|
||||
func TestGetHostname(t *testing.T) {
|
||||
|
||||
@@ -19,6 +19,7 @@ const SKIP_VERSION_CHECK = "KS_SKIP_UPDATE_CHECK"
|
||||
|
||||
var BuildNumber string
|
||||
var Client string
|
||||
var LatestReleaseVersion string
|
||||
|
||||
const UnknownBuildNumber = "unknown"
|
||||
|
||||
@@ -108,9 +109,11 @@ func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckReques
|
||||
return fmt.Errorf("failed to get latest version")
|
||||
}
|
||||
|
||||
LatestReleaseVersion := latestVersion.ClientUpdate
|
||||
|
||||
if latestVersion.ClientUpdate != "" {
|
||||
if BuildNumber != "" && semver.Compare(BuildNumber, latestVersion.ClientUpdate) == -1 {
|
||||
logger.L().Warning(warningMessage(latestVersion.ClientUpdate))
|
||||
if BuildNumber != "" && semver.Compare(BuildNumber, LatestReleaseVersion) == -1 {
|
||||
logger.L().Warning(warningMessage(LatestReleaseVersion))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ package cautils
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/kubescape/k8s-interface/cloudsupport"
|
||||
"github.com/kubescape/opa-utils/reporthandling/apis"
|
||||
)
|
||||
|
||||
@@ -17,8 +18,12 @@ var (
|
||||
"LinuxKernelVariables",
|
||||
"KubeletInfo",
|
||||
"KubeProxyInfo",
|
||||
"ControlPlaneInfo",
|
||||
}
|
||||
CloudResources = []string{
|
||||
"ClusterDescribe",
|
||||
string(cloudsupport.TypeApiServerInfo),
|
||||
}
|
||||
CloudResources = []string{"ClusterDescribe"}
|
||||
)
|
||||
|
||||
func MapKSResource(ksResourceMap *KSResources, resources []string) []string {
|
||||
|
||||
@@ -19,6 +19,18 @@ func (ks *Kubescape) SetCachedConfig(setConfig *metav1.SetConfig) error {
|
||||
if setConfig.ClientID != "" {
|
||||
tenant.GetConfigObj().ClientID = setConfig.ClientID
|
||||
}
|
||||
if setConfig.CloudAPIURL != "" {
|
||||
tenant.GetConfigObj().CloudAPIURL = setConfig.CloudAPIURL
|
||||
}
|
||||
if setConfig.CloudAuthURL != "" {
|
||||
tenant.GetConfigObj().CloudAuthURL = setConfig.CloudAuthURL
|
||||
}
|
||||
if setConfig.CloudReportURL != "" {
|
||||
tenant.GetConfigObj().CloudReportURL = setConfig.CloudReportURL
|
||||
}
|
||||
if setConfig.CloudUIURL != "" {
|
||||
tenant.GetConfigObj().CloudUIURL = setConfig.CloudUIURL
|
||||
}
|
||||
|
||||
return tenant.UpdateCachedConfig()
|
||||
}
|
||||
|
||||
@@ -106,7 +106,7 @@ func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
|
||||
var err error
|
||||
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())
|
||||
|
||||
exceptionsGetter := getExceptionsGetter("")
|
||||
exceptionsGetter := getExceptionsGetter("", tenant.GetAccountID(), nil)
|
||||
exceptions := []armotypes.PostureExceptionPolicy{}
|
||||
if tenant.GetAccountID() != "" {
|
||||
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetContextName())
|
||||
|
||||
72
core/core/fix.go
Normal file
72
core/core/fix.go
Normal file
@@ -0,0 +1,72 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/fixhandler"
|
||||
)
|
||||
|
||||
const NoChangesApplied = "No changes were applied."
|
||||
const NoResourcesToFix = "No issues to fix."
|
||||
const ConfirmationQuestion = "Would you like to apply the changes to the files above? [y|n]: "
|
||||
|
||||
func (ks *Kubescape) Fix(fixInfo *metav1.FixInfo) error {
|
||||
logger.L().Info("Reading report file...")
|
||||
handler, err := fixhandler.NewFixHandler(fixInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resourcesToFix := handler.PrepareResourcesToFix()
|
||||
|
||||
if len(resourcesToFix) == 0 {
|
||||
logger.L().Info(NoResourcesToFix)
|
||||
return nil
|
||||
}
|
||||
|
||||
handler.PrintExpectedChanges(resourcesToFix)
|
||||
|
||||
if fixInfo.DryRun {
|
||||
logger.L().Info(NoChangesApplied)
|
||||
return nil
|
||||
}
|
||||
|
||||
if !fixInfo.NoConfirm && !userConfirmed() {
|
||||
logger.L().Info(NoChangesApplied)
|
||||
return nil
|
||||
}
|
||||
|
||||
updatedFilesCount, errors := handler.ApplyChanges(resourcesToFix)
|
||||
logger.L().Info(fmt.Sprintf("Fixed resources in %d files.", updatedFilesCount))
|
||||
|
||||
if len(errors) > 0 {
|
||||
for _, err := range errors {
|
||||
logger.L().Error(err.Error())
|
||||
}
|
||||
return fmt.Errorf("Failed to fix some resources, check the logs for more details")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func userConfirmed() bool {
|
||||
var input string
|
||||
|
||||
for {
|
||||
fmt.Printf(ConfirmationQuestion)
|
||||
if _, err := fmt.Scanln(&input); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
input = strings.ToLower(input)
|
||||
if input == "y" || input == "yes" {
|
||||
return true
|
||||
} else if input == "n" || input == "no" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -32,13 +32,24 @@ func getTenantConfig(credentials *cautils.Credentials, clusterName string, custo
|
||||
return cautils.NewClusterConfig(k8s, getter.GetKSCloudAPIConnector(), credentials, clusterName, customClusterName)
|
||||
}
|
||||
|
||||
func getExceptionsGetter(useExceptions string) getter.IExceptionsGetter {
|
||||
func getExceptionsGetter(useExceptions string, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IExceptionsGetter {
|
||||
if useExceptions != "" {
|
||||
// load exceptions from file
|
||||
return getter.NewLoadPolicy([]string{useExceptions})
|
||||
} else {
|
||||
}
|
||||
if accountID != "" {
|
||||
// download exceptions from Kubescape Cloud backend
|
||||
return getter.GetKSCloudAPIConnector()
|
||||
}
|
||||
// download exceptions from GitHub
|
||||
if downloadReleasedPolicy == nil {
|
||||
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
|
||||
}
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil {
|
||||
logger.L().Warning("failed to get exceptions from github release, this may affect the scanning results", helpers.Error(err))
|
||||
}
|
||||
return downloadReleasedPolicy
|
||||
|
||||
}
|
||||
|
||||
func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, submit bool) *cautils.RBACObjects {
|
||||
@@ -58,7 +69,7 @@ func getReporter(tenantConfig cautils.ITenantConfig, reportID string, submit, fw
|
||||
}
|
||||
if tenantConfig.GetAccountID() == "" {
|
||||
// Add link only when scanning a cluster using a framework
|
||||
return reporterv2.NewReportMock(reporterv2.NO_SUBMIT_QUERY, "run kubescape with the '--submit' flag")
|
||||
return reporterv2.NewReportMock("https://hub.armosec.io/docs/installing-kubescape", "run kubescape with the '--account' flag")
|
||||
}
|
||||
var message string
|
||||
if !fwScan {
|
||||
@@ -128,16 +139,21 @@ func policyIdentifierNames(pi []cautils.PolicyIdentifier) string {
|
||||
func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig) {
|
||||
|
||||
/*
|
||||
If "First run (local config not found)" -
|
||||
Default/keep-local - Do not send report
|
||||
Submit - Create tenant & Submit report
|
||||
If CloudReportURL not set - Do not send report
|
||||
|
||||
If "Submitted" -
|
||||
If There is no account - Do not send report
|
||||
|
||||
If There is account -
|
||||
keep-local - Do not send report
|
||||
Default/Submit - Submit report
|
||||
Default - Submit report
|
||||
|
||||
*/
|
||||
|
||||
if getter.GetKSCloudAPIConnector().GetCloudAPIURL() == "" {
|
||||
scanInfo.Submit = false
|
||||
return
|
||||
}
|
||||
|
||||
// do not submit control scanning
|
||||
if !scanInfo.FrameworkScan {
|
||||
scanInfo.Submit = false
|
||||
@@ -150,27 +166,26 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
|
||||
return
|
||||
}
|
||||
|
||||
if tenantConfig.IsConfigFound() { // config found in cache (submitted)
|
||||
if !scanInfo.Local {
|
||||
if tenantConfig.GetAccountID() != "" {
|
||||
if _, err := uuid.Parse(tenantConfig.GetAccountID()); err != nil {
|
||||
scanInfo.Submit = false
|
||||
return
|
||||
}
|
||||
}
|
||||
// Submit report
|
||||
scanInfo.Submit = true
|
||||
}
|
||||
if scanInfo.Local {
|
||||
scanInfo.Submit = false
|
||||
return
|
||||
}
|
||||
|
||||
// If There is no account, or if the account is not legal, do not submit
|
||||
if _, err := uuid.Parse(tenantConfig.GetAccountID()); err != nil {
|
||||
scanInfo.Submit = false
|
||||
} else {
|
||||
scanInfo.Submit = true
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// setPolicyGetter set the policy getter - local file/github release/Kubescape Cloud API
|
||||
func getPolicyGetter(loadPoliciesFromFile []string, tennatEmail string, frameworkScope bool, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
|
||||
func getPolicyGetter(loadPoliciesFromFile []string, tenantEmail string, frameworkScope bool, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
|
||||
if len(loadPoliciesFromFile) > 0 {
|
||||
return getter.NewLoadPolicy(loadPoliciesFromFile)
|
||||
}
|
||||
if tennatEmail != "" && frameworkScope {
|
||||
if tenantEmail != "" && getter.GetKSCloudAPIConnector().GetCloudAPIURL() != "" && frameworkScope {
|
||||
g := getter.GetKSCloudAPIConnector() // download policy from Kubescape Cloud backend
|
||||
return g
|
||||
}
|
||||
|
||||
@@ -63,10 +63,10 @@ func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
|
||||
func listExceptions(listPolicies *metav1.ListPolicies) ([]string, error) {
|
||||
// load tenant metav1
|
||||
getTenantConfig(&listPolicies.Credentials, "", "", getKubernetesApi())
|
||||
tenant := getTenantConfig(&listPolicies.Credentials, "", "", getKubernetesApi())
|
||||
|
||||
var exceptionsNames []string
|
||||
ksCloudAPI := getExceptionsGetter("")
|
||||
ksCloudAPI := getExceptionsGetter("", tenant.GetAccountID(), nil)
|
||||
exceptions, err := ksCloudAPI.GetExceptions("")
|
||||
if err != nil {
|
||||
return exceptionsNames, err
|
||||
|
||||
@@ -122,7 +122,7 @@ func (ks *Kubescape) Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsH
|
||||
// set policy getter only after setting the customerGUID
|
||||
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetTenantEmail(), scanInfo.FrameworkScan, downloadReleasedPolicy)
|
||||
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
|
||||
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions)
|
||||
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions, interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
|
||||
scanInfo.Getters.AttackTracksGetter = getAttackTracksGetter(interfaces.tenantConfig.GetAccountID(), downloadReleasedPolicy)
|
||||
|
||||
// TODO - list supported frameworks/controls
|
||||
|
||||
@@ -3,9 +3,13 @@ package v1
|
||||
import "io"
|
||||
|
||||
type SetConfig struct {
|
||||
Account string
|
||||
ClientID string
|
||||
SecretKey string
|
||||
Account string
|
||||
ClientID string
|
||||
SecretKey string
|
||||
CloudReportURL string
|
||||
CloudAPIURL string
|
||||
CloudUIURL string
|
||||
CloudAuthURL string
|
||||
}
|
||||
|
||||
type ViewConfig struct {
|
||||
|
||||
8
core/meta/datastructures/v1/fix.go
Normal file
8
core/meta/datastructures/v1/fix.go
Normal file
@@ -0,0 +1,8 @@
|
||||
package v1
|
||||
|
||||
type FixInfo struct {
|
||||
ReportFile string // path to report file (mandatory)
|
||||
NoConfirm bool // if true, no confirmation will be given to the user before applying the fix
|
||||
SkipUserValues bool // if true, user values will not be changed
|
||||
DryRun bool // if true, no changes will be applied
|
||||
}
|
||||
@@ -25,4 +25,7 @@ type IKubescape interface {
|
||||
|
||||
// delete
|
||||
DeleteExceptions(deleteexceptions *metav1.DeleteExceptions) error
|
||||
|
||||
// fix
|
||||
Fix(fixInfo *metav1.FixInfo) error
|
||||
}
|
||||
|
||||
63
core/pkg/fixhandler/datastructures.go
Normal file
63
core/pkg/fixhandler/datastructures.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package fixhandler
|
||||
|
||||
import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// FixHandler is a struct that holds the information of the report to be fixed
|
||||
type FixHandler struct {
|
||||
fixInfo *metav1.FixInfo
|
||||
reportObj *reporthandlingv2.PostureReport
|
||||
localBasePath string
|
||||
}
|
||||
|
||||
// ResourceFixInfo is a struct that holds the information about the resource that needs to be fixed
|
||||
type ResourceFixInfo struct {
|
||||
YamlExpressions map[string]*armotypes.FixPath
|
||||
Resource *reporthandling.Resource
|
||||
FilePath string
|
||||
DocumentIndex int
|
||||
}
|
||||
|
||||
// NodeInfo holds extra information about the node
|
||||
type nodeInfo struct {
|
||||
node *yaml.Node
|
||||
parent *yaml.Node
|
||||
|
||||
// position of the node among siblings
|
||||
index int
|
||||
}
|
||||
|
||||
// FixInfoMetadata holds the arguments "getFixInfo" function needs to pass to the
|
||||
// functions it uses
|
||||
type fixInfoMetadata struct {
|
||||
originalList *[]nodeInfo
|
||||
fixedList *[]nodeInfo
|
||||
originalListTracker int
|
||||
fixedListTracker int
|
||||
contentToAdd *[]contentToAdd
|
||||
linesToRemove *[]linesToRemove
|
||||
}
|
||||
|
||||
// ContentToAdd holds the information about where to insert the new changes in the existing yaml file
|
||||
type contentToAdd struct {
|
||||
// Line where the fix should be applied to
|
||||
line int
|
||||
// Content is a string representation of the YAML node that describes a suggested fix
|
||||
content string
|
||||
}
|
||||
|
||||
// LinesToRemove holds the line numbers to remove from the existing yaml file
|
||||
type linesToRemove struct {
|
||||
startLine int
|
||||
endLine int
|
||||
}
|
||||
|
||||
type fileFixInfo struct {
|
||||
contentsToAdd *[]contentToAdd
|
||||
linesToRemove *[]linesToRemove
|
||||
}
|
||||
346
core/pkg/fixhandler/fixhandler.go
Normal file
346
core/pkg/fixhandler/fixhandler.go
Normal file
@@ -0,0 +1,346 @@
|
||||
package fixhandler
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes"
|
||||
"github.com/kubescape/opa-utils/objectsenvelopes/localworkload"
|
||||
"github.com/kubescape/opa-utils/reporthandling"
|
||||
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
"github.com/mikefarah/yq/v4/pkg/yqlib"
|
||||
"gopkg.in/op/go-logging.v1"
|
||||
)
|
||||
|
||||
const UserValuePrefix = "YOUR_"
|
||||
|
||||
func NewFixHandler(fixInfo *metav1.FixInfo) (*FixHandler, error) {
|
||||
jsonFile, err := os.Open(fixInfo.ReportFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer jsonFile.Close()
|
||||
byteValue, _ := ioutil.ReadAll(jsonFile)
|
||||
|
||||
var reportObj reporthandlingv2.PostureReport
|
||||
if err = json.Unmarshal(byteValue, &reportObj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = isSupportedScanningTarget(&reportObj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
localPath := getLocalPath(&reportObj)
|
||||
if _, err = os.Stat(localPath); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
backendLoggerLeveled := logging.AddModuleLevel(logging.NewLogBackend(logger.L().GetWriter(), "", 0))
|
||||
backendLoggerLeveled.SetLevel(logging.ERROR, "")
|
||||
yqlib.GetLogger().SetBackend(backendLoggerLeveled)
|
||||
|
||||
return &FixHandler{
|
||||
fixInfo: fixInfo,
|
||||
reportObj: &reportObj,
|
||||
localBasePath: localPath,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func isSupportedScanningTarget(report *reporthandlingv2.PostureReport) error {
|
||||
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.GitLocal || report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.Directory {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unsupported scanning target. Only local git and directory scanning targets are supported")
|
||||
}
|
||||
|
||||
func getLocalPath(report *reporthandlingv2.PostureReport) string {
|
||||
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.GitLocal {
|
||||
return report.Metadata.ContextMetadata.RepoContextMetadata.LocalRootPath
|
||||
}
|
||||
|
||||
if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.Directory {
|
||||
return report.Metadata.ContextMetadata.DirectoryContextMetadata.BasePath
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (h *FixHandler) buildResourcesMap() map[string]*reporthandling.Resource {
|
||||
resourceIdToRawResource := make(map[string]*reporthandling.Resource)
|
||||
for i := range h.reportObj.Resources {
|
||||
resourceIdToRawResource[h.reportObj.Resources[i].GetID()] = &h.reportObj.Resources[i]
|
||||
}
|
||||
for i := range h.reportObj.Results {
|
||||
if h.reportObj.Results[i].RawResource == nil {
|
||||
continue
|
||||
}
|
||||
resourceIdToRawResource[h.reportObj.Results[i].RawResource.GetID()] = h.reportObj.Results[i].RawResource
|
||||
}
|
||||
|
||||
return resourceIdToRawResource
|
||||
}
|
||||
|
||||
func (h *FixHandler) getPathFromRawResource(obj map[string]interface{}) string {
|
||||
if localworkload.IsTypeLocalWorkload(obj) {
|
||||
localwork := localworkload.NewLocalWorkload(obj)
|
||||
return localwork.GetPath()
|
||||
} else if objectsenvelopes.IsTypeRegoResponseVector(obj) {
|
||||
regoResponseVectorObject := objectsenvelopes.NewRegoResponseVectorObject(obj)
|
||||
relatedObjects := regoResponseVectorObject.GetRelatedObjects()
|
||||
for _, relatedObject := range relatedObjects {
|
||||
if localworkload.IsTypeLocalWorkload(relatedObject.GetObject()) {
|
||||
return relatedObject.(*localworkload.LocalWorkload).GetPath()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func (h *FixHandler) PrepareResourcesToFix() []ResourceFixInfo {
|
||||
resourceIdToResource := h.buildResourcesMap()
|
||||
|
||||
resourcesToFix := make([]ResourceFixInfo, 0)
|
||||
for _, result := range h.reportObj.Results {
|
||||
if !result.GetStatus(nil).IsFailed() {
|
||||
continue
|
||||
}
|
||||
|
||||
resourceID := result.ResourceID
|
||||
resourceObj := resourceIdToResource[resourceID]
|
||||
resourcePath := h.getPathFromRawResource(resourceObj.GetObject())
|
||||
if resourcePath == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if resourceObj.Source == nil || resourceObj.Source.FileType != reporthandling.SourceTypeYaml {
|
||||
continue
|
||||
}
|
||||
|
||||
relativePath, documentIndex, err := h.getFilePathAndIndex(resourcePath)
|
||||
if err != nil {
|
||||
logger.L().Error("Skipping invalid resource path: " + resourcePath)
|
||||
continue
|
||||
}
|
||||
|
||||
absolutePath := path.Join(h.localBasePath, relativePath)
|
||||
if _, err := os.Stat(absolutePath); err != nil {
|
||||
logger.L().Error("Skipping missing file: " + absolutePath)
|
||||
continue
|
||||
}
|
||||
|
||||
rfi := ResourceFixInfo{
|
||||
FilePath: absolutePath,
|
||||
Resource: resourceObj,
|
||||
YamlExpressions: make(map[string]*armotypes.FixPath, 0),
|
||||
DocumentIndex: documentIndex,
|
||||
}
|
||||
|
||||
for i := range result.AssociatedControls {
|
||||
if result.AssociatedControls[i].GetStatus(nil).IsFailed() {
|
||||
rfi.addYamlExpressionsFromResourceAssociatedControl(documentIndex, &result.AssociatedControls[i], h.fixInfo.SkipUserValues)
|
||||
}
|
||||
}
|
||||
|
||||
if len(rfi.YamlExpressions) > 0 {
|
||||
resourcesToFix = append(resourcesToFix, rfi)
|
||||
}
|
||||
}
|
||||
|
||||
return resourcesToFix
|
||||
}
|
||||
|
||||
func (h *FixHandler) PrintExpectedChanges(resourcesToFix []ResourceFixInfo) {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("The following changes will be applied:\n")
|
||||
|
||||
for _, resourceFixInfo := range resourcesToFix {
|
||||
sb.WriteString(fmt.Sprintf("File: %s\n", resourceFixInfo.FilePath))
|
||||
sb.WriteString(fmt.Sprintf("Resource: %s\n", resourceFixInfo.Resource.GetName()))
|
||||
sb.WriteString(fmt.Sprintf("Kind: %s\n", resourceFixInfo.Resource.GetKind()))
|
||||
sb.WriteString("Changes:\n")
|
||||
|
||||
i := 1
|
||||
for _, fixPath := range resourceFixInfo.YamlExpressions {
|
||||
sb.WriteString(fmt.Sprintf("\t%d) %s = %s\n", i, (*fixPath).Path, (*fixPath).Value))
|
||||
i++
|
||||
}
|
||||
sb.WriteString("\n------\n")
|
||||
}
|
||||
|
||||
logger.L().Info(sb.String())
|
||||
}
|
||||
|
||||
func (h *FixHandler) ApplyChanges(resourcesToFix []ResourceFixInfo) (int, []error) {
|
||||
updatedFiles := make(map[string]bool)
|
||||
errors := make([]error, 0)
|
||||
|
||||
fileYamlExpressions := h.getFileYamlExpressions(resourcesToFix)
|
||||
|
||||
for filepath, yamlExpression := range fileYamlExpressions {
|
||||
fileAsString, err := getFileString(filepath)
|
||||
|
||||
if err != nil {
|
||||
errors = append(errors, err)
|
||||
continue
|
||||
}
|
||||
|
||||
fixedYamlString, err := h.ApplyFixToContent(fileAsString, yamlExpression)
|
||||
|
||||
if err != nil {
|
||||
errors = append(errors, fmt.Errorf("Failed to fix file %s: %w ", filepath, err))
|
||||
continue
|
||||
} else {
|
||||
updatedFiles[filepath] = true
|
||||
}
|
||||
|
||||
err = writeFixesToFile(filepath, fixedYamlString)
|
||||
|
||||
if err != nil {
|
||||
logger.L().Error(fmt.Sprintf("Failed to write fixes to file %s, %v", filepath, err.Error()))
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
return len(updatedFiles), errors
|
||||
}
|
||||
|
||||
func (h *FixHandler) getFilePathAndIndex(filePathWithIndex string) (filePath string, documentIndex int, err error) {
|
||||
splittedPath := strings.Split(filePathWithIndex, ":")
|
||||
if len(splittedPath) <= 1 {
|
||||
return "", 0, fmt.Errorf("expected to find ':' in file path")
|
||||
}
|
||||
|
||||
filePath = splittedPath[0]
|
||||
if documentIndex, err := strconv.Atoi(splittedPath[1]); err != nil {
|
||||
return "", 0, err
|
||||
} else {
|
||||
return filePath, documentIndex, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (h *FixHandler) ApplyFixToContent(yamlAsString, yamlExpression string) (fixedString string, err error) {
|
||||
yamlLines := strings.Split(yamlAsString, "\n")
|
||||
|
||||
originalRootNodes, err := decodeDocumentRoots(yamlAsString)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
fixedRootNodes, err := getFixedNodes(yamlAsString, yamlExpression)
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
fileFixInfo := getFixInfo(originalRootNodes, fixedRootNodes)
|
||||
|
||||
fixedYamlLines := getFixedYamlLines(yamlLines, fileFixInfo)
|
||||
|
||||
fixedString = getStringFromSlice(fixedYamlLines)
|
||||
|
||||
return fixedString, nil
|
||||
}
|
||||
|
||||
func (h *FixHandler) getFileYamlExpressions(resourcesToFix []ResourceFixInfo) map[string]string {
|
||||
fileYamlExpressions := make(map[string]string, 0)
|
||||
for _, resourceToFix := range resourcesToFix {
|
||||
singleExpression := reduceYamlExpressions(&resourceToFix)
|
||||
resourceFilePath := resourceToFix.FilePath
|
||||
|
||||
if _, pathExistsInMap := fileYamlExpressions[resourceFilePath]; !pathExistsInMap {
|
||||
fileYamlExpressions[resourceFilePath] = singleExpression
|
||||
} else {
|
||||
fileYamlExpressions[resourceFilePath] = joinStrings(fileYamlExpressions[resourceFilePath], " | ", singleExpression)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return fileYamlExpressions
|
||||
}
|
||||
|
||||
func (rfi *ResourceFixInfo) addYamlExpressionsFromResourceAssociatedControl(documentIndex int, ac *resourcesresults.ResourceAssociatedControl, skipUserValues bool) {
|
||||
for _, rule := range ac.ResourceAssociatedRules {
|
||||
if !rule.GetStatus(nil).IsFailed() {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, rulePaths := range rule.Paths {
|
||||
if rulePaths.FixPath.Path == "" {
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(rulePaths.FixPath.Value, UserValuePrefix) && skipUserValues {
|
||||
continue
|
||||
}
|
||||
|
||||
yamlExpression := fixPathToValidYamlExpression(rulePaths.FixPath.Path, rulePaths.FixPath.Value, documentIndex)
|
||||
rfi.YamlExpressions[yamlExpression] = &rulePaths.FixPath
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// reduceYamlExpressions reduces the number of yaml expressions to a single one
|
||||
func reduceYamlExpressions(resource *ResourceFixInfo) string {
|
||||
expressions := make([]string, 0, len(resource.YamlExpressions))
|
||||
for expr := range resource.YamlExpressions {
|
||||
expressions = append(expressions, expr)
|
||||
}
|
||||
|
||||
return strings.Join(expressions, " | ")
|
||||
}
|
||||
|
||||
func fixPathToValidYamlExpression(fixPath, value string, documentIndexInYaml int) string {
|
||||
isStringValue := true
|
||||
if _, err := strconv.ParseBool(value); err == nil {
|
||||
isStringValue = false
|
||||
} else if _, err := strconv.ParseFloat(value, 64); err == nil {
|
||||
isStringValue = false
|
||||
} else if _, err := strconv.Atoi(value); err == nil {
|
||||
isStringValue = false
|
||||
}
|
||||
|
||||
// Strings should be quoted
|
||||
if isStringValue {
|
||||
value = fmt.Sprintf("\"%s\"", value)
|
||||
}
|
||||
|
||||
// select document index and add a dot for the root node
|
||||
return fmt.Sprintf("select(di==%d).%s |= %s", documentIndexInYaml, fixPath, value)
|
||||
}
|
||||
|
||||
func joinStrings(inputStrings ...string) string {
|
||||
return strings.Join(inputStrings, "")
|
||||
}
|
||||
|
||||
func getFileString(filepath string) (string, error) {
|
||||
bytes, err := ioutil.ReadFile(filepath)
|
||||
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error reading file %s", filepath)
|
||||
}
|
||||
|
||||
return string(bytes), nil
|
||||
}
|
||||
|
||||
func writeFixesToFile(filepath, content string) error {
|
||||
err := ioutil.WriteFile(filepath, []byte(content), 0644)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error writing fixes to file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
248
core/pkg/fixhandler/fixhandler_test.go
Normal file
248
core/pkg/fixhandler/fixhandler_test.go
Normal file
@@ -0,0 +1,248 @@
|
||||
package fixhandler
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
|
||||
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
|
||||
"github.com/mikefarah/yq/v4/pkg/yqlib"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gopkg.in/op/go-logging.v1"
|
||||
)
|
||||
|
||||
type indentationTestCase struct {
|
||||
inputFile string
|
||||
yamlExpression string
|
||||
expectedFile string
|
||||
}
|
||||
|
||||
func NewFixHandlerMock() (*FixHandler, error) {
|
||||
backendLoggerLeveled := logging.AddModuleLevel(logging.NewLogBackend(logger.L().GetWriter(), "", 0))
|
||||
backendLoggerLeveled.SetLevel(logging.ERROR, "")
|
||||
yqlib.GetLogger().SetBackend(backendLoggerLeveled)
|
||||
|
||||
return &FixHandler{
|
||||
fixInfo: &metav1.FixInfo{},
|
||||
reportObj: &reporthandlingv2.PostureReport{},
|
||||
localBasePath: "",
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getTestdataPath() string {
|
||||
currentDir, _ := os.Getwd()
|
||||
return filepath.Join(currentDir, "testdata")
|
||||
}
|
||||
|
||||
func getTestCases() []indentationTestCase {
|
||||
indentationTestCases := []indentationTestCase{
|
||||
// Insertion Scenarios
|
||||
{
|
||||
"inserts/tc-01-00-input-mapping-insert-mapping.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false",
|
||||
"inserts/tc-01-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-02-00-input-mapping-insert-mapping-with-list.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"NET_RAW\"]",
|
||||
"inserts/tc-02-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-03-00-input-list-append-scalar.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"SYS_ADM\"]",
|
||||
"inserts/tc-03-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-04-00-input-multiple-inserts.yaml",
|
||||
|
||||
`select(di==0).spec.template.spec.securityContext.allowPrivilegeEscalation |= false |
|
||||
select(di==0).spec.template.spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"] |
|
||||
select(di==0).spec.template.spec.containers[0].securityContext.seccompProfile.type |= "RuntimeDefault" |
|
||||
select(di==0).spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation |= false |
|
||||
select(di==0).spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem |= true`,
|
||||
|
||||
"inserts/tc-04-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-05-00-input-comment-blank-line-single-insert.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false",
|
||||
"inserts/tc-05-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-06-00-input-list-append-scalar-oneline.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"SYS_ADM\"]",
|
||||
"inserts/tc-06-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-07-00-input-multiple-documents.yaml",
|
||||
|
||||
`select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false |
|
||||
select(di==1).spec.containers[0].securityContext.allowPrivilegeEscalation |= false`,
|
||||
|
||||
"inserts/tc-07-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-08-00-input-mapping-insert-mapping-indented.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"NET_RAW\"]",
|
||||
"inserts/tc-08-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-09-00-input-list-insert-new-mapping-indented.yaml",
|
||||
`select(di==0).spec.containers += {"name": "redis", "image": "redis"}`,
|
||||
"inserts/tc-09-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"inserts/tc-10-00-input-list-insert-new-mapping.yaml",
|
||||
`select(di==0).spec.containers += {"name": "redis", "image": "redis"}`,
|
||||
"inserts/tc-10-01-expected.yaml",
|
||||
},
|
||||
|
||||
// Removal Scenarios
|
||||
{
|
||||
"removals/tc-01-00-input.yaml",
|
||||
"del(select(di==0).spec.containers[0].securityContext)",
|
||||
"removals/tc-01-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"removals/tc-02-00-input.yaml",
|
||||
"del(select(di==0).spec.containers[1])",
|
||||
"removals/tc-02-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"removals/tc-03-00-input.yaml",
|
||||
"del(select(di==0).spec.containers[0].securityContext.capabilities.drop[1])",
|
||||
"removals/tc-03-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"removes/tc-04-00-input.yaml",
|
||||
`del(select(di==0).spec.containers[0].securityContext) |
|
||||
del(select(di==1).spec.containers[1])`,
|
||||
"removes/tc-04-01-expected.yaml",
|
||||
},
|
||||
|
||||
// Replace Scenarios
|
||||
{
|
||||
"replaces/tc-01-00-input.yaml",
|
||||
"select(di==0).spec.containers[0].securityContext.runAsRoot |= false",
|
||||
"replaces/tc-01-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"replaces/tc-02-00-input.yaml",
|
||||
`select(di==0).spec.containers[0].securityContext.capabilities.drop[0] |= "SYS_ADM" |
|
||||
select(di==0).spec.containers[0].securityContext.capabilities.add[0] |= "NET_RAW"`,
|
||||
"replaces/tc-02-01-expected.yaml",
|
||||
},
|
||||
|
||||
// Hybrid Scenarios
|
||||
{
|
||||
"hybrids/tc-01-00-input.yaml",
|
||||
`del(select(di==0).spec.containers[0].securityContext) |
|
||||
select(di==0).spec.securityContext.runAsRoot |= false`,
|
||||
"hybrids/tc-01-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"hybrids/tc-02-00-input-indented-list.yaml",
|
||||
`del(select(di==0).spec.containers[0].securityContext) |
|
||||
select(di==0).spec.securityContext.runAsRoot |= false`,
|
||||
"hybrids/tc-02-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"hybrids/tc-03-00-input-comments.yaml",
|
||||
`del(select(di==0).spec.containers[0].securityContext) |
|
||||
select(di==0).spec.securityContext.runAsRoot |= false`,
|
||||
"hybrids/tc-03-01-expected.yaml",
|
||||
},
|
||||
{
|
||||
"hybrids/tc-04-00-input-separated-keys.yaml",
|
||||
`del(select(di==0).spec.containers[0].securityContext) |
|
||||
select(di==0).spec.securityContext.runAsRoot |= false`,
|
||||
"hybrids/tc-04-01-expected.yaml",
|
||||
},
|
||||
}
|
||||
|
||||
return indentationTestCases
|
||||
}
|
||||
|
||||
func TestApplyFixKeepsFormatting(t *testing.T) {
|
||||
testCases := getTestCases()
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.inputFile, func(t *testing.T) {
|
||||
getTestDataPath := func(filename string) string {
|
||||
currentDir, _ := os.Getwd()
|
||||
currentFile := "testdata/" + filename
|
||||
return filepath.Join(currentDir, currentFile)
|
||||
}
|
||||
|
||||
input, _ := os.ReadFile(getTestDataPath(tc.inputFile))
|
||||
wantRaw, _ := os.ReadFile(getTestDataPath(tc.expectedFile))
|
||||
want := string(wantRaw)
|
||||
expression := tc.yamlExpression
|
||||
|
||||
h, _ := NewFixHandlerMock()
|
||||
|
||||
got, _ := h.ApplyFixToContent(string(input), expression)
|
||||
|
||||
assert.Equalf(
|
||||
t, want, got,
|
||||
"Contents of the fixed file don't match the expectation.\n"+
|
||||
"Input file: %s\n\n"+
|
||||
"Got: <%s>\n\n"+
|
||||
"Want: <%s>",
|
||||
tc.inputFile, got, want,
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func Test_fixPathToValidYamlExpression(t *testing.T) {
|
||||
type args struct {
|
||||
fixPath string
|
||||
value string
|
||||
documentIndexInYaml int
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "fix path with boolean value",
|
||||
args: args{
|
||||
fixPath: "spec.template.spec.containers[0].securityContext.privileged",
|
||||
value: "true",
|
||||
documentIndexInYaml: 2,
|
||||
},
|
||||
want: "select(di==2).spec.template.spec.containers[0].securityContext.privileged |= true",
|
||||
},
|
||||
{
|
||||
name: "fix path with string value",
|
||||
args: args{
|
||||
fixPath: "metadata.namespace",
|
||||
value: "YOUR_NAMESPACE",
|
||||
documentIndexInYaml: 0,
|
||||
},
|
||||
want: "select(di==0).metadata.namespace |= \"YOUR_NAMESPACE\"",
|
||||
},
|
||||
{
|
||||
name: "fix path with number",
|
||||
args: args{
|
||||
fixPath: "xxx.yyy",
|
||||
value: "123",
|
||||
documentIndexInYaml: 0,
|
||||
},
|
||||
want: "select(di==0).xxx.yyy |= 123",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := fixPathToValidYamlExpression(tt.args.fixPath, tt.args.value, tt.args.documentIndexInYaml); got != tt.want {
|
||||
t.Errorf("fixPathToValidYamlExpression() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
19
core/pkg/fixhandler/testdata/hybrids/tc-01-00-input.yaml
vendored
Normal file
19
core/pkg/fixhandler/testdata/hybrids/tc-01-00-input.yaml
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: true
|
||||
19
core/pkg/fixhandler/testdata/hybrids/tc-01-01-expected.yaml
vendored
Normal file
19
core/pkg/fixhandler/testdata/hybrids/tc-01-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
19
core/pkg/fixhandler/testdata/hybrids/tc-02-00-input-indented-list.yaml
vendored
Normal file
19
core/pkg/fixhandler/testdata/hybrids/tc-02-00-input-indented-list.yaml
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: true
|
||||
19
core/pkg/fixhandler/testdata/hybrids/tc-02-01-expected.yaml
vendored
Normal file
19
core/pkg/fixhandler/testdata/hybrids/tc-02-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
21
core/pkg/fixhandler/testdata/hybrids/tc-03-00-input-comments.yaml
vendored
Normal file
21
core/pkg/fixhandler/testdata/hybrids/tc-03-00-input-comments.yaml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
# These are the container comments
|
||||
containers:
|
||||
# These are the first containers comments
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: true
|
||||
21
core/pkg/fixhandler/testdata/hybrids/tc-03-01-expected.yaml
vendored
Normal file
21
core/pkg/fixhandler/testdata/hybrids/tc-03-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
# These are the container comments
|
||||
containers:
|
||||
# These are the first containers comments
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
21
core/pkg/fixhandler/testdata/hybrids/tc-04-00-input-separated-keys.yaml
vendored
Normal file
21
core/pkg/fixhandler/testdata/hybrids/tc-04-00-input-separated-keys.yaml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
|
||||
image: nginx
|
||||
|
||||
securityContext:
|
||||
runAsRoot: true
|
||||
21
core/pkg/fixhandler/testdata/hybrids/tc-04-01-expected.yaml
vendored
Normal file
21
core/pkg/fixhandler/testdata/hybrids/tc-04-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
# Fix to Apply:
|
||||
# REMOVE:
|
||||
# "del(select(di==0).spec.containers[0].securityContext)"
|
||||
|
||||
# INSERT:
|
||||
# select(di==0).spec.securityContext.runAsRoot: false
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
|
||||
12
core/pkg/fixhandler/testdata/inserts/tc-01-00-input-mapping-insert-mapping.yaml
vendored
Normal file
12
core/pkg/fixhandler/testdata/inserts/tc-01-00-input-mapping-insert-mapping.yaml
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
14
core/pkg/fixhandler/testdata/inserts/tc-01-01-expected.yaml
vendored
Normal file
14
core/pkg/fixhandler/testdata/inserts/tc-01-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
11
core/pkg/fixhandler/testdata/inserts/tc-02-00-input-mapping-insert-mapping-with-list.yaml
vendored
Normal file
11
core/pkg/fixhandler/testdata/inserts/tc-02-00-input-mapping-insert-mapping-with-list.yaml
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
15
core/pkg/fixhandler/testdata/inserts/tc-02-01-expected.yaml
vendored
Normal file
15
core/pkg/fixhandler/testdata/inserts/tc-02-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- NET_RAW
|
||||
15
core/pkg/fixhandler/testdata/inserts/tc-03-00-input-list-append-scalar.yaml
vendored
Normal file
15
core/pkg/fixhandler/testdata/inserts/tc-03-00-input-list-append-scalar.yaml
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- NET_RAW
|
||||
16
core/pkg/fixhandler/testdata/inserts/tc-03-01-expected.yaml
vendored
Normal file
16
core/pkg/fixhandler/testdata/inserts/tc-03-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- NET_RAW
|
||||
- SYS_ADM
|
||||
47
core/pkg/fixhandler/testdata/inserts/tc-04-00-input-multiple-inserts.yaml
vendored
Normal file
47
core/pkg/fixhandler/testdata/inserts/tc-04-00-input-multiple-inserts.yaml
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
# Fixes to Apply:
|
||||
# 1) select(di==0).spec.template.spec.securityContext.allowPrivilegeEscalation = false
|
||||
# 2) select(di==0).spec.template.spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
|
||||
# 3) select(di==0).spec.template.spec.containers[0].securityContext.seccompProfile.type = RuntimeDefault
|
||||
# 4) select(di==0).spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation |= false
|
||||
# 5) select(di==0).spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem |= true
|
||||
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: multiple_inserts
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: example_4
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: example_4
|
||||
spec:
|
||||
serviceAccountName: default
|
||||
terminationGracePeriodSeconds: 5
|
||||
containers:
|
||||
- name: example_4
|
||||
image: nginx
|
||||
ports:
|
||||
- containerPort: 3000
|
||||
env:
|
||||
- name: PORT
|
||||
value: "3000"
|
||||
resources:
|
||||
requests:
|
||||
cpu: 200m
|
||||
memory: 180Mi
|
||||
limits:
|
||||
cpu: 300m
|
||||
memory: 300Mi
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 20
|
||||
periodSeconds: 15
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:3000"]
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 20
|
||||
periodSeconds: 15
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:3000"]
|
||||
57
core/pkg/fixhandler/testdata/inserts/tc-04-01-expected.yaml
vendored
Normal file
57
core/pkg/fixhandler/testdata/inserts/tc-04-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
# Fixes to Apply:
|
||||
# 1) select(di==0).spec.template.spec.securityContext.allowPrivilegeEscalation = false
|
||||
# 2) select(di==0).spec.template.spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
|
||||
# 3) select(di==0).spec.template.spec.containers[0].securityContext.seccompProfile.type = RuntimeDefault
|
||||
# 4) select(di==0).spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation |= false
|
||||
# 5) select(di==0).spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem |= true
|
||||
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: multiple_inserts
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: example_4
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: example_4
|
||||
spec:
|
||||
serviceAccountName: default
|
||||
terminationGracePeriodSeconds: 5
|
||||
containers:
|
||||
- name: example_4
|
||||
image: nginx
|
||||
ports:
|
||||
- containerPort: 3000
|
||||
env:
|
||||
- name: PORT
|
||||
value: "3000"
|
||||
resources:
|
||||
requests:
|
||||
cpu: 200m
|
||||
memory: 180Mi
|
||||
limits:
|
||||
cpu: 300m
|
||||
memory: 300Mi
|
||||
readinessProbe:
|
||||
initialDelaySeconds: 20
|
||||
periodSeconds: 15
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:3000"]
|
||||
livenessProbe:
|
||||
initialDelaySeconds: 20
|
||||
periodSeconds: 15
|
||||
exec:
|
||||
command: ["/bin/grpc_health_probe", "-addr=:3000"]
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- NET_RAW
|
||||
seccompProfile:
|
||||
type: RuntimeDefault
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
16
core/pkg/fixhandler/testdata/inserts/tc-05-00-input-comment-blank-line-single-insert.yaml
vendored
Normal file
16
core/pkg/fixhandler/testdata/inserts/tc-05-00-input-comment-blank-line-single-insert.yaml
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
|
||||
# Testing if comments are retained as intended
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
18
core/pkg/fixhandler/testdata/inserts/tc-05-01-expected.yaml
vendored
Normal file
18
core/pkg/fixhandler/testdata/inserts/tc-05-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
|
||||
# Testing if comments are retained as intended
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
14
core/pkg/fixhandler/testdata/inserts/tc-06-00-input-list-append-scalar-oneline.yaml
vendored
Normal file
14
core/pkg/fixhandler/testdata/inserts/tc-06-00-input-list-append-scalar-oneline.yaml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx1
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop: [NET_RAW]
|
||||
14
core/pkg/fixhandler/testdata/inserts/tc-06-01-expected.yaml
vendored
Normal file
14
core/pkg/fixhandler/testdata/inserts/tc-06-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx1
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop: [NET_RAW, SYS_ADM]
|
||||
27
core/pkg/fixhandler/testdata/inserts/tc-07-00-input-multiple-documents.yaml
vendored
Normal file
27
core/pkg/fixhandler/testdata/inserts/tc-07-00-input-multiple-documents.yaml
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
|
||||
---
|
||||
|
||||
# Fix to Apply:
|
||||
# "select(di==1).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
31
core/pkg/fixhandler/testdata/inserts/tc-07-01-expected.yaml
vendored
Normal file
31
core/pkg/fixhandler/testdata/inserts/tc-07-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
|
||||
---
|
||||
|
||||
# Fix to Apply:
|
||||
# "select(di==1).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
11
core/pkg/fixhandler/testdata/inserts/tc-08-00-input-mapping-insert-mapping-indented.yaml
vendored
Normal file
11
core/pkg/fixhandler/testdata/inserts/tc-08-00-input-mapping-insert-mapping-indented.yaml
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: indented-parent-list-insert-list-value
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
15
core/pkg/fixhandler/testdata/inserts/tc-08-01-expected.yaml
vendored
Normal file
15
core/pkg/fixhandler/testdata/inserts/tc-08-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: indented-parent-list-insert-list-value
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- NET_RAW
|
||||
11
core/pkg/fixhandler/testdata/inserts/tc-09-00-input-list-insert-new-mapping-indented.yaml
vendored
Normal file
11
core/pkg/fixhandler/testdata/inserts/tc-09-00-input-list-insert-new-mapping-indented.yaml
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers += {"name": "redis", "image": "redis"}
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: indented-parent-list-insert-list-value
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
13
core/pkg/fixhandler/testdata/inserts/tc-09-01-expected.yaml
vendored
Normal file
13
core/pkg/fixhandler/testdata/inserts/tc-09-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers += {"name": "redis", "image": "redis"}
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: indented-parent-list-insert-list-value
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
- name: redis
|
||||
image: redis
|
||||
11
core/pkg/fixhandler/testdata/inserts/tc-10-00-input-list-insert-new-mapping.yaml
vendored
Normal file
11
core/pkg/fixhandler/testdata/inserts/tc-10-00-input-list-insert-new-mapping.yaml
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers += {"name": "redis", "image": "redis"}
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: indented-list-insert-new-object
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
13
core/pkg/fixhandler/testdata/inserts/tc-10-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers += {"name": "redis", "image": "redis"}
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: indented-list-insert-new-object
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
- name: redis
|
||||
image: redis
|
||||
14
core/pkg/fixhandler/testdata/removals/tc-01-00-input.yaml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
# Fix to Apply:
|
||||
# del(select(di==0).spec.containers[0].securityContext)
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: remove_example
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
12
core/pkg/fixhandler/testdata/removals/tc-01-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
# Fix to Apply:
|
||||
# del(select(di==0).spec.containers[0].securityContext)
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: remove_example
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
15
core/pkg/fixhandler/testdata/removals/tc-02-00-input.yaml
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
# Fix to Apply:
|
||||
# del(select(di==0).spec.containers[1])
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: remove_example
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
|
||||
- name: container_with_security_issues
|
||||
image: image_with_security_issues
|
||||
12
core/pkg/fixhandler/testdata/removals/tc-02-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
# Fix to Apply:
|
||||
# del(select(di==0).spec.containers[1])
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: remove_example
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
14
core/pkg/fixhandler/testdata/removals/tc-03-00-input.yaml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
# Fix to Apply:
|
||||
# del(select(di==0).spec.containers[0].securityContext.capabilities.drop[1])
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx1
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop: ["NET_RAW", "SYS_ADM"]
|
||||
14
core/pkg/fixhandler/testdata/removals/tc-03-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
# Fix to Apply:
|
||||
# del(select(di==0).spec.containers[0].securityContext.capabilities.drop[1])
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx1
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop: ["NET_RAW"]
|
||||
32
core/pkg/fixhandler/testdata/removals/tc-04-00-input.yaml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
# Fix to Apply:
|
||||
# del(select(di==0).spec.containers[0].securityContext)
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: remove_example
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
|
||||
---
|
||||
|
||||
# Fix to Apply:
|
||||
# del(select(di==0).spec.containers[1])
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: remove_example
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
|
||||
- name: container_with_security_issues
|
||||
image: image_with_security_issues
|
||||
27
core/pkg/fixhandler/testdata/removals/tc-04-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
# Fix to Apply:
|
||||
# del(select(di==0).spec.containers[0].securityContext)
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: remove_example
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
|
||||
---
|
||||
|
||||
# Fix to Apply:
|
||||
# del(select(di==0).spec.containers[1])
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: remove_example
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
14
core/pkg/fixhandler/testdata/replaces/tc-01-00-input.yaml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.runAsRoot |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: true
|
||||
14
core/pkg/fixhandler/testdata/replaces/tc-01-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
# Fix to Apply:
|
||||
# "select(di==0).spec.containers[0].securityContext.runAsRoot |= false"
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_to_mapping_node_1
|
||||
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx_container
|
||||
image: nginx
|
||||
securityContext:
|
||||
runAsRoot: false
|
||||
18
core/pkg/fixhandler/testdata/replaces/tc-02-00-input.yaml
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop[0] |= "SYS_ADM"
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.add[0] |= "NET_RAW"
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx1
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- "NET_RAW"
|
||||
add: ["SYS_ADM"]
|
||||
18
core/pkg/fixhandler/testdata/replaces/tc-02-01-expected.yaml
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
# Fix to Apply:
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.drop[0] |= "SYS_ADM"
|
||||
# select(di==0).spec.containers[0].securityContext.capabilities.add[0] |= "NET_RAW"
|
||||
|
||||
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: insert_list
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx1
|
||||
image: nginx
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- "SYS_ADM"
|
||||
add: ["NET_RAW"]
|
||||
286
core/pkg/fixhandler/yamlhandler.go
Normal file
@@ -0,0 +1,286 @@
|
||||
package fixhandler
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/mikefarah/yq/v4/pkg/yqlib"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
// decodeDocumentRoots decodes all YAML documents in the given YAML string and returns a slice of their root nodes
|
||||
func decodeDocumentRoots(yamlAsString string) ([]yaml.Node, error) {
|
||||
fileReader := strings.NewReader(yamlAsString)
|
||||
dec := yaml.NewDecoder(fileReader)
|
||||
|
||||
nodes := make([]yaml.Node, 0)
|
||||
for {
|
||||
var node yaml.Node
|
||||
err := dec.Decode(&node)
|
||||
|
||||
nodes = append(nodes, node)
|
||||
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Cannot Decode File as YAML")
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func getFixedNodes(yamlAsString, yamlExpression string) ([]yaml.Node, error) {
|
||||
preferences := yqlib.ConfiguredYamlPreferences
|
||||
preferences.EvaluateTogether = true
|
||||
decoder := yqlib.NewYamlDecoder(preferences)
|
||||
|
||||
var allDocuments = list.New()
|
||||
reader := strings.NewReader(yamlAsString)
|
||||
|
||||
fileDocuments, err := readDocuments(reader, decoder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allDocuments.PushBackList(fileDocuments)
|
||||
|
||||
allAtOnceEvaluator := yqlib.NewAllAtOnceEvaluator()
|
||||
|
||||
fixedCandidateNodes, err := allAtOnceEvaluator.EvaluateCandidateNodes(yamlExpression, allDocuments)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error fixing YAML, %w", err)
|
||||
}
|
||||
|
||||
fixedNodes := make([]yaml.Node, 0)
|
||||
var fixedNode *yaml.Node
|
||||
for fixedCandidateNode := fixedCandidateNodes.Front(); fixedCandidateNode != nil; fixedCandidateNode = fixedCandidateNode.Next() {
|
||||
fixedNode = fixedCandidateNode.Value.(*yqlib.CandidateNode).Node
|
||||
fixedNodes = append(fixedNodes, *fixedNode)
|
||||
}
|
||||
|
||||
return fixedNodes, nil
|
||||
}
|
||||
|
||||
func flattenWithDFS(node *yaml.Node) *[]nodeInfo {
|
||||
dfsOrder := make([]nodeInfo, 0)
|
||||
flattenWithDFSHelper(node, nil, &dfsOrder, 0)
|
||||
return &dfsOrder
|
||||
}
|
||||
|
||||
func flattenWithDFSHelper(node *yaml.Node, parent *yaml.Node, dfsOrder *[]nodeInfo, index int) {
|
||||
dfsNode := nodeInfo{
|
||||
node: node,
|
||||
parent: parent,
|
||||
index: index,
|
||||
}
|
||||
*dfsOrder = append(*dfsOrder, dfsNode)
|
||||
|
||||
for idx, child := range node.Content {
|
||||
flattenWithDFSHelper(child, node, dfsOrder, idx)
|
||||
}
|
||||
}
|
||||
|
||||
func getFixInfo(originalRootNodes, fixedRootNodes []yaml.Node) fileFixInfo {
|
||||
contentToAdd := make([]contentToAdd, 0)
|
||||
linesToRemove := make([]linesToRemove, 0)
|
||||
|
||||
for idx := 0; idx < len(fixedRootNodes); idx++ {
|
||||
originalList := flattenWithDFS(&originalRootNodes[idx])
|
||||
fixedList := flattenWithDFS(&fixedRootNodes[idx])
|
||||
nodeContentToAdd, nodeLinesToRemove := getFixInfoHelper(*originalList, *fixedList)
|
||||
contentToAdd = append(contentToAdd, nodeContentToAdd...)
|
||||
linesToRemove = append(linesToRemove, nodeLinesToRemove...)
|
||||
}
|
||||
|
||||
return fileFixInfo{
|
||||
contentsToAdd: &contentToAdd,
|
||||
linesToRemove: &linesToRemove,
|
||||
}
|
||||
}
|
||||
|
||||
func getFixInfoHelper(originalList, fixedList []nodeInfo) ([]contentToAdd, []linesToRemove) {
|
||||
|
||||
// While obtaining fixedYamlNode, comments and empty lines at the top are ignored.
|
||||
// This causes a difference in Line numbers across the tree structure. In order to
|
||||
// counter this, line numbers are adjusted in fixed list.
|
||||
adjustFixedListLines(&originalList, &fixedList)
|
||||
|
||||
contentToAdd := make([]contentToAdd, 0)
|
||||
linesToRemove := make([]linesToRemove, 0)
|
||||
|
||||
originalListTracker, fixedListTracker := 0, 0
|
||||
|
||||
fixInfoMetadata := &fixInfoMetadata{
|
||||
originalList: &originalList,
|
||||
fixedList: &fixedList,
|
||||
originalListTracker: originalListTracker,
|
||||
fixedListTracker: fixedListTracker,
|
||||
contentToAdd: &contentToAdd,
|
||||
linesToRemove: &linesToRemove,
|
||||
}
|
||||
|
||||
for originalListTracker < len(originalList) && fixedListTracker < len(fixedList) {
|
||||
matchNodeResult := matchNodes(originalList[originalListTracker].node, fixedList[fixedListTracker].node)
|
||||
|
||||
fixInfoMetadata.originalListTracker = originalListTracker
|
||||
fixInfoMetadata.fixedListTracker = fixedListTracker
|
||||
|
||||
switch matchNodeResult {
|
||||
case sameNodes:
|
||||
originalListTracker += 1
|
||||
fixedListTracker += 1
|
||||
|
||||
case removedNode:
|
||||
originalListTracker, fixedListTracker = addLinesToRemove(fixInfoMetadata)
|
||||
|
||||
case insertedNode:
|
||||
originalListTracker, fixedListTracker = addLinesToInsert(fixInfoMetadata)
|
||||
|
||||
case replacedNode:
|
||||
originalListTracker, fixedListTracker = updateLinesToReplace(fixInfoMetadata)
|
||||
}
|
||||
}
|
||||
|
||||
// Some nodes are still not visited if they are removed at the end of the list
|
||||
for originalListTracker < len(originalList) {
|
||||
fixInfoMetadata.originalListTracker = originalListTracker
|
||||
originalListTracker, _ = addLinesToRemove(fixInfoMetadata)
|
||||
}
|
||||
|
||||
// Some nodes are still not visited if they are inserted at the end of the list
|
||||
for fixedListTracker < len(fixedList) {
|
||||
// Use negative index of last node in original list as a placeholder to determine the last line number later
|
||||
fixInfoMetadata.originalListTracker = -(len(originalList) - 1)
|
||||
fixInfoMetadata.fixedListTracker = fixedListTracker
|
||||
_, fixedListTracker = addLinesToInsert(fixInfoMetadata)
|
||||
}
|
||||
|
||||
return contentToAdd, linesToRemove
|
||||
|
||||
}
|
||||
|
||||
// Adds the lines to remove and returns the updated originalListTracker
|
||||
func addLinesToRemove(fixInfoMetadata *fixInfoMetadata) (int, int) {
|
||||
isOneLine, line := isOneLineSequenceNode(fixInfoMetadata.originalList, fixInfoMetadata.originalListTracker)
|
||||
|
||||
if isOneLine {
|
||||
// Remove the entire line and replace it with the sequence node in fixed info. This way,
|
||||
// the original formatting is not lost.
|
||||
return replaceSingleLineSequence(fixInfoMetadata, line)
|
||||
}
|
||||
|
||||
currentDFSNode := (*fixInfoMetadata.originalList)[fixInfoMetadata.originalListTracker]
|
||||
|
||||
newOriginalListTracker := updateTracker(fixInfoMetadata.originalList, fixInfoMetadata.originalListTracker)
|
||||
*fixInfoMetadata.linesToRemove = append(*fixInfoMetadata.linesToRemove, linesToRemove{
|
||||
startLine: currentDFSNode.node.Line,
|
||||
endLine: getNodeLine(fixInfoMetadata.originalList, newOriginalListTracker),
|
||||
})
|
||||
|
||||
return newOriginalListTracker, fixInfoMetadata.fixedListTracker
|
||||
}
|
||||
|
||||
// Adds the lines to insert and returns the updated fixedListTracker
|
||||
func addLinesToInsert(fixInfoMetadata *fixInfoMetadata) (int, int) {
|
||||
|
||||
isOneLine, line := isOneLineSequenceNode(fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)
|
||||
|
||||
if isOneLine {
|
||||
return replaceSingleLineSequence(fixInfoMetadata, line)
|
||||
}
|
||||
|
||||
currentDFSNode := (*fixInfoMetadata.fixedList)[fixInfoMetadata.fixedListTracker]
|
||||
|
||||
lineToInsert := getLineToInsert(fixInfoMetadata)
|
||||
contentToInsert := getContent(currentDFSNode.parent, fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)
|
||||
|
||||
newFixedTracker := updateTracker(fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)
|
||||
|
||||
*fixInfoMetadata.contentToAdd = append(*fixInfoMetadata.contentToAdd, contentToAdd{
|
||||
line: lineToInsert,
|
||||
content: contentToInsert,
|
||||
})
|
||||
|
||||
return fixInfoMetadata.originalListTracker, newFixedTracker
|
||||
}
|
||||
|
||||
// Adds the lines to remove and insert and updates the fixedListTracker and originalListTracker
|
||||
func updateLinesToReplace(fixInfoMetadata *fixInfoMetadata) (int, int) {
|
||||
|
||||
isOneLine, line := isOneLineSequenceNode(fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)
|
||||
|
||||
if isOneLine {
|
||||
return replaceSingleLineSequence(fixInfoMetadata, line)
|
||||
}
|
||||
|
||||
currentDFSNode := (*fixInfoMetadata.fixedList)[fixInfoMetadata.fixedListTracker]
|
||||
|
||||
// If only the value node is changed, entire "key-value" pair is replaced
|
||||
if isValueNodeinMapping(¤tDFSNode) {
|
||||
fixInfoMetadata.originalListTracker -= 1
|
||||
fixInfoMetadata.fixedListTracker -= 1
|
||||
}
|
||||
|
||||
addLinesToRemove(fixInfoMetadata)
|
||||
updatedOriginalTracker, updatedFixedTracker := addLinesToInsert(fixInfoMetadata)
|
||||
|
||||
return updatedOriginalTracker, updatedFixedTracker
|
||||
}
|
||||
|
||||
func removeNewLinesAtTheEnd(yamlLines []string) []string {
|
||||
for idx := 1; idx < len(yamlLines); idx++ {
|
||||
if yamlLines[len(yamlLines)-idx] != "\n" {
|
||||
yamlLines = yamlLines[:len(yamlLines)-idx+1]
|
||||
break
|
||||
}
|
||||
}
|
||||
return yamlLines
|
||||
}
|
||||
|
||||
func getFixedYamlLines(yamlLines []string, fileFixInfo fileFixInfo) (fixedYamlLines []string) {
|
||||
|
||||
// Determining last line requires original yaml lines slice. The placeholder for last line is replaced with the real last line
|
||||
assignLastLine(fileFixInfo.contentsToAdd, fileFixInfo.linesToRemove, &yamlLines)
|
||||
|
||||
removeLines(fileFixInfo.linesToRemove, &yamlLines)
|
||||
|
||||
fixedYamlLines = make([]string, 0)
|
||||
lineIdx, lineToAddIdx := 1, 0
|
||||
|
||||
// Ideally, new node is inserted at line before the next node in DFS order. But, when the previous line contains a
|
||||
// comment or empty line, we need to insert new nodes before them.
|
||||
adjustContentLines(fileFixInfo.contentsToAdd, &yamlLines)
|
||||
|
||||
for lineToAddIdx < len(*fileFixInfo.contentsToAdd) {
|
||||
for lineIdx <= (*fileFixInfo.contentsToAdd)[lineToAddIdx].line {
|
||||
// Check if the current line is not removed
|
||||
if yamlLines[lineIdx-1] != "*" {
|
||||
fixedYamlLines = append(fixedYamlLines, yamlLines[lineIdx-1])
|
||||
}
|
||||
lineIdx += 1
|
||||
}
|
||||
|
||||
content := (*fileFixInfo.contentsToAdd)[lineToAddIdx].content
|
||||
fixedYamlLines = append(fixedYamlLines, content)
|
||||
|
||||
lineToAddIdx += 1
|
||||
}
|
||||
|
||||
for lineIdx <= len(yamlLines) {
|
||||
if yamlLines[lineIdx-1] != "*" {
|
||||
fixedYamlLines = append(fixedYamlLines, yamlLines[lineIdx-1])
|
||||
}
|
||||
lineIdx += 1
|
||||
}
|
||||
|
||||
fixedYamlLines = removeNewLinesAtTheEnd(fixedYamlLines)
|
||||
|
||||
return fixedYamlLines
|
||||
}
|
||||
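Taken together, the helpers above form a line-oriented patch pipeline: decode the original documents, evaluate the yq expression, diff the two DFS-flattened trees, and splice the resulting insertions and removals back into the raw lines. The exported entry point of the package is not part of this hunk, so the sketch below is only an illustration of how these unexported helpers might be composed inside package fixhandler; `applyFixSketch` and its wiring are assumptions, not the actual call chain.

```go
// applyFixSketch is a hypothetical composition of the unexported helpers
// above; it assumes it lives inside package fixhandler.
func applyFixSketch(yamlAsString, yamlExpression string) (string, error) {
	// Root nodes of the original documents, with their original line numbers.
	originalRootNodes, err := decodeDocumentRoots(yamlAsString)
	if err != nil {
		return "", err
	}

	// Root nodes after applying the yq expression (e.g. one of the
	// "# Fix to Apply" expressions from the fixtures above).
	fixedRootNodes, err := getFixedNodes(yamlAsString, yamlExpression)
	if err != nil {
		return "", err
	}

	// Diff the DFS-flattened trees to learn which lines to add and remove.
	fixInfo := getFixInfo(originalRootNodes, fixedRootNodes)

	// Patch the raw lines so comments and formatting outside the fix survive.
	fixedYamlLines := getFixedYamlLines(strings.Split(yamlAsString, "\n"), fixInfo)

	return getStringFromSlice(fixedYamlLines), nil
}
```

With the tc-07 input above and the expression `select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false`, such a composition should yield the corresponding `-expected` fixture.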
406
core/pkg/fixhandler/yamlhelper.go
Normal file
@@ -0,0 +1,406 @@
|
||||
package fixhandler
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"container/list"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
logger "github.com/kubescape/go-logger"
|
||||
"github.com/mikefarah/yq/v4/pkg/yqlib"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
type NodeRelation int
|
||||
|
||||
const (
|
||||
sameNodes NodeRelation = iota
|
||||
insertedNode
|
||||
removedNode
|
||||
replacedNode
|
||||
)
|
||||
|
||||
func matchNodes(nodeOne, nodeTwo *yaml.Node) NodeRelation {
|
||||
|
||||
isNewNode := nodeTwo.Line == 0 && nodeTwo.Column == 0
|
||||
sameLines := nodeOne.Line == nodeTwo.Line
|
||||
sameColumns := nodeOne.Column == nodeTwo.Column
|
||||
|
||||
isSameNode := isSameNode(nodeOne, nodeTwo)
|
||||
|
||||
switch {
|
||||
case isSameNode:
|
||||
return sameNodes
|
||||
case isNewNode:
|
||||
return insertedNode
|
||||
case sameLines && sameColumns:
|
||||
return replacedNode
|
||||
default:
|
||||
return removedNode
|
||||
}
|
||||
}
|
||||
|
||||
func adjustContentLines(contentToAdd *[]contentToAdd, linesSlice *[]string) {
|
||||
for contentIdx, content := range *contentToAdd {
|
||||
line := content.line
|
||||
|
||||
// Adjust line numbers such that there are no "empty lines or comment lines of next nodes" before them
|
||||
for idx := line - 1; idx >= 0; idx-- {
|
||||
if isEmptyLineOrComment((*linesSlice)[idx]) {
|
||||
(*contentToAdd)[contentIdx].line -= 1
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func adjustFixedListLines(originalList, fixedList *[]nodeInfo) {
|
||||
differenceAtTop := (*originalList)[0].node.Line - (*fixedList)[0].node.Line
|
||||
|
||||
if differenceAtTop <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
for _, node := range *fixedList {
|
||||
// line numbers should not be changed for new nodes.
|
||||
if node.node.Line != 0 {
|
||||
node.node.Line += differenceAtTop
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
|
||||
}
|
||||
|
||||
func encodeIntoYaml(parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) (string, error) {
|
||||
content := make([]*yaml.Node, 0)
|
||||
currentNode := (*nodeList)[tracker].node
|
||||
content = append(content, currentNode)
|
||||
|
||||
// Add the value in "key-value" pair to construct if the parent is mapping node
|
||||
if parentNode.Kind == yaml.MappingNode {
|
||||
valueNode := (*nodeList)[tracker+1].node
|
||||
content = append(content, valueNode)
|
||||
}
|
||||
|
||||
// The parent is added at the top to encode into YAML
|
||||
parentForContent := yaml.Node{
|
||||
Kind: parentNode.Kind,
|
||||
Content: content,
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
encoder := yaml.NewEncoder(buf)
|
||||
encoder.SetIndent(2)
|
||||
|
||||
errorEncoding := encoder.Encode(parentForContent)
|
||||
if errorEncoding != nil {
|
||||
return "", fmt.Errorf("Error debugging node, %v", errorEncoding.Error())
|
||||
}
|
||||
errorClosingEncoder := encoder.Close()
|
||||
if errorClosingEncoder != nil {
|
||||
return "", fmt.Errorf("Error closing encoder: %v", errorClosingEncoder.Error())
|
||||
}
|
||||
return fmt.Sprintf(`%v`, buf.String()), nil
|
||||
}
|
||||
|
||||
func getContent(parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) string {
|
||||
content, err := encodeIntoYaml(parentNode, nodeList, tracker)
|
||||
if err != nil {
|
||||
logger.L().Fatal("Cannot Encode into YAML")
|
||||
}
|
||||
|
||||
indentationSpaces := parentNode.Column - 1
|
||||
|
||||
content = indentContent(content, indentationSpaces)
|
||||
|
||||
return strings.TrimSuffix(content, "\n")
|
||||
}
|
||||
|
||||
func indentContent(content string, indentationSpaces int) string {
|
||||
indentedContent := ""
|
||||
indentSpaces := strings.Repeat(" ", indentationSpaces)
|
||||
|
||||
scanner := bufio.NewScanner(strings.NewReader(content))
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
indentedContent += (indentSpaces + line + "\n")
|
||||
}
|
||||
return indentedContent
|
||||
}
|
||||
|
||||
func getLineToInsert(fixInfoMetadata *fixInfoMetadata) int {
|
||||
var lineToInsert int
|
||||
// Check if lineToInsert is last line
|
||||
if fixInfoMetadata.originalListTracker < 0 {
|
||||
originalListTracker := int(math.Abs(float64(fixInfoMetadata.originalListTracker)))
|
||||
// Storing the negative value of line of last node as a placeholder to determine the last line later.
|
||||
lineToInsert = -(*fixInfoMetadata.originalList)[originalListTracker].node.Line
|
||||
} else {
|
||||
lineToInsert = (*fixInfoMetadata.originalList)[fixInfoMetadata.originalListTracker].node.Line - 1
|
||||
}
|
||||
return lineToInsert
|
||||
}
|
||||
|
||||
func assignLastLine(contentsToAdd *[]contentToAdd, linesToRemove *[]linesToRemove, linesSlice *[]string) {
|
||||
for idx, contentToAdd := range *contentsToAdd {
|
||||
if contentToAdd.line < 0 {
|
||||
currentLine := int(math.Abs(float64(contentToAdd.line)))
|
||||
(*contentsToAdd)[idx].line, _ = getLastLineOfResource(linesSlice, currentLine)
|
||||
}
|
||||
}
|
||||
|
||||
for idx, lineToRemove := range *linesToRemove {
|
||||
if lineToRemove.endLine < 0 {
|
||||
endLine, _ := getLastLineOfResource(linesSlice, lineToRemove.startLine)
|
||||
(*linesToRemove)[idx].endLine = endLine
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getLastLineOfResource(linesSlice *[]string, currentLine int) (int, error) {
|
||||
// Get the last lines of all resources
|
||||
lastLinesOfResources := make([]int, 0)
|
||||
for lineNumber, lineContent := range *linesSlice {
|
||||
if lineContent == "---" {
|
||||
for lastLine := lineNumber - 1; lastLine >= 0; lastLine-- {
|
||||
if !isEmptyLineOrComment((*linesSlice)[lastLine]) {
|
||||
lastLinesOfResources = append(lastLinesOfResources, lastLine+1)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
lastLine := len(*linesSlice)
|
||||
for lastLine >= 0 {
|
||||
if !isEmptyLineOrComment((*linesSlice)[lastLine-1]) {
|
||||
lastLinesOfResources = append(lastLinesOfResources, lastLine)
|
||||
break
|
||||
} else {
|
||||
lastLine--
|
||||
}
|
||||
}
|
||||
|
||||
// Get last line of the resource we need
|
||||
for _, endLine := range lastLinesOfResources {
|
||||
if currentLine <= endLine {
|
||||
return endLine, nil
|
||||
}
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("Provided line is greater than the length of YAML file")
|
||||
}
|
||||
|
||||
func getNodeLine(nodeList *[]nodeInfo, tracker int) int {
|
||||
if tracker < len(*nodeList) {
|
||||
return (*nodeList)[tracker].node.Line
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
// Checks if the node is value node in "key-value" pairs of mapping node
|
||||
func isValueNodeinMapping(node *nodeInfo) bool {
|
||||
if node.parent.Kind == yaml.MappingNode && node.index%2 != 0 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Checks if the node is part of single line sequence node and returns the line
|
||||
func isOneLineSequenceNode(list *[]nodeInfo, currentTracker int) (bool, int) {
|
||||
parentNode := (*list)[currentTracker].parent
|
||||
if parentNode.Kind != yaml.SequenceNode {
|
||||
return false, -1
|
||||
}
|
||||
|
||||
var currentNode, prevNode nodeInfo
|
||||
currentTracker -= 1
|
||||
|
||||
for (*list)[currentTracker].node != parentNode {
|
||||
currentNode = (*list)[currentTracker]
|
||||
prevNode = (*list)[currentTracker-1]
|
||||
|
||||
if currentNode.node.Line != prevNode.node.Line {
|
||||
return false, -1
|
||||
}
|
||||
currentTracker -= 1
|
||||
}
|
||||
|
||||
parentNodeInfo := (*list)[currentTracker]
|
||||
|
||||
if parentNodeInfo.parent.Kind == yaml.MappingNode {
|
||||
keyNodeInfo := (*list)[currentTracker-1]
|
||||
if keyNodeInfo.node.Line == parentNode.Line {
|
||||
return true, parentNode.Line
|
||||
} else {
|
||||
return false, -1
|
||||
}
|
||||
} else {
|
||||
if parentNodeInfo.parent.Line == parentNode.Line {
|
||||
return true, parentNode.Line
|
||||
} else {
|
||||
return false, -1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Checks if nodes are of same kind, value, line and column
|
||||
func isSameNode(nodeOne, nodeTwo *yaml.Node) bool {
|
||||
sameLines := nodeOne.Line == nodeTwo.Line
|
||||
sameColumns := nodeOne.Column == nodeTwo.Column
|
||||
sameKinds := nodeOne.Kind == nodeTwo.Kind
|
||||
sameValues := nodeOne.Value == nodeTwo.Value
|
||||
|
||||
return sameKinds && sameValues && sameLines && sameColumns
|
||||
}
|
||||
|
||||
// Checks if the line is empty or a comment
|
||||
func isEmptyLineOrComment(lineContent string) bool {
|
||||
lineContent = strings.TrimSpace(lineContent)
|
||||
if lineContent == "" {
|
||||
return true
|
||||
} else if lineContent[0:1] == "#" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func readDocuments(reader io.Reader, decoder yqlib.Decoder) (*list.List, error) {
|
||||
err := decoder.Init(reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error Initializing the decoder, %w", err)
|
||||
}
|
||||
inputList := list.New()
|
||||
|
||||
var currentIndex uint
|
||||
|
||||
for {
|
||||
candidateNode, errorReading := decoder.Decode()
|
||||
|
||||
if errors.Is(errorReading, io.EOF) {
|
||||
switch reader := reader.(type) {
|
||||
case *os.File:
|
||||
safelyCloseFile(reader)
|
||||
}
|
||||
return inputList, nil
|
||||
} else if errorReading != nil {
|
||||
return nil, fmt.Errorf("Error Decoding YAML file, %w", errorReading)
|
||||
}
|
||||
|
||||
candidateNode.Document = currentIndex
|
||||
candidateNode.EvaluateTogether = true
|
||||
|
||||
inputList.PushBack(candidateNode)
|
||||
|
||||
currentIndex = currentIndex + 1
|
||||
}
|
||||
}
|
||||
|
||||
func safelyCloseFile(file *os.File) {
|
||||
err := file.Close()
|
||||
if err != nil {
|
||||
logger.L().Error("Error Closing File")
|
||||
}
|
||||
}
|
||||
|
||||
// Remove the entire line and replace it with the sequence node in fixed info. This way,
|
||||
// the original formatting is not lost.
|
||||
func replaceSingleLineSequence(fixInfoMetadata *fixInfoMetadata, line int) (int, int) {
|
||||
originalListTracker := getFirstNodeInLine(fixInfoMetadata.originalList, line)
|
||||
fixedListTracker := getFirstNodeInLine(fixInfoMetadata.fixedList, line)
|
||||
|
||||
currentDFSNode := (*fixInfoMetadata.fixedList)[fixedListTracker]
|
||||
contentToInsert := getContent(currentDFSNode.parent, fixInfoMetadata.fixedList, fixedListTracker)
|
||||
|
||||
// Remove the Single line
|
||||
*fixInfoMetadata.linesToRemove = append(*fixInfoMetadata.linesToRemove, linesToRemove{
|
||||
startLine: line,
|
||||
endLine: line,
|
||||
})
|
||||
|
||||
// Encode entire Sequence Node and Insert
|
||||
*fixInfoMetadata.contentToAdd = append(*fixInfoMetadata.contentToAdd, contentToAdd{
|
||||
line: line,
|
||||
content: contentToInsert,
|
||||
})
|
||||
|
||||
originalListTracker = updateTracker(fixInfoMetadata.originalList, originalListTracker)
|
||||
fixedListTracker = updateTracker(fixInfoMetadata.fixedList, fixedListTracker)
|
||||
|
||||
return originalListTracker, fixedListTracker
|
||||
}
|
||||
|
||||
// Returns the first node in the given line that is not a mapping node
|
||||
func getFirstNodeInLine(list *[]nodeInfo, line int) int {
|
||||
tracker := 0
|
||||
|
||||
currentNode := (*list)[tracker].node
|
||||
for currentNode.Line != line || currentNode.Kind == yaml.MappingNode {
|
||||
tracker += 1
|
||||
currentNode = (*list)[tracker].node
|
||||
}
|
||||
|
||||
return tracker
|
||||
}
|
||||
|
||||
// To avoid disturbing line numbers while inserting, removed lines are not deleted but replaced with "*"
|
||||
func removeLines(linesToRemove *[]linesToRemove, linesSlice *[]string) {
|
||||
var startLine, endLine int
|
||||
for _, lineToRemove := range *linesToRemove {
|
||||
startLine = lineToRemove.startLine - 1
|
||||
endLine = lineToRemove.endLine - 1
|
||||
|
||||
for line := startLine; line <= endLine; line++ {
|
||||
lineContent := (*linesSlice)[line]
|
||||
// When determining the endLine, empty lines and comments which are not intended to be removed are included.
|
||||
// To deal with that, we need to refrain from removing empty lines and comments
|
||||
if isEmptyLineOrComment(lineContent) {
|
||||
break
|
||||
}
|
||||
(*linesSlice)[line] = "*"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Skips the current node including its children in DFS order and returns the new tracker.
|
||||
func skipCurrentNode(node *yaml.Node, currentTracker int) int {
|
||||
updatedTracker := currentTracker + getChildrenCount(node)
|
||||
return updatedTracker
|
||||
}
|
||||
|
||||
func getChildrenCount(node *yaml.Node) int {
|
||||
totalChildren := 1
|
||||
for _, child := range node.Content {
|
||||
totalChildren += getChildrenCount(child)
|
||||
}
|
||||
return totalChildren
|
||||
}
|
||||
|
||||
// The current node along with its children is skipped and the tracker is moved to the next sibling
|
||||
// of current node. If parent is mapping node, "value" in "key-value" pairs is also skipped.
|
||||
func updateTracker(nodeList *[]nodeInfo, tracker int) int {
|
||||
currentNode := (*nodeList)[tracker]
|
||||
var updatedTracker int
|
||||
|
||||
if currentNode.parent.Kind == yaml.MappingNode {
|
||||
valueNode := (*nodeList)[tracker+1]
|
||||
updatedTracker = skipCurrentNode(valueNode.node, tracker+1)
|
||||
} else {
|
||||
updatedTracker = skipCurrentNode(currentNode.node, tracker)
|
||||
}
|
||||
|
||||
return updatedTracker
|
||||
}
|
||||
|
||||
func getStringFromSlice(yamlLines []string) (fixedYamlString string) {
|
||||
return strings.Join(yamlLines, "\n")
|
||||
}
|
||||
@@ -28,12 +28,15 @@ spec:
|
||||
tolerations:
|
||||
# this toleration is to have the DaemonSet runnable on master nodes
|
||||
# remove it if your masters can't run pods
|
||||
- key: node-role.kubernetes.io/control-plane
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
containers:
|
||||
- name: host-sensor
|
||||
image: quay.io/kubescape/host-scanner:v1.0.28
|
||||
image: quay.io/kubescape/host-scanner:v1.0.32
|
||||
securityContext:
|
||||
privileged: true
|
||||
readOnlyRootFilesystem: true
|
||||
@@ -69,4 +72,4 @@ spec:
|
||||
name: host-filesystem
|
||||
hostNetwork: true
|
||||
hostPID: true
|
||||
hostIPC: true
|
||||
hostIPC: true
|
||||
|
||||
@@ -129,6 +129,12 @@ func (hsh *HostSensorHandler) GetKubeProxyInfo() ([]hostsensor.HostSensorDataEnv
|
||||
return hsh.sendAllPodsHTTPGETRequest("/kubeProxyInfo", "KubeProxyInfo")
|
||||
}
|
||||
|
||||
// return list of ControlPlaneInfo
|
||||
func (hsh *HostSensorHandler) GetControlPlaneInfo() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
return hsh.sendAllPodsHTTPGETRequest("/controlPlaneInfo", ControlPlaneInfo)
|
||||
}
|
||||
|
||||
// return list of KubeletCommandLine
|
||||
func (hsh *HostSensorHandler) GetKubeletCommandLine() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
@@ -269,6 +275,16 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
|
||||
res = append(res, kcData...)
|
||||
}
|
||||
|
||||
// GetControlPlaneInfo
|
||||
kcData, err = hsh.GetControlPlaneInfo()
|
||||
if err != nil {
|
||||
addInfoToMap(ControlPlaneInfo, infoMap, err)
|
||||
logger.L().Warning(err.Error())
|
||||
}
|
||||
if len(kcData) > 0 {
|
||||
res = append(res, kcData...)
|
||||
}
|
||||
|
||||
logger.L().Debug("Done reading information from host scanner")
|
||||
return res, infoMap, nil
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ var (
|
||||
KubeletCommandLine = "KubeletCommandLine"
|
||||
KubeletInfo = "KubeletInfo"
|
||||
KubeProxyInfo = "KubeProxyInfo"
|
||||
ControlPlaneInfo = "ControlPlaneInfo"
|
||||
|
||||
MapHostSensorResourceToApiGroup = map[string]string{
|
||||
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
|
||||
@@ -26,6 +27,7 @@ var (
|
||||
LinuxKernelVariables: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeletInfo: "hostdata.kubescape.cloud/v1beta0",
|
||||
KubeProxyInfo: "hostdata.kubescape.cloud/v1beta0",
|
||||
ControlPlaneInfo: "hostdata.kubescape.cloud/v1beta0",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@@ -41,6 +41,8 @@ func (policyHandler *PolicyHandler) getPolicies(policyIdentifier []cautils.Polic
|
||||
controlsInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.ClusterName)
|
||||
if err == nil {
|
||||
policiesAndResources.RegoInputData.PostureControlInputs = controlsInputs
|
||||
} else {
|
||||
logger.L().Error(err.Error())
|
||||
}
|
||||
cautils.StopSpinner()
|
||||
|
||||
|
||||
@@ -51,7 +51,7 @@ func (ksCivAdaptor *KSCivAdaptor) GetImageVulnerability(imageID *registryvulnera
|
||||
pageNumber := 1
|
||||
request := V2ListRequest{PageSize: &pageSize, PageNum: &pageNumber, InnerFilters: filter, OrderBy: "timestamp:desc"}
|
||||
requestBody, _ := json.Marshal(request)
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsDetails?customerGUID=%s", ksCivAdaptor.ksCloudAPI.GetApiURL(), ksCivAdaptor.ksCloudAPI.GetAccountID())
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsDetails?customerGUID=%s", ksCivAdaptor.ksCloudAPI.GetCloudAPIURL(), ksCivAdaptor.ksCloudAPI.GetAccountID())
|
||||
|
||||
resp, err := ksCivAdaptor.ksCloudAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
|
||||
if err != nil {
|
||||
|
||||
@@ -14,7 +14,7 @@ func (armoCivAdaptor *KSCivAdaptor) getImageLastScanId(imageID *registryvulnerab
|
||||
pageNumber := 1
|
||||
request := V2ListRequest{PageSize: &pageSize, PageNum: &pageNumber, InnerFilters: filter, OrderBy: "timestamp:desc"}
|
||||
requestBody, _ := json.Marshal(request)
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsSumSummary?customerGUID=%s", armoCivAdaptor.ksCloudAPI.GetApiURL(), armoCivAdaptor.ksCloudAPI.GetAccountID())
|
||||
requestUrl := fmt.Sprintf("https://%s/api/v1/vulnerability/scanResultsSumSummary?customerGUID=%s", armoCivAdaptor.ksCloudAPI.GetCloudAPIURL(), armoCivAdaptor.ksCloudAPI.GetAccountID())
|
||||
|
||||
resp, err := armoCivAdaptor.ksCloudAPI.Post(requestUrl, map[string]string{"Content-Type": "application/json"}, requestBody)
|
||||
if err != nil {
|
||||
|
||||
33
core/pkg/registryadaptors/gcp/v1/Readme.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# GCP Adaptor
|
||||
|
||||
### How we add the GCP adaptor
|
||||
|
||||
Since multiple registries may be in use, we check for each adaptor whether the required credentials are available. For every adaptor that has credentials, we append it to the adaptors slice.
|
||||
|
||||
For GCP in particular, we first obtain the `gcpCloudAPI` from the connector. We still haven't created a proper function that initializes the gcpCloudAPI with the projectId, credentialsPath, and credentialsCheck fields. We check the `credentialsCheck` bool, which is set to true when we have credentials (to be set when initializing the gcpCloudAPI).
|
||||
|
||||
### How we fetch vulnerabilities for images
|
||||
|
||||
Step 1:
|
||||
Get container analysis client
|
||||
For this we need the credentials of the service account. Of the available approaches, we use a [JSON key file](https://cloud.google.com/container-registry/docs/advanced-authentication#json-key) for credentials; the path to this file should be stored in `credentialsPath`.
|
||||
|
||||
Step 2:
|
||||
Send a ListOccurrencesRequest
|
||||
For this we need the `projectID` and the `resourceUrl`. The projectID should be provided by the user, and the resourceUrl is the processed image tag that we get from the Kubescape resources.
|
||||
|
||||
Step 3:
|
||||
Get Occurrence iterator
|
||||
We use the context and the request from the ListOccurrencesRequest to get the occurrence iterator.
|
||||
|
||||
|
||||
### How we convert the response to Vulnerabilities
|
||||
|
||||
The response from the iterator has two kinds, i.e. Discovery and Vulnerability, and each has a different struct.
|
||||
|
||||
### How can this adaptor be used by the user
|
||||
|
||||
To learn about GCR service accounts, see https://cloud.google.com/container-registry/docs/gcr-service-account
|
||||
Export the following environment variables:
|
||||
`export KS_GCP_CREDENTIALS_PATH=<path to service account credentials file>`
|
||||
`export KS_GCP_PROJECT_ID=<your project ID>`
|
||||
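For reference, here is a minimal usage sketch of the adaptor described in this README. It assumes a `*getter.GCPCloudAPI` that has already been populated with the project ID, credentials path and context (as noted above, a proper initialization helper for it does not exist yet), so `scanImages`, its parameters and the surrounding `example` package are illustrative assumptions rather than code from this change.

```go
package example

import (
	"fmt"

	"github.com/kubescape/kubescape/v2/core/cautils/getter"
	v1 "github.com/kubescape/kubescape/v2/core/pkg/registryadaptors/gcp/v1"
	"github.com/kubescape/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
)

// scanImages is a sketch: gcpAPI is assumed to already hold the project ID,
// credentials path and context (e.g. derived from KS_GCP_PROJECT_ID and
// KS_GCP_CREDENTIALS_PATH).
func scanImages(gcpAPI *getter.GCPCloudAPI, tags []string) error {
	adaptor := v1.NewGCPAdaptor(gcpAPI)
	// Login creates the Container Analysis client from the credentials file.
	if err := adaptor.Login(); err != nil {
		return err
	}

	imageIDs := make([]registryvulnerabilities.ContainerImageIdentifier, 0, len(tags))
	for _, tag := range tags {
		imageIDs = append(imageIDs, registryvulnerabilities.ContainerImageIdentifier{Tag: tag})
	}

	reports, err := adaptor.GetImagesVulnerabilities(imageIDs)
	if err != nil {
		return err
	}
	for _, report := range reports {
		fmt.Printf("%s: %d vulnerabilities\n", report.ImageID.Tag, len(report.Vulnerabilities))
	}
	return nil
}
```

The tests in this package exercise the same interface through `NewGCPAdaptorMock` instead of a real `GCPCloudAPI`.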
24
core/pkg/registryadaptors/gcp/v1/datastructure.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
)
|
||||
|
||||
type GCPAdaptor struct {
|
||||
GCPCloudAPI *getter.GCPCloudAPI
|
||||
}
|
||||
|
||||
type Mock struct {
|
||||
Name string
|
||||
Notename string
|
||||
CvssScore float32
|
||||
CreatedTime int64
|
||||
UpdatedTime int64
|
||||
Type string
|
||||
ShortDescription string
|
||||
AffectedCPEURI string
|
||||
AffectedPackage string
|
||||
FixAvailable bool
|
||||
AffectedVersion string
|
||||
FixedVersion string
|
||||
}
|
||||
88
core/pkg/registryadaptors/gcp/v1/gcpadaptor.go
Normal file
@@ -0,0 +1,88 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
containeranalysis "cloud.google.com/go/containeranalysis/apiv1"
|
||||
"github.com/kubescape/go-logger"
|
||||
"github.com/kubescape/go-logger/helpers"
|
||||
"github.com/kubescape/kubescape/v2/core/cautils/getter"
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
grafeaspb "google.golang.org/genproto/googleapis/grafeas/v1"
|
||||
)
|
||||
|
||||
func NewGCPAdaptor(GCPCloudAPI *getter.GCPCloudAPI) *GCPAdaptor {
|
||||
return &GCPAdaptor{
|
||||
GCPCloudAPI: GCPCloudAPI,
|
||||
}
|
||||
}
|
||||
|
||||
func (GCPAdaptor *GCPAdaptor) Login() error {
|
||||
client, err := containeranalysis.NewClient(GCPAdaptor.GCPCloudAPI.GetContext(), option.WithCredentialsFile(GCPAdaptor.GCPCloudAPI.GetCredentialsPath()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
GCPAdaptor.GCPCloudAPI.SetClient(client)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (GCPAdaptor *GCPAdaptor) GetImagesVulnerabilities(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageVulnerabilityReport, error) {
|
||||
resultList := make([]registryvulnerabilities.ContainerImageVulnerabilityReport, 0)
|
||||
for _, imageID := range imageIDs {
|
||||
result, err := GCPAdaptor.GetImageVulnerability(&imageID)
|
||||
if err == nil {
|
||||
resultList = append(resultList, *result)
|
||||
} else {
|
||||
logger.L().Debug("failed to get image vulnerabilities", helpers.String("image", imageID.Tag), helpers.Error(err))
|
||||
}
|
||||
}
|
||||
return resultList, nil
|
||||
}
|
||||
|
||||
func (GCPAdaptor *GCPAdaptor) GetImageVulnerability(imageID *registryvulnerabilities.ContainerImageIdentifier) (*registryvulnerabilities.ContainerImageVulnerabilityReport, error) {
|
||||
|
||||
resourceUrl := fmt.Sprintf("https://%s", imageID.Tag)
|
||||
|
||||
req := &grafeaspb.ListOccurrencesRequest{
|
||||
Parent: fmt.Sprintf("projects/%s", GCPAdaptor.GCPCloudAPI.GetProjectID()),
|
||||
Filter: fmt.Sprintf(`resourceUrl=%q`, resourceUrl),
|
||||
}
|
||||
|
||||
it := GCPAdaptor.GCPCloudAPI.GetClient().GetGrafeasClient().ListOccurrences(GCPAdaptor.GCPCloudAPI.GetContext(), req)
|
||||
occs := []*grafeaspb.Occurrence{}
|
||||
var count int
|
||||
for {
|
||||
occ, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
occs = append(occs, occ)
|
||||
count++
|
||||
}
|
||||
vulnerabilities := responseObjectToVulnerabilities(occs, count)
|
||||
|
||||
resultImageVulnerabilityReport := registryvulnerabilities.ContainerImageVulnerabilityReport{
|
||||
ImageID: *imageID,
|
||||
Vulnerabilities: vulnerabilities,
|
||||
}
|
||||
return &resultImageVulnerabilityReport, nil
|
||||
}
|
||||
|
||||
func (GCPAdaptor *GCPAdaptor) DescribeAdaptor() string {
|
||||
return "GCP image vulnerabilities scanner, docs: https://cloud.google.com/container-analysis/docs/container-analysis"
|
||||
}
|
||||
|
||||
func (GCPAdaptor *GCPAdaptor) GetImagesInformation(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageInformation, error) {
|
||||
// TODO
|
||||
return []registryvulnerabilities.ContainerImageInformation{}, nil
|
||||
}
|
||||
|
||||
func (GCPAdaptor *GCPAdaptor) GetImagesScanStatus(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageScanStatus, error) {
|
||||
// TODO
|
||||
return []registryvulnerabilities.ContainerImageScanStatus{}, nil
|
||||
}
|
||||
31
core/pkg/registryadaptors/gcp/v1/gcpadaptor_test.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSum(t *testing.T) {
|
||||
var err error
|
||||
var adaptor registryvulnerabilities.IContainerImageVulnerabilityAdaptor
|
||||
|
||||
adaptor, err = NewGCPAdaptorMock()
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.NoError(t, adaptor.Login())
|
||||
|
||||
imageVulnerabilityReports, err := adaptor.GetImagesVulnerabilities([]registryvulnerabilities.ContainerImageIdentifier{{Tag: "gcr.io/myproject/nginx@sha256:1XXXXX"}, {Tag: "gcr.io/myproject/nginx@sha256:2XXXXX"}})
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := range imageVulnerabilityReports {
|
||||
var length int
|
||||
if i == 0 {
|
||||
length = 5
|
||||
} else if i == 1 {
|
||||
length = 3
|
||||
}
|
||||
assert.Equal(t, length, len(imageVulnerabilityReports[i].Vulnerabilities))
|
||||
}
|
||||
}
|
||||
185
core/pkg/registryadaptors/gcp/v1/gcpadaptormock.go
Normal file
@@ -0,0 +1,185 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
grafeaspb "google.golang.org/genproto/googleapis/grafeas/v1"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
type GCPAdaptorMock struct {
|
||||
resultList []registryvulnerabilities.ContainerImageVulnerabilityReport
|
||||
}
|
||||
|
||||
func NewGCPAdaptorMock() (*GCPAdaptorMock, error) {
|
||||
return &GCPAdaptorMock{}, nil
|
||||
}
|
||||
|
||||
func (GCPAdaptorMock *GCPAdaptorMock) Login() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (GCPAdaptorMock *GCPAdaptorMock) GetImagesVulnerabilities(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageVulnerabilityReport, error) {
|
||||
resultList := make([]registryvulnerabilities.ContainerImageVulnerabilityReport, 0)
|
||||
for _, imageID := range imageIDs {
|
||||
result, err := GCPAdaptorMock.GetImageVulnerability(&imageID)
|
||||
if err == nil {
|
||||
resultList = append(resultList, *result)
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resultList, nil
|
||||
}
|
||||
|
||||
GCPAdaptorMock.resultList = resultList
|
||||
return GCPAdaptorMock.resultList, nil
|
||||
}
|
||||
|
||||
func (GCPAdaptorMock *GCPAdaptorMock) GetImageVulnerability(imageID *registryvulnerabilities.ContainerImageIdentifier) (*registryvulnerabilities.ContainerImageVulnerabilityReport, error) {
|
||||
vulnerability := []*grafeaspb.Occurrence_Vulnerability{}
|
||||
occurrence := []*grafeaspb.Occurrence{}
|
||||
arr := GetMockData()
|
||||
|
||||
for i := range arr {
|
||||
if imageID.Tag == "gcr.io/myproject/nginx@sha256:2XXXXX" && i == 4 {
|
||||
break
|
||||
}
|
||||
vulnerability = append(vulnerability, &grafeaspb.Occurrence_Vulnerability{
|
||||
Vulnerability: &grafeaspb.VulnerabilityOccurrence{
|
||||
Type: arr[i].Type,
|
||||
CvssScore: arr[i].CvssScore,
|
||||
ShortDescription: arr[i].ShortDescription,
|
||||
PackageIssue: []*grafeaspb.VulnerabilityOccurrence_PackageIssue{
|
||||
{
|
||||
FixedVersion: &grafeaspb.Version{
|
||||
FullName: arr[i].FixedVersion,
|
||||
},
|
||||
AffectedVersion: &grafeaspb.Version{
|
||||
FullName: arr[i].AffectedVersion,
|
||||
},
|
||||
AffectedCpeUri: arr[i].AffectedCPEURI,
|
||||
AffectedPackage: arr[i].AffectedPackage,
|
||||
},
|
||||
},
|
||||
FixAvailable: arr[i].FixAvailable,
|
||||
},
|
||||
})
|
||||
|
||||
occurrence = append(occurrence, &grafeaspb.Occurrence{
|
||||
Name: arr[i].Name,
|
||||
Kind: grafeaspb.NoteKind_ATTESTATION,
|
||||
NoteName: arr[i].Notename,
|
||||
CreateTime: ×tamppb.Timestamp{
|
||||
Seconds: arr[i].CreatedTime,
|
||||
},
|
||||
UpdateTime: ×tamppb.Timestamp{
|
||||
Seconds: arr[i].UpdatedTime,
|
||||
},
|
||||
Details: vulnerability[i],
|
||||
})
|
||||
}
|
||||
|
||||
vulnerabilities := responseObjectToVulnerabilities(occurrence, 5)
|
||||
|
||||
resultImageVulnerabilityReport := registryvulnerabilities.ContainerImageVulnerabilityReport{
|
||||
ImageID: *imageID,
|
||||
Vulnerabilities: vulnerabilities,
|
||||
}
|
||||
return &resultImageVulnerabilityReport, nil
|
||||
}
|
||||
|
||||
func (GCPAdaptorMock *GCPAdaptorMock) DescribeAdaptor() string {
|
||||
// TODO
|
||||
return ""
|
||||
}
|
||||
|
||||
func (GCPAdaptorMock *GCPAdaptorMock) GetImagesInformation(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageInformation, error) {
|
||||
// TODO
|
||||
return []registryvulnerabilities.ContainerImageInformation{}, nil
|
||||
}
|
||||
|
||||
func (GCPAdaptorMock *GCPAdaptorMock) GetImagesScanStatus(imageIDs []registryvulnerabilities.ContainerImageIdentifier) ([]registryvulnerabilities.ContainerImageScanStatus, error) {
|
||||
// TODO
|
||||
return []registryvulnerabilities.ContainerImageScanStatus{}, nil
|
||||
}
|
||||
|
||||
//==============================================================================================================================
|
||||
//==============================================================================================================================
|
||||
//==============================================================================================================================
|
||||
|
||||
func GetMockData() []Mock {
|
||||
arr := []Mock{
|
||||
{
|
||||
Name: "projects/stable-furnace-356005/occurrences/41fd9fec-6fab-4531-a4ee-e7b97d518554",
|
||||
Notename: "projects/goog-vulnz/notes/CVE-2009-4487",
|
||||
CvssScore: 6.8,
|
||||
CreatedTime: 1661061853,
|
||||
UpdatedTime: 1661061853,
|
||||
Type: "OS",
|
||||
ShortDescription: "CVE-2009-4487",
|
||||
AffectedCPEURI: "cpe:/o:debian:debian_linux:11",
|
||||
AffectedPackage: "nginx",
|
||||
FixAvailable: true,
|
||||
AffectedVersion: "1.23.1-1~bullseye",
|
||||
FixedVersion: "",
|
||||
},
|
||||
{
|
||||
Name: "projects/stable-furnace-356005/occurrences/b28fa29f-5c2b-45c7-9727-2f1f02ed1957",
|
||||
Notename: "projects/goog-vulnz/notes/CVE-2017-17740",
|
||||
CvssScore: 2.3,
|
||||
CreatedTime: 3237628,
|
||||
UpdatedTime: 5989893,
|
||||
Type: "OS",
|
||||
ShortDescription: "CVE-2017-17740",
|
||||
AffectedCPEURI: "cpe:/o:debian:debian_linux:11",
|
||||
AffectedPackage: "openldap",
|
||||
FixAvailable: false,
|
||||
AffectedVersion: "1.3.5",
|
||||
FixedVersion: "1.3.5",
|
||||
},
|
||||
{
|
||||
Name: "projects/stable-furnace-356005/occurrences/b28fa29f-5c2b-45c7-9727-2f1f02ed1957",
|
||||
Notename: "projects/goog-vulnz/notes/CVE-2017-17740",
|
||||
CvssScore: 2.3,
|
||||
CreatedTime: 3237628,
|
||||
UpdatedTime: 5989893,
|
||||
Type: "OS",
|
||||
ShortDescription: "CVE-2017-17740",
|
||||
AffectedCPEURI: "cpe:/o:debian:debian_linux:11",
|
||||
AffectedPackage: "openldap",
|
||||
FixAvailable: false,
|
||||
AffectedVersion: "1.3.5",
|
||||
FixedVersion: "1.3.5",
|
||||
},
|
||||
{
|
||||
Name: "projects/stable-furnace-356005/occurrences/b28fa29f-5c2b-45c7-9727-2f1f02ed1957",
|
||||
Notename: "projects/goog-vulnz/notes/CVE-2017-17740",
|
||||
CvssScore: 2.3,
|
||||
CreatedTime: 3237628,
|
||||
UpdatedTime: 5989893,
|
||||
Type: "OS",
|
||||
ShortDescription: "CVE-2017-17740",
|
||||
AffectedCPEURI: "cpe:/o:debian:debian_linux:11",
|
||||
AffectedPackage: "openldap",
|
||||
FixAvailable: false,
|
||||
AffectedVersion: "1.3.5",
|
||||
FixedVersion: "1.3.5",
|
||||
},
|
||||
{
|
||||
Name: "projects/stable-furnace-356005/occurrences/b28fa29f-5c2b-45c7-9727-2f1f02ed1957",
|
||||
Notename: "projects/goog-vulnz/notes/CVE-2017-17740",
|
||||
CvssScore: 2.3,
|
||||
CreatedTime: 3237628,
|
||||
UpdatedTime: 5989893,
|
||||
Type: "OS",
|
||||
ShortDescription: "CVE-2017-17740",
|
||||
AffectedCPEURI: "cpe:/o:debian:debian_linux:11",
|
||||
AffectedPackage: "openldap",
|
||||
FixAvailable: false,
|
||||
AffectedVersion: "1.3.5",
|
||||
FixedVersion: "1.3.5",
|
||||
},
|
||||
}
|
||||
|
||||
return arr
|
||||
}
|
||||
36
core/pkg/registryadaptors/gcp/v1/gcpadaptorutils.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"github.com/kubescape/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"
|
||||
grafeaspb "google.golang.org/genproto/googleapis/grafeas/v1"
|
||||
)
|
||||
|
||||
func responseObjectToVulnerabilities(vulnerabilityList []*grafeaspb.Occurrence, count int) []registryvulnerabilities.Vulnerability {
|
||||
vulnerabilities := make([]registryvulnerabilities.Vulnerability, count)
|
||||
for i, vulnerabilityEntry := range vulnerabilityList {
|
||||
if vulnerabilityEntry.GetKind().String() != "DISCOVERY" {
|
||||
vulnerabilities[i].Name = vulnerabilityEntry.Name
|
||||
vulnerabilities[i].NoteName = vulnerabilityEntry.NoteName
|
||||
vulnerabilities[i].CreateTime = vulnerabilityEntry.CreateTime.AsTime()
|
||||
vulnerabilities[i].UpdateTime = vulnerabilityEntry.UpdateTime.AsTime()
|
||||
vulnerabilities[i].CVSS = vulnerabilityEntry.GetVulnerability().CvssScore
|
||||
vulnerabilities[i].AffectedCPEURI = vulnerabilityEntry.GetVulnerability().PackageIssue[0].AffectedCpeUri
|
||||
vulnerabilities[i].AffectedPackage = vulnerabilityEntry.GetVulnerability().PackageIssue[0].AffectedPackage
|
||||
vulnerabilities[i].AffectedVersion = vulnerabilityEntry.GetVulnerability().PackageIssue[0].AffectedVersion.FullName
|
||||
vulnerabilities[i].FixedVersion = vulnerabilityEntry.GetVulnerability().PackageIssue[0].FixedVersion.FullName
|
||||
vulnerabilities[i].FixedCPEURI = vulnerabilityEntry.GetVulnerability().PackageIssue[0].FixedCpeUri
|
||||
vulnerabilities[i].FixedPackege = vulnerabilityEntry.GetVulnerability().PackageIssue[0].FixedPackage
|
||||
vulnerabilities[i].FixAvailablePackage = vulnerabilityEntry.GetVulnerability().PackageIssue[0].GetFixAvailable()
|
||||
vulnerabilities[i].PackageType = vulnerabilityEntry.GetVulnerability().PackageIssue[0].PackageType
|
||||
vulnerabilities[i].EffectiveSeverityPackage = vulnerabilityEntry.GetVulnerability().PackageIssue[0].EffectiveSeverity.String()
|
||||
vulnerabilities[i].AffectedPackage = vulnerabilityEntry.GetVulnerability().PackageIssue[0].AffectedPackage
|
||||
vulnerabilities[i].Severity = vulnerabilityEntry.GetVulnerability().Severity.Enum().String()
|
||||
vulnerabilities[i].ShortDescription = vulnerabilityEntry.GetVulnerability().ShortDescription
|
||||
vulnerabilities[i].LongDescription = vulnerabilityEntry.GetVulnerability().LongDescription
|
||||
} else {
|
||||
vulnerabilities[i].Description = vulnerabilityEntry.GetDiscovery().String()
|
||||
}
|
||||
}
|
||||
|
||||
return vulnerabilities
|
||||
}
|
||||
@@ -28,19 +28,36 @@ type Categories struct {
|
||||
}
|
||||
|
||||
type Vulnerability struct {
|
||||
Name string `json:"name"`
|
||||
RelatedPackageName string `json:"packageName"`
|
||||
PackageVersion string `json:"packageVersion"`
|
||||
Link string `json:"link"`
|
||||
Description string `json:"description"`
|
||||
Severity string `json:"severity"`
|
||||
Metadata interface{} `json:"metadata"`
|
||||
Fixes []FixedIn `json:"fixedIn"`
|
||||
Relevancy string `json:"relevant"` // use the related enum
|
||||
UrgentCount int `json:"urgent"`
|
||||
NeglectedCount int `json:"neglected"`
|
||||
HealthStatus string `json:"healthStatus"`
|
||||
Categories Categories `json:"categories"`
|
||||
Name string `json:"name"`
|
||||
RelatedPackageName string `json:"packageName"`
|
||||
PackageVersion string `json:"packageVersion"`
|
||||
Link string `json:"link"`
|
||||
Description string `json:"description"`
|
||||
Severity string `json:"severity"`
|
||||
Metadata interface{} `json:"metadata"`
|
||||
Fixes []FixedIn `json:"fixedIn"`
|
||||
Relevancy string `json:"relevant"` // use the related enum
|
||||
UrgentCount int `json:"urgent"`
|
||||
NeglectedCount int `json:"neglected"`
|
||||
HealthStatus string `json:"healthStatus"`
|
||||
Categories Categories `json:"categories"`
|
||||
NoteName string `json:",omitempty"`
|
||||
CreateTime time.Time `json:",omitempty"`
|
||||
UpdateTime time.Time `json:",omitempty"` // Vulnerability started
|
||||
CVSS float32 `json:",omitempty"` // other cvss versions are available
|
||||
AffectedCPEURI string `json:",omitempty"` // Package issue
|
||||
AffectedPackage string `json:",omitempty"`
|
||||
AffectedVersion string `json:",omitempty"`
|
||||
FixedVersion string `json:",omitempty"`
|
||||
FixedCPEURI string `json:",omitempty"`
|
||||
FixedPackege string `json:",omitempty"`
|
||||
FixAvailablePackage bool `json:",omitempty"`
|
||||
PackageType string `json:",omitempty"`
|
||||
EffectiveSeverityPackage string `json:",omitempty"`
|
||||
ShortDescription string `json:",omitempty"` // Package issue ends
|
||||
LongDescription string `json:",omitempty"`
|
||||
EffectiveSeverity string `json:",omitempty"`
|
||||
FixAvailable bool `json:",omitempty"`
|
||||
}
|
||||
|
||||
type ContainerImageVulnerabilityReport struct {
|
||||
|
||||
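The new fields use bare json:",omitempty" tags, so the JSON key defaults to the Go field name and zero values are dropped on marshaling. A minimal sketch of that behavior with a reduced, hypothetical stand-in for the struct above (not the full Vulnerability type):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// vulnDemo keeps only a few representative fields; names and values are illustrative.
type vulnDemo struct {
	Name         string    `json:"name"`
	FixedVersion string    `json:",omitempty"` // key defaults to "FixedVersion"
	FixAvailable bool      `json:",omitempty"` // false counts as empty and is omitted
	CreateTime   time.Time `json:",omitempty"` // struct types are never "empty", so the zero time is still emitted
}

func main() {
	b, _ := json.Marshal(vulnDemo{Name: "CVE-2021-44228", FixedVersion: "2.17.0"})
	fmt.Println(string(b))
	// {"name":"CVE-2021-44228","FixedVersion":"2.17.0","CreateTime":"0001-01-01T00:00:00Z"}
}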
@@ -10,6 +10,7 @@ import (

type IFieldSelector interface {
	GetNamespacesSelectors(*schema.GroupVersionResource) []string
	GetClusterScope(*schema.GroupVersionResource) bool
}

type EmptySelector struct {
@@ -19,6 +20,10 @@ func (es *EmptySelector) GetNamespacesSelectors(resource *schema.GroupVersionRes
	return []string{""} //
}

func (es *EmptySelector) GetClusterScope(*schema.GroupVersionResource) bool {
	return true
}

type ExcludeSelector struct {
	namespace string
}
@@ -27,6 +32,14 @@ func NewExcludeSelector(ns string) *ExcludeSelector {
	return &ExcludeSelector{namespace: ns}
}

func (es *ExcludeSelector) GetClusterScope(resource *schema.GroupVersionResource) bool {
	// for selector, 'namespace' is in Namespaced scope
	if resource.Resource == "namespaces" {
		return true
	}
	return false
}

type IncludeSelector struct {
	namespace string
}
@@ -34,6 +47,15 @@ type IncludeSelector struct {
func NewIncludeSelector(ns string) *IncludeSelector {
	return &IncludeSelector{namespace: ns}
}

func (is *IncludeSelector) GetClusterScope(resource *schema.GroupVersionResource) bool {
	// for selector, 'namespace' is in Namespaced scope
	if resource.Resource == "namespaces" {
		return true
	}
	return false
}

func (es *ExcludeSelector) GetNamespacesSelectors(resource *schema.GroupVersionResource) []string {
	fieldSelectors := ""
	for _, n := range strings.Split(es.namespace, ",") {

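For context, a self-contained sketch of how a selector along these lines could be exercised; the demoSelector type and the metadata.namespace!=... selector strings are assumptions for illustration, not code from this change:

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// demoSelector mimics the ExcludeSelector semantics shown above: the namespaces
// resource itself stays visible at cluster scope, other resources get filtered.
type demoSelector struct {
	excluded string // comma-separated namespaces to exclude
}

func (d *demoSelector) GetClusterScope(resource *schema.GroupVersionResource) bool {
	return resource.Resource == "namespaces"
}

func (d *demoSelector) GetNamespacesSelectors(_ *schema.GroupVersionResource) []string {
	selectors := []string{}
	for _, ns := range strings.Split(d.excluded, ",") {
		if ns == "" {
			continue
		}
		// assumed field-selector format; the real implementation may differ
		selectors = append(selectors, fmt.Sprintf("metadata.namespace!=%s", ns))
	}
	return selectors
}

func main() {
	sel := &demoSelector{excluded: "kube-system,kube-public"}
	pods := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	namespaces := schema.GroupVersionResource{Version: "v1", Resource: "namespaces"}

	fmt.Println(sel.GetClusterScope(&pods))        // false
	fmt.Println(sel.GetClusterScope(&namespaces))  // true
	fmt.Println(sel.GetNamespacesSelectors(&pods)) // [metadata.namespace!=kube-system metadata.namespace!=kube-public]
}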
@@ -103,6 +103,8 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
	gitRepo, err := cautils.NewLocalGitRepository(path)
	if err == nil && gitRepo != nil {
		repoRoot, _ = gitRepo.GetRootDir()
	} else {
		repoRoot, _ = filepath.Abs(path)
	}

	// load resource from local file system
@@ -141,7 +143,7 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
		}

		workloadSource := reporthandling.Source{
			RelativePath: source,
			RelativePath: relSource,
			FileType: filetype,
			LastCommit: lastCommit,
		}

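The RelativePath change suggests workload sources are recorded relative to the repository root when one is detected, with an absolute path as the fallback. A stdlib-only sketch of that idea, using hypothetical names (relativeToRoot is not a function in this change):

package main

import (
	"fmt"
	"path/filepath"
)

// relativeToRoot returns path relative to repoRoot when possible and falls
// back to the absolute path otherwise.
func relativeToRoot(repoRoot, path string) string {
	abs, err := filepath.Abs(path)
	if err != nil {
		return path
	}
	if repoRoot == "" {
		return abs
	}
	if rel, err := filepath.Rel(repoRoot, abs); err == nil {
		return rel
	}
	return abs
}

func main() {
	fmt.Println(relativeToRoot("/home/user/repo", "/home/user/repo/examples/online-boutique/adservice.yaml"))
	// examples/online-boutique/adservice.yaml
}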
@@ -84,7 +84,7 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio

	imgVulnResources := cautils.MapImageVulnResources(ksResourceMap)
	// check that controls use image vulnerability resources
	if false { //len(imgVulnResources) > 0 {
	if len(imgVulnResources) > 0 {
		logger.L().Info("Requesting images vulnerabilities results")
		cautils.StartSpinner()
		if err := k8sHandler.registryAdaptors.collectImagesVulnerabilities(k8sResourcesMap, allResources, ksResourceMap); err != nil {
@@ -134,8 +136,6 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio

	// check that controls use cloud resources
	if len(cloudResources) > 0 {
		logger.L().Info("Requesting cloud provider data")
		cautils.StartSpinner()
		provider, err := getCloudProviderDescription(allResources, ksResourceMap)
		if err != nil {
			cautils.SetInfoMapForResources(err.Error(), cloudResources, sessionObj.InfoMap)
@@ -146,13 +144,29 @@ func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessio
			sessionObj.Metadata.ContextMetadata.ClusterContextMetadata.CloudProvider = provider
		}
	}
	cautils.StopSpinner()
	logger.L().Info("Requested cloud provider data")

	// api server info resource
	err = k8sHandler.collectAPIServerInfoResource(allResources, ksResourceMap)
	if err != nil {
		logger.L().Warning("failed to collect api server info resource", helpers.Error(err))
	}
	}

	return k8sResourcesMap, allResources, ksResourceMap, nil
}

func (k8sHandler *K8sResourceHandler) collectAPIServerInfoResource(allResources map[string]workloadinterface.IMetadata, ksResourceMap *cautils.KSResources) error {
	clusterAPIServerInfo, err := k8sHandler.k8s.DiscoveryClient.ServerVersion()
	if err != nil {
		return err
	}
	resource := cloudsupport.NewApiServerVersionInfo(clusterAPIServerInfo)
	allResources[resource.GetID()] = resource
	(*ksResourceMap)[fmt.Sprintf("%s/%s", resource.GetApiVersion(), resource.GetKind())] = []string{resource.GetID()}

	return nil
}

func (k8sHandler *K8sResourceHandler) GetClusterAPIServerInfo() *version.Info {
	clusterAPIServerInfo, err := k8sHandler.k8s.DiscoveryClient.ServerVersion()
	if err != nil {
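A rough sketch of the bookkeeping collectAPIServerInfoResource performs, with plain stand-in types instead of workloadinterface.IMetadata and cautils.KSResources; the group/version, kind, and ID strings are illustrative assumptions, not the values kubescape actually emits:

package main

import "fmt"

// apiServerInfo stands in for the resource built by cloudsupport.NewApiServerVersionInfo;
// only the accessors used in the wiring above are modeled.
type apiServerInfo struct {
	apiVersion, kind, id string
}

func (a apiServerInfo) GetApiVersion() string { return a.apiVersion }
func (a apiServerInfo) GetKind() string       { return a.kind }
func (a apiServerInfo) GetID() string         { return a.id }

func main() {
	// stand-ins for allResources and *ksResourceMap
	allResources := map[string]apiServerInfo{}
	ksResourceMap := map[string][]string{}

	resource := apiServerInfo{
		apiVersion: "apiserverinfo.kubescape.cloud/v1beta0", // assumed
		kind:       "APIServerInfo",                         // assumed
		id:         "apiserverinfo.kubescape.cloud/v1beta0//APIServerInfo/version",
	}

	// same wiring: index the resource by ID, then register it under "apiVersion/kind"
	allResources[resource.GetID()] = resource
	ksResourceMap[fmt.Sprintf("%s/%s", resource.GetApiVersion(), resource.GetKind())] = []string{resource.GetID()}

	fmt.Println(ksResourceMap)
}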
@@ -230,10 +244,14 @@ func (k8sHandler *K8sResourceHandler) pullSingleResource(resource *schema.GroupV

		// set dynamic object
		var clientResource dynamic.ResourceInterface
		if namespace != "" && k8sinterface.IsNamespaceScope(resource) {
			clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource).Namespace(namespace)
		} else {
		if namespace != "" {
			clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource)
		} else if k8sinterface.IsNamespaceScope(resource) {
			clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource).Namespace(namespace)
		} else if k8sHandler.fieldSelector.GetClusterScope(*&resource) {
			clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource)
		} else {
			continue
		}

		// list resources

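The new branch order reads as a small decision table over the namespace filter, the resource's scope, and the field selector's cluster-scope answer. A sketch that mirrors it as a pure function, with plain booleans standing in for the real k8sinterface.IsNamespaceScope and fieldSelector.GetClusterScope calls:

package main

import "fmt"

// chooseClient mirrors the branch order introduced above and returns a label
// for the dynamic-client call that would be made; it performs no Kubernetes calls.
func chooseClient(namespace string, isNamespaced, clusterScope bool) string {
	switch {
	case namespace != "":
		return "DynamicClient.Resource(gvr)"
	case isNamespaced:
		return "DynamicClient.Resource(gvr).Namespace(namespace)"
	case clusterScope:
		return "DynamicClient.Resource(gvr)"
	default:
		return "skip (continue)"
	}
}

func main() {
	fmt.Println(chooseClient("kube-system", true, true)) // DynamicClient.Resource(gvr)
	fmt.Println(chooseClient("", true, true))            // DynamicClient.Resource(gvr).Namespace(namespace)
	fmt.Println(chooseClient("", false, true))           // DynamicClient.Resource(gvr)
	fmt.Println(chooseClient("", false, false))          // skip (continue)
}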
@@ -22,6 +22,7 @@ var (
	ImageVulnerabilities = "ImageVulnerabilities"
	KubeletInfo = "KubeletInfo"
	KubeProxyInfo = "KubeProxyInfo"
	ControlPlaneInfo = "ControlPlaneInfo"

	MapResourceToApiGroup = map[string]string{
		KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
@@ -33,6 +34,7 @@ var (
		LinuxKernelVariables: "hostdata.kubescape.cloud/v1beta0",
		KubeletInfo: "hostdata.kubescape.cloud/v1beta0",
		KubeProxyInfo: "hostdata.kubescape.cloud/v1beta0",
		ControlPlaneInfo: "hostdata.kubescape.cloud/v1beta0",
	}
	MapResourceToApiGroupVuln = map[string][]string{
		ImageVulnerabilities: {"armo.vuln.images/v1", "image.vulnscan.com/v1"}}

@@ -8,6 +8,7 @@ import (
	"github.com/kubescape/k8s-interface/workloadinterface"
	"github.com/kubescape/kubescape/v2/core/cautils"
	"github.com/kubescape/kubescape/v2/core/cautils/getter"
	gcpadaptorv1 "github.com/kubescape/kubescape/v2/core/pkg/registryadaptors/gcp/v1"
	armosecadaptorv1 "github.com/kubescape/kubescape/v2/core/pkg/registryadaptors/armosec/v1"
	"github.com/kubescape/kubescape/v2/core/pkg/registryadaptors/registryvulnerabilities"

@@ -158,5 +159,12 @@ func listAdaptores() ([]registryvulnerabilities.IContainerImageVulnerabilityAdap
		}
	}

	gcpCloudAPI := getter.GetGlobalGCPCloudAPIConnector()
	if gcpCloudAPI != nil {
		if gcpCloudAPI.GetCredentialsCheck() {
			adaptors = append(adaptors, gcpadaptorv1.NewGCPAdaptor(getter.GetGlobalGCPCloudAPIConnector()))
		}
	}

	return adaptors, nil
}

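A minimal sketch of the registration pattern used in listAdaptores (guard on the connector and its credentials check before appending), with hypothetical stand-in types rather than the real registryvulnerabilities interfaces:

package main

import "fmt"

// imageVulnAdaptor stands in for registryvulnerabilities.IContainerImageVulnerabilityAdaptor.
type imageVulnAdaptor interface {
	Login() error
}

// fakeGCPConnector stands in for the GCP cloud API connector; hasCredentials
// plays the role of GetCredentialsCheck.
type fakeGCPConnector struct{ hasCredentials bool }

func (f *fakeGCPConnector) Login() error { return nil }

// listAdaptorsDemo registers an adaptor only when its connector exists and its
// credentials check passes, mirroring the guard-then-append pattern above.
func listAdaptorsDemo(connector *fakeGCPConnector) []imageVulnAdaptor {
	adaptors := make([]imageVulnAdaptor, 0)
	if connector != nil && connector.hasCredentials {
		adaptors = append(adaptors, connector)
	}
	return adaptors
}

func main() {
	fmt.Println(len(listAdaptorsDemo(nil)))                                     // 0
	fmt.Println(len(listAdaptorsDemo(&fakeGCPConnector{hasCredentials: true}))) // 1
}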
@@ -13,6 +13,10 @@ var (
	urlD = "https://raw.githubusercontent.com/kubescape/kubescape/master/examples/online-boutique/adservice.yaml"
)

/*

TODO: tests were commented out due to actual http calls ; http calls should be mocked.

func TestScanRepository(t *testing.T) {
	{
		files, err := ScanRepository(urlA, "")
@@ -112,6 +116,7 @@ func TestGithubGetYamlFromTree(t *testing.T) {
		assert.Equal(t, 12, len(files))
	}
}
*/

func TestGithubParse(t *testing.T) {
	{
