Compare commits

..

41 Commits

Author SHA1 Message Date
Vlad Klokun
b0f65cce1d Merge pull request #959 from suhasgumma/fix-command
fix: keep user formatting when autofixing
2023-01-11 20:13:16 +02:00
Vlad Klokun
0e8b2f976d tests: extend test cases for autofix inserts
This change re-organizes the test cases for inserts performed by the
autofixing feature.
2023-01-11 19:50:57 +02:00
Vlad Klokun
f1d646ac97 tests: show diffs when comparing autofixes
This change refactors the TestApplyFixKeepsFormatting test to use
assert.Equalf so it will display a convenient diff between the expected
and actual fixing result.
2023-01-11 18:44:06 +02:00
Vlad Klokun
5f668037a7 tests: test fixing close to newline-separated keys in hybrid scenarios 2023-01-11 18:43:14 +02:00
Vlad Klokun
c448c97463 tests: test autofixing files with comments between fields 2023-01-10 19:43:25 +02:00
Vlad Klokun
da3bc8e8ea tests: test autofixing indented lists in hybrid scenarios 2023-01-10 19:43:09 +02:00
Vlad Klokun
2fce139a9a tests: re-organize autofixing unit tests
This change:
- Changes test data naming convention to be lexicographically sortable
  and have input and expected data side-by-side.
- Executes each test case in a separate run.
2023-01-10 19:43:04 +02:00
suhasgumma
85d2f5c250 Minor Changes to Improve Code Quality 2022-12-27 13:02:03 +05:30
suhasgumma
c3771eec7e Remove redundant functions and clean code after refactoring 2022-12-16 15:57:23 +05:30
suhasgumma
76d2154152 All the minor Changes 2022-12-16 12:56:40 +05:30
suhasgumma
fa5e7fef23 Restructure Code to Improve Code Quality 2022-12-16 03:25:13 +05:30
suhasgumma
38d2696058 Break updateFileFixInfo function into getResourceFileFix and addResourceFileFix functions 2022-12-15 19:14:52 +05:30
suhasgumma
e9a8ffbda9 Add abstraction while adding contentToAdd and linesToBeRemoved to fileFixInfo 2022-12-15 17:03:54 +05:30
suhasgumma
0d76fffa48 Don't export structs that are not needed outside fixhandler package 2022-12-15 11:54:00 +05:30
suhasgumma
218c77f3ae Delete .DS_Store files that are added by mistake 2022-12-15 11:27:14 +05:30
suhasgumma
89fd7eb439 Move Test files to tesdata directory 2022-12-15 11:19:04 +05:30
suhasgumma
8079f9ae7d Improve Code Readability 2022-12-13 15:19:26 +05:30
suhasgumma
f9a26b7a95 Added Test cases for multiple resources in the same YAML file 2022-12-12 23:30:37 +05:30
suhasgumma
663401d908 Dealing with Insertion and Removal at last line of a Resource 2022-12-12 23:09:13 +05:30
suhasgumma
926790f49d Dealing with Multiple Resources in a single YAML file 2022-12-12 18:42:59 +05:30
suhasgumma
566b7c29c1 Simplify dealing with comments and empty lines at the top 2022-12-11 18:47:52 +05:30
suhasgumma
af5cdefc5f Improve Readability of code 2022-12-11 16:35:40 +05:30
suhasgumma
36b7b8e2ac Replace Single Line Sequence Node 2022-12-11 15:30:51 +05:30
suhasgumma
17c52bd0ae Handle Single Line Sequence Node in Replacement 2022-12-10 18:24:06 +05:30
suhasgumma
e02086e90c Handle Single Line Sequence Node in Removal 2022-12-10 18:11:17 +05:30
suhasgumma
baf62887b9 Handle Single Line Sequence Node 2022-12-10 17:29:04 +05:30
suhasgumma
99fa81e411 Fix Bugs Remove Scenario 2022-12-10 12:00:15 +05:30
suhasgumma
f64200f42f Small Changes 2022-12-09 23:44:04 +05:30
suhasgumma
f72cb215d7 Adjust Content Line 2022-12-09 22:45:55 +05:30
suhasgumma
fa03a9dae3 Fixed: Comments and empty lines at the head are excluded 2022-12-09 18:55:31 +05:30
suhasgumma
48516b891f unit tests 2022-12-09 15:31:09 +05:30
suhasgumma
252a564552 Remove, Replace and Hybrid Scenarios 2022-12-09 12:23:51 +05:30
suhasgumma
30e5b9b57d Insert Scenarios 2022-12-09 02:29:14 +05:30
suhasgumma
7fcfa27d9a Initial Implementation 2022-12-08 23:06:05 +05:30
suhasgumma
4b898b0075 Initial Implementation 2022-12-08 22:56:53 +05:30
Suhas Gumma
f3665866af import "yqlib" properly
Co-authored-by: Vlad Klokun <vladklokun@users.noreply.github.com>
2022-11-30 01:31:49 +05:30
Suhas Gumma
a7989bbe76 Update core/pkg/fixhandler/datastructures.go
Co-authored-by: Vlad Klokun <vladklokun@users.noreply.github.com>
2022-11-30 01:30:02 +05:30
suhasgumma
5ce69a750d Fix git directory relative path 2022-11-29 18:19:49 +05:30
suhasgumma
2b61989073 Fix local directory relative path 2022-11-29 18:15:30 +05:30
suhasgumma
be33054973 Possible Solution to Indentation Issue 2022-11-23 22:54:59 +05:30
Amir Malka
4b9bd5f3ae Initial implementation of fix command (#898)
* Fix command initial implementation
2022-11-07 14:57:17 +02:00
95 changed files with 2634 additions and 535 deletions

View File

@@ -86,6 +86,6 @@ jobs:
client: "image-release"
image_name: "quay.io/${{ github.repository_owner }}/kubescape"
image_tag: "v2.0.${{ github.run_number }}"
support_platforms: true
support_platforms: false
cosign: true
secrets: inherit

View File

@@ -21,6 +21,6 @@ jobs:
client: "image-dev"
image_name: "quay.io/${{ github.repository_owner }}/kubescape"
image_tag: "dev-v2.0.${{ github.run_number }}"
support_platforms: true
support_platforms: false
cosign: true
secrets: inherit

View File

@@ -220,8 +220,6 @@ kubescape scan *.yaml
```
#### Scan Kubernetes manifest files from a git repository
```
kubescape scan https://github.com/kubescape/kubescape
```

View File

@@ -57,7 +57,7 @@ def main():
if client_name:
ldflags += " -X {}={}".format(client_var, client_name)
build_command = ["go", "build", "-buildmode=pie", "-tags=static", "-o", ks_file, "-ldflags" ,ldflags]
build_command = ["go", "build", "-tags=static", "-o", ks_file, "-ldflags" ,ldflags]
print("Building kubescape and saving here: {}".format(ks_file))
print("Build command: {}".format(" ".join(build_command)))

View File

@@ -12,7 +12,7 @@ ENV CGO_ENABLED=1
# Install required python/pip
ENV PYTHONUNBUFFERED=1
RUN apk add --update --no-cache python3 gcc make git libc-dev binutils-gold cmake pkgconfig && ln -sf python3 /usr/bin/python
RUN apk add --update --no-cache python3 git openssl-dev musl-dev gcc make cmake pkgconfig && ln -sf python3 /usr/bin/python
RUN python3 -m ensurepip
RUN pip3 install --no-cache --upgrade pip setuptools

View File

@@ -24,8 +24,8 @@ var (
# Download the NSA framework. Run 'kubescape list frameworks' for all frameworks names
kubescape download framework nsa
# Download the "HostPath mount" control. Run 'kubescape list controls' for all controls names
kubescape download control "HostPath mount"
# Download the "Allowed hostPath" control. Run 'kubescape list controls' for all controls names
kubescape download control "Allowed hostPath"
# Download the "C-0001" control. Run 'kubescape list controls --id' for all controls ids
kubescape download control C-0001
@@ -36,8 +36,6 @@ var (
# Download the configured controls-inputs
kubescape download controls-inputs
# Download the attack tracks
kubescape download attack-tracks
`
)

45
cmd/fix/fix.go Normal file
View File

@@ -0,0 +1,45 @@
package fix
import (
"errors"
"github.com/kubescape/kubescape/v2/core/meta"
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
"github.com/spf13/cobra"
)
var fixCmdExamples = `
Fix command is for fixing kubernetes manifest files based on a scan command output.
Use with caution, this command will change your files in-place.
# Fix kubernetes YAML manifest files based on a scan command output (output.json)
1) kubescape scan --format json --format-version v2 --output output.json
2) kubescape fix output.json
`
func GetFixCmd(ks meta.IKubescape) *cobra.Command {
var fixInfo metav1.FixInfo
fixCmd := &cobra.Command{
Use: "fix <report output file>",
Short: "Fix misconfiguration in files",
Long: ``,
Example: fixCmdExamples,
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) < 1 {
return errors.New("report output file is required")
}
fixInfo.ReportFile = args[0]
return ks.Fix(&fixInfo)
},
}
fixCmd.PersistentFlags().BoolVar(&fixInfo.NoConfirm, "no-confirm", false, "No confirmation will be given to the user before applying the fix (default false)")
fixCmd.PersistentFlags().BoolVar(&fixInfo.DryRun, "dry-run", false, "No changes will be applied (default false)")
fixCmd.PersistentFlags().BoolVar(&fixInfo.SkipUserValues, "skip-user-values", true, "Changes which involve user-defined values will be skipped")
return fixCmd
}

View File

@@ -20,8 +20,11 @@ var (
# List all supported frameworks names
kubescape list frameworks --account <account id>
# List all supported controls names with ids
# List all supported controls names
kubescape list controls
# List all supported controls ids
kubescape list controls --id
Control documentation:
https://hub.armosec.io/docs/controls
@@ -64,8 +67,8 @@ func GetListCmd(ks meta.IKubescape) *cobra.Command {
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.Account, "account", "", "", "Kubescape SaaS account ID. Default will load account ID from cache")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.ClientID, "client-id", "", "", "Kubescape SaaS client ID. Default will load client ID from cache, read more - https://hub.armosec.io/docs/authentication")
listCmd.PersistentFlags().StringVarP(&listPolicies.Credentials.SecretKey, "secret-key", "", "", "Kubescape SaaS secret key. Default will load secret key from cache, read more - https://hub.armosec.io/docs/authentication")
listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-print'/'json'")
listCmd.PersistentFlags().MarkDeprecated("id", "Control ID's are included in list outpus")
listCmd.PersistentFlags().StringVar(&listPolicies.Format, "format", "pretty-print", "output format. supported: 'pretty-printer'/'json'")
listCmd.PersistentFlags().BoolVarP(&listPolicies.ListIDs, "id", "", false, "List control ID's instead of controls names")
return listCmd
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/kubescape/kubescape/v2/cmd/config"
"github.com/kubescape/kubescape/v2/cmd/delete"
"github.com/kubescape/kubescape/v2/cmd/download"
"github.com/kubescape/kubescape/v2/cmd/fix"
"github.com/kubescape/kubescape/v2/cmd/list"
"github.com/kubescape/kubescape/v2/cmd/scan"
"github.com/kubescape/kubescape/v2/cmd/submit"
@@ -78,6 +79,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
rootCmd.AddCommand(version.GetVersionCmd())
rootCmd.AddCommand(config.GetConfigCmd(ks))
rootCmd.AddCommand(update.GetUpdateCmd())
rootCmd.AddCommand(fix.GetFixCmd(ks))
return rootCmd
}

View File

@@ -23,7 +23,7 @@ var (
kubescape scan control "privileged container"
# Scan list of controls separated with a comma
kubescape scan control "privileged container","HostPath mount"
kubescape scan control "privileged container","allowed hostpath"
# Scan list of controls using the control ID separated with a comma
kubescape scan control C-0058,C-0057
@@ -61,7 +61,7 @@ func getControlCmd(ks meta.IKubescape, scanInfo *cautils.ScanInfo) *cobra.Comman
if err := validateFrameworkScanInfo(scanInfo); err != nil {
return err
}
// flagValidationControl(scanInfo)
scanInfo.PolicyIdentifier = []cautils.PolicyIdentifier{}

View File

@@ -31,11 +31,10 @@ var (
// getRBACCmd represents the RBAC command
func getRBACCmd(ks meta.IKubescape, submitInfo *v1.Submit) *cobra.Command {
return &cobra.Command{
Use: "rbac",
Deprecated: "This command is deprecated and will not be supported after 1/Jan/2023. Please use the 'scan' command instead.",
Example: rbacExamples,
Short: "Submit cluster's Role-Based Access Control(RBAC)",
Long: ``,
Use: "rbac",
Example: rbacExamples,
Short: "Submit cluster's Role-Based Access Control(RBAC)",
Long: ``,
RunE: func(cmd *cobra.Command, args []string) error {
if err := flagValidationSubmit(submitInfo); err != nil {

View File

@@ -7,21 +7,16 @@ import (
)
var submitCmdExamples = `
# Submit Kubescape scan results file
kubescape submit results
# Submit exceptions file to Kubescape SaaS
kubescape submit exceptions
`
func GetSubmitCmd(ks meta.IKubescape) *cobra.Command {
var submitInfo metav1.Submit
submitCmd := &cobra.Command{
Use: "submit <command>",
Short: "Submit an object to the Kubescape SaaS version",
Long: ``,
Example: submitCmdExamples,
Use: "submit <command>",
Short: "Submit an object to the Kubescape SaaS version",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
},
}

View File

@@ -1,12 +0,0 @@
package cautils
import (
"fmt"
"strings"
)
func GetControlLink(controlID string) string {
// For CIS Controls, cis-1.1.3 will be transformed to cis-1-1-3 in documentation link.
docLinkID := strings.ReplaceAll(controlID, ".", "-")
return fmt.Sprintf("https://hub.armosec.io/docs/%s", strings.ToLower(docLinkID))
}

View File

@@ -1,7 +1,6 @@
package getter
import (
"fmt"
"strings"
"github.com/armosec/armoapi-go/armotypes"
@@ -56,21 +55,13 @@ func (drp *DownloadReleasedPolicy) ListFrameworks() ([]string, error) {
return drp.gs.GetOPAFrameworksNamesList()
}
func (drp *DownloadReleasedPolicy) ListControls() ([]string, error) {
controlsIDsList, err := drp.gs.GetOPAControlsIDsList()
if err != nil {
return []string{}, err
func (drp *DownloadReleasedPolicy) ListControls(listType ListType) ([]string, error) {
switch listType {
case ListID:
return drp.gs.GetOPAControlsIDsList()
default:
return drp.gs.GetOPAControlsNamesList()
}
controlsNamesList, err := drp.gs.GetOPAControlsNamesList()
if err != nil {
return []string{}, err
}
controlsNamesWithIDsList := make([]string, len(controlsIDsList))
// by design both slices have the same length
for i := range controlsIDsList {
controlsNamesWithIDsList[i] = fmt.Sprintf("%v|%v", controlsIDsList[i], controlsNamesList[i])
}
return controlsNamesWithIDsList, nil
}
func (drp *DownloadReleasedPolicy) GetControlsInputs(clusterName string) (map[string][]string, error) {

View File

@@ -6,13 +6,19 @@ import (
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
)
// supported listing
type ListType string
const ListID ListType = "id"
const ListName ListType = "name"
type IPolicyGetter interface {
GetFramework(name string) (*reporthandling.Framework, error)
GetFrameworks() ([]reporthandling.Framework, error)
GetControl(name string) (*reporthandling.Control, error)
ListFrameworks() ([]string, error)
ListControls() ([]string, error)
ListControls(ListType) ([]string, error)
}
type IExceptionsGetter interface {

View File

@@ -4,7 +4,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
@@ -306,7 +306,7 @@ func (api *KSCloudAPI) ListFrameworks() ([]string, error) {
return frameworkList, nil
}
func (api *KSCloudAPI) ListControls() ([]string, error) {
func (api *KSCloudAPI) ListControls(l ListType) ([]string, error) {
return nil, fmt.Errorf("control api is not public")
}
@@ -358,7 +358,7 @@ func (api *KSCloudAPI) Login() error {
return fmt.Errorf("error authenticating: %d", resp.StatusCode)
}
responseBody, err := io.ReadAll(resp.Body)
responseBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}

View File

@@ -9,7 +9,6 @@ import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/attacktrack/v1alpha1"
)
// =======================================================================================================================
@@ -110,7 +109,7 @@ func (lp *LoadPolicy) ListFrameworks() ([]string, error) {
return fwNames, nil
}
func (lp *LoadPolicy) ListControls() ([]string, error) {
func (lp *LoadPolicy) ListControls(listType ListType) ([]string, error) {
// TODO - Support
return []string{}, fmt.Errorf("loading controls list from file is not supported")
}
@@ -153,18 +152,3 @@ func (lp *LoadPolicy) filePath() string {
}
return ""
}
func (lp *LoadPolicy) GetAttackTracks() ([]v1alpha1.AttackTrack, error) {
attackTracks := []v1alpha1.AttackTrack{}
f, err := os.ReadFile(lp.filePath())
if err != nil {
return nil, err
}
if err := json.Unmarshal(f, &attackTracks); err != nil {
return nil, err
}
return attackTracks, nil
}

View File

@@ -3,6 +3,7 @@ package cautils
import (
_ "embed"
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -38,7 +39,7 @@ func (s *HelmChartTestSuite) SetupSuite() {
}
var obj interface{}
file, _ := os.ReadFile(filepath.Join("testdata", "helm_expected_default_values.json"))
file, _ := ioutil.ReadFile(filepath.Join("testdata", "helm_expected_default_values.json"))
_ = json.Unmarshal([]byte(file), &obj)
s.expectedDefaultValues = obj.(map[string]interface{})
}

View File

@@ -3,6 +3,7 @@ package cautils
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -39,8 +40,7 @@ const (
// ScanCluster string = "cluster"
// ScanLocalFiles string = "yaml"
localControlInputsFilename string = "controls-inputs.json"
LocalExceptionsFilename string = "exceptions.json"
LocalAttackTracksFilename string = "attack-tracks.json"
localExceptionsFilename string = "exceptions.json"
)
type BoolPtrFlag struct {
@@ -159,7 +159,7 @@ func (scanInfo *ScanInfo) setUseArtifactsFrom() {
scanInfo.UseArtifactsFrom = dir
}
// set frameworks files
files, err := os.ReadDir(scanInfo.UseArtifactsFrom)
files, err := ioutil.ReadDir(scanInfo.UseArtifactsFrom)
if err != nil {
logger.L().Fatal("failed to read files from directory", helpers.String("dir", scanInfo.UseArtifactsFrom), helpers.Error(err))
}
@@ -176,7 +176,7 @@ func (scanInfo *ScanInfo) setUseArtifactsFrom() {
// set config-inputs file
scanInfo.ControlsInputs = filepath.Join(scanInfo.UseArtifactsFrom, localControlInputsFilename)
// set exceptions
scanInfo.UseExceptions = filepath.Join(scanInfo.UseArtifactsFrom, LocalExceptionsFilename)
scanInfo.UseExceptions = filepath.Join(scanInfo.UseArtifactsFrom, localExceptionsFilename)
}
func (scanInfo *ScanInfo) setUseFrom() {
@@ -419,7 +419,7 @@ func metadataGitLocal(input string) (*reporthandlingv2.RepoContextMetadata, erro
Date: commit.Committer.Date,
CommitterName: commit.Committer.Name,
}
context.LocalRootPath = getAbsPath(input)
context.LocalRootPath, _ = gitParser.GetRootDir()
return context, nil
}

View File

@@ -19,7 +19,6 @@ var downloadFunc = map[string]func(*metav1.DownloadInfo) error{
"control": downloadControl,
"framework": downloadFramework,
"artifacts": downloadArtifacts,
"attack-tracks": downloadAttackTracks,
}
func DownloadSupportCommands() []string {
@@ -71,7 +70,6 @@ func downloadArtifacts(downloadInfo *metav1.DownloadInfo) error {
"controls-inputs": downloadConfigInputs,
"exceptions": downloadExceptions,
"framework": downloadFramework,
"attack-tracks": downloadAttackTracks,
}
for artifact := range artifacts {
if err := downloadArtifact(&metav1.DownloadInfo{Target: artifact, Path: downloadInfo.Path, FileName: fmt.Sprintf("%s.json", artifact)}, artifacts); err != nil {
@@ -110,12 +108,12 @@ func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
exceptionsGetter := getExceptionsGetter("", tenant.GetAccountID(), nil)
exceptions := []armotypes.PostureExceptionPolicy{}
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetContextName())
if err != nil {
return err
if tenant.GetAccountID() != "" {
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetContextName())
if err != nil {
return err
}
}
if downloadInfo.FileName == "" {
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
}
@@ -128,30 +126,6 @@ func downloadExceptions(downloadInfo *metav1.DownloadInfo) error {
return nil
}
func downloadAttackTracks(downloadInfo *metav1.DownloadInfo) error {
var err error
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())
attackTracksGetter := getAttackTracksGetter(tenant.GetAccountID(), nil)
attackTracks, err := attackTracksGetter.GetAttackTracks()
if err != nil {
return err
}
if downloadInfo.FileName == "" {
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
}
// save in file
err = getter.SaveInFile(attackTracks, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
if err != nil {
return err
}
logger.L().Success("Downloaded", helpers.String("attack tracks", downloadInfo.Target), helpers.String("path", filepath.Join(downloadInfo.Path, downloadInfo.FileName)))
return nil
}
func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
tenant := getTenantConfig(&downloadInfo.Credentials, "", "", getKubernetesApi())

72
core/core/fix.go Normal file
View File

@@ -0,0 +1,72 @@
package core
import (
"fmt"
"strings"
logger "github.com/kubescape/go-logger"
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
"github.com/kubescape/kubescape/v2/core/pkg/fixhandler"
)
const NoChangesApplied = "No changes were applied."
const NoResourcesToFix = "No issues to fix."
const ConfirmationQuestion = "Would you like to apply the changes to the files above? [y|n]: "
func (ks *Kubescape) Fix(fixInfo *metav1.FixInfo) error {
logger.L().Info("Reading report file...")
handler, err := fixhandler.NewFixHandler(fixInfo)
if err != nil {
return err
}
resourcesToFix := handler.PrepareResourcesToFix()
if len(resourcesToFix) == 0 {
logger.L().Info(NoResourcesToFix)
return nil
}
handler.PrintExpectedChanges(resourcesToFix)
if fixInfo.DryRun {
logger.L().Info(NoChangesApplied)
return nil
}
if !fixInfo.NoConfirm && !userConfirmed() {
logger.L().Info(NoChangesApplied)
return nil
}
updatedFilesCount, errors := handler.ApplyChanges(resourcesToFix)
logger.L().Info(fmt.Sprintf("Fixed resources in %d files.", updatedFilesCount))
if len(errors) > 0 {
for _, err := range errors {
logger.L().Error(err.Error())
}
return fmt.Errorf("Failed to fix some resources, check the logs for more details")
}
return nil
}
func userConfirmed() bool {
var input string
for {
fmt.Printf(ConfirmationQuestion)
if _, err := fmt.Scanln(&input); err != nil {
continue
}
input = strings.ToLower(input)
if input == "y" || input == "yes" {
return true
} else if input == "n" || input == "no" {
return false
}
}
}

View File

@@ -45,9 +45,8 @@ func getExceptionsGetter(useExceptions string, accountID string, downloadRelease
if downloadReleasedPolicy == nil {
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
}
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull attack tracks, fallback to cache
logger.L().Warning("failed to get exceptions from github release, loading attack tracks from cache", helpers.Error(err))
return getter.NewLoadPolicy([]string{getter.GetDefaultPath(cautils.LocalExceptionsFilename)})
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil {
logger.L().Warning("failed to get exceptions from github release, this may affect the scanning results", helpers.Error(err))
}
return downloadReleasedPolicy
@@ -248,9 +247,8 @@ func getAttackTracksGetter(accountID string, downloadReleasedPolicy *getter.Down
if downloadReleasedPolicy == nil {
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
}
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull attack tracks, fallback to cache
logger.L().Warning("failed to get attack tracks from github release, loading attack tracks from cache", helpers.Error(err))
return getter.NewLoadPolicy([]string{getter.GetDefaultPath(cautils.LocalAttackTracksFilename)})
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil {
logger.L().Warning("failed to get attack tracks from github release, this may affect the scanning results", helpers.Error(err))
}
return downloadReleasedPolicy
}

View File

@@ -6,11 +6,8 @@ import (
"sort"
"strings"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
v2 "github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer/v2"
"github.com/olekukonko/tablewriter"
)
var listFunc = map[string]func(*metav1.ListPolicies) ([]string, error){
@@ -19,7 +16,7 @@ var listFunc = map[string]func(*metav1.ListPolicies) ([]string, error){
"exceptions": listExceptions,
}
var listFormatFunc = map[string]func(string, []string){
var listFormatFunc = map[string]func(*metav1.ListPolicies, []string){
"pretty-print": prettyPrintListFormat,
"json": jsonListFormat,
}
@@ -32,18 +29,14 @@ func ListSupportActions() []string {
return commands
}
func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
if policyListerFunc, ok := listFunc[listPolicies.Target]; ok {
policies, err := policyListerFunc(listPolicies)
if f, ok := listFunc[listPolicies.Target]; ok {
policies, err := f(listPolicies)
if err != nil {
return err
}
sort.Strings(policies)
if listFormatFunction, ok := listFormatFunc[listPolicies.Format]; ok {
listFormatFunction(listPolicies.Target, policies)
} else {
return fmt.Errorf("Invalid format \"%s\", Supported formats: 'pretty-print'/'json' ", listPolicies.Format)
}
listFormatFunc[listPolicies.Format](listPolicies, policies)
return nil
}
@@ -52,16 +45,20 @@ func (ks *Kubescape) List(listPolicies *metav1.ListPolicies) error {
func listFrameworks(listPolicies *metav1.ListPolicies) ([]string, error) {
tenant := getTenantConfig(&listPolicies.Credentials, "", "", getKubernetesApi()) // change k8sinterface
policyGetter := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
g := getPolicyGetter(nil, tenant.GetTenantEmail(), true, nil)
return listFrameworksNames(policyGetter), nil
return listFrameworksNames(g), nil
}
func listControls(listPolicies *metav1.ListPolicies) ([]string, error) {
tenant := getTenantConfig(&listPolicies.Credentials, "", "", getKubernetesApi()) // change k8sinterface
policyGetter := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
return policyGetter.ListControls()
g := getPolicyGetter(nil, tenant.GetTenantEmail(), false, nil)
l := getter.ListName
if listPolicies.ListIDs {
l = getter.ListID
}
return g.ListControls(l)
}
func listExceptions(listPolicies *metav1.ListPolicies) ([]string, error) {
@@ -80,73 +77,12 @@ func listExceptions(listPolicies *metav1.ListPolicies) ([]string, error) {
return exceptionsNames, nil
}
func prettyPrintListFormat(targetPolicy string, policies []string) {
if targetPolicy == "controls" {
prettyPrintControls(policies)
return
}
header := fmt.Sprintf("Supported %s", targetPolicy)
policyTable := tablewriter.NewWriter(printer.GetWriter(""))
policyTable.SetAutoWrapText(true)
policyTable.SetHeader([]string{header})
policyTable.SetHeaderLine(true)
policyTable.SetRowLine(true)
data := v2.Matrix{}
controlRows := generatePolicyRows(policies)
data = append(data, controlRows...)
policyTable.SetAlignment(tablewriter.ALIGN_CENTER)
policyTable.AppendBulk(data)
policyTable.Render()
func prettyPrintListFormat(listPolicies *metav1.ListPolicies, policies []string) {
sep := "\n * "
fmt.Printf("Supported %s:%s%s\n", listPolicies.Target, sep, strings.Join(policies, sep))
}
func jsonListFormat(targetPolicy string, policies []string) {
func jsonListFormat(listPolicies *metav1.ListPolicies, policies []string) {
j, _ := json.MarshalIndent(policies, "", " ")
fmt.Printf("%s\n", j)
}
func prettyPrintControls(policies []string) {
controlsTable := tablewriter.NewWriter(printer.GetWriter(""))
controlsTable.SetAutoWrapText(true)
controlsTable.SetHeader([]string{"Control ID", "Control Name", "Docs"})
controlsTable.SetHeaderLine(true)
controlsTable.SetRowLine(true)
data := v2.Matrix{}
controlRows := generateControlRows(policies)
data = append(data, controlRows...)
controlsTable.AppendBulk(data)
controlsTable.Render()
}
func generateControlRows(policies []string) [][]string {
rows := [][]string{}
for _, control := range policies {
idAndControl := strings.Split(control, "|")
id, control := idAndControl[0], idAndControl[1]
docs := cautils.GetControlLink(id)
currentRow := []string{id, control, docs}
rows = append(rows, currentRow)
}
return rows
}
func generatePolicyRows(policies []string) [][]string {
rows := [][]string{}
for _, policy := range policies {
currentRow := []string{policy}
rows = append(rows, currentRow)
}
return rows
}

View File

@@ -0,0 +1,8 @@
package v1
type FixInfo struct {
ReportFile string // path to report file (mandatory)
NoConfirm bool // if true, no confirmation will be given to the user before applying the fix
SkipUserValues bool // if true, user values will not be changed
DryRun bool // if true, no changes will be applied
}

View File

@@ -4,6 +4,7 @@ import "github.com/kubescape/kubescape/v2/core/cautils"
type ListPolicies struct {
Target string
ListIDs bool
Format string
Credentials cautils.Credentials
}

View File

@@ -25,4 +25,7 @@ type IKubescape interface {
// delete
DeleteExceptions(deleteexceptions *metav1.DeleteExceptions) error
// fix
Fix(fixInfo *metav1.FixInfo) error
}

View File

@@ -8,7 +8,7 @@ import (
"github.com/kubescape/opa-utils/reporthandling"
)
var mockControl_0006 = `{"guid":"","name":"HostPath mount","attributes":{"armoBuiltin":true},"id":"C-0048","controlID":"C-0048","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6}`
var mockControl_0006 = `{"guid":"","name":"Allowed hostPath","attributes":{"armoBuiltin":true},"id":"C-0006","controlID":"C-0006","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": 
\"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6}`
var mockControl_0044 = `{"guid":"","name":"Container hostPort","attributes":{"armoBuiltin":true},"id":"C-0044","controlID":"C-0044","creationTime":"","description":"Configuring hostPort limits you to a particular port, and if any two workloads that specify the same HostPort they cannot be deployed to the same node. Therefore, if the number of replica of such workload is higher than the number of nodes, the deployment will fail.","remediation":"Avoid usage of hostPort unless it is absolutely necessary. Use NodePort / ClusterIP instead.","rules":[{"guid":"","name":"container-hostPort","attributes":{"armoBuiltin":true},"creationTime":"","rule":"package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbegginingOfPath := \"spec.\"\n\tpath := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = 
wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nisHostPort(container, i, begginingOfPath) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [begginingOfPath, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod","CronJob"]}],"ruleDependencies":[],"configInputs":null,"controlConfigInputs":null,"description":"fails if container has hostPort","remediation":"Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP","ruleQuery":"armo_builtins"}],"rulesIDs":[""],"baseScore":4}`
@@ -31,7 +31,7 @@ func MockFramework_0013() *reporthandling.Framework {
return fw
}
// MockFramework_0006_0013 mock control 0013 and control 0006 - "Non-root containers" and "HostPath mount"
// MockFramework_0006_0013 mock control 0013 and control 0006 - "Non-root containers" and "Allowed hostPath"
func MockFramework_0006_0013() *reporthandling.Framework {
fw := &reporthandling.Framework{
PortalBase: armotypes.PortalBase{

View File

@@ -0,0 +1,63 @@
package fixhandler
import (
"github.com/armosec/armoapi-go/armotypes"
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
"github.com/kubescape/opa-utils/reporthandling"
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
"gopkg.in/yaml.v3"
)
// FixHandler is a struct that holds the information of the report to be fixed
type FixHandler struct {
	fixInfo       *metav1.FixInfo                 // user-supplied fixing options (report file, flags)
	reportObj     *reporthandlingv2.PostureReport // parsed posture report the fixes are derived from
	localBasePath string                          // base directory the resources' relative file paths are joined to
}

// ResourceFixInfo is a struct that holds the information about the resource that needs to be fixed
type ResourceFixInfo struct {
	YamlExpressions map[string]*armotypes.FixPath // yq-style expression -> the FixPath it was built from
	Resource        *reporthandling.Resource      // the failed resource itself
	FilePath        string                        // absolute path of the YAML file containing the resource
	DocumentIndex   int                           // index of the resource's document inside a multi-document YAML file
}

// nodeInfo holds extra information about the node
type nodeInfo struct {
	node   *yaml.Node // the YAML node itself
	parent *yaml.Node // the node's parent in the YAML tree
	// position of the node among siblings
	index int
}

// fixInfoMetadata holds the arguments "getFixInfo" function needs to pass to the
// functions it uses
type fixInfoMetadata struct {
	originalList        *[]nodeInfo      // flattened nodes of the original document
	fixedList           *[]nodeInfo      // flattened nodes of the fixed document
	originalListTracker int              // current position in originalList
	fixedListTracker    int              // current position in fixedList
	contentToAdd        *[]contentToAdd  // accumulates content to insert into the file
	linesToRemove       *[]linesToRemove // accumulates line ranges to delete from the file
}

// contentToAdd holds the information about where to insert the new changes in the existing yaml file
type contentToAdd struct {
	// Line where the fix should be applied to
	line int
	// Content is a string representation of the YAML node that describes a suggested fix
	content string
}

// linesToRemove holds the line numbers to remove from the existing yaml file
type linesToRemove struct {
	startLine int // first line to remove (inclusive)
	endLine   int // last line to remove (inclusive)
}

// fileFixInfo aggregates all the insertions and removals to apply to one file
type fileFixInfo struct {
	contentsToAdd *[]contentToAdd
	linesToRemove *[]linesToRemove
}

View File

@@ -0,0 +1,346 @@
package fixhandler
import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"sort"
	"strconv"
	"strings"

	"github.com/armosec/armoapi-go/armotypes"
	logger "github.com/kubescape/go-logger"
	metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
	"github.com/kubescape/opa-utils/objectsenvelopes"
	"github.com/kubescape/opa-utils/objectsenvelopes/localworkload"
	"github.com/kubescape/opa-utils/reporthandling"
	"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
	reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
	"github.com/mikefarah/yq/v4/pkg/yqlib"
	"gopkg.in/op/go-logging.v1"
)
const UserValuePrefix = "YOUR_"
// NewFixHandler creates a FixHandler from the user-supplied fix information.
//
// It reads and parses the posture report referenced by fixInfo.ReportFile,
// verifies that the scanning target is supported (local git repository or
// directory) and that the scanned base path still exists on disk, and
// silences the yqlib logger down to ERROR level.
//
// It returns an error if the report file cannot be read or parsed, if the
// scanning target is unsupported, or if the local base path is missing.
func NewFixHandler(fixInfo *metav1.FixInfo) (*FixHandler, error) {
	jsonFile, err := os.Open(fixInfo.ReportFile)
	if err != nil {
		return nil, err
	}
	defer jsonFile.Close()

	// The read error was previously discarded, which surfaced later as a
	// confusing JSON unmarshalling failure; report it at the source instead.
	byteValue, err := ioutil.ReadAll(jsonFile)
	if err != nil {
		return nil, err
	}

	var reportObj reporthandlingv2.PostureReport
	if err = json.Unmarshal(byteValue, &reportObj); err != nil {
		return nil, err
	}

	if err = isSupportedScanningTarget(&reportObj); err != nil {
		return nil, err
	}

	localPath := getLocalPath(&reportObj)
	if _, err = os.Stat(localPath); err != nil {
		return nil, err
	}

	// yqlib logs verbosely by default; keep only errors.
	backendLoggerLeveled := logging.AddModuleLevel(logging.NewLogBackend(logger.L().GetWriter(), "", 0))
	backendLoggerLeveled.SetLevel(logging.ERROR, "")
	yqlib.GetLogger().SetBackend(backendLoggerLeveled)

	return &FixHandler{
		fixInfo:       fixInfo,
		reportObj:     &reportObj,
		localBasePath: localPath,
	}, nil
}
// isSupportedScanningTarget reports whether the posture report was produced
// by a scan whose files are available locally for fixing: a local git
// repository or a plain directory. Any other target yields an error.
func isSupportedScanningTarget(report *reporthandlingv2.PostureReport) error {
	switch report.Metadata.ScanMetadata.ScanningTarget {
	case reporthandlingv2.GitLocal, reporthandlingv2.Directory:
		return nil
	default:
		return fmt.Errorf("unsupported scanning target. Only local git and directory scanning targets are supported")
	}
}
// getLocalPath returns the local base path recorded in the report's context
// metadata for the given scanning target, or "" for unsupported targets.
func getLocalPath(report *reporthandlingv2.PostureReport) string {
	switch report.Metadata.ScanMetadata.ScanningTarget {
	case reporthandlingv2.GitLocal:
		return report.Metadata.ContextMetadata.RepoContextMetadata.LocalRootPath
	case reporthandlingv2.Directory:
		return report.Metadata.ContextMetadata.DirectoryContextMetadata.BasePath
	default:
		return ""
	}
}
// buildResourcesMap builds a map from resource ID to the resource object,
// collecting resources both from the report's resource list and from the
// raw resources embedded in individual results (the latter take precedence).
func (h *FixHandler) buildResourcesMap() map[string]*reporthandling.Resource {
	resourcesByID := make(map[string]*reporthandling.Resource)

	for i := range h.reportObj.Resources {
		resource := &h.reportObj.Resources[i]
		resourcesByID[resource.GetID()] = resource
	}

	for i := range h.reportObj.Results {
		rawResource := h.reportObj.Results[i].RawResource
		if rawResource == nil {
			continue
		}
		resourcesByID[rawResource.GetID()] = rawResource
	}

	return resourcesByID
}
// getPathFromRawResource extracts the local file path of the resource from
// its raw object. It handles plain local workloads as well as rego response
// vectors, whose related objects are searched for the first local workload.
// An empty string is returned when no path can be determined.
func (h *FixHandler) getPathFromRawResource(obj map[string]interface{}) string {
	if localworkload.IsTypeLocalWorkload(obj) {
		return localworkload.NewLocalWorkload(obj).GetPath()
	}

	if !objectsenvelopes.IsTypeRegoResponseVector(obj) {
		return ""
	}

	vector := objectsenvelopes.NewRegoResponseVectorObject(obj)
	for _, related := range vector.GetRelatedObjects() {
		if localworkload.IsTypeLocalWorkload(related.GetObject()) {
			return related.(*localworkload.LocalWorkload).GetPath()
		}
	}
	return ""
}
// PrepareResourcesToFix walks over the failed results of the report and
// builds a ResourceFixInfo for every resource that can actually be fixed:
// one that originates from a local YAML file and has at least one failed
// associated control with a non-empty fix path.
func (h *FixHandler) PrepareResourcesToFix() []ResourceFixInfo {
	resourceIdToResource := h.buildResourcesMap()

	resourcesToFix := make([]ResourceFixInfo, 0)
	for _, result := range h.reportObj.Results {
		if !result.GetStatus(nil).IsFailed() {
			continue
		}

		resourceID := result.ResourceID
		resourceObj := resourceIdToResource[resourceID]
		// A result may reference a resource that is absent from the report;
		// dereferencing the nil lookup result would panic, so skip it.
		if resourceObj == nil {
			logger.L().Error("Skipping result with unknown resource ID: " + resourceID)
			continue
		}

		resourcePath := h.getPathFromRawResource(resourceObj.GetObject())
		if resourcePath == "" {
			continue
		}

		// Only YAML-sourced resources can be autofixed.
		if resourceObj.Source == nil || resourceObj.Source.FileType != reporthandling.SourceTypeYaml {
			continue
		}

		relativePath, documentIndex, err := h.getFilePathAndIndex(resourcePath)
		if err != nil {
			logger.L().Error("Skipping invalid resource path: " + resourcePath)
			continue
		}

		absolutePath := path.Join(h.localBasePath, relativePath)
		if _, err := os.Stat(absolutePath); err != nil {
			logger.L().Error("Skipping missing file: " + absolutePath)
			continue
		}

		rfi := ResourceFixInfo{
			FilePath:        absolutePath,
			Resource:        resourceObj,
			YamlExpressions: make(map[string]*armotypes.FixPath, 0),
			DocumentIndex:   documentIndex,
		}

		for i := range result.AssociatedControls {
			if result.AssociatedControls[i].GetStatus(nil).IsFailed() {
				rfi.addYamlExpressionsFromResourceAssociatedControl(documentIndex, &result.AssociatedControls[i], h.fixInfo.SkipUserValues)
			}
		}

		// Resources with nothing to fix are not reported at all.
		if len(rfi.YamlExpressions) > 0 {
			resourcesToFix = append(resourcesToFix, rfi)
		}
	}

	return resourcesToFix
}
// PrintExpectedChanges logs a human-readable summary of every change that
// would be applied: for each resource, its file, name, kind and the list of
// path = value assignments.
func (h *FixHandler) PrintExpectedChanges(resourcesToFix []ResourceFixInfo) {
	var sb strings.Builder
	sb.WriteString("The following changes will be applied:\n")

	for _, rfi := range resourcesToFix {
		sb.WriteString(fmt.Sprintf("File: %s\n", rfi.FilePath))
		sb.WriteString(fmt.Sprintf("Resource: %s\n", rfi.Resource.GetName()))
		sb.WriteString(fmt.Sprintf("Kind: %s\n", rfi.Resource.GetKind()))
		sb.WriteString("Changes:\n")

		changeNumber := 1
		for _, fixPath := range rfi.YamlExpressions {
			sb.WriteString(fmt.Sprintf("\t%d) %s = %s\n", changeNumber, fixPath.Path, fixPath.Value))
			changeNumber++
		}

		sb.WriteString("\n------\n")
	}

	logger.L().Info(sb.String())
}
// ApplyChanges applies the suggested fixes to the files on disk.
//
// Fixes targeting the same file were merged into one yq expression by
// getFileYamlExpressions, so each file is read, fixed and written at most
// once. It returns the number of updated files and every error encountered
// along the way — a failure on one file does not stop the others.
func (h *FixHandler) ApplyChanges(resourcesToFix []ResourceFixInfo) (int, []error) {
	updatedFiles := make(map[string]bool)
	errors := make([]error, 0)

	fileYamlExpressions := h.getFileYamlExpressions(resourcesToFix)

	for filepath, yamlExpression := range fileYamlExpressions {
		fileAsString, err := getFileString(filepath)
		if err != nil {
			errors = append(errors, err)
			continue
		}

		fixedYamlString, err := h.ApplyFixToContent(fileAsString, yamlExpression)
		if err != nil {
			errors = append(errors, fmt.Errorf("Failed to fix file %s: %w ", filepath, err))
			continue
		}
		// NOTE(review): the file is counted as updated even if the write
		// below fails — confirm this is the intended reporting semantics.
		updatedFiles[filepath] = true

		if err := writeFixesToFile(filepath, fixedYamlString); err != nil {
			logger.L().Error(fmt.Sprintf("Failed to write fixes to file %s, %v", filepath, err.Error()))
			errors = append(errors, err)
		}
	}

	return len(updatedFiles), errors
}
// getFilePathAndIndex splits a "path:documentIndex" string into its file
// path and document-index parts.
//
// The split happens at the *last* ':' so file paths that themselves contain
// a colon (e.g. Windows drive letters) are handled correctly; the previous
// strings.Split-based version always took the second segment. An error is
// returned when no ':' is present or the index part is not an integer.
func (h *FixHandler) getFilePathAndIndex(filePathWithIndex string) (filePath string, documentIndex int, err error) {
	sep := strings.LastIndex(filePathWithIndex, ":")
	if sep < 0 {
		return "", 0, fmt.Errorf("expected to find ':' in file path")
	}

	documentIndex, err = strconv.Atoi(filePathWithIndex[sep+1:])
	if err != nil {
		return "", 0, err
	}
	return filePathWithIndex[:sep], documentIndex, nil
}
// ApplyFixToContent applies the given yq yamlExpression to yamlAsString and
// returns the fixed YAML as a string. Rather than re-serializing the whole
// document, it diffs the original and fixed node trees and splices only the
// changed lines into the original text, so untouched lines (comments, blank
// lines, user formatting) are preserved — see TestApplyFixKeepsFormatting.
func (h *FixHandler) ApplyFixToContent(yamlAsString, yamlExpression string) (fixedString string, err error) {
	yamlLines := strings.Split(yamlAsString, "\n")
	// Parse the original documents and the documents with the fix applied.
	originalRootNodes, err := decodeDocumentRoots(yamlAsString)
	if err != nil {
		return "", err
	}
	fixedRootNodes, err := getFixedNodes(yamlAsString, yamlExpression)
	if err != nil {
		return "", err
	}
	// Diff the two trees into per-line insertions and removals, then apply
	// them to the raw text lines.
	fileFixInfo := getFixInfo(originalRootNodes, fixedRootNodes)
	fixedYamlLines := getFixedYamlLines(yamlLines, fileFixInfo)
	fixedString = getStringFromSlice(fixedYamlLines)
	return fixedString, nil
}
// getFileYamlExpressions merges the yaml expressions of all resources into
// a single expression per file, so each file only needs to be fixed once.
// Expressions for resources sharing a file are joined with " | ".
func (h *FixHandler) getFileYamlExpressions(resourcesToFix []ResourceFixInfo) map[string]string {
	fileYamlExpressions := make(map[string]string, 0)

	for i := range resourcesToFix {
		resourceToFix := &resourcesToFix[i]
		expression := reduceYamlExpressions(resourceToFix)
		filePath := resourceToFix.FilePath

		if existing, ok := fileYamlExpressions[filePath]; ok {
			fileYamlExpressions[filePath] = joinStrings(existing, " | ", expression)
		} else {
			fileYamlExpressions[filePath] = expression
		}
	}

	return fileYamlExpressions
}
// addYamlExpressionsFromResourceAssociatedControl collects the yaml fix
// expressions of every failed rule in the given associated control and
// records them in rfi.YamlExpressions, keyed by the rendered expression.
//
// Paths without a fix path are skipped, as are fixes whose value is a
// "YOUR_..." placeholder when skipUserValues is set.
func (rfi *ResourceFixInfo) addYamlExpressionsFromResourceAssociatedControl(documentIndex int, ac *resourcesresults.ResourceAssociatedControl, skipUserValues bool) {
	for _, rule := range ac.ResourceAssociatedRules {
		if !rule.GetStatus(nil).IsFailed() {
			continue
		}

		// Iterate by index: the original code stored &rulePaths.FixPath,
		// the address of the reused range variable, which made every map
		// entry alias the FixPath of the *last* iteration. Indexing into
		// the slice yields a stable pointer per element.
		for i := range rule.Paths {
			fixPath := &rule.Paths[i].FixPath
			if fixPath.Path == "" {
				continue
			}

			if skipUserValues && strings.HasPrefix(fixPath.Value, UserValuePrefix) {
				continue
			}

			yamlExpression := fixPathToValidYamlExpression(fixPath.Path, fixPath.Value, documentIndex)
			rfi.YamlExpressions[yamlExpression] = fixPath
		}
	}
}
// reduceYamlExpressions reduces the number of yaml expressions to a single one,
// joined with " | ". The expressions are sorted first: YamlExpressions is a
// map, so without sorting the combined expression — and therefore the fixing
// output — would vary between runs due to random map iteration order.
func reduceYamlExpressions(resource *ResourceFixInfo) string {
	expressions := make([]string, 0, len(resource.YamlExpressions))
	for expr := range resource.YamlExpressions {
		expressions = append(expressions, expr)
	}
	sort.Strings(expressions)

	return strings.Join(expressions, " | ")
}
func fixPathToValidYamlExpression(fixPath, value string, documentIndexInYaml int) string {
isStringValue := true
if _, err := strconv.ParseBool(value); err == nil {
isStringValue = false
} else if _, err := strconv.ParseFloat(value, 64); err == nil {
isStringValue = false
} else if _, err := strconv.Atoi(value); err == nil {
isStringValue = false
}
// Strings should be quoted
if isStringValue {
value = fmt.Sprintf("\"%s\"", value)
}
// select document index and add a dot for the root node
return fmt.Sprintf("select(di==%d).%s |= %s", documentIndexInYaml, fixPath, value)
}
func joinStrings(inputStrings ...string) string {
return strings.Join(inputStrings, "")
}
// getFileString reads the file at filepath and returns its contents as a
// string.
func getFileString(filepath string) (string, error) {
	bytes, err := ioutil.ReadFile(filepath)
	if err != nil {
		// Wrap the underlying error instead of discarding it, so callers
		// can tell *why* the read failed (missing file, permissions, ...).
		return "", fmt.Errorf("Error reading file %s: %w", filepath, err)
	}
	return string(bytes), nil
}
// writeFixesToFile writes the fixed file content back to filepath with
// permissions 0644, wrapping any write failure.
func writeFixesToFile(filepath, content string) error {
	if err := ioutil.WriteFile(filepath, []byte(content), 0644); err != nil {
		return fmt.Errorf("Error writing fixes to file: %w", err)
	}
	return nil
}

View File

@@ -0,0 +1,248 @@
package fixhandler
import (
"os"
"path/filepath"
"testing"
logger "github.com/kubescape/go-logger"
metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
"github.com/mikefarah/yq/v4/pkg/yqlib"
"github.com/stretchr/testify/assert"
"gopkg.in/op/go-logging.v1"
)
// indentationTestCase describes one autofix formatting test: the input file
// to fix, the yq expression to apply, and the file holding the expected
// result. File paths are relative to the "testdata" directory.
type indentationTestCase struct {
	inputFile      string // file to be fixed, relative to testdata/
	yamlExpression string // yq expression applied to the input
	expectedFile   string // file with the expected output, relative to testdata/
}
// NewFixHandlerMock returns a FixHandler with an empty fix info and report,
// suitable for exercising ApplyFixToContent without a real scan report.
// Like NewFixHandler, it silences the yqlib logger down to ERROR level.
func NewFixHandlerMock() (*FixHandler, error) {
	leveledBackend := logging.AddModuleLevel(logging.NewLogBackend(logger.L().GetWriter(), "", 0))
	leveledBackend.SetLevel(logging.ERROR, "")
	yqlib.GetLogger().SetBackend(leveledBackend)

	return &FixHandler{
		fixInfo:       &metav1.FixInfo{},
		reportObj:     &reporthandlingv2.PostureReport{},
		localBasePath: "",
	}, nil
}
// getTestdataPath returns the absolute path of the "testdata" directory
// under the test run's current working directory.
func getTestdataPath() string {
	workingDir, _ := os.Getwd()
	return filepath.Join(workingDir, "testdata")
}
// getTestCases returns the table of formatting-preservation test cases,
// grouped by scenario: insertions, removals, replacements and hybrids.
// All file paths are relative to the "testdata" directory.
//
// Fix: the fourth removal case referenced a "removes/" directory while all
// other removal fixtures live under "removals/" — unified to "removals/".
func getTestCases() []indentationTestCase {
	indentationTestCases := []indentationTestCase{
		// Insertion Scenarios
		{
			"inserts/tc-01-00-input-mapping-insert-mapping.yaml",
			"select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false",
			"inserts/tc-01-01-expected.yaml",
		},
		{
			"inserts/tc-02-00-input-mapping-insert-mapping-with-list.yaml",
			"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"NET_RAW\"]",
			"inserts/tc-02-01-expected.yaml",
		},
		{
			"inserts/tc-03-00-input-list-append-scalar.yaml",
			"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"SYS_ADM\"]",
			"inserts/tc-03-01-expected.yaml",
		},
		{
			"inserts/tc-04-00-input-multiple-inserts.yaml",
			`select(di==0).spec.template.spec.securityContext.allowPrivilegeEscalation |= false |
			select(di==0).spec.template.spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"] |
			select(di==0).spec.template.spec.containers[0].securityContext.seccompProfile.type |= "RuntimeDefault" |
			select(di==0).spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation |= false |
			select(di==0).spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem |= true`,
			"inserts/tc-04-01-expected.yaml",
		},
		{
			"inserts/tc-05-00-input-comment-blank-line-single-insert.yaml",
			"select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false",
			"inserts/tc-05-01-expected.yaml",
		},
		{
			"inserts/tc-06-00-input-list-append-scalar-oneline.yaml",
			"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"SYS_ADM\"]",
			"inserts/tc-06-01-expected.yaml",
		},
		{
			"inserts/tc-07-00-input-multiple-documents.yaml",
			`select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false |
			select(di==1).spec.containers[0].securityContext.allowPrivilegeEscalation |= false`,
			"inserts/tc-07-01-expected.yaml",
		},
		{
			"inserts/tc-08-00-input-mapping-insert-mapping-indented.yaml",
			"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"NET_RAW\"]",
			"inserts/tc-08-01-expected.yaml",
		},
		{
			"inserts/tc-09-00-input-list-insert-new-mapping-indented.yaml",
			`select(di==0).spec.containers += {"name": "redis", "image": "redis"}`,
			"inserts/tc-09-01-expected.yaml",
		},
		{
			"inserts/tc-10-00-input-list-insert-new-mapping.yaml",
			`select(di==0).spec.containers += {"name": "redis", "image": "redis"}`,
			"inserts/tc-10-01-expected.yaml",
		},

		// Removal Scenarios
		{
			"removals/tc-01-00-input.yaml",
			"del(select(di==0).spec.containers[0].securityContext)",
			"removals/tc-01-01-expected.yaml",
		},
		{
			"removals/tc-02-00-input.yaml",
			"del(select(di==0).spec.containers[1])",
			"removals/tc-02-01-expected.yaml",
		},
		{
			"removals/tc-03-00-input.yaml",
			"del(select(di==0).spec.containers[0].securityContext.capabilities.drop[1])",
			"removals/tc-03-01-expected.yaml",
		},
		{
			"removals/tc-04-00-input.yaml",
			`del(select(di==0).spec.containers[0].securityContext) |
			del(select(di==1).spec.containers[1])`,
			"removals/tc-04-01-expected.yaml",
		},

		// Replace Scenarios
		{
			"replaces/tc-01-00-input.yaml",
			"select(di==0).spec.containers[0].securityContext.runAsRoot |= false",
			"replaces/tc-01-01-expected.yaml",
		},
		{
			"replaces/tc-02-00-input.yaml",
			`select(di==0).spec.containers[0].securityContext.capabilities.drop[0] |= "SYS_ADM" |
			select(di==0).spec.containers[0].securityContext.capabilities.add[0] |= "NET_RAW"`,
			"replaces/tc-02-01-expected.yaml",
		},

		// Hybrid Scenarios
		{
			"hybrids/tc-01-00-input.yaml",
			`del(select(di==0).spec.containers[0].securityContext) |
			select(di==0).spec.securityContext.runAsRoot |= false`,
			"hybrids/tc-01-01-expected.yaml",
		},
		{
			"hybrids/tc-02-00-input-indented-list.yaml",
			`del(select(di==0).spec.containers[0].securityContext) |
			select(di==0).spec.securityContext.runAsRoot |= false`,
			"hybrids/tc-02-01-expected.yaml",
		},
		{
			"hybrids/tc-03-00-input-comments.yaml",
			`del(select(di==0).spec.containers[0].securityContext) |
			select(di==0).spec.securityContext.runAsRoot |= false`,
			"hybrids/tc-03-01-expected.yaml",
		},
		{
			"hybrids/tc-04-00-input-separated-keys.yaml",
			`del(select(di==0).spec.containers[0].securityContext) |
			select(di==0).spec.securityContext.runAsRoot |= false`,
			"hybrids/tc-04-01-expected.yaml",
		},
	}

	return indentationTestCases
}
// TestApplyFixKeepsFormatting verifies that applying fixes preserves the
// user's original formatting: each input file is fixed with the case's yq
// expression and compared verbatim against the expected file.
//
// Fix: errors from reading fixtures, building the mock and applying the fix
// were silently discarded, so a broken fixture showed up as a confusing
// empty-string diff instead of a clear failure.
func TestApplyFixKeepsFormatting(t *testing.T) {
	testCases := getTestCases()

	// Resolve a fixture path once; no need to rebuild the closure per case.
	testDataPath := func(filename string) string {
		currentDir, _ := os.Getwd()
		return filepath.Join(currentDir, "testdata", filename)
	}

	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		t.Run(tc.inputFile, func(t *testing.T) {
			input, err := os.ReadFile(testDataPath(tc.inputFile))
			if err != nil {
				t.Fatalf("failed to read input file %s: %v", tc.inputFile, err)
			}

			wantRaw, err := os.ReadFile(testDataPath(tc.expectedFile))
			if err != nil {
				t.Fatalf("failed to read expected file %s: %v", tc.expectedFile, err)
			}
			want := string(wantRaw)

			h, err := NewFixHandlerMock()
			if err != nil {
				t.Fatalf("failed to create fix handler mock: %v", err)
			}

			got, err := h.ApplyFixToContent(string(input), tc.yamlExpression)
			if err != nil {
				t.Fatalf("failed to apply fix: %v", err)
			}

			assert.Equalf(
				t, want, got,
				"Contents of the fixed file don't match the expectation.\n"+
					"Input file: %s\n\n"+
					"Got: <%s>\n\n"+
					"Want: <%s>",
				tc.inputFile, got, want,
			)
		})
	}
}
// Test_fixPathToValidYamlExpression verifies that fix paths and values are
// rendered into correctly quoted, document-scoped yq update expressions.
func Test_fixPathToValidYamlExpression(t *testing.T) {
	type args struct {
		fixPath             string
		value               string
		documentIndexInYaml int
	}
	testCases := []struct {
		name string
		args args
		want string
	}{
		{
			name: "fix path with boolean value",
			args: args{
				fixPath:             "spec.template.spec.containers[0].securityContext.privileged",
				value:               "true",
				documentIndexInYaml: 2,
			},
			want: "select(di==2).spec.template.spec.containers[0].securityContext.privileged |= true",
		},
		{
			name: "fix path with string value",
			args: args{
				fixPath:             "metadata.namespace",
				value:               "YOUR_NAMESPACE",
				documentIndexInYaml: 0,
			},
			want: "select(di==0).metadata.namespace |= \"YOUR_NAMESPACE\"",
		},
		{
			name: "fix path with number",
			args: args{
				fixPath:             "xxx.yyy",
				value:               "123",
				documentIndexInYaml: 0,
			},
			want: "select(di==0).xxx.yyy |= 123",
		},
	}

	for _, testCase := range testCases {
		testCase := testCase
		t.Run(testCase.name, func(t *testing.T) {
			got := fixPathToValidYamlExpression(testCase.args.fixPath, testCase.args.value, testCase.args.documentIndexInYaml)
			if got != testCase.want {
				t.Errorf("fixPathToValidYamlExpression() = %v, want %v", got, testCase.want)
			}
		})
	}
}

View File

@@ -0,0 +1,19 @@
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"
# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: true

View File

@@ -0,0 +1,19 @@
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"
# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: false

View File

@@ -0,0 +1,19 @@
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"
# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: true

View File

@@ -0,0 +1,19 @@
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"
# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: false

View File

@@ -0,0 +1,21 @@
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"
# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
# These are the container comments
containers:
# These are the first containers comments
- name: nginx_container
image: nginx
securityContext:
runAsRoot: true

View File

@@ -0,0 +1,21 @@
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"
# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
# These are the container comments
containers:
# These are the first containers comments
- name: nginx_container
image: nginx
securityContext:
runAsRoot: false

View File

@@ -0,0 +1,21 @@
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"
# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: true

View File

@@ -0,0 +1,21 @@
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"
# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: false

View File

@@ -0,0 +1,12 @@
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx

View File

@@ -0,0 +1,14 @@
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
allowPrivilegeEscalation: false

View File

@@ -0,0 +1,11 @@
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
apiVersion: v1
kind: Pod
metadata:
name: insert_list
spec:
containers:
- name: nginx_container
image: nginx

View File

@@ -0,0 +1,15 @@
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
apiVersion: v1
kind: Pod
metadata:
name: insert_list
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
capabilities:
drop:
- NET_RAW

View File

@@ -0,0 +1,15 @@
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]
apiVersion: v1
kind: Pod
metadata:
name: insert_list
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
capabilities:
drop:
- NET_RAW

View File

@@ -0,0 +1,16 @@
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]
apiVersion: v1
kind: Pod
metadata:
name: insert_list
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
capabilities:
drop:
- NET_RAW
- SYS_ADM

View File

@@ -0,0 +1,47 @@
# Fixes to Apply:
# 1) select(di==0).spec.template.spec.securityContext.allowPrivilegeEscalation = false
# 2) select(di==0).spec.template.spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
# 3) select(di==0).spec.template.spec.containers[0].securityContext.seccompProfile.type = RuntimeDefault
# 4) select(di==0).spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation |= false
# 5) select(di==0).spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem |= true
apiVersion: apps/v1
kind: Deployment
metadata:
name: multiple_inserts
spec:
selector:
matchLabels:
app: example_4
template:
metadata:
labels:
app: example_4
spec:
serviceAccountName: default
terminationGracePeriodSeconds: 5
containers:
- name: example_4
image: nginx
ports:
- containerPort: 3000
env:
- name: PORT
value: "3000"
resources:
requests:
cpu: 200m
memory: 180Mi
limits:
cpu: 300m
memory: 300Mi
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 15
exec:
command: ["/bin/grpc_health_probe", "-addr=:3000"]
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 15
exec:
command: ["/bin/grpc_health_probe", "-addr=:3000"]

View File

@@ -0,0 +1,57 @@
# Fixes to Apply:
# 1) select(di==0).spec.template.spec.securityContext.allowPrivilegeEscalation = false
# 2) select(di==0).spec.template.spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
# 3) select(di==0).spec.template.spec.containers[0].securityContext.seccompProfile.type = RuntimeDefault
# 4) select(di==0).spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation |= false
# 5) select(di==0).spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem |= true
apiVersion: apps/v1
kind: Deployment
metadata:
name: multiple_inserts
spec:
selector:
matchLabels:
app: example_4
template:
metadata:
labels:
app: example_4
spec:
serviceAccountName: default
terminationGracePeriodSeconds: 5
containers:
- name: example_4
image: nginx
ports:
- containerPort: 3000
env:
- name: PORT
value: "3000"
resources:
requests:
cpu: 200m
memory: 180Mi
limits:
cpu: 300m
memory: 300Mi
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 15
exec:
command: ["/bin/grpc_health_probe", "-addr=:3000"]
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 15
exec:
command: ["/bin/grpc_health_probe", "-addr=:3000"]
securityContext:
capabilities:
drop:
- NET_RAW
seccompProfile:
type: RuntimeDefault
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
securityContext:
allowPrivilegeEscalation: false

View File

@@ -0,0 +1,16 @@
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
# Testing if comments are retained as intended
securityContext:
runAsRoot: false

View File

@@ -0,0 +1,18 @@
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
allowPrivilegeEscalation: false
# Testing if comments are retained as intended
securityContext:
runAsRoot: false

View File

@@ -0,0 +1,14 @@
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]
apiVersion: v1
kind: Pod
metadata:
name: insert_list
spec:
containers:
- name: nginx1
image: nginx
securityContext:
capabilities:
drop: [NET_RAW]

View File

@@ -0,0 +1,14 @@
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]
apiVersion: v1
kind: Pod
metadata:
name: insert_list
spec:
containers:
- name: nginx1
image: nginx
securityContext:
capabilities:
drop: [NET_RAW, SYS_ADM]

View File

@@ -0,0 +1,27 @@
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
---
# Fix to Apply:
# "select(di==1).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx

View File

@@ -0,0 +1,31 @@
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
allowPrivilegeEscalation: false
---
# Fix to Apply:
# "select(di==1).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
allowPrivilegeEscalation: false

View File

@@ -0,0 +1,11 @@
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
apiVersion: v1
kind: Pod
metadata:
name: indented-parent-list-insert-list-value
spec:
containers:
- name: nginx_container
image: nginx

View File

@@ -0,0 +1,15 @@
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
apiVersion: v1
kind: Pod
metadata:
name: indented-parent-list-insert-list-value
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
capabilities:
drop:
- NET_RAW

View File

@@ -0,0 +1,11 @@
# Fix to Apply:
# select(di==0).spec.containers += {"name": "redis", "image": "redis"}
apiVersion: v1
kind: Pod
metadata:
name: indented-parent-list-insert-list-value
spec:
containers:
- name: nginx_container
image: nginx

View File

@@ -0,0 +1,13 @@
# Fix to Apply:
# select(di==0).spec.containers += {"name": "redis", "image": "redis"}
apiVersion: v1
kind: Pod
metadata:
name: indented-parent-list-insert-list-value
spec:
containers:
- name: nginx_container
image: nginx
- name: redis
image: redis

View File

@@ -0,0 +1,11 @@
# Fix to Apply:
# select(di==0).spec.containers += {"name": "redis", "image": "redis"}
apiVersion: v1
kind: Pod
metadata:
name: indented-list-insert-new-object
spec:
containers:
- name: nginx_container
image: nginx

View File

@@ -0,0 +1,13 @@
# Fix to Apply:
# select(di==0).spec.containers += {"name": "redis", "image": "redis"}
apiVersion: v1
kind: Pod
metadata:
name: indented-list-insert-new-object
spec:
containers:
- name: nginx_container
image: nginx
- name: redis
image: redis

View File

@@ -0,0 +1,14 @@
# Fix to Apply:
# del(select(di==0).spec.containers[0].securityContext)
apiVersion: v1
kind: Pod
metadata:
name: remove_example
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: false

View File

@@ -0,0 +1,12 @@
# Fix to Apply:
# del(select(di==0).spec.containers[0].securityContext)
apiVersion: v1
kind: Pod
metadata:
name: remove_example
spec:
containers:
- name: nginx_container
image: nginx

View File

@@ -0,0 +1,15 @@
# Fix to Apply:
# del(select(di==0).spec.containers[1])
apiVersion: v1
kind: Pod
metadata:
name: remove_example
spec:
containers:
- name: nginx_container
image: nginx
- name: container_with_security_issues
image: image_with_security_issues

View File

@@ -0,0 +1,12 @@
# Fix to Apply:
# del(select(di==0).spec.containers[1])
apiVersion: v1
kind: Pod
metadata:
name: remove_example
spec:
containers:
- name: nginx_container
image: nginx

View File

@@ -0,0 +1,14 @@
# Fix to Apply:
# del(select(di==0).spec.containers[0].securityContext.capabilities.drop[1])
apiVersion: v1
kind: Pod
metadata:
name: insert_list
spec:
containers:
- name: nginx1
image: nginx
securityContext:
capabilities:
drop: ["NET_RAW", "SYS_ADM"]

View File

@@ -0,0 +1,14 @@
# Fix to Apply:
# del(select(di==0).spec.containers[0].securityContext.capabilities.drop[1])
apiVersion: v1
kind: Pod
metadata:
name: insert_list
spec:
containers:
- name: nginx1
image: nginx
securityContext:
capabilities:
drop: ["NET_RAW"]

View File

@@ -0,0 +1,32 @@
# Fix to Apply:
# del(select(di==0).spec.containers[0].securityContext)
apiVersion: v1
kind: Pod
metadata:
name: remove_example
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: false
---
# Fix to Apply:
# del(select(di==0).spec.containers[1])
apiVersion: v1
kind: Pod
metadata:
name: remove_example
spec:
containers:
- name: nginx_container
image: nginx
- name: container_with_security_issues
image: image_with_security_issues

View File

@@ -0,0 +1,27 @@
# Fix to Apply:
# del(select(di==0).spec.containers[0].securityContext)
apiVersion: v1
kind: Pod
metadata:
name: remove_example
spec:
containers:
- name: nginx_container
image: nginx
---
# Fix to Apply:
# del(select(di==0).spec.containers[1])
apiVersion: v1
kind: Pod
metadata:
name: remove_example
spec:
containers:
- name: nginx_container
image: nginx

View File

@@ -0,0 +1,14 @@
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.runAsRoot |= false"
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: true

View File

@@ -0,0 +1,14 @@
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.runAsRoot |= false"
apiVersion: v1
kind: Pod
metadata:
name: insert_to_mapping_node_1
spec:
containers:
- name: nginx_container
image: nginx
securityContext:
runAsRoot: false

View File

@@ -0,0 +1,18 @@
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop[0] |= "SYS_ADM"
# select(di==0).spec.containers[0].securityContext.capabilities.add[0] |= "NET_RAW"
apiVersion: v1
kind: Pod
metadata:
name: insert_list
spec:
containers:
- name: nginx1
image: nginx
securityContext:
capabilities:
drop:
- "NET_RAW"
add: ["SYS_ADM"]

View File

@@ -0,0 +1,18 @@
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop[0] |= "SYS_ADM"
# select(di==0).spec.containers[0].securityContext.capabilities.add[0] |= "NET_RAW"
apiVersion: v1
kind: Pod
metadata:
name: insert_list
spec:
containers:
- name: nginx1
image: nginx
securityContext:
capabilities:
drop:
- "SYS_ADM"
add: ["NET_RAW"]

View File

@@ -0,0 +1,286 @@
package fixhandler
import (
"container/list"
"errors"
"fmt"
"io"
"strings"
"github.com/mikefarah/yq/v4/pkg/yqlib"
"gopkg.in/yaml.v3"
)
// decodeDocumentRoots decodes all YAML documents stored in yamlAsString and
// returns a slice of their root nodes, one per document.
//
// Returns an error if the input cannot be parsed as YAML.
func decodeDocumentRoots(yamlAsString string) ([]yaml.Node, error) {
	dec := yaml.NewDecoder(strings.NewReader(yamlAsString))
	nodes := make([]yaml.Node, 0)
	for {
		var node yaml.Node
		err := dec.Decode(&node)
		if errors.Is(err, io.EOF) {
			// Stop before appending: the previous implementation appended the
			// zero-value node of the EOF iteration, leaving a spurious empty
			// trailing root in the result.
			break
		}
		if err != nil {
			return nil, fmt.Errorf("Cannot Decode File as YAML")
		}
		nodes = append(nodes, node)
	}
	return nodes, nil
}
// getFixedNodes applies the given yq expression to every document in
// yamlAsString and returns the fixed root nodes, one per document.
//
// NOTE(review): preferences is taken from yqlib.ConfiguredYamlPreferences and
// then mutated — assumed to be a value copy so the global default is left
// untouched; confirm against the yqlib version in use.
func getFixedNodes(yamlAsString, yamlExpression string) ([]yaml.Node, error) {
	preferences := yqlib.ConfiguredYamlPreferences
	// Evaluate all documents of a multi-document file together so that
	// "select(di==N)" expressions can address individual documents.
	preferences.EvaluateTogether = true
	decoder := yqlib.NewYamlDecoder(preferences)

	var allDocuments = list.New()
	reader := strings.NewReader(yamlAsString)
	fileDocuments, err := readDocuments(reader, decoder)
	if err != nil {
		return nil, err
	}
	allDocuments.PushBackList(fileDocuments)

	allAtOnceEvaluator := yqlib.NewAllAtOnceEvaluator()
	fixedCandidateNodes, err := allAtOnceEvaluator.EvaluateCandidateNodes(yamlExpression, allDocuments)
	if err != nil {
		return nil, fmt.Errorf("Error fixing YAML, %w", err)
	}

	// Unwrap the yqlib candidate nodes into plain yaml.Node values.
	fixedNodes := make([]yaml.Node, 0)
	var fixedNode *yaml.Node
	for fixedCandidateNode := fixedCandidateNodes.Front(); fixedCandidateNode != nil; fixedCandidateNode = fixedCandidateNode.Next() {
		fixedNode = fixedCandidateNode.Value.(*yqlib.CandidateNode).Node
		fixedNodes = append(fixedNodes, *fixedNode)
	}
	return fixedNodes, nil
}
// flattenWithDFS returns the nodes of the YAML tree rooted at node, flattened
// into depth-first (pre-order) traversal order.
func flattenWithDFS(node *yaml.Node) *[]nodeInfo {
	ordered := []nodeInfo{}
	flattenWithDFSHelper(node, nil, &ordered, 0)
	return &ordered
}
// flattenWithDFSHelper appends node and, recursively, all of its children to
// dfsOrder in pre-order, recording each node's parent and child index.
func flattenWithDFSHelper(node *yaml.Node, parent *yaml.Node, dfsOrder *[]nodeInfo, index int) {
	*dfsOrder = append(*dfsOrder, nodeInfo{
		node:   node,
		parent: parent,
		index:  index,
	})
	for childIdx, childNode := range node.Content {
		flattenWithDFSHelper(childNode, node, dfsOrder, childIdx)
	}
}
// getFixInfo diffs each original document root against its fixed counterpart
// and accumulates the content to insert and the lines to remove for the file.
func getFixInfo(originalRootNodes, fixedRootNodes []yaml.Node) fileFixInfo {
	// Locals renamed so they no longer shadow the contentToAdd/linesToRemove types.
	allContentToAdd := make([]contentToAdd, 0)
	allLinesToRemove := make([]linesToRemove, 0)

	for docIdx := range fixedRootNodes {
		originalDFS := flattenWithDFS(&originalRootNodes[docIdx])
		fixedDFS := flattenWithDFS(&fixedRootNodes[docIdx])

		docContent, docLines := getFixInfoHelper(*originalDFS, *fixedDFS)
		allContentToAdd = append(allContentToAdd, docContent...)
		allLinesToRemove = append(allLinesToRemove, docLines...)
	}

	return fileFixInfo{
		contentsToAdd: &allContentToAdd,
		linesToRemove: &allLinesToRemove,
	}
}
// getFixInfoHelper walks the original and fixed DFS node lists in lockstep
// and records, for one document, the content that must be inserted and the
// line ranges that must be removed to turn the original YAML into the fixed one.
func getFixInfoHelper(originalList, fixedList []nodeInfo) ([]contentToAdd, []linesToRemove) {
	// While obtaining fixedYamlNode, comments and empty lines at the top are ignored.
	// This causes a difference in Line numbers across the tree structure. In order to
	// counter this, line numbers are adjusted in fixed list.
	adjustFixedListLines(&originalList, &fixedList)

	contentToAdd := make([]contentToAdd, 0)
	linesToRemove := make([]linesToRemove, 0)

	originalListTracker, fixedListTracker := 0, 0

	// Shared bundle of state handed to the add/remove/replace handlers; the
	// handlers append to the slices and return updated tracker positions.
	fixInfoMetadata := &fixInfoMetadata{
		originalList:        &originalList,
		fixedList:           &fixedList,
		originalListTracker: originalListTracker,
		fixedListTracker:    fixedListTracker,
		contentToAdd:        &contentToAdd,
		linesToRemove:       &linesToRemove,
	}

	for originalListTracker < len(originalList) && fixedListTracker < len(fixedList) {
		matchNodeResult := matchNodes(originalList[originalListTracker].node, fixedList[fixedListTracker].node)
		fixInfoMetadata.originalListTracker = originalListTracker
		fixInfoMetadata.fixedListTracker = fixedListTracker
		switch matchNodeResult {
		case sameNodes:
			// Node unchanged — advance both cursors.
			originalListTracker += 1
			fixedListTracker += 1
		case removedNode:
			originalListTracker, fixedListTracker = addLinesToRemove(fixInfoMetadata)
		case insertedNode:
			originalListTracker, fixedListTracker = addLinesToInsert(fixInfoMetadata)
		case replacedNode:
			originalListTracker, fixedListTracker = updateLinesToReplace(fixInfoMetadata)
		}
	}

	// Some nodes are still not visited if they are removed at the end of the list
	for originalListTracker < len(originalList) {
		fixInfoMetadata.originalListTracker = originalListTracker
		originalListTracker, _ = addLinesToRemove(fixInfoMetadata)
	}

	// Some nodes are still not visited if they are inserted at the end of the list
	for fixedListTracker < len(fixedList) {
		// Use negative index of last node in original list as a placeholder to determine the last line number later
		fixInfoMetadata.originalListTracker = -(len(originalList) - 1)
		fixInfoMetadata.fixedListTracker = fixedListTracker
		_, fixedListTracker = addLinesToInsert(fixInfoMetadata)
	}

	return contentToAdd, linesToRemove
}
// addLinesToRemove records the line range of the node being removed and
// returns the updated (originalListTracker, fixedListTracker) pair.
func addLinesToRemove(fixInfoMetadata *fixInfoMetadata) (int, int) {
	isOneLine, line := isOneLineSequenceNode(fixInfoMetadata.originalList, fixInfoMetadata.originalListTracker)
	if isOneLine {
		// Remove the entire line and replace it with the sequence node in fixed info. This way,
		// the original formatting is not lost.
		return replaceSingleLineSequence(fixInfoMetadata, line)
	}

	currentDFSNode := (*fixInfoMetadata.originalList)[fixInfoMetadata.originalListTracker]
	// Skip the removed node's entire subtree; the next node's line bounds the
	// removal (getNodeLine yields -1 past the end, resolved by assignLastLine).
	newOriginalListTracker := updateTracker(fixInfoMetadata.originalList, fixInfoMetadata.originalListTracker)

	*fixInfoMetadata.linesToRemove = append(*fixInfoMetadata.linesToRemove, linesToRemove{
		startLine: currentDFSNode.node.Line,
		endLine:   getNodeLine(fixInfoMetadata.originalList, newOriginalListTracker),
	})

	return newOriginalListTracker, fixInfoMetadata.fixedListTracker
}
// addLinesToInsert records the fixed node's encoded content for insertion and
// returns the updated (originalListTracker, fixedListTracker) pair.
func addLinesToInsert(fixInfoMetadata *fixInfoMetadata) (int, int) {
	isOneLine, line := isOneLineSequenceNode(fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)
	if isOneLine {
		// Inserting into a flow-style (single line) sequence is handled by
		// replacing that whole line.
		return replaceSingleLineSequence(fixInfoMetadata, line)
	}

	currentDFSNode := (*fixInfoMetadata.fixedList)[fixInfoMetadata.fixedListTracker]
	// lineToInsert may be a negative placeholder meaning "end of resource";
	// assignLastLine resolves it later.
	lineToInsert := getLineToInsert(fixInfoMetadata)
	contentToInsert := getContent(currentDFSNode.parent, fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)

	// Skip the inserted node's entire subtree in the fixed list.
	newFixedTracker := updateTracker(fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)

	*fixInfoMetadata.contentToAdd = append(*fixInfoMetadata.contentToAdd, contentToAdd{
		line:    lineToInsert,
		content: contentToInsert,
	})

	return fixInfoMetadata.originalListTracker, newFixedTracker
}
// updateLinesToReplace handles a replaced node by recording both the removal
// of the original lines and the insertion of the fixed content, and returns
// the updated (originalListTracker, fixedListTracker) pair.
func updateLinesToReplace(fixInfoMetadata *fixInfoMetadata) (int, int) {
	isOneLine, line := isOneLineSequenceNode(fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)
	if isOneLine {
		return replaceSingleLineSequence(fixInfoMetadata, line)
	}

	currentDFSNode := (*fixInfoMetadata.fixedList)[fixInfoMetadata.fixedListTracker]

	// If only the value node is changed, entire "key-value" pair is replaced
	if isValueNodeinMapping(&currentDFSNode) {
		// Step back to the key node so the whole pair is removed and re-inserted.
		fixInfoMetadata.originalListTracker -= 1
		fixInfoMetadata.fixedListTracker -= 1
	}

	// NOTE(review): the trackers returned by addLinesToRemove are deliberately
	// discarded here; the caller resumes from the pair addLinesToInsert returns.
	addLinesToRemove(fixInfoMetadata)
	updatedOriginalTracker, updatedFixedTracker := addLinesToInsert(fixInfoMetadata)

	return updatedOriginalTracker, updatedFixedTracker
}
// removeNewLinesAtTheEnd trims trailing "\n" entries from yamlLines and
// returns the shortened slice.
//
// The previous implementation iterated idx from 1 to len-1 and therefore
// never examined the first element, so a slice whose every element after the
// first was "\n" (or an all-newline slice) was returned unchanged.
func removeNewLinesAtTheEnd(yamlLines []string) []string {
	end := len(yamlLines)
	for end > 0 && yamlLines[end-1] == "\n" {
		end--
	}
	return yamlLines[:end]
}
// getFixedYamlLines merges the computed insertions and removals into the
// original YAML lines and returns the fixed lines. Removed lines are not
// deleted but overwritten with "*" placeholders (see removeLines) so that the
// recorded line numbers stay valid while inserting; placeholders are dropped
// during the copy below.
func getFixedYamlLines(yamlLines []string, fileFixInfo fileFixInfo) (fixedYamlLines []string) {
	// Determining last line requires original yaml lines slice. The placeholder for last line is replaced with the real last line
	assignLastLine(fileFixInfo.contentsToAdd, fileFixInfo.linesToRemove, &yamlLines)

	removeLines(fileFixInfo.linesToRemove, &yamlLines)

	fixedYamlLines = make([]string, 0)
	// lineIdx is 1-based over the original lines; lineToAddIdx walks the insertions.
	lineIdx, lineToAddIdx := 1, 0

	// Ideally, new node is inserted at line before the next node in DFS order. But, when the previous line contains a
	// comment or empty line, we need to insert new nodes before them.
	adjustContentLines(fileFixInfo.contentsToAdd, &yamlLines)

	for lineToAddIdx < len(*fileFixInfo.contentsToAdd) {
		// Copy original lines up to and including the insertion point.
		for lineIdx <= (*fileFixInfo.contentsToAdd)[lineToAddIdx].line {
			// Check if the current line is not removed
			if yamlLines[lineIdx-1] != "*" {
				fixedYamlLines = append(fixedYamlLines, yamlLines[lineIdx-1])
			}
			lineIdx += 1
		}
		content := (*fileFixInfo.contentsToAdd)[lineToAddIdx].content
		fixedYamlLines = append(fixedYamlLines, content)
		lineToAddIdx += 1
	}

	// Copy whatever remains after the last insertion.
	for lineIdx <= len(yamlLines) {
		if yamlLines[lineIdx-1] != "*" {
			fixedYamlLines = append(fixedYamlLines, yamlLines[lineIdx-1])
		}
		lineIdx += 1
	}

	fixedYamlLines = removeNewLinesAtTheEnd(fixedYamlLines)

	return fixedYamlLines
}

View File

@@ -0,0 +1,406 @@
package fixhandler
import (
"bufio"
"bytes"
"container/list"
"errors"
"fmt"
"io"
"math"
"os"
"strings"
logger "github.com/kubescape/go-logger"
"github.com/mikefarah/yq/v4/pkg/yqlib"
"gopkg.in/yaml.v3"
)
// NodeRelation describes how a node in the original YAML tree relates to the
// node at the same traversal position in the fixed tree (see matchNodes).
type NodeRelation int

const (
	sameNodes    NodeRelation = iota // identical kind, value, line and column
	insertedNode                     // present only in the fixed tree
	removedNode                      // present only in the original tree
	replacedNode                     // same position, different content
)
// matchNodes classifies the relationship between a node from the original
// tree (nodeOne) and a node from the fixed tree (nodeTwo).
func matchNodes(nodeOne, nodeTwo *yaml.Node) NodeRelation {
	switch {
	case isSameNode(nodeOne, nodeTwo):
		return sameNodes
	case nodeTwo.Line == 0 && nodeTwo.Column == 0:
		// Freshly created nodes carry no position information.
		return insertedNode
	case nodeOne.Line == nodeTwo.Line && nodeOne.Column == nodeTwo.Column:
		return replacedNode
	default:
		return removedNode
	}
}
// adjustContentLines moves each insertion point upwards past any empty or
// comment lines directly above it, so that new content lands before the
// comments that belong to the following node.
func adjustContentLines(contentToAdd *[]contentToAdd, linesSlice *[]string) {
	for idx := range *contentToAdd {
		insertAt := (*contentToAdd)[idx].line
		for lineIdx := insertAt - 1; lineIdx >= 0; lineIdx-- {
			if !isEmptyLineOrComment((*linesSlice)[lineIdx]) {
				break
			}
			(*contentToAdd)[idx].line -= 1
		}
	}
}
// adjustFixedListLines compensates for comments and empty lines at the top of
// the original file: they are absent from the fixed tree, which shifts every
// fixed line number upwards. Shift the fixed tree back down to match.
func adjustFixedListLines(originalList, fixedList *[]nodeInfo) {
	offset := (*originalList)[0].node.Line - (*fixedList)[0].node.Line
	if offset <= 0 {
		return
	}
	for _, info := range *fixedList {
		// Newly inserted nodes carry line 0 and must keep it.
		if info.node.Line != 0 {
			info.node.Line += offset
		}
	}
}
func enocodeIntoYaml(parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) (string, error) {
content := make([]*yaml.Node, 0)
currentNode := (*nodeList)[tracker].node
content = append(content, currentNode)
// Add the value in "key-value" pair to construct if the parent is mapping node
if parentNode.Kind == yaml.MappingNode {
valueNode := (*nodeList)[tracker+1].node
content = append(content, valueNode)
}
// The parent is added at the top to encode into YAML
parentForContent := yaml.Node{
Kind: parentNode.Kind,
Content: content,
}
buf := new(bytes.Buffer)
encoder := yaml.NewEncoder(buf)
encoder.SetIndent(2)
errorEncoding := encoder.Encode(parentForContent)
if errorEncoding != nil {
return "", fmt.Errorf("Error debugging node, %v", errorEncoding.Error())
}
errorClosingEncoder := encoder.Close()
if errorClosingEncoder != nil {
return "", fmt.Errorf("Error closing encoder: %v", errorClosingEncoder.Error())
}
return fmt.Sprintf(`%v`, buf.String()), nil
}
// getContent encodes the node at tracker into YAML, indents it to line up
// under its parent, and strips the encoder's trailing newline.
func getContent(parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) string {
	encoded, err := enocodeIntoYaml(parentNode, nodeList, tracker)
	if err != nil {
		logger.L().Fatal("Cannot Encode into YAML")
	}
	indented := indentContent(encoded, parentNode.Column-1)
	return strings.TrimSuffix(indented, "\n")
}
// indentContent prefixes every line of content with the requested number of
// spaces, re-terminating each line with "\n".
func indentContent(content string, indentationSpaces int) string {
	prefix := strings.Repeat(" ", indentationSpaces)
	var builder strings.Builder
	scanner := bufio.NewScanner(strings.NewReader(content))
	for scanner.Scan() {
		builder.WriteString(prefix)
		builder.WriteString(scanner.Text())
		builder.WriteString("\n")
	}
	return builder.String()
}
// getLineToInsert returns the line at which new content should be inserted:
// the line just before the next node in the original document, or — when the
// tracker is negative, i.e. the insertion is at the end of a resource — a
// negative placeholder (the negated line of the last original node) that is
// resolved to the real last line later by assignLastLine.
func getLineToInsert(fixInfoMetadata *fixInfoMetadata) int {
	tracker := fixInfoMetadata.originalListTracker
	if tracker < 0 {
		// Plain integer negation replaces the lossy float round-trip through
		// math.Abs that the previous implementation used.
		return -(*fixInfoMetadata.originalList)[-tracker].node.Line
	}
	return (*fixInfoMetadata.originalList)[tracker].node.Line - 1
}
// assignLastLine resolves the negative "end of resource" placeholders left in
// contentsToAdd and linesToRemove, replacing each with the real last line of
// the resource the placeholder points into.
//
// NOTE(review): lookup errors from getLastLineOfResource are deliberately
// ignored, matching the previous behavior.
func assignLastLine(contentsToAdd *[]contentToAdd, linesToRemove *[]linesToRemove, linesSlice *[]string) {
	for idx := range *contentsToAdd {
		if line := (*contentsToAdd)[idx].line; line < 0 {
			// Integer negation replaces the float round-trip through math.Abs.
			(*contentsToAdd)[idx].line, _ = getLastLineOfResource(linesSlice, -line)
		}
	}
	for idx := range *linesToRemove {
		if (*linesToRemove)[idx].endLine < 0 {
			endLine, _ := getLastLineOfResource(linesSlice, (*linesToRemove)[idx].startLine)
			(*linesToRemove)[idx].endLine = endLine
		}
	}
}
// getLastLineOfResource returns the last non-empty, non-comment line (1-based)
// of the YAML resource (document) that contains currentLine. Resources are
// separated by "---" lines.
//
// Returns an error when currentLine lies beyond every resource's last line.
func getLastLineOfResource(linesSlice *[]string, currentLine int) (int, error) {
	// Collect the last meaningful line of every "---"-terminated resource.
	lastLinesOfResources := make([]int, 0)
	for lineNumber, lineContent := range *linesSlice {
		if lineContent == "---" {
			for lastLine := lineNumber - 1; lastLine >= 0; lastLine-- {
				if !isEmptyLineOrComment((*linesSlice)[lastLine]) {
					lastLinesOfResources = append(lastLinesOfResources, lastLine+1)
					break
				}
			}
		}
	}

	// The final resource has no trailing "---"; scan backwards from the end.
	// Guard with lastLine > 0 — the previous `lastLine >= 0` condition indexed
	// linesSlice[-1] and panicked when every line was empty or a comment.
	lastLine := len(*linesSlice)
	for lastLine > 0 {
		if !isEmptyLineOrComment((*linesSlice)[lastLine-1]) {
			lastLinesOfResources = append(lastLinesOfResources, lastLine)
			break
		}
		lastLine--
	}

	// Pick the first resource whose last line is at or after currentLine.
	for _, endLine := range lastLinesOfResources {
		if currentLine <= endLine {
			return endLine, nil
		}
	}

	return 0, fmt.Errorf("Provided line is greater than the length of YAML file")
}
// getNodeLine returns the line of the node at tracker, or -1 when the tracker
// has run past the end of the list.
func getNodeLine(nodeList *[]nodeInfo, tracker int) int {
	if tracker >= len(*nodeList) {
		return -1
	}
	return (*nodeList)[tracker].node.Line
}
// isValueNodeinMapping reports whether the node is the value of a "key-value"
// pair inside a mapping node (values sit at odd child indices).
func isValueNodeinMapping(node *nodeInfo) bool {
	return node.parent.Kind == yaml.MappingNode && node.index%2 != 0
}
// isOneLineSequenceNode checks whether the node at currentTracker belongs to a
// sequence whose items all sit on one line (flow style, e.g. `drop: [A, B]`)
// and, if so, returns that line. Returns (false, -1) otherwise.
func isOneLineSequenceNode(list *[]nodeInfo, currentTracker int) (bool, int) {
	parentNode := (*list)[currentTracker].parent
	if parentNode.Kind != yaml.SequenceNode {
		return false, -1
	}

	// Walk backwards over the preceding siblings; each must share a line with
	// its predecessor for the sequence to be single-line.
	var currentNode, prevNode nodeInfo
	currentTracker -= 1
	for (*list)[currentTracker].node != parentNode {
		currentNode = (*list)[currentTracker]
		prevNode = (*list)[currentTracker-1]
		if currentNode.node.Line != prevNode.node.Line {
			return false, -1
		}
		currentTracker -= 1
	}

	// currentTracker now points at the sequence node itself; additionally the
	// sequence must start on the same line as its key (mapping parent) or its
	// enclosing node (sequence parent).
	parentNodeInfo := (*list)[currentTracker]
	if parentNodeInfo.parent.Kind == yaml.MappingNode {
		keyNodeInfo := (*list)[currentTracker-1]
		if keyNodeInfo.node.Line == parentNode.Line {
			return true, parentNode.Line
		} else {
			return false, -1
		}
	} else {
		if parentNodeInfo.parent.Line == parentNode.Line {
			return true, parentNode.Line
		} else {
			return false, -1
		}
	}
}
// isSameNode reports whether the two nodes agree on kind, value, line and column.
func isSameNode(nodeOne, nodeTwo *yaml.Node) bool {
	return nodeOne.Kind == nodeTwo.Kind &&
		nodeOne.Value == nodeTwo.Value &&
		nodeOne.Line == nodeTwo.Line &&
		nodeOne.Column == nodeTwo.Column
}
// isEmptyLineOrComment reports whether the line is blank (after trimming
// whitespace) or a "#" comment.
func isEmptyLineOrComment(lineContent string) bool {
	trimmed := strings.TrimSpace(lineContent)
	return trimmed == "" || strings.HasPrefix(trimmed, "#")
}
// readDocuments decodes every YAML document from reader into yqlib candidate
// nodes and returns them as a list, tagging each with its document index and
// EvaluateTogether so multi-document expressions (select(di==N)) work.
//
// On EOF the reader is closed if it is an *os.File; decode failures abort
// with a wrapped error.
func readDocuments(reader io.Reader, decoder yqlib.Decoder) (*list.List, error) {
	err := decoder.Init(reader)
	if err != nil {
		return nil, fmt.Errorf("Error Initializing the decoder, %w", err)
	}
	inputList := list.New()
	var currentIndex uint

	for {
		candidateNode, errorReading := decoder.Decode()
		if errors.Is(errorReading, io.EOF) {
			switch reader := reader.(type) {
			case *os.File:
				safelyCloseFile(reader)
			}
			return inputList, nil
		} else if errorReading != nil {
			return nil, fmt.Errorf("Error Decoding YAML file, %w", errorReading)
		}
		candidateNode.Document = currentIndex
		candidateNode.EvaluateTogether = true

		inputList.PushBack(candidateNode)

		currentIndex = currentIndex + 1
	}
}
// safelyCloseFile closes the file and logs (rather than propagates) any
// close error.
func safelyCloseFile(file *os.File) {
	if err := file.Close(); err != nil {
		logger.L().Error("Error Closing File")
	}
}
// replaceSingleLineSequence removes the single line holding a flow-style
// sequence and re-inserts the encoded fixed sequence at the same line. This
// way, the original formatting is not lost. (The previous comment said
// "is lost" — a typo; compare the identical comment in addLinesToRemove.)
// Returns the updated (originalListTracker, fixedListTracker) pair, both
// advanced past the sequence's subtree.
func replaceSingleLineSequence(fixInfoMetadata *fixInfoMetadata, line int) (int, int) {
	originalListTracker := getFirstNodeInLine(fixInfoMetadata.originalList, line)
	fixedListTracker := getFirstNodeInLine(fixInfoMetadata.fixedList, line)

	currentDFSNode := (*fixInfoMetadata.fixedList)[fixedListTracker]
	contentToInsert := getContent(currentDFSNode.parent, fixInfoMetadata.fixedList, fixedListTracker)

	// Remove the Single line
	*fixInfoMetadata.linesToRemove = append(*fixInfoMetadata.linesToRemove, linesToRemove{
		startLine: line,
		endLine:   line,
	})

	// Encode entire Sequence Node and Insert
	*fixInfoMetadata.contentToAdd = append(*fixInfoMetadata.contentToAdd, contentToAdd{
		line:    line,
		content: contentToInsert,
	})

	originalListTracker = updateTracker(fixInfoMetadata.originalList, originalListTracker)
	fixedListTracker = updateTracker(fixInfoMetadata.fixedList, fixedListTracker)

	return originalListTracker, fixedListTracker
}
// getFirstNodeInLine returns the index of the first node on the given line
// that is not a mapping node.
// NOTE(review): assumes such a node exists; if the line is absent, the scan
// would run past the end of the list, as in the original implementation.
func getFirstNodeInLine(list *[]nodeInfo, line int) int {
	for tracker := 0; ; tracker++ {
		candidate := (*list)[tracker].node
		if candidate.Line == line && candidate.Kind != yaml.MappingNode {
			return tracker
		}
	}
}
// removeLines blanks out the recorded line ranges by overwriting each line
// with the "*" placeholder instead of deleting it, so that the line numbers
// used by pending insertions stay valid. The placeholders are dropped later
// in getFixedYamlLines.
func removeLines(linesToRemove *[]linesToRemove, linesSlice *[]string) {
	var startLine, endLine int
	for _, lineToRemove := range *linesToRemove {
		// Convert the 1-based recorded range to 0-based slice indices.
		startLine = lineToRemove.startLine - 1
		endLine = lineToRemove.endLine - 1
		for line := startLine; line <= endLine; line++ {
			lineContent := (*linesSlice)[line]
			// When determining the endLine, empty lines and comments which are not intended to be removed are included.
			// To deal with that, we need to refrain from removing empty lines and comments
			if isEmptyLineOrComment(lineContent) {
				break
			}
			(*linesSlice)[line] = "*"
		}
	}
}
// skipCurrentNode returns the tracker advanced past the given node and all of
// its children in DFS order.
func skipCurrentNode(node *yaml.Node, currentTracker int) int {
	return currentTracker + getChildrenCount(node)
}
// getChildrenCount returns the size of the subtree rooted at node, counting
// the node itself.
func getChildrenCount(node *yaml.Node) int {
	count := 1
	for _, child := range node.Content {
		count += getChildrenCount(child)
	}
	return count
}
// updateTracker skips the node at tracker (and its children) and returns the
// index of its next sibling in DFS order. When the parent is a mapping node,
// the tracked node is a key, so the value node's subtree is skipped as well.
func updateTracker(nodeList *[]nodeInfo, tracker int) int {
	current := (*nodeList)[tracker]
	if current.parent.Kind == yaml.MappingNode {
		valueEntry := (*nodeList)[tracker+1]
		return skipCurrentNode(valueEntry.node, tracker+1)
	}
	return skipCurrentNode(current.node, tracker)
}
// getStringFromSlice joins the YAML lines back into one newline-separated string.
func getStringFromSlice(yamlLines []string) string {
	return strings.Join(yamlLines, "\n")
}

View File

@@ -103,6 +103,8 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
gitRepo, err := cautils.NewLocalGitRepository(path)
if err == nil && gitRepo != nil {
repoRoot, _ = gitRepo.GetRootDir()
} else {
repoRoot, _ = filepath.Abs(path)
}
// load resource from local file system
@@ -141,7 +143,7 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
}
workloadSource := reporthandling.Source{
RelativePath: source,
RelativePath: relSource,
FileType: filetype,
LastCommit: lastCommit,
}

View File

@@ -3,8 +3,6 @@ package resourcesprioritization
import (
"fmt"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/cautils/getter"
@@ -21,8 +19,7 @@ func NewResourcesPrioritizationHandler(attackTracksGetter getter.IAttackTracksGe
attackTracks: make([]v1alpha1.IAttackTrack, 0),
}
tracks, err := attackTracksGetter.GetAttackTracks()
if err != nil {
if tracks, err := attackTracksGetter.GetAttackTracks(); err != nil {
return nil, err
} else {
for _, attackTrack := range tracks {
@@ -39,12 +36,6 @@ func NewResourcesPrioritizationHandler(attackTracksGetter getter.IAttackTracksGe
return nil, fmt.Errorf("expected to find at least one attack track")
}
// Store attack tracks in cache
cache := getter.GetDefaultPath(cautils.LocalAttackTracksFilename)
if err := getter.SaveInFile(tracks, cache); err != nil {
logger.L().Warning("failed to cache file", helpers.String("file", cache), helpers.Error(err))
}
return handler, nil
}
@@ -64,8 +55,7 @@ func (handler *ResourcesPrioritizationHandler) PrioritizeResources(sessionObj *c
resourcePriorityVector := []prioritization.ControlsVector{}
resource, exist := sessionObj.AllResources[resourceId]
if !exist {
logger.L().Error("resource not found in resources map", helpers.String("resource ID", resourceId))
continue
return fmt.Errorf("expected to find resource id '%s' in scanned resources map", resourceId)
}
workload := workloadinterface.NewWorkloadObj(resource.GetObject())

View File

@@ -4,7 +4,6 @@ import (
"encoding/xml"
"fmt"
"os"
"sort"
"strings"
logger "github.com/kubescape/go-logger"
@@ -12,8 +11,9 @@ import (
"github.com/kubescape/k8s-interface/workloadinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/pkg/resultshandling/printer"
"github.com/kubescape/opa-utils/reporthandling/apis"
"github.com/kubescape/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/kubescape/opa-utils/shared"
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
)
/*
@@ -55,6 +55,7 @@ type JUnitTestSuite struct {
Skipped string `xml:"skipped,attr"` // The total number of skipped tests
Time string `xml:"time,attr"` // Time taken (in seconds) to execute the tests in the suite
Timestamp string `xml:"timestamp,attr"` // when the test was executed in ISO 8601 format (2014-01-21T16:17:18)
File string `xml:"file,attr"` // The file be tested
Properties []JUnitProperty `xml:"properties>property,omitempty"`
TestCases []JUnitTestCase `xml:"testcase"`
}
@@ -88,6 +89,11 @@ type JUnitFailure struct {
Contents string `xml:",chardata"`
}
const (
lineSeparator = "\n===================================================================================================================\n\n"
testCaseTypeResources = "Resources"
)
func NewJunitPrinter(verbose bool) *JunitPrinter {
return &JunitPrinter{
verbose: verbose,
@@ -118,96 +124,118 @@ func (junitPrinter *JunitPrinter) ActionPrint(opaSessionObj *cautils.OPASessionO
func testsSuites(results *cautils.OPASessionObj) *JUnitTestSuites {
return &JUnitTestSuites{
Suites: listTestsSuite(results),
Tests: results.Report.SummaryDetails.NumberOfControls().All(),
Tests: results.Report.SummaryDetails.NumberOfResources().All(),
Name: "Kubescape Scanning",
Failures: results.Report.SummaryDetails.NumberOfControls().Failed(),
Failures: results.Report.SummaryDetails.NumberOfResources().Failed(),
}
}
// aggregate resources source to a list of resources results
func sourceToResourcesResults(results *cautils.OPASessionObj) map[string][]resourcesresults.Result {
resourceResults := make(map[string][]resourcesresults.Result)
for i := range results.ResourceSource {
if r, ok := results.ResourcesResult[i]; ok {
if _, ok := resourceResults[results.ResourceSource[i].RelativePath]; !ok {
resourceResults[results.ResourceSource[i].RelativePath] = []resourcesresults.Result{}
}
resourceResults[results.ResourceSource[i].RelativePath] = append(resourceResults[results.ResourceSource[i].RelativePath], r)
}
}
return resourceResults
}
// listTestsSuite returns a list of testsuites
func listTestsSuite(results *cautils.OPASessionObj) []JUnitTestSuite {
var testSuites []JUnitTestSuite
resourceResults := sourceToResourcesResults(results)
counter := 0
// control scan
if len(results.Report.SummaryDetails.ListFrameworks()) == 0 {
for path, resourcesResult := range resourceResults {
testSuite := JUnitTestSuite{}
testSuite.Failures = results.Report.SummaryDetails.NumberOfControls().Failed()
testSuite.Timestamp = results.Report.ReportGenerationTime.String()
testSuite.ID = 0
testSuite.Name = "kubescape"
testSuite.Properties = properties(results.Report.SummaryDetails.Score)
testSuite.TestCases = testsCases(results, &results.Report.SummaryDetails.Controls, "Kubescape")
testSuites = append(testSuites, testSuite)
return testSuites
}
for i, f := range results.Report.SummaryDetails.Frameworks {
testSuite := JUnitTestSuite{}
testSuite.Failures = f.NumberOfControls().Failed()
testSuite.Timestamp = results.Report.ReportGenerationTime.String()
testSuite.ID = i
testSuite.Name = f.Name
testSuite.Properties = properties(f.Score)
testSuite.TestCases = testsCases(results, f.GetControls(), f.GetName())
testSuites = append(testSuites, testSuite)
testSuite.ID = counter
counter++
testSuite.File = path
testSuite.TestCases = testsCases(results, resourcesResult)
if len(testSuite.TestCases) > 0 {
testSuites = append(testSuites, testSuite)
}
}
return testSuites
}
func testsCases(results *cautils.OPASessionObj, controls reportsummary.IControlsSummaries, classname string) []JUnitTestCase {
var testCases []JUnitTestCase
iter := controls.ListControlsIDs().All()
for iter.HasNext() {
cID := iter.Next()
testCase := JUnitTestCase{}
control := results.Report.SummaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, cID)
testCase.Name = control.GetName()
testCase.Classname = classname
testCase.Status = string(control.GetStatus().Status())
if control.GetStatus().IsFailed() {
resources := map[string]interface{}{}
resourceIDs := control.ListResourcesIDs().Failed()
for j := range resourceIDs {
resource := results.AllResources[resourceIDs[j]]
resources[resourceToString(resource)] = nil
func failedControlsToFailureMessage(results *cautils.OPASessionObj, controls []resourcesresults.ResourceAssociatedControl, severityCounter []int) string {
msg := ""
for _, c := range controls {
control := results.Report.SummaryDetails.Controls.GetControl(reportsummary.EControlCriteriaID, c.GetID())
if c.GetStatus(nil).IsFailed() {
msg += fmt.Sprintf("Test: %s\n", control.GetName())
msg += fmt.Sprintf("Severity: %s\n", apis.ControlSeverityToString(control.GetScoreFactor()))
msg += fmt.Sprintf("Remediation: %s\n", control.GetRemediation())
msg += fmt.Sprintf("Link: %s\n", getControlLink(control.GetID()))
if failedPaths := failedPathsToString(&c); len(failedPaths) > 0 {
msg += fmt.Sprintf("Failed paths: \n - %s\n", strings.Join(failedPaths, "\n - "))
}
resourcesStr := shared.MapStringToSlice(resources)
sort.Strings(resourcesStr)
testCaseFailure := JUnitFailure{}
testCaseFailure.Type = "Control"
// testCaseFailure.Contents =
testCaseFailure.Message = fmt.Sprintf("Remediation: %s\nMore details: %s\n\n%s", control.GetRemediation(), cautils.GetControlLink(control.GetID()), strings.Join(resourcesStr, "\n"))
testCase.Failure = &testCaseFailure
} else if control.GetStatus().IsSkipped() {
testCase.SkipMessage = &JUnitSkipMessage{
Message: "", // TODO - fill after statusInfo is supported
if fixPaths := fixPathsToString(&c); len(fixPaths) > 0 {
msg += fmt.Sprintf("Available fix: \n - %s\n", strings.Join(fixPaths, "\n - "))
}
msg += "\n"
severityCounter[apis.ControlSeverityToInt(control.GetScoreFactor())] += 1
}
}
return msg
}
// Every testCase includes a file (even if the file contains several resources)
func testsCases(results *cautils.OPASessionObj, resourcesResult []resourcesresults.Result) []JUnitTestCase {
var testCases []JUnitTestCase
testCase := JUnitTestCase{}
testCaseFailure := JUnitFailure{}
testCaseFailure.Type = testCaseTypeResources
message := ""
// severityCounter represents the severities, 0: Unknown, 1: Low, 2: Medium, 3: High, 4: Critical
severityCounter := make([]int, apis.NumberOfSeverities, apis.NumberOfSeverities)
for i := range resourcesResult {
if failedControls := failedControlsToFailureMessage(results, resourcesResult[i].ListControls(), severityCounter); failedControls != "" {
message += fmt.Sprintf("%sResource: %s\n\n%s", lineSeparator, resourceNameToString(results.AllResources[resourcesResult[i].GetResourceID()]), failedControls)
}
}
testCaseFailure.Message += fmt.Sprintf("%s\n%s", getSummaryMessage(severityCounter), message)
testCase.Failure = &testCaseFailure
if testCase.Failure.Message != "" {
testCases = append(testCases, testCase)
}
return testCases
}
func resourceToString(resource workloadinterface.IMetadata) string {
sep := "; "
s := ""
s += fmt.Sprintf("apiVersion: %s", resource.GetApiVersion()) + sep
s += fmt.Sprintf("kind: %s", resource.GetKind()) + sep
if resource.GetNamespace() != "" {
s += fmt.Sprintf("namespace: %s", resource.GetNamespace()) + sep
func getSummaryMessage(severityCounter []int) string {
total := 0
severities := ""
for i, count := range severityCounter {
if apis.SeverityNumberToString(i) == apis.SeverityNumberToString(apis.SeverityUnknown) {
continue
}
severities += fmt.Sprintf("%s: %d, ", apis.SeverityNumberToString(i), count)
total += count
}
s += fmt.Sprintf("name: %s", resource.GetName())
return s
if len(severities) == 0 {
return ""
}
return fmt.Sprintf("Total: %d (%s)", total, severities[:len(severities)-2])
}
func properties(riskScore float32) []JUnitProperty {
return []JUnitProperty{
{
Name: "riskScore",
Value: fmt.Sprintf("%.2f", riskScore),
},
func resourceNameToString(resource workloadinterface.IMetadata) string {
s := ""
s += fmt.Sprintf("kind=%s/", resource.GetKind())
if resource.GetNamespace() != "" {
s += fmt.Sprintf("namespace=%s/", resource.GetNamespace())
}
s += fmt.Sprintf("name=%s", resource.GetName())
return s
}

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"sort"
"strings"
"github.com/enescakir/emoji"
"github.com/kubescape/k8s-interface/workloadinterface"
@@ -83,7 +84,7 @@ func (prettyPrinter *PrettyPrinter) printSummary(controlName string, controlSumm
}
func (prettyPrinter *PrettyPrinter) printTitle(controlSummary reportsummary.IControlSummary) {
cautils.InfoDisplay(prettyPrinter.writer, "[control: %s - %s] ", controlSummary.GetName(), cautils.GetControlLink(controlSummary.GetID()))
cautils.InfoDisplay(prettyPrinter.writer, "[control: %s - %s] ", controlSummary.GetName(), getControlLink(controlSummary.GetID()))
switch controlSummary.GetStatus().Status() {
case apis.StatusSkipped:
cautils.InfoDisplay(prettyPrinter.writer, "skipped %v\n", emoji.ConfusedFace)
@@ -254,6 +255,10 @@ func frameworksScoresToString(frameworks []reportsummary.IFrameworkSummary) stri
return ""
}
func getControlLink(controlID string) string {
return fmt.Sprintf("https://hub.armosec.io/docs/%s", strings.ToLower(controlID))
}
// renderSeverityCountersSummary renders the string that reports severity counters summary
func renderSeverityCountersSummary(counters reportsummary.ISeverityCounters) string {
critical := counters.NumberOfResourcesWithCriticalSeverity()

View File

@@ -289,7 +289,7 @@ func (m *Metrics) setRiskScores(summaryDetails *reportsummary.SummaryDetails) {
controlName: control.GetName(),
controlID: control.GetID(),
riskScore: cautils.Float32ToInt(control.GetScore()),
link: cautils.GetControlLink(control.GetID()),
link: getControlLink(control.GetID()),
severity: apis.ControlSeverityToString(control.GetScoreFactor()),
remediation: control.GetRemediation(),
}

View File

@@ -73,7 +73,7 @@ func generateResourceRows(controls []resourcesresults.ResourceAssociatedControl,
continue
}
row[resourceColumnURL] = cautils.GetControlLink(controls[i].GetID())
row[resourceColumnURL] = fmt.Sprintf("https://hub.armosec.io/docs/%s", strings.ToLower(controls[i].GetID()))
row[resourceColumnPath] = strings.Join(append(failedPathsToString(&controls[i]), fixPathsToString(&controls[i])...), "\n")
row[resourceColumnName] = controls[i].GetName()

View File

@@ -1,7 +1,6 @@
package v2
import (
"fmt"
"os"
"path"
"path/filepath"
@@ -85,7 +84,7 @@ func (sp *SARIFPrinter) addRule(scanRun *sarif.Run, control reportsummary.IContr
WithDefaultConfiguration(configuration).
WithShortDescription(sarif.NewMultiformatMessageString(control.GetName())).
WithFullDescription(sarif.NewMultiformatMessageString(control.GetDescription())).
WithHelp(sarif.NewMultiformatMessageString(sp.generateRemediationMessage(control)))
WithHelp(sarif.NewMultiformatMessageString(control.GetRemediation()))
}
// addResult adds a result of checking a rule to the scan run based on the given control summary
@@ -197,8 +196,3 @@ func getBasePathFromMetadata(opaSessionObj cautils.OPASessionObj) string {
return ""
}
// generateRemediationMessage generates a remediation message for the given control summary
func (sp *SARIFPrinter) generateRemediationMessage(control reportsummary.IControlSummary) string {
return fmt.Sprintf("Remediation: %s", control.GetRemediation())
}

View File

@@ -16,6 +16,7 @@ import (
func FinalizeResults(data *cautils.OPASessionObj) *reporthandlingv2.PostureReport {
report := reporthandlingv2.PostureReport{
SummaryDetails: data.Report.SummaryDetails,
Metadata: *data.Metadata,
ClusterAPIServerInfo: data.Report.ClusterAPIServerInfo,
ReportGenerationTime: data.Report.ReportGenerationTime,
Attributes: data.Report.Attributes,

View File

@@ -86,7 +86,7 @@ func NewPrinter(printFormat, formatVersion string, verboseMode bool, viewType ca
case "v2":
return printerv2.NewJsonPrinter()
default:
logger.L().Warning("Deprecated format version", helpers.String("run", "--format-version=v2"), helpers.String("This will not be supported after", "1/Jan/2023"))
logger.L().Warning("Deprecated format version", helpers.String("run", "--format-version=v2"))
return printerv1.NewJsonPrinter()
}
case printer.JunitResultFormat:

File diff suppressed because one or more lines are too long

View File

@@ -64,24 +64,24 @@ But if you wish to exclude all namespaces **OR** any resource with the label `"e
Same works with the `posturePolicies` list ->
e.g. If you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **AND** failed the `HostPath mount` control, the `posturePolicies` list should look as follows:
e.g. If you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **AND** failed the `Allowed hostPath` control, the `posturePolicies` list should look as follows:
```
"posturePolicies": [
{
"frameworkName": "NSA",
"controlName": "HostPath mount"
"controlName": "Allowed hostPath"
}
]
```
But if you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **OR** failed the `HostPath mount` control, the `posturePolicies` list should look as follows:
But if you wish to exclude the resources declared in the `resources` list that failed when scanning the `NSA` framework **OR** failed the `Allowed hostPath` control, the `posturePolicies` list should look as follows:
```
"posturePolicies": [
{
"frameworkName": "NSA"
},
{
"controlName": "HostPath mount"
"controlName": "Allowed hostPath"
}
]
```
@@ -122,7 +122,7 @@ The resources
]
```
### Exclude deployments in the default namespace that failed the "HostPath mount" control
### Exclude deployments in the default namespace that failed the "Allowed hostPath" control
```
[
{
@@ -142,7 +142,7 @@ The resources
],
"posturePolicies": [
{
"controlName": "HostPath mount"
"controlName": "Allowed hostPath"
}
]
}

View File

@@ -15,7 +15,7 @@
],
"posturePolicies": [
{
"controlName": "HostPath mount"
"controlName": "Allowed hostPath"
}
]
}

View File

@@ -16,7 +16,7 @@
],
"posturePolicies": [
{
"controlName": "HostPath mount"
"controlName": "Allowed hostPath"
}
]
}

View File

@@ -98,56 +98,56 @@ kubescape_object_failed_count{framework="NSA",control="Privileged container",nam
kubescape_object_failed_count{framework="NSA",control="Privileged container",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "NSA" control "Privileged container"
kubescape_object_failed_count{framework="NSA",control="Privileged container",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
# Number of resources found as part of NSA control HostPath mount
kubescape_resources_found_count{framework="NSA",control="HostPath mount"} 22
# Number of resources excluded as part of NSA control HostPath mount
kubescape_resources_excluded_count{framework="NSA",control="HostPath mount"} 0
# Number of resources failed as part of NSA control HostPath mount
kubescape_resources_failed_count{framework="NSA",control="HostPath mount"} 7
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-oracle",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-posture",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-rbac",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-audit",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-dashboard-aggregator",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-notification-server",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-ocimage",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="coredns",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="nginx-ingress",name="nginx-ingress",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-apiserver-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-scheduler-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Number of resources found as part of NSA control Allowed hostPath
kubescape_resources_found_count{framework="NSA",control="Allowed hostPath"} 22
# Number of resources excluded as part of NSA control Allowed hostPath
kubescape_resources_excluded_count{framework="NSA",control="Allowed hostPath"} 0
# Number of resources failed as part of NSA control Allowed hostPath
kubescape_resources_failed_count{framework="NSA",control="Allowed hostPath"} 7
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-oracle",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-posture",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-rbac",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-audit",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-dashboard-aggregator",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-notification-server",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-ocimage",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="coredns",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="nginx-ingress",name="nginx-ingress",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-apiserver-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-scheduler-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Number of resources found as part of NSA control Automatic mapping of service account
kubescape_resources_found_count{framework="NSA",control="Automatic mapping of service account"} 47
# Number of resources excluded as part of NSA control Automatic mapping of service account
@@ -2668,56 +2668,56 @@ kubescape_object_failed_count{framework="ArmoBest",control="Privileged container
kubescape_object_failed_count{framework="ArmoBest",control="Privileged container",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "ArmoBest" control "Privileged container"
kubescape_object_failed_count{framework="ArmoBest",control="Privileged container",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
# Number of resources found as part of ArmoBest control HostPath mount
kubescape_resources_found_count{framework="ArmoBest",control="HostPath mount"} 22
# Number of resources excluded as part of ArmoBest control HostPath mount
kubescape_resources_excluded_count{framework="ArmoBest",control="HostPath mount"} 0
# Number of resources failed as part of ArmoBest control HostPath mount
kubescape_resources_failed_count{framework="ArmoBest",control="HostPath mount"} 7
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="coredns",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="nginx-ingress",name="nginx-ingress",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-oracle",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-posture",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-rbac",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-audit",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-dashboard-aggregator",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-notification-server",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-ocimage",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-apiserver-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-scheduler-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Number of resources found as part of ArmoBest control Allowed hostPath
kubescape_resources_found_count{framework="ArmoBest",control="Allowed hostPath"} 22
# Number of resources excluded as part of ArmoBest control Allowed hostPath
kubescape_resources_excluded_count{framework="ArmoBest",control="Allowed hostPath"} 0
# Number of resources failed as part of ArmoBest control Allowed hostPath
kubescape_resources_failed_count{framework="ArmoBest",control="Allowed hostPath"} 7
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="coredns",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="nginx-ingress",name="nginx-ingress",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-oracle",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-posture",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-rbac",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-vuln-scan",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-audit",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-dashboard-aggregator",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-notification-server",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-ocimage",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-kubescape",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-scan-scheduler",groupVersionKind="batch/v1/CronJob"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-apiserver-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-scheduler-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Number of resources found as part of ArmoBest control Automatic mapping of service account
kubescape_resources_found_count{framework="ArmoBest",control="Automatic mapping of service account"} 47
# Number of resources excluded as part of ArmoBest control Automatic mapping of service account

View File

@@ -54,26 +54,26 @@ kubescape_resources_excluded_count{framework="NSA",control="Privileged container
kubescape_resources_failed_count{framework="NSA",control="Privileged container"} 1
# Failed object from "NSA" control "Privileged container"
kubescape_object_failed_count{framework="NSA",control="Privileged container",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Number of resources found as part of NSA control HostPath mount
kubescape_resources_found_count{framework="NSA",control="HostPath mount"} 22
# Number of resources excluded as part of NSA control HostPath mount
kubescape_resources_excluded_count{framework="NSA",control="HostPath mount"} 0
# Number of resources failed as part of NSA control HostPath mount
kubescape_resources_failed_count{framework="NSA",control="HostPath mount"} 7
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "HostPath mount"
kubescape_object_failed_count{framework="NSA",control="HostPath mount",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Number of resources found as part of NSA control Allowed hostPath
kubescape_resources_found_count{framework="NSA",control="Allowed hostPath"} 22
# Number of resources excluded as part of NSA control Allowed hostPath
kubescape_resources_excluded_count{framework="NSA",control="Allowed hostPath"} 0
# Number of resources failed as part of NSA control Allowed hostPath
kubescape_resources_failed_count{framework="NSA",control="Allowed hostPath"} 7
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "NSA" control "Allowed hostPath"
kubescape_object_failed_count{framework="NSA",control="Allowed hostPath",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Number of resources found as part of NSA control Automatic mapping of service account
kubescape_resources_found_count{framework="NSA",control="Automatic mapping of service account"} 47
# Number of resources excluded as part of NSA control Automatic mapping of service account
@@ -872,26 +872,26 @@ kubescape_resources_excluded_count{framework="ArmoBest",control="Privileged cont
kubescape_resources_failed_count{framework="ArmoBest",control="Privileged container"} 1
# Failed object from "ArmoBest" control "Privileged container"
kubescape_object_failed_count{framework="ArmoBest",control="Privileged container",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Number of resources found as part of ArmoBest control HostPath mount
kubescape_resources_found_count{framework="ArmoBest",control="HostPath mount"} 22
# Number of resources excluded as part of ArmoBest control HostPath mount
kubescape_resources_excluded_count{framework="ArmoBest",control="HostPath mount"} 0
# Number of resources failed as part of ArmoBest control HostPath mount
kubescape_resources_failed_count{framework="ArmoBest",control="HostPath mount"} 7
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "HostPath mount"
kubescape_object_failed_count{framework="ArmoBest",control="HostPath mount",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Number of resources found as part of ArmoBest control Allowed hostPath
kubescape_resources_found_count{framework="ArmoBest",control="Allowed hostPath"} 22
# Number of resources excluded as part of ArmoBest control Allowed hostPath
kubescape_resources_excluded_count{framework="ArmoBest",control="Allowed hostPath"} 0
# Number of resources failed as part of ArmoBest control Allowed hostPath
kubescape_resources_failed_count{framework="ArmoBest",control="Allowed hostPath"} 7
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-proxy",groupVersionKind="apps/v1/DaemonSet"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="etcd-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="kube-controller-manager-david-virtualbox",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kube-system",name="storage-provisioner",groupVersionKind="v1/Pod"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="kubescape",name="armo-web-socket",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-webhook",groupVersionKind="apps/v1/Deployment"} 1
# Failed object from "ArmoBest" control "Allowed hostPath"
kubescape_object_failed_count{framework="ArmoBest",control="Allowed hostPath",namespace="cyberarmor-system",name="ca-websocket",groupVersionKind="apps/v1/Deployment"} 1
# Number of resources found as part of ArmoBest control Automatic mapping of service account
kubescape_resources_found_count{framework="ArmoBest",control="Automatic mapping of service account"} 47
# Number of resources excluded as part of ArmoBest control Automatic mapping of service account

26
go.mod
View File

@@ -16,9 +16,9 @@ require (
github.com/google/uuid v1.3.0
github.com/johnfercher/maroto v0.37.0
github.com/kubescape/go-logger v0.0.6
github.com/kubescape/k8s-interface v0.0.89
github.com/kubescape/k8s-interface v0.0.84
github.com/kubescape/opa-utils v0.0.200
github.com/kubescape/rbac-utils v0.0.19
github.com/kubescape/rbac-utils v0.0.17
github.com/libgit2/git2go/v33 v33.0.9
github.com/mattn/go-isatty v0.0.14
github.com/mikefarah/yq/v4 v4.29.1
@@ -35,10 +35,10 @@ require (
gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473
gopkg.in/yaml.v3 v3.0.1
helm.sh/helm/v3 v3.9.0
k8s.io/api v0.25.3
k8s.io/apimachinery v0.25.3
k8s.io/client-go v0.25.3
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed
k8s.io/api v0.24.3
k8s.io/apimachinery v0.24.3
k8s.io/client-go v0.24.3
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c
sigs.k8s.io/kustomize/api v0.11.4
sigs.k8s.io/kustomize/kyaml v0.13.6
sigs.k8s.io/yaml v1.3.0
@@ -52,8 +52,8 @@ require (
cloud.google.com/go/grafeas v0.2.0 // indirect
github.com/Azure/azure-sdk-for-go v66.0.0+incompatible // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
github.com/Azure/go-autorest/autorest v0.11.24 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
@@ -96,7 +96,7 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/elliotchance/orderedmap v1.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/emirpasic/gods v1.12.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/ghodss/yaml v1.0.0 // indirect
@@ -181,11 +181,11 @@ require (
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiextensions-apiserver v0.24.2 // indirect
k8s.io/klog/v2 v2.70.1 // indirect
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
k8s.io/klog/v2 v2.60.1 // indirect
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
sigs.k8s.io/controller-runtime v0.12.3 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
)
replace github.com/libgit2/git2go/v33 => ./git2go

51
go.sum
View File

@@ -78,22 +78,19 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest v0.11.24 h1:1fIGgHKqVm54KIPT+q8Zmd1QlVsmHqeUGso5qm2BqqE=
github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=
github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg=
github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
@@ -274,9 +271,8 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg
github.com/elliotchance/orderedmap v1.5.0 h1:1IsExUsjv5XNBD3ZdC7jkAAqLWOOKdbPTmkHx63OsBg=
github.com/elliotchance/orderedmap v1.5.0/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog=
@@ -587,12 +583,12 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubescape/go-logger v0.0.6 h1:ynhAmwrz0O7Jtqq1CdmCZUrKveji25hVP+B/FAb3QrA=
github.com/kubescape/go-logger v0.0.6/go.mod h1:DnVWEvC90LFY1nNMaNo6nBVOcqkLMK3S0qzXP1fzRvI=
github.com/kubescape/k8s-interface v0.0.89 h1:OtlvZosHpjlbHfsilfQk2wRbuBnxwF0e+WZX6GbkfLU=
github.com/kubescape/k8s-interface v0.0.89/go.mod h1:pgFRs20mHiavf6+fFWY7h/f8HuKlwuZwirvjxiKJlu0=
github.com/kubescape/k8s-interface v0.0.84 h1:k7YzpQ3SaN+bJCtpXzMj60WWIK9RkQQrU8dFQutr3LA=
github.com/kubescape/k8s-interface v0.0.84/go.mod h1:ihX96yqar+xogHl45mFE8zT9DLI06iy7XQPAP+j5KJE=
github.com/kubescape/opa-utils v0.0.200 h1:7EhE9FTabzkUxicvxdchXuaTWW0J2mFj04vK4jTrxN0=
github.com/kubescape/opa-utils v0.0.200/go.mod h1:rDC3PANuk8gU5lSDO/WPFTluypBQ+/6qiuZLye+slYg=
github.com/kubescape/rbac-utils v0.0.19 h1:7iydgVxlMLW15MgHORfMBMqNj9jHtFGACd744fdtrFs=
github.com/kubescape/rbac-utils v0.0.19/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ=
github.com/kubescape/rbac-utils v0.0.17 h1:B78kjlTKqjYK/PXwmi4GPysHsFxIwVz1KFb4+IGT29w=
github.com/kubescape/rbac-utils v0.0.17/go.mod h1:pBwjpcrVeuH/no+DiCZWvlhYtCDzd3U0o/hEZKi+eM8=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -676,11 +672,10 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/open-policy-agent/opa v0.45.0 h1:P5nuhVRtR+e58fk3CMMbiqr6ZFyWQPNOC3otsorGsFs=
github.com/open-policy-agent/opa v0.45.0/go.mod h1:/OnsYljNEWJ6DXeFOOnoGn8CvwZGMUS4iRqzYdJvmBI=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -1533,17 +1528,17 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg=
k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ=
k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI=
k8s.io/api v0.24.3 h1:tt55QEmKd6L2k5DP6G/ZzdMQKvG5ro4H4teClqm0sTY=
k8s.io/api v0.24.3/go.mod h1:elGR/XSZrS7z7cSZPzVWaycpJuGIw57j9b95/1PdJNI=
k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k=
k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ=
k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc=
k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo=
k8s.io/apimachinery v0.24.3 h1:hrFiNSA2cBZqllakVYyH/VyEh4B581bQRmqATJSeQTg=
k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI=
k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30=
k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0=
k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA=
k8s.io/client-go v0.24.3 h1:Nl1840+6p4JqkFWEW2LnMKU667BUxw03REfLAVhuKQY=
k8s.io/client-go v0.24.3/go.mod h1:AAovolf5Z9bY1wIg2FZ8LPQlEdKHjLI7ZD4rw920BJw=
k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -1551,17 +1546,15 @@ k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAE
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ=
k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c h1:hFZO68mv/0xe8+V0gRT9BAq3/31cKjjeVv4nScriuBk=
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
@@ -1569,17 +1562,15 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio=
sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.11.4 h1:/0Mr3kfBBNcNPOW5Qwk/3eb8zkswCwnqQxxKtmrTkRo=
sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI=
sigs.k8s.io/kustomize/kyaml v0.13.6 h1:eF+wsn4J7GOAXlvajv6OknSunxpcOBQQqsnPxObtkGs=
sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

View File

@@ -56,7 +56,7 @@ def main():
if client_name:
ldflags += " -X {}={}".format(client_var, client_name)
build_command = ["go", "build", "-buildmode=pie", "-tags=static", "-o", ks_file, "-ldflags" ,ldflags]
build_command = ["go", "build", "-tags=static", "-o", ks_file, "-ldflags" ,ldflags]
print("Building kubescape and saving here: {}".format(ks_file))
print("Build command: {}".format(" ".join(build_command)))

View File

@@ -14,7 +14,7 @@ require (
github.com/kubescape/kubescape/v2 v2.0.0-00010101000000-000000000000
github.com/kubescape/opa-utils v0.0.200
github.com/stretchr/testify v1.8.0
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c
)
require (
@@ -24,8 +24,8 @@ require (
cloud.google.com/go/grafeas v0.2.0 // indirect
github.com/Azure/azure-sdk-for-go v66.0.0+incompatible // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
github.com/Azure/go-autorest/autorest v0.11.24 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
@@ -73,7 +73,7 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/elliotchance/orderedmap v1.5.0 // indirect
github.com/emicklei/go-restful/v3 v3.8.0 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/emirpasic/gods v1.12.0 // indirect
github.com/enescakir/emoji v1.0.0 // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
@@ -119,8 +119,8 @@ require (
github.com/json-iterator/go v1.1.12 // indirect
github.com/jung-kurt/gofpdf v1.16.2 // indirect
github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 // indirect
github.com/kubescape/k8s-interface v0.0.89 // indirect
github.com/kubescape/rbac-utils v0.0.19 // indirect
github.com/kubescape/k8s-interface v0.0.84 // indirect
github.com/kubescape/rbac-utils v0.0.17 // indirect
github.com/libgit2/git2go/v33 v33.0.9 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
@@ -187,17 +187,17 @@ require (
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
helm.sh/helm/v3 v3.9.0 // indirect
k8s.io/api v0.25.3 // indirect
k8s.io/api v0.24.3 // indirect
k8s.io/apiextensions-apiserver v0.24.2 // indirect
k8s.io/apimachinery v0.25.3 // indirect
k8s.io/client-go v0.25.3 // indirect
k8s.io/klog/v2 v2.70.1 // indirect
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
k8s.io/apimachinery v0.24.3 // indirect
k8s.io/client-go v0.24.3 // indirect
k8s.io/klog/v2 v2.60.1 // indirect
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
sigs.k8s.io/controller-runtime v0.12.3 // indirect
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/kustomize/api v0.11.4 // indirect
sigs.k8s.io/kustomize/kyaml v0.13.6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

View File

@@ -78,22 +78,19 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest v0.11.24 h1:1fIGgHKqVm54KIPT+q8Zmd1QlVsmHqeUGso5qm2BqqE=
github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A=
github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg=
github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5 h1:0W/yGmFdTIT77fvdlGZ0LMISoLHFJ7Tx4U0yeB+uFs4=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw=
github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU=
github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk=
github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE=
github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac=
@@ -276,9 +273,8 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg
github.com/elliotchance/orderedmap v1.5.0 h1:1IsExUsjv5XNBD3ZdC7jkAAqLWOOKdbPTmkHx63OsBg=
github.com/elliotchance/orderedmap v1.5.0/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/enescakir/emoji v1.0.0 h1:W+HsNql8swfCQFtioDGDHCHri8nudlK1n5p2rHCJoog=
@@ -643,12 +639,12 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubescape/go-logger v0.0.6 h1:ynhAmwrz0O7Jtqq1CdmCZUrKveji25hVP+B/FAb3QrA=
github.com/kubescape/go-logger v0.0.6/go.mod h1:DnVWEvC90LFY1nNMaNo6nBVOcqkLMK3S0qzXP1fzRvI=
github.com/kubescape/k8s-interface v0.0.89 h1:OtlvZosHpjlbHfsilfQk2wRbuBnxwF0e+WZX6GbkfLU=
github.com/kubescape/k8s-interface v0.0.89/go.mod h1:pgFRs20mHiavf6+fFWY7h/f8HuKlwuZwirvjxiKJlu0=
github.com/kubescape/k8s-interface v0.0.84 h1:k7YzpQ3SaN+bJCtpXzMj60WWIK9RkQQrU8dFQutr3LA=
github.com/kubescape/k8s-interface v0.0.84/go.mod h1:ihX96yqar+xogHl45mFE8zT9DLI06iy7XQPAP+j5KJE=
github.com/kubescape/opa-utils v0.0.200 h1:7EhE9FTabzkUxicvxdchXuaTWW0J2mFj04vK4jTrxN0=
github.com/kubescape/opa-utils v0.0.200/go.mod h1:rDC3PANuk8gU5lSDO/WPFTluypBQ+/6qiuZLye+slYg=
github.com/kubescape/rbac-utils v0.0.19 h1:7iydgVxlMLW15MgHORfMBMqNj9jHtFGACd744fdtrFs=
github.com/kubescape/rbac-utils v0.0.19/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ=
github.com/kubescape/rbac-utils v0.0.17 h1:B78kjlTKqjYK/PXwmi4GPysHsFxIwVz1KFb4+IGT29w=
github.com/kubescape/rbac-utils v0.0.17/go.mod h1:pBwjpcrVeuH/no+DiCZWvlhYtCDzd3U0o/hEZKi+eM8=
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -739,11 +735,10 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo/v2 v2.1.6 h1:Fx2POJZfKRQcM1pH49qSZiYeu319wji004qX+GDovrU=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q=
github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
github.com/open-policy-agent/opa v0.45.0 h1:P5nuhVRtR+e58fk3CMMbiqr6ZFyWQPNOC3otsorGsFs=
github.com/open-policy-agent/opa v0.45.0/go.mod h1:/OnsYljNEWJ6DXeFOOnoGn8CvwZGMUS4iRqzYdJvmBI=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
@@ -1624,17 +1619,17 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg=
k8s.io/api v0.25.3 h1:Q1v5UFfYe87vi5H7NU0p4RXC26PPMT8KOpr1TLQbCMQ=
k8s.io/api v0.25.3/go.mod h1:o42gKscFrEVjHdQnyRenACrMtbuJsVdP+WVjqejfzmI=
k8s.io/api v0.24.3 h1:tt55QEmKd6L2k5DP6G/ZzdMQKvG5ro4H4teClqm0sTY=
k8s.io/api v0.24.3/go.mod h1:elGR/XSZrS7z7cSZPzVWaycpJuGIw57j9b95/1PdJNI=
k8s.io/apiextensions-apiserver v0.24.2 h1:/4NEQHKlEz1MlaK/wHT5KMKC9UKYz6NZz6JE6ov4G6k=
k8s.io/apiextensions-apiserver v0.24.2/go.mod h1:e5t2GMFVngUEHUd0wuCJzw8YDwZoqZfJiGOW6mm2hLQ=
k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/apimachinery v0.25.3 h1:7o9ium4uyUOM76t6aunP0nZuex7gDf8VGwkR5RcJnQc=
k8s.io/apimachinery v0.25.3/go.mod h1:jaF9C/iPNM1FuLl7Zuy5b9v+n35HGSh6AQ4HYRkCqwo=
k8s.io/apimachinery v0.24.3 h1:hrFiNSA2cBZqllakVYyH/VyEh4B581bQRmqATJSeQTg=
k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM=
k8s.io/apiserver v0.24.2/go.mod h1:pSuKzr3zV+L+MWqsEo0kHHYwCo77AT5qXbFXP2jbvFI=
k8s.io/client-go v0.24.2/go.mod h1:zg4Xaoo+umDsfCWr4fCnmLEtQXyCNXCvJuSsglNcV30=
k8s.io/client-go v0.25.3 h1:oB4Dyl8d6UbfDHD8Bv8evKylzs3BXzzufLiO27xuPs0=
k8s.io/client-go v0.25.3/go.mod h1:t39LPczAIMwycjcXkVc+CB+PZV69jQuNx4um5ORDjQA=
k8s.io/client-go v0.24.3 h1:Nl1840+6p4JqkFWEW2LnMKU667BUxw03REfLAVhuKQY=
k8s.io/client-go v0.24.3/go.mod h1:AAovolf5Z9bY1wIg2FZ8LPQlEdKHjLI7ZD4rw920BJw=
k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w=
k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -1642,17 +1637,15 @@ k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAE
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc=
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ=
k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU=
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4=
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c h1:hFZO68mv/0xe8+V0gRT9BAq3/31cKjjeVv4nScriuBk=
k8s.io/utils v0.0.0-20220706174534-f6158b442e7c/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
@@ -1660,17 +1653,15 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
sigs.k8s.io/controller-runtime v0.12.3 h1:FCM8xeY/FI8hoAfh/V4XbbYMY20gElh9yh+A98usMio=
sigs.k8s.io/controller-runtime v0.12.3/go.mod h1:qKsk4WE6zW2Hfj0G4v10EnNB2jMG1C+NTb8h+DwCoU0=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kustomize/api v0.11.4 h1:/0Mr3kfBBNcNPOW5Qwk/3eb8zkswCwnqQxxKtmrTkRo=
sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI=
sigs.k8s.io/kustomize/kyaml v0.13.6 h1:eF+wsn4J7GOAXlvajv6OknSunxpcOBQQqsnPxObtkGs=
sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

View File

@@ -3,7 +3,7 @@ package v1
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"sync"
@@ -124,7 +124,7 @@ func getScanParamsFromRequest(r *http.Request, scanID string) (*scanRequestParam
return scanRequestParams, fmt.Errorf("failed to parse query params, reason: %s", err.Error())
}
readBuffer, err := io.ReadAll(r.Body)
readBuffer, err := ioutil.ReadAll(r.Body)
if err != nil {
// handler.writeError(w, fmt.Errorf("failed to read request body, reason: %s", err.Error()), scanID)
return scanRequestParams, fmt.Errorf("failed to read request body, reason: %s", err.Error())

View File

@@ -22,6 +22,7 @@ def run(kubescape_exec:str):
test_command(command=[kubescape_exec, "scan", "framework"])
test_command(command=[kubescape_exec, "scan", "control"])
test_command(command=[kubescape_exec, "submit", "results"])
test_command(command=[kubescape_exec, "submit", "rbac"])
print("Done testing commands")

View File

@@ -13,15 +13,15 @@ def scan_all(kubescape_exec: str):
def scan_control_name(kubescape_exec: str):
return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'HostPath mount', all_files, "--enable-host-scan=false"])
return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'Allowed hostPath', all_files, "--enable-host-scan=false"])
def scan_control_id(kubescape_exec: str):
return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'C-0048', all_files, "--enable-host-scan=false"])
return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'C-0006', all_files, "--enable-host-scan=false"])
def scan_controls(kubescape_exec: str):
return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'HostPath mount,Allow privilege escalation', all_files, "--enable-host-scan=false"])
return smoke_utils.run_command(command=[kubescape_exec, "scan", "control", 'Allowed hostPath,Allow privilege escalation', all_files, "--enable-host-scan=false"])
def scan_framework(kubescape_exec: str):