Mirror of https://github.com/kubescape/kubescape.git (synced 2026-02-14 18:09:55 +00:00)
Compare commits: refactor-i ... fix-comman (41 commits)

Commit SHAs:
b0f65cce1d, 0e8b2f976d, f1d646ac97, 5f668037a7, c448c97463, da3bc8e8ea, 2fce139a9a, 85d2f5c250, c3771eec7e, 76d2154152, fa5e7fef23, 38d2696058, e9a8ffbda9, 0d76fffa48, 218c77f3ae, 89fd7eb439, 8079f9ae7d, f9a26b7a95, 663401d908, 926790f49d, 566b7c29c1, af5cdefc5f, 36b7b8e2ac, 17c52bd0ae, e02086e90c, baf62887b9, 99fa81e411, f64200f42f, f72cb215d7, fa03a9dae3, 48516b891f, 252a564552, 30e5b9b57d, 7fcfa27d9a, 4b898b0075, f3665866af, a7989bbe76, 5ce69a750d, 2b61989073, be33054973, 4b9bd5f3ae
cmd/fix/fix.go (new file, 45 lines)

```go
package fix

import (
	"errors"

	"github.com/kubescape/kubescape/v2/core/meta"
	metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"

	"github.com/spf13/cobra"
)

var fixCmdExamples = `
Fix command is for fixing kubernetes manifest files based on a scan command output.
Use with caution, this command will change your files in-place.

# Fix kubernetes YAML manifest files based on a scan command output (output.json)
1) kubescape scan --format json --format-version v2 --output output.json
2) kubescape fix output.json
`

func GetFixCmd(ks meta.IKubescape) *cobra.Command {
	var fixInfo metav1.FixInfo

	fixCmd := &cobra.Command{
		Use:     "fix <report output file>",
		Short:   "Fix misconfiguration in files",
		Long:    ``,
		Example: fixCmdExamples,
		RunE: func(cmd *cobra.Command, args []string) error {
			if len(args) < 1 {
				return errors.New("report output file is required")
			}
			fixInfo.ReportFile = args[0]

			return ks.Fix(&fixInfo)
		},
	}

	fixCmd.PersistentFlags().BoolVar(&fixInfo.NoConfirm, "no-confirm", false, "No confirmation will be given to the user before applying the fix (default false)")
	fixCmd.PersistentFlags().BoolVar(&fixInfo.DryRun, "dry-run", false, "No changes will be applied (default false)")
	fixCmd.PersistentFlags().BoolVar(&fixInfo.SkipUserValues, "skip-user-values", true, "Changes which involve user-defined values will be skipped")

	return fixCmd
}
```
```diff
@@ -10,6 +10,7 @@ import (
 	"github.com/kubescape/kubescape/v2/cmd/config"
 	"github.com/kubescape/kubescape/v2/cmd/delete"
 	"github.com/kubescape/kubescape/v2/cmd/download"
+	"github.com/kubescape/kubescape/v2/cmd/fix"
 	"github.com/kubescape/kubescape/v2/cmd/list"
 	"github.com/kubescape/kubescape/v2/cmd/scan"
 	"github.com/kubescape/kubescape/v2/cmd/submit"
@@ -78,6 +79,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
 	rootCmd.AddCommand(version.GetVersionCmd())
 	rootCmd.AddCommand(config.GetConfigCmd(ks))
 	rootCmd.AddCommand(update.GetUpdateCmd())
+	rootCmd.AddCommand(fix.GetFixCmd(ks))

 	return rootCmd
 }
```
```diff
@@ -419,7 +419,7 @@ func metadataGitLocal(input string) (*reporthandlingv2.RepoContextMetadata, error) {
 		Date:          commit.Committer.Date,
 		CommitterName: commit.Committer.Name,
 	}
-	context.LocalRootPath = getAbsPath(input)
+	context.LocalRootPath, _ = gitParser.GetRootDir()

 	return context, nil
 }
```
core/core/fix.go (new file, 72 lines)

```go
package core

import (
	"fmt"
	"strings"

	logger "github.com/kubescape/go-logger"
	metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"

	"github.com/kubescape/kubescape/v2/core/pkg/fixhandler"
)

const NoChangesApplied = "No changes were applied."
const NoResourcesToFix = "No issues to fix."
const ConfirmationQuestion = "Would you like to apply the changes to the files above? [y|n]: "

func (ks *Kubescape) Fix(fixInfo *metav1.FixInfo) error {
	logger.L().Info("Reading report file...")
	handler, err := fixhandler.NewFixHandler(fixInfo)
	if err != nil {
		return err
	}

	resourcesToFix := handler.PrepareResourcesToFix()

	if len(resourcesToFix) == 0 {
		logger.L().Info(NoResourcesToFix)
		return nil
	}

	handler.PrintExpectedChanges(resourcesToFix)

	if fixInfo.DryRun {
		logger.L().Info(NoChangesApplied)
		return nil
	}

	if !fixInfo.NoConfirm && !userConfirmed() {
		logger.L().Info(NoChangesApplied)
		return nil
	}

	updatedFilesCount, errors := handler.ApplyChanges(resourcesToFix)
	logger.L().Info(fmt.Sprintf("Fixed resources in %d files.", updatedFilesCount))

	if len(errors) > 0 {
		for _, err := range errors {
			logger.L().Error(err.Error())
		}
		return fmt.Errorf("Failed to fix some resources, check the logs for more details")
	}

	return nil
}

func userConfirmed() bool {
	var input string

	for {
		fmt.Printf(ConfirmationQuestion)
		if _, err := fmt.Scanln(&input); err != nil {
			continue
		}

		input = strings.ToLower(input)
		if input == "y" || input == "yes" {
			return true
		} else if input == "n" || input == "no" {
			return false
		}
	}
}
```
core/meta/datastructures/v1/fix.go (new file, 8 lines)

```go
package v1

type FixInfo struct {
	ReportFile     string // path to report file (mandatory)
	NoConfirm      bool   // if true, no confirmation will be given to the user before applying the fix
	SkipUserValues bool   // if true, user values will not be changed
	DryRun         bool   // if true, no changes will be applied
}
```
```diff
@@ -25,4 +25,7 @@ type IKubescape interface {

 	// delete
 	DeleteExceptions(deleteexceptions *metav1.DeleteExceptions) error
+
+	// fix
+	Fix(fixInfo *metav1.FixInfo) error
 }
```
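For orientation, here is a minimal sketch (not part of the diff) of how the new API surface fits together: a caller builds a `FixInfo` and hands it to any `meta.IKubescape` implementation, which is what the cobra command above does with its flags. How the `ks` implementation is obtained is assumed and left out; only `FixInfo` and `Fix` come from the changes in this changeset.

```go
// Illustrative only: drives the new Fix entry point through the interface.
package example

import (
	"github.com/kubescape/kubescape/v2/core/meta"
	metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
)

func runFix(ks meta.IKubescape, reportFile string) error {
	fixInfo := metav1.FixInfo{
		ReportFile:     reportFile, // JSON report from `kubescape scan --format json --format-version v2`
		DryRun:         true,       // preview the changes without rewriting any file
		SkipUserValues: true,       // leave values that need user input (YOUR_* placeholders) untouched
	}
	return ks.Fix(&fixInfo)
}
```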
core/pkg/fixhandler/datastructures.go (new file, 63 lines)

```go
package fixhandler

import (
	"github.com/armosec/armoapi-go/armotypes"
	metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
	"github.com/kubescape/opa-utils/reporthandling"
	reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
	"gopkg.in/yaml.v3"
)

// FixHandler is a struct that holds the information of the report to be fixed
type FixHandler struct {
	fixInfo       *metav1.FixInfo
	reportObj     *reporthandlingv2.PostureReport
	localBasePath string
}

// ResourceFixInfo is a struct that holds the information about the resource that needs to be fixed
type ResourceFixInfo struct {
	YamlExpressions map[string]*armotypes.FixPath
	Resource        *reporthandling.Resource
	FilePath        string
	DocumentIndex   int
}

// NodeInfo holds extra information about the node
type nodeInfo struct {
	node   *yaml.Node
	parent *yaml.Node

	// position of the node among siblings
	index int
}

// FixInfoMetadata holds the arguments "getFixInfo" function needs to pass to the
// functions it uses
type fixInfoMetadata struct {
	originalList        *[]nodeInfo
	fixedList           *[]nodeInfo
	originalListTracker int
	fixedListTracker    int
	contentToAdd        *[]contentToAdd
	linesToRemove       *[]linesToRemove
}

// ContentToAdd holds the information about where to insert the new changes in the existing yaml file
type contentToAdd struct {
	// Line where the fix should be applied to
	line int
	// Content is a string representation of the YAML node that describes a suggested fix
	content string
}

// LinesToRemove holds the line numbers to remove from the existing yaml file
type linesToRemove struct {
	startLine int
	endLine   int
}

type fileFixInfo struct {
	contentsToAdd *[]contentToAdd
	linesToRemove *[]linesToRemove
}
```
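As a rough illustration of how these structures are meant to be filled (not part of the diff; the line numbers and YAML content below are invented, and the exact interpretation of `line` and `content` is decided by the yamlhandler code later in this changeset):

```go
package fixhandler

// demoFileFixInfo hand-builds a fileFixInfo with one entry of each kind,
// just to show the shape of the data a single file-level patch carries.
// Values are hypothetical and only illustrate the struct fields.
func demoFileFixInfo() fileFixInfo {
	adds := []contentToAdd{
		{
			line:    11,                                              // line the new content is anchored to
			content: "    securityContext:\n      runAsRoot: false\n", // rendered YAML for the suggested fix
		},
	}
	removes := []linesToRemove{
		{startLine: 12, endLine: 13}, // original lines superseded by the fix
	}
	return fileFixInfo{
		contentsToAdd: &adds,
		linesToRemove: &removes,
	}
}
```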
core/pkg/fixhandler/fixhandler.go (new file, 346 lines)

```go
package fixhandler

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"

	"github.com/armosec/armoapi-go/armotypes"
	metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"

	logger "github.com/kubescape/go-logger"
	"github.com/kubescape/opa-utils/objectsenvelopes"
	"github.com/kubescape/opa-utils/objectsenvelopes/localworkload"
	"github.com/kubescape/opa-utils/reporthandling"
	"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
	reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
	"github.com/mikefarah/yq/v4/pkg/yqlib"
	"gopkg.in/op/go-logging.v1"
)

const UserValuePrefix = "YOUR_"

func NewFixHandler(fixInfo *metav1.FixInfo) (*FixHandler, error) {
	jsonFile, err := os.Open(fixInfo.ReportFile)
	if err != nil {
		return nil, err
	}
	defer jsonFile.Close()
	byteValue, _ := ioutil.ReadAll(jsonFile)

	var reportObj reporthandlingv2.PostureReport
	if err = json.Unmarshal(byteValue, &reportObj); err != nil {
		return nil, err
	}

	if err = isSupportedScanningTarget(&reportObj); err != nil {
		return nil, err
	}

	localPath := getLocalPath(&reportObj)
	if _, err = os.Stat(localPath); err != nil {
		return nil, err
	}

	backendLoggerLeveled := logging.AddModuleLevel(logging.NewLogBackend(logger.L().GetWriter(), "", 0))
	backendLoggerLeveled.SetLevel(logging.ERROR, "")
	yqlib.GetLogger().SetBackend(backendLoggerLeveled)

	return &FixHandler{
		fixInfo:       fixInfo,
		reportObj:     &reportObj,
		localBasePath: localPath,
	}, nil
}

func isSupportedScanningTarget(report *reporthandlingv2.PostureReport) error {
	if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.GitLocal || report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.Directory {
		return nil
	}

	return fmt.Errorf("unsupported scanning target. Only local git and directory scanning targets are supported")
}

func getLocalPath(report *reporthandlingv2.PostureReport) string {
	if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.GitLocal {
		return report.Metadata.ContextMetadata.RepoContextMetadata.LocalRootPath
	}

	if report.Metadata.ScanMetadata.ScanningTarget == reporthandlingv2.Directory {
		return report.Metadata.ContextMetadata.DirectoryContextMetadata.BasePath
	}

	return ""
}

func (h *FixHandler) buildResourcesMap() map[string]*reporthandling.Resource {
	resourceIdToRawResource := make(map[string]*reporthandling.Resource)
	for i := range h.reportObj.Resources {
		resourceIdToRawResource[h.reportObj.Resources[i].GetID()] = &h.reportObj.Resources[i]
	}
	for i := range h.reportObj.Results {
		if h.reportObj.Results[i].RawResource == nil {
			continue
		}
		resourceIdToRawResource[h.reportObj.Results[i].RawResource.GetID()] = h.reportObj.Results[i].RawResource
	}

	return resourceIdToRawResource
}

func (h *FixHandler) getPathFromRawResource(obj map[string]interface{}) string {
	if localworkload.IsTypeLocalWorkload(obj) {
		localwork := localworkload.NewLocalWorkload(obj)
		return localwork.GetPath()
	} else if objectsenvelopes.IsTypeRegoResponseVector(obj) {
		regoResponseVectorObject := objectsenvelopes.NewRegoResponseVectorObject(obj)
		relatedObjects := regoResponseVectorObject.GetRelatedObjects()
		for _, relatedObject := range relatedObjects {
			if localworkload.IsTypeLocalWorkload(relatedObject.GetObject()) {
				return relatedObject.(*localworkload.LocalWorkload).GetPath()
			}
		}
	}

	return ""
}

func (h *FixHandler) PrepareResourcesToFix() []ResourceFixInfo {
	resourceIdToResource := h.buildResourcesMap()

	resourcesToFix := make([]ResourceFixInfo, 0)
	for _, result := range h.reportObj.Results {
		if !result.GetStatus(nil).IsFailed() {
			continue
		}

		resourceID := result.ResourceID
		resourceObj := resourceIdToResource[resourceID]
		resourcePath := h.getPathFromRawResource(resourceObj.GetObject())
		if resourcePath == "" {
			continue
		}

		if resourceObj.Source == nil || resourceObj.Source.FileType != reporthandling.SourceTypeYaml {
			continue
		}

		relativePath, documentIndex, err := h.getFilePathAndIndex(resourcePath)
		if err != nil {
			logger.L().Error("Skipping invalid resource path: " + resourcePath)
			continue
		}

		absolutePath := path.Join(h.localBasePath, relativePath)
		if _, err := os.Stat(absolutePath); err != nil {
			logger.L().Error("Skipping missing file: " + absolutePath)
			continue
		}

		rfi := ResourceFixInfo{
			FilePath:        absolutePath,
			Resource:        resourceObj,
			YamlExpressions: make(map[string]*armotypes.FixPath, 0),
			DocumentIndex:   documentIndex,
		}

		for i := range result.AssociatedControls {
			if result.AssociatedControls[i].GetStatus(nil).IsFailed() {
				rfi.addYamlExpressionsFromResourceAssociatedControl(documentIndex, &result.AssociatedControls[i], h.fixInfo.SkipUserValues)
			}
		}

		if len(rfi.YamlExpressions) > 0 {
			resourcesToFix = append(resourcesToFix, rfi)
		}
	}

	return resourcesToFix
}

func (h *FixHandler) PrintExpectedChanges(resourcesToFix []ResourceFixInfo) {
	var sb strings.Builder
	sb.WriteString("The following changes will be applied:\n")

	for _, resourceFixInfo := range resourcesToFix {
		sb.WriteString(fmt.Sprintf("File: %s\n", resourceFixInfo.FilePath))
		sb.WriteString(fmt.Sprintf("Resource: %s\n", resourceFixInfo.Resource.GetName()))
		sb.WriteString(fmt.Sprintf("Kind: %s\n", resourceFixInfo.Resource.GetKind()))
		sb.WriteString("Changes:\n")

		i := 1
		for _, fixPath := range resourceFixInfo.YamlExpressions {
			sb.WriteString(fmt.Sprintf("\t%d) %s = %s\n", i, (*fixPath).Path, (*fixPath).Value))
			i++
		}
		sb.WriteString("\n------\n")
	}

	logger.L().Info(sb.String())
}

func (h *FixHandler) ApplyChanges(resourcesToFix []ResourceFixInfo) (int, []error) {
	updatedFiles := make(map[string]bool)
	errors := make([]error, 0)

	fileYamlExpressions := h.getFileYamlExpressions(resourcesToFix)

	for filepath, yamlExpression := range fileYamlExpressions {
		fileAsString, err := getFileString(filepath)
		if err != nil {
			errors = append(errors, err)
			continue
		}

		fixedYamlString, err := h.ApplyFixToContent(fileAsString, yamlExpression)
		if err != nil {
			errors = append(errors, fmt.Errorf("Failed to fix file %s: %w ", filepath, err))
			continue
		} else {
			updatedFiles[filepath] = true
		}

		err = writeFixesToFile(filepath, fixedYamlString)
		if err != nil {
			logger.L().Error(fmt.Sprintf("Failed to write fixes to file %s, %v", filepath, err.Error()))
			errors = append(errors, err)
		}
	}

	return len(updatedFiles), errors
}

func (h *FixHandler) getFilePathAndIndex(filePathWithIndex string) (filePath string, documentIndex int, err error) {
	splittedPath := strings.Split(filePathWithIndex, ":")
	if len(splittedPath) <= 1 {
		return "", 0, fmt.Errorf("expected to find ':' in file path")
	}

	filePath = splittedPath[0]
	if documentIndex, err := strconv.Atoi(splittedPath[1]); err != nil {
		return "", 0, err
	} else {
		return filePath, documentIndex, nil
	}
}

func (h *FixHandler) ApplyFixToContent(yamlAsString, yamlExpression string) (fixedString string, err error) {
	yamlLines := strings.Split(yamlAsString, "\n")

	originalRootNodes, err := decodeDocumentRoots(yamlAsString)
	if err != nil {
		return "", err
	}

	fixedRootNodes, err := getFixedNodes(yamlAsString, yamlExpression)
	if err != nil {
		return "", err
	}

	fileFixInfo := getFixInfo(originalRootNodes, fixedRootNodes)

	fixedYamlLines := getFixedYamlLines(yamlLines, fileFixInfo)

	fixedString = getStringFromSlice(fixedYamlLines)

	return fixedString, nil
}

func (h *FixHandler) getFileYamlExpressions(resourcesToFix []ResourceFixInfo) map[string]string {
	fileYamlExpressions := make(map[string]string, 0)
	for _, resourceToFix := range resourcesToFix {
		singleExpression := reduceYamlExpressions(&resourceToFix)
		resourceFilePath := resourceToFix.FilePath

		if _, pathExistsInMap := fileYamlExpressions[resourceFilePath]; !pathExistsInMap {
			fileYamlExpressions[resourceFilePath] = singleExpression
		} else {
			fileYamlExpressions[resourceFilePath] = joinStrings(fileYamlExpressions[resourceFilePath], " | ", singleExpression)
		}
	}

	return fileYamlExpressions
}

func (rfi *ResourceFixInfo) addYamlExpressionsFromResourceAssociatedControl(documentIndex int, ac *resourcesresults.ResourceAssociatedControl, skipUserValues bool) {
	for _, rule := range ac.ResourceAssociatedRules {
		if !rule.GetStatus(nil).IsFailed() {
			continue
		}

		for _, rulePaths := range rule.Paths {
			if rulePaths.FixPath.Path == "" {
				continue
			}
			if strings.HasPrefix(rulePaths.FixPath.Value, UserValuePrefix) && skipUserValues {
				continue
			}

			yamlExpression := fixPathToValidYamlExpression(rulePaths.FixPath.Path, rulePaths.FixPath.Value, documentIndex)
			rfi.YamlExpressions[yamlExpression] = &rulePaths.FixPath
		}
	}
}

// reduceYamlExpressions reduces the number of yaml expressions to a single one
func reduceYamlExpressions(resource *ResourceFixInfo) string {
	expressions := make([]string, 0, len(resource.YamlExpressions))
	for expr := range resource.YamlExpressions {
		expressions = append(expressions, expr)
	}

	return strings.Join(expressions, " | ")
}

func fixPathToValidYamlExpression(fixPath, value string, documentIndexInYaml int) string {
	isStringValue := true
	if _, err := strconv.ParseBool(value); err == nil {
		isStringValue = false
	} else if _, err := strconv.ParseFloat(value, 64); err == nil {
		isStringValue = false
	} else if _, err := strconv.Atoi(value); err == nil {
		isStringValue = false
	}

	// Strings should be quoted
	if isStringValue {
		value = fmt.Sprintf("\"%s\"", value)
	}

	// select document index and add a dot for the root node
	return fmt.Sprintf("select(di==%d).%s |= %s", documentIndexInYaml, fixPath, value)
}

func joinStrings(inputStrings ...string) string {
	return strings.Join(inputStrings, "")
}

func getFileString(filepath string) (string, error) {
	bytes, err := ioutil.ReadFile(filepath)
	if err != nil {
		return "", fmt.Errorf("Error reading file %s", filepath)
	}

	return string(bytes), nil
}

func writeFixesToFile(filepath, content string) error {
	err := ioutil.WriteFile(filepath, []byte(content), 0644)
	if err != nil {
		return fmt.Errorf("Error writing fixes to file: %w", err)
	}

	return nil
}
```
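To make the expression building above concrete, here is a small illustrative helper (not part of the diff; it would have to live inside `package fixhandler`, since `fixPathToValidYamlExpression` is unexported). The path and value are invented for the example, not taken from a real report:

```go
package fixhandler

import "fmt"

// demoFixPathExpression shows how a failed control's FixPath and document
// index turn into the yq expression that ApplyFixToContent later evaluates.
func demoFixPathExpression() {
	expr := fixPathToValidYamlExpression(
		"spec.containers[0].securityContext.allowPrivilegeEscalation", // FixPath.Path
		"false", // FixPath.Value parses as a bool, so it stays unquoted
		0,       // index of the document inside the multi-document YAML file
	)
	// Prints: select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false
	fmt.Println(expr)
}
```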
core/pkg/fixhandler/fixhandler_test.go (new file, 248 lines)

```go
package fixhandler

import (
	"os"
	"path/filepath"
	"testing"

	logger "github.com/kubescape/go-logger"
	metav1 "github.com/kubescape/kubescape/v2/core/meta/datastructures/v1"
	reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
	"github.com/mikefarah/yq/v4/pkg/yqlib"
	"github.com/stretchr/testify/assert"
	"gopkg.in/op/go-logging.v1"
)

type indentationTestCase struct {
	inputFile      string
	yamlExpression string
	expectedFile   string
}

func NewFixHandlerMock() (*FixHandler, error) {
	backendLoggerLeveled := logging.AddModuleLevel(logging.NewLogBackend(logger.L().GetWriter(), "", 0))
	backendLoggerLeveled.SetLevel(logging.ERROR, "")
	yqlib.GetLogger().SetBackend(backendLoggerLeveled)

	return &FixHandler{
		fixInfo:       &metav1.FixInfo{},
		reportObj:     &reporthandlingv2.PostureReport{},
		localBasePath: "",
	}, nil
}

func getTestdataPath() string {
	currentDir, _ := os.Getwd()
	return filepath.Join(currentDir, "testdata")
}

func getTestCases() []indentationTestCase {
	indentationTestCases := []indentationTestCase{
		// Insertion Scenarios
		{
			"inserts/tc-01-00-input-mapping-insert-mapping.yaml",
			"select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false",
			"inserts/tc-01-01-expected.yaml",
		},
		{
			"inserts/tc-02-00-input-mapping-insert-mapping-with-list.yaml",
			"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"NET_RAW\"]",
			"inserts/tc-02-01-expected.yaml",
		},
		{
			"inserts/tc-03-00-input-list-append-scalar.yaml",
			"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"SYS_ADM\"]",
			"inserts/tc-03-01-expected.yaml",
		},
		{
			"inserts/tc-04-00-input-multiple-inserts.yaml",

			`select(di==0).spec.template.spec.securityContext.allowPrivilegeEscalation |= false |
			select(di==0).spec.template.spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"] |
			select(di==0).spec.template.spec.containers[0].securityContext.seccompProfile.type |= "RuntimeDefault" |
			select(di==0).spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation |= false |
			select(di==0).spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem |= true`,

			"inserts/tc-04-01-expected.yaml",
		},
		{
			"inserts/tc-05-00-input-comment-blank-line-single-insert.yaml",
			"select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false",
			"inserts/tc-05-01-expected.yaml",
		},
		{
			"inserts/tc-06-00-input-list-append-scalar-oneline.yaml",
			"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"SYS_ADM\"]",
			"inserts/tc-06-01-expected.yaml",
		},
		{
			"inserts/tc-07-00-input-multiple-documents.yaml",

			`select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false |
			select(di==1).spec.containers[0].securityContext.allowPrivilegeEscalation |= false`,

			"inserts/tc-07-01-expected.yaml",
		},
		{
			"inserts/tc-08-00-input-mapping-insert-mapping-indented.yaml",
			"select(di==0).spec.containers[0].securityContext.capabilities.drop += [\"NET_RAW\"]",
			"inserts/tc-08-01-expected.yaml",
		},
		{
			"inserts/tc-09-00-input-list-insert-new-mapping-indented.yaml",
			`select(di==0).spec.containers += {"name": "redis", "image": "redis"}`,
			"inserts/tc-09-01-expected.yaml",
		},
		{
			"inserts/tc-10-00-input-list-insert-new-mapping.yaml",
			`select(di==0).spec.containers += {"name": "redis", "image": "redis"}`,
			"inserts/tc-10-01-expected.yaml",
		},

		// Removal Scenarios
		{
			"removals/tc-01-00-input.yaml",
			"del(select(di==0).spec.containers[0].securityContext)",
			"removals/tc-01-01-expected.yaml",
		},
		{
			"removals/tc-02-00-input.yaml",
			"del(select(di==0).spec.containers[1])",
			"removals/tc-02-01-expected.yaml",
		},
		{
			"removals/tc-03-00-input.yaml",
			"del(select(di==0).spec.containers[0].securityContext.capabilities.drop[1])",
			"removals/tc-03-01-expected.yaml",
		},
		{
			"removals/tc-04-00-input.yaml",
			`del(select(di==0).spec.containers[0].securityContext) |
			del(select(di==1).spec.containers[1])`,
			"removals/tc-04-01-expected.yaml",
		},

		// Replace Scenarios
		{
			"replaces/tc-01-00-input.yaml",
			"select(di==0).spec.containers[0].securityContext.runAsRoot |= false",
			"replaces/tc-01-01-expected.yaml",
		},
		{
			"replaces/tc-02-00-input.yaml",
			`select(di==0).spec.containers[0].securityContext.capabilities.drop[0] |= "SYS_ADM" |
			select(di==0).spec.containers[0].securityContext.capabilities.add[0] |= "NET_RAW"`,
			"replaces/tc-02-01-expected.yaml",
		},

		// Hybrid Scenarios
		{
			"hybrids/tc-01-00-input.yaml",
			`del(select(di==0).spec.containers[0].securityContext) |
			select(di==0).spec.securityContext.runAsRoot |= false`,
			"hybrids/tc-01-01-expected.yaml",
		},
		{
			"hybrids/tc-02-00-input-indented-list.yaml",
			`del(select(di==0).spec.containers[0].securityContext) |
			select(di==0).spec.securityContext.runAsRoot |= false`,
			"hybrids/tc-02-01-expected.yaml",
		},
		{
			"hybrids/tc-03-00-input-comments.yaml",
			`del(select(di==0).spec.containers[0].securityContext) |
			select(di==0).spec.securityContext.runAsRoot |= false`,
			"hybrids/tc-03-01-expected.yaml",
		},
		{
			"hybrids/tc-04-00-input-separated-keys.yaml",
			`del(select(di==0).spec.containers[0].securityContext) |
			select(di==0).spec.securityContext.runAsRoot |= false`,
			"hybrids/tc-04-01-expected.yaml",
		},
	}

	return indentationTestCases
}

func TestApplyFixKeepsFormatting(t *testing.T) {
	testCases := getTestCases()

	for _, tc := range testCases {
		t.Run(tc.inputFile, func(t *testing.T) {
			getTestDataPath := func(filename string) string {
				currentDir, _ := os.Getwd()
				currentFile := "testdata/" + filename
				return filepath.Join(currentDir, currentFile)
			}

			input, _ := os.ReadFile(getTestDataPath(tc.inputFile))
			wantRaw, _ := os.ReadFile(getTestDataPath(tc.expectedFile))
			want := string(wantRaw)
			expression := tc.yamlExpression

			h, _ := NewFixHandlerMock()

			got, _ := h.ApplyFixToContent(string(input), expression)

			assert.Equalf(
				t, want, got,
				"Contents of the fixed file don't match the expectation.\n"+
					"Input file: %s\n\n"+
					"Got: <%s>\n\n"+
					"Want: <%s>",
				tc.inputFile, got, want,
			)
		},
		)
	}
}

func Test_fixPathToValidYamlExpression(t *testing.T) {
	type args struct {
		fixPath             string
		value               string
		documentIndexInYaml int
	}
	tests := []struct {
		name string
		args args
		want string
	}{
		{
			name: "fix path with boolean value",
			args: args{
				fixPath:             "spec.template.spec.containers[0].securityContext.privileged",
				value:               "true",
				documentIndexInYaml: 2,
			},
			want: "select(di==2).spec.template.spec.containers[0].securityContext.privileged |= true",
		},
		{
			name: "fix path with string value",
			args: args{
				fixPath:             "metadata.namespace",
				value:               "YOUR_NAMESPACE",
				documentIndexInYaml: 0,
			},
			want: "select(di==0).metadata.namespace |= \"YOUR_NAMESPACE\"",
		},
		{
			name: "fix path with number",
			args: args{
				fixPath:             "xxx.yyy",
				value:               "123",
				documentIndexInYaml: 0,
			},
			want: "select(di==0).xxx.yyy |= 123",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := fixPathToValidYamlExpression(tt.args.fixPath, tt.args.value, tt.args.documentIndexInYaml); got != tt.want {
				t.Errorf("fixPathToValidYamlExpression() = %v, want %v", got, tt.want)
			}
		})
	}
}
```
core/pkg/fixhandler/testdata/hybrids/tc-01-00-input.yaml (vendored, new file, 19 lines)

```yaml
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"

# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false


apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container
    image: nginx
    securityContext:
      runAsRoot: true
```

core/pkg/fixhandler/testdata/hybrids/tc-01-01-expected.yaml (vendored, new file, 19 lines)

```yaml
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"

# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false


apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container
    image: nginx
  securityContext:
    runAsRoot: false
```

core/pkg/fixhandler/testdata/hybrids/tc-02-00-input-indented-list.yaml (vendored, new file, 19 lines)

```yaml
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"

# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false


apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
    - name: nginx_container
      image: nginx
      securityContext:
        runAsRoot: true
```

core/pkg/fixhandler/testdata/hybrids/tc-02-01-expected.yaml (vendored, new file, 19 lines)

```yaml
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"

# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false


apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
    - name: nginx_container
      image: nginx
  securityContext:
    runAsRoot: false
```

core/pkg/fixhandler/testdata/hybrids/tc-03-00-input-comments.yaml (vendored, new file, 21 lines)

```yaml
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"

# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false


apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  # These are the container comments
  containers:
  # These are the first containers comments
  - name: nginx_container
    image: nginx
    securityContext:
      runAsRoot: true
```

core/pkg/fixhandler/testdata/hybrids/tc-03-01-expected.yaml (vendored, new file, 21 lines)

```yaml
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"

# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false


apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  # These are the container comments
  containers:
  # These are the first containers comments
  - name: nginx_container
    image: nginx
  securityContext:
    runAsRoot: false
```

core/pkg/fixhandler/testdata/hybrids/tc-04-00-input-separated-keys.yaml (vendored, new file, 21 lines)

```yaml
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"

# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false


apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container

    image: nginx

    securityContext:
      runAsRoot: true
```

core/pkg/fixhandler/testdata/hybrids/tc-04-01-expected.yaml (vendored, new file, 21 lines)

```yaml
# Fix to Apply:
# REMOVE:
# "del(select(di==0).spec.containers[0].securityContext)"

# INSERT:
# select(di==0).spec.securityContext.runAsRoot: false


apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container

    image: nginx
  securityContext:
    runAsRoot: false

```
core/pkg/fixhandler/testdata/inserts/tc-01-00-input-mapping-insert-mapping.yaml (vendored, new file, 12 lines)

```yaml
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"

apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container
    image: nginx
```

core/pkg/fixhandler/testdata/inserts/tc-01-01-expected.yaml (vendored, new file, 14 lines)

```yaml
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"

apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container
    image: nginx
    securityContext:
      allowPrivilegeEscalation: false
```

core/pkg/fixhandler/testdata/inserts/tc-02-00-input-mapping-insert-mapping-with-list.yaml (vendored, new file, 11 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]

apiVersion: v1
kind: Pod
metadata:
  name: insert_list
spec:
  containers:
  - name: nginx_container
    image: nginx
```

core/pkg/fixhandler/testdata/inserts/tc-02-01-expected.yaml (vendored, new file, 15 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]

apiVersion: v1
kind: Pod
metadata:
  name: insert_list
spec:
  containers:
  - name: nginx_container
    image: nginx
    securityContext:
      capabilities:
        drop:
        - NET_RAW
```

core/pkg/fixhandler/testdata/inserts/tc-03-00-input-list-append-scalar.yaml (vendored, new file, 15 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]

apiVersion: v1
kind: Pod
metadata:
  name: insert_list
spec:
  containers:
  - name: nginx_container
    image: nginx
    securityContext:
      capabilities:
        drop:
        - NET_RAW
```

core/pkg/fixhandler/testdata/inserts/tc-03-01-expected.yaml (vendored, new file, 16 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]

apiVersion: v1
kind: Pod
metadata:
  name: insert_list
spec:
  containers:
  - name: nginx_container
    image: nginx
    securityContext:
      capabilities:
        drop:
        - NET_RAW
        - SYS_ADM
```

core/pkg/fixhandler/testdata/inserts/tc-04-00-input-multiple-inserts.yaml (vendored, new file, 47 lines)

```yaml
# Fixes to Apply:
# 1) select(di==0).spec.template.spec.securityContext.allowPrivilegeEscalation = false
# 2) select(di==0).spec.template.spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
# 3) select(di==0).spec.template.spec.containers[0].securityContext.seccompProfile.type = RuntimeDefault
# 4) select(di==0).spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation |= false
# 5) select(di==0).spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem |= true

apiVersion: apps/v1
kind: Deployment
metadata:
  name: multiple_inserts
spec:
  selector:
    matchLabels:
      app: example_4
  template:
    metadata:
      labels:
        app: example_4
    spec:
      serviceAccountName: default
      terminationGracePeriodSeconds: 5
      containers:
      - name: example_4
        image: nginx
        ports:
        - containerPort: 3000
        env:
        - name: PORT
          value: "3000"
        resources:
          requests:
            cpu: 200m
            memory: 180Mi
          limits:
            cpu: 300m
            memory: 300Mi
        readinessProbe:
          initialDelaySeconds: 20
          periodSeconds: 15
          exec:
            command: ["/bin/grpc_health_probe", "-addr=:3000"]
        livenessProbe:
          initialDelaySeconds: 20
          periodSeconds: 15
          exec:
            command: ["/bin/grpc_health_probe", "-addr=:3000"]
```

core/pkg/fixhandler/testdata/inserts/tc-04-01-expected.yaml (vendored, new file, 57 lines)

```yaml
# Fixes to Apply:
# 1) select(di==0).spec.template.spec.securityContext.allowPrivilegeEscalation = false
# 2) select(di==0).spec.template.spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]
# 3) select(di==0).spec.template.spec.containers[0].securityContext.seccompProfile.type = RuntimeDefault
# 4) select(di==0).spec.template.spec.containers[0].securityContext.allowPrivilegeEscalation |= false
# 5) select(di==0).spec.template.spec.containers[0].securityContext.readOnlyRootFilesystem |= true

apiVersion: apps/v1
kind: Deployment
metadata:
  name: multiple_inserts
spec:
  selector:
    matchLabels:
      app: example_4
  template:
    metadata:
      labels:
        app: example_4
    spec:
      serviceAccountName: default
      terminationGracePeriodSeconds: 5
      containers:
      - name: example_4
        image: nginx
        ports:
        - containerPort: 3000
        env:
        - name: PORT
          value: "3000"
        resources:
          requests:
            cpu: 200m
            memory: 180Mi
          limits:
            cpu: 300m
            memory: 300Mi
        readinessProbe:
          initialDelaySeconds: 20
          periodSeconds: 15
          exec:
            command: ["/bin/grpc_health_probe", "-addr=:3000"]
        livenessProbe:
          initialDelaySeconds: 20
          periodSeconds: 15
          exec:
            command: ["/bin/grpc_health_probe", "-addr=:3000"]
        securityContext:
          capabilities:
            drop:
            - NET_RAW
          seccompProfile:
            type: RuntimeDefault
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      securityContext:
        allowPrivilegeEscalation: false
```

core/pkg/fixhandler/testdata/inserts/tc-05-00-input-comment-blank-line-single-insert.yaml (vendored, new file, 16 lines)

```yaml
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"

apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container
    image: nginx

  # Testing if comments are retained as intended
  securityContext:
    runAsRoot: false
```

core/pkg/fixhandler/testdata/inserts/tc-05-01-expected.yaml (vendored, new file, 18 lines)

```yaml
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"

apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container
    image: nginx
    securityContext:
      allowPrivilegeEscalation: false

  # Testing if comments are retained as intended
  securityContext:
    runAsRoot: false
```

core/pkg/fixhandler/testdata/inserts/tc-06-00-input-list-append-scalar-oneline.yaml (vendored, new file, 14 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]

apiVersion: v1
kind: Pod
metadata:
  name: insert_list
spec:
  containers:
  - name: nginx1
    image: nginx
    securityContext:
      capabilities:
        drop: [NET_RAW]
```

core/pkg/fixhandler/testdata/inserts/tc-06-01-expected.yaml (vendored, new file, 14 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["SYS_ADM"]

apiVersion: v1
kind: Pod
metadata:
  name: insert_list
spec:
  containers:
  - name: nginx1
    image: nginx
    securityContext:
      capabilities:
        drop: [NET_RAW, SYS_ADM]
```

core/pkg/fixhandler/testdata/inserts/tc-07-00-input-multiple-documents.yaml (vendored, new file, 27 lines)

```yaml
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"

apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container
    image: nginx

---

# Fix to Apply:
# "select(di==1).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"

apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container
    image: nginx
```

core/pkg/fixhandler/testdata/inserts/tc-07-01-expected.yaml (vendored, new file, 31 lines)

```yaml
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"

apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container
    image: nginx
    securityContext:
      allowPrivilegeEscalation: false

---

# Fix to Apply:
# "select(di==1).spec.containers[0].securityContext.allowPrivilegeEscalation |= false"

apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container
    image: nginx
    securityContext:
      allowPrivilegeEscalation: false
```

core/pkg/fixhandler/testdata/inserts/tc-08-00-input-mapping-insert-mapping-indented.yaml (vendored, new file, 11 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]

apiVersion: v1
kind: Pod
metadata:
  name: indented-parent-list-insert-list-value
spec:
  containers:
    - name: nginx_container
      image: nginx
```

core/pkg/fixhandler/testdata/inserts/tc-08-01-expected.yaml (vendored, new file, 15 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop += ["NET_RAW"]

apiVersion: v1
kind: Pod
metadata:
  name: indented-parent-list-insert-list-value
spec:
  containers:
    - name: nginx_container
      image: nginx
      securityContext:
        capabilities:
          drop:
            - NET_RAW
```

core/pkg/fixhandler/testdata/inserts/tc-09-00-input-list-insert-new-mapping-indented.yaml (vendored, new file, 11 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers += {"name": "redis", "image": "redis"}

apiVersion: v1
kind: Pod
metadata:
  name: indented-parent-list-insert-list-value
spec:
  containers:
    - name: nginx_container
      image: nginx
```

core/pkg/fixhandler/testdata/inserts/tc-09-01-expected.yaml (vendored, new file, 13 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers += {"name": "redis", "image": "redis"}

apiVersion: v1
kind: Pod
metadata:
  name: indented-parent-list-insert-list-value
spec:
  containers:
    - name: nginx_container
      image: nginx
    - name: redis
      image: redis
```

core/pkg/fixhandler/testdata/inserts/tc-10-00-input-list-insert-new-mapping.yaml (vendored, new file, 11 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers += {"name": "redis", "image": "redis"}

apiVersion: v1
kind: Pod
metadata:
  name: indented-list-insert-new-object
spec:
  containers:
  - name: nginx_container
    image: nginx
```

core/pkg/fixhandler/testdata/inserts/tc-10-01-expected.yaml (vendored, new file, 13 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers += {"name": "redis", "image": "redis"}

apiVersion: v1
kind: Pod
metadata:
  name: indented-list-insert-new-object
spec:
  containers:
  - name: nginx_container
    image: nginx
  - name: redis
    image: redis
```
core/pkg/fixhandler/testdata/removals/tc-01-00-input.yaml (vendored, new file, 14 lines)

```yaml
# Fix to Apply:
# del(select(di==0).spec.containers[0].securityContext)

apiVersion: v1
kind: Pod
metadata:
  name: remove_example

spec:
  containers:
  - name: nginx_container
    image: nginx
    securityContext:
      runAsRoot: false
```

core/pkg/fixhandler/testdata/removals/tc-01-01-expected.yaml (vendored, new file, 12 lines)

```yaml
# Fix to Apply:
# del(select(di==0).spec.containers[0].securityContext)

apiVersion: v1
kind: Pod
metadata:
  name: remove_example

spec:
  containers:
  - name: nginx_container
    image: nginx
```

core/pkg/fixhandler/testdata/removals/tc-02-00-input.yaml (vendored, new file, 15 lines)

```yaml
# Fix to Apply:
# del(select(di==0).spec.containers[1])

apiVersion: v1
kind: Pod
metadata:
  name: remove_example

spec:
  containers:
  - name: nginx_container
    image: nginx

  - name: container_with_security_issues
    image: image_with_security_issues
```

core/pkg/fixhandler/testdata/removals/tc-02-01-expected.yaml (vendored, new file, 12 lines)

```yaml
# Fix to Apply:
# del(select(di==0).spec.containers[1])

apiVersion: v1
kind: Pod
metadata:
  name: remove_example

spec:
  containers:
  - name: nginx_container
    image: nginx
```

core/pkg/fixhandler/testdata/removals/tc-03-00-input.yaml (vendored, new file, 14 lines)

```yaml
# Fix to Apply:
# del(select(di==0).spec.containers[0].securityContext.capabilities.drop[1])

apiVersion: v1
kind: Pod
metadata:
  name: insert_list
spec:
  containers:
  - name: nginx1
    image: nginx
    securityContext:
      capabilities:
        drop: ["NET_RAW", "SYS_ADM"]
```

core/pkg/fixhandler/testdata/removals/tc-03-01-expected.yaml (vendored, new file, 14 lines)

```yaml
# Fix to Apply:
# del(select(di==0).spec.containers[0].securityContext.capabilities.drop[1])

apiVersion: v1
kind: Pod
metadata:
  name: insert_list
spec:
  containers:
  - name: nginx1
    image: nginx
    securityContext:
      capabilities:
        drop: ["NET_RAW"]
```

core/pkg/fixhandler/testdata/removals/tc-04-00-input.yaml (vendored, new file, 32 lines)

```yaml
# Fix to Apply:
# del(select(di==0).spec.containers[0].securityContext)

apiVersion: v1
kind: Pod
metadata:
  name: remove_example

spec:
  containers:
  - name: nginx_container
    image: nginx
    securityContext:
      runAsRoot: false

---

# Fix to Apply:
# del(select(di==0).spec.containers[1])

apiVersion: v1
kind: Pod
metadata:
  name: remove_example

spec:
  containers:
  - name: nginx_container
    image: nginx

  - name: container_with_security_issues
    image: image_with_security_issues
```

core/pkg/fixhandler/testdata/removals/tc-04-01-expected.yaml (vendored, new file, 27 lines)

```yaml
# Fix to Apply:
# del(select(di==0).spec.containers[0].securityContext)

apiVersion: v1
kind: Pod
metadata:
  name: remove_example

spec:
  containers:
  - name: nginx_container
    image: nginx

---

# Fix to Apply:
# del(select(di==0).spec.containers[1])

apiVersion: v1
kind: Pod
metadata:
  name: remove_example

spec:
  containers:
  - name: nginx_container
    image: nginx
```
core/pkg/fixhandler/testdata/replaces/tc-01-00-input.yaml (vendored, new file, 14 lines)

```yaml
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.runAsRoot |= false"

apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container
    image: nginx
    securityContext:
      runAsRoot: true
```

core/pkg/fixhandler/testdata/replaces/tc-01-01-expected.yaml (vendored, new file, 14 lines)

```yaml
# Fix to Apply:
# "select(di==0).spec.containers[0].securityContext.runAsRoot |= false"

apiVersion: v1
kind: Pod
metadata:
  name: insert_to_mapping_node_1

spec:
  containers:
  - name: nginx_container
    image: nginx
    securityContext:
      runAsRoot: false
```

core/pkg/fixhandler/testdata/replaces/tc-02-00-input.yaml (vendored, new file, 18 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop[0] |= "SYS_ADM"
# select(di==0).spec.containers[0].securityContext.capabilities.add[0] |= "NET_RAW"


apiVersion: v1
kind: Pod
metadata:
  name: insert_list
spec:
  containers:
  - name: nginx1
    image: nginx
    securityContext:
      capabilities:
        drop:
        - "NET_RAW"
        add: ["SYS_ADM"]
```

core/pkg/fixhandler/testdata/replaces/tc-02-01-expected.yaml (vendored, new file, 18 lines)

```yaml
# Fix to Apply:
# select(di==0).spec.containers[0].securityContext.capabilities.drop[0] |= "SYS_ADM"
# select(di==0).spec.containers[0].securityContext.capabilities.add[0] |= "NET_RAW"


apiVersion: v1
kind: Pod
metadata:
  name: insert_list
spec:
  containers:
  - name: nginx1
    image: nginx
    securityContext:
      capabilities:
        drop:
        - "SYS_ADM"
        add: ["NET_RAW"]
```
286 core/pkg/fixhandler/yamlhandler.go Normal file
@@ -0,0 +1,286 @@
package fixhandler

import (
	"container/list"
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/mikefarah/yq/v4/pkg/yqlib"

	"gopkg.in/yaml.v3"
)

// decodeDocumentRoots decodes all YAML documents in the given string and returns a slice of their root nodes
func decodeDocumentRoots(yamlAsString string) ([]yaml.Node, error) {
	fileReader := strings.NewReader(yamlAsString)
	dec := yaml.NewDecoder(fileReader)

	nodes := make([]yaml.Node, 0)
	for {
		var node yaml.Node
		err := dec.Decode(&node)

		nodes = append(nodes, node)

		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("Cannot Decode File as YAML")
		}
	}

	return nodes, nil
}

func getFixedNodes(yamlAsString, yamlExpression string) ([]yaml.Node, error) {
	preferences := yqlib.ConfiguredYamlPreferences
	preferences.EvaluateTogether = true
	decoder := yqlib.NewYamlDecoder(preferences)

	var allDocuments = list.New()
	reader := strings.NewReader(yamlAsString)

	fileDocuments, err := readDocuments(reader, decoder)
	if err != nil {
		return nil, err
	}
	allDocuments.PushBackList(fileDocuments)

	allAtOnceEvaluator := yqlib.NewAllAtOnceEvaluator()

	fixedCandidateNodes, err := allAtOnceEvaluator.EvaluateCandidateNodes(yamlExpression, allDocuments)

	if err != nil {
		return nil, fmt.Errorf("Error fixing YAML, %w", err)
	}

	fixedNodes := make([]yaml.Node, 0)
	var fixedNode *yaml.Node
	for fixedCandidateNode := fixedCandidateNodes.Front(); fixedCandidateNode != nil; fixedCandidateNode = fixedCandidateNode.Next() {
		fixedNode = fixedCandidateNode.Value.(*yqlib.CandidateNode).Node
		fixedNodes = append(fixedNodes, *fixedNode)
	}

	return fixedNodes, nil
}

func flattenWithDFS(node *yaml.Node) *[]nodeInfo {
	dfsOrder := make([]nodeInfo, 0)
	flattenWithDFSHelper(node, nil, &dfsOrder, 0)
	return &dfsOrder
}

func flattenWithDFSHelper(node *yaml.Node, parent *yaml.Node, dfsOrder *[]nodeInfo, index int) {
	dfsNode := nodeInfo{
		node:   node,
		parent: parent,
		index:  index,
	}
	*dfsOrder = append(*dfsOrder, dfsNode)

	for idx, child := range node.Content {
		flattenWithDFSHelper(child, node, dfsOrder, idx)
	}
}

func getFixInfo(originalRootNodes, fixedRootNodes []yaml.Node) fileFixInfo {
	contentToAdd := make([]contentToAdd, 0)
	linesToRemove := make([]linesToRemove, 0)

	for idx := 0; idx < len(fixedRootNodes); idx++ {
		originalList := flattenWithDFS(&originalRootNodes[idx])
		fixedList := flattenWithDFS(&fixedRootNodes[idx])
		nodeContentToAdd, nodeLinesToRemove := getFixInfoHelper(*originalList, *fixedList)
		contentToAdd = append(contentToAdd, nodeContentToAdd...)
		linesToRemove = append(linesToRemove, nodeLinesToRemove...)
	}

	return fileFixInfo{
		contentsToAdd: &contentToAdd,
		linesToRemove: &linesToRemove,
	}
}

func getFixInfoHelper(originalList, fixedList []nodeInfo) ([]contentToAdd, []linesToRemove) {

	// While obtaining fixedYamlNode, comments and empty lines at the top are ignored.
	// This causes a difference in Line numbers across the tree structure. In order to
	// counter this, line numbers are adjusted in the fixed list.
	adjustFixedListLines(&originalList, &fixedList)

	contentToAdd := make([]contentToAdd, 0)
	linesToRemove := make([]linesToRemove, 0)

	originalListTracker, fixedListTracker := 0, 0

	fixInfoMetadata := &fixInfoMetadata{
		originalList:        &originalList,
		fixedList:           &fixedList,
		originalListTracker: originalListTracker,
		fixedListTracker:    fixedListTracker,
		contentToAdd:        &contentToAdd,
		linesToRemove:       &linesToRemove,
	}

	for originalListTracker < len(originalList) && fixedListTracker < len(fixedList) {
		matchNodeResult := matchNodes(originalList[originalListTracker].node, fixedList[fixedListTracker].node)

		fixInfoMetadata.originalListTracker = originalListTracker
		fixInfoMetadata.fixedListTracker = fixedListTracker

		switch matchNodeResult {
		case sameNodes:
			originalListTracker += 1
			fixedListTracker += 1

		case removedNode:
			originalListTracker, fixedListTracker = addLinesToRemove(fixInfoMetadata)

		case insertedNode:
			originalListTracker, fixedListTracker = addLinesToInsert(fixInfoMetadata)

		case replacedNode:
			originalListTracker, fixedListTracker = updateLinesToReplace(fixInfoMetadata)
		}
	}

	// Some nodes are still not visited if they are removed at the end of the list
	for originalListTracker < len(originalList) {
		fixInfoMetadata.originalListTracker = originalListTracker
		originalListTracker, _ = addLinesToRemove(fixInfoMetadata)
	}

	// Some nodes are still not visited if they are inserted at the end of the list
	for fixedListTracker < len(fixedList) {
		// Use negative index of last node in original list as a placeholder to determine the last line number later
		fixInfoMetadata.originalListTracker = -(len(originalList) - 1)
		fixInfoMetadata.fixedListTracker = fixedListTracker
		_, fixedListTracker = addLinesToInsert(fixInfoMetadata)
	}

	return contentToAdd, linesToRemove
}

// Adds the lines to remove and returns the updated originalListTracker
func addLinesToRemove(fixInfoMetadata *fixInfoMetadata) (int, int) {
	isOneLine, line := isOneLineSequenceNode(fixInfoMetadata.originalList, fixInfoMetadata.originalListTracker)

	if isOneLine {
		// Remove the entire line and replace it with the sequence node in fixed info. This way,
		// the original formatting is not lost.
		return replaceSingleLineSequence(fixInfoMetadata, line)
	}

	currentDFSNode := (*fixInfoMetadata.originalList)[fixInfoMetadata.originalListTracker]

	newOriginalListTracker := updateTracker(fixInfoMetadata.originalList, fixInfoMetadata.originalListTracker)
	*fixInfoMetadata.linesToRemove = append(*fixInfoMetadata.linesToRemove, linesToRemove{
		startLine: currentDFSNode.node.Line,
		endLine:   getNodeLine(fixInfoMetadata.originalList, newOriginalListTracker),
	})

	return newOriginalListTracker, fixInfoMetadata.fixedListTracker
}

// Adds the lines to insert and returns the updated fixedListTracker
func addLinesToInsert(fixInfoMetadata *fixInfoMetadata) (int, int) {

	isOneLine, line := isOneLineSequenceNode(fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)

	if isOneLine {
		return replaceSingleLineSequence(fixInfoMetadata, line)
	}

	currentDFSNode := (*fixInfoMetadata.fixedList)[fixInfoMetadata.fixedListTracker]

	lineToInsert := getLineToInsert(fixInfoMetadata)
	contentToInsert := getContent(currentDFSNode.parent, fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)

	newFixedTracker := updateTracker(fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)

	*fixInfoMetadata.contentToAdd = append(*fixInfoMetadata.contentToAdd, contentToAdd{
		line:    lineToInsert,
		content: contentToInsert,
	})

	return fixInfoMetadata.originalListTracker, newFixedTracker
}

// Adds the lines to remove and insert and updates the fixedListTracker and originalListTracker
func updateLinesToReplace(fixInfoMetadata *fixInfoMetadata) (int, int) {

	isOneLine, line := isOneLineSequenceNode(fixInfoMetadata.fixedList, fixInfoMetadata.fixedListTracker)

	if isOneLine {
		return replaceSingleLineSequence(fixInfoMetadata, line)
	}

	currentDFSNode := (*fixInfoMetadata.fixedList)[fixInfoMetadata.fixedListTracker]

	// If only the value node is changed, the entire "key-value" pair is replaced
	if isValueNodeinMapping(&currentDFSNode) {
		fixInfoMetadata.originalListTracker -= 1
		fixInfoMetadata.fixedListTracker -= 1
	}

	addLinesToRemove(fixInfoMetadata)
	updatedOriginalTracker, updatedFixedTracker := addLinesToInsert(fixInfoMetadata)

	return updatedOriginalTracker, updatedFixedTracker
}

func removeNewLinesAtTheEnd(yamlLines []string) []string {
	for idx := 1; idx < len(yamlLines); idx++ {
		if yamlLines[len(yamlLines)-idx] != "\n" {
			yamlLines = yamlLines[:len(yamlLines)-idx+1]
			break
		}
	}
	return yamlLines
}

func getFixedYamlLines(yamlLines []string, fileFixInfo fileFixInfo) (fixedYamlLines []string) {

	// Determining the last line requires the original yaml lines slice. The placeholder for the last line is replaced with the real last line
	assignLastLine(fileFixInfo.contentsToAdd, fileFixInfo.linesToRemove, &yamlLines)

	removeLines(fileFixInfo.linesToRemove, &yamlLines)

	fixedYamlLines = make([]string, 0)
	lineIdx, lineToAddIdx := 1, 0

	// Ideally, a new node is inserted at the line before the next node in DFS order. But, when the previous line contains a
	// comment or empty line, we need to insert new nodes before them.
	adjustContentLines(fileFixInfo.contentsToAdd, &yamlLines)

	for lineToAddIdx < len(*fileFixInfo.contentsToAdd) {
		for lineIdx <= (*fileFixInfo.contentsToAdd)[lineToAddIdx].line {
			// Check if the current line is not removed
			if yamlLines[lineIdx-1] != "*" {
				fixedYamlLines = append(fixedYamlLines, yamlLines[lineIdx-1])
			}
			lineIdx += 1
		}

		content := (*fileFixInfo.contentsToAdd)[lineToAddIdx].content
		fixedYamlLines = append(fixedYamlLines, content)

		lineToAddIdx += 1
	}

	for lineIdx <= len(yamlLines) {
		if yamlLines[lineIdx-1] != "*" {
			fixedYamlLines = append(fixedYamlLines, yamlLines[lineIdx-1])
		}
		lineIdx += 1
	}

	fixedYamlLines = removeNewLinesAtTheEnd(fixedYamlLines)

	return fixedYamlLines
}
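Editor's note: taken together, the helpers above form a pipeline: decode the original documents, evaluate the yq expression to obtain the fixed documents, diff the two DFS-flattened trees into lines to remove and content to add, and splice those edits back into the raw lines. A minimal sketch of that flow follows; the wrapper function is hypothetical, and the real entry point of the fix handler is not shown in this diff.

package fixhandler

import "strings"

// Hypothetical wrapper, for illustration only: applies a single yq expression
// to a YAML string using the helpers defined in yamlhandler.go and
// yamlhelper.go.
func applyFixSketch(yamlAsString, expression string) (string, error) {
	originalRoots, err := decodeDocumentRoots(yamlAsString)
	if err != nil {
		return "", err
	}

	fixedRoots, err := getFixedNodes(yamlAsString, expression)
	if err != nil {
		return "", err
	}

	// Diff the DFS-flattened original and fixed trees into line-level edits,
	// then apply those edits to the raw lines of the input.
	fixInfo := getFixInfo(originalRoots, fixedRoots)
	fixedLines := getFixedYamlLines(strings.Split(yamlAsString, "\n"), fixInfo)

	return getStringFromSlice(fixedLines), nil
}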
406 core/pkg/fixhandler/yamlhelper.go Normal file
@@ -0,0 +1,406 @@
package fixhandler

import (
	"bufio"
	"bytes"
	"container/list"
	"errors"
	"fmt"
	"io"
	"math"
	"os"
	"strings"

	logger "github.com/kubescape/go-logger"
	"github.com/mikefarah/yq/v4/pkg/yqlib"
	"gopkg.in/yaml.v3"
)

type NodeRelation int

const (
	sameNodes NodeRelation = iota
	insertedNode
	removedNode
	replacedNode
)

func matchNodes(nodeOne, nodeTwo *yaml.Node) NodeRelation {

	isNewNode := nodeTwo.Line == 0 && nodeTwo.Column == 0
	sameLines := nodeOne.Line == nodeTwo.Line
	sameColumns := nodeOne.Column == nodeTwo.Column

	isSameNode := isSameNode(nodeOne, nodeTwo)

	switch {
	case isSameNode:
		return sameNodes
	case isNewNode:
		return insertedNode
	case sameLines && sameColumns:
		return replacedNode
	default:
		return removedNode
	}
}

func adjustContentLines(contentToAdd *[]contentToAdd, linesSlice *[]string) {
	for contentIdx, content := range *contentToAdd {
		line := content.line

		// Adjust line numbers such that there are no "empty lines or comment lines of next nodes" before them
		for idx := line - 1; idx >= 0; idx-- {
			if isEmptyLineOrComment((*linesSlice)[idx]) {
				(*contentToAdd)[contentIdx].line -= 1
			} else {
				break
			}
		}
	}
}

func adjustFixedListLines(originalList, fixedList *[]nodeInfo) {
	differenceAtTop := (*originalList)[0].node.Line - (*fixedList)[0].node.Line

	if differenceAtTop <= 0 {
		return
	}

	for _, node := range *fixedList {
		// line numbers should not be changed for new nodes.
		if node.node.Line != 0 {
			node.node.Line += differenceAtTop
		}
	}
}

func enocodeIntoYaml(parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) (string, error) {
	content := make([]*yaml.Node, 0)
	currentNode := (*nodeList)[tracker].node
	content = append(content, currentNode)

	// Add the value in the "key-value" pair to the construct if the parent is a mapping node
	if parentNode.Kind == yaml.MappingNode {
		valueNode := (*nodeList)[tracker+1].node
		content = append(content, valueNode)
	}

	// The parent is added at the top to encode into YAML
	parentForContent := yaml.Node{
		Kind:    parentNode.Kind,
		Content: content,
	}

	buf := new(bytes.Buffer)

	encoder := yaml.NewEncoder(buf)
	encoder.SetIndent(2)

	errorEncoding := encoder.Encode(parentForContent)
	if errorEncoding != nil {
		return "", fmt.Errorf("Error encoding node, %v", errorEncoding.Error())
	}
	errorClosingEncoder := encoder.Close()
	if errorClosingEncoder != nil {
		return "", fmt.Errorf("Error closing encoder: %v", errorClosingEncoder.Error())
	}
	return fmt.Sprintf(`%v`, buf.String()), nil
}

func getContent(parentNode *yaml.Node, nodeList *[]nodeInfo, tracker int) string {
	content, err := enocodeIntoYaml(parentNode, nodeList, tracker)
	if err != nil {
		logger.L().Fatal("Cannot Encode into YAML")
	}

	indentationSpaces := parentNode.Column - 1

	content = indentContent(content, indentationSpaces)

	return strings.TrimSuffix(content, "\n")
}

func indentContent(content string, indentationSpaces int) string {
	indentedContent := ""
	indentSpaces := strings.Repeat(" ", indentationSpaces)

	scanner := bufio.NewScanner(strings.NewReader(content))
	for scanner.Scan() {
		line := scanner.Text()
		indentedContent += (indentSpaces + line + "\n")
	}
	return indentedContent
}

func getLineToInsert(fixInfoMetadata *fixInfoMetadata) int {
	var lineToInsert int
	// Check if lineToInsert is the last line
	if fixInfoMetadata.originalListTracker < 0 {
		originalListTracker := int(math.Abs(float64(fixInfoMetadata.originalListTracker)))
		// Storing the negative value of the line of the last node as a placeholder to determine the last line later.
		lineToInsert = -(*fixInfoMetadata.originalList)[originalListTracker].node.Line
	} else {
		lineToInsert = (*fixInfoMetadata.originalList)[fixInfoMetadata.originalListTracker].node.Line - 1
	}
	return lineToInsert
}

func assignLastLine(contentsToAdd *[]contentToAdd, linesToRemove *[]linesToRemove, linesSlice *[]string) {
	for idx, contentToAdd := range *contentsToAdd {
		if contentToAdd.line < 0 {
			currentLine := int(math.Abs(float64(contentToAdd.line)))
			(*contentsToAdd)[idx].line, _ = getLastLineOfResource(linesSlice, currentLine)
		}
	}

	for idx, lineToRemove := range *linesToRemove {
		if lineToRemove.endLine < 0 {
			endLine, _ := getLastLineOfResource(linesSlice, lineToRemove.startLine)
			(*linesToRemove)[idx].endLine = endLine
		}
	}
}

func getLastLineOfResource(linesSlice *[]string, currentLine int) (int, error) {
	// Get the last lines of all resources...
	lastLinesOfResources := make([]int, 0)
	for lineNumber, lineContent := range *linesSlice {
		if lineContent == "---" {
			for lastLine := lineNumber - 1; lastLine >= 0; lastLine-- {
				if !isEmptyLineOrComment((*linesSlice)[lastLine]) {
					lastLinesOfResources = append(lastLinesOfResources, lastLine+1)
					break
				}
			}
		}
	}

	lastLine := len(*linesSlice)
	for lastLine >= 0 {
		if !isEmptyLineOrComment((*linesSlice)[lastLine-1]) {
			lastLinesOfResources = append(lastLinesOfResources, lastLine)
			break
		} else {
			lastLine--
		}
	}

	// Get the last line of the resource we need
	for _, endLine := range lastLinesOfResources {
		if currentLine <= endLine {
			return endLine, nil
		}
	}

	return 0, fmt.Errorf("Provided line is greater than the length of YAML file")
}

func getNodeLine(nodeList *[]nodeInfo, tracker int) int {
	if tracker < len(*nodeList) {
		return (*nodeList)[tracker].node.Line
	} else {
		return -1
	}
}

// Checks if the node is a value node in the "key-value" pairs of a mapping node
func isValueNodeinMapping(node *nodeInfo) bool {
	if node.parent.Kind == yaml.MappingNode && node.index%2 != 0 {
		return true
	}
	return false
}

// Checks if the node is part of a single line sequence node and returns the line
func isOneLineSequenceNode(list *[]nodeInfo, currentTracker int) (bool, int) {
	parentNode := (*list)[currentTracker].parent
	if parentNode.Kind != yaml.SequenceNode {
		return false, -1
	}

	var currentNode, prevNode nodeInfo
	currentTracker -= 1

	for (*list)[currentTracker].node != parentNode {
		currentNode = (*list)[currentTracker]
		prevNode = (*list)[currentTracker-1]

		if currentNode.node.Line != prevNode.node.Line {
			return false, -1
		}
		currentTracker -= 1
	}

	parentNodeInfo := (*list)[currentTracker]

	if parentNodeInfo.parent.Kind == yaml.MappingNode {
		keyNodeInfo := (*list)[currentTracker-1]
		if keyNodeInfo.node.Line == parentNode.Line {
			return true, parentNode.Line
		} else {
			return false, -1
		}
	} else {
		if parentNodeInfo.parent.Line == parentNode.Line {
			return true, parentNode.Line
		} else {
			return false, -1
		}
	}
}

// Checks if nodes are of the same kind, value, line and column
func isSameNode(nodeOne, nodeTwo *yaml.Node) bool {
	sameLines := nodeOne.Line == nodeTwo.Line
	sameColumns := nodeOne.Column == nodeTwo.Column
	sameKinds := nodeOne.Kind == nodeTwo.Kind
	sameValues := nodeOne.Value == nodeTwo.Value

	return sameKinds && sameValues && sameLines && sameColumns
}

// Checks if the line is empty or a comment
func isEmptyLineOrComment(lineContent string) bool {
	lineContent = strings.TrimSpace(lineContent)
	if lineContent == "" {
		return true
	} else if lineContent[0:1] == "#" {
		return true
	}
	return false
}

func readDocuments(reader io.Reader, decoder yqlib.Decoder) (*list.List, error) {
	err := decoder.Init(reader)
	if err != nil {
		return nil, fmt.Errorf("Error Initializing the decoder, %w", err)
	}
	inputList := list.New()

	var currentIndex uint

	for {
		candidateNode, errorReading := decoder.Decode()

		if errors.Is(errorReading, io.EOF) {
			switch reader := reader.(type) {
			case *os.File:
				safelyCloseFile(reader)
			}
			return inputList, nil
		} else if errorReading != nil {
			return nil, fmt.Errorf("Error Decoding YAML file, %w", errorReading)
		}

		candidateNode.Document = currentIndex
		candidateNode.EvaluateTogether = true

		inputList.PushBack(candidateNode)

		currentIndex = currentIndex + 1
	}
}

func safelyCloseFile(file *os.File) {
	err := file.Close()
	if err != nil {
		logger.L().Error("Error Closing File")
	}
}

// Remove the entire line and replace it with the sequence node in fixed info. This way,
// the original formatting is not lost.
func replaceSingleLineSequence(fixInfoMetadata *fixInfoMetadata, line int) (int, int) {
	originalListTracker := getFirstNodeInLine(fixInfoMetadata.originalList, line)
	fixedListTracker := getFirstNodeInLine(fixInfoMetadata.fixedList, line)

	currentDFSNode := (*fixInfoMetadata.fixedList)[fixedListTracker]
	contentToInsert := getContent(currentDFSNode.parent, fixInfoMetadata.fixedList, fixedListTracker)

	// Remove the single line
	*fixInfoMetadata.linesToRemove = append(*fixInfoMetadata.linesToRemove, linesToRemove{
		startLine: line,
		endLine:   line,
	})

	// Encode the entire sequence node and insert
	*fixInfoMetadata.contentToAdd = append(*fixInfoMetadata.contentToAdd, contentToAdd{
		line:    line,
		content: contentToInsert,
	})

	originalListTracker = updateTracker(fixInfoMetadata.originalList, originalListTracker)
	fixedListTracker = updateTracker(fixInfoMetadata.fixedList, fixedListTracker)

	return originalListTracker, fixedListTracker
}

// Returns the first node in the given line that is not a mapping node
func getFirstNodeInLine(list *[]nodeInfo, line int) int {
	tracker := 0

	currentNode := (*list)[tracker].node
	for currentNode.Line != line || currentNode.Kind == yaml.MappingNode {
		tracker += 1
		currentNode = (*list)[tracker].node
	}

	return tracker
}

// To not mess with the line numbers while inserting, removed lines are not deleted but replaced with "*"
func removeLines(linesToRemove *[]linesToRemove, linesSlice *[]string) {
	var startLine, endLine int
	for _, lineToRemove := range *linesToRemove {
		startLine = lineToRemove.startLine - 1
		endLine = lineToRemove.endLine - 1

		for line := startLine; line <= endLine; line++ {
			lineContent := (*linesSlice)[line]
			// When determining the endLine, empty lines and comments which are not intended to be removed are included.
			// To deal with that, we need to refrain from removing empty lines and comments
			if isEmptyLineOrComment(lineContent) {
				break
			}
			(*linesSlice)[line] = "*"
		}
	}
}

// Skips the current node including its children in DFS order and returns the new tracker.
func skipCurrentNode(node *yaml.Node, currentTracker int) int {
	updatedTracker := currentTracker + getChildrenCount(node)
	return updatedTracker
}

func getChildrenCount(node *yaml.Node) int {
	totalChildren := 1
	for _, child := range node.Content {
		totalChildren += getChildrenCount(child)
	}
	return totalChildren
}

// The current node along with its children is skipped and the tracker is moved to the next sibling
// of the current node. If the parent is a mapping node, the "value" in "key-value" pairs is also skipped.
func updateTracker(nodeList *[]nodeInfo, tracker int) int {
	currentNode := (*nodeList)[tracker]
	var updatedTracker int

	if currentNode.parent.Kind == yaml.MappingNode {
		valueNode := (*nodeList)[tracker+1]
		updatedTracker = skipCurrentNode(valueNode.node, tracker+1)
	} else {
		updatedTracker = skipCurrentNode(currentNode.node, tracker)
	}

	return updatedTracker
}

func getStringFromSlice(yamlLines []string) (fixedYamlString string) {
	return strings.Join(yamlLines, "\n")
}
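Editor's note: one detail worth calling out is the "*" placeholder convention used by removeLines: removed lines are masked in place rather than dropped, so that insert positions, which are expressed as original line numbers, stay valid until getFixedYamlLines reassembles the file. A small illustrative sketch follows; the sample lines and the function itself are hypothetical and not part of the change set.

package fixhandler

import "fmt"

// Hypothetical illustration of the "*" placeholder convention: after
// removeLines, the removed range is masked in place and the line numbers of
// the remaining lines are unchanged.
func placeholderSketch() {
	lines := []string{
		"spec:",
		"  containers:",
		"    - name: nginx",
		"      securityContext:",
		"        runAsRoot: true",
	}

	toRemove := []linesToRemove{{startLine: 4, endLine: 5}}
	removeLines(&toRemove, &lines)

	// lines is now ["spec:", "  containers:", "    - name: nginx", "*", "*"];
	// the "*" entries are skipped when the fixed file is reassembled.
	fmt.Println(lines)
}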
@@ -103,6 +103,8 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
	gitRepo, err := cautils.NewLocalGitRepository(path)
	if err == nil && gitRepo != nil {
		repoRoot, _ = gitRepo.GetRootDir()
	} else {
		repoRoot, _ = filepath.Abs(path)
	}

	// load resource from local file system
@@ -141,7 +143,7 @@ func getResourcesFromPath(path string) (map[string]reporthandling.Source, []work
	}

	workloadSource := reporthandling.Source{
-		RelativePath: source,
+		RelativePath: relSource,
		FileType:     filetype,
		LastCommit:   lastCommit,
	}
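Editor's note: the two hunks above suggest the workload source path is now recorded relative to the repository root, falling back to an absolute path when no git repository is found. For illustration only, here is a standalone sketch of that kind of computation using the standard library; all names and paths below are assumptions, not the code of this change.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	repoRoot := "/home/user/repo" // e.g. what gitRepo.GetRootDir() would return
	absPath := "/home/user/repo/deploy/pod.yaml"

	// Derive a path relative to the repository root; fall back to the
	// absolute path if it cannot be made relative.
	relSource, err := filepath.Rel(repoRoot, absPath)
	if err != nil {
		relSource = absPath
	}

	fmt.Println(relSource) // deploy/pod.yaml
}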
@@ -16,6 +16,7 @@ import (
func FinalizeResults(data *cautils.OPASessionObj) *reporthandlingv2.PostureReport {
	report := reporthandlingv2.PostureReport{
		SummaryDetails:       data.Report.SummaryDetails,
		Metadata:             *data.Metadata,
		ClusterAPIServerInfo: data.Report.ClusterAPIServerInfo,
		ReportGenerationTime: data.Report.ReportGenerationTime,
		Attributes:           data.Report.Attributes,