Merge branch 'master' of github.com:armosec/kubescape into scan-workload

Amir Malka committed on 2023-07-20 19:39:17 +03:00
27 changed files with 41472 additions and 240 deletions

View File

@@ -2,6 +2,9 @@ package cautils
import (
"fmt"
"os"
"sort"
"strconv"
"strings"
)
@@ -42,3 +45,32 @@ func StringInSlice(strSlice []string, str string) int {
}
return ValueNotFound
}
func StringSlicesAreEqual(a, b []string) bool {
if len(a) != len(b) {
return false
}
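// note: both slices are sorted in place below, which mutates the caller's arguments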
sort.Strings(a)
sort.Strings(b)
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
}
func ParseIntEnvVar(varName string, defaultValue int) (int, error) {
varValue, exists := os.LookupEnv(varName)
if !exists {
return defaultValue, nil
}
intValue, err := strconv.Atoi(varValue)
if err != nil {
return defaultValue, fmt.Errorf("failed to parse %s env var as int: %w", varName, err)
}
return intValue, nil
}
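
For context, a minimal usage sketch of the new helper; the MY_LIMIT variable name and the fallback value are illustrative, not part of this change:

package main

import (
	"fmt"
	"log"

	"github.com/kubescape/kubescape/v2/core/cautils"
)

func main() {
	// Returns 10 when MY_LIMIT is unset; when it is set but not an integer,
	// the default is still returned together with a descriptive error.
	limit, err := cautils.ParseIntEnvVar("MY_LIMIT", 10)
	if err != nil {
		log.Printf("falling back to default: %v", err)
	}
	fmt.Println("limit:", limit)
}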

View File

@@ -2,8 +2,11 @@ package cautils
import (
"fmt"
"os"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func TestConvertLabelsToString(t *testing.T) {
@@ -33,3 +36,102 @@ func TestConvertStringToLabels(t *testing.T) {
t.Errorf("%s != %s", fmt.Sprintf("%v", rstrMap), fmt.Sprintf("%v", strMap))
}
}
func TestParseIntEnvVar(t *testing.T) {
testCases := []struct {
expectedErr string
name string
varName string
varValue string
defaultValue int
expected int
}{
{
name: "Variable does not exist",
varName: "DOES_NOT_EXIST",
varValue: "",
defaultValue: 123,
expected: 123,
expectedErr: "",
},
{
name: "Variable exists and is a valid integer",
varName: "MY_VAR",
varValue: "456",
defaultValue: 123,
expected: 456,
expectedErr: "",
},
{
name: "Variable exists but is not a valid integer",
varName: "MY_VAR",
varValue: "not_an_integer",
defaultValue: 123,
expected: 123,
expectedErr: "failed to parse MY_VAR env var as int",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
if tc.varValue != "" {
os.Setenv(tc.varName, tc.varValue)
} else {
os.Unsetenv(tc.varName)
}
actual, err := ParseIntEnvVar(tc.varName, tc.defaultValue)
if tc.expectedErr != "" {
assert.NotNil(t, err)
assert.ErrorContains(t, err, tc.expectedErr)
} else {
assert.Nil(t, err)
}
assert.Equalf(t, tc.expected, actual, "unexpected result")
})
}
}
func TestStringSlicesAreEqual(t *testing.T) {
tt := []struct {
name string
a []string
b []string
want bool
}{
{
name: "equal unsorted slices",
a: []string{"foo", "bar", "baz"},
b: []string{"baz", "foo", "bar"},
want: true,
},
{
name: "equal sorted slices",
a: []string{"bar", "baz", "foo"},
b: []string{"bar", "baz", "foo"},
want: true,
},
{
name: "unequal slices",
a: []string{"foo", "bar", "baz"},
b: []string{"foo", "bar", "qux"},
want: false,
},
{
name: "different length slices",
a: []string{"foo", "bar", "baz"},
b: []string{"foo", "bar"},
want: false,
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
got := StringSlicesAreEqual(tc.a, tc.b)
if got != tc.want {
t.Errorf("StringSlicesAreEqual(%v, %v) = %v; want %v", tc.a, tc.b, got, tc.want)
}
})
}
}

View File

@@ -27,9 +27,9 @@ type componentInterfaces struct {
tenantConfig cautils.ITenantConfig
resourceHandler resourcehandler.IResourceHandler
report reporter.IReport
outputPrinters []printer.IPrinter
uiPrinter printer.IPrinter
hostSensorHandler hostsensorutils.IHostSensor
outputPrinters []printer.IPrinter
}
func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInterfaces {
@@ -116,7 +116,6 @@ func getInterfaces(ctx context.Context, scanInfo *cautils.ScanInfo) componentInt
func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*resultshandling.ResultsHandler, error) {
ctxInit, spanInit := otel.Tracer("").Start(ctx, "initialization")
logger.L().Info("Kubescape scanner starting")
// ===================== Initialization =====================
@@ -151,15 +150,24 @@ func (ks *Kubescape) Scan(ctx context.Context, scanInfo *cautils.ScanInfo) (*res
resultsHandling := resultshandling.NewResultsHandler(interfaces.report, interfaces.outputPrinters, interfaces.uiPrinter)
// ===================== policies & resources =====================
ctxPolicies, spanPolicies := otel.Tracer("").Start(ctxInit, "policies & resources")
policyHandler := policyhandler.NewPolicyHandler(interfaces.resourceHandler)
scanData, err := policyHandler.CollectResources(ctxPolicies, scanInfo.PolicyIdentifier, scanInfo, cautils.NewProgressHandler(""))
// ===================== policies =====================
ctxPolicies, spanPolicies := otel.Tracer("").Start(ctxInit, "policies")
policyHandler := policyhandler.NewPolicyHandler()
scanData, err := policyHandler.CollectPolicies(ctxPolicies, scanInfo.PolicyIdentifier, scanInfo)
if err != nil {
spanInit.End()
return resultsHandling, err
}
spanPolicies.End()
// ===================== resources =====================
ctxResources, spanResources := otel.Tracer("").Start(ctxInit, "resources")
err = resourcehandler.CollectResources(ctxResources, interfaces.resourceHandler, scanInfo.PolicyIdentifier, scanData, cautils.NewProgressHandler(""))
if err != nil {
spanInit.End()
return resultsHandling, err
}
spanResources.End()
spanInit.End()
// ========================= opa testing =====================

View File

@@ -63,7 +63,7 @@ func (opap *OPAProcessor) ProcessRulesListener(ctx context.Context, progressList
ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Policies, opap.OPASessionObj.AllPolicies)
maxGoRoutines, err := parseIntEnvVar("RULE_PROCESSING_GOMAXPROCS", 2*runtime.NumCPU())
maxGoRoutines, err := cautils.ParseIntEnvVar("RULE_PROCESSING_GOMAXPROCS", 2*runtime.NumCPU())
if err != nil {
logger.L().Ctx(ctx).Warning(err.Error())
}
@@ -332,6 +332,15 @@ func (opap *OPAProcessor) processRule(ctx context.Context, rule *reporthandling.
if ruleResponse.FixCommand != "" {
ruleResult.Paths = append(ruleResult.Paths, armotypes.PosturePaths{FixCommand: ruleResponse.FixCommand})
}
// if ruleResponse has relatedObjects, add them to ruleResult
if len(ruleResponse.RelatedObjects) > 0 {
for _, relatedObject := range ruleResponse.RelatedObjects {
wl := objectsenvelopes.NewObject(relatedObject.Object)
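// skip related objects that cannot be converted into an identifiable workload object (NewObject returns nil)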
if wl != nil {
ruleResult.RelatedResourcesIDs = append(ruleResult.RelatedResourcesIDs, wl.GetID())
}
}
}
resources[failedResource.GetID()] = ruleResult
}

View File

@@ -16,6 +16,7 @@ import (
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/mocks"
"github.com/kubescape/opa-utils/reporthandling"
"github.com/kubescape/opa-utils/reporthandling/results/v1/resourcesresults"
"github.com/kubescape/opa-utils/resources"
"github.com/stretchr/testify/assert"
@@ -25,10 +26,14 @@ import (
var (
//go:embed testdata/opaSessionObjMock.json
opaSessionObjMockData string
//go:embed testdata/opaSessionObjMock1.json
opaSessionObjMockData1 string
//go:embed testdata/regoDependenciesDataMock.json
regoDependenciesData string
allResourcesMockData []byte
//go:embed testdata/resourcesMock1.json
resourcesMock1 []byte
)
func unzipAllResourcesTestDataAndSetVar(zipFilePath, destFilePath string) error {
@@ -73,17 +78,17 @@ func unzipAllResourcesTestDataAndSetVar(zipFilePath, destFilePath string) error
return nil
}
func NewOPAProcessorMock() *OPAProcessor {
func NewOPAProcessorMock(opaSessionObjMock string, resourcesMock []byte) *OPAProcessor {
opap := &OPAProcessor{}
if err := json.Unmarshal([]byte(regoDependenciesData), &opap.regoDependenciesData); err != nil {
panic(err)
}
// no err check because Unmarshal will fail on AllResources field (expected)
json.Unmarshal([]byte(opaSessionObjMockData), &opap.OPASessionObj)
json.Unmarshal([]byte(opaSessionObjMock), &opap.OPASessionObj)
opap.AllResources = make(map[string]workloadinterface.IMetadata)
allResources := make(map[string]map[string]interface{})
if err := json.Unmarshal(allResourcesMockData, &allResources); err != nil {
if err := json.Unmarshal(resourcesMock, &allResources); err != nil {
panic(err)
}
for i := range allResources {
@@ -149,7 +154,7 @@ func BenchmarkProcess(b *testing.B) {
testName := fmt.Sprintf("opaprocessor.Process_%d", maxGoRoutines)
b.Run(testName, func(b *testing.B) {
// setup
opap := NewOPAProcessorMock()
opap := NewOPAProcessorMock(opaSessionObjMockData, allResourcesMockData)
b.ResetTimer()
var maxHeap uint64
quitChan := make(chan bool)
@@ -245,3 +250,85 @@ func TestProcessResourcesResult(t *testing.T) {
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Passed())
assert.Equal(t, 0, summaryDetails.ListResourcesIDs(nil).Skipped())
}
// don't parallelize this test because it uses a global variable - allResourcesMockData
func TestProcessRule(t *testing.T) {
testCases := []struct {
name string
rule reporthandling.PolicyRule
resourcesMock []byte
opaSessionObjMock string
expectedResult map[string]*resourcesresults.ResourceAssociatedRule
}{
{
name: "TestRelatedResourcesIDs",
rule: reporthandling.PolicyRule{
PortalBase: armotypes.PortalBase{
Name: "exposure-to-internet",
Attributes: map[string]interface{}{
"armoBuiltin": true,
},
},
Rule: "package armo_builtins\n\n# Checks if NodePort or LoadBalancer is connected to a workload to expose something\ndeny[msga] {\n service := input[_]\n service.kind == \"Service\"\n is_exposed_service(service)\n \n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, service)\n failPath := [\"spec.type\"]\n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through service '%v'\", [wl.metadata.name, service.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"alertScore\": 7,\n \"fixPaths\": [],\n \"failedPaths\": [],\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": service,\n \"failedPaths\": failPath,\n }]\n }\n}\n\n# Checks if Ingress is connected to a service and a workload to expose something\ndeny[msga] {\n ingress := input[_]\n ingress.kind == \"Ingress\"\n \n svc := input[_]\n svc.kind == \"Service\"\n # avoid duplicate alerts\n # if service is already exposed through NodePort or LoadBalancer workload will fail on that\n not is_exposed_service(svc)\n\n wl := input[_]\n spec_template_spec_patterns := {\"Deployment\", \"ReplicaSet\", \"DaemonSet\", \"StatefulSet\", \"Pod\", \"Job\", \"CronJob\"}\n spec_template_spec_patterns[wl.kind]\n wl_connected_to_service(wl, svc)\n\n result := svc_connected_to_ingress(svc, ingress)\n \n msga := {\n \"alertMessage\": sprintf(\"workload '%v' is exposed through ingress '%v'\", [wl.metadata.name, ingress.metadata.name]),\n \"packagename\": \"armo_builtins\",\n \"failedPaths\": [],\n \"fixPaths\": [],\n \"alertScore\": 7,\n \"alertObject\": {\n \"k8sApiObjects\": [wl]\n },\n \"relatedObjects\": [{\n \"object\": ingress,\n \"failedPaths\": result,\n }]\n }\n} \n\n# ====================================================================================\n\nis_exposed_service(svc) {\n svc.spec.type == \"NodePort\"\n}\n\nis_exposed_service(svc) {\n svc.spec.type == \"LoadBalancer\"\n}\n\nwl_connected_to_service(wl, svc) {\n count({x | svc.spec.selector[x] == wl.metadata.labels[x]}) == count(svc.spec.selector)\n}\n\nwl_connected_to_service(wl, svc) {\n wl.spec.selector.matchLabels == svc.spec.selector\n}\n\n# check if service is connected to ingress\nsvc_connected_to_ingress(svc, ingress) = result {\n rule := ingress.spec.rules[i]\n paths := rule.http.paths[j]\n svc.metadata.name == paths.backend.service.name\n result := [sprintf(\"ingress.spec.rules[%d].http.paths[%d].backend.service.name\", [i,j])]\n}\n\n",
Match: []reporthandling.RuleMatchObjects{
{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"Pod", "Service"},
},
{
APIGroups: []string{"apps"},
APIVersions: []string{"v1"},
Resources: []string{"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet"},
},
{
APIGroups: []string{"batch"},
APIVersions: []string{"*"},
Resources: []string{"Job", "CronJob"},
},
{
APIGroups: []string{"extensions", "networking.k8s.io"},
APIVersions: []string{"v1beta1", "v1"},
Resources: []string{"Ingress"},
},
},
Description: "fails in case the running workload has binded Service or Ingress that are exposing it on Internet.",
Remediation: "",
RuleQuery: "armo_builtins",
RuleLanguage: reporthandling.RegoLanguage,
},
resourcesMock: resourcesMock1,
opaSessionObjMock: opaSessionObjMockData1,
expectedResult: map[string]*resourcesresults.ResourceAssociatedRule{
"/v1/default/Pod/fake-pod-1-22gck": {
Name: "exposure-to-internet",
ControlConfigurations: map[string][]string{},
Status: "failed",
SubStatus: "",
Paths: nil,
Exception: nil,
RelatedResourcesIDs: []string{
"/v1/default/Service/fake-service-1",
},
},
"/v1/default/Service/fake-service-1": {
Name: "exposure-to-internet",
ControlConfigurations: map[string][]string{},
Status: "passed",
SubStatus: "",
Paths: nil,
Exception: nil,
RelatedResourcesIDs: nil,
},
},
},
}
for _, tc := range testCases {
// the all-resources JSON is a large file, so we unzip it and set the variable before running the test
unzipAllResourcesTestDataAndSetVar("testdata/allResourcesMock.json.zip", "testdata/allResourcesMock.json")
opap := NewOPAProcessorMock(tc.opaSessionObjMock, tc.resourcesMock)
resources, _, err := opap.processRule(context.Background(), &tc.rule, nil)
assert.NoError(t, err)
assert.Equal(t, tc.expectedResult, resources)
}
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,220 @@
{
"/v1/default/Pod/fake-pod-1-22gck": {
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"metadata\":{\"annotations\":{},\"name\":\"fake-pod-1-22gck\",\"namespace\":\"default\"},\"spec\":{\"containers\":[{\"image\":\"redis\",\"name\":\"fake-pod-1-22gck\",\"volumeMounts\":[{\"mountPath\":\"/etc/foo\",\"name\":\"foo\",\"readOnly\":true}]}],\"volumes\":[{\"name\":\"foo\",\"secret\":{\"optional\":true,\"secretName\":\"mysecret\"}}]}}\n"
},
"creationTimestamp": "2023-06-22T07:47:38Z",
"name": "fake-pod-1-22gck",
"namespace": "default",
"resourceVersion": "1087189",
"uid": "046753fa-c7b6-46dd-ae18-dd68b8b20cd3",
"labels": {"app": "argo-server"}
},
"spec": {
"containers": [
{
"image": "redis",
"imagePullPolicy": "Always",
"name": "fake-pod-1-22gck",
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/etc/foo",
"name": "foo",
"readOnly": true
},
{
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount",
"name": "kube-api-access-lrpxm",
"readOnly": true
}
]
}
],
"dnsPolicy": "ClusterFirst",
"enableServiceLinks": true,
"nodeName": "minikube-yiscah",
"preemptionPolicy": "PreemptLowerPriority",
"priority": 0,
"restartPolicy": "Always",
"schedulerName": "default-scheduler",
"securityContext": {},
"serviceAccount": "default",
"serviceAccountName": "default",
"terminationGracePeriodSeconds": 30,
"tolerations": [
{
"effect": "NoExecute",
"key": "node.kubernetes.io/not-ready",
"operator": "Exists",
"tolerationSeconds": 300
},
{
"effect": "NoExecute",
"key": "node.kubernetes.io/unreachable",
"operator": "Exists",
"tolerationSeconds": 300
}
],
"volumes": [
{
"name": "foo",
"secret": {
"defaultMode": 420,
"optional": true,
"secretName": "mysecret"
}
},
{
"name": "kube-api-access-lrpxm",
"projected": {
"defaultMode": 420,
"sources": [
{
"serviceAccountToken": {
"expirationSeconds": 3607,
"path": "token"
}
},
{
"configMap": {
"items": [
{
"key": "ca.crt",
"path": "ca.crt"
}
],
"name": "kube-root-ca.crt"
}
},
{
"downwardAPI": {
"items": [
{
"fieldRef": {
"apiVersion": "v1",
"fieldPath": "metadata.namespace"
},
"path": "namespace"
}
]
}
}
]
}
}
]
},
"status": {
"conditions": [
{
"lastProbeTime": null,
"lastTransitionTime": "2023-06-22T07:47:38Z",
"status": "True",
"type": "Initialized"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2023-07-18T05:07:57Z",
"status": "True",
"type": "Ready"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2023-07-18T05:07:57Z",
"status": "True",
"type": "ContainersReady"
},
{
"lastProbeTime": null,
"lastTransitionTime": "2023-06-22T07:47:38Z",
"status": "True",
"type": "PodScheduled"
}
],
"containerStatuses": [
{
"containerID": "docker://a3a1aac00031c6ab85f75cfa17d14ebd71ab15f1fc5c82a262449621a77d7a7e",
"image": "redis:latest",
"imageID": "docker-pullable://redis@sha256:08a82d4bf8a8b4dd94e8f5408cdbad9dd184c1cf311d34176cd3e9972c43f872",
"lastState": {
"terminated": {
"containerID": "docker://1ae623f4faf8cda5dabdc65c342752dfdf1675cb173b46875596c2eb0dae472f",
"exitCode": 255,
"finishedAt": "2023-07-18T05:03:55Z",
"reason": "Error",
"startedAt": "2023-07-17T16:32:35Z"
}
},
"name": "fake-pod-1-22gck",
"ready": true,
"restartCount": 9,
"started": true,
"state": {
"running": {
"startedAt": "2023-07-18T05:07:56Z"
}
}
}
],
"hostIP": "192.168.85.2",
"phase": "Running",
"podIP": "10.244.1.131",
"podIPs": [
{
"ip": "10.244.1.131"
}
],
"qosClass": "BestEffort",
"startTime": "2023-06-22T07:47:38Z"
}
},
"/v1/default/Service/fake-service-1": {
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"name\":\"fake-service-1\",\"namespace\":\"default\"},\"spec\":{\"clusterIP\":\"10.96.0.11\",\"ports\":[{\"port\":80,\"protocol\":\"TCP\",\"targetPort\":9376}],\"selector\":{\"app\":\"argo-server\"},\"type\":\"LoadBalancer\"},\"status\":{\"loadBalancer\":{\"ingress\":[{\"ip\":\"192.0.2.127\"}]}}}\n"
},
"creationTimestamp": "2023-07-09T06:22:27Z",
"name": "fake-service-1",
"namespace": "default",
"resourceVersion": "981856",
"uid": "dd629eb1-6779-4298-a70f-0bdbd046d409"
},
"spec": {
"allocateLoadBalancerNodePorts": true,
"clusterIP": "10.96.0.11",
"clusterIPs": [
"10.96.0.11"
],
"externalTrafficPolicy": "Cluster",
"internalTrafficPolicy": "Cluster",
"ipFamilies": [
"IPv4"
],
"ipFamilyPolicy": "SingleStack",
"ports": [
{
"nodePort": 30706,
"port": 80,
"protocol": "TCP",
"targetPort": 9376
}
],
"selector": {
"app": "argo-server"
},
"sessionAffinity": "None",
"type": "LoadBalancer"
},
"status": {
"loadBalancer": {}
}
}
}

File diff suppressed because one or more lines are too long

View File

@@ -2,8 +2,6 @@ package opaprocessor
import (
"fmt"
"os"
"strconv"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
@@ -95,17 +93,3 @@ var cosignHasSignatureDefinition = func(bctx rego.BuiltinContext, a *ast.Term) (
}
return ast.BooleanTerm(has_signature(string(aStr))), nil
}
func parseIntEnvVar(varName string, defaultValue int) (int, error) {
varValue, exists := os.LookupEnv(varName)
if !exists {
return defaultValue, nil
}
intValue, err := strconv.Atoi(varValue)
if err != nil {
return defaultValue, fmt.Errorf("failed to parse %s env var as int: %w", varName, err)
}
return intValue, nil
}

View File

@@ -1,7 +1,6 @@
package opaprocessor
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
@@ -29,59 +28,3 @@ func TestInitializeSummaryDetails(t *testing.T) {
assert.Equal(t, 2, len(summaryDetails.Frameworks))
assert.Equal(t, 3, len(summaryDetails.Controls))
}
func TestParseIntEnvVar(t *testing.T) {
testCases := []struct {
expectedErr string
name string
varName string
varValue string
defaultValue int
expected int
}{
{
name: "Variable does not exist",
varName: "DOES_NOT_EXIST",
varValue: "",
defaultValue: 123,
expected: 123,
expectedErr: "",
},
{
name: "Variable exists and is a valid integer",
varName: "MY_VAR",
varValue: "456",
defaultValue: 123,
expected: 456,
expectedErr: "",
},
{
name: "Variable exists but is not a valid integer",
varName: "MY_VAR",
varValue: "not_an_integer",
defaultValue: 123,
expected: 123,
expectedErr: "failed to parse MY_VAR env var as int",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
if tc.varValue != "" {
os.Setenv(tc.varName, tc.varValue)
} else {
os.Unsetenv(tc.varName)
}
actual, err := parseIntEnvVar(tc.varName, tc.defaultValue)
if tc.expectedErr != "" {
assert.NotNil(t, err)
assert.ErrorContains(t, err, tc.expectedErr)
} else {
assert.Nil(t, err)
}
assert.Equalf(t, tc.expected, actual, "unexpected result")
})
}
}

View File

@@ -0,0 +1,73 @@
package policyhandler
import (
"sync"
"time"
)
// TimedCache caches a single value for a specified time duration (TTL).
// After the TTL has passed, the value is invalidated.
//
// The cache is thread safe.
type TimedCache[T any] struct {
value T
isSet bool
ttl time.Duration
expiration int64
mutex sync.RWMutex
}
func NewTimedCache[T any](ttl time.Duration) *TimedCache[T] {
cache := &TimedCache[T]{
ttl: ttl,
isSet: false,
}
// start the invalidate task only when the ttl is greater than 0 (cache is enabled)
if ttl > 0 {
go cache.invalidateTask()
}
return cache
}
func (c *TimedCache[T]) Set(value T) {
c.mutex.Lock()
defer c.mutex.Unlock()
// cache is disabled
if c.ttl == 0 {
return
}
c.isSet = true
c.value = value
c.expiration = time.Now().Add(c.ttl).UnixNano()
}
func (c *TimedCache[T]) Get() (T, bool) {
c.mutex.RLock()
defer c.mutex.RUnlock()
if !c.isSet || time.Now().UnixNano() > c.expiration {
return c.value, false
}
return c.value, true
}
func (c *TimedCache[T]) invalidateTask() {
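// wake up every TTL interval and clear the value once it has expired;
// note: there is no stop mechanism, so this goroutine lives for the lifetime of the process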
for {
<-time.After(c.ttl)
if time.Now().UnixNano() > c.expiration {
c.Invalidate()
}
}
}
func (c *TimedCache[T]) Invalidate() {
c.mutex.Lock()
defer c.mutex.Unlock()
c.isSet = false
}
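
A minimal usage sketch, written as if it sat in the same policyhandler package; the values and the five-minute TTL are illustrative:

package policyhandler

import (
	"fmt"
	"time"
)

// exampleTimedCacheUsage is a hypothetical helper illustrating the cache semantics.
func exampleTimedCacheUsage() {
	// cache a value for five minutes; Get reports whether a fresh value is present
	cache := NewTimedCache[string](5 * time.Minute)
	cache.Set("downloaded-frameworks")
	if v, ok := cache.Get(); ok {
		fmt.Println("cache hit:", v)
	}

	// a zero TTL disables the cache: Set becomes a no-op and Get always misses
	disabled := NewTimedCache[int](0)
	disabled.Set(42)
	if _, ok := disabled.Get(); !ok {
		fmt.Println("cache disabled")
	}
}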

View File

@@ -0,0 +1,75 @@
package policyhandler
import (
"testing"
"time"
)
func TestTimedCache(t *testing.T) {
tests := []struct {
name string
// value ttl
ttl time.Duration
// value to set
value int
// time to wait before checking if value exists
wait time.Duration
// number of times to check if value exists (with wait in between)
checks int
// should the value exist in cache
exists bool
// expected cache value
wantVal int
}{
{
name: "value exists before ttl",
ttl: time.Second * 5,
value: 42,
wait: time.Second * 1,
checks: 2,
exists: true,
wantVal: 42,
},
{
name: "value does not exist after ttl",
ttl: time.Second * 3,
value: 55,
wait: time.Second * 4,
checks: 1,
exists: false,
},
{
name: "cache is disabled (ttl = 0) always returns false",
ttl: 0,
value: 55,
wait: 0,
checks: 1,
exists: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
cache := NewTimedCache[int](tt.ttl)
cache.Set(tt.value)
for i := 0; i < tt.checks; i++ {
// Wait for the specified duration
time.Sleep(tt.wait)
// Get the value from the cache
value, exists := cache.Get()
// Check if value exists
if exists != tt.exists {
t.Errorf("Expected exists to be %v, got %v", tt.exists, exists)
}
// Check value
if exists && value != tt.wantVal {
t.Errorf("Expected value to be %d, got %d", tt.wantVal, value)
}
}
})
}
}

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"strings"
"github.com/armosec/armoapi-go/armotypes"
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
"github.com/kubescape/kubescape/v2/core/cautils"
@@ -14,7 +15,55 @@ import (
"go.opentelemetry.io/otel"
)
func (policyHandler *PolicyHandler) getPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier, policiesAndResources *cautils.OPASessionObj) error {
const (
PoliciesCacheTtlEnvVar = "POLICIES_CACHE_TTL"
)
var policyHandlerInstance *PolicyHandler
// PolicyHandler
type PolicyHandler struct {
getters *cautils.Getters
cachedPolicyIdentifiers *TimedCache[[]string]
cachedFrameworks *TimedCache[[]reporthandling.Framework]
cachedExceptions *TimedCache[[]armotypes.PostureExceptionPolicy]
cachedControlInputs *TimedCache[map[string][]string]
}
// NewPolicyHandler returns the `PolicyHandler` singleton, creating it on first use.
// The PolicyHandler supports caching of downloaded policies and exceptions by setting the `POLICIES_CACHE_TTL` environment variable (default is no caching).
func NewPolicyHandler() *PolicyHandler {
if policyHandlerInstance == nil {
cacheTtl := getPoliciesCacheTtl()
policyHandlerInstance = &PolicyHandler{
cachedPolicyIdentifiers: NewTimedCache[[]string](cacheTtl),
cachedFrameworks: NewTimedCache[[]reporthandling.Framework](cacheTtl),
cachedExceptions: NewTimedCache[[]armotypes.PostureExceptionPolicy](cacheTtl),
cachedControlInputs: NewTimedCache[map[string][]string](cacheTtl),
}
}
return policyHandlerInstance
}
func (policyHandler *PolicyHandler) CollectPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
opaSessionObj := cautils.NewOPASessionObj(ctx, nil, nil, scanInfo)
policyHandler.getters = &scanInfo.Getters
// get policies, exceptions and controls inputs
policies, exceptions, controlInputs, err := policyHandler.getPolicies(ctx, policyIdentifier)
if err != nil {
return opaSessionObj, err
}
opaSessionObj.Policies = policies
opaSessionObj.Exceptions = exceptions
opaSessionObj.RegoInputData.PostureControlInputs = controlInputs
return opaSessionObj, nil
}
func (policyHandler *PolicyHandler) getPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) (policies []reporthandling.Framework, exceptions []armotypes.PostureExceptionPolicy, controlInputs map[string][]string, err error) {
ctx, span := otel.Tracer("").Start(ctx, "policyHandler.getPolicies")
defer span.End()
logger.L().Info("Downloading/Loading policy definitions")
@@ -22,38 +71,57 @@ func (policyHandler *PolicyHandler) getPolicies(ctx context.Context, policyIdent
cautils.StartSpinner()
defer cautils.StopSpinner()
policies, err := policyHandler.getScanPolicies(ctx, policyIdentifier)
// get policies
policies, err = policyHandler.getScanPolicies(ctx, policyIdentifier)
if err != nil {
return err
return nil, nil, nil, err
}
if len(policies) == 0 {
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(policyIdentifier), ", "))
return nil, nil, nil, fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(policyIdentifier), ", "))
}
policiesAndResources.Policies = policies
// get exceptions
exceptionPolicies, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)
if err == nil {
policiesAndResources.Exceptions = exceptionPolicies
} else {
if exceptions, err = policyHandler.getExceptions(); err != nil {
logger.L().Ctx(ctx).Warning("failed to load exceptions", helpers.Error(err))
}
// get account configuration
controlsInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.ClusterName)
if err == nil {
policiesAndResources.RegoInputData.PostureControlInputs = controlsInputs
} else {
if controlInputs, err = policyHandler.getControlInputs(); err != nil {
logger.L().Ctx(ctx).Warning(err.Error())
}
cautils.StopSpinner()
cautils.StopSpinner()
logger.L().Success("Downloaded/Loaded policy")
return nil
return policies, exceptions, controlInputs, nil
}
// getScanPolicies returns policies from the cache or downloads them; it returns an error if the policies could not be downloaded.
func (policyHandler *PolicyHandler) getScanPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
policyIdentifiersSlice := policyIdentifierToSlice(policyIdentifier)
// check if policies are cached
if cachedPolicies, policiesExist := policyHandler.cachedFrameworks.Get(); policiesExist {
// check if the cached policies are the same as the requested policies, otherwise download the policies
if cachedIdentifiers, identifiersExist := policyHandler.cachedPolicyIdentifiers.Get(); identifiersExist && cautils.StringSlicesAreEqual(cachedIdentifiers, policyIdentifiersSlice) {
logger.L().Info("Using cached policies")
return cachedPolicies, nil
}
logger.L().Debug("Cached policies are not the same as the requested policies")
policyHandler.cachedPolicyIdentifiers.Invalidate()
policyHandler.cachedFrameworks.Invalidate()
}
policies, err := policyHandler.downloadScanPolicies(ctx, policyIdentifier)
if err == nil {
policyHandler.cachedFrameworks.Set(policies)
policyHandler.cachedPolicyIdentifiers.Set(policyIdentifiersSlice)
}
return policies, err
}
func (policyHandler *PolicyHandler) downloadScanPolicies(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier) ([]reporthandling.Framework, error) {
frameworks := []reporthandling.Framework{}
switch getScanKind(policyIdentifier) {
@@ -102,10 +170,30 @@ func (policyHandler *PolicyHandler) getScanPolicies(ctx context.Context, policyI
return frameworks, nil
}
func policyIdentifierToSlice(rules []cautils.PolicyIdentifier) []string {
s := []string{}
for i := range rules {
s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Identifier))
func (policyHandler *PolicyHandler) getExceptions() ([]armotypes.PostureExceptionPolicy, error) {
if cachedExceptions, exist := policyHandler.cachedExceptions.Get(); exist {
logger.L().Info("Using cached exceptions")
return cachedExceptions, nil
}
return s
exceptions, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)
if err == nil {
policyHandler.cachedExceptions.Set(exceptions)
}
return exceptions, err
}
func (policyHandler *PolicyHandler) getControlInputs() (map[string][]string, error) {
if cachedControlInputs, exist := policyHandler.cachedControlInputs.Get(); exist {
logger.L().Info("Using cached control inputs")
return cachedControlInputs, nil
}
controlInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.ClusterName)
if err == nil {
policyHandler.cachedControlInputs.Set(controlInputs)
}
return controlInputs, err
}
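
For reference, a hedged sketch of enabling the cache: the TTL is read in minutes from POLICIES_CACHE_TTL when the singleton is first built, and the core/pkg/policyhandler import path below is assumed from the surrounding package layout:

package main

import (
	"os"

	"github.com/kubescape/kubescape/v2/core/pkg/policyhandler"
)

func main() {
	// hypothetical wiring: cache downloaded policies, exceptions and control inputs for 10 minutes;
	// the variable must be set before the first NewPolicyHandler call, which reads it once
	os.Setenv("POLICIES_CACHE_TTL", "10")
	_ = policyhandler.NewPolicyHandler()
}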

View File

@@ -3,6 +3,7 @@ package policyhandler
import (
"fmt"
"strings"
"time"
apisv1 "github.com/kubescape/opa-utils/httpserver/apis/v1"
"github.com/kubescape/opa-utils/reporthandling"
@@ -35,3 +36,20 @@ func validateFramework(framework *reporthandling.Framework) error {
}
return nil
}
// getPoliciesCacheTtl - get policies cache TTL from environment variable or return 0 if not set
func getPoliciesCacheTtl() time.Duration {
if val, err := cautils.ParseIntEnvVar(PoliciesCacheTtlEnvVar, 0); err == nil {
return time.Duration(val) * time.Minute
}
return 0
}
func policyIdentifierToSlice(rules []cautils.PolicyIdentifier) []string {
s := []string{}
for i := range rules {
s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Identifier))
}
return s
}

View File

@@ -11,8 +11,8 @@ func Test_validateFramework(t *testing.T) {
framework *reporthandling.Framework
}
tests := []struct {
name string
args args
name string
wantErr bool
}{
{

View File

@@ -1,4 +1,4 @@
package policyhandler
package resourcehandler
import (
"context"
@@ -8,7 +8,7 @@ import (
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/pkg/resourcehandler"
"github.com/kubescape/opa-utils/reporthandling/apis"
helpersv1 "github.com/kubescape/opa-utils/reporthandling/helpers/v1"
reporthandlingv2 "github.com/kubescape/opa-utils/reporthandling/v2"
@@ -22,7 +22,7 @@ import (
)
var (
//go:embed kubeconfig_mock.json
//go:embed testdata/kubeconfig_mock.json
kubeConfigMock string
)
@@ -35,9 +35,9 @@ func getKubeConfigMock() *clientcmdapi.Config {
}
func Test_getCloudMetadata(t *testing.T) {
type args struct {
context string
opaSessionObj *cautils.OPASessionObj
kubeConfig *clientcmdapi.Config
context string
}
kubeConfig := getKubeConfigMock()
tests := []struct {
@@ -221,7 +221,7 @@ func (*iResourceHandlerMock) GetClusterAPIServerInfo() *version.Info {
// https://github.com/kubescape/kubescape/pull/1004
// Cluster named .*eks.* config without a cloudconfig panics whereas we just want to scan a file
func getResourceHandlerMock() *resourcehandler.K8sResourceHandler {
func getResourceHandlerMock() *K8sResourceHandler {
client := fakeclientset.NewSimpleClientset()
fakeDiscovery := client.Discovery()
@@ -232,10 +232,10 @@ func getResourceHandlerMock() *resourcehandler.K8sResourceHandler {
Context: context.Background(),
}
return resourcehandler.NewK8sResourceHandler(k8s, &resourcehandler.EmptySelector{}, nil, nil, nil)
return NewK8sResourceHandler(k8s, &EmptySelector{}, nil, nil, nil)
}
func Test_getResources(t *testing.T) {
policyHandler := &PolicyHandler{resourceHandler: getResourceHandlerMock()}
func Test_CollectResources(t *testing.T) {
resourceHandler := getResourceHandlerMock()
objSession := &cautils.OPASessionObj{
Metadata: &reporthandlingv2.Metadata{
ScanMetadata: reporthandlingv2.ScanMetadata{
@@ -249,12 +249,12 @@ func Test_getResources(t *testing.T) {
policyIdentifier := []cautils.PolicyIdentifier{{}}
assert.NotPanics(t, func() {
policyHandler.getResources(context.TODO(), policyIdentifier, objSession, cautils.NewProgressHandler(""))
CollectResources(context.TODO(), resourceHandler, policyIdentifier, objSession, cautils.NewProgressHandler(""))
}, "Cluster named .*eks.* without a cloud config panics on cluster scan !")
assert.NotPanics(t, func() {
objSession.Metadata.ScanMetadata.ScanningTarget = reportv2.File
policyHandler.getResources(context.TODO(), policyIdentifier, objSession, cautils.NewProgressHandler(""))
CollectResources(context.TODO(), resourceHandler, policyIdentifier, objSession, cautils.NewProgressHandler(""))
}, "Cluster named .*eks.* without a cloud config panics on non-cluster scan !")
}

View File

@@ -1,4 +1,4 @@
package policyhandler
package resourcehandler
import (
"context"
@@ -7,70 +7,30 @@ import (
logger "github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
helpersv1 "github.com/kubescape/opa-utils/reporthandling/helpers/v1"
"go.opentelemetry.io/otel"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
cloudsupportv1 "github.com/kubescape/k8s-interface/cloudsupport/v1"
"github.com/kubescape/kubescape/v2/core/pkg/opaprocessor"
reportv2 "github.com/kubescape/opa-utils/reporthandling/v2"
"github.com/kubescape/k8s-interface/cloudsupport"
cloudsupportv1 "github.com/kubescape/k8s-interface/cloudsupport/v1"
"github.com/kubescape/k8s-interface/k8sinterface"
"github.com/kubescape/kubescape/v2/core/cautils"
"github.com/kubescape/kubescape/v2/core/pkg/resourcehandler"
"github.com/kubescape/kubescape/v2/core/pkg/opaprocessor"
"github.com/kubescape/opa-utils/reporthandling/apis"
helpersv1 "github.com/kubescape/opa-utils/reporthandling/helpers/v1"
reportv2 "github.com/kubescape/opa-utils/reporthandling/v2"
"go.opentelemetry.io/otel"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
// PolicyHandler -
type PolicyHandler struct {
resourceHandler resourcehandler.IResourceHandler
// we are listening on this chan in opaprocessor/processorhandler.go/ProcessRulesListener func
getters *cautils.Getters
}
// CreatePolicyHandler Create ws-handler obj
func NewPolicyHandler(resourceHandler resourcehandler.IResourceHandler) *PolicyHandler {
return &PolicyHandler{
resourceHandler: resourceHandler,
}
}
func (policyHandler *PolicyHandler) CollectResources(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier, scanInfo *cautils.ScanInfo, progressListener opaprocessor.IJobProgressNotificationClient) (*cautils.OPASessionObj, error) {
opaSessionObj := cautils.NewOPASessionObj(ctx, nil, nil, scanInfo)
// validate notification
// TODO
policyHandler.getters = &scanInfo.Getters
// get policies
if err := policyHandler.getPolicies(ctx, policyIdentifier, opaSessionObj); err != nil {
return opaSessionObj, err
}
err := policyHandler.getResources(ctx, policyIdentifier, opaSessionObj, progressListener)
if err != nil {
return opaSessionObj, err
}
if (opaSessionObj.K8SResources == nil || len(*opaSessionObj.K8SResources) == 0) && (opaSessionObj.ArmoResource == nil || len(*opaSessionObj.ArmoResource) == 0) {
return opaSessionObj, fmt.Errorf("empty list of resources")
}
// update channel
return opaSessionObj, nil
}
func (policyHandler *PolicyHandler) getResources(ctx context.Context, policyIdentifier []cautils.PolicyIdentifier, opaSessionObj *cautils.OPASessionObj, progressListener opaprocessor.IJobProgressNotificationClient) error {
ctx, span := otel.Tracer("").Start(ctx, "policyHandler.getResources")
// CollectResources uses the provided resource handler to collect resources into the given OPASessionObj
func CollectResources(ctx context.Context, rsrcHandler IResourceHandler, policyIdentifier []cautils.PolicyIdentifier, opaSessionObj *cautils.OPASessionObj, progressListener opaprocessor.IJobProgressNotificationClient) error {
ctx, span := otel.Tracer("").Start(ctx, "resourcehandler.CollectResources")
defer span.End()
opaSessionObj.Report.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo(ctx)
opaSessionObj.Report.ClusterAPIServerInfo = rsrcHandler.GetClusterAPIServerInfo(ctx)
// set cloud metadata only when scanning a cluster
if opaSessionObj.Metadata.ScanMetadata.ScanningTarget == reportv2.Cluster {
setCloudMetadata(opaSessionObj)
}
resourcesMap, allResources, ksResources, err := policyHandler.resourceHandler.GetResources(ctx, opaSessionObj, &policyIdentifier[0].Designators, progressListener)
resourcesMap, allResources, ksResources, err := rsrcHandler.GetResources(ctx, opaSessionObj, &policyIdentifier[0].Designators, progressListener)
if err != nil {
return err
}
@@ -79,18 +39,13 @@ func (policyHandler *PolicyHandler) getResources(ctx context.Context, policyIden
opaSessionObj.AllResources = allResources
opaSessionObj.ArmoResource = ksResources
if (opaSessionObj.K8SResources == nil || len(*opaSessionObj.K8SResources) == 0) && (opaSessionObj.ArmoResource == nil || len(*opaSessionObj.ArmoResource) == 0) {
return fmt.Errorf("empty list of resources")
}
return nil
}
/* unused for now
func getDesignator(policyIdentifier []cautils.PolicyIdentifier) *armotypes.PortalDesignator {
if len(policyIdentifier) > 0 {
return &policyIdentifier[0].Designators
}
return &armotypes.PortalDesignator{}
}
*/
func setCloudMetadata(opaSessionObj *cautils.OPASessionObj) {
iCloudMetadata := getCloudMetadata(opaSessionObj, k8sinterface.GetConfig())
if iCloudMetadata == nil {

View File

@@ -19,7 +19,7 @@ import (
type AttackTracksGetterMock struct{}
func (mock *AttackTracksGetterMock) GetAttackTracks() ([]v1alpha1.AttackTrack, error) {
mock_1 := v1alpha1.AttackTrackMock(v1alpha1.AttackTrackStep{
mock_1 := v1alpha1.GetAttackTrackMock(v1alpha1.AttackTrackStep{
Name: "A",
SubSteps: []v1alpha1.AttackTrackStep{
{
@@ -39,12 +39,13 @@ func (mock *AttackTracksGetterMock) GetAttackTracks() ([]v1alpha1.AttackTrack, e
},
})
mock_2 := v1alpha1.AttackTrackMock(v1alpha1.AttackTrackStep{
mock_2 := v1alpha1.GetAttackTrackMock(v1alpha1.AttackTrackStep{
Name: "Z",
})
mock_2.Metadata["name"] = "TestAttackTrack_2"
return []v1alpha1.AttackTrack{*mock_1, *mock_2}, nil
m1 := mock_1.(*v1alpha1.AttackTrack)
m2 := mock_2.(*v1alpha1.AttackTrack)
m2.Metadata["name"] = "TestAttackTrack_2"
return []v1alpha1.AttackTrack{*m1, *m2}, nil
}
func ControlMock(id string, baseScore float32, tags, categories []string) reporthandling.Control {

View File

@@ -2,9 +2,7 @@ package printer
import (
"fmt"
"os"
"sort"
"strconv"
"strings"
"github.com/fatih/color"
@@ -77,23 +75,14 @@ func (prettyPrinter *PrettyPrinter) printResourceAttackGraph(attackTrack v1alpha
fmt.Fprintln(prettyPrinter.writer, tree.Print())
}
func getNumericValueFromEnvVar(envVar string, defaultValue int) int {
value := os.Getenv(envVar)
if value != "" {
if value, err := strconv.Atoi(value); err == nil {
return value
}
}
return defaultValue
}
func (prettyPrinter *PrettyPrinter) printAttackTracks(opaSessionObj *cautils.OPASessionObj) {
if !prettyPrinter.printAttackTree || opaSessionObj.ResourceAttackTracks == nil {
return
}
// check if counters are set in env vars and use them, otherwise use default values
topResourceCount := getNumericValueFromEnvVar("ATTACK_TREE_TOP_RESOURCES", TOP_RESOURCE_COUNT)
topVectorCount := getNumericValueFromEnvVar("ATTACK_TREE_TOP_VECTORS", TOP_VECTOR_COUNT)
topResourceCount, _ := cautils.ParseIntEnvVar("ATTACK_TREE_TOP_RESOURCES", TOP_RESOURCE_COUNT)
topVectorCount, _ := cautils.ParseIntEnvVar("ATTACK_TREE_TOP_VECTORS", TOP_VECTOR_COUNT)
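// parse errors are discarded here: ParseIntEnvVar already falls back to the default counts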
prioritizedResources := opaSessionObj.ResourcesPrioritized
resourceToAttackTrack := opaSessionObj.ResourceAttackTracks

go.mod (11 changed lines)
View File

@@ -4,7 +4,7 @@ go 1.20
require (
cloud.google.com/go/containeranalysis v0.9.0
github.com/armosec/armoapi-go v0.0.173
github.com/armosec/armoapi-go v0.0.202
github.com/armosec/utils-go v0.0.14
github.com/armosec/utils-k8s-go v0.0.13
github.com/briandowns/spinner v1.18.1
@@ -19,7 +19,7 @@ require (
github.com/kubescape/go-git-url v0.0.25
github.com/kubescape/go-logger v0.0.13
github.com/kubescape/k8s-interface v0.0.116
github.com/kubescape/opa-utils v0.0.250
github.com/kubescape/opa-utils v0.0.253
github.com/kubescape/rbac-utils v0.0.20
github.com/kubescape/regolibrary v1.0.286-rc.0
github.com/libgit2/git2go/v33 v33.0.9
@@ -36,7 +36,7 @@ require (
github.com/whilp/git-urls v1.0.0
go.opentelemetry.io/otel v1.16.0
go.opentelemetry.io/otel/metric v1.16.0
golang.org/x/mod v0.8.0
golang.org/x/mod v0.11.0
golang.org/x/sync v0.1.0
google.golang.org/api v0.114.0
google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
@@ -336,7 +336,7 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/exp v0.0.0-20230519143937-03e91628a987 // indirect
golang.org/x/exp v0.0.0-20230711023510-fffb14384f22 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.6.0 // indirect
golang.org/x/sys v0.8.0 // indirect
@@ -366,4 +366,5 @@ require (
)
replace github.com/libgit2/git2go/v33 => ./git2go
replace google.golang.org/grpc => google.golang.org/grpc v1.54.0
replace google.golang.org/grpc => google.golang.org/grpc v1.54.0

go.sum (17 changed lines)
View File

@@ -599,8 +599,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/armosec/armoapi-go v0.0.173 h1:TwNxmTxx9ATJPZBlld/53s/WvSVUfoF4gxgHT6UbFng=
github.com/armosec/armoapi-go v0.0.173/go.mod h1:xlW8dGq0vVzbuk+kDZqMQIkfU9P/iiiiDavoCIboqgI=
github.com/armosec/armoapi-go v0.0.202 h1:0PM89laC4rplz4eSELJ5ueVxxDIuEfIlDwlGa/c0pkg=
github.com/armosec/armoapi-go v0.0.202/go.mod h1:Fq2xtueM2ha0VK1b/PcbtbkAzHUgjqMz4MEdabNpdwY=
github.com/armosec/utils-go v0.0.14 h1:Q6HGxOyc5aPObgUM2FQpkYGXjj7/LSrUPkppFJGTexU=
github.com/armosec/utils-go v0.0.14/go.mod h1:F/K1mI/qcj7fNuJl7xktoCeHM83azOF0Zq6eC2WuPyU=
github.com/armosec/utils-k8s-go v0.0.13 h1:MzrRotrtZjpz4Yq1VRGbxDOfd6b5qRqZupzLnpj+W1A=
@@ -985,8 +985,8 @@ github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8w
github.com/go-rod/rod v0.111.0 h1:aMNNdz10GYPYec9z1WsFqwAdRYVsuufVTOrah7whG3I=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
@@ -1384,8 +1384,8 @@ github.com/kubescape/go-logger v0.0.13 h1:Rio+grBhpcdExZIVT+HcBVcgbvwrU/aVSV99iK
github.com/kubescape/go-logger v0.0.13/go.mod h1:Tod++iNn5kofkhjpfwjUrqie2YHkLZNoH0Pq0+KldMo=
github.com/kubescape/k8s-interface v0.0.116 h1:Sn76gsMLAArc5kbHZVoRMS6QlM4mOz9Dolpym9BOul8=
github.com/kubescape/k8s-interface v0.0.116/go.mod h1:ENpA9SkkS6E3PIT+AaMu/JGkuyE04aUamY+a7WLqsJQ=
github.com/kubescape/opa-utils v0.0.250 h1:SpMjtDB3EgyvbwxpCpZXAtQ/TixOeVQAmkcfi+w66KA=
github.com/kubescape/opa-utils v0.0.250/go.mod h1:SkNqbhUGipSYVE+oAUaHko6aggp8XVVbDChoNg48lao=
github.com/kubescape/opa-utils v0.0.253 h1:LOEum6gVscgjFlPT5WUHPsYbjELnMOJTcOrEJUKIRlM=
github.com/kubescape/opa-utils v0.0.253/go.mod h1:SkNqbhUGipSYVE+oAUaHko6aggp8XVVbDChoNg48lao=
github.com/kubescape/rbac-utils v0.0.20 h1:1MMxsCsCZ3ntDi8f9ZYYcY+K7bv50bDW5ZvnGnhMhJw=
github.com/kubescape/rbac-utils v0.0.20/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ=
github.com/kubescape/regolibrary v1.0.286-rc.0 h1:OzhtQEx1npAxTbgkbEpMLZvPWg6sh2CmCgQLs0j6pQ4=
@@ -2125,8 +2125,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
golang.org/x/exp v0.0.0-20230519143937-03e91628a987 h1:3xJIFvzUFbu4ls0BTBYcgbCGhA63eAOEMxIHugyXJqA=
golang.org/x/exp v0.0.0-20230519143937-03e91628a987/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/exp v0.0.0-20230711023510-fffb14384f22 h1:FqrVOBQxQ8r/UwwXibI0KMolVhvFiGobSfdE33deHJM=
golang.org/x/exp v0.0.0-20230711023510-fffb14384f22/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -2162,8 +2162,9 @@ golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

View File

@@ -13,7 +13,7 @@ require (
github.com/kubescape/go-logger v0.0.13
github.com/kubescape/k8s-interface v0.0.116
github.com/kubescape/kubescape/v2 v2.0.0-00010101000000-000000000000
github.com/kubescape/opa-utils v0.0.250
github.com/kubescape/opa-utils v0.0.253
github.com/stretchr/testify v1.8.3
go.opentelemetry.io/contrib/instrumentation/github.com/gorilla/mux/otelmux v0.38.0
go.opentelemetry.io/otel v1.16.0
@@ -26,8 +26,8 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.24.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/exp v0.0.0-20230519143937-03e91628a987 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/exp v0.0.0-20230711023510-fffb14384f22 // indirect
golang.org/x/mod v0.11.0 // indirect
golang.org/x/net v0.10.0 // indirect
golang.org/x/oauth2 v0.6.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
@@ -83,7 +83,7 @@ require (
github.com/alibabacloud-go/tea-utils v1.4.4 // indirect
github.com/alibabacloud-go/tea-xml v1.1.2 // indirect
github.com/aliyun/credentials-go v1.2.3 // indirect
github.com/armosec/armoapi-go v0.0.173 // indirect
github.com/armosec/armoapi-go v0.0.202 // indirect
github.com/armosec/utils-k8s-go v0.0.13 // indirect
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
github.com/aws/aws-sdk-go v1.44.114 // indirect

View File

@@ -599,8 +599,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj
github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/armosec/armoapi-go v0.0.173 h1:TwNxmTxx9ATJPZBlld/53s/WvSVUfoF4gxgHT6UbFng=
github.com/armosec/armoapi-go v0.0.173/go.mod h1:xlW8dGq0vVzbuk+kDZqMQIkfU9P/iiiiDavoCIboqgI=
github.com/armosec/armoapi-go v0.0.202 h1:0PM89laC4rplz4eSELJ5ueVxxDIuEfIlDwlGa/c0pkg=
github.com/armosec/armoapi-go v0.0.202/go.mod h1:Fq2xtueM2ha0VK1b/PcbtbkAzHUgjqMz4MEdabNpdwY=
github.com/armosec/utils-go v0.0.14 h1:Q6HGxOyc5aPObgUM2FQpkYGXjj7/LSrUPkppFJGTexU=
github.com/armosec/utils-go v0.0.14/go.mod h1:F/K1mI/qcj7fNuJl7xktoCeHM83azOF0Zq6eC2WuPyU=
github.com/armosec/utils-k8s-go v0.0.13 h1:MzrRotrtZjpz4Yq1VRGbxDOfd6b5qRqZupzLnpj+W1A=
@@ -987,8 +987,8 @@ github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8w
github.com/go-rod/rod v0.111.0 h1:aMNNdz10GYPYec9z1WsFqwAdRYVsuufVTOrah7whG3I=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
@@ -1389,8 +1389,8 @@ github.com/kubescape/go-logger v0.0.13 h1:Rio+grBhpcdExZIVT+HcBVcgbvwrU/aVSV99iK
github.com/kubescape/go-logger v0.0.13/go.mod h1:Tod++iNn5kofkhjpfwjUrqie2YHkLZNoH0Pq0+KldMo=
github.com/kubescape/k8s-interface v0.0.116 h1:Sn76gsMLAArc5kbHZVoRMS6QlM4mOz9Dolpym9BOul8=
github.com/kubescape/k8s-interface v0.0.116/go.mod h1:ENpA9SkkS6E3PIT+AaMu/JGkuyE04aUamY+a7WLqsJQ=
github.com/kubescape/opa-utils v0.0.250 h1:SpMjtDB3EgyvbwxpCpZXAtQ/TixOeVQAmkcfi+w66KA=
github.com/kubescape/opa-utils v0.0.250/go.mod h1:SkNqbhUGipSYVE+oAUaHko6aggp8XVVbDChoNg48lao=
github.com/kubescape/opa-utils v0.0.253 h1:LOEum6gVscgjFlPT5WUHPsYbjELnMOJTcOrEJUKIRlM=
github.com/kubescape/opa-utils v0.0.253/go.mod h1:SkNqbhUGipSYVE+oAUaHko6aggp8XVVbDChoNg48lao=
github.com/kubescape/rbac-utils v0.0.20 h1:1MMxsCsCZ3ntDi8f9ZYYcY+K7bv50bDW5ZvnGnhMhJw=
github.com/kubescape/rbac-utils v0.0.20/go.mod h1:t57AhSrjuNGQ+mpZWQM/hBzrCOeKBDHegFoVo4tbikQ=
github.com/kubescape/regolibrary v1.0.286-rc.0 h1:OzhtQEx1npAxTbgkbEpMLZvPWg6sh2CmCgQLs0j6pQ4=
@@ -2132,8 +2132,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
golang.org/x/exp v0.0.0-20230519143937-03e91628a987 h1:3xJIFvzUFbu4ls0BTBYcgbCGhA63eAOEMxIHugyXJqA=
golang.org/x/exp v0.0.0-20230519143937-03e91628a987/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/exp v0.0.0-20230711023510-fffb14384f22 h1:FqrVOBQxQ8r/UwwXibI0KMolVhvFiGobSfdE33deHJM=
golang.org/x/exp v0.0.0-20230711023510-fffb14384f22/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -2169,8 +2169,9 @@ golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

View File

@@ -60,18 +60,12 @@ func ToScanInfo(scanRequest *utilsmetav1.PostScanRequest) *cautils.ScanInfo {
}
func setTargetInScanInfo(scanRequest *utilsmetav1.PostScanRequest, scanInfo *cautils.ScanInfo) {
// remove empty targets from slice
scanRequest.TargetNames = slices.Filter(nil, scanRequest.TargetNames, func(e string) bool { return e != "" })
if scanRequest.TargetType != "" && len(scanRequest.TargetNames) > 0 {
if strings.EqualFold(string(scanRequest.TargetType), string(apisv1.KindFramework)) {
scanRequest.TargetType = apisv1.KindFramework
scanInfo.FrameworkScan = true
scanInfo.ScanAll = false
if cautils.StringInSlice(scanRequest.TargetNames, "all") != cautils.ValueNotFound { // if scan all frameworks
scanRequest.TargetNames = []string{}
scanInfo.ScanAll = true
}
scanInfo.ScanAll = slices.Contains(scanRequest.TargetNames, "all") || slices.Contains(scanRequest.TargetNames, "")
scanRequest.TargetNames = slices.Filter(nil, scanRequest.TargetNames, func(e string) bool { return e != "" && e != "all" })
} else if strings.EqualFold(string(scanRequest.TargetType), string(apisv1.KindControl)) {
scanRequest.TargetType = apisv1.KindControl
scanInfo.ScanAll = false

View File

@@ -72,6 +72,17 @@ func TestSetTargetInScanInfo(t *testing.T) {
assert.True(t, scanInfo.ScanAll)
assert.Equal(t, 0, len(scanInfo.PolicyIdentifier))
}
{
req := &utilsmetav1.PostScanRequest{
TargetType: apisv1.KindFramework,
TargetNames: []string{"", "security"},
}
scanInfo := &cautils.ScanInfo{}
setTargetInScanInfo(req, scanInfo)
assert.True(t, scanInfo.FrameworkScan)
assert.True(t, scanInfo.ScanAll)
assert.Equal(t, 1, len(scanInfo.PolicyIdentifier))
}
{
req := &utilsmetav1.PostScanRequest{
TargetType: apisv1.KindFramework,

View File

@@ -1,12 +0,0 @@
<html>
<head>
<title>
Kubescape Website
</title>
</head>
<body>
<h1>
Coming soon!
</h1>
</body>
</html>