Compare commits

..

36 Commits

Author SHA1 Message Date
dwertent
4e13609985 update get context 2022-03-21 09:08:01 +02:00
David Wertenteil
2e5e4328f6 Merge pull request #460 from dwertent/master
update readme
2022-03-20 11:30:37 +02:00
dwertent
d98a11a8fa udpate badges 2022-03-20 11:28:56 +02:00
dwertent
bdb25cbb66 adding vs code to readme 2022-03-20 11:08:48 +02:00
David Wertenteil
369804cb6e Merge pull request #459 from shm12/dev
send scan metadata
2022-03-20 10:45:19 +02:00
shm12
1b08a92095 send scan metadata 2022-03-20 10:05:40 +02:00
David Wertenteil
e787454d53 Merge pull request #458 from dwertent/master
fixed json output
2022-03-16 16:58:54 +02:00
dwertent
31d1ba663a json output 2022-03-16 16:58:05 +02:00
David Wertenteil
c3731d8ff6 Merge pull request #457 from dwertent/master
support frameworks from http request
2022-03-16 16:37:40 +02:00
dwertent
c5b46beb1a support frameworks from http request 2022-03-16 16:33:03 +02:00
dwertent
c5ca576c98 Merge remote-tracking branch 'armosec/dev' 2022-03-16 15:27:10 +02:00
dwertent
eae6458b42 fixed cmd init 2022-03-16 15:26:59 +02:00
David Wertenteil
aa1aa913b6 Merge pull request #456 from Daniel-GrunbergerCA/dev
Support status information
2022-03-16 15:17:49 +02:00
DanielGrunbergerCA
44084592cb fixes 2022-03-16 14:16:48 +02:00
DanielGrunbergerCA
6cacfb7b16 refactor 2022-03-16 12:22:21 +02:00
DanielGrunbergerCA
306d3a7081 fix table display 2022-03-16 12:12:39 +02:00
DanielGrunbergerCA
442530061f rm space 2022-03-16 12:09:53 +02:00
DanielGrunbergerCA
961a6f6ebc Merge remote-tracking branch 'upstream/dev' into dev 2022-03-16 12:08:38 +02:00
DanielGrunbergerCA
0d0c8e1b97 support status info 2022-03-16 12:08:04 +02:00
David Wertenteil
5b843ba2c4 Merge pull request #455 from dwertent/master
update prometheus format
2022-03-16 09:32:23 +02:00
dwertent
8f9b46cdbe update prometheus format 2022-03-16 09:25:45 +02:00
David Wertenteil
e16885a044 Merge pull request #452 from dwertent/master
update dockerfile and scan triggering
2022-03-15 22:12:31 +02:00
dwertent
06a2fa05be add ks user to dockerfile 2022-03-15 22:10:27 +02:00
dwertent
d26f90b98e update Prometheus yaml 2022-03-15 18:40:42 +02:00
David Wertenteil
b47c128eb3 Merge pull request #451 from dwertent/master
update prometheus format
2022-03-15 17:12:07 +02:00
dwertent
9d957b3c77 update output 2022-03-15 17:04:59 +02:00
dwertent
8ec5615569 junit format 2022-03-15 16:50:39 +02:00
David Wertenteil
fae73b827a Merge pull request #450 from dwertent/master
build each pkg
2022-03-14 19:27:47 +02:00
dwertent
6477437872 update readme 2022-03-14 19:14:31 +02:00
dwertent
6099f46dea adding docker build 2022-03-14 18:34:34 +02:00
Rotem Refael
5009e6ef47 change gif 2022-03-14 11:33:56 +02:00
Rotem Refael
c4450d3259 add web & CLI Interfaces 2022-03-14 11:27:57 +02:00
Rotem Refael
0c3339f1c9 update gif 2022-03-14 10:28:32 +02:00
Rotem Refael
faee3d5ad6 Add new video to readme 2022-03-14 10:14:55 +02:00
David Wertenteil
a279963b28 Merge pull request #449 from dwertent/master
split to packages
2022-03-13 19:37:02 +02:00
David Wertenteil
353a39d66a Merge pull request #448 from dwertent/master
microservice support
2022-03-10 17:17:35 +02:00
63 changed files with 1465 additions and 284 deletions

View File

@@ -51,7 +51,7 @@ jobs:
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
run: python3 --version && python3 build.py
run: cd cmd && python3 --version && python3 build.py
- name: Smoke Testing
env:

View File

@@ -35,7 +35,7 @@ jobs:
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
run: python3 --version && python3 build.py
run: cd cmd && python3 --version && python3 build.py
- name: Smoke Testing
env:

View File

@@ -19,8 +19,14 @@ jobs:
with:
go-version: 1.17
- name: Test
run: go test -v ./...
- name: Test cmd pkg
run: cd cmd && go test -v ./...
- name: Test core pkg
run: cd core && go test -v ./...
- name: Test httphandler pkg
run: cd httphandler && go test -v ./...
- name: Build
env:
@@ -30,7 +36,7 @@ jobs:
ArmoERServer: report.armo.cloud
ArmoWebsite: portal.armo.cloud
CGO_ENABLED: 0
run: python3 --version && python3 build.py
run: cd cmd && python3 --version && python3 build.py
- name: Smoke Testing
env:

View File

@@ -1,8 +1,7 @@
<img src="docs/kubescape.png" width="300" alt="logo" align="center">
<br>
[![build](https://github.com/armosec/kubescape/actions/workflows/build.yaml/badge.svg)](https://github.com/armosec/kubescape/actions/workflows/build.yaml)
[![Go Report Card](https://goreportcard.com/badge/github.com/armosec/kubescape)](https://goreportcard.com/report/github.com/armosec/kubescape)
![build](https://github.com/armosec/kubescape/actions/workflows/build.yaml/badge.svg) ![GitHub tag (latest by date)](https://img.shields.io/github/v/tag/armosec/kubescape?label=release&color=blue) ![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/armosec/kubescape?color=yellow)
Kubescape is a K8s open-source tool providing a multi-cloud K8s single pane of glass, including risk analysis, security compliance, RBAC visualizer and image vulnerabilities scanning.
@@ -12,8 +11,12 @@ Kubescape integrates natively with other DevOps tools, including Jenkins, Circle
</br>
# CLI Interface:
<img src="docs/demo.gif">
<!-- # [Web Interface:](https://portal.armo.cloud/)
<img src="docs/ARMO-header-2022.gif"> -->
# TL;DR
## Install:
```
@@ -225,7 +228,6 @@ kubescape download framework nsa --output /path/nsa.json
kubescape scan framework nsa --use-from /path/nsa.json
```
## Scan Periodically using Helm - Contributed by [@yonahd](https://github.com/yonahd)
[Please follow the instructions here](https://hub.armo.cloud/docs/installation-of-armo-in-cluster)
[helm chart repo](https://github.com/armosec/armo-helm)
@@ -255,6 +257,14 @@ Now you can submit the results to the Kubescape SaaS version -
kubescape submit results path/to/results.json
```
# Integrations
## VS Code Extension
![Visual Studio Marketplace Downloads](https://img.shields.io/visual-studio-marketplace/d/kubescape.kubescape?label=VScode) ![Open VSX](https://img.shields.io/open-vsx/dt/kubescape/kubescape?label=openVSX&color=yellowgreen)
Scan the YAML files while writing them using the [vs code extension](https://github.com/armosec/vscode-kubescape/blob/master/README.md)
# Under the hood
## Technology

View File

@@ -18,14 +18,27 @@ RUN pip3 install --no-cache --upgrade pip setuptools
WORKDIR /work
ADD . .
# build kubescape server
WORKDIR /work/httphandler
RUN python build.py
RUN ls -ltr build/ubuntu-latest
# build kubescape cmd
WORKDIR /work/cmd
RUN python build.py
RUN /work/build/ubuntu-latest/kubescape download artifacts -o /work/artifacts
FROM alpine
RUN addgroup -S ks && adduser -S ks -G ks
USER ks
WORKDIR /home/ks/
COPY --from=builder /work/httphandler/build/ubuntu-latest/kubescape /usr/bin/ksserver
COPY --from=builder /work/build/ubuntu-latest/kubescape /usr/bin/kubescape
# # Download the frameworks. Use the "--use-default" flag when running kubescape
# RUN kubescape download framework nsa && kubescape download framework mitre
RUN mkdir /home/ks/.kubescape && chmod 777 -R /home/ks/.kubescape
COPY --from=builder /work/artifacts/ /home/ks/.kubescape
ENTRYPOINT ["kubescape"]
ENTRYPOINT ["ksserver"]

View File

@@ -18,7 +18,7 @@ def checkStatus(status, msg):
def getBuildDir():
currentPlatform = platform.system()
buildDir = "build/"
buildDir = "../build/"
if currentPlatform == "Windows": buildDir += "windows-latest"
elif currentPlatform == "Linux": buildDir += "ubuntu-latest"
@@ -70,7 +70,7 @@ def main():
ldflags += " -X {}={}".format(WEBSITE_CONST, ArmoWebsite)
if ArmoAuthServer:
ldflags += " -X {}={}".format(AUTH_SERVER_CONST, ArmoAuthServer)
build_command = ["go", "build", "-o", ks_file, "-ldflags" ,ldflags]
print("Building kubescape and saving here: {}".format(ks_file))

View File

@@ -7,7 +7,7 @@ replace github.com/armosec/kubescape/core => ../core
require (
github.com/armosec/k8s-interface v0.0.68
github.com/armosec/kubescape/core v0.0.0-00010101000000-000000000000
github.com/armosec/opa-utils v0.0.116
github.com/armosec/opa-utils v0.0.120
github.com/armosec/rbac-utils v0.0.14
github.com/google/uuid v1.3.0
github.com/mattn/go-isatty v0.0.14

View File

@@ -109,8 +109,8 @@ github.com/armosec/k8s-interface v0.0.66/go.mod h1:vwprS8qn/iowd5yf0JHpqDsLA5I8W
github.com/armosec/k8s-interface v0.0.68 h1:6CtSakISiI47YHkxh+Va9FzZQIBkWa6g9sbiNxq1Zkk=
github.com/armosec/k8s-interface v0.0.68/go.mod h1:PeWn41C2uenZi+xfZdyFF/zG5wXACA00htQyknDUWDE=
github.com/armosec/opa-utils v0.0.64/go.mod h1:6tQP8UDq2EvEfSqh8vrUdr/9QVSCG4sJfju1SXQOn4c=
github.com/armosec/opa-utils v0.0.116 h1:3oWuhcpI+MJD/CktEStU1BA0feGNwsCbQrI3ifVfzMs=
github.com/armosec/opa-utils v0.0.116/go.mod h1:gap+EaLG5rnyqvIRGxtdNDC9y7VvoGNm90zK8Ls7avQ=
github.com/armosec/opa-utils v0.0.120 h1:WAtgm2U1o9fgA/2pjYNy+igqNC6ju3/CxQ8qRHdO+5k=
github.com/armosec/opa-utils v0.0.120/go.mod h1:gap+EaLG5rnyqvIRGxtdNDC9y7VvoGNm90zK8Ls7avQ=
github.com/armosec/rbac-utils v0.0.1/go.mod h1:pQ8CBiij8kSKV7aeZm9FMvtZN28VgA7LZcYyTWimq40=
github.com/armosec/rbac-utils v0.0.14 h1:CKYKcgqJEXWF2Hen/B1pVGtS3nDAG1wp9dDv6oNtq90=
github.com/armosec/rbac-utils v0.0.14/go.mod h1:Ex/IdGWhGv9HZq6Hs8N/ApzCKSIvpNe/ETqDfnuyah0=

View File

@@ -22,6 +22,8 @@ import (
"github.com/spf13/cobra"
)
var rootInfo cautils.RootInfo
var ksExamples = `
# Scan command
kubescape scan --submit
@@ -43,7 +45,6 @@ func NewDefaultKubescapeCommand() *cobra.Command {
}
func getRootCmd(ks meta.IKubescape) *cobra.Command {
var rootInfo cautils.RootInfo
rootCmd := &cobra.Command{
Use: "kubescape",
@@ -53,8 +54,8 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
Example: ksExamples,
}
rootCmd.PersistentFlags().StringVar(&armoBEURLsDep, "environment", "", envFlagUsage)
rootCmd.PersistentFlags().StringVar(&armoBEURLs, "env", "", envFlagUsage)
rootCmd.PersistentFlags().StringVar(&rootInfo.ArmoBEURLsDep, "environment", "", envFlagUsage)
rootCmd.PersistentFlags().StringVar(&rootInfo.ArmoBEURLs, "env", "", envFlagUsage)
rootCmd.PersistentFlags().MarkDeprecated("environment", "use 'env' instead")
rootCmd.PersistentFlags().MarkHidden("environment")
rootCmd.PersistentFlags().MarkHidden("env")
@@ -66,11 +67,7 @@ func getRootCmd(ks meta.IKubescape) *cobra.Command {
rootCmd.PersistentFlags().StringVar(&rootInfo.CacheDir, "cache-dir", getter.DefaultLocalStore, "Cache directory [$KS_CACHE_DIR]")
rootCmd.PersistentFlags().BoolVarP(&rootInfo.DisableColor, "disable-color", "", false, "Disable Color output for logging")
// Initialize
initLogger(&rootInfo)
initLoggerLevel(&rootInfo)
initEnvironment(&rootInfo)
initCacheDir(&rootInfo)
cobra.OnInitialize(initLogger, initLoggerLevel, initEnvironment, initCacheDir)
// Supported commands
rootCmd.AddCommand(scan.GetScanCommand(ks))

View File

@@ -5,7 +5,6 @@ import (
"os"
"strings"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
@@ -13,12 +12,9 @@ import (
"github.com/mattn/go-isatty"
)
var armoBEURLs = ""
var armoBEURLsDep = ""
const envFlagUsage = "Send report results to specific URL. Format:<ReportReceiver>,<Backend>,<Frontend>.\n\t\tExample:report.armo.cloud,api.armo.cloud,portal.armo.cloud"
func initLogger(rootInfo *cautils.RootInfo) {
func initLogger() {
logger.DisableColor(rootInfo.DisableColor)
if rootInfo.LoggerName == "" {
@@ -36,8 +32,8 @@ func initLogger(rootInfo *cautils.RootInfo) {
logger.InitLogger(rootInfo.LoggerName)
}
func initLoggerLevel(rootInfo *cautils.RootInfo) {
if rootInfo.Logger != helpers.InfoLevel.String() {
func initLoggerLevel() {
if rootInfo.Logger == helpers.InfoLevel.String() {
} else if l := os.Getenv("KS_LOGGER"); l != "" {
rootInfo.Logger = l
}
@@ -47,7 +43,7 @@ func initLoggerLevel(rootInfo *cautils.RootInfo) {
}
}
func initCacheDir(rootInfo *cautils.RootInfo) {
func initCacheDir() {
if rootInfo.CacheDir == getter.DefaultLocalStore {
getter.DefaultLocalStore = rootInfo.CacheDir
} else if cacheDir := os.Getenv("KS_CACHE_DIR"); cacheDir != "" {
@@ -58,11 +54,11 @@ func initCacheDir(rootInfo *cautils.RootInfo) {
logger.L().Debug("cache dir updated", helpers.String("path", getter.DefaultLocalStore))
}
func initEnvironment(rootInfo *cautils.RootInfo) {
if armoBEURLsDep != "" {
armoBEURLs = armoBEURLsDep
func initEnvironment() {
if rootInfo.ArmoBEURLs == "" {
rootInfo.ArmoBEURLs = rootInfo.ArmoBEURLsDep
}
urlSlices := strings.Split(armoBEURLs, ",")
urlSlices := strings.Split(rootInfo.ArmoBEURLs, ",")
if len(urlSlices) != 1 && len(urlSlices) < 3 {
logger.L().Fatal("expected at least 3 URLs (report, api, frontend, auth)")
}
@@ -85,7 +81,7 @@ func initEnvironment(rootInfo *cautils.RootInfo) {
armoERURL := urlSlices[0] // mandatory
armoBEURL := urlSlices[1] // mandatory
armoFEURL := urlSlices[2] // mandatory
if len(urlSlices) <= 4 {
if len(urlSlices) >= 4 {
armoAUTHURL = urlSlices[3]
}
getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL, armoAUTHURL))

View File

@@ -4,35 +4,45 @@ import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/reporthandling"
apis "github.com/armosec/opa-utils/reporthandling/apis"
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
v2 "github.com/armosec/opa-utils/reporthandling/v2"
)
// K8SResources map[<api group>/<api version>/<resource>][]<resourceID>
type K8SResources map[string][]string
type ArmoResources map[string][]string
type OPASessionObj struct {
K8SResources *K8SResources // input k8s objects
Policies []reporthandling.Framework // list of frameworks to scan
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<rtesource ID>]<resource>
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<rtesource ID>]<resource result>
PostureReport *reporthandling.PostureReport // scan results v1 - Remove
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
RegoInputData RegoInputData // input passed to rgo for scanning. map[<control name>][<input arguments>]
K8SResources *K8SResources // input k8s objects
ArmoResource *ArmoResources // input ARMO objects
Policies []reporthandling.Framework // list of frameworks to scan
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<rtesource ID>]<resource>
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<rtesource ID>]<resource result>
PostureReport *reporthandling.PostureReport // scan results v1 - Remove
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
RegoInputData RegoInputData // input passed to rgo for scanning. map[<control name>][<input arguments>]
Metadata *reporthandlingv2.Metadata
InfoMap map[string]apis.StatusInfo // Map errors of resources to StatusInfo
ResourceToControlsMap map[string][]string // map[<apigroup/apiversion/resource>] = [<control_IDs>]
}
func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources) *OPASessionObj {
return &OPASessionObj{
Report: &reporthandlingv2.PostureReport{},
Policies: frameworks,
K8SResources: k8sResources,
AllResources: make(map[string]workloadinterface.IMetadata),
ResourcesResult: make(map[string]resourcesresults.Result),
Report: &reporthandlingv2.PostureReport{},
Policies: frameworks,
K8SResources: k8sResources,
AllResources: make(map[string]workloadinterface.IMetadata),
ResourcesResult: make(map[string]resourcesresults.Result),
InfoMap: make(map[string]apis.StatusInfo),
ResourceToControlsMap: make(map[string][]string),
PostureReport: &reporthandling.PostureReport{
ClusterName: ClusterName,
CustomerGUID: CustomerGUID,
},
Metadata: &v2.Metadata{},
}
}

View File

@@ -11,8 +11,7 @@ func ReportV2ToV1(opaSessionObj *OPASessionObj) {
if len(opaSessionObj.PostureReport.FrameworkReports) > 0 {
return // report already converted
}
opaSessionObj.PostureReport.ClusterCloudProvider = opaSessionObj.Report.ClusterCloudProvider
// opaSessionObj.PostureReport.ClusterCloudProvider = opaSessionObj.Report.ClusterCloudProvider
frameworks := []reporthandling.FrameworkReport{}

89
core/cautils/rootinfo.go Normal file
View File

@@ -0,0 +1,89 @@
package cautils
type RootInfo struct {
Logger string // logger level
LoggerName string // logger name ("pretty"/"zap"/"none")
CacheDir string // cached dir
DisableColor bool // Disable Color
ArmoBEURLs string // armo url
ArmoBEURLsDep string // armo url
}
// func (rootInfo *RootInfo) InitLogger() {
// logger.DisableColor(rootInfo.DisableColor)
// if rootInfo.LoggerName == "" {
// if l := os.Getenv("KS_LOGGER_NAME"); l != "" {
// rootInfo.LoggerName = l
// } else {
// if isatty.IsTerminal(os.Stdout.Fd()) {
// rootInfo.LoggerName = "pretty"
// } else {
// rootInfo.LoggerName = "zap"
// }
// }
// }
// logger.InitLogger(rootInfo.LoggerName)
// }
// func (rootInfo *RootInfo) InitLoggerLevel() error {
// if rootInfo.Logger == helpers.InfoLevel.String() {
// } else if l := os.Getenv("KS_LOGGER"); l != "" {
// rootInfo.Logger = l
// }
// if err := logger.L().SetLevel(rootInfo.Logger); err != nil {
// return fmt.Errorf("supported levels: %s", strings.Join(helpers.SupportedLevels(), "/"))
// }
// return nil
// }
// func (rootInfo *RootInfo) InitCacheDir() error {
// if rootInfo.CacheDir == getter.DefaultLocalStore {
// getter.DefaultLocalStore = rootInfo.CacheDir
// } else if cacheDir := os.Getenv("KS_CACHE_DIR"); cacheDir != "" {
// getter.DefaultLocalStore = cacheDir
// } else {
// return nil // using default cache dir location
// }
// // TODO create dir if not found exist
// // logger.L().Debug("cache dir updated", helpers.String("path", getter.DefaultLocalStore))
// return nil
// }
// func (rootInfo *RootInfo) InitEnvironment() error {
// urlSlices := strings.Split(rootInfo.ArmoBEURLs, ",")
// if len(urlSlices) != 1 && len(urlSlices) < 3 {
// return fmt.Errorf("expected at least 2 URLs (report,api,frontend,auth)")
// }
// switch len(urlSlices) {
// case 1:
// switch urlSlices[0] {
// case "dev", "development":
// getter.SetARMOAPIConnector(getter.NewARMOAPIDev())
// case "stage", "staging":
// getter.SetARMOAPIConnector(getter.NewARMOAPIStaging())
// case "":
// getter.SetARMOAPIConnector(getter.NewARMOAPIProd())
// default:
// return fmt.Errorf("unknown environment")
// }
// case 2:
// armoERURL := urlSlices[0] // mandatory
// armoBEURL := urlSlices[1] // mandatory
// getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(armoERURL, armoBEURL, "", ""))
// case 3, 4:
// var armoAUTHURL string
// armoERURL := urlSlices[0] // mandatory
// armoBEURL := urlSlices[1] // mandatory
// armoFEURL := urlSlices[2] // mandatory
// if len(urlSlices) <= 4 {
// armoAUTHURL = urlSlices[3]
// }
// getter.SetARMOAPIConnector(getter.NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL, armoAUTHURL))
// }
// return nil
// }

View File

@@ -38,6 +38,12 @@ func (bpf *BoolPtrFlag) String() string {
func (bpf *BoolPtrFlag) Get() *bool {
return bpf.valPtr
}
func (bpf *BoolPtrFlag) GetBool() bool {
if bpf.valPtr == nil {
return false
}
return *bpf.valPtr
}
func (bpf *BoolPtrFlag) SetBool(val bool) {
bpf.valPtr = &val
@@ -53,13 +59,6 @@ func (bpf *BoolPtrFlag) Set(val string) error {
return nil
}
type RootInfo struct {
Logger string // logger level
LoggerName string // logger name ("pretty"/"zap"/"none")
CacheDir string // cached dir
DisableColor bool // Disable Color
}
// TODO - UPDATE
type ScanInfo struct {
Getters // TODO - remove from object
@@ -133,15 +132,6 @@ func (scanInfo *ScanInfo) setUseArtifactsFrom() {
scanInfo.UseExceptions = filepath.Join(scanInfo.UseArtifactsFrom, localExceptionsFilename)
}
func (scanInfo *ScanInfo) setUseExceptions() {
if scanInfo.UseExceptions != "" {
// load exceptions from file
scanInfo.ExceptionsGetter = getter.NewLoadPolicy([]string{scanInfo.UseExceptions})
} else {
scanInfo.ExceptionsGetter = getter.GetArmoAPIConnector()
}
}
func (scanInfo *ScanInfo) setUseFrom() {
if scanInfo.UseDefault {
for _, policy := range scanInfo.PolicyIdentifier {

View File

@@ -13,7 +13,8 @@ import (
"golang.org/x/mod/semver"
)
const SKIP_VERSION_CHECK = "KUBESCAPE_SKIP_UPDATE_CHECK"
const SKIP_VERSION_CHECK_DEPRECATED = "KUBESCAPE_SKIP_UPDATE_CHECK"
const SKIP_VERSION_CHECK = "KS_SKIP_UPDATE_CHECK"
var BuildNumber string
@@ -29,6 +30,8 @@ func NewIVersionCheckHandler() IVersionCheckHandler {
}
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && pkgutils.StringToBool(v) {
return NewVersionCheckHandlerMock()
} else if v, ok := os.LookupEnv(SKIP_VERSION_CHECK_DEPRECATED); ok && pkgutils.StringToBool(v) {
return NewVersionCheckHandlerMock()
}
return NewVersionCheckHandler()
}

View File

@@ -0,0 +1,52 @@
package cautils
import (
"strings"
"github.com/armosec/opa-utils/reporthandling/apis"
)
var (
ImageVulnResources = []string{"ImageVulnerabilities"}
HostSensorResources = []string{"KubeletConfiguration",
"KubeletCommandLine",
"OsReleaseFile",
"KernelVersion",
"LinuxSecurityHardeningStatus",
"OpenPortsList",
"LinuxKernelVariables"}
CloudResources = []string{"ClusterDescribe"}
)
func MapArmoResource(armoResourceMap *ArmoResources, resources []string) []string {
var hostResources []string
for k := range *armoResourceMap {
for _, resource := range resources {
if strings.Contains(k, resource) {
hostResources = append(hostResources, k)
}
}
}
return hostResources
}
func MapHostResources(armoResourceMap *ArmoResources) []string {
return MapArmoResource(armoResourceMap, HostSensorResources)
}
func MapImageVulnResources(armoResourceMap *ArmoResources) []string {
return MapArmoResource(armoResourceMap, ImageVulnResources)
}
func MapCloudResources(armoResourceMap *ArmoResources) []string {
return MapArmoResource(armoResourceMap, CloudResources)
}
func SetInfoMapForResources(info string, resources []string, errorMap map[string]apis.StatusInfo) {
for _, resource := range resources {
errorMap[resource] = apis.StatusInfo{
InnerInfo: info,
InnerStatus: apis.StatusSkipped,
}
}
}

View File

@@ -2,6 +2,7 @@ package core
import (
"fmt"
"os"
"path/filepath"
"strings"
@@ -30,6 +31,9 @@ func DownloadSupportCommands() []string {
func (ks *Kubescape) Download(downloadInfo *metav1.DownloadInfo) error {
setPathandFilename(downloadInfo)
if err := os.MkdirAll(downloadInfo.Path, os.ModePerm); err != nil {
return err
}
if err := downloadArtifact(downloadInfo, downloadFunc); err != nil {
return err
}
@@ -86,6 +90,9 @@ func downloadConfigInputs(downloadInfo *metav1.DownloadInfo) error {
if downloadInfo.FileName == "" {
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
}
if controlInputs == nil {
return fmt.Errorf("failed to download controlInputs - received an empty objects")
}
// save in file
err = getter.SaveInFile(controlInputs, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
if err != nil {
@@ -148,6 +155,9 @@ func downloadFramework(downloadInfo *metav1.DownloadInfo) error {
if err != nil {
return err
}
if framework == nil {
return fmt.Errorf("failed to download framework - received an empty objects")
}
downloadTo := filepath.Join(downloadInfo.Path, downloadInfo.FileName)
err = getter.SaveInFile(framework, downloadTo)
if err != nil {
@@ -175,6 +185,9 @@ func downloadControl(downloadInfo *metav1.DownloadInfo) error {
if err != nil {
return err
}
if controls == nil {
return fmt.Errorf("failed to download control - received an empty objects")
}
downloadTo := filepath.Join(downloadInfo.Path, downloadInfo.FileName)
err = getter.SaveInFile(controls, downloadTo)
if err != nil {

View File

@@ -5,7 +5,7 @@ go 1.17
require (
github.com/armosec/armoapi-go v0.0.58
github.com/armosec/k8s-interface v0.0.68
github.com/armosec/opa-utils v0.0.116
github.com/armosec/opa-utils v0.0.120
github.com/armosec/rbac-utils v0.0.14
github.com/armosec/utils-go v0.0.3
github.com/armosec/utils-k8s-go v0.0.3
@@ -25,6 +25,7 @@ require (
k8s.io/api v0.23.4
k8s.io/apimachinery v0.23.4
k8s.io/client-go v0.23.4
k8s.io/utils v0.0.0-20211116205334-6203023598ed
sigs.k8s.io/yaml v1.3.0
)
@@ -116,7 +117,6 @@ require (
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/klog/v2 v2.30.0 // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
sigs.k8s.io/controller-runtime v0.11.1 // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect

View File

@@ -109,8 +109,8 @@ github.com/armosec/k8s-interface v0.0.66/go.mod h1:vwprS8qn/iowd5yf0JHpqDsLA5I8W
github.com/armosec/k8s-interface v0.0.68 h1:6CtSakISiI47YHkxh+Va9FzZQIBkWa6g9sbiNxq1Zkk=
github.com/armosec/k8s-interface v0.0.68/go.mod h1:PeWn41C2uenZi+xfZdyFF/zG5wXACA00htQyknDUWDE=
github.com/armosec/opa-utils v0.0.64/go.mod h1:6tQP8UDq2EvEfSqh8vrUdr/9QVSCG4sJfju1SXQOn4c=
github.com/armosec/opa-utils v0.0.116 h1:3oWuhcpI+MJD/CktEStU1BA0feGNwsCbQrI3ifVfzMs=
github.com/armosec/opa-utils v0.0.116/go.mod h1:gap+EaLG5rnyqvIRGxtdNDC9y7VvoGNm90zK8Ls7avQ=
github.com/armosec/opa-utils v0.0.120 h1:WAtgm2U1o9fgA/2pjYNy+igqNC6ju3/CxQ8qRHdO+5k=
github.com/armosec/opa-utils v0.0.120/go.mod h1:gap+EaLG5rnyqvIRGxtdNDC9y7VvoGNm90zK8Ls7avQ=
github.com/armosec/rbac-utils v0.0.1/go.mod h1:pQ8CBiij8kSKV7aeZm9FMvtZN28VgA7LZcYyTWimq40=
github.com/armosec/rbac-utils v0.0.14 h1:CKYKcgqJEXWF2Hen/B1pVGtS3nDAG1wp9dDv6oNtq90=
github.com/armosec/rbac-utils v0.0.14/go.mod h1:Ex/IdGWhGv9HZq6Hs8N/ApzCKSIvpNe/ETqDfnuyah0=

View File

@@ -9,6 +9,7 @@ import (
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
"github.com/armosec/opa-utils/reporthandling/apis"
"sigs.k8s.io/yaml"
)
@@ -156,56 +157,77 @@ func (hsh *HostSensorHandler) GetKubeletConfigurations() ([]hostsensor.HostSenso
return res, err
}
func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnvelope, error) {
func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
res := make([]hostsensor.HostSensorDataEnvelope, 0)
infoMap := make(map[string]apis.StatusInfo)
if hsh.DaemonSet == nil {
return res, nil
return res, nil, nil
}
var kcData []hostsensor.HostSensorDataEnvelope
var err error
logger.L().Debug("Accessing host scanner")
kcData, err := hsh.GetKubeletConfigurations()
kcData, err = hsh.GetKubeletConfigurations()
if err != nil {
return kcData, err
addInfoToMap(KubeletConfiguration, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
//
kcData, err = hsh.GetKubeletCommandLine()
if err != nil {
return kcData, err
addInfoToMap(KubeletCommandLine, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
//
kcData, err = hsh.GetOsReleaseFile()
if err != nil {
return kcData, err
addInfoToMap(OsReleaseFile, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
//
kcData, err = hsh.GetKernelVersion()
if err != nil {
return kcData, err
addInfoToMap(KernelVersion, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
//
kcData, err = hsh.GetLinuxSecurityHardeningStatus()
if err != nil {
return kcData, err
addInfoToMap(LinuxSecurityHardeningStatus, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
//
kcData, err = hsh.GetOpenPortsList()
if err != nil {
return kcData, err
addInfoToMap(OpenPortsList, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
// GetKernelVariables
kcData, err = hsh.GetKernelVariables()
if err != nil {
return kcData, err
addInfoToMap(LinuxKernelVariables, infoMap, err)
logger.L().Warning(err.Error())
}
if len(kcData) > 0 {
res = append(res, kcData...)
}
res = append(res, kcData...)
// finish
logger.L().Debug("Done reading information from host scanner")
return res, nil
return res, infoMap, nil
}

View File

@@ -1,10 +1,13 @@
package hostsensorutils
import "github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
import (
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
"github.com/armosec/opa-utils/reporthandling/apis"
)
type IHostSensor interface {
Init() error
TearDown() error
CollectResources() ([]hostsensor.HostSensorDataEnvelope, error)
CollectResources() ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error)
GetNamespace() string
}

View File

@@ -2,6 +2,7 @@ package hostsensorutils
import (
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
"github.com/armosec/opa-utils/reporthandling/apis"
)
type HostSensorHandlerMock struct {
@@ -15,8 +16,8 @@ func (hshm *HostSensorHandlerMock) TearDown() error {
return nil
}
func (hshm *HostSensorHandlerMock) CollectResources() ([]hostsensor.HostSensorDataEnvelope, error) {
return []hostsensor.HostSensorDataEnvelope{}, nil
func (hshm *HostSensorHandlerMock) CollectResources() ([]hostsensor.HostSensorDataEnvelope, map[string]apis.StatusInfo, error) {
return []hostsensor.HostSensorDataEnvelope{}, nil, nil
}
func (hshm *HostSensorHandlerMock) GetNamespace() string {

View File

@@ -0,0 +1,35 @@
package hostsensorutils
import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/opa-utils/reporthandling/apis"
)
var (
KubeletConfiguration = "KubeletConfiguration"
OsReleaseFile = "OsReleaseFile"
KernelVersion = "KernelVersion"
LinuxSecurityHardeningStatus = "LinuxSecurityHardeningStatus"
OpenPortsList = "OpenPortsList"
LinuxKernelVariables = "LinuxKernelVariables"
KubeletCommandLine = "KubeletCommandLine"
MapResourceToApiGroup = map[string]string{
KubeletConfiguration: "hostdata.kubescape.cloud/v1beta0",
OsReleaseFile: "hostdata.kubescape.cloud/v1beta0/",
KubeletCommandLine: "hostdata.kubescape.cloud/v1beta0/",
KernelVersion: "hostdata.kubescape.cloud/v1beta0/",
LinuxSecurityHardeningStatus: "hostdata.kubescape.cloud/v1beta0/",
OpenPortsList: "hostdata.kubescape.cloud/v1beta0/",
LinuxKernelVariables: "hostdata.kubescape.cloud/v1beta0/",
}
)
func addInfoToMap(resource string, infoMap map[string]apis.StatusInfo, err error) {
group, version := k8sinterface.SplitApiVersion(MapResourceToApiGroup[resource])
r := k8sinterface.JoinResourceTriplets(group, version, resource)
infoMap[r] = apis.StatusInfo{
InnerStatus: apis.StatusSkipped,
InnerInfo: err.Error(),
}
}

View File

@@ -133,7 +133,7 @@ func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (map[stri
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs) // get store
inputResources, err := reporthandling.RegoResourcesAggregator(rule, getAllSupportedObjects(opap.K8SResources, opap.AllResources, rule))
inputResources, err := reporthandling.RegoResourcesAggregator(rule, getAllSupportedObjects(opap.K8SResources, opap.ArmoResource, opap.AllResources, rule))
if err != nil {
return nil, fmt.Errorf("error getting aggregated k8sObjects: %s", err.Error())
}

View File

@@ -112,10 +112,10 @@ func TestProcessResourcesResult(t *testing.T) {
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
// test control listing
assert.Equal(t, len(res.ListControlsIDs(nil).All()), len(summaryDetails.ListControls().All()))
assert.Equal(t, len(res.ListControlsIDs(nil).Passed()), len(summaryDetails.ListControls().Passed()))
assert.Equal(t, len(res.ListControlsIDs(nil).Failed()), len(summaryDetails.ListControls().Failed()))
assert.Equal(t, len(res.ListControlsIDs(nil).Excluded()), len(summaryDetails.ListControls().Excluded()))
assert.Equal(t, len(res.ListControlsIDs(nil).All()), summaryDetails.NumberOfControls().All())
assert.Equal(t, len(res.ListControlsIDs(nil).Passed()), summaryDetails.NumberOfControls().Passed())
assert.Equal(t, len(res.ListControlsIDs(nil).Failed()), summaryDetails.NumberOfControls().Failed())
assert.Equal(t, len(res.ListControlsIDs(nil).Excluded()), summaryDetails.NumberOfControls().Excluded())
assert.True(t, summaryDetails.GetStatus().IsFailed())
opaSessionObj.Exceptions = []armotypes.PostureExceptionPolicy{*mocks.MockExceptionAllKinds(&armotypes.PosturePolicy{FrameworkName: frameworks[0].Name})}

View File

@@ -9,6 +9,7 @@ import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/reporthandling/apis"
resources "github.com/armosec/opa-utils/resources"
)
@@ -46,8 +47,9 @@ func (opap *OPAProcessor) updateResults() {
}
// set result summary
opap.Report.SummaryDetails.InitResourcesSummary()
// map control to error
controlToInfoMap := mapControlToInfo(opap.ResourceToControlsMap, opap.InfoMap)
opap.Report.SummaryDetails.InitResourcesSummary(controlToInfoMap)
// for f := range opap.PostureReport.FrameworkReports {
// // set exceptions
// exceptions.SetFrameworkExceptions(&opap.PostureReport.FrameworkReports[f], opap.Exceptions, cautils.ClusterName)
@@ -60,13 +62,50 @@ func (opap *OPAProcessor) updateResults() {
// }
}
func getAllSupportedObjects(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
// mapControlToInfo re-keys the per-resource status information by control
// ID: every control that depends on a resource inherits that resource's
// StatusInfo. If several resources map to the same control, the last one
// iterated wins (map iteration order is unspecified).
func mapControlToInfo(mapResourceToControls map[string][]string, infoMap map[string]apis.StatusInfo) map[string]apis.StatusInfo {
	out := make(map[string]apis.StatusInfo, len(infoMap))
	for resource, info := range infoMap {
		for _, controlID := range mapResourceToControls[resource] {
			out[controlID] = info
		}
	}
	return out
}
// getAllSupportedObjects gathers every workload the rule may evaluate:
// k8s objects selected by both the static and dynamic matchers, plus ARMO
// (non-k8s) objects selected by the dynamic matcher.
func getAllSupportedObjects(k8sResources *cautils.K8SResources, armoResources *cautils.ArmoResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
	objects := []workloadinterface.IMetadata{}
	objects = append(objects, getKubernetesObjects(k8sResources, allResources, rule.Match)...)
	objects = append(objects, getKubernetesObjects(k8sResources, allResources, rule.DynamicMatch)...)
	objects = append(objects, getArmoObjects(armoResources, allResources, rule.DynamicMatch)...)
	return objects
}
// getArmoObjects resolves the rule matchers' group/version/resource
// combinations against the collected ARMO (non-k8s) resources and returns
// the matching workloads from allResources, with child resources filtered
// out.
func getArmoObjects(armoResources *cautils.ArmoResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
	objects := []workloadinterface.IMetadata{}
	for m := range match {
		for _, group := range match[m].APIGroups {
			for _, version := range match[m].APIVersions {
				for _, resource := range match[m].Resources {
					for _, groupResource := range k8sinterface.ResourceGroupToString(group, version, resource) {
						ids, found := (*armoResources)[groupResource]
						if !found {
							continue
						}
						if ids == nil {
							// a nil entry means the resource was expected but never pulled
							logger.L().Debug(fmt.Sprintf("resource '%s' is nil, probably failed to pull the resource", groupResource))
						}
						for i := range ids {
							objects = append(objects, allResources[ids[i]])
						}
					}
				}
			}
		}
	}
	return filterOutChildResources(objects, match)
}
func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
k8sObjects := []workloadinterface.IMetadata{}

View File

@@ -3,6 +3,7 @@ package policyhandler
import (
"fmt"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/pkg/resourcehandler"
"github.com/armosec/opa-utils/reporthandling"
@@ -46,15 +47,46 @@ func (policyHandler *PolicyHandler) CollectResources(notification *reporthandlin
}
func (policyHandler *PolicyHandler) getResources(notification *reporthandling.PolicyNotification, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
opaSessionObj.Report.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
resourcesMap, allResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj.Policies, &notification.Designators)
scanInfoToScanMetadata(opaSessionObj, scanInfo)
resourcesMap, allResources, armoResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj, &notification.Designators)
if err != nil {
return err
}
opaSessionObj.K8SResources = resourcesMap
opaSessionObj.AllResources = allResources
opaSessionObj.ArmoResource = armoResources
return nil
}
// scanInfoToScanMetadata copies the user's scan configuration (CLI flags
// and selected policies) into the session metadata so it is submitted and
// reported alongside the scan results.
func scanInfoToScanMetadata(opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) {
	opaSessionObj.Metadata.ClusterMetadata.ContextName = k8sinterface.GetClusterName()
	opaSessionObj.Metadata.ScanMetadata.Format = scanInfo.Format
	opaSessionObj.Metadata.ScanMetadata.Submit = scanInfo.Submit

	// TODO - Add excluded and included namespaces
	// if len(scanInfo.ExcludedNamespaces) > 1 {
	// 	opaSessionObj.Metadata.ScanMetadata.ExcludedNamespaces = strings.Split(scanInfo.ExcludedNamespaces[1:], ",")
	// }
	// if len(scanInfo.IncludeNamespaces) > 1 {
	// 	opaSessionObj.Metadata.ScanMetadata.IncludeNamespaces = strings.Split(scanInfo.IncludeNamespaces[1:], ",")
	// }

	// scan type: all identifiers in one scan share a kind, so the first entry
	// is representative
	if len(scanInfo.PolicyIdentifier) > 0 {
		opaSessionObj.Metadata.ScanMetadata.TargetType = string(scanInfo.PolicyIdentifier[0].Kind)
	}
	// append frameworks
	for _, policy := range scanInfo.PolicyIdentifier {
		opaSessionObj.Metadata.ScanMetadata.TargetNames = append(opaSessionObj.Metadata.ScanMetadata.TargetNames, policy.Name)
	}

	// fix: VerboseMode was assigned twice in the original; keep a single assignment
	opaSessionObj.Metadata.ScanMetadata.VerboseMode = scanInfo.VerboseMode
	opaSessionObj.Metadata.ScanMetadata.FailThreshold = scanInfo.FailThreshold
	opaSessionObj.Metadata.ScanMetadata.HostScanner = scanInfo.HostSensorEnabled.GetBool()
	opaSessionObj.Metadata.ScanMetadata.ControlsInputs = scanInfo.ControlsInputs
}

View File

@@ -60,6 +60,9 @@ func responseObjectToVulnerabilities(vulnerabilitiesList containerscan.Vulnerabi
vulnerabilities[i].Relevancy = vulnerabilityEntry.Relevancy
vulnerabilities[i].Severity = vulnerabilityEntry.Severity
vulnerabilities[i].UrgentCount = vulnerabilityEntry.UrgentCount
vulnerabilities[i].Categories = registryvulnerabilities.Categories{
IsRCE: vulnerabilityEntry.Categories.IsRCE,
}
}
return vulnerabilities
}

View File

@@ -23,6 +23,10 @@ type FixedIn struct {
ImgTag string `json:"imageTag"`
Version string `json:"version"`
}
type Categories struct {
IsRCE bool `json:"isRce"`
}
type Vulnerability struct {
Name string `json:"name"`
RelatedPackageName string `json:"packageName"`
@@ -36,6 +40,7 @@ type Vulnerability struct {
UrgentCount int `json:"urgent"`
NeglectedCount int `json:"neglected"`
HealthStatus string `json:"healthStatus"`
Categories Categories `json:"categories"`
}
type ContainerImageVulnerabilityReport struct {

View File

@@ -10,7 +10,6 @@ import (
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/opa-utils/reporthandling"
)
// FileResourceHandler handle resources from files and URLs
@@ -27,19 +26,20 @@ func NewFileResourceHandler(inputPatterns []string, registryAdaptors *RegistryAd
}
}
func (fileHandler *FileResourceHandler) GetResources(frameworks []reporthandling.Framework, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, error) {
func (fileHandler *FileResourceHandler) GetResources(sessionObj *cautils.OPASessionObj, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.ArmoResources, error) {
// build resources map
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
k8sResources := setResourceMap(frameworks)
k8sResources := setK8sResourceMap(sessionObj.Policies)
allResources := map[string]workloadinterface.IMetadata{}
armoResources := &cautils.ArmoResources{}
workloads := []workloadinterface.IMetadata{}
// load resource from local file system
w, err := cautils.LoadResourcesFromFiles(fileHandler.inputPatterns)
if err != nil {
return nil, allResources, err
return nil, allResources, nil, err
}
if w != nil {
workloads = append(workloads, w...)
@@ -48,14 +48,14 @@ func (fileHandler *FileResourceHandler) GetResources(frameworks []reporthandling
// load resources from url
w, err = loadResourcesFromUrl(fileHandler.inputPatterns)
if err != nil {
return nil, allResources, err
return nil, allResources, nil, err
}
if w != nil {
workloads = append(workloads, w...)
}
if len(workloads) == 0 {
return nil, allResources, fmt.Errorf("empty list of workloads - no workloads found")
return nil, allResources, nil, fmt.Errorf("empty list of workloads - no workloads found")
}
// map all resources: map["/group/version/kind"][]<k8s workloads>
@@ -73,11 +73,11 @@ func (fileHandler *FileResourceHandler) GetResources(frameworks []reporthandling
}
}
if err := fileHandler.registryAdaptors.collectImagesVulnerabilities(k8sResources, allResources); err != nil {
if err := fileHandler.registryAdaptors.collectImagesVulnerabilities(k8sResources, allResources, armoResources); err != nil {
cautils.WarningDisplay(os.Stderr, "Warning: failed to collect images vulnerabilities: %s\n", err.Error())
}
return k8sResources, allResources, nil
return k8sResources, allResources, armoResources, nil
}

View File

@@ -10,7 +10,7 @@ import (
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/pkg/hostsensorutils"
"github.com/armosec/opa-utils/objectsenvelopes"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/reporthandling/apis"
"github.com/armosec/k8s-interface/cloudsupport"
"github.com/armosec/k8s-interface/k8sinterface"
@@ -44,46 +44,88 @@ func NewK8sResourceHandler(k8s *k8sinterface.KubernetesApi, fieldSelector IField
}
}
func (k8sHandler *K8sResourceHandler) GetResources(frameworks []reporthandling.Framework, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, error) {
func (k8sHandler *K8sResourceHandler) GetResources(sessionObj *cautils.OPASessionObj, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.ArmoResources, error) {
allResources := map[string]workloadinterface.IMetadata{}
// get k8s resources
logger.L().Info("Accessing Kubernetes objects")
cautils.StartSpinner()
resourceToControl := make(map[string][]string)
// build resources map
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
k8sResourcesMap := setResourceMap(frameworks)
k8sResourcesMap := setK8sResourceMap(sessionObj.Policies)
// get namespace and labels from designator (ignore cluster labels)
_, namespace, labels := armotypes.DigestPortalDesignator(designator)
// pull k8s recourses
armoResourceMap := setArmoResourceMap(sessionObj.Policies, resourceToControl)
// map of armo resources to control_ids
sessionObj.ResourceToControlsMap = resourceToControl
if err := k8sHandler.pullResources(k8sResourcesMap, allResources, namespace, labels); err != nil {
cautils.StopSpinner()
return k8sResourcesMap, allResources, err
return k8sResourcesMap, allResources, armoResourceMap, err
}
if err := k8sHandler.registryAdaptors.collectImagesVulnerabilities(k8sResourcesMap, allResources); err != nil {
logger.L().Warning("failed to collect image vulnerabilities", helpers.Error(err))
numberOfWorkerNodes, err := k8sHandler.pullWorkerNodesNumber()
if err != nil {
logger.L().Debug("failed to collect worker nodes number", helpers.Error(err))
} else {
sessionObj.Metadata.ClusterMetadata.NumberOfWorkerNodes = numberOfWorkerNodes
}
if err := k8sHandler.collectHostResources(allResources, k8sResourcesMap); err != nil {
logger.L().Warning("failed to collect host scanner resources", helpers.Error(err))
imgVulnResources := cautils.MapImageVulnResources(armoResourceMap)
// check that controls use image vulnerability resources
if len(imgVulnResources) > 0 {
if err := k8sHandler.registryAdaptors.collectImagesVulnerabilities(k8sResourcesMap, allResources, armoResourceMap); err != nil {
logger.L().Warning("failed to collect image vulnerabilities", helpers.Error(err))
}
}
hostResources := cautils.MapHostResources(armoResourceMap)
// check that controls use host sensor resources
if len(hostResources) > 0 {
if sessionObj.Metadata.ScanMetadata.HostScanner {
infoMap, err := k8sHandler.collectHostResources(allResources, armoResourceMap)
if err != nil {
logger.L().Warning("failed to collect host scanner resources", helpers.Error(err))
cautils.SetInfoMapForResources(err.Error(), hostResources, sessionObj.InfoMap)
} else if k8sHandler.hostSensorHandler == nil {
// using hostSensor mock
cautils.SetInfoMapForResources("failed to init host scanner", hostResources, sessionObj.InfoMap)
} else {
sessionObj.InfoMap = infoMap
}
} else {
cautils.SetInfoMapForResources("enable-host-scan flag not used", hostResources, sessionObj.InfoMap)
}
}
if err := k8sHandler.collectRbacResources(allResources); err != nil {
logger.L().Warning("failed to collect rbac resources", helpers.Error(err))
}
if err := getCloudProviderDescription(allResources, k8sResourcesMap); err != nil {
logger.L().Warning("failed to collect cloud data", helpers.Error(err))
cloudResources := cautils.MapCloudResources(armoResourceMap)
// check that controls use cloud resources
if len(cloudResources) > 0 {
provider, err := getCloudProviderDescription(allResources, armoResourceMap)
if err != nil {
cautils.SetInfoMapForResources(err.Error(), cloudResources, sessionObj.InfoMap)
logger.L().Warning("failed to collect cloud data", helpers.Error(err))
}
if provider != "" {
sessionObj.Metadata.ClusterMetadata.CloudProvider = provider
}
}
cautils.StopSpinner()
logger.L().Success("Accessed to Kubernetes objects")
return k8sResourcesMap, allResources, nil
return k8sResourcesMap, allResources, armoResourceMap, nil
}
func (k8sHandler *K8sResourceHandler) GetClusterAPIServerInfo() *version.Info {
@@ -180,12 +222,11 @@ func ConvertMapListToMeta(resourceMap []map[string]interface{}) []workloadinterf
// }
// return nil
// }
func (k8sHandler *K8sResourceHandler) collectHostResources(allResources map[string]workloadinterface.IMetadata, resourcesMap *cautils.K8SResources) error {
func (k8sHandler *K8sResourceHandler) collectHostResources(allResources map[string]workloadinterface.IMetadata, armoResourceMap *cautils.ArmoResources) (map[string]apis.StatusInfo, error) {
logger.L().Debug("Collecting host scanner resources")
hostResources, err := k8sHandler.hostSensorHandler.CollectResources()
hostResources, infoMap, err := k8sHandler.hostSensorHandler.CollectResources()
if err != nil {
return err
return nil, err
}
for rscIdx := range hostResources {
@@ -193,13 +234,13 @@ func (k8sHandler *K8sResourceHandler) collectHostResources(allResources map[stri
groupResource := k8sinterface.JoinResourceTriplets(group, version, hostResources[rscIdx].GetKind())
allResources[hostResources[rscIdx].GetID()] = &hostResources[rscIdx]
grpResourceList, ok := (*resourcesMap)[groupResource]
grpResourceList, ok := (*armoResourceMap)[groupResource]
if !ok {
grpResourceList = make([]string, 0)
}
(*resourcesMap)[groupResource] = append(grpResourceList, hostResources[rscIdx].GetID())
(*armoResourceMap)[groupResource] = append(grpResourceList, hostResources[rscIdx].GetID())
}
return nil
return infoMap, nil
}
func (k8sHandler *K8sResourceHandler) collectRbacResources(allResources map[string]workloadinterface.IMetadata) error {
@@ -218,20 +259,19 @@ func (k8sHandler *K8sResourceHandler) collectRbacResources(allResources map[stri
return nil
}
func getCloudProviderDescription(allResources map[string]workloadinterface.IMetadata, k8sResourcesMap *cautils.K8SResources) error {
func getCloudProviderDescription(allResources map[string]workloadinterface.IMetadata, armoResourceMap *cautils.ArmoResources) (string, error) {
logger.L().Debug("Collecting cloud data")
cloudProvider := initCloudProvider()
cluster := cloudProvider.getKubeCluster()
clusterName := cloudProvider.getKubeClusterName()
provider := getCloudProvider()
region, err := cloudProvider.getRegion(cluster, provider)
if err != nil {
return err
return provider, err
}
project, err := cloudProvider.getProject(cluster, provider)
if err != nil {
return err
return provider, err
}
if provider != "" {
@@ -242,17 +282,33 @@ func getCloudProviderDescription(allResources map[string]workloadinterface.IMeta
// Return error with useful info on how to configure credentials for getting cloud provider info
switch provider {
case "gke":
return fmt.Errorf("could not get descriptive information about gke cluster: %s using sdk client. See https://developers.google.com/accounts/docs/application-default-credentials for more information", cluster)
return provider, fmt.Errorf("could not get descriptive information about gke cluster: %s using sdk client. See https://developers.google.com/accounts/docs/application-default-credentials for more information", cluster)
case "eks":
return fmt.Errorf("could not get descriptive information about eks cluster: %s using sdk client. Check out how to configure credentials in https://docs.aws.amazon.com/sdk-for-go/api/", cluster)
return provider, fmt.Errorf("could not get descriptive information about eks cluster: %s using sdk client. Check out how to configure credentials in https://docs.aws.amazon.com/sdk-for-go/api/", cluster)
case "aks":
return fmt.Errorf("could not get descriptive information about aks cluster: %s. %v", cluster, err.Error())
return provider, fmt.Errorf("could not get descriptive information about aks cluster: %s. %v", cluster, err.Error())
}
return err
return provider, err
}
allResources[wl.GetID()] = wl
(*k8sResourcesMap)[fmt.Sprintf("%s/%s", wl.GetApiVersion(), wl.GetKind())] = []string{wl.GetID()}
(*armoResourceMap)[fmt.Sprintf("%s/%s", wl.GetApiVersion(), wl.GetKind())] = []string{wl.GetID()}
}
return nil
return provider, nil
}
// pullWorkerNodesNumber counts the cluster's worker nodes by listing nodes
// that carry neither of the control-plane role labels.
func (k8sHandler *K8sResourceHandler) pullWorkerNodesNumber() (int, error) {
	// exclude nodes labeled as control plane (both the old and new label names)
	opts := metav1.ListOptions{
		LabelSelector: "!node-role.kubernetes.io/control-plane,!node-role.kubernetes.io/master",
	}
	nodes, err := k8sHandler.k8s.KubernetesClient.CoreV1().Nodes().List(context.TODO(), opts)
	if err != nil {
		return 0, err
	}
	if nodes == nil {
		return 0, nil
	}
	return len(nodes.Items), nil
}

View File

@@ -4,14 +4,23 @@ import (
"strings"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/pkg/hostsensorutils"
"github.com/armosec/opa-utils/reporthandling"
"k8s.io/utils/strings/slices"
"github.com/armosec/k8s-interface/k8sinterface"
)
func setResourceMap(frameworks []reporthandling.Framework) *cautils.K8SResources {
var (
	// ClusterDescribe is the synthetic resource kind that carries managed-cluster
	// describe data from the cloud provider.
	ClusterDescribe = "ClusterDescribe"

	// MapResourceToApiGroupCloud maps a cloud resource kind to the apiVersion
	// strings it may be reported under (GKE and EKS respectively).
	MapResourceToApiGroupCloud = map[string][]string{
		ClusterDescribe: {"container.googleapis.com/v1", "eks.amazonaws.com/v1"}}
)
func setK8sResourceMap(frameworks []reporthandling.Framework) *cautils.K8SResources {
k8sResources := make(cautils.K8SResources)
complexMap := setComplexResourceMap(frameworks)
complexMap := setComplexK8sResourceMap(frameworks)
for group := range complexMap {
for version := range complexMap[group] {
for resource := range complexMap[group][version] {
@@ -25,33 +34,87 @@ func setResourceMap(frameworks []reporthandling.Framework) *cautils.K8SResources
return &k8sResources
}
// setArmoResourceMap builds the ARMO (non-k8s) resource map required by the
// given frameworks, keyed by "group/version/resource" with nil placeholders
// to be filled when the resources are pulled. As a side effect (inside
// setComplexArmoResourceMap) it fills resourceToControl with the control IDs
// that depend on each resource.
func setArmoResourceMap(frameworks []reporthandling.Framework, resourceToControl map[string][]string) *cautils.ArmoResources {
	result := make(cautils.ArmoResources)
	for group, versions := range setComplexArmoResourceMap(frameworks, resourceToControl) {
		for version, resources := range versions {
			for resource := range resources {
				for _, key := range k8sinterface.ResourceGroupToString(group, version, resource) {
					result[key] = nil
				}
			}
		}
	}
	return &result
}
// convertComplexResourceMap builds the nested [group][version][resource]
// map of k8s resources required by the given frameworks' static rule
// matchers.
//
// NOTE(review): the original body was byte-for-byte identical to
// setComplexK8sResourceMap (defined in this file); delegating keeps the two
// from drifting apart.
func convertComplexResourceMap(frameworks []reporthandling.Framework) map[string]map[string]map[string]interface{} {
	return setComplexK8sResourceMap(frameworks)
}
func setComplexResourceMap(frameworks []reporthandling.Framework) map[string]map[string]map[string]interface{} {
// setComplexK8sResourceMap collects, from every rule's static matchers
// across the given frameworks, the nested [group][version][resource] map of
// k8s resources the scan must pull.
func setComplexK8sResourceMap(frameworks []reporthandling.Framework) map[string]map[string]map[string]interface{} {
	resourceTree := make(map[string]map[string]map[string]interface{})
	for _, fw := range frameworks {
		for _, ctrl := range fw.Controls {
			for _, rule := range ctrl.Rules {
				for _, matcher := range rule.Match {
					insertResources(resourceTree, matcher)
				}
			}
		}
	}
	return resourceTree
}
func insertK8sResources(k8sResources map[string]map[string]map[string]interface{}, match reporthandling.RuleMatchObjects) {
// [group][version][resource]
// setComplexArmoResourceMap collects, from every rule's dynamic matchers
// across the given frameworks, the nested [group][version][resource] map of
// ARMO resources, and records in resourceToControls which control IDs depend
// on each resource.
func setComplexArmoResourceMap(frameworks []reporthandling.Framework, resourceToControls map[string][]string) map[string]map[string]map[string]interface{} {
	resourceTree := make(map[string]map[string]map[string]interface{})
	for _, fw := range frameworks {
		for _, ctrl := range fw.Controls {
			for _, rule := range ctrl.Rules {
				for _, matcher := range rule.DynamicMatch {
					insertArmoResourcesAndControls(resourceTree, matcher, resourceToControls, ctrl)
				}
			}
		}
	}
	return resourceTree
}
// mapArmoResourceToApiGroup returns the apiVersion strings an ARMO resource
// kind is reported under: host-scanner kinds map to exactly one entry, cloud
// kinds may map to several, and unknown kinds yield nil.
func mapArmoResourceToApiGroup(resource string) []string {
	if apiGroup, found := hostsensorutils.MapResourceToApiGroup[resource]; found {
		return []string{apiGroup}
	}
	return MapResourceToApiGroupCloud[resource]
}
// insertControls records, under every group/version/resource key the given
// resource maps to, that the control depends on it. Control IDs are
// de-duplicated per resource key.
func insertControls(resource string, resourceToControl map[string][]string, control reporthandling.Control) {
	for _, armoResource := range mapArmoResourceToApiGroup(resource) {
		group, version := k8sinterface.SplitApiVersion(armoResource)
		key := k8sinterface.JoinResourceTriplets(group, version, resource)
		// slices.Contains on a missing (nil) map entry is false, so the
		// previous special case for "key not yet present" was redundant —
		// one membership test covers both branches.
		if !slices.Contains(resourceToControl[key], control.ControlID) {
			resourceToControl[key] = append(resourceToControl[key], control.ControlID)
		}
	}
}
func insertResources(k8sResources map[string]map[string]map[string]interface{}, match reporthandling.RuleMatchObjects) {
for _, apiGroup := range match.APIGroups {
if v, ok := k8sResources[apiGroup]; !ok || v == nil {
k8sResources[apiGroup] = make(map[string]map[string]interface{})
@@ -69,6 +132,25 @@ func insertK8sResources(k8sResources map[string]map[string]map[string]interface{
}
}
// insertArmoResourcesAndControls adds the matcher's group/version/resource
// combinations to the nested resource tree, creating intermediate maps as
// needed, and records (via insertControls) that the control depends on each
// matched resource.
func insertArmoResourcesAndControls(k8sResources map[string]map[string]map[string]interface{}, match reporthandling.RuleMatchObjects, resourceToControl map[string][]string, control reporthandling.Control) {
	for _, group := range match.APIGroups {
		versions := k8sResources[group]
		if versions == nil {
			versions = make(map[string]map[string]interface{})
			k8sResources[group] = versions
		}
		for _, version := range match.APIVersions {
			resources := versions[version]
			if resources == nil {
				resources = make(map[string]interface{})
				versions[version] = resources
			}
			for _, resource := range match.Resources {
				if _, present := resources[resource]; !present {
					resources[resource] = nil
				}
				insertControls(resource, resourceToControl, control)
			}
		}
	}
}
func getGroupNVersion(apiVersion string) (string, string) {
gv := strings.Split(apiVersion, "/")
group, version := "", ""

View File

@@ -13,7 +13,7 @@ func TestGetK8sResources(t *testing.T) {
func TestSetResourceMap(t *testing.T) {
k8sinterface.InitializeMapResourcesMock()
framework := reporthandling.MockFrameworkA()
k8sResources := setResourceMap([]reporthandling.Framework{*framework})
k8sResources := setK8sResourceMap([]reporthandling.Framework{*framework})
resources := k8sinterface.ResourceGroupToString("*", "v1", "Pod")
if len(resources) == 0 {
t.Error("expected resources")
@@ -43,9 +43,9 @@ func TestInsertK8sResources(t *testing.T) {
APIVersions: []string{"v1"},
Resources: []string{"secrets"},
}
insertK8sResources(k8sResources, match1)
insertK8sResources(k8sResources, match2)
insertK8sResources(k8sResources, match3)
insertResources(k8sResources, match1)
insertResources(k8sResources, match2)
insertResources(k8sResources, match3)
apiGroup1, ok := k8sResources["apps"]
if !ok {

View File

@@ -34,7 +34,7 @@ func NewRegistryAdaptors() (*RegistryAdaptors, error) {
return registryAdaptors, nil
}
func (registryAdaptors *RegistryAdaptors) collectImagesVulnerabilities(k8sResourcesMap *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata) error {
func (registryAdaptors *RegistryAdaptors) collectImagesVulnerabilities(k8sResourcesMap *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, armoResourceMap *cautils.ArmoResources) error {
logger.L().Debug("Collecting images vulnerabilities")
// list cluster images
@@ -64,7 +64,7 @@ func (registryAdaptors *RegistryAdaptors) collectImagesVulnerabilities(k8sResour
for i := range metaObjs {
allResources[metaObjs[i].GetID()] = metaObjs[i]
}
(*k8sResourcesMap)[k8sinterface.JoinResourceTriplets(ImagevulnerabilitiesObjectGroup, ImagevulnerabilitiesObjectVersion, ImagevulnerabilitiesObjectKind)] = workloadinterface.ListMetaIDs(metaObjs)
(*armoResourceMap)[k8sinterface.JoinResourceTriplets(ImagevulnerabilitiesObjectGroup, ImagevulnerabilitiesObjectVersion, ImagevulnerabilitiesObjectKind)] = workloadinterface.ListMetaIDs(metaObjs)
return nil
}

View File

@@ -4,11 +4,10 @@ import (
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/opa-utils/reporthandling"
"k8s.io/apimachinery/pkg/version"
)
type IResourceHandler interface {
GetResources([]reporthandling.Framework, *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, error)
GetResources(*cautils.OPASessionObj, *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, *cautils.ArmoResources, error)
GetClusterAPIServerInfo() *version.Info
}

View File

@@ -7,7 +7,7 @@ import (
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
)
func generateRow(controlSummary reportsummary.IControlSummary) []string {
func generateRow(controlSummary reportsummary.IControlSummary, infoToPrintInfoMap map[string]string) []string {
row := []string{controlSummary.GetName()}
row = append(row, fmt.Sprintf("%d", controlSummary.NumberOfResources().Failed()))
row = append(row, fmt.Sprintf("%d", controlSummary.NumberOfResources().Excluded()))
@@ -15,8 +15,14 @@ func generateRow(controlSummary reportsummary.IControlSummary) []string {
if !controlSummary.GetStatus().IsSkipped() {
row = append(row, fmt.Sprintf("%d", int(controlSummary.GetScore()))+"%")
row = append(row, "")
} else {
row = append(row, "skipped")
row = append(row, string(controlSummary.GetStatus().Status()))
if controlSummary.GetStatus().IsSkipped() {
row = append(row, infoToPrintInfoMap[controlSummary.GetStatus().Info()])
} else {
row = append(row, "")
}
}
return row
}
@@ -32,5 +38,5 @@ func getSortedControlsNames(controls reportsummary.ControlSummaries) []string {
}
func getControlTableHeaders() []string {
return []string{"CONTROL NAME", "FAILED RESOURCES", "EXCLUDED RESOURCES", "ALL RESOURCES", "% RISK-SCORE"}
return []string{"CONTROL NAME", "FAILED RESOURCES", "EXCLUDED RESOURCES", "ALL RESOURCES", "% RISK-SCORE", "INFO"}
}

View File

@@ -32,7 +32,6 @@ func (jsonPrinter *JsonPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj
if err != nil {
logger.L().Fatal("failed to Marshal posture report object")
}
jsonPrinter.writer.Write(r)
logOUtputFile(jsonPrinter.writer.Name())
if _, err := jsonPrinter.writer.Write(r); err != nil {

View File

@@ -128,7 +128,7 @@ func listTestsSuite(results *cautils.OPASessionObj) []JUnitTestSuite {
var testSuites []JUnitTestSuite
// control scan
if len(results.Report.SummaryDetails.ListFrameworks().All()) == 0 {
if len(results.Report.SummaryDetails.ListFrameworks()) == 0 {
testSuite := JUnitTestSuite{}
testSuite.Failures = results.Report.SummaryDetails.NumberOfControls().Failed()
testSuite.Timestamp = results.Report.ReportGenerationTime.String()
@@ -147,7 +147,7 @@ func listTestsSuite(results *cautils.OPASessionObj) []JUnitTestSuite {
testSuite.ID = i
testSuite.Name = f.Name
testSuite.Properties = properties(f.Score)
testSuite.TestCases = testsCases(results, f.ListControls(), f.GetName())
testSuite.TestCases = testsCases(results, f.GetControls(), f.GetName())
testSuites = append(testSuites, testSuite)
}
@@ -176,7 +176,7 @@ func testsCases(results *cautils.OPASessionObj, controls reportsummary.IControls
testCaseFailure := JUnitFailure{}
testCaseFailure.Type = "Control"
// testCaseFailure.Contents =
testCaseFailure.Message = fmt.Sprintf("Remediation: %s\nMore details: %s\n\n%s", control.GetRemediation(), getControlURL(control.GetID()), strings.Join(resourcesStr, "\n"))
testCaseFailure.Message = fmt.Sprintf("Remediation: %s\nMore details: %s\n\n%s", control.GetRemediation(), getControlLink(control.GetID()), strings.Join(resourcesStr, "\n"))
testCase.Failure = &testCaseFailure
} else if control.GetStatus().IsSkipped() {

View File

@@ -54,15 +54,37 @@ func (pdfPrinter *PdfPrinter) SetWriter(outputFile string) {
func (pdfPrinter *PdfPrinter) Score(score float32) {
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", int(score))
}
// printInfo renders a legend section in the PDF: one row per non-empty
// entry of infoMap, printing the map value in a narrow column and the map
// key in a wide column, with a small spacer row between entries.
// NOTE(review): summaryDetails is unused in this method — confirm whether it
// can be dropped from the signature.
// NOTE(review): ranging over a map gives an unspecified order, so the legend
// rows may appear in a different order on each run — confirm this is
// acceptable.
func (pdfPrinter *PdfPrinter) printInfo(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails, infoMap map[string]string) {
	emptyRowCounter := 1
	for key, val := range infoMap {
		if val != "" {
			m.Row(5, func() {
				m.Col(1, func() {
					m.Text(fmt.Sprintf("%v", val))
				})
				m.Col(12, func() {
					m.Text(fmt.Sprintf("%v", key))
				})
			})
			// spacer row between legend entries, skipped after the last one
			if emptyRowCounter < len(infoMap) {
				m.Row(2.5, func() {})
				emptyRowCounter++
			}
		}
	}
}
func (pdfPrinter *PdfPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
pdfPrinter.sortedControlNames = getSortedControlsNames(opaSessionObj.Report.SummaryDetails.Controls)
infoToPrintInfoMap := mapInfoToPrintInfo(opaSessionObj.Report.SummaryDetails.Controls)
m := pdf.NewMaroto(consts.Portrait, consts.A4)
pdfPrinter.printHeader(m)
pdfPrinter.printFramework(m, opaSessionObj.Report.SummaryDetails.ListFrameworks().All())
pdfPrinter.printFramework(m, opaSessionObj.Report.SummaryDetails.ListFrameworks())
pdfPrinter.printTable(m, &opaSessionObj.Report.SummaryDetails)
pdfPrinter.printFinalResult(m, &opaSessionObj.Report.SummaryDetails)
pdfPrinter.printInfo(m, &opaSessionObj.Report.SummaryDetails, infoToPrintInfoMap)
// Extract output buffer.
outBuff, err := m.Output()
@@ -115,7 +137,7 @@ func (pdfPrinter *PdfPrinter) printHeader(m pdf.Maroto) {
}
// Print pdf frameworks after pdf header.
func (pdfPrinter *PdfPrinter) printFramework(m pdf.Maroto, frameworks []reportsummary.IPolicies) {
func (pdfPrinter *PdfPrinter) printFramework(m pdf.Maroto, frameworks []reportsummary.IFrameworkSummary) {
m.Row(10, func() {
m.Text(frameworksScoresToString(frameworks), props.Text{
Align: consts.Center,
@@ -129,12 +151,13 @@ func (pdfPrinter *PdfPrinter) printFramework(m pdf.Maroto, frameworks []reportsu
// Create pdf table
func (pdfPrinter *PdfPrinter) printTable(m pdf.Maroto, summaryDetails *reportsummary.SummaryDetails) {
headers := getControlTableHeaders()
infoToPrintInfoMap := mapInfoToPrintInfo(summaryDetails.Controls)
controls := make([][]string, len(pdfPrinter.sortedControlNames))
for i := range controls {
controls[i] = make([]string, len(headers))
}
for i := 0; i < len(pdfPrinter.sortedControlNames); i++ {
controls[i] = generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, pdfPrinter.sortedControlNames[i]))
controls[i] = generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, pdfPrinter.sortedControlNames[i]), infoToPrintInfoMap)
}
m.TableList(headers, controls, props.TableList{

View File

@@ -75,7 +75,7 @@ func (prettyPrinter *PrettyPrinter) printSummary(controlName string, controlSumm
}
func (prettyPrinter *PrettyPrinter) printTitle(controlSummary reportsummary.IControlSummary) {
cautils.InfoDisplay(prettyPrinter.writer, "[control: %s - %s] ", controlSummary.GetName(), getControlURL(controlSummary.GetID()))
cautils.InfoDisplay(prettyPrinter.writer, "[control: %s - %s] ", controlSummary.GetName(), getControlLink(controlSummary.GetID()))
switch controlSummary.GetStatus().Status() {
case apis.StatusSkipped:
cautils.InfoDisplay(prettyPrinter.writer, "skipped %v\n", emoji.ConfusedFace)
@@ -83,10 +83,17 @@ func (prettyPrinter *PrettyPrinter) printTitle(controlSummary reportsummary.ICon
cautils.FailureDisplay(prettyPrinter.writer, "failed %v\n", emoji.SadButRelievedFace)
case apis.StatusExcluded:
cautils.WarningDisplay(prettyPrinter.writer, "excluded %v\n", emoji.NeutralFace)
case apis.StatusIrrelevant:
cautils.SuccessDisplay(prettyPrinter.writer, "irrelevant %v\n", emoji.ConfusedFace)
case apis.StatusError:
cautils.WarningDisplay(prettyPrinter.writer, "error %v\n", emoji.ConfusedFace)
default:
cautils.SuccessDisplay(prettyPrinter.writer, "passed %v\n", emoji.ThumbsUp)
}
cautils.DescriptionDisplay(prettyPrinter.writer, "Description: %s\n", controlSummary.GetDescription())
if controlSummary.GetStatus().Info() != "" {
cautils.WarningDisplay(prettyPrinter.writer, "Reason: %v\n", controlSummary.GetStatus().Info())
}
}
func (prettyPrinter *PrettyPrinter) printResources(controlSummary reportsummary.IControlSummary, allResources map[string]workloadinterface.IMetadata) {
@@ -166,6 +173,7 @@ func generateFooter(summaryDetails *reportsummary.SummaryDetails) []string {
row = append(row, fmt.Sprintf("%d", summaryDetails.NumberOfResources().Excluded()))
row = append(row, fmt.Sprintf("%d", summaryDetails.NumberOfResources().All()))
row = append(row, fmt.Sprintf("%.2f%s", summaryDetails.Score, "%"))
row = append(row, " ")
return row
}
@@ -175,30 +183,37 @@ func (prettyPrinter *PrettyPrinter) printSummaryTable(summaryDetails *reportsumm
summaryTable.SetAutoWrapText(false)
summaryTable.SetHeader(getControlTableHeaders())
summaryTable.SetHeaderLine(true)
alignments := []int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER}
alignments := []int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER}
summaryTable.SetColumnAlignment(alignments)
infoToPrintInfoMap := mapInfoToPrintInfo(summaryDetails.Controls)
for i := 0; i < len(prettyPrinter.sortedControlNames); i++ {
summaryTable.Append(generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, prettyPrinter.sortedControlNames[i])))
summaryTable.Append(generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, prettyPrinter.sortedControlNames[i]), infoToPrintInfoMap))
}
summaryTable.SetFooter(generateFooter(summaryDetails))
// summaryTable.SetFooter(generateFooter())
cautils.InfoTextDisplay(prettyPrinter.writer, frameworksScoresToString(summaryDetails.ListFrameworks()))
summaryTable.Render()
prettyPrinter.printInfo(infoToPrintInfoMap)
// For control scan framework will be nil
cautils.InfoTextDisplay(prettyPrinter.writer, frameworksScoresToString(summaryDetails.ListFrameworks().All()))
}
func frameworksScoresToString(frameworks []reportsummary.IPolicies) string {
// printInfo writes one warning line per collected info message, prefixed by
// its stars marker (as produced by mapInfoToPrintInfo) so readers can match
// the footnote markers shown in the summary table.
func (prettyPrinter *PrettyPrinter) printInfo(infoToPrintInfoMap map[string]string) {
	for info, stars := range infoToPrintInfoMap {
		// Pass format and arguments separately: pre-formatting with
		// fmt.Sprintf made any '%' inside the info text be re-interpreted
		// as a formatting directive by WarningDisplay.
		cautils.WarningDisplay(prettyPrinter.writer, "%s - %s\n", stars, info)
	}
}
func frameworksScoresToString(frameworks []reportsummary.IFrameworkSummary) string {
if len(frameworks) == 1 {
if frameworks[0].GetName() != "" {
return fmt.Sprintf("FRAMEWORK %s\n", frameworks[0].GetName())
return fmt.Sprintf("\nFRAMEWORK %s\n", frameworks[0].GetName())
// cautils.InfoTextDisplay(prettyPrinter.writer, ))
}
} else if len(frameworks) > 1 {
p := "FRAMEWORKS: "
p := "\nFRAMEWORKS: "
i := 0
for ; i < len(frameworks)-1; i++ {
p += fmt.Sprintf("%s (risk: %.2f), ", frameworks[i].GetName(), frameworks[i].GetScore())
@@ -217,6 +232,6 @@ func frameworksScoresToString(frameworks []reportsummary.IPolicies) string {
// sort.Strings(controlNames)
// return controlNames
// }
func getControlURL(controlID string) string {
// getControlLink builds the ARMO hub documentation URL for a control.
// Control IDs are lowercased because the docs site uses lowercase slugs.
func getControlLink(controlID string) string {
	return "https://hub.armo.cloud/docs/" + strings.ToLower(controlID)
}

View File

@@ -0,0 +1,55 @@
package v2
import (
"fmt"
"os"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/pkg/resultshandling/printer"
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
)
// PrometheusPrinter renders scan results in the Prometheus text exposition
// format.
type PrometheusPrinter struct {
	writer      *os.File // metrics destination; assigned by SetWriter
	verboseMode bool     // set at construction; not read in this file — TODO confirm intended use
}
// NewPrometheusPrinter returns a printer that emits Prometheus metrics.
// SetWriter must be called before printing.
func NewPrometheusPrinter(verboseMode bool) *PrometheusPrinter {
	p := new(PrometheusPrinter)
	p.verboseMode = verboseMode
	return p
}
// SetWriter resolves outputFile via printer.GetWriter and stores the
// resulting file handle as the destination for the metrics output.
func (prometheusPrinter *PrometheusPrinter) SetWriter(outputFile string) {
	prometheusPrinter.writer = printer.GetWriter(outputFile)
}
// Score emits the overall risk score in Prometheus exposition format.
// It is written to the configured writer so the score lands in the same
// stream as the rest of the metrics; the previous fmt.Printf always went to
// stdout, silently dropping the metric whenever the writer was a file.
func (prometheusPrinter *PrometheusPrinter) Score(score float32) {
	out := prometheusPrinter.writer
	if out == nil {
		out = os.Stdout // fallback preserves the old behavior when SetWriter was never called
	}
	fmt.Fprintf(out, "\n# Overall risk-score (0- Excellent, 100- All failed)\nkubescape_score %d\n", int(score))
}
// generatePrometheusFormat converts the scan results into the intermediate
// Metrics representation: cluster/framework/control risk scores plus the
// per-resource control counters.
//
// The receiver is named prometheusPrinter for consistency with the other
// methods and because the previous name ("printer") shadowed the imported
// printer package.
func (prometheusPrinter *PrometheusPrinter) generatePrometheusFormat(
	resources map[string]workloadinterface.IMetadata,
	results map[string]resourcesresults.Result,
	summaryDetails *reportsummary.SummaryDetails) *Metrics {
	m := &Metrics{}
	m.setRiskScores(summaryDetails)
	m.setResourcesCounters(resources, results)
	return m
}
// ActionPrint builds the Prometheus metrics from the OPA session results and
// writes them to the configured writer; a write failure is logged rather
// than returned (the printer interface has no error path).
//
// Receiver renamed from "printer", which shadowed the imported printer package.
func (prometheusPrinter *PrometheusPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
	metrics := prometheusPrinter.generatePrometheusFormat(opaSessionObj.AllResources, opaSessionObj.ResourcesResult, &opaSessionObj.Report.SummaryDetails)
	logOUtputFile(prometheusPrinter.writer.Name())
	if _, err := prometheusPrinter.writer.Write([]byte(metrics.String())); err != nil {
		logger.L().Error("failed to write results", helpers.Error(err))
	}
}

View File

@@ -0,0 +1,252 @@
package v2
import (
"fmt"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/reporthandling/apis"
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
)
// metricsName is the Prometheus metric name used on an exposition line.
type metricsName string

const (
	// Risk-score gauges (cluster-wide, per framework, per control).
	metricsFrameworkScore metricsName = "kubescape_risk_score_framework"
	metricsControlScore   metricsName = "kubescape_risk_score_control"
	metricsScore          metricsName = "kubescape_risk_score"
	// Per-resource counters of controls by status.
	// NOTE(review): the "resource" segment is lowercase, breaking mixedCaps
	// (should be metricsResourceFailed etc.); renaming would also touch
	// Metrics.String, so it is only flagged here.
	metricsresourceFailed   metricsName = "kubescape_resource_controls_number_of_failed"
	metricsresourcePassed   metricsName = "kubescape_resource_controls_number_of_passed"
	metricsresourceExcluded metricsName = "kubescape_resource_controls_number_of_exclude"
)
// string renders the cluster-level counters as a Prometheus label list
// (comma-separated key="value" pairs, without the surrounding braces).
func (mrs *mRiskScore) string() string {
	return fmt.Sprintf(
		`resourcesCountFailed="%d",resourcesCountExcluded="%d",resourcesCountPassed="%d",controlsCountFailed="%d",controlsCountExcluded="%d",controlsCountPassed="%d",controlsCountSkipped="%d"`,
		mrs.resourcesCountFailed,
		mrs.resourcesCountExcluded,
		mrs.resourcesCountPassed,
		mrs.controlsCountFailed,
		mrs.controlsCountExcluded,
		mrs.controlsCountPassed,
		mrs.controlsCountSkipped,
	)
}
// value returns the cluster risk score — the metric's sample value.
func (mrs *mRiskScore) value() int {
	return mrs.riskScore
}
// string renders the per-control metadata and counters as a Prometheus label
// list. NOTE(review): label values are not escaped; a quote or newline in
// remediation text would break the exposition format — confirm upstream data.
func (mcrs *mControlRiskScore) string() string {
	return fmt.Sprintf(
		`controlName="%s",controlID="%s",severity="%s",resourcesCountFailed="%d",resourcesCountExcluded="%d",resourcesCountPassed="%d",link="%s",remediation="%s"`,
		mcrs.controlName,
		mcrs.controlID,
		mcrs.severity,
		mcrs.resourcesCountFailed,
		mcrs.resourcesCountExcluded,
		mcrs.resourcesCountPassed,
		mcrs.link,
		mcrs.remediation,
	)
}
// value returns the control's risk score — the metric's sample value.
func (mcrs *mControlRiskScore) value() int {
	return mcrs.riskScore
}
// string renders the per-framework counters as a Prometheus label list.
func (mfrs *mFrameworkRiskScore) string() string {
	return fmt.Sprintf(
		`frameworkName="%s",resourcesCountFailed="%d",resourcesCountExcluded="%d",resourcesCountPassed="%d",controlsCountFailed="%d",controlsCountExcluded="%d",controlsCountPassed="%d",controlsCountSkipped="%d"`,
		mfrs.frameworkName,
		mfrs.resourcesCountFailed,
		mfrs.resourcesCountExcluded,
		mfrs.resourcesCountPassed,
		mfrs.controlsCountFailed,
		mfrs.controlsCountExcluded,
		mfrs.controlsCountPassed,
		mfrs.controlsCountSkipped,
	)
}
// value returns the framework's risk score — the metric's sample value.
func (mfrs *mFrameworkRiskScore) value() int {
	return mfrs.riskScore
}
// string renders the resource identity (GVK + namespace/name) as a
// Prometheus label list.
func (mrc *mResourceControls) string() string {
	return fmt.Sprintf(
		`apiVersion="%s",kind="%s",namespace="%s",name="%s"`,
		mrc.apiVersion,
		mrc.kind,
		mrc.namespace,
		mrc.name,
	)
}
// value returns the number of controls counted for this resource — the
// metric's sample value.
func (mrc *mResourceControls) value() int {
	return mrc.controls
}
// toRowInMetrics formats one exposition line: `name{labels} value\n`.
func toRowInMetrics(name metricsName, row string, value int) string {
	sample := fmt.Sprintf("%d", value)
	return string(name) + "{" + row + "} " + sample + "\n"
}
// String renders all collected metrics in Prometheus text exposition format:
// the overall score first, then framework and control scores, then the
// per-resource failed / excluded / passed control counters.
func (m *Metrics) String() string {
	out := toRowInMetrics(metricsScore, m.rs.string(), m.rs.value())
	for _, fw := range m.listFrameworks {
		out += toRowInMetrics(metricsFrameworkScore, fw.string(), fw.value())
	}
	for _, ctrl := range m.listControls {
		out += toRowInMetrics(metricsControlScore, ctrl.string(), ctrl.value())
	}
	for _, rc := range m.listResourcesControlsFiled {
		out += toRowInMetrics(metricsresourceFailed, rc.string(), rc.value())
	}
	for _, rc := range m.listResourcesControlsExcluded {
		out += toRowInMetrics(metricsresourceExcluded, rc.string(), rc.value())
	}
	for _, rc := range m.listResourcesControlsPassed {
		out += toRowInMetrics(metricsresourcePassed, rc.string(), rc.value())
	}
	return out
}
// mRiskScore holds the cluster-wide counters (emitted as labels) and the
// overall risk score (emitted as the metric value).
type mRiskScore struct {
	resourcesCountPassed   int
	resourcesCountFailed   int
	resourcesCountExcluded int
	controlsCountPassed    int
	controlsCountFailed    int
	controlsCountExcluded  int
	controlsCountSkipped   int
	riskScore              int // metric
}

// mControlRiskScore holds a single control's metadata and resource counters
// (labels) plus its risk score (metric value).
type mControlRiskScore struct {
	controlName            string
	controlID              string
	link                   string // documentation URL for the control
	severity               string
	remediation            string
	resourcesCountPassed   int
	resourcesCountFailed   int
	resourcesCountExcluded int
	riskScore              int // metric
}

// mFrameworkRiskScore holds a single framework's counters (labels) plus its
// risk score (metric value).
type mFrameworkRiskScore struct {
	frameworkName          string
	resourcesCountPassed   int
	resourcesCountFailed   int
	resourcesCountExcluded int
	controlsCountPassed    int
	controlsCountFailed    int
	controlsCountExcluded  int
	controlsCountSkipped   int
	riskScore              int // metric
}

// mResourceControls identifies one scanned resource (labels) and the number
// of its controls in a given status (metric value).
type mResourceControls struct {
	name       string
	namespace  string
	apiVersion string
	kind       string
	controls   int // metric
}

// Metrics is the full set of Prometheus metrics produced from one scan.
type Metrics struct {
	rs                            mRiskScore
	listFrameworks                []mFrameworkRiskScore
	listControls                  []mControlRiskScore
	listResourcesControlsFiled    []mResourceControls // NOTE(review): "Filed" looks like a typo for "Failed"; renaming would touch other methods
	listResourcesControlsPassed   []mResourceControls
	listResourcesControlsExcluded []mResourceControls
}
// set copies the resource- and control-level status counters into the
// metric's label fields.
func (mrs *mRiskScore) set(resources reportsummary.ICounters, controls reportsummary.ICounters) {
	mrs.resourcesCountPassed = resources.Passed()
	mrs.resourcesCountFailed = resources.Failed()
	mrs.resourcesCountExcluded = resources.Excluded()

	mrs.controlsCountPassed = controls.Passed()
	mrs.controlsCountFailed = controls.Failed()
	mrs.controlsCountExcluded = controls.Excluded()
	mrs.controlsCountSkipped = controls.Skipped()
}
// set copies the framework's resource- and control-level status counters
// into the metric's label fields.
func (mfrs *mFrameworkRiskScore) set(resources reportsummary.ICounters, controls reportsummary.ICounters) {
	mfrs.resourcesCountPassed = resources.Passed()
	mfrs.resourcesCountFailed = resources.Failed()
	mfrs.resourcesCountExcluded = resources.Excluded()

	mfrs.controlsCountPassed = controls.Passed()
	mfrs.controlsCountFailed = controls.Failed()
	mfrs.controlsCountExcluded = controls.Excluded()
	mfrs.controlsCountSkipped = controls.Skipped()
}
// set copies the control's resource status counters into the metric's
// label fields.
func (mcrs *mControlRiskScore) set(resources reportsummary.ICounters) {
	mcrs.resourcesCountPassed = resources.Passed()
	mcrs.resourcesCountFailed = resources.Failed()
	mcrs.resourcesCountExcluded = resources.Excluded()
}
// setRiskScores fills the cluster-, framework- and control-level risk-score
// metrics from the report summary.
func (m *Metrics) setRiskScores(summaryDetails *reportsummary.SummaryDetails) {
	// Cluster-wide score and counters.
	m.rs.set(summaryDetails.NumberOfResources(), summaryDetails.NumberOfControls())
	m.rs.riskScore = int(summaryDetails.GetScore())

	// One entry per framework.
	for _, fw := range summaryDetails.ListFrameworks() {
		var mfrs mFrameworkRiskScore
		mfrs.frameworkName = fw.GetName()
		mfrs.riskScore = int(fw.GetScore())
		mfrs.set(fw.NumberOfResources(), fw.NumberOfControls())
		m.listFrameworks = append(m.listFrameworks, mfrs)
	}

	// One entry per control.
	for _, control := range summaryDetails.ListControls() {
		var mcrs mControlRiskScore
		mcrs.controlName = control.GetName()
		mcrs.controlID = control.GetID()
		mcrs.riskScore = int(control.GetScore())
		mcrs.link = getControlLink(control.GetID())
		mcrs.severity = apis.ControlSeverityToString(control.GetScoreFactor())
		mcrs.remediation = control.GetRemediation()
		mcrs.set(control.NumberOfResources())
		m.listControls = append(m.listControls, mcrs)
	}
}
// resourceControlStatusCounters counts a single resource's controls by
// status and returns (passed, excluded, failed).
func resourceControlStatusCounters(result *resourcesresults.Result) (int, int, int) {
	passed, excluded, failed := 0, 0, 0
	// Hoist the slice: the previous code called result.ListControls() again
	// on every iteration just to index into it.
	controls := result.ListControls()
	for i := range controls {
		switch controls[i].GetStatus(nil).Status() {
		case apis.StatusExcluded:
			excluded++
		case apis.StatusFailed:
			failed++
		case apis.StatusPassed:
			passed++
		}
	}
	return passed, excluded, failed
}
// setResourcesCounters builds, for every scanned resource, the counters of
// passed / failed / excluded controls. Resources without metadata are
// skipped, and an entry is appended only when its count is positive.
func (m *Metrics) setResourcesCounters(
	resources map[string]workloadinterface.IMetadata,
	results map[string]resourcesresults.Result) {
	for resourceID, result := range results {
		meta, found := resources[resourceID]
		if !found {
			continue // no metadata for this resource; nothing to label
		}
		passed, excluded, failed := resourceControlStatusCounters(&result)

		// Shared identity labels; copied per appended entry.
		base := mResourceControls{
			name:       meta.GetName(),
			namespace:  meta.GetNamespace(),
			apiVersion: meta.GetApiVersion(),
			kind:       meta.GetKind(),
		}
		if passed > 0 {
			entry := base
			entry.controls = passed
			m.listResourcesControlsPassed = append(m.listResourcesControlsPassed, entry)
		}
		if failed > 0 {
			entry := base
			entry.controls = failed
			m.listResourcesControlsFiled = append(m.listResourcesControlsFiled, entry)
		}
		if excluded > 0 {
			entry := base
			entry.controls = excluded
			m.listResourcesControlsExcluded = append(m.listResourcesControlsExcluded, entry)
		}
	}
}

View File

@@ -6,6 +6,7 @@ import (
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
)
@@ -38,6 +39,20 @@ func finalizeResults(results []resourcesresults.Result, resourcesResult map[stri
}
}
// mapInfoToPrintInfo assigns a unique "stars" marker to every distinct info
// message found on skipped controls, so summary tables can footnote them:
// the first message gets "*", the second "**", the third "***", and so on.
// NOTE(review): iteration order over controls may vary between runs if
// ControlSummaries is map-backed, so which message gets which marker is not
// deterministic — confirm whether callers care.
func mapInfoToPrintInfo(controls reportsummary.ControlSummaries) map[string]string {
	infoToPrintInfoMap := make(map[string]string)
	stars := "*"
	for _, control := range controls {
		info := control.GetStatus().Info()
		if !control.GetStatus().IsSkipped() || info == "" {
			continue
		}
		if _, seen := infoToPrintInfoMap[info]; !seen {
			infoToPrintInfoMap[info] = stars
			// Grow by one star per distinct message. The previous
			// `starCount += starCount` doubled the marker each time
			// (*, **, ****, ...), exploding after a handful of messages.
			stars += "*"
		}
	}
	return infoToPrintInfoMap
}
func finalizeResources(resources []reporthandling.Resource, results []resourcesresults.Result, allResources map[string]workloadinterface.IMetadata) {
for i := range results {
if obj, ok := allResources[results[i].ResourceID]; ok {

View File

@@ -60,6 +60,7 @@ func (report *ReportEventReceiver) ActionSendReport(opaSessionObj *cautils.OPASe
opaSessionObj.Report.ReportID = uuid.NewString()
opaSessionObj.Report.CustomerGUID = report.customerGUID
opaSessionObj.Report.ClusterName = report.clusterName
opaSessionObj.Report.Metadata = *opaSessionObj.Metadata
if err := report.prepareReport(opaSessionObj.Report); err != nil {
logger.L().Error("failed to publish results", helpers.Error(err))

View File

@@ -44,6 +44,7 @@ func setSubReport(postureReport *reporthandlingv2.PostureReport) *reporthandling
ClusterCloudProvider: postureReport.ClusterCloudProvider,
JobID: postureReport.JobID,
ClusterAPIServerInfo: postureReport.ClusterAPIServerInfo,
Metadata: postureReport.Metadata,
}
}
func iMetaToResource(obj workloadinterface.IMetadata) *reporthandling.Resource {

View File

@@ -84,7 +84,7 @@ func NewPrinter(printFormat, formatVersion string, verboseMode bool) printer.IPr
case printer.JunitResultFormat:
return printerv2.NewJunitPrinter(verboseMode)
case printer.PrometheusFormat:
return printerv1.NewPrometheusPrinter(verboseMode)
return printerv2.NewPrometheusPrinter(verboseMode)
case printer.PdfFormat:
return printerv2.NewPdfPrinter()
default:

BIN
docs/ARMO-header-2022.gif Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 MiB

View File

@@ -5,14 +5,14 @@
Running `kubescape` will start up a webserver on port `8080` which will serve the following paths:
* POST `/v1/scan` - Trigger a kubescape scan. The server will return an ID and will execute the scanning asynchronously
* * `synchronously`: scan synchronously (return results and not ID). Use only in small clusters or with an increased timeout
* * `wait`: scan synchronously (return results and not ID). Use only in small clusters or with an increased timeout
* GET `/v1/results` - Request kubescape scan results
* * query `id=<string>` -> ID returned when triggering the scan action. If empty will return latest results
* * query `id=<string>` -> ID returned when triggering the scan action. ~If empty will return latest results~ (not supported)
* * query `remove` -> Remove results from storage after reading the results
* DELETE `/v1/results` - Delete kubescape scan results from storage If empty will delete latest results
* DELETE `/v1/results` - Delete kubescape scan results from storage. ~If empty will delete latest results~ (not supported)
* * query `id=<string>`: Delete ID of specific results
* * query `all`: Delete all cached results
* GET/POST `/v1/metrics` - will trigger cluster scan. will respond with prometheus metrics once they have been scanned. This will respond 503 if the scan failed.
* GET/POST `/metrics` - will trigger cluster scan. will respond with prometheus metrics once they have been scanned. This will respond 503 if the scan failed.
* `/livez` - will respond 200 if the server is alive
* `/readyz` - will respond 200 if server can receive requests
@@ -20,15 +20,16 @@ Running `kubescape` will start up a webserver on port `8080` which will serve th
POST /v1/results
body:
```json
```
{
"format": "", // results format [default: json] (same as 'kubescape scan --format')
"excludedNamespaces": null, // list of namespaces to exclude (same as 'kubescape scan --excluded-namespaces')
"includeNamespaces": null, // list of namespaces to include (same as 'kubescape scan --include-namespaces')
"submit": false, // submit results to Kubescape cloud (same as 'kubescape scan --submit')
"hostScanner": false, // deploy kubescape K8s host-scanner DaemonSet in the scanned cluster (same as 'kubescape scan --enable-host-scan')
"keepLocal": false, // do not submit results to Kubescape cloud (same as 'kubescape scan --keep-local')
"account": "" // account ID (same as 'kubescape scan --account')
"format": <str>, // results format [default: json] (same as 'kubescape scan --format')
"excludedNamespaces": <[]str>, // list of namespaces to exclude (same as 'kubescape scan --excluded-namespaces')
"includeNamespaces": <[]str>, // list of namespaces to include (same as 'kubescape scan --include-namespaces')
"useCachedArtifacts": <bool>, // use the cached artifacts instead of downloading (offline support)
"submit": <bool>, // submit results to Kubescape cloud (same as 'kubescape scan --submit')
"hostScanner": <bool>, // deploy kubescape K8s host-scanner DaemonSet in the scanned cluster (same as 'kubescape scan --enable-host-scan')
"keepLocal": <bool>, // do not submit results to Kubescape cloud (same as 'kubescape scan --keep-local')
"account": <str> // account ID (same as 'kubescape scan --account')
}
```
@@ -37,11 +38,10 @@ e.g.:
```bash
curl --header "Content-Type: application/json" \
--request POST \
--data '{"account":"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX","hostScanner":true, "submit":true}' \
--data '{"hostScanner":true, "submit":true}' \
http://127.0.0.1:8080/v1/scan
```
## Installation into kubernetes
## Examples
The [yaml](ks-prometheus-support.yaml) file will deploy one instance of kubescape (with all relevant dependencies) to run on your cluster
**NOTE** Make sure the configurations suit your cluster (e.g. `serviceType`, namespace, etc.)
* [Prometheus](examples/prometheus/README.md)
* [Microservice](examples/microservice/README.md)

92
httphandler/build.py Normal file
View File

@@ -0,0 +1,92 @@
import os
import sys
import hashlib
import platform
import subprocess
# ldflags injection targets: package-level variables in the kubescape getter
# package whose values are baked into release builds via `go build -ldflags -X`.
BASE_GETTER_CONST = "github.com/armosec/kubescape/core/cautils/getter"
BE_SERVER_CONST = BASE_GETTER_CONST + ".ArmoBEURL"
ER_SERVER_CONST = BASE_GETTER_CONST + ".ArmoERURL"
WEBSITE_CONST = BASE_GETTER_CONST + ".ArmoFEURL"
# NOTE(review): this target is lowercase ("armoAUTHURL") unlike the others;
# -X does work on unexported vars, but confirm the symbol name matches the Go source.
AUTH_SERVER_CONST = BASE_GETTER_CONST + ".armoAUTHURL"
def checkStatus(status, msg):
    """Abort the build with exit code ``status`` when it is non-zero.

    Writes ``msg`` to stderr (with a trailing newline so it is not glued to
    subsequent output) before exiting. A zero status is a no-op.
    """
    if status != 0:
        sys.stderr.write(msg + "\n")
        # sys.exit instead of the site-provided exit(): the latter is absent
        # when the interpreter runs with -S or in some embedded contexts.
        sys.exit(status)
def getBuildDir():
    """Return the per-OS build output directory, mirroring the CI runner names."""
    dir_by_platform = {
        "Windows": "windows-latest",
        "Linux": "ubuntu-latest",
        "Darwin": "macos-latest",
    }
    current_platform = platform.system()
    if current_platform not in dir_by_platform:
        raise OSError("Platform %s is not supported!" % (current_platform))
    return os.path.join("build/", dir_by_platform[current_platform])
def getPackageName():
    """Name of the produced binary (currently extension-less on every OS)."""
    # The Windows ".exe" suffix is intentionally disabled; kept for reference:
    # if platform.system() == "Windows": packageName += ".exe"
    return "kubescape"
def _buildLdflags():
    """Assemble the `go build -ldflags` string: strip symbols, then inject the
    release number and backend endpoints from environment variables when set."""
    buildUrl = "github.com/armosec/kubescape/core/cautils.BuildNumber"
    ldflags = "-w -s"
    env_to_target = [
        ("RELEASE", buildUrl),
        ("ArmoBEServer", BE_SERVER_CONST),
        ("ArmoERServer", ER_SERVER_CONST),
        ("ArmoWebsite", WEBSITE_CONST),
        ("ArmoAuthServer", AUTH_SERVER_CONST),
    ]
    for env_var, target in env_to_target:
        value = os.getenv(env_var)
        if value:
            ldflags += " -X {}={}".format(target, value)
    return ldflags


def _writeChecksum(ks_file, hash_file):
    """Compute the binary's SHA-256 (streamed in chunks to avoid loading the
    whole file into memory) and write the hex digest next to it."""
    sha256 = hashlib.sha256()
    with open(ks_file, "rb") as kube:
        for chunk in iter(lambda: kube.read(1 << 20), b""):
            sha256.update(chunk)
    # Single digest computation; the previous code also shadowed the
    # built-in `hash` and called hexdigest() twice.
    digest = sha256.hexdigest()
    print("kubescape hash: {}, file: {}".format(digest, hash_file))
    with open(hash_file, "w") as kube_sha:
        kube_sha.write(digest)


def main():
    """Build the kubescape binary into the per-OS build directory and emit a
    .sha256 checksum file alongside it. Aborts (non-zero exit) on build failure."""
    print("Building Kubescape")

    packageName = getPackageName()
    buildDir = getBuildDir()
    ks_file = os.path.join(buildDir, packageName)
    hash_file = ks_file + ".sha256"
    if not os.path.isdir(buildDir):
        os.makedirs(buildDir)

    build_command = ["go", "build", "-o", ks_file, "-ldflags", _buildLdflags()]
    print("Building kubescape and saving here: {}".format(ks_file))
    print("Build command: {}".format(" ".join(build_command)))
    status = subprocess.call(build_command)
    checkStatus(status, "Failed to build kubescape")

    _writeChecksum(ks_file, hash_file)
    print("Build Done")
# Entry-point guard: lets the helpers be imported (e.g. by tests) without
# triggering a build.
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,20 @@
# Kubescape as a microservice
1. Deploy kubescape microservice
```bash
kubectl apply -f ks-deployment.yaml
```
> **NOTE** Make sure the configurations suit your cluster (e.g. `serviceType`, namespace, etc.)
2. Trigger scan
```bash
curl --header "Content-Type: application/json" \
--request POST \
--data '{"account":"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX","hostScanner":true, "submit":true}' \
http://127.0.0.1:8080/v1/scan
```
3. Get results
```bash
curl --request GET http://127.0.0.1:8080/v1/results -o results.json
```

View File

@@ -51,6 +51,7 @@ spec:
type: NodePort
ports:
- port: 8080
name: http
targetPort: 8080
protocol: TCP
selector:
@@ -76,28 +77,31 @@ spec:
serviceAccountName: kubescape-discovery
containers:
- name: kubescape
# livenessProbe:
# httpGet:
# path: /livez
# port: 8080
# initialDelaySeconds: 3
# periodSeconds: 3
# readinessProbe:
# httpGet:
# path: /readyz
# port: 8080
# initialDelaySeconds: 3
# periodSeconds: 3
image: quay.io/armosec/kubescape:prometheus.v1
livenessProbe:
httpGet:
path: /livez
port: 8080
initialDelaySeconds: 3
periodSeconds: 3
readinessProbe:
httpGet:
path: /readyz
port: 8080
initialDelaySeconds: 3
periodSeconds: 3
image: quay.io/armosec/kubescape:prometheus.v2
env:
- name: KS_RUN_PROMETHEUS_SERVER
value: "true"
- name: KS_DEFAULT_CONFIGMAP_NAMESPACE
value: "ks-scanner"
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
ports:
- containerPort: 8080
name: http
protocol: TCP
command:
- kubescape
- ksserver
resources:
requests:
cpu: 10m

View File

@@ -0,0 +1,20 @@
# Prometheus Kubescape Integration
1. Deploy kubescape
```bash
kubectl apply -f ks-deployment.yaml
```
> **NOTE** Make sure the configurations suit your cluster (e.g. `serviceType`, etc.)
2. Deploy kube-prometheus-stack
```bash
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
kubectl create namespace prometheus
helm install -n prometheus kube-prometheus-stack prometheus-community/kube-prometheus-stack --set prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false,prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false
```
3. Deploy pod monitor
```bash
kubectl apply -f podmonitor.yaml
```

View File

@@ -0,0 +1,117 @@
---
apiVersion: v1
kind: Namespace
metadata:
labels:
app: kubescape
name: ks-scanner
---
# ------------------- Kubescape Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: kubescape
name: kubescape-discovery
namespace: ks-scanner
---
# ------------------- Kubescape Cluster Role & Cluster Role Binding ------------------- #
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubescape-discovery-clusterroles
# "namespace" omitted since ClusterRoles are not namespaced
rules:
- apiGroups: ["*"]
resources: ["*"]
verbs: ["get", "list", "describe"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubescape-discovery-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubescape-discovery-clusterroles
subjects:
- kind: ServiceAccount
name: kubescape-discovery
namespace: ks-scanner
---
apiVersion: v1
kind: Service
metadata:
name: kubescape-service
namespace: ks-scanner
labels:
app: kubescape-service
spec:
type: NodePort
ports:
- port: 8080
name: http
targetPort: 8080
protocol: TCP
selector:
app: kubescape
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubescape
namespace: ks-scanner
labels:
app: kubescape
spec:
replicas: 1
selector:
matchLabels:
app: kubescape
template:
metadata:
labels:
app: kubescape
spec:
serviceAccountName: kubescape-discovery
containers:
- name: kubescape
livenessProbe:
httpGet:
path: /livez
port: 8080
initialDelaySeconds: 3
periodSeconds: 3
readinessProbe:
httpGet:
path: /readyz
port: 8080
initialDelaySeconds: 3
periodSeconds: 3
image: quay.io/armosec/kubescape:prometheus.v2
env:
- name: KS_DEFAULT_CONFIGMAP_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: "KS_SKIP_UPDATE_CHECK" # do not check latest version
value: "true"
- name: KS_ENABLE_HOST_SCANNER # enable host scanner -> https://hub.armo.cloud/docs/host-sensor
value: "false" # TODO - add permissions to rbac
- name: KS_DOWNLOAD_ARTIFACTS # When set to true the artifacts will be downloaded every scan execution
value: "false"
ports:
- containerPort: 8080
name: http
protocol: TCP
command:
- ksserver
resources:
requests:
cpu: 10m
memory: 100Mi
limits:
cpu: 500m
memory: 500Mi

View File

@@ -0,0 +1,16 @@
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
name: kubescape
namespace: ks-scanner
labels:
app: kubescape
spec:
selector:
matchLabels:
app: kubescape
podMetricsEndpoints:
- port: http
# path: v1
interval: 120s
scrapeTimeout: 100s

View File

@@ -6,8 +6,10 @@ replace github.com/armosec/kubescape/core => ../core
require (
github.com/armosec/kubescape/core v0.0.0-00010101000000-000000000000
github.com/armosec/utils-go v0.0.3
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
github.com/stretchr/testify v1.7.0
)
require (
@@ -22,9 +24,8 @@ require (
github.com/OneOfOne/xxhash v1.2.8 // indirect
github.com/armosec/armoapi-go v0.0.58 // indirect
github.com/armosec/k8s-interface v0.0.68 // indirect
github.com/armosec/opa-utils v0.0.116 // indirect
github.com/armosec/opa-utils v0.0.120 // indirect
github.com/armosec/rbac-utils v0.0.14 // indirect
github.com/armosec/utils-go v0.0.3 // indirect
github.com/armosec/utils-k8s-go v0.0.3 // indirect
github.com/aws/aws-sdk-go v1.41.11 // indirect
github.com/aws/aws-sdk-go-v2 v1.12.0 // indirect
@@ -83,6 +84,7 @@ require (
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pquerna/cachecontrol v0.1.0 // indirect
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58 // indirect

View File

@@ -109,8 +109,8 @@ github.com/armosec/k8s-interface v0.0.66/go.mod h1:vwprS8qn/iowd5yf0JHpqDsLA5I8W
github.com/armosec/k8s-interface v0.0.68 h1:6CtSakISiI47YHkxh+Va9FzZQIBkWa6g9sbiNxq1Zkk=
github.com/armosec/k8s-interface v0.0.68/go.mod h1:PeWn41C2uenZi+xfZdyFF/zG5wXACA00htQyknDUWDE=
github.com/armosec/opa-utils v0.0.64/go.mod h1:6tQP8UDq2EvEfSqh8vrUdr/9QVSCG4sJfju1SXQOn4c=
github.com/armosec/opa-utils v0.0.116 h1:3oWuhcpI+MJD/CktEStU1BA0feGNwsCbQrI3ifVfzMs=
github.com/armosec/opa-utils v0.0.116/go.mod h1:gap+EaLG5rnyqvIRGxtdNDC9y7VvoGNm90zK8Ls7avQ=
github.com/armosec/opa-utils v0.0.120 h1:WAtgm2U1o9fgA/2pjYNy+igqNC6ju3/CxQ8qRHdO+5k=
github.com/armosec/opa-utils v0.0.120/go.mod h1:gap+EaLG5rnyqvIRGxtdNDC9y7VvoGNm90zK8Ls7avQ=
github.com/armosec/rbac-utils v0.0.1/go.mod h1:pQ8CBiij8kSKV7aeZm9FMvtZN28VgA7LZcYyTWimq40=
github.com/armosec/rbac-utils v0.0.14 h1:CKYKcgqJEXWF2Hen/B1pVGtS3nDAG1wp9dDv6oNtq90=
github.com/armosec/rbac-utils v0.0.14/go.mod h1:Ex/IdGWhGv9HZq6Hs8N/ApzCKSIvpNe/ETqDfnuyah0=

View File

@@ -1,17 +1,23 @@
package v1
import (
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/opa-utils/reporthandling"
)
type PostScanRequest struct {
Format string `json:"format"` // Format results (table, json, junit ...) - default json
ExcludedNamespaces []string `json:"excludedNamespaces"` // used for host scanner namespace
IncludeNamespaces []string `json:"includeNamespaces"` // DEPRECATED?
FailThreshold float32 `json:"failThreshold"` // Failure score threshold
Submit bool `json:"submit"` // Submit results to Armo BE - default will
HostScanner bool `json:"hostScanner"` // Deploy ARMO K8s host scanner to collect data from certain controls
KeepLocal bool `json:"keepLocal"` // Do not submit results
Account string `json:"account"` // account ID
Logger string `json:"-"` // logger level - debug/info/error - default is debug
TargetType string `json:"-"` // framework/control - default is framework
TargetNames []string `json:"-"` // default is all
Format string `json:"format"` // Format results (table, json, junit ...) - default json
Account string `json:"account"` // account ID
Logger string `json:"-"` // logger level - debug/info/error - default is debug
FailThreshold float32 `json:"failThreshold"` // Failure score threshold
ExcludedNamespaces []string `json:"excludedNamespaces"` // used for host scanner namespace
IncludeNamespaces []string `json:"includeNamespaces"` // DEPRECATED?
TargetNames []string `json:"targetNames"` // default is all
TargetType *reporthandling.NotificationPolicyKind `json:"targetType"` // framework/control - default is framework
Submit cautils.BoolPtrFlag `json:"submit"` // Submit results to Armo BE - default will
HostScanner cautils.BoolPtrFlag `json:"hostScanner"` // Deploy ARMO K8s host scanner to collect data from certain controls
KeepLocal cautils.BoolPtrFlag `json:"keepLocal"` // Do not submit results
UseCachedArtifacts cautils.BoolPtrFlag `json:"useCachedArtifacts"` // Use the cached artifacts instead of downloading
// UseExceptions string // Load file with exceptions configuration
// ControlsInputs string // Load file with inputs for controls
// VerboseMode bool // Display all of the input resources and not only failed resources

View File

@@ -4,27 +4,48 @@ import (
"strings"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/opa-utils/reporthandling"
)
func (scanRequest *PostScanRequest) ToScanInfo() *cautils.ScanInfo {
scanInfo := cautils.ScanInfo{}
scanInfo.Account = scanRequest.Account
scanInfo.ExcludedNamespaces = strings.Join(scanRequest.ExcludedNamespaces, ",")
scanInfo.IncludeNamespaces = strings.Join(scanRequest.IncludeNamespaces, ",")
scanInfo.FailThreshold = scanRequest.FailThreshold // TODO - handle default
scanInfo := defaultScanInfo()
scanInfo.Format = scanRequest.Format // TODO - handle default
scanInfo.Local = scanRequest.KeepLocal
scanInfo.Submit = scanRequest.Submit
scanInfo.HostSensorEnabled.SetBool(scanRequest.HostScanner)
return &scanInfo
}
/*
err := clihandler.ScanCliSetup(&scanInfo)
if err != nil {
logger.L().Fatal(err.Error())
if scanRequest.TargetType != nil && len(scanRequest.TargetNames) > 0 {
if *scanRequest.TargetType == reporthandling.KindFramework {
scanInfo.FrameworkScan = true
}
*/
scanInfo.SetPolicyIdentifiers(scanRequest.TargetNames, *scanRequest.TargetType)
scanInfo.ScanAll = false
} else {
scanInfo.ScanAll = true
}
if scanRequest.Account != "" {
scanInfo.Account = scanRequest.Account
}
if len(scanRequest.ExcludedNamespaces) > 0 {
scanInfo.ExcludedNamespaces = strings.Join(scanRequest.ExcludedNamespaces, ",")
}
if len(scanRequest.IncludeNamespaces) > 0 {
scanInfo.IncludeNamespaces = strings.Join(scanRequest.IncludeNamespaces, ",")
}
if scanRequest.Format == "" {
scanInfo.Format = scanRequest.Format // TODO - handle default
}
if scanRequest.UseCachedArtifacts.Get() != nil && !*scanRequest.UseCachedArtifacts.Get() {
scanInfo.UseArtifactsFrom = getter.DefaultLocalStore // Load files from cache (this will prevent kubescape from downloading the artifacts every time)
}
if scanRequest.KeepLocal.Get() != nil {
scanInfo.Local = *scanRequest.KeepLocal.Get() // keep results local - do not submit them to the Kubescape SaaS
}
if scanRequest.Submit.Get() != nil {
scanInfo.Submit = *scanRequest.Submit.Get()
}
scanInfo.HostSensorEnabled = scanRequest.HostScanner
return scanInfo
}

View File

@@ -15,45 +15,52 @@ import (
// Metrics http listener for prometheus support
func (handler *HTTPHandler) Metrics(w http.ResponseWriter, r *http.Request) {
if handler.state.isBusy() { // if already scanning the cluster
w.Write([]byte(fmt.Sprintf("scan '%s' in action", handler.state.getID())))
message := fmt.Sprintf("scan '%s' in action", handler.state.getID())
logger.L().Info("server is busy", helpers.String("message", message), helpers.Time())
w.WriteHeader(http.StatusServiceUnavailable)
w.Write([]byte(message))
return
}
handler.state.setBusy()
defer handler.state.setNotBusy()
handler.state.setID(uuid.NewString())
scanID := uuid.NewString()
handler.state.setID(scanID)
// trigger scanning
logger.L().Info(handler.state.getID(), helpers.String("action", "triggering scan"), helpers.Time())
ks := core.NewKubescape()
results, err := ks.Scan(getPrometheusDefaultScanCommand(handler.state.getID()))
results, err := ks.Scan(getPrometheusDefaultScanCommand(scanID))
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("failed to complete scan. reason: %s", err.Error())))
return
}
results.HandleResults()
logger.L().Info(handler.state.getID(), helpers.String("action", "done scanning"), helpers.Time())
f, err := os.ReadFile(scanID)
// res, err := results.ToJson()
if err != nil {
w.Write([]byte(fmt.Sprintf("failed to complete scan. reason: %s", err.Error())))
w.WriteHeader(http.StatusInternalServerError)
return
}
res, err := results.ToJson()
if err != nil {
w.Write([]byte(fmt.Sprintf("failed to convert scan scan results to json. reason: %s", err.Error())))
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("failed read results from file. reason: %s", err.Error())))
return
}
os.Remove(scanID)
w.Write(res)
w.WriteHeader(http.StatusOK)
w.Write(f)
}
func getPrometheusDefaultScanCommand(scanID string) *cautils.ScanInfo {
scanInfo := cautils.ScanInfo{}
scanInfo := defaultScanInfo()
scanInfo.FrameworkScan = true
scanInfo.ScanAll = true // scan all frameworks
scanInfo.ReportID = scanID // scan ID
scanInfo.HostSensorEnabled.Set(os.Getenv("KS_ENABLE_HOST_SENSOR")) // enable host scanner
scanInfo.FailThreshold = 100 // Do not fail scanning
// scanInfo.Format = "prometheus" // results format
return &scanInfo
scanInfo.ScanAll = true // scan all frameworks
scanInfo.ReportID = scanID // scan ID
scanInfo.FailThreshold = 100 // Do not fail scanning
scanInfo.Output = scanID // results output
scanInfo.Format = envToString("KS_FORMAT", "prometheus") // default output format is prometheus
scanInfo.HostSensorEnabled.SetBool(envToBool("KS_ENABLE_HOST_SCANNER", false)) // enable host scanner
return scanInfo
}

View File

@@ -0,0 +1,19 @@
package v1
import (
"testing"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/stretchr/testify/assert"
)
// TestGetPrometheusDefaultScanCommand verifies the defaults of the scan
// command built for the /metrics endpoint: the report ID and the output file
// are both derived from the scan ID, the output format defaults to
// "prometheus", and artifacts are loaded from the local cache.
func TestGetPrometheusDefaultScanCommand(t *testing.T) {
	scanID := "1234"
	scanInfo := getPrometheusDefaultScanCommand(scanID)
	assert.Equal(t, scanID, scanInfo.ReportID)
	assert.Equal(t, scanID, scanInfo.Output)
	assert.Equal(t, "prometheus", scanInfo.Format)
	// assert.False(t, *scanInfo.HostSensorEnabled.Get())
	assert.Equal(t, getter.DefaultLocalStore, scanInfo.UseArtifactsFrom)
}

View File

@@ -110,6 +110,7 @@ func (handler *HTTPHandler) Scan(w http.ResponseWriter, r *http.Request) {
}()
wg.Wait()
w.WriteHeader(http.StatusOK)
w.Write(response)
}
func (handler *HTTPHandler) Results(w http.ResponseWriter, r *http.Request) {
@@ -128,30 +129,33 @@ func (handler *HTTPHandler) Results(w http.ResponseWriter, r *http.Request) {
if scanID = r.URL.Query().Get("scanID"); scanID == "" {
scanID = handler.state.getLatestID()
}
logger.L().Info("requesting results", helpers.String("ID", scanID))
if handler.state.isBusy() { // if requested ID is still scanning
if scanID == handler.state.getID() {
logger.L().Info("scan in process", helpers.String("ID", scanID))
w.Write([]byte(handler.state.getID()))
w.WriteHeader(http.StatusOK) // Should we return ok?
w.Write([]byte(handler.state.getID()))
return
}
}
switch r.Method {
case http.MethodGet:
logger.L().Info("requesting results", helpers.String("ID", scanID))
if r.URL.Query().Has("remove") {
defer removeResultsFile(scanID)
}
if res, err := readResultsFile(scanID); err != nil {
w.Write([]byte(err.Error()))
w.WriteHeader(http.StatusNoContent)
w.Write([]byte(err.Error()))
} else {
w.Write(res)
w.WriteHeader(http.StatusOK)
w.Write(res)
}
case http.MethodDelete:
logger.L().Info("deleting results", helpers.String("ID", scanID))
if r.URL.Query().Has("all") {
removeResultDirs()
} else {

View File

@@ -5,12 +5,16 @@ import (
"os"
"path/filepath"
pkgcautils "github.com/armosec/utils-go/utils"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/getter"
"github.com/armosec/kubescape/core/core"
)
func scan(scanRequest *PostScanRequest, scanID string) ([]byte, error) {
scanInfo := getScanCommand(scanRequest, scanID)
ks := core.NewKubescape()
result, err := ks.Scan(scanInfo)
if err != nil {
@@ -34,7 +38,7 @@ func readResultsFile(fileID string) ([]byte, error) {
if fileName := searchFile(fileID); fileName != "" {
return os.ReadFile(fileName)
}
return nil, fmt.Errorf("file not found")
return nil, fmt.Errorf("file %s not found", fileID)
}
func removeResultDirs() {
@@ -59,7 +63,7 @@ func searchFile(fileID string) string {
func findFile(targetDir string, fileName string) (string, error) {
matches, err := filepath.Glob(targetDir + fileName)
matches, err := filepath.Glob(filepath.Join(targetDir, fileName))
if err != nil {
return "", err
}
@@ -75,12 +79,6 @@ func getScanCommand(scanRequest *PostScanRequest, scanID string) *cautils.ScanIn
scanInfo := scanRequest.ToScanInfo()
scanInfo.ReportID = scanID
// *** start ***
// TODO - support frameworks/controls and support scanning single frameworks/controls
scanInfo.FrameworkScan = true
scanInfo.ScanAll = true
// *** end ***
// *** start ***
// Set default format
if scanInfo.Format == "" {
@@ -94,7 +92,36 @@ func getScanCommand(scanRequest *PostScanRequest, scanID string) *cautils.ScanIn
scanInfo.Output = filepath.Join(OutputDir, scanID)
// *** end ***
scanInfo.Init()
return scanInfo
}
// defaultScanInfo builds a ScanInfo pre-populated from the KS_* environment
// variables, falling back to the server defaults when a variable is unset.
func defaultScanInfo() *cautils.ScanInfo {
	scanInfo := &cautils.ScanInfo{}
	scanInfo.FailThreshold = 100                                           // do not fail the scan based on the score
	scanInfo.Account = envToString("KS_ACCOUNT", "")                       // account ID used when publishing results to the Kubescape SaaS
	scanInfo.ExcludedNamespaces = envToString("KS_EXCLUDE_NAMESPACES", "") // namespaces to exclude
	scanInfo.IncludeNamespaces = envToString("KS_INCLUDE_NAMESPACES", "")  // namespaces to include
	scanInfo.FormatVersion = envToString("KS_FORMAT_VERSION", "v2")        // output format version
	scanInfo.Format = envToString("KS_FORMAT", "json")                     // default output format is json
	scanInfo.Submit = envToBool("KS_SUBMIT", false)                        // publish results to the Kubescape SaaS
	scanInfo.HostSensorEnabled.SetBool(envToBool("KS_ENABLE_HOST_SCANNER", true)) // deploy the K8s host scanner
	scanInfo.Local = envToBool("KS_KEEP_LOCAL", false)                     // keep results local - do not publish them to the Kubescape SaaS
	if !envToBool("KS_DOWNLOAD_ARTIFACTS", false) {
		// Load files from the local cache; this prevents kubescape from
		// downloading the artifacts on every scan.
		scanInfo.UseArtifactsFrom = getter.DefaultLocalStore
	}
	return scanInfo
}
// envToBool returns the boolean value of the environment variable env,
// or defaultValue when the variable is not set.
func envToBool(env string, defaultValue bool) bool {
	raw, found := os.LookupEnv(env)
	if !found {
		return defaultValue
	}
	return pkgcautils.StringToBool(raw)
}
func envToString(env string, defaultValue string) string {
if d, ok := os.LookupEnv(env); ok {
return d
}
return defaultValue
}

View File

@@ -6,8 +6,10 @@ import (
"net/http"
"os"
"github.com/armosec/kubescape/core/cautils"
"github.com/armosec/kubescape/core/cautils/logger"
"github.com/armosec/kubescape/core/cautils/logger/helpers"
"github.com/armosec/kubescape/core/cautils/logger/zaplogger"
handlerequestsv1 "github.com/armosec/kubescape/httphandler/handlerequests/v1"
"github.com/gorilla/mux"
)
@@ -15,13 +17,15 @@ import (
const (
scanPath = "/v1/scan"
resultsPath = "/v1/results"
prometheusMmeticsPath = "/v1/metrics"
prometheusMmeticsPath = "/metrics"
livePath = "/livez"
readyPath = "/readyz"
)
// SetupHTTPListener set up listening http servers
func SetupHTTPListener() error {
logger.InitLogger(zaplogger.LoggerName)
keyPair, err := loadTLSKey("", "") // TODO - support key and crt files
if err != nil {
return err
@@ -47,7 +51,7 @@ func SetupHTTPListener() error {
server.Handler = rtr
logger.L().Info("Started Kubescape server", helpers.String("port", getPort()))
logger.L().Info("Started Kubescape server", helpers.String("port", getPort()), helpers.String("version", cautils.BuildNumber))
server.ListenAndServe()
if keyPair != nil {
return server.ListenAndServeTLS("", "")