Compare commits

...

32 Commits

Author SHA1 Message Date
dwertent
33f92d1a5f update logger support 2022-03-10 11:19:05 +02:00
dwertent
4bd468f03e Merge remote-tracking branch 'armosec/dev' 2022-03-09 20:54:22 +02:00
dwertent
c6eaecd596 export to json 2022-03-09 20:53:55 +02:00
dwertent
a2a5b06024 adding http handler 2022-03-09 20:51:55 +02:00
David Wertenteil
825732f60f Merge pull request #444 from Akasurde/misspell_dev
Misc typo fixes
2022-03-09 20:12:19 +02:00
David Wertenteil
596ec17106 Merge pull request #445 from Akasurde/no_color_dev
cli: added support for no color
2022-03-09 20:01:24 +02:00
Abhijeet Kasurde
fbd0f352c4 cli: added support for no color
Using commandline flag users can now disable colored output
in logging.

Fixes: #434

Signed-off-by: Abhijeet Kasurde <akasurde@redhat.com>
2022-03-09 21:25:48 +05:30
Abhijeet Kasurde
2600052735 Misc typo fixes
Signed-off-by: Abhijeet Kasurde <akasurde@redhat.com>
2022-03-09 21:22:41 +05:30
dwertent
a985b2ce09 Add mock logger 2022-03-08 21:39:09 +02:00
dwertent
829c176644 Merge remote-tracking branch 'armosec/dev' into server-support 2022-03-08 14:52:00 +02:00
dwertent
7d7d247bc2 update go mod 2022-03-08 14:51:24 +02:00
dwertent
43ae8e2a81 Merge branch 'master' into server-support 2022-03-08 14:49:41 +02:00
David Wertenteil
b0f37e9465 Merge pull request #441 from xdavidel/update_io_apis
update vulns input / output api
2022-03-08 14:34:45 +02:00
David Wertenteil
396ef55267 Merge pull request #440 from dwertent/master
Use semver.Compare for version check
2022-03-08 10:12:59 +02:00
dwertent
4b07469bb2 use semver.Compare for version check 2022-03-08 10:10:06 +02:00
David Delarosa
260f7b06c1 update vulns input / output api 2022-03-07 17:03:14 +02:00
David Wertenteil
67ba28a3cb Merge pull request #433 from dwertent/master
call `setTenant` when submitting results
2022-03-06 11:29:34 +02:00
dwertent
a768d22a1d call setTenant when submitting results 2022-03-06 11:25:16 +02:00
David Wertenteil
ede88550da Merge pull request #432 from dwertent/master
Update roadmap
2022-03-06 11:03:54 +02:00
David Wertenteil
ab55a0d134 Merge pull request #431 from Bezbran/dev
Read Linux kernel variables from host sensor
2022-03-06 10:59:41 +02:00
Bezalel Brandwine
bfd7060044 read linux kernel variables from host sensor 2022-03-06 10:51:24 +02:00
dwertent
bf215a0f96 update roadmap 2022-03-06 10:50:45 +02:00
dwertent
a2e1fb36df update maintainers 2022-03-06 10:42:30 +02:00
dwertent
4e9c6f34b3 Update maintainers and roadmap 2022-03-06 10:38:03 +02:00
David Wertenteil
b08c0f2ec6 Merge pull request #429 from dwertent/master
Update readme
2022-03-06 09:54:41 +02:00
dwertent
4c0e358afc support submitting v2 2022-03-06 09:51:05 +02:00
dwertent
2df0c12e10 auto complete examples 2022-03-03 17:45:54 +02:00
David Wertenteil
d37025dc6c json v2 version 2022-03-03 16:17:25 +02:00
David Wertenteil
d537c56159 Update format output support to v2 2022-03-03 13:24:45 +02:00
dwertent
37644e1f57 update readme 2022-02-10 21:05:08 +02:00
dwertent
8a04934fbd Adding readme and yaml 2022-02-10 20:41:28 +02:00
dwertent
31e1b3055f Prometheus support 2022-02-10 20:11:00 +02:00
58 changed files with 1122 additions and 505 deletions

View File

@@ -2,8 +2,9 @@
The following table lists Kubescape project maintainers
| Name | GitHub | Email | Organization | Repositories/Area of Expertise | Added/Renewed On |
| Name | GitHub | Email | Organization | Role | Added/Renewed On |
| --- | --- | --- | --- | --- | --- |
| Ben Hirschberg | @slashben | ben@armosec.io | ARMO | Kubescape CLI | 2021-09-01 |
| Rotem Refael | @rotemamsa | rrefael@armosec.io | ARMO | Kubescape CLI | 2021-10-11 |
| David Wertenteil | @dwertent | dwertent@armosec.io | ARMO | Kubescape CLI | 2021-09-01 |
| [Ben Hirschberg](https://www.linkedin.com/in/benyamin-ben-hirschberg-66141890) | [@slashben](https://github.com/slashben) | ben@armosec.io | [ARMO](https://www.armosec.io/) | VP R&D | 2021-09-01 |
| [Rotem Refael](https://www.linkedin.com/in/rotem-refael) | [@rotemamsa](https://github.com/rotemamsa) | rrefael@armosec.io | [ARMO](https://www.armosec.io/) | Team Leader | 2021-10-11 |
| [David Wertenteil](https://www.linkedin.com/in/david-wertenteil-0ba277b9) | [@dwertent](https://github.com/dwertent) | dwertent@armosec.io | [ARMO](https://www.armosec.io/) | Kubescape CLI Developer | 2021-09-01 |
| [Bezalel Brandwine](https://www.linkedin.com/in/bezalel-brandwine) | [@Bezbran](https://github.com/Bezbran) | bbrandwine@armosec.io | [ARMO](https://www.armosec.io/) | Kubescape SaaS Developer | 2021-09-01 |

View File

@@ -48,9 +48,9 @@ We invite you to our team! We are excited about this project and want to return
Want to contribute? Want to discuss something? Have an issue?
* Feel free to pick a task from the [roadmap](docs/roadmap.md) or suggest a feature of your own. [Contact us](MAINTAINERS.md) directly for more information :)
* Open a issue, we are trying to respond within 48 hours
* [Join us](https://armosec.github.io/kubescape/) in a discussion on our discord server!
* [Join us](https://armosec.github.io/kubescape/) in a discussion on our discord server!
[<img src="docs/discord-banner.png" width="100" alt="logo" align="center">](https://armosec.github.io/kubescape/)
![discord](https://img.shields.io/discord/893048809884643379)

View File

@@ -13,11 +13,11 @@ type K8SResources map[string][]string
type OPASessionObj struct {
K8SResources *K8SResources // input k8s objects
Frameworks []reporthandling.Framework // list of frameworks to scan
Policies []reporthandling.Framework // list of frameworks to scan
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<rtesource ID>]<resource>
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<rtesource ID>]<resource result>
PostureReport *reporthandling.PostureReport // scan results v1
Report *reporthandlingv2.PostureReport // scan results v2
PostureReport *reporthandling.PostureReport // scan results v1 - Remove
Report *reporthandlingv2.PostureReport // scan results v2 - Remove
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
RegoInputData RegoInputData // input passed to rgo for scanning. map[<control name>][<input arguments>]
}
@@ -25,7 +25,7 @@ type OPASessionObj struct {
func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources) *OPASessionObj {
return &OPASessionObj{
Report: &reporthandlingv2.PostureReport{},
Frameworks: frameworks,
Policies: frameworks,
K8SResources: k8sResources,
AllResources: make(map[string]workloadinterface.IMetadata),
ResourcesResult: make(map[string]resourcesresults.Result),
@@ -38,7 +38,7 @@ func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SRe
func NewOPASessionObjMock() *OPASessionObj {
return &OPASessionObj{
Frameworks: nil,
Policies: nil,
K8SResources: nil,
AllResources: make(map[string]workloadinterface.IMetadata),
ResourcesResult: make(map[string]resourcesresults.Result),

View File

@@ -2,6 +2,7 @@ package cautils
import (
pkgcautils "github.com/armosec/utils-go/utils"
"golang.org/x/mod/semver"
"github.com/armosec/opa-utils/reporthandling"
)
@@ -50,14 +51,15 @@ func ruleWithArmoOpaDependency(attributes map[string]interface{}) bool {
func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version string) bool {
if from, ok := attributes["useFromKubescapeVersion"]; ok && from != nil {
if version != "" {
if from.(string) > BuildNumber {
if semver.Compare(from.(string), BuildNumber) > 0 {
return false
}
}
}
if until, ok := attributes["useUntilKubescapeVersion"]; ok && until != nil {
if version != "" {
if until.(string) <= BuildNumber {
if semver.Compare(BuildNumber, until.(string)) >= 0 {
return false
}
} else {

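The switch to semver.Compare above replaces lexicographic string comparison of version tags. A small standalone sketch (illustrative version strings only) of why that matters for the useFromKubescapeVersion / useUntilKubescapeVersion checks:

```go
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	// Plain string comparison orders versions lexicographically, which breaks
	// as soon as a numeric component reaches two digits.
	fmt.Println("v1.0.9" > "v1.0.10")                // true  -> wrong ordering
	fmt.Println(semver.Compare("v1.0.9", "v1.0.10")) // -1    -> correct ordering

	// The "useFrom" check from the hunk above, in isolation: a rule that
	// requires v1.0.132 must be skipped on an older build.
	from, build := "v1.0.132", "v1.0.131"
	fmt.Println(semver.Compare(from, build) > 0) // true -> rule not compatible
}
```
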
View File

@@ -9,16 +9,6 @@ import (
"github.com/mattn/go-isatty"
)
var silent = false
func SetSilentMode(s bool) {
silent = s
}
func IsSilent() bool {
return silent
}
var FailureDisplay = color.New(color.Bold, color.FgHiRed).FprintfFunc()
var WarningDisplay = color.New(color.Bold, color.FgHiYellow).FprintfFunc()
var FailureTextDisplay = color.New(color.Faint, color.FgHiRed).FprintfFunc()
@@ -31,7 +21,7 @@ var DescriptionDisplay = color.New(color.Faint, color.FgWhite).FprintfFunc()
var Spinner *spinner.Spinner
func StartSpinner() {
if !IsSilent() && isatty.IsTerminal(os.Stdout.Fd()) {
if isatty.IsTerminal(os.Stdout.Fd()) {
Spinner = spinner.New(spinner.CharSets[7], 100*time.Millisecond) // Build our new spinner
Spinner.Start()
}

View File

@@ -21,7 +21,7 @@ import (
var (
// ATTENTION!!!
// Changes in this URLs variable names, or in the usage is affecting the build process! BE CAREFULL
// Changes in this URLs variable names, or in the usage is affecting the build process! BE CAREFUL
armoERURL = "report.armo.cloud"
armoBEURL = "api.armo.cloud"
armoFEURL = "portal.armo.cloud"
@@ -62,7 +62,8 @@ func SetARMOAPIConnector(armoAPI *ArmoAPI) {
func GetArmoAPIConnector() *ArmoAPI {
if globalArmoAPIConnector == nil {
logger.L().Error("returning nil API connector")
// logger.L().Error("returning nil API connector")
SetARMOAPIConnector(NewARMOAPIProd())
}
return globalArmoAPIConnector
}

View File

@@ -1,5 +1,7 @@
package helpers
import "time"
type StringObj struct {
key string
value string
@@ -24,3 +26,6 @@ func Error(e error) *ErrorObj { return &ErrorObj{key: "e
func Int(k string, v int) *IntObj { return &IntObj{key: k, value: v} }
func String(k, v string) *StringObj { return &StringObj{key: k, value: v} }
func Interface(k string, v interface{}) *InterfaceObj { return &InterfaceObj{key: k, value: v} }
func Time() *StringObj {
return &StringObj{key: "time", value: time.Now().Format("2006-01-02 15:04:05")}
}
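
The new helpers.Time() detail stamps log lines using Go's reference-time layout; a tiny standalone sketch of that layout (the printed value is illustrative):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// "2006-01-02 15:04:05" is Go's fixed reference time written in the
	// desired output shape; Format substitutes the current time into it.
	fmt.Println(time.Now().Format("2006-01-02 15:04:05")) // e.g. 2022-03-09 20:53:55
}
```

Callers attach it like any other detail, e.g. `logger.L().Info(scanID, helpers.String("action", "triggering scan"), helpers.Time())`, as the HTTP handler further down does.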

View File

@@ -5,9 +5,9 @@ import (
"strings"
"github.com/armosec/kubescape/cautils/logger/helpers"
"github.com/armosec/kubescape/cautils/logger/nonelogger"
"github.com/armosec/kubescape/cautils/logger/prettylogger"
"github.com/armosec/kubescape/cautils/logger/zaplogger"
"github.com/mattn/go-isatty"
)
type ILogger interface {
@@ -23,29 +23,59 @@ type ILogger interface {
SetWriter(w *os.File)
GetWriter() *os.File
LoggerName() string
}
var l ILogger
// Return initialized logger. If logger not initialized, will call InitializeLogger() with the default value
func L() ILogger {
if l == nil {
InitializeLogger("")
InitDefaultLogger()
}
return l
}
func InitializeLogger(loggerName string) {
/* InitLogger initialize desired logger
Use:
InitLogger("<logger name>")
Supported logger names (call ListLoggersNames() for listing supported loggers)
- "zap": Logger from package "go.uber.org/zap"
- "pretty", "colorful": Human friendly colorful logger
- "none", "mock", "empty", "ignore": Logger will not print anything
Default:
- "pretty"
e.g.
InitLogger("none") -> will initialize the mock logger
*/
func InitLogger(loggerName string) {
switch strings.ToLower(loggerName) {
case "zap":
case zaplogger.LoggerName:
l = zaplogger.NewZapLogger()
case "pretty":
case prettylogger.LoggerName, "colorful":
l = prettylogger.NewPrettyLogger()
case nonelogger.LoggerName, "mock", "empty", "ignore":
l = nonelogger.NewNoneLogger()
default:
if isatty.IsTerminal(os.Stdout.Fd()) {
l = prettylogger.NewPrettyLogger()
} else {
l = zaplogger.NewZapLogger()
}
InitDefaultLogger()
}
}
func InitDefaultLogger() {
l = prettylogger.NewPrettyLogger()
}
func DisableColor(flag bool) {
prettylogger.DisableColor(flag)
}
func ListLoggersNames() []string {
return []string{prettylogger.LoggerName, zaplogger.LoggerName, nonelogger.LoggerName}
}
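
A minimal caller-side sketch of the reworked logger selection, assuming this branch's cautils/logger packages; it only exercises functions visible in the hunk above (InitLogger, L, SetLevel, LoggerName):

```go
package main

import (
	"os"

	"github.com/armosec/kubescape/cautils/logger"
	"github.com/armosec/kubescape/cautils/logger/helpers"
)

func main() {
	// "zap", "pretty"/"colorful" and "none"/"mock"/"empty"/"ignore" select a
	// concrete logger; anything else falls back to the default logger.
	logger.InitLogger(os.Getenv("KS_LOGGER_NAME"))

	// The level is configured separately on the active logger.
	if err := logger.L().SetLevel("debug"); err != nil {
		logger.L().Fatal("unsupported level", helpers.Error(err))
	}

	logger.L().Info("logger ready", helpers.String("name", logger.L().LoggerName()))
}
```
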

View File

@@ -0,0 +1,28 @@
package nonelogger
import (
"os"
"github.com/armosec/kubescape/cautils/logger/helpers"
)
const LoggerName string = "none"
type NoneLogger struct {
}
func NewNoneLogger() *NoneLogger {
return &NoneLogger{}
}
func (nl *NoneLogger) GetLevel() string { return "" }
func (nl *NoneLogger) LoggerName() string { return LoggerName }
func (nl *NoneLogger) SetWriter(w *os.File) {}
func (nl *NoneLogger) GetWriter() *os.File { return nil }
func (nl *NoneLogger) SetLevel(level string) error { return nil }
func (nl *NoneLogger) Fatal(msg string, details ...helpers.IDetails) {}
func (nl *NoneLogger) Error(msg string, details ...helpers.IDetails) {}
func (nl *NoneLogger) Warning(msg string, details ...helpers.IDetails) {}
func (nl *NoneLogger) Success(msg string, details ...helpers.IDetails) {}
func (nl *NoneLogger) Info(msg string, details ...helpers.IDetails) {}
func (nl *NoneLogger) Debug(msg string, details ...helpers.IDetails) {}

View File

@@ -29,3 +29,9 @@ func prefix(l helpers.Level) func(w io.Writer, format string, a ...interface{})
}
return message
}
func DisableColor(flag bool) {
if flag {
color.NoColor = true
}
}
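
DisableColor works by flipping the package-level switch in github.com/fatih/color (already a dependency in go.mod); a short standalone illustration of that switch:

```go
package main

import (
	"fmt"

	"github.com/fatih/color"
)

func main() {
	// Once NoColor is set, every Color printer degrades to plain text,
	// which is what --disable-color toggles for the pretty logger.
	color.NoColor = true

	warn := color.New(color.Bold, color.FgHiYellow).SprintFunc()
	fmt.Println(warn("printed without ANSI escape codes"))
}
```

Note that the helper in the hunk only ever turns color off; passing `false` leaves the current setting unchanged.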

View File

@@ -8,6 +8,8 @@ import (
"github.com/armosec/kubescape/cautils/logger/helpers"
)
const LoggerName string = "pretty"
type PrettyLogger struct {
writer *os.File
level helpers.Level
@@ -15,6 +17,7 @@ type PrettyLogger struct {
}
func NewPrettyLogger() *PrettyLogger {
return &PrettyLogger{
writer: os.Stderr, // default to stderr
level: helpers.InfoLevel,
@@ -25,6 +28,7 @@ func NewPrettyLogger() *PrettyLogger {
func (pl *PrettyLogger) GetLevel() string { return pl.level.String() }
func (pl *PrettyLogger) SetWriter(w *os.File) { pl.writer = w }
func (pl *PrettyLogger) GetWriter() *os.File { return pl.writer }
func (pl *PrettyLogger) LoggerName() string { return LoggerName }
func (pl *PrettyLogger) SetLevel(level string) error {
pl.level = helpers.ToLevel(level)
@@ -69,7 +73,7 @@ func (pl *PrettyLogger) print(level helpers.Level, msg string, details ...helper
func detailsToString(details []helpers.IDetails) string {
s := ""
for i := range details {
s += fmt.Sprintf("%s: %s", details[i].Key(), details[i].Value())
s += fmt.Sprintf("%s: %v", details[i].Key(), details[i].Value())
if i < len(details)-1 {
s += "; "
}
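
The %s-to-%v change in detailsToString matters because detail values are not always strings; a quick standalone illustration with an int value:

```go
package main

import "fmt"

func main() {
	// %s mangles anything that is not a string (or fmt.Stringer):
	fmt.Println(fmt.Sprintf("%s: %s", "port", 8080)) // port: %!s(int=8080)
	fmt.Println(fmt.Sprintf("%s: %v", "port", 8080)) // port: 8080
}
```
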

View File

@@ -8,6 +8,8 @@ import (
"go.uber.org/zap/zapcore"
)
const LoggerName string = "zap"
type ZapLogger struct {
zapL *zap.Logger
cfg zap.Config
@@ -35,8 +37,7 @@ func NewZapLogger() *ZapLogger {
func (zl *ZapLogger) GetLevel() string { return zl.cfg.Level.Level().String() }
func (zl *ZapLogger) SetWriter(w *os.File) {}
func (zl *ZapLogger) GetWriter() *os.File { return nil }
func GetWriter() *os.File { return nil }
func (zl *ZapLogger) LoggerName() string { return LoggerName }
func (zl *ZapLogger) SetLevel(level string) error {
l := zapcore.Level(1)
err := l.Set(level)

View File

@@ -54,9 +54,12 @@ func (bpf *BoolPtrFlag) Set(val string) error {
}
type RootInfo struct {
Logger string // logger level
CacheDir string // cached dir
Logger string // logger level
LoggerName string // logger name ("pretty"/"zap"/"none")
CacheDir string // cached dir
DisableColor bool // Disable Color
}
type ScanInfo struct {
Getters
PolicyIdentifier []reporthandling.PolicyIdentifier
@@ -75,6 +78,7 @@ type ScanInfo struct {
Silent bool // Silent mode - Do not print progress logs
FailThreshold float32 // Failure score threshold
Submit bool // Submit results to Armo BE
ReportID string // Report id of the current scan
HostSensorEnabled BoolPtrFlag // Deploy ARMO K8s host sensor to collect data from certain controls
HostSensorYamlPath string // Path to hostsensor file
Local bool // Do not submit results

View File

@@ -10,6 +10,7 @@ import (
"github.com/armosec/kubescape/cautils/logger"
"github.com/armosec/kubescape/cautils/logger/helpers"
pkgutils "github.com/armosec/utils-go/utils"
"golang.org/x/mod/semver"
)
const SKIP_VERSION_CHECK = "KUBESCAPE_SKIP_UPDATE_CHECK"
@@ -97,7 +98,7 @@ func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckReques
}
if latestVersion.ClientUpdate != "" {
if BuildNumber != "" && BuildNumber < latestVersion.ClientUpdate {
if BuildNumber != "" && semver.Compare(BuildNumber, latestVersion.ClientUpdate) >= 0 {
logger.L().Warning(warningMessage(latestVersion.ClientUpdate))
}
}

View File

@@ -0,0 +1,38 @@
package cautils
import (
"testing"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/opa-utils/reporthandling"
"github.com/stretchr/testify/assert"
)
func TestGetKubernetesObjects(t *testing.T) {
}
var rule_v1_0_131 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useUntilKubescapeVersion": "v1.0.132"}}}
var rule_v1_0_132 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.132", "useUntilKubescapeVersion": "v1.0.133"}}}
var rule_v1_0_133 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.133", "useUntilKubescapeVersion": "v1.0.134"}}}
var rule_v1_0_134 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.134"}}}
func TestIsRuleKubescapeVersionCompatible(t *testing.T) {
// local build- no build number
// should use only rules that don't have "until"
buildNumberMock := ""
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
// should only use rules that version is in range of use
buildNumberMock = "v1.0.133"
assert.True(t, isRuleKubescapeVersionCompatible(rule_v1_0_131.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_132.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_133.Attributes, buildNumberMock))
assert.False(t, isRuleKubescapeVersionCompatible(rule_v1_0_134.Attributes, buildNumberMock))
}

View File

@@ -10,10 +10,12 @@ import (
var completionCmdExamples = `
# Enable BASH shell autocompletion
echo 'source <(kubescape completion bash)' >> ~/.bashrc
$ source <(kubescape completion bash)
$ echo 'source <(kubescape completion bash)' >> ~/.bashrc
# Enable ZSH shell autocompletion
echo 'source <(kubectl completion zsh)' >> "${fpath[1]}/_kubectl"
$ source <(kubectl completion zsh)
$ echo 'source <(kubectl completion zsh)' >> "${fpath[1]}/_kubectl"
`
var completionCmd = &cobra.Command{

View File

@@ -81,12 +81,15 @@ var controlCmd = &cobra.Command{
}
scanInfo.FrameworkScan = false
scanInfo.Init()
cautils.SetSilentMode(scanInfo.Silent)
err := clihandler.ScanCliSetup(&scanInfo)
results, err := clihandler.Scan(&scanInfo)
if err != nil {
logger.L().Fatal(err.Error())
}
results.HandleResults()
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
return fmt.Errorf("scan risk-score %.2f is above permitted threshold %.2f", results.GetRiskScore(), scanInfo.FailThreshold)
}
return nil
},
}

View File

@@ -87,12 +87,14 @@ var frameworkCmd = &cobra.Command{
scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
scanInfo.Init()
cautils.SetSilentMode(scanInfo.Silent)
err := clihandler.ScanCliSetup(&scanInfo)
results, err := clihandler.Scan(&scanInfo)
if err != nil {
logger.L().Fatal(err.Error())
}
results.HandleResults()
if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
return fmt.Errorf("scan risk-score %.2f is above permitted threshold %.2f", results.GetRiskScore(), scanInfo.FailThreshold)
}
return nil
},
}

View File

@@ -11,12 +11,16 @@ import (
"github.com/armosec/kubescape/cautils/logger/helpers"
"github.com/armosec/kubescape/clihandler"
"github.com/armosec/kubescape/clihandler/cliinterfaces"
"github.com/armosec/kubescape/resultshandling/reporter"
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
reporterv2 "github.com/armosec/kubescape/resultshandling/reporter/v2"
"github.com/armosec/opa-utils/reporthandling"
"github.com/google/uuid"
"github.com/spf13/cobra"
)
var formatVersion string
type ResultsObject struct {
filePath string
customerGUID string
@@ -51,7 +55,7 @@ func (resultsObject *ResultsObject) ListAllResources() (map[string]workloadinter
}
var resultsCmd = &cobra.Command{
Use: "results <json file>\nExample:\n$ kubescape submit results path/to/results.json",
Use: "results <json file>\nExample:\n$ kubescape submit results path/to/results.json --format-version v2",
Short: "Submit a pre scanned results file. The file must be in json format",
Long: ``,
RunE: func(cmd *cobra.Command, args []string) error {
@@ -70,7 +74,14 @@ var resultsCmd = &cobra.Command{
resultsObjects := NewResultsObject(clusterConfig.GetAccountID(), clusterConfig.GetClusterName(), args[0])
// submit resources
r := reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
var r reporter.IReport
switch formatVersion {
case "v2":
r = reporterv2.NewReportEventReceiver(clusterConfig.GetConfigObj(), "")
default:
logger.L().Warning("Deprecated results version. run with '--format-version' flag", helpers.String("your version", formatVersion), helpers.String("latest version", "v2"))
r = reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
}
submitInterfaces := cliinterfaces.SubmitInterfaces{
ClusterConfig: clusterConfig,
@@ -87,6 +98,7 @@ var resultsCmd = &cobra.Command{
func init() {
submitCmd.AddCommand(resultsCmd)
resultsCmd.PersistentFlags().StringVar(&formatVersion, "format-version", "v1", "Output object can be differnet between versions, this is for maintaining backward and forward compatibility. Supported:'v1'/'v2'")
}
func loadResultsFromFile(filePath string) ([]reporthandling.FrameworkReport, error) {

View File

@@ -9,6 +9,7 @@ import (
"github.com/armosec/kubescape/cautils/getter"
"github.com/armosec/kubescape/cautils/logger"
"github.com/armosec/kubescape/cautils/logger/helpers"
"github.com/mattn/go-isatty"
"github.com/spf13/cobra"
)
@@ -54,32 +55,51 @@ func init() {
rootCmd.PersistentFlags().MarkHidden("environment")
rootCmd.PersistentFlags().MarkHidden("env")
rootCmd.PersistentFlags().StringVarP(&rootInfo.LoggerName, "logger-name", "l", "", fmt.Sprintf("Logger name. Supported: %s [$KS_LOGGER_NAME]", strings.Join(logger.ListLoggersNames(), "/")))
rootCmd.PersistentFlags().MarkHidden("logger-name")
rootCmd.PersistentFlags().StringVarP(&rootInfo.Logger, "logger", "l", helpers.InfoLevel.String(), fmt.Sprintf("Logger level. Supported: %s [$KS_LOGGER]", strings.Join(helpers.SupportedLevels(), "/")))
rootCmd.PersistentFlags().StringVar(&rootInfo.CacheDir, "cache-dir", getter.DefaultLocalStore, "Cache directory [$KS_CACHE_DIR]")
rootCmd.PersistentFlags().BoolVarP(&rootInfo.DisableColor, "disable-color", "", false, "Disable Color output for logging")
}
func initLogger() {
if l := os.Getenv("KS_LOGGER_NAME"); l != "" {
logger.InitializeLogger(l)
logger.DisableColor(rootInfo.DisableColor)
if rootInfo.LoggerName == "" {
if l := os.Getenv("KS_LOGGER_NAME"); l != "" {
rootInfo.LoggerName = l
} else {
if isatty.IsTerminal(os.Stdout.Fd()) {
rootInfo.LoggerName = "pretty"
} else {
rootInfo.LoggerName = "zap"
}
}
}
logger.InitLogger(rootInfo.LoggerName)
}
func initLoggerLevel() {
if rootInfo.Logger != helpers.InfoLevel.String() {
} else if l := os.Getenv("KS_LOGGER"); l != "" {
rootInfo.Logger = l
}
if err := logger.L().SetLevel(rootInfo.Logger); err != nil {
logger.L().Fatal(fmt.Sprintf("supported levels: %s", strings.Join(helpers.SupportedLevels(), "/")), helpers.Error(err))
}
}
func initCacheDir() {
if rootInfo.CacheDir != getter.DefaultLocalStore {
if rootInfo.CacheDir == getter.DefaultLocalStore {
getter.DefaultLocalStore = rootInfo.CacheDir
} else if cacheDir := os.Getenv("KS_CACHE_DIR"); cacheDir != "" {
getter.DefaultLocalStore = cacheDir
} else {
return // using default cache di location
return // using default cache dir location
}
logger.L().Debug("cache dir updated", helpers.String("path", getter.DefaultLocalStore))
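
With --logger-name unset and no $KS_LOGGER_NAME, the logger choice in initLogger now depends on whether stdout is a terminal. A standalone sketch of that detection, assuming github.com/mattn/go-isatty as imported in the hunk:

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func main() {
	// Interactive terminal -> human-friendly "pretty" logger;
	// piped/redirected output (CI, files) -> structured "zap" logger.
	name := "zap"
	if isatty.IsTerminal(os.Stdout.Fd()) {
		name = "pretty"
	}
	fmt.Println("default logger:", name)
}
```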

View File

@@ -55,14 +55,13 @@ var scanCmd = &cobra.Command{
func frameworkInitConfig() {
k8sinterface.SetClusterContextName(scanInfo.KubeContext)
}
func init() {
cobra.OnInitialize(frameworkInitConfig)
rootCmd.AddCommand(scanCmd)
scanCmd.PersistentFlags().StringVarP(&scanInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Account, "account", "", "", "ARMO portal account ID. Default will load account ID from configMap or config file")
scanCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "kube-context", "", "", "Kube context. Default will use the current-context")
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to an controls-config obj. If not set will download controls-config from ARMO management portal")
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
@@ -71,22 +70,25 @@ func init() {
scanCmd.PersistentFlags().Float32VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer","json","junit","prometheus","pdf"`)
scanCmd.PersistentFlags().StringVar(&scanInfo.IncludeNamespaces, "include-namespaces", "", "scan specific namespaces. e.g: --include-namespaces ns-a,ns-b")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to Armo backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to ARMO backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
scanCmd.PersistentFlags().BoolVar(&scanInfo.VerboseMode, "verbose", false, "Display all of the input resources and not only failed resources")
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Silent, "silent", "s", false, "Silent progress messages")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to ARMO management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
scanCmd.PersistentFlags().StringVar(&scanInfo.HostSensorYamlPath, "host-scan-yaml", "", "Override default host sensor DaemonSet. Use this flag cautiously")
scanCmd.PersistentFlags().StringVar(&scanInfo.FormatVersion, "format-version", "v1", "Output object can be differnet between versions, this is for testing and backward compatibility")
scanCmd.PersistentFlags().StringVar(&scanInfo.FormatVersion, "format-version", "v1", "Output object can be differnet between versions, this is for maintaining backward and forward compatibility. Supported:'v1'/'v2'")
// Deprecated flags - remove 1.May.2022
scanCmd.PersistentFlags().MarkDeprecated("silent", "use '--logger' flag instead. Flag will be removed at 1.May.2022")
// hidden flags
scanCmd.PersistentFlags().MarkHidden("host-scan-yaml") // this flag should be used very cautiously. We prefer users will not use it at all unless the DaemoSet can not run pods on the nodes
scanCmd.PersistentFlags().MarkHidden("host-scan-yaml") // this flag should be used very cautiously. We prefer users will not use it at all unless the DaemonSet can not run pods on the nodes
scanCmd.PersistentFlags().MarkHidden("silent") // this flag should be deprecated since we added the --logger support
scanCmd.PersistentFlags().MarkHidden("format-version") // meant for testing different output approaches and not for common use
// scanCmd.PersistentFlags().MarkHidden("format-version") // meant for testing different output approaches and not for common use
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valueable data from cluster nodes for certain controls. Yaml file: https://raw.githubusercontent.com/armosec/kubescape/master/hostsensorutils/hostsensor.yaml")
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensorEnabled, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valuable data from cluster nodes for certain controls. Yaml file: https://raw.githubusercontent.com/armosec/kubescape/master/hostsensorutils/hostsensor.yaml")
hostF.NoOptDefVal = "true"
hostF.DefValue = "false, for no TTY in stdin"

View File

@@ -20,6 +20,7 @@ import (
"github.com/armosec/kubescape/resultshandling"
"github.com/armosec/kubescape/resultshandling/reporter"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/resources"
"github.com/mattn/go-isatty"
)
@@ -49,6 +50,13 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
// Set submit behavior AFTER loading tenant config
setSubmitBehavior(scanInfo, tenantConfig)
if scanInfo.Submit {
// submit - Create tenant & Submit report
if err := tenantConfig.SetTenant(); err != nil {
logger.L().Error(err.Error())
}
}
// ================== version testing ======================================
v := cautils.NewIVersionCheckHandler()
@@ -80,7 +88,7 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
// ================== setup reporter & printer objects ======================================
// reporting behavior - setup reporter
reportHandler := getReporter(tenantConfig, scanInfo.Submit, scanInfo.FrameworkScan, len(scanInfo.InputPatterns) == 0)
reportHandler := getReporter(tenantConfig, scanInfo.ReportID, scanInfo.Submit, scanInfo.FrameworkScan, len(scanInfo.InputPatterns) == 0)
// setup printer
printerHandler := resultshandling.NewPrinter(scanInfo.Format, scanInfo.FormatVersion, scanInfo.VerboseMode)
@@ -97,14 +105,13 @@ func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
}
}
func ScanCliSetup(scanInfo *cautils.ScanInfo) error {
func Scan(scanInfo *cautils.ScanInfo) (*resultshandling.ResultsHandler, error) {
logger.L().Info("ARMO security scanner starting")
interfaces := getInterfaces(scanInfo)
// setPolicyGetter(scanInfo, interfaces.clusterConfig.GetCustomerGUID())
// ===================== Initialization =====================
scanInfo.Init() // initialize scan info
processNotification := make(chan *cautils.OPASessionObj)
reportResults := make(chan *cautils.OPASessionObj)
interfaces := getInterfaces(scanInfo)
cautils.ClusterName = interfaces.tenantConfig.GetClusterName() // TODO - Deprecated
cautils.CustomerGUID = interfaces.tenantConfig.GetAccountID() // TODO - Deprecated
@@ -123,43 +130,41 @@ func ScanCliSetup(scanInfo *cautils.ScanInfo) error {
scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), reporthandling.KindFramework)
}
//
// remove host scanner components
defer func() {
if err := interfaces.hostSensorHandler.TearDown(); err != nil {
logger.L().Error("failed to tear down host sensor", helpers.Error(err))
}
}()
// cli handler setup
go func() {
// policy handler setup
policyHandler := policyhandler.NewPolicyHandler(&processNotification, interfaces.resourceHandler)
resultsHandling := resultshandling.NewResultsHandler(interfaces.report, interfaces.printerHandler)
if err := Scan(policyHandler, scanInfo); err != nil {
logger.L().Fatal(err.Error())
}
}()
// processor setup - rego run
go func() {
opaprocessorObj := opaprocessor.NewOPAProcessorHandler(&processNotification, &reportResults)
opaprocessorObj.ProcessRulesListenner()
}()
resultsHandling := resultshandling.NewResultsHandler(&reportResults, interfaces.report, interfaces.printerHandler)
score := resultsHandling.HandleResults(scanInfo)
// print report url
interfaces.report.DisplayReportURL()
if score > float32(scanInfo.FailThreshold) {
return fmt.Errorf("scan risk-score %.2f is above permitted threshold %.2f", score, scanInfo.FailThreshold)
// ===================== policies & resources =====================
policyHandler := policyhandler.NewPolicyHandler(interfaces.resourceHandler)
scanData, err := CollectResources(policyHandler, scanInfo)
if err != nil {
return resultsHandling, err
}
return nil
// ========================= opa testing =====================
deps := resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), interfaces.tenantConfig.GetClusterName())
reportResults := opaprocessor.NewOPAProcessor(scanData, deps)
if err := reportResults.ProcessRulesListenner(); err != nil {
// TODO - do something
return resultsHandling, err
}
// ========================= results handling =====================
resultsHandling.SetData(scanData)
// if resultsHandling.GetRiskScore() > float32(scanInfo.FailThreshold) {
// return resultsHandling, fmt.Errorf("scan risk-score %.2f is above permitted threshold %.2f", resultsHandling.GetRiskScore(), scanInfo.FailThreshold)
// }
return resultsHandling, nil
}
func Scan(policyHandler *policyhandler.PolicyHandler, scanInfo *cautils.ScanInfo) error {
func CollectResources(policyHandler *policyhandler.PolicyHandler, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
policyNotification := &reporthandling.PolicyNotification{
Rules: scanInfo.PolicyIdentifier,
KubescapeNotification: reporthandling.KubescapeNotification{
@@ -169,14 +174,16 @@ func Scan(policyHandler *policyhandler.PolicyHandler, scanInfo *cautils.ScanInfo
}
switch policyNotification.KubescapeNotification.NotificationType {
case reporthandling.TypeExecPostureScan:
if err := policyHandler.HandleNotificationRequest(policyNotification, scanInfo); err != nil {
return err
collectedResources, err := policyHandler.CollectResources(policyNotification, scanInfo)
if err != nil {
return nil, err
}
return collectedResources, nil
default:
return fmt.Errorf("notification type '%s' Unknown", policyNotification.KubescapeNotification.NotificationType)
return nil, fmt.Errorf("notification type '%s' Unknown", policyNotification.KubescapeNotification.NotificationType)
}
return nil
return nil, nil
}
func askUserForHostSensor() bool {

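The refactor above turns the old channel-and-goroutine pipeline into a synchronous chain (CollectResources, then the OPA processor, then the results handler) and moves exit-code policy out to callers. A caller-side sketch, assuming this branch's packages; it mirrors what the control and framework commands now do:

```go
package main

import (
	"fmt"

	"github.com/armosec/kubescape/cautils"
	"github.com/armosec/kubescape/cautils/logger"
	"github.com/armosec/kubescape/clihandler"
)

func runScan(scanInfo *cautils.ScanInfo) error {
	// Scan returns the results handler instead of printing and exiting itself.
	results, err := clihandler.Scan(scanInfo)
	if err != nil {
		return err
	}

	// Print / report the results, then let the caller decide on the threshold.
	results.HandleResults()
	if results.GetRiskScore() > float32(scanInfo.FailThreshold) {
		return fmt.Errorf("scan risk-score %.2f is above permitted threshold %.2f",
			results.GetRiskScore(), scanInfo.FailThreshold)
	}
	return nil
}

func main() {
	// A minimal "scan everything" configuration, mirroring
	// getPrometheusDefaultScanCommand further down.
	scanInfo := &cautils.ScanInfo{}
	scanInfo.FrameworkScan = true
	scanInfo.ScanAll = true
	scanInfo.FailThreshold = 100

	if err := runScan(scanInfo); err != nil {
		logger.L().Fatal(err.Error())
	}
}
```
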
View File

@@ -48,9 +48,9 @@ func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.Kubern
return nil
}
func getReporter(tenantConfig cautils.ITenantConfig, submit, fwScan, clusterScan bool) reporter.IReport {
func getReporter(tenantConfig cautils.ITenantConfig, reportID string, submit, fwScan, clusterScan bool) reporter.IReport {
if submit && clusterScan {
return reporterv2.NewReportEventReceiver(tenantConfig.GetConfigObj())
return reporterv2.NewReportEventReceiver(tenantConfig.GetConfigObj(), reportID)
}
if tenantConfig.GetAccountID() == "" && fwScan && clusterScan {
// Add link only when scanning a cluster using a framework
@@ -122,7 +122,7 @@ func policyIdentifierNames(pi []reporthandling.PolicyIdentifier) string {
return policiesNames
}
// setSubmitBehavior - Setup the desired cluster behavior regarding submittion to the Armo BE
// setSubmitBehavior - Setup the desired cluster behavior regarding submitting to the Armo BE
func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig) {
/*
@@ -148,14 +148,8 @@ func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantC
// Submit report
scanInfo.Submit = true
}
} else { // config not found in cache (not submitted)
if scanInfo.Submit {
// submit - Create tenant & Submit report
if err := tenantConfig.SetTenant(); err != nil {
logger.L().Error(err.Error())
}
}
}
}
// setPolicyGetter set the policy getter - local file/github release/ArmoAPI

View File

@@ -11,7 +11,7 @@ source #287
### Relation to this proposal
There are multiple changes and design decisions needs to be made before Kubescape will support the before outlined controls. However, a focal point the whole picutre is the ability to access vulnerabilty databases of container images. We anticiapte that most container image repositories will support image vulnerabilty scanning, some major players are already do. Since there is no a single API available which all of these data sources support it is important to create an adaption layer within Kubescape so different datasources can serve Kubescape's goals.
There are multiple changes and design decisions needs to be made before Kubescape will support the before outlined controls. However, a focal point the whole picutre is the ability to access vulnerability databases of container images. We anticipate that most container image repositories will support image vulnerability scanning, some major players are already do. Since there is no a single API available which all of these data sources support it is important to create an adaption layer within Kubescape so different datasources can serve Kubescape's goals.
## High level design of Kubescape
@@ -21,7 +21,7 @@ There are multiple changes and design decisions needs to be made before Kubescap
* OPA engine: the [OPA](https://github.com/open-policy-agent/opa) rego interpreter
* Rules processor: Kubescape component, it enumerates and runs the controls while also preparing the all the input data that the controls need for running
* Data sources: set of different modules providing data to the Rules processor so it can run the controls with them. Examples: Kubernetes objects, cloud vendor API objects and adding in this proposal the vulnerability infomration
* Cloud Image Vulnerability adaption interface: the subject of this proposal, it gives a common interface for different registry/vulnerabilty vendors to adapt to.
* Cloud Image Vulnerability adaption interface: the subject of this proposal, it gives a common interface for different registry/vulnerability vendors to adapt to.
* CIV adaptors: specific implementation of the CIV interface, example Harbor adaption
```
-----------------------

View File

@@ -1,91 +0,0 @@
# Kubescape Release
## Input
### Scan a running Kubernetes cluster
* Scan your Kubernetes cluster. Ignore `kube-system` and `kube-public` namespaces
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
```
* Scan your Kubernetes cluster
```
kubescape scan framework nsa
```
### Scan a local Kubernetes manifest
* Scan single Kubernetes manifest file <img src="new-feature.svg">
```
kubescape scan framework nsa <my-workload.yaml>
```
* Scan many Kubernetes manifest files <img src="new-feature.svg">
```
kubescape scan framework nsa <my-workload-1.yaml> <my-workload-2.yaml>
```
* Scan all Kubernetes manifest files in directory <img src="new-feature.svg">
```
kubescape scan framework nsa *.yaml
```
* Scan Kubernetes manifest from stdout <img src="new-feature.svg">
```
cat <my-workload.yaml> | kubescape scan framework nsa -
```
* Scan Kubernetes manifest url <img src="new-feature.svg">
```
kubescape scan framework nsa https://raw.githubusercontent.com/GoogleCloudPlatform/microservices-demo/master/release/kubernetes-manifests.yaml
```
### Scan HELM chart
* Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout <img src="new-feature.svg">
```
helm template [CHART] [flags] --generate-name --dry-run | kubescape scan framework nsa -
```
### Scan on-prem (offline)
* Scan using a framework from the local file system
```
kubescape scan framework --use-from <path>
```
* Scan using the framework from the default location in file system
```
kubescape scan framework --use-default
```
## Output formats
By default, the output is user friendly.
For the sake of automation, it is possible to receive the result in a `json` or `junit xml` format.
* Output in `json` format <img src="new-feature.svg">
```
kubescape scan framework nsa --format json --output results.json
```
* Output in `junit xml` format <img src="new-feature.svg">
```
kubescape scan framework nsa --format junit --output results.xml
```
## Download
* Download and save in file <img src="new-feature.svg">
```
kubescape download framework nsa --output nsa.json
```
* Download and save in default file (`~/.kubescape/<framework name>.json`)
```
kubescape download framework nsa
```

View File

@@ -5,38 +5,52 @@
Kubescape roadmap items are labeled based on where the feature is used and by their maturity.
The features serve different stages of the workflow of the users:
* development phase (writing Kubernetes manifests) - example: VS Code extension is used while editing YAMLs
* CI phase (integrating manifests to GIT repo) - example: GitHub action validating HELM charts on PRs
* delivery phase (deploying applications in Kubernetes) - example: running cluster scan after a new deployment
* monitoring phase (scanning application in Kubernetes) - example: Prometheus scraping the cluster security risk
* **Development phase** (writing Kubernetes manifests) - example: VS Code extension is used while editing YAMLs
* **CI phase** (integrating manifests to GIT repo) - example: GitHub action validating HELM charts on PRs
* **CD phase** (deploying applications in Kubernetes) - example: running cluster scan after a new deployment
* **Monitoring phase** (scanning application in Kubernetes) - example: Prometheus scraping the cluster security risk
Items in Kubescape roadmap are split to 3 major groups based on the feature planning maturity:
The items in Kubescape roadmap are split to 3 major groups based on the feature planning maturity:
* Planning - we have tickets open for these issues with more or less clear vision of design
* Backlog - feature which were discussed at a high level but are not ready for development
* Wishlist - features we are dreaming of 😀 and want to push them gradually forward
* [Planning](#planning) - we have tickets open for these issues with more or less clear vision of design
* [Backlog](#backlog) - feature which were discussed at a high level but are not ready for development
* [Wishlist](#wishlist) - features we are dreaming of 😀 and want to push them gradually forward
## Planning 👷
* **Integration with image registries**: we want to expand Kubescape to integrate with differnet image registries and read image vulnerability information from there. This will allow Kubescape to give contextual security information about vulnerabilities [Container registry integration](/docs/proposals/container-image-vulnerability-adaptor.md)
* **Kubescape as a microservice**: create a REST API for Kubescape so it can run constantly in a cluster and other components like Prometheus can scrape results
* **Kubescape CLI control over cluster operations**: add functionality to Kubescape CLI to trigger operations in Kubescape cluster components (example: trigger images scans and etc.)
* **Produce md/HTML reports**: create scan reports for different output formats
* **Git integration for pull requests**: create insightful GitHub actions for Kubescape
* ##### Integration with image registries
We want to expand Kubescape to integrate with differnet image registries and read image vulnerability information from there. This will allow Kubescape to give contextual security information about vulnerabilities [Container registry integration](/docs/proposals/container-image-vulnerability-adaptor.md)
* ##### Kubescape as a microservice
Create a REST API for Kubescape so it can run constantly in a cluster and other components like Prometheus can scrape results
* ##### Kubescape CLI control over cluster operations
Add functionality to Kubescape CLI to trigger operations in Kubescape cluster components (example: trigger images scans and etc.)
* ##### Produce md/HTML reports
Create scan reports for different output formats
* ##### Git integration for pull requests
Create insightful GitHub actions for Kubescape
## Backlog 📅
* **JSON path for HELM charts**: today Kubescape can point to issues in the Kubernetes object, we want to develop this feature so Kubescape will be able to point to the misconfigured source file (HELM)
* **Create Kubescape HELM plugin**
* **Kubescape based admission controller**: Implement admission controller API for Kubescape microservice to enable users to use Kubescape rules as policies
* ##### JSON path for HELM charts
Today Kubescape can point to issues in the Kubernetes object, we want to develop this feature so Kubescape will be able to point to the misconfigured source file (HELM)
* ##### Create Kubescape HELM plugin
* ##### Kubescape based admission controller
Implement admission controller API for Kubescape microservice to enable users to use Kubescape rules as policies
## Wishlist 💭
* **Integrate with other Kubernetes CLI tools** use Kubescape as a YAML validator for `kubectl` and others.
* **Kubernetes audit log integration**: connect Kubescape to audit log stream to enable it to produce more contextual security information based on how the API service is used.
* **TUI for Kubescape**: interactive terminal based user interface which helps to analyze and fix issues
* **Scanning images with GO for vulnerabilities**: Images scanners cannot determine which packages were used to build Go executables and we want to scan them for vulnerabilities
* **Scanning Dockerfile-s for security best practices**: Scan image or Dockerfile to determine whether it is using security best practices (like root containers)
* **Custom controls and rules**: enable users to define their own Rego base rules
* **More CI/CD tool integration**: Jenkins and etc. 😀
* ##### Integrate with other Kubernetes CLI tools
Use Kubescape as a YAML validator for `kubectl` and others.
* ##### Kubernetes audit log integration
Connect Kubescape to audit log stream to enable it to produce more contextual security information based on how the API service is used.
* ##### TUI for Kubescape
Interactive terminal based user interface which helps to analyze and fix issues
* ##### Scanning images with GO for vulnerabilities
Images scanners cannot determine which packages were used to build Go executables and we want to scan them for vulnerabilities
* ##### Scanning Dockerfile-s for security best practices
Scan image or Dockerfile to determine whether it is using security best practices (like root containers)
* ##### Custom controls and rules
Enable users to define their own Rego base rules
* ##### More CI/CD tool integration
Jenkins and etc. 😀
## Completed features 🎓
@@ -46,7 +60,7 @@ Items in Kubescape roadmap are split to 3 major groups based on the feature plan
* Assisted remediation (telling where/what to fix)
* Integration with Prometheus
* Confiugration of controls (customizing rules for a given environment)
* Installation in the cluster for continous monitoring
* Installation in the cluster for continuous monitoring
* Host scanner
* Cloud vendor API integration
* Custom exceptions

View File

@@ -1,87 +0,0 @@
<img src="kubescape.png" width="300" alt="logo" align="center">
# More detailed look on command line arguments and options
## Simple run:
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
```
## Flags
| flag | default | description | options |
| --- | --- | --- | --- |
| `-e`/`--exclude-namespaces` | Scan all namespaces | Namespaces to exclude from scanning. Recommended to exclude `kube-system` and `kube-public` namespaces |
| `-s`/`--silent` | Display progress messages | Silent progress messages |
| `-t`/`--fail-threshold` | `0` (do not fail) | fail command (return exit code 1) if result is below threshold| `0` -> `100` |
| `-f`/`--format` | `pretty-printer` | Output format | `pretty-printer`/`json`/`junit` |
| `-o`/`--output` | print to stdout | Save scan result in file |
| `--use-from` | | Load local framework object from specified path. If not used will download latest |
| `--use-default` | `false` | Load local framework object from default path. If not used will download latest | `true`/`false` |
| `--exceptions` | | Path to an [exceptions obj](examples/exceptions.json). If not set will download exceptions from Armo management portal |
| `--results-locally` | `false` | Kubescape sends scan results to Armo management portal to allow users to control exceptions and maintain chronological scan results. Use this flag if you do not wish to use these features | `true`/`false`|
## Usage & Examples
### Examples
* Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
```
* Scan local `yaml`/`json` files before deploying
```
kubescape scan framework nsa *.yaml
```
* Scan `yaml`/`json` files from url
```
kubescape scan framework nsa https://raw.githubusercontent.com/GoogleCloudPlatform/microservices-demo/master/release/kubernetes-manifests.yaml
```
* Output in `json` format
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format json --output results.json
```
* Output in `junit xml` format
```
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format junit --output results.xml
```
* Scan with exceptions, objects with exceptions will be presented as `warning` and not `fail`
```
kubescape scan framework nsa --exceptions examples/exceptions.json
```
### Helm Support
* Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout
```
helm template [NAME] [CHART] [flags] --dry-run | kubescape scan framework nsa -
```
for example:
```
helm template bitnami/mysql --generate-name --dry-run | kubescape scan framework nsa -
```
### Offline Support <img src="docs/new-feature.svg">
It is possible to run Kubescape offline!
First download the framework and then scan with `--use-from` flag
* Download and save in file, if file name not specified, will store save to `~/.kubescape/<framework name>.json`
```
kubescape download framework nsa --output nsa.json
```
* Scan using the downloaded framework
```
kubescape scan framework nsa --use-from nsa.json
```
Kubescape is an open source project, we welcome your feedback and ideas for improvement. Were also aiming to collaborate with the Kubernetes community to help make the tests themselves more robust and complete as Kubernetes develops.

View File

@@ -1,5 +0,0 @@
#! /bin/bash
echo "Testing Online Boutique yamls (https://github.com/GoogleCloudPlatform/microservices-demo)"
kubescape scan framework nsa online-boutique/*

2
go.mod
View File

@@ -14,6 +14,7 @@ require (
github.com/fatih/color v1.13.0
github.com/francoispqt/gojay v1.2.13
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
github.com/johnfercher/maroto v0.34.0
github.com/mattn/go-isatty v0.0.14
github.com/olekukonko/tablewriter v0.0.5
@@ -21,6 +22,7 @@ require (
github.com/spf13/cobra v1.2.1
github.com/stretchr/testify v1.7.0
go.uber.org/zap v1.19.1
golang.org/x/mod v0.4.2
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.22.2
k8s.io/apimachinery v0.22.2

2
go.sum
View File

@@ -394,6 +394,7 @@ github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2c
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
@@ -800,6 +801,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=

View File

@@ -91,6 +91,12 @@ func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(path, requestKind string
return res, nil
}
// return list of LinuxKernelVariables
func (hsh *HostSensorHandler) GetKernelVariables() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
return hsh.sendAllPodsHTTPGETRequest("/LinuxKernelVariables", "LinuxKernelVariables")
}
// return list of OpenPortsList
func (hsh *HostSensorHandler) GetOpenPortsList() ([]hostsensor.HostSensorDataEnvelope, error) {
// loop over pods and port-forward it to each of them
@@ -195,6 +201,12 @@ func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnv
return kcData, err
}
res = append(res, kcData...)
// GetKernelVariables
kcData, err = hsh.GetKernelVariables()
if err != nil {
return kcData, err
}
res = append(res, kcData...)
// finish
logger.L().Debug("Done reading information from host sensor")

41
httphandler/README.md Normal file
View File

@@ -0,0 +1,41 @@
# Run kubescape as a microservice
> This is a beta version, we might make some changes before publishing the official Prometheus support
**Set environment `KS_MICROSERVICE=true`**
Running `kubescape` will start up a webserver on port `8080` which will serve the following paths:
* POST `/v1/scan` - Trigger a kubescape scan. The server will return an ID and will execute the scanning asynchronously
* * `synchronously`: scan synchronously (return results and not ID). Use only in small clusters are with an increased timeout
* GET `/v1/results` - Request kubescape scan results
* * query `id=<string>` -> ID returned when triggering the scan action. If empty will return latest results
* * query `remove` -> Remove results from storage after reading the results
* DELETE `/v1/results` - Delete kubescape scan results from storage If empty will delete latest results
* * query `id=<string>`: Delete ID of specific results
* * query `all`: Delete all cached results
* GET/POST `/metrics` - will trigger cluster scan. will respond with prometheus metrics once they have been scanned. This will respond 503 if the scan failed.
* `/livez` - will respond 200 is server is alive
* `/readyz` - will respond 200 if server can receive requests
## Trigger Kubescape scan
POST /v1/results
body:
```json
```
e.g.:
```bash
curl --header "Content-Type: application/json" \
--request POST \
--data '{"account":"42ec914f-74e6-4bcb-8e69-5edd819d9b15","hostSensor":true}' \
http://127.0.0.1:5000/v1/scan
```
## Installation into kubernetes
The [yaml](ks-prometheus-support.yaml) file will deploy one instance of kubescape (with all relevant dependencies) to run on your cluster
**NOTE** Make sure the configurations suit your cluster (e.g. `serviceType`, namespace, etc.)
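
Since a POST to `/v1/scan` runs asynchronously and returns an ID, a client typically triggers the scan and fetches the results separately. A rough Go client sketch; the base URL is a placeholder, and only the paths, query parameter and port come from the README above:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	base := "http://127.0.0.1:8080" // placeholder; the service listens on 8080

	// Trigger an asynchronous scan (see the curl example above for a fuller body).
	resp, err := http.Post(base+"/v1/scan", "application/json",
		bytes.NewBufferString(`{"hostSensor":true}`))
	if err != nil {
		panic(err)
	}
	id, _ := io.ReadAll(resp.Body) // the server answers with a scan ID
	resp.Body.Close()

	// Later: fetch the results for that ID (add &remove to delete them after reading).
	res, err := http.Get(fmt.Sprintf("%s/v1/results?id=%s", base, string(id)))
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	out, _ := io.ReadAll(res.Body)
	fmt.Println(string(out))
}
```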

View File

@@ -0,0 +1,18 @@
package v1
type PostScanRequest struct {
Format string `json:"format"` // Format results (table, json, junit ...) - default json
ExcludedNamespaces []string `json:"excludedNamespaces"` // used for host sensor namespace
IncludeNamespaces []string `json:"includeNamespaces"` // DEPRECATED?
FailThreshold float32 `json:"failThreshold"` // Failure score threshold
Submit bool `json:"submit"` // Submit results to Armo BE - default will
HostSensor bool `json:"hostSensor"` // Deploy ARMO K8s host sensor to collect data from certain controls
KeepLocal bool `json:"keepLocal"` // Do not submit results
Account string `json:"account"` // account ID
Logger string `json:"-"` // logger level - debug/info/error - default is debug
TargetType string `json:"-"` // framework/control - default is framework
TargetNames []string `json:"-"` // default is all
// UseExceptions string // Load file with exceptions configuration
// ControlsInputs string // Load file with inputs for controls
// VerboseMode bool // Display all of the input resources and not only failed resources
}
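For illustration, a request body built from the JSON tags above might look like the following (a sketch; the values are placeholders, not documented defaults):
```json
{
  "format": "json",
  "excludedNamespaces": ["kube-system"],
  "failThreshold": 100,
  "submit": false,
  "hostSensor": true,
  "keepLocal": false,
  "account": "<account-guid>"
}
```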

View File

@@ -0,0 +1,30 @@
package v1
import (
"strings"
"github.com/armosec/kubescape/cautils"
)
func (scanRequest *PostScanRequest) ToScanInfo() *cautils.ScanInfo {
scanInfo := cautils.ScanInfo{}
scanInfo.Account = scanRequest.Account
scanInfo.ExcludedNamespaces = strings.Join(scanRequest.ExcludedNamespaces, ",")
scanInfo.IncludeNamespaces = strings.Join(scanRequest.IncludeNamespaces, ",")
scanInfo.FailThreshold = scanRequest.FailThreshold // TODO - handle default
scanInfo.Format = scanRequest.Format // TODO - handle default
scanInfo.Local = scanRequest.KeepLocal
scanInfo.Submit = scanRequest.Submit
scanInfo.HostSensorEnabled.SetBool(scanRequest.HostSensor)
return &scanInfo
}
/*
err := clihandler.ScanCliSetup(&scanInfo)
if err != nil {
logger.L().Fatal(err.Error())
}
*/
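A rough usage sketch (hypothetical wiring that mirrors how `clihandler.Scan` is called elsewhere in this change; error handling trimmed):
```go
// build a ScanInfo from an incoming request and run the scan
scanRequest := &PostScanRequest{Account: "<account-guid>", HostSensor: true}
scanInfo := scanRequest.ToScanInfo()
scanInfo.FrameworkScan = true // scan all frameworks, as getScanCommand does later in this change
scanInfo.ScanAll = true

results, err := clihandler.Scan(scanInfo)
if err != nil {
	logger.L().Fatal(err.Error())
}
results.HandleResults() // print and/or submit according to the configured printer/reporter
```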

View File

@@ -0,0 +1,58 @@
package v1
import (
"fmt"
"net/http"
"os"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/logger"
"github.com/armosec/kubescape/cautils/logger/helpers"
"github.com/armosec/kubescape/clihandler"
"github.com/google/uuid"
)
// Metrics http listener for prometheus support
func (handler *HTTPHandler) Metrics(w http.ResponseWriter, r *http.Request) {
if handler.state.isBusy() { // if already scanning the cluster
w.Write([]byte(fmt.Sprintf("scan '%s' in action", handler.state.getID())))
w.WriteHeader(http.StatusServiceUnavailable)
return
}
handler.state.setBusy()
defer handler.state.setNotBusy()
handler.state.setID(uuid.NewString())
// trigger scanning
logger.L().Info(handler.state.getID(), helpers.String("action", "triggering scan"), helpers.Time())
results, err := clihandler.Scan(getPrometheusDefaultScanCommand(handler.state.getID()))
logger.L().Info(handler.state.getID(), helpers.String("action", "done scanning"), helpers.Time())
if err != nil {
w.Write([]byte(fmt.Sprintf("failed to complete scan. reason: %s", err.Error())))
w.WriteHeader(http.StatusInternalServerError)
return
}
res, err := results.ToJson()
if err != nil {
w.Write([]byte(fmt.Sprintf("failed to convert scan scan results to json. reason: %s", err.Error())))
w.WriteHeader(http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
w.Write(res)
}
func getPrometheusDefaultScanCommand(scanID string) *cautils.ScanInfo {
scanInfo := cautils.ScanInfo{}
scanInfo.FrameworkScan = true
scanInfo.ScanAll = true // scan all frameworks
scanInfo.ReportID = scanID // scan ID
scanInfo.HostSensorEnabled.Set(os.Getenv("KS_ENABLE_HOST_SENSOR")) // enable host scanner
scanInfo.FailThreshold = 100 // Do not fail scanning
// scanInfo.Format = "prometheus" // results format
return &scanInfo
}

View File

@@ -0,0 +1,178 @@
package v1
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"sync"
"github.com/armosec/kubescape/cautils/logger"
"github.com/armosec/kubescape/cautils/logger/helpers"
"github.com/google/uuid"
)
var OutputDir = "/results"
var FailedOutputDir = "/failed"
type HTTPHandler struct {
state *serverState
}
func NewHTTPHandler() *HTTPHandler {
return &HTTPHandler{
state: newServerState(),
}
}
func (handler *HTTPHandler) Scan(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
handler.state.setNotBusy()
logger.L().Error("Scan recover", helpers.Error(fmt.Errorf("%v", err)))
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("%v", err)))
}
}()
defer r.Body.Close()
switch r.Method {
case http.MethodGet: // return request template
w.Header().Set("Content-Type", "application/json") // set headers and status before writing the body
w.WriteHeader(http.StatusOK)
json.NewEncoder(w).Encode(PostScanRequest{})
return
case http.MethodPost:
default:
w.WriteHeader(http.StatusMethodNotAllowed)
return
}
if handler.state.isBusy() {
w.WriteHeader(http.StatusOK)
w.Write([]byte(handler.state.getID()))
return
}
handler.state.setBusy()
// generate id
scanID := uuid.NewString()
handler.state.setID(scanID)
readBuffer, err := ioutil.ReadAll(r.Body)
if err != nil {
defer handler.state.setNotBusy()
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(fmt.Sprintf("failed to read request body, reason: %s", err.Error())))
return
}
scanRequest := PostScanRequest{}
if err := json.Unmarshal(readBuffer, &scanRequest); err != nil {
defer handler.state.setNotBusy()
w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(fmt.Sprintf("failed to parse request payload, reason: %s", err.Error())))
return
}
response := []byte(scanID)
returnResults := r.URL.Query().Has("wait")
var wg sync.WaitGroup
if returnResults {
wg.Add(1)
}
go func() {
// execute scan in the background
logger.L().Info("scan triggered", helpers.String("ID", scanID))
results, err := scan(&scanRequest, scanID)
if err != nil {
logger.L().Error("scanning failed", helpers.String("ID", scanID), helpers.Error(err))
if returnResults {
response = []byte(err.Error())
}
} else {
if returnResults {
w.Header().Set("Content-Type", "application/json")
response = results
}
logger.L().Success("done scanning", helpers.String("ID", scanID))
}
// // saving the scan status/result in the 'response' object only when waiting for results
// if returnResults {
// w.Header().Set("Content-Type", "application/json")
// response = results
// }
wg.Done()
handler.state.setNotBusy()
}()
wg.Wait()
w.WriteHeader(http.StatusOK)
w.Write(response)
}
func (handler *HTTPHandler) Results(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
handler.state.setNotBusy()
logger.L().Error("Results recover", helpers.Error(fmt.Errorf("%v", err)))
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(fmt.Sprintf("%v", err)))
}
}()
defer r.Body.Close()
var scanID string
if scanID = r.URL.Query().Get("scanID"); scanID == "" {
scanID = handler.state.getLatestID()
}
logger.L().Info("requesting results", helpers.String("ID", scanID))
if handler.state.isBusy() { // if requested ID is still scanning
if scanID == handler.state.getID() {
logger.L().Info("scan in process", helpers.String("ID", scanID))
w.WriteHeader(http.StatusOK) // Should we return ok?
w.Write([]byte(handler.state.getID()))
return
}
}
switch r.Method {
case http.MethodGet:
if r.URL.Query().Has("remove") {
defer removeResultsFile(scanID)
}
if res, err := readResultsFile(scanID); err != nil {
w.WriteHeader(http.StatusNoContent)
w.Write([]byte(err.Error()))
} else {
w.WriteHeader(http.StatusOK)
w.Write(res)
}
case http.MethodDelete:
if r.URL.Query().Has("all") {
removeResultDirs()
} else {
removeResultsFile(scanID)
}
w.WriteHeader(http.StatusOK)
default:
w.WriteHeader(http.StatusMethodNotAllowed)
}
}
func (handler *HTTPHandler) Live(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
func (handler *HTTPHandler) Ready(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}

View File

@@ -0,0 +1,90 @@
package v1
import (
"fmt"
"os"
"path/filepath"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/clihandler"
)
func scan(scanRequest *PostScanRequest, scanID string) ([]byte, error) {
scanInfo := getScanCommand(scanRequest, scanID)
result, err := clihandler.Scan(scanInfo)
if err != nil {
f, e := os.Create(filepath.Join(FailedOutputDir, scanID)) // os.Create (not os.Open) so the failure file is created if missing
if e != nil {
return []byte{}, fmt.Errorf("failed to scan. reason: '%s'. failed to save error in file. reason: %s", err.Error(), e.Error())
}
defer f.Close()
f.Write([]byte(err.Error())) // record the scan error (e is nil here)
}
result.HandleResults()
b, err := result.ToJson()
if err != nil {
err = fmt.Errorf("failed to parse results to json, reason: %s", err.Error())
}
return b, err
}
func readResultsFile(fileID string) ([]byte, error) {
if fileName := searchFile(fileID); fileName != "" {
return os.ReadFile(fileName)
}
return nil, fmt.Errorf("file not found")
}
func removeResultDirs() {
// delete all cached results; assumes the directories are recreated on the next scan if needed
os.RemoveAll(OutputDir)
os.RemoveAll(FailedOutputDir)
}
func removeResultsFile(fileID string) error {
if fileName := searchFile(fileID); fileName != "" {
return os.Remove(fileName)
}
return nil // no files found to delete
}
func searchFile(fileID string) string {
if fileName, _ := findFile(OutputDir, fileID); fileName != "" {
return fileName
}
if fileName, _ := findFile(FailedOutputDir, fileID); fileName != "" {
return fileName
}
return ""
}
func findFile(targetDir string, fileName string) (string, error) {
matches, err := filepath.Glob(filepath.Join(targetDir, fileName)) // join with the path separator so the glob matches files inside targetDir
if err != nil {
return "", err
}
if len(matches) != 0 {
return matches[0], nil
}
return "", nil
}
func getScanCommand(scanRequest *PostScanRequest, scanID string) *cautils.ScanInfo {
scanInfo := scanRequest.ToScanInfo()
scanInfo.ReportID = scanID
// *** start ***
// TODO - support frameworks/controls and support scanning single frameworks/controls
scanInfo.FrameworkScan = true
scanInfo.ScanAll = true
// *** end ***
// *** start ***
// DO NOT CHANGE
scanInfo.Output = filepath.Join(OutputDir, scanID)
// *** end ***
return scanInfo
}

View File

@@ -0,0 +1,59 @@
package v1
import "sync"
type serverState struct {
// response string
busy bool
id string
latestID string
mtx sync.RWMutex
}
func (s *serverState) isBusy() bool {
s.mtx.RLock()
busy := s.busy
s.mtx.RUnlock()
return busy
}
func (s *serverState) setBusy() {
s.mtx.Lock()
s.busy = true
s.mtx.Unlock()
}
func (s *serverState) setNotBusy() {
s.mtx.Lock()
s.busy = false
s.latestID = s.id
s.id = ""
s.mtx.Unlock()
}
func (s *serverState) getID() string {
s.mtx.RLock()
id := s.id
s.mtx.RUnlock()
return id
}
func (s *serverState) setID(id string) {
s.mtx.Lock()
s.id = id
s.mtx.Unlock()
}
func (s *serverState) getLatestID() string {
s.mtx.RLock()
id := s.latestID
s.mtx.RUnlock()
return id
}
func newServerState() *serverState {
return &serverState{
busy: false,
mtx: sync.RWMutex{},
}
}

View File

@@ -0,0 +1,107 @@
---
apiVersion: v1
kind: Namespace
metadata:
labels:
app: kubescape
name: ks-scanner
---
# ------------------- Kubescape Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: kubescape
name: kubescape-discovery
namespace: ks-scanner
---
# ------------------- Kubescape Cluster Role & Cluster Role Binding ------------------- #
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubescape-discovery-clusterroles
# "namespace" omitted since ClusterRoles are not namespaced
rules:
- apiGroups: ["*"]
resources: ["*"]
verbs: ["get", "list", "describe"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubescape-discovery-role-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubescape-discovery-clusterroles
subjects:
- kind: ServiceAccount
name: kubescape-discovery
namespace: ks-scanner
---
apiVersion: v1
kind: Service
metadata:
name: kubescape-service
namespace: ks-scanner
labels:
app: kubescape-service
spec:
type: NodePort
ports:
- port: 8080
targetPort: 8080
protocol: TCP
selector:
app: kubescape
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubescape
namespace: ks-scanner
labels:
app: kubescape
spec:
replicas: 1
selector:
matchLabels:
app: kubescape
template:
metadata:
labels:
app: kubescape
spec:
serviceAccountName: kubescape-discovery
containers:
- name: kubescape
# livenessProbe:
# httpGet:
# path: /livez
# port: 8080
# initialDelaySeconds: 3
# periodSeconds: 3
# readinessProbe:
# httpGet:
# path: /readyz
# port: 8080
# initialDelaySeconds: 3
# periodSeconds: 3
image: quay.io/armosec/kubescape:prometheus.v1
env:
- name: KS_RUN_PROMETHEUS_SERVER
value: "true"
- name: KS_DEFAULT_CONFIGMAP_NAMESPACE
value: "ks-scanner"
ports:
- containerPort: 8080
command:
- kubescape
resources:
requests:
cpu: 10m
memory: 100Mi
limits:
cpu: 500m
memory: 500Mi

View File

@@ -0,0 +1,20 @@
package listener
import (
"encoding/json"
"fmt"
"net/http"
"github.com/armosec/kubescape/cautils/logger"
"github.com/armosec/kubescape/cautils/logger/helpers"
)
// RecoverFunc recover function for http requests
func RecoverFunc(w http.ResponseWriter) {
if err := recover(); err != nil {
logger.L().Error("", helpers.Error(fmt.Errorf("%v", err)))
w.WriteHeader(http.StatusInternalServerError)
bErr, _ := json.Marshal(err)
w.Write(bErr)
}
}

View File

@@ -0,0 +1,78 @@
package listener
import (
"crypto/tls"
"fmt"
"net/http"
"os"
"github.com/armosec/kubescape/cautils/logger"
"github.com/armosec/kubescape/cautils/logger/helpers"
handlerequestsv1 "github.com/armosec/kubescape/httphandler/handlerequests/v1"
"github.com/gorilla/mux"
)
const (
scanPath = "/v1/scan"
resultsPath = "/v1/results"
prometheusMetricsPath = "/metrics"
livePath = "/livez"
readyPath = "/readyz"
)
// SetupHTTPListener set up listening http servers
func SetupHTTPListener() error {
keyPair, err := loadTLSKey("", "") // TODO - support key and crt files
if err != nil {
return err
}
server := &http.Server{
Addr: fmt.Sprintf(":%s", getPort()), // TODO - support loading port from config/env
}
if keyPair != nil {
server.TLSConfig = &tls.Config{Certificates: []tls.Certificate{*keyPair}}
}
rtr := mux.NewRouter()
// rtr.HandleFunc(opapolicy.PostureRestAPIPathV1, resthandler.RestAPIReceiveNotification)
// listen
httpHandler := handlerequestsv1.NewHTTPHandler()
rtr.HandleFunc(prometheusMetricsPath, httpHandler.Metrics)
rtr.HandleFunc(scanPath, httpHandler.Scan)
rtr.HandleFunc(resultsPath, httpHandler.Results)
rtr.HandleFunc(livePath, httpHandler.Live)
rtr.HandleFunc(readyPath, httpHandler.Ready)
server.Handler = rtr
logger.L().Info("Started Kubescape server", helpers.String("port", getPort()))
if keyPair != nil {
logger.L().Fatal(server.ListenAndServeTLS("", "").Error())
}
logger.L().Fatal(server.ListenAndServe().Error())
return nil
}
func loadTLSKey(certFile, keyFile string) (*tls.Certificate, error) {
if keyFile == "" || certFile == "" {
return nil, nil
}
pair, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return nil, fmt.Errorf("filed to load key pair: %v", err)
}
return &pair, nil
}
func getPort() string {
if p := os.Getenv("KS_PORT"); p != "" {
return p
}
return "8080"
}

View File

@@ -2,8 +2,12 @@ package main
import (
"github.com/armosec/kubescape/clihandler/cmd"
// "github.com/armosec/kubescape/httphandler/listener"
)
func main() {
cmd.Execute()
// // cmd.Execute()
// listener.SetupHTTPListener()
}

View File

@@ -8,7 +8,7 @@ import (
"github.com/armosec/opa-utils/reporthandling"
)
var mockControl_0006 = `{"guid":"","name":"Allowed hostPath","attributes":{"armoBuiltin":true},"id":"C-0006","controlID":"C-0006","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistance::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if 
any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6}`
var mockControl_0006 = `{"guid":"","name":"Allowed hostPath","attributes":{"armoBuiltin":true},"id":"C-0006","controlID":"C-0006","creationTime":"","description":"Mounting host directory to the container can be abused to get access to sensitive data and gain persistence on the host machine.","remediation":"Refrain from using host path mount.","rules":[{"guid":"","name":"alert-rw-hostpath","attributes":{"armoBuiltin":true,"m$K8sThreatMatrix":"Persistence::Writable hostPath mount, Lateral Movement::Writable volume mounts on the host"},"creationTime":"","rule":"package armo_builtins\n\n# input: pod\n# apiversion: v1\n# does: returns hostPath volumes\n\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := pod.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n podname := pod.metadata.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has: %v as hostPath volume\", [podname, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n#handles majority of workload resources\ndeny[msga] {\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\tcontainer := wl.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": [result],\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t\n\t}\n}\n\n#handles CronJobs\ndeny[msga] {\n\twl := input[_]\n\twl.kind == \"CronJob\"\n volumes := wl.spec.jobTemplate.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tvolumeMount := container.volumeMounts[k]\n\tvolumeMount.name == volume.name\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n\tresult := isRWMount(volumeMount, begginingOfPath, i, k)\n\n\tmsga := {\n\t\"alertMessage\": sprintf(\"%v: %v has: %v as hostPath volume\", [wl.kind, wl.metadata.name, volume.name]),\n\t\"packagename\": \"armo_builtins\",\n\t\"alertScore\": 7,\n\t\"failedPaths\": [result],\n\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\nisRWMount(mount, begginingOfPath, i, k) = path {\n not mount.readOnly == true\n not mount.readOnly == false\n path = \"\"\n}\nisRWMount(mount, begginingOfPath, i, k) = path {\n mount.readOnly == false\n path = sprintf(\"%vcontainers[%v].volumeMounts[%v].readOnly\", [begginingOfPath, format_int(i, 10), format_int(k, 10)])\n} ","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","CronJob","Pod"]}],"ruleDependencies":[{"packageName":"cautils"},{"packageName":"kubernetes.api.client"}],"configInputs":null,"controlConfigInputs":null,"description":"determines if 
any workload contains a hostPath volume with rw permissions","remediation":"Set the readOnly field of the mount to true","ruleQuery":""}],"rulesIDs":[""],"baseScore":6}`
var mockControl_0044 = `{"guid":"","name":"Container hostPort","attributes":{"armoBuiltin":true},"id":"C-0044","controlID":"C-0044","creationTime":"","description":"Configuring hostPort limits you to a particular port, and if any two workloads that specify the same HostPort they cannot be deployed to the same node. Therefore, if the number of replica of such workload is higher than the number of nodes, the deployment will fail.","remediation":"Avoid usage of hostPort unless it is absolutely necessary. Use NodePort / ClusterIP instead.","rules":[{"guid":"","name":"container-hostPort","attributes":{"armoBuiltin":true},"creationTime":"","rule":"package armo_builtins\n\n\n# Fails if pod has container with hostPort\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n container := pod.spec.containers[i]\n\tbegginingOfPath := \"spec.\"\n\tpath := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v has Host-port\", [ container.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [pod]\n\t\t}\n\t}\n}\n\n# Fails if workload has container with hostPort\ndeny[msga] {\n wl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n container := wl.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n# Fails if cronjob has container with hostPort\ndeny[msga] {\n \twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainer = wl.spec.jobTemplate.spec.template.spec.containers[i]\n\tbegginingOfPath := \"spec.jobTemplate.spec.template.spec.\"\n path := isHostPort(container, i, begginingOfPath)\n msga := {\n\t\t\"alertMessage\": sprintf(\"Container: %v in %v: %v has Host-port\", [ container.name, wl.kind, wl.metadata.name]),\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertScore\": 7,\n\t\t\"failedPaths\": path,\n\t\t\"alertObject\": {\n\t\t\t\"k8sApiObjects\": [wl]\n\t\t}\n\t}\n}\n\n\n\nisHostPort(container, i, begginingOfPath) = path {\n\tpath = [sprintf(\"%vcontainers[%v].ports[%v].hostPort\", [begginingOfPath, format_int(i, 10), format_int(j, 10)]) | port = container.ports[j]; port.hostPort]\n\tcount(path) > 0\n}\n","resourceEnumerator":"","ruleLanguage":"Rego","match":[{"apiGroups":["*"],"apiVersions":["*"],"resources":["Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod","CronJob"]}],"ruleDependencies":[],"configInputs":null,"controlConfigInputs":null,"description":"fails if container has hostPort","remediation":"Make sure you do not configure hostPort for the container, if necessary use NodePort / ClusterIP","ruleQuery":"armo_builtins"}],"rulesIDs":[""],"baseScore":4}`

View File

@@ -15,7 +15,6 @@ import (
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
"github.com/open-policy-agent/opa/storage"
"github.com/armosec/k8s-interface/k8sinterface"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/opa-utils/resources"
@@ -25,59 +24,37 @@ import (
const ScoreConfigPath = "/resources/config"
type OPAProcessorHandler struct {
processedPolicy *chan *cautils.OPASessionObj
reportResults *chan *cautils.OPASessionObj
regoDependenciesData *resources.RegoDependenciesData
}
type OPAProcessor struct {
*cautils.OPASessionObj
regoDependenciesData *resources.RegoDependenciesData
*cautils.OPASessionObj
}
func NewOPAProcessor(sessionObj *cautils.OPASessionObj, regoDependenciesData *resources.RegoDependenciesData) *OPAProcessor {
if regoDependenciesData != nil && sessionObj != nil {
regoDependenciesData.PostureControlInputs = sessionObj.RegoInputData.PostureControlInputs
}
return &OPAProcessor{
OPASessionObj: sessionObj,
regoDependenciesData: regoDependenciesData,
}
}
func (opap *OPAProcessor) ProcessRulesListenner() error {
func NewOPAProcessorHandler(processedPolicy, reportResults *chan *cautils.OPASessionObj) *OPAProcessorHandler {
return &OPAProcessorHandler{
processedPolicy: processedPolicy,
reportResults: reportResults,
regoDependenciesData: resources.NewRegoDependenciesData(k8sinterface.GetK8sConfig(), cautils.ClusterName),
policies := ConvertFrameworksToPolicies(opap.Policies, cautils.BuildNumber)
ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Policies, policies)
// process
if err := opap.Process(policies); err != nil {
logger.L().Error(err.Error())
// Return error?
}
}
func (opaHandler *OPAProcessorHandler) ProcessRulesListenner() {
// edit results
opap.updateResults()
for {
opaSessionObj := <-*opaHandler.processedPolicy
opap := NewOPAProcessor(opaSessionObj, opaHandler.regoDependenciesData)
//TODO: review this location
scorewrapper := ksscore.NewScoreWrapper(opap.OPASessionObj)
scorewrapper.Calculate(ksscore.EPostureReportV2)
policies := ConvertFrameworksToPolicies(opap.Frameworks, cautils.BuildNumber)
ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Frameworks, policies)
// process
if err := opap.Process(policies); err != nil {
logger.L().Error(err.Error())
}
// edit results
opap.updateResults()
//TODO: review this location
scorewrapper := ksscore.NewScoreWrapper(opaSessionObj)
scorewrapper.Calculate(ksscore.EPostureReportV2)
// report
*opaHandler.reportResults <- opaSessionObj
}
return nil
}
func (opap *OPAProcessor) Process(policies *cautils.Policies) error {

View File

@@ -32,13 +32,13 @@ func TestProcess(t *testing.T) {
// set opaSessionObj
opaSessionObj := cautils.NewOPASessionObjMock()
opaSessionObj.Frameworks = []reporthandling.Framework{*reporthandling.MockFrameworkA()}
policies := ConvertFrameworksToPolicies(opaSessionObj.Frameworks, "")
opaSessionObj.Policies = []reporthandling.Framework{*reporthandling.MockFrameworkA()}
policies := ConvertFrameworksToPolicies(opaSessionObj.Policies, "")
opaSessionObj.K8SResources = &k8sResources
opaSessionObj.AllResources = allResources
opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock())
opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock()) // ,
opap.Process(policies)
opap.updateResults()
for _, f := range opap.PostureReport.FrameworkReports {
@@ -68,10 +68,10 @@ func TestProcessResourcesResult(t *testing.T) {
// set opaSessionObj
opaSessionObj := cautils.NewOPASessionObjMock()
opaSessionObj.Frameworks = frameworks
opaSessionObj.Policies = frameworks
policies := ConvertFrameworksToPolicies(opaSessionObj.Frameworks, "")
ConvertFrameworksToSummaryDetails(&opaSessionObj.Report.SummaryDetails, opaSessionObj.Frameworks, policies)
policies := ConvertFrameworksToPolicies(opaSessionObj.Policies, "")
ConvertFrameworksToSummaryDetails(&opaSessionObj.Report.SummaryDetails, opaSessionObj.Policies, policies)
opaSessionObj.K8SResources = &k8sResources
opaSessionObj.AllResources[deployment.GetID()] = deployment

View File

@@ -137,29 +137,6 @@ func ruleWithArmoOpaDependency(annotations map[string]interface{}) bool {
return false
}
// Checks that kubescape version is in range of use for this rule
// In local build (BuildNumber = ""):
// returns true only if rule doesn't have the "until" attribute
func isRuleKubescapeVersionCompatible(rule *reporthandling.PolicyRule) bool {
if from, ok := rule.Attributes["useFromKubescapeVersion"]; ok {
if cautils.BuildNumber != "" {
if from.(string) > cautils.BuildNumber {
return false
}
}
}
if until, ok := rule.Attributes["useUntilKubescapeVersion"]; ok {
if cautils.BuildNumber != "" {
if until.(string) <= cautils.BuildNumber {
return false
}
} else {
return false
}
}
return true
}
func removeData(obj workloadinterface.IMetadata) {
if !k8sinterface.IsTypeWorkload(obj.GetObject()) {
return // remove data only from kubernetes objects

View File

@@ -5,57 +5,9 @@ import (
"github.com/stretchr/testify/assert"
"github.com/armosec/armoapi-go/armotypes"
"github.com/armosec/k8s-interface/workloadinterface"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/opa-utils/reporthandling"
)
func TestGetKubernetesObjects(t *testing.T) {
}
var rule_v1_0_131 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useUntilKubescapeVersion": "v1.0.132"}}}
var rule_v1_0_132 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.132", "useUntilKubescapeVersion": "v1.0.133"}}}
var rule_v1_0_133 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.133", "useUntilKubescapeVersion": "v1.0.134"}}}
var rule_v1_0_134 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.134"}}}
func TestIsRuleKubescapeVersionCompatible(t *testing.T) {
// local build- no build number
// should use only rules that don't have "until"
cautils.BuildNumber = ""
if isRuleKubescapeVersionCompatible(rule_v1_0_131) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
if isRuleKubescapeVersionCompatible(rule_v1_0_132) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
if isRuleKubescapeVersionCompatible(rule_v1_0_133) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
if !isRuleKubescapeVersionCompatible(rule_v1_0_134) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
// should only use rules that version is in range of use
cautils.BuildNumber = "v1.0.133"
if isRuleKubescapeVersionCompatible(rule_v1_0_131) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
if isRuleKubescapeVersionCompatible(rule_v1_0_132) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
if !isRuleKubescapeVersionCompatible(rule_v1_0_133) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
if isRuleKubescapeVersionCompatible(rule_v1_0_134) {
t.Error("error in isRuleKubescapeVersionCompatible")
}
}
func TestRemoveData(t *testing.T) {
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"demoservice-server"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"demoservice-server"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}}}`

View File

@@ -12,19 +12,17 @@ import (
type PolicyHandler struct {
resourceHandler resourcehandler.IResourceHandler
// we are listening on this chan in opaprocessor/processorhandler.go/ProcessRulesListenner func
processPolicy *chan *cautils.OPASessionObj
getters *cautils.Getters
getters *cautils.Getters
}
// CreatePolicyHandler Create ws-handler obj
func NewPolicyHandler(processPolicy *chan *cautils.OPASessionObj, resourceHandler resourcehandler.IResourceHandler) *PolicyHandler {
func NewPolicyHandler(resourceHandler resourcehandler.IResourceHandler) *PolicyHandler {
return &PolicyHandler{
resourceHandler: resourceHandler,
processPolicy: processPolicy,
}
}
func (policyHandler *PolicyHandler) HandleNotificationRequest(notification *reporthandling.PolicyNotification, scanInfo *cautils.ScanInfo) error {
func (policyHandler *PolicyHandler) CollectResources(notification *reporthandling.PolicyNotification, scanInfo *cautils.ScanInfo) (*cautils.OPASessionObj, error) {
opaSessionObj := cautils.NewOPASessionObj(nil, nil)
// validate notification
// TODO
@@ -32,26 +30,25 @@ func (policyHandler *PolicyHandler) HandleNotificationRequest(notification *repo
// get policies
if err := policyHandler.getPolicies(notification, opaSessionObj); err != nil {
return err
return opaSessionObj, err
}
err := policyHandler.getResources(notification, opaSessionObj, scanInfo)
if err != nil {
return err
return opaSessionObj, err
}
if opaSessionObj.K8SResources == nil || len(*opaSessionObj.K8SResources) == 0 {
return fmt.Errorf("empty list of resources")
return opaSessionObj, fmt.Errorf("empty list of resources")
}
// update channel
*policyHandler.processPolicy <- opaSessionObj
return nil
return opaSessionObj, nil
}
func (policyHandler *PolicyHandler) getResources(notification *reporthandling.PolicyNotification, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
opaSessionObj.Report.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
resourcesMap, allResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj.Frameworks, &notification.Designators)
resourcesMap, allResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj.Policies, &notification.Designators)
if err != nil {
return err
}

View File

@@ -12,15 +12,15 @@ import (
func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.PolicyNotification, policiesAndResources *cautils.OPASessionObj) error {
logger.L().Info("Downloading/Loading policy definitions")
frameworks, err := policyHandler.getScanPolicies(notification)
policies, err := policyHandler.getScanPolicies(notification)
if err != nil {
return err
}
if len(frameworks) == 0 {
if len(policies) == 0 {
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(notification.Rules), ", "))
}
policiesAndResources.Frameworks = frameworks
policiesAndResources.Policies = policies
// get exceptions
exceptionPolicies, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)

View File

@@ -1,7 +1,7 @@
# Integrate With Vulnerability Server
There are some controls that check the relation between the kubernetes manifest and vulnerabilities.
For these controls to work properly, it is necasery to
For these controls to work properly, it is necessary to
## Supported Servers
* Armosec

View File

@@ -12,7 +12,7 @@ type V2ListRequest struct {
PageSize *int `json:"pageSize,omitempty"`
// One can leave it empty for 0, then call ValidatePageProperties
PageNum *int `json:"pageNum,omitempty"`
// The time window of the list to return. Default: since - begining og the time, until - now.
// The time window of the list to return. Default: since - beginning of the time, until - now.
Since *time.Time `json:"since,omitempty"`
Until *time.Time `json:"until,omitempty"`
// Which elements of the list to return, each field can hold multiple values separated by comma

View File

@@ -8,7 +8,7 @@
* OPA engine: the [OPA](https://github.com/open-policy-agent/opa) rego interpreter
* Rules processor: Kubescape component, it enumerates and runs the controls while also preparing all the input data that the controls need for running
* Data sources: set of different modules providing data to the Rules processor so it can run the controls with them. Examples: Kubernetes objects, cloud vendor API objects and, added in this proposal, the vulnerability information
* Cloud Image Vulnerability adaption interface: the subject of this proposal, it gives a common interface for different registry/vulnerabilty vendors to adapt to.
* Cloud Image Vulnerability adaption interface: the subject of this proposal, it gives a common interface for different registry/vulnerability vendors to adapt to.
* CIV adaptors: specific implementations of the CIV interface, for example a Harbor adaptor
```
-----------------------
@@ -53,7 +53,7 @@ The interface needs to cover the following functionalities:
## Go API proposal
```
```go
/*type ContainerImageRegistryCredentials struct {
Password string
@@ -110,9 +110,9 @@ type IContainerImageVulnerabilityAdaptor interface {
The objects received from the interface will be converted to IMetadata-compatible objects as follows
```
```json
{
"apiVersion": "image.vulnscan.com/v1",
"apiVersion": "armo.vuln.images/v1",
"kind": "ImageVulnerabilities",
"metadata": {
"name": "nginx:latest"
@@ -128,9 +128,9 @@ The objects received from the interface will be converted to an IMetadata compat
The rego results will be a combination of the k8s artifact and the list of relevant CVEs for the control
```
```json
{
"apiVersion": "result.vulnscan.com/v1",
"apiVersion": "armo.vuln/v1",
"kind": "Pod",
"metadata": {
"name": "nginx"

View File

@@ -13,7 +13,7 @@ import (
)
const (
ImagevulnerabilitiesObjectGroup = "image.vulnscan.com"
ImagevulnerabilitiesObjectGroup = "armo.vuln.images"
ImagevulnerabilitiesObjectVersion = "v1"
ImagevulnerabilitiesObjectKind = "ImageVulnerabilities"
)
@@ -96,14 +96,14 @@ func listImagesTags(k8sResourcesMap *cautils.K8SResources, allResources map[stri
if resource, ok := allResources[resources[j]]; ok {
if resource.GetObjectType() == workloadinterface.TypeWorkloadObject {
workload := workloadinterface.NewWorkloadObj(resource.GetObject())
if contianers, err := workload.GetContainers(); err == nil {
for i := range contianers {
images = append(images, contianers[i].Image)
if containers, err := workload.GetContainers(); err == nil {
for i := range containers {
images = append(images, containers[i].Image)
}
}
if contianers, err := workload.GetInitContainers(); err == nil {
for i := range contianers {
images = append(images, contianers[i].Image)
if containers, err := workload.GetInitContainers(); err == nil {
for i := range containers {
images = append(images, containers[i].Image)
}
}
}

View File

@@ -27,8 +27,7 @@ func (jsonPrinter *JsonPrinter) Score(score float32) {
}
func (jsonPrinter *JsonPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
finalizeJson(opaSessionObj)
r, err := json.Marshal(opaSessionObj.Report)
r, err := json.Marshal(DataToJson(opaSessionObj))
if err != nil {
logger.L().Fatal("failed to Marshal posture report object")
}

View File

@@ -7,20 +7,28 @@ import (
"github.com/armosec/kubescape/cautils/logger/helpers"
"github.com/armosec/opa-utils/reporthandling"
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
)
// finalizeV2Report finalize the results objects by copying data from map to lists
func finalizeJson(opaSessionObj *cautils.OPASessionObj) {
if len(opaSessionObj.Report.Results) == 0 {
opaSessionObj.Report.Results = make([]resourcesresults.Result, len(opaSessionObj.ResourcesResult))
finalizeResults(opaSessionObj.Report.Results, opaSessionObj.ResourcesResult)
func DataToJson(data *cautils.OPASessionObj) *reporthandlingv2.PostureReport {
report := reporthandlingv2.PostureReport{
SummaryDetails: data.Report.SummaryDetails,
ClusterAPIServerInfo: data.Report.ClusterAPIServerInfo,
ReportGenerationTime: data.Report.ReportGenerationTime,
Attributes: data.Report.Attributes,
ClusterName: data.Report.ClusterName,
CustomerGUID: data.Report.CustomerGUID,
ClusterCloudProvider: data.Report.ClusterCloudProvider,
}
if len(opaSessionObj.Report.Resources) == 0 {
opaSessionObj.Report.Resources = make([]reporthandling.Resource, len(opaSessionObj.AllResources))
finalizeResources(opaSessionObj.Report.Resources, opaSessionObj.Report.Results, opaSessionObj.AllResources)
}
report.Results = make([]resourcesresults.Result, len(data.ResourcesResult))
finalizeResults(report.Results, data.ResourcesResult)
report.Resources = make([]reporthandling.Resource, len(data.AllResources))
finalizeResources(report.Resources, report.Results, data.AllResources)
return &report
}
func finalizeResults(results []resourcesresults.Result, resourcesResult map[string]resourcesresults.Result) {
index := 0

View File

@@ -28,15 +28,17 @@ type ReportEventReceiver struct {
token string
customerAdminEMail string
message string
reportID string
}
func NewReportEventReceiver(tenantConfig *cautils.ConfigObj) *ReportEventReceiver {
func NewReportEventReceiver(tenantConfig *cautils.ConfigObj, reportID string) *ReportEventReceiver {
return &ReportEventReceiver{
httpClient: &http.Client{},
clusterName: tenantConfig.ClusterName,
customerGUID: tenantConfig.AccountID,
token: tenantConfig.Token,
customerAdminEMail: tenantConfig.CustomerAdminEMail,
reportID: reportID,
}
}
@@ -52,6 +54,9 @@ func (report *ReportEventReceiver) ActionSendReport(opaSessionObj *cautils.OPASe
logger.L().Warning("failed to publish results because the cluster name is Unknown. If you are scanning YAML files the results are not submitted to the Kubescape SaaS")
return nil
}
if opaSessionObj.Report.ReportID == "" {
opaSessionObj.Report.ReportID = uuid.NewString()
}
opaSessionObj.Report.ReportID = uuid.NewString()
opaSessionObj.Report.CustomerGUID = report.customerGUID
opaSessionObj.Report.ClusterName = report.clusterName
@@ -172,7 +177,7 @@ func (report *ReportEventReceiver) generateMessage() {
u.Scheme = "https"
u.Host = getter.GetArmoAPIConnector().GetFrontendURL()
if report.customerAdminEMail != "" { // data has been submitted
if report.customerAdminEMail != "" || report.token == "" { // data has been submitted
u.Path = fmt.Sprintf("configuration-scanning/%s", report.clusterName)
} else {
u.Path = "account/sign-up"

View File

@@ -1,58 +1,76 @@
package resultshandling
import (
"encoding/json"
"github.com/armosec/kubescape/cautils"
"github.com/armosec/kubescape/cautils/logger"
"github.com/armosec/kubescape/cautils/logger/helpers"
"github.com/armosec/kubescape/resultshandling/printer"
printerv1 "github.com/armosec/kubescape/resultshandling/printer/v1"
printerv2 "github.com/armosec/kubescape/resultshandling/printer/v2"
"github.com/armosec/kubescape/resultshandling/reporter"
"github.com/armosec/opa-utils/reporthandling"
)
type ResultsHandler struct {
opaSessionObj *chan *cautils.OPASessionObj
reporterObj reporter.IReport
printerObj printer.IPrinter
reporterObj reporter.IReport
printerObj printer.IPrinter
scanData *cautils.OPASessionObj
}
func NewResultsHandler(opaSessionObj *chan *cautils.OPASessionObj, reporterObj reporter.IReport, printerObj printer.IPrinter) *ResultsHandler {
func NewResultsHandler(reporterObj reporter.IReport, printerObj printer.IPrinter) *ResultsHandler {
return &ResultsHandler{
opaSessionObj: opaSessionObj,
reporterObj: reporterObj,
printerObj: printerObj,
reporterObj: reporterObj,
printerObj: printerObj,
}
}
func (resultsHandler *ResultsHandler) HandleResults(scanInfo *cautils.ScanInfo) float32 {
// GetRiskScore returns the scan risk-score
func (resultsHandler *ResultsHandler) GetRiskScore() float32 {
return resultsHandler.scanData.Report.SummaryDetails.Score
}
opaSessionObj := <-*resultsHandler.opaSessionObj
// GetData returns scan/action related data (policies, resources, results, etc.). Call the ToJson function if you want the JSON representation of the data
func (resultsHandler *ResultsHandler) GetData() *cautils.OPASessionObj {
return resultsHandler.scanData
}
resultsHandler.printerObj.ActionPrint(opaSessionObj)
// SetData set scan/action related data
func (resultsHandler *ResultsHandler) SetData(data *cautils.OPASessionObj) {
resultsHandler.scanData = data
}
if err := resultsHandler.reporterObj.ActionSendReport(opaSessionObj); err != nil {
// GetPrinter get printer object
func (resultsHandler *ResultsHandler) GetPrinter() printer.IPrinter {
return resultsHandler.printerObj
}
// GetReporter get reporter object
func (resultsHandler *ResultsHandler) GetReporter() reporter.IReport {
return resultsHandler.reporterObj
}
// ToJson return results in json format
func (resultsHandler *ResultsHandler) ToJson() ([]byte, error) {
return json.Marshal(printerv2.DataToJson(resultsHandler.scanData))
}
// HandleResults handles the scan results according to the predefined interfaces
func (resultsHandler *ResultsHandler) HandleResults() {
resultsHandler.printerObj.ActionPrint(resultsHandler.scanData)
if err := resultsHandler.reporterObj.ActionSendReport(resultsHandler.scanData); err != nil {
logger.L().Error(err.Error())
}
score := opaSessionObj.Report.SummaryDetails.Score
resultsHandler.printerObj.Score(score)
resultsHandler.printerObj.Score(resultsHandler.GetRiskScore())
return score
}
// CalculatePostureScore calculate final score
func CalculatePostureScore(postureReport *reporthandling.PostureReport) float32 {
failedResources := []string{}
allResources := []string{}
for _, frameworkReport := range postureReport.FrameworkReports {
failedResources = reporthandling.GetUniqueResourcesIDs(append(failedResources, frameworkReport.ListResourcesIDs().GetFailedResources()...))
allResources = reporthandling.GetUniqueResourcesIDs(append(allResources, frameworkReport.ListResourcesIDs().GetAllResources()...))
}
return (float32(len(allResources)) - float32(len(failedResources))) / float32(len(allResources))
resultsHandler.reporterObj.DisplayReportURL()
}
// NewPrinter defines the output format
func NewPrinter(printFormat, formatVersion string, verboseMode bool) printer.IPrinter {
switch printFormat {
@@ -61,6 +79,7 @@ func NewPrinter(printFormat, formatVersion string, verboseMode bool) printer.IPr
case "v2":
return printerv2.NewJsonPrinter()
default:
logger.L().Warning("Deprecated format version. run with '--format-version' flag", helpers.String("your version", formatVersion), helpers.String("latest version", "v2"))
return printerv1.NewJsonPrinter()
}
case printer.JunitResultFormat:

File diff suppressed because one or more lines are too long