mirror of
https://github.com/kubescape/kubescape.git
synced 2026-03-17 17:10:34 +00:00
Compare commits
185 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
839c3e261f | ||
|
|
95b579d191 | ||
|
|
05b6394c5c | ||
|
|
306b9d28ca | ||
|
|
6fe87bba20 | ||
|
|
c0d534072d | ||
|
|
009221aa98 | ||
|
|
46e5aff5f9 | ||
|
|
59498361e7 | ||
|
|
c652da130d | ||
|
|
9e524ffc34 | ||
|
|
004cc0c469 | ||
|
|
bd089d76af | ||
|
|
d5025b54bf | ||
|
|
740497047d | ||
|
|
d917e21364 | ||
|
|
32cedaf565 | ||
|
|
4c2a5e9a11 | ||
|
|
a41d2a46ff | ||
|
|
4794cbfb36 | ||
|
|
d021217cf7 | ||
|
|
4573d83831 | ||
|
|
e68e6dcd3d | ||
|
|
670ff4a15d | ||
|
|
b616a37800 | ||
|
|
ce488a3645 | ||
|
|
fb47a9c742 | ||
|
|
80ace81a12 | ||
|
|
1efdae5197 | ||
|
|
a4c88edfca | ||
|
|
8f38c2f627 | ||
|
|
bbf68d4ce8 | ||
|
|
e1eec47a22 | ||
|
|
fc05075817 | ||
|
|
5bb64b634a | ||
|
|
7bc2c2be13 | ||
|
|
27e2c044da | ||
|
|
1213e8d6ac | ||
|
|
3f58d68d2a | ||
|
|
803e62020e | ||
|
|
fde437312f | ||
|
|
18425c915b | ||
|
|
0de6892ddd | ||
|
|
dfb92ffec3 | ||
|
|
85317f1ee1 | ||
|
|
f22f60508f | ||
|
|
716bdaaf38 | ||
|
|
1b0e2b87de | ||
|
|
2c57b809d2 | ||
|
|
d9c96db212 | ||
|
|
5f7391a76b | ||
|
|
accd80eda8 | ||
|
|
e49499f085 | ||
|
|
521f8930d7 | ||
|
|
11b9a8eb6e | ||
|
|
0d4350ae24 | ||
|
|
62a6a25aa1 | ||
|
|
14a74e7312 | ||
|
|
3fad2f3430 | ||
|
|
c35d1e8791 | ||
|
|
0367255a2a | ||
|
|
f5f5552ecd | ||
|
|
046a22bd2b | ||
|
|
ad94ac7595 | ||
|
|
cfa3993b79 | ||
|
|
972793b98a | ||
|
|
35682bf5b8 | ||
|
|
b023f592aa | ||
|
|
a1c34646f1 | ||
|
|
9ac3768f1d | ||
|
|
ff7881130f | ||
|
|
37effda7c5 | ||
|
|
0cac7cb1a5 | ||
|
|
8d41d11ca3 | ||
|
|
0ef516d147 | ||
|
|
f57a30898c | ||
|
|
a10c67555d | ||
|
|
14d0df3926 | ||
|
|
c085aeaa68 | ||
|
|
8543afccca | ||
|
|
61b5603a3b | ||
|
|
e3efffb2ec | ||
|
|
fe9a342b42 | ||
|
|
c7668b4436 | ||
|
|
ccdf6b227f | ||
|
|
0aea384f41 | ||
|
|
467059cd26 | ||
|
|
f41af36ea9 | ||
|
|
e2f8902222 | ||
|
|
52bfd4cadc | ||
|
|
7cdc556292 | ||
|
|
039bda9eaf | ||
|
|
a6d73d6f8b | ||
|
|
8e5af59153 | ||
|
|
278467518e | ||
|
|
a7080a5778 | ||
|
|
6a71ef6745 | ||
|
|
10eb576260 | ||
|
|
f14acb79bf | ||
|
|
b8e011bd27 | ||
|
|
f6295308cd | ||
|
|
f981675850 | ||
|
|
93bb7610e6 | ||
|
|
23975ee359 | ||
|
|
14eaedf375 | ||
|
|
ced0b741b9 | ||
|
|
13e805b213 | ||
|
|
c424c1e394 | ||
|
|
77d68bdc73 | ||
|
|
a1555bb9cd | ||
|
|
3ca61b218e | ||
|
|
e7917277e7 | ||
|
|
aa18be17fa | ||
|
|
39c7af5f8d | ||
|
|
a5f7f8bbe4 | ||
|
|
420e491963 | ||
|
|
36f2ff997a | ||
|
|
c33807d052 | ||
|
|
fb3946b64f | ||
|
|
51322e7270 | ||
|
|
3f084d8525 | ||
|
|
b1f4002036 | ||
|
|
bb1cbe0902 | ||
|
|
a095634755 | ||
|
|
1b9ff074af | ||
|
|
f8361446a4 | ||
|
|
5713490f14 | ||
|
|
1ceac2a0a0 | ||
|
|
8a2967a0db | ||
|
|
86297720d5 | ||
|
|
1aeb2b96e2 | ||
|
|
4ee8b9d7f6 | ||
|
|
1d208ed5ec | ||
|
|
3883aaabab | ||
|
|
6fb3c070d0 | ||
|
|
d8d8b4ed73 | ||
|
|
907f46769f | ||
|
|
1ffdb717f7 | ||
|
|
9080603bce | ||
|
|
5796ae9084 | ||
|
|
50636e3a7e | ||
|
|
501d4c9dfc | ||
|
|
84cbc4ae04 | ||
|
|
cbb2a3e46f | ||
|
|
493197c073 | ||
|
|
31a2952101 | ||
|
|
acaccc23e8 | ||
|
|
70e339164d | ||
|
|
0de5d72d75 | ||
|
|
d604cc7faf | ||
|
|
d843a3e359 | ||
|
|
37586662b3 | ||
|
|
193687418f | ||
|
|
72e6bb9537 | ||
|
|
d69e790c61 | ||
|
|
01d41520d4 | ||
|
|
aea9eb9e01 | ||
|
|
26717b13e9 | ||
|
|
5f36417bd9 | ||
|
|
021ea34814 | ||
|
|
4a08fbdf28 | ||
|
|
268753091d | ||
|
|
ec688829b5 | ||
|
|
ec5bf58b0f | ||
|
|
f877d821f0 | ||
|
|
6c22cfef1e | ||
|
|
05305d858b | ||
|
|
e094237bbf | ||
|
|
77eb52bc51 | ||
|
|
c79834cec7 | ||
|
|
aefc5fded7 | ||
|
|
5fd5a5d4fa | ||
|
|
0368ecf7f3 | ||
|
|
d9ec5dcb56 | ||
|
|
030bc6c6b6 | ||
|
|
c1dd2fe0f4 | ||
|
|
4e0851868e | ||
|
|
276178c27c | ||
|
|
3006e6bcbf | ||
|
|
3a50c5686e | ||
|
|
f8eea4d082 | ||
|
|
8a42d77990 | ||
|
|
4064be6577 | ||
|
|
477a3e7263 | ||
|
|
cd0f20ca2f |
@@ -19,7 +19,8 @@ Please note we have a code of conduct, please follow it in all your interactions
|
||||
build.
|
||||
2. Update the README.md with details of changes to the interface, this includes new environment
|
||||
variables, exposed ports, useful file locations and container parameters.
|
||||
3. We will merge the Pull Request in once you have the sign-off.
|
||||
3. Open Pull Request to `dev` branch - we test the component before merging into the `master` branch
|
||||
4. We will merge the Pull Request in once you have the sign-off.
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
|
||||
11
README.md
11
README.md
@@ -24,7 +24,7 @@ curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh |
|
||||
|
||||
## Run:
|
||||
```
|
||||
kubescape scan framework nsa --submit
|
||||
kubescape scan --submit
|
||||
```
|
||||
|
||||
<img src="docs/summary.png">
|
||||
@@ -91,7 +91,7 @@ Set-ExecutionPolicy RemoteSigned -scope CurrentUser
|
||||
| `-e`/`--exclude-namespaces` | Scan all namespaces | Namespaces to exclude from scanning. Recommended to exclude `kube-system` and `kube-public` namespaces | |
|
||||
| `--include-namespaces` | Scan all namespaces | Scan specific namespaces | |
|
||||
| `-s`/`--silent` | Display progress messages | Silent progress messages | |
|
||||
| `-t`/`--fail-threshold` | `0` (do not fail) | fail command (return exit code 1) if result is below threshold | `0` -> `100` |
|
||||
| `-t`/`--fail-threshold` | `100` (do not fail) | fail command (return exit code 1) if result is above threshold | `0` -> `100` |
|
||||
| `-f`/`--format` | `pretty-printer` | Output format | `pretty-printer`/`json`/`junit`/`prometheus` |
|
||||
| `-o`/`--output` | print to stdout | Save scan result in file | |
|
||||
| `--use-from` | | Load local framework object from specified path. If not used will download latest | |
|
||||
@@ -100,6 +100,8 @@ Set-ExecutionPolicy RemoteSigned -scope CurrentUser
|
||||
| `--submit` | `false` | If set, Kubescape will send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not sent | `true`/`false` |
|
||||
| `--keep-local` | `false` | Kubescape will not send scan results to Armo management portal. Use this flag if you ran with the `--submit` flag in the past and you do not want to submit your current scan results | `true`/`false` |
|
||||
| `--account` | | Armo portal account ID. Default will load account ID from configMap or config file | |
|
||||
| `--kube-context` | current-context | Cluster context to scan | |
|
||||
| `--verbose` | `false` | Display all of the input resources and not only failed resources | `true`/`false` |
|
||||
|
||||
|
||||
## Usage & Examples
|
||||
@@ -143,6 +145,11 @@ kubescape scan framework nsa *.yaml
|
||||
kubescape scan framework nsa https://github.com/armosec/kubescape
|
||||
```
|
||||
|
||||
#### Display all scanned resources (including the resources who passed)
|
||||
```
|
||||
kubescape scan framework nsa --verbose
|
||||
```
|
||||
|
||||
#### Output in `json` format
|
||||
```
|
||||
kubescape scan framework nsa --format json --output results.json
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
@@ -15,10 +14,7 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
configMapName = "kubescape"
|
||||
configFileName = "config"
|
||||
)
|
||||
const configFileName = "config"
|
||||
|
||||
func ConfigFileFullPath() string { return getter.GetDefaultPath(configFileName + ".json") }
|
||||
|
||||
@@ -57,163 +53,86 @@ func (co *ConfigObj) Config() []byte {
|
||||
// ======================================================================================
|
||||
// =============================== interface ============================================
|
||||
// ======================================================================================
|
||||
type IClusterConfig interface {
|
||||
|
||||
type ITenantConfig interface {
|
||||
// set
|
||||
SetConfig(customerGUID string) error
|
||||
SetTenant() error
|
||||
|
||||
// getters
|
||||
GetClusterName() string
|
||||
GetCustomerGUID() string
|
||||
GetConfigObj() *ConfigObj
|
||||
GetK8sAPI() *k8sinterface.KubernetesApi
|
||||
GetBackendAPI() getter.IBackend
|
||||
GetDefaultNS() string
|
||||
GenerateURL()
|
||||
}
|
||||
// GetBackendAPI() getter.IBackend
|
||||
// GenerateURL()
|
||||
|
||||
// ClusterConfigSetup - Setup the desired cluster behavior regarding submittion to the Armo BE
|
||||
func ClusterConfigSetup(scanInfo *ScanInfo, k8s *k8sinterface.KubernetesApi, beAPI getter.IBackend) IClusterConfig {
|
||||
/*
|
||||
|
||||
If "First run (local config not found)" -
|
||||
Default - Do not send report (local)
|
||||
Local - Do not send report
|
||||
Submit - Create tenant & Submit report
|
||||
|
||||
If "Submitted but not signed up" -
|
||||
Default - Delete local config & Do not send report (local)
|
||||
Local - Delete local config & Do not send report
|
||||
Submit - Submit report
|
||||
|
||||
If "Signed up user" -
|
||||
Default - Submit report (submit)
|
||||
Local - Do not send report
|
||||
Submit - Submit report
|
||||
|
||||
*/
|
||||
clusterConfig := NewClusterConfig(k8s, beAPI)
|
||||
clusterConfig.LoadConfig()
|
||||
|
||||
if !IsSubmitted(clusterConfig) {
|
||||
if scanInfo.Submit {
|
||||
return clusterConfig // submit - Create tenant & Submit report
|
||||
}
|
||||
return NewEmptyConfig() // local/default - Do not send report
|
||||
}
|
||||
if !IsRegistered(clusterConfig) {
|
||||
if scanInfo.Submit {
|
||||
return clusterConfig // submit/default - Submit report
|
||||
}
|
||||
DeleteConfig(k8s)
|
||||
return NewEmptyConfig() // local - Delete local config & Do not send report
|
||||
}
|
||||
if scanInfo.Local {
|
||||
scanInfo.Submit = false
|
||||
return NewEmptyConfig() // local - Do not send report
|
||||
}
|
||||
scanInfo.Submit = true
|
||||
return clusterConfig // submit/default - Submit report
|
||||
IsConfigFound() bool
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// ============================= Mock Config ============================================
|
||||
// ============================ Local Config ============================================
|
||||
// ======================================================================================
|
||||
type EmptyConfig struct {
|
||||
}
|
||||
|
||||
func NewEmptyConfig() *EmptyConfig { return &EmptyConfig{} }
|
||||
func (c *EmptyConfig) SetConfig(customerGUID string) error { return nil }
|
||||
func (c *EmptyConfig) GetConfigObj() *ConfigObj { return &ConfigObj{} }
|
||||
func (c *EmptyConfig) GetCustomerGUID() string { return "" }
|
||||
func (c *EmptyConfig) GetK8sAPI() *k8sinterface.KubernetesApi { return nil } // TODO: return mock obj
|
||||
func (c *EmptyConfig) GetDefaultNS() string { return k8sinterface.GetDefaultNamespace() }
|
||||
func (c *EmptyConfig) GetBackendAPI() getter.IBackend { return nil } // TODO: return mock obj
|
||||
func (c *EmptyConfig) GetClusterName() string { return adoptClusterName(k8sinterface.GetClusterName()) }
|
||||
func (c *EmptyConfig) GenerateURL() {
|
||||
message := fmt.Sprintf("\nYou can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here: https://%s\n", getter.GetArmoAPIConnector().GetFrontendURL())
|
||||
InfoTextDisplay(os.Stdout, fmt.Sprintf("\n%s\n", message))
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// ========================== Cluster Config ============================================
|
||||
// ======================================================================================
|
||||
|
||||
type ClusterConfig struct {
|
||||
k8s *k8sinterface.KubernetesApi
|
||||
defaultNS string
|
||||
// Config when scanning YAML files or URL but not a Kubernetes cluster
|
||||
type LocalConfig struct {
|
||||
backendAPI getter.IBackend
|
||||
configObj *ConfigObj
|
||||
}
|
||||
|
||||
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend) *ClusterConfig {
|
||||
return &ClusterConfig{
|
||||
k8s: k8s,
|
||||
func NewLocalConfig(backendAPI getter.IBackend, customerGUID, clusterName string) *LocalConfig {
|
||||
var configObj *ConfigObj
|
||||
|
||||
lc := &LocalConfig{
|
||||
backendAPI: backendAPI,
|
||||
configObj: &ConfigObj{},
|
||||
defaultNS: k8sinterface.GetDefaultNamespace(),
|
||||
}
|
||||
// get from configMap
|
||||
if existsConfigFile() { // get from file
|
||||
configObj, _ = loadConfigFromFile()
|
||||
} else {
|
||||
configObj = &ConfigObj{}
|
||||
}
|
||||
if configObj != nil {
|
||||
lc.configObj = configObj
|
||||
}
|
||||
if customerGUID != "" {
|
||||
lc.configObj.CustomerGUID = customerGUID // override config customerGUID
|
||||
}
|
||||
if clusterName != "" {
|
||||
lc.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
|
||||
}
|
||||
if lc.configObj.CustomerGUID != "" {
|
||||
if err := lc.SetTenant(); err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}
|
||||
|
||||
return lc
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
|
||||
func (c *ClusterConfig) GetK8sAPI() *k8sinterface.KubernetesApi { return c.k8s }
|
||||
func (c *ClusterConfig) GetDefaultNS() string { return c.defaultNS }
|
||||
func (c *ClusterConfig) GetBackendAPI() getter.IBackend { return c.backendAPI }
|
||||
|
||||
func (c *ClusterConfig) GenerateURL() {
|
||||
message := "You can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here: "
|
||||
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = getter.GetArmoAPIConnector().GetFrontendURL()
|
||||
if c.configObj == nil {
|
||||
return
|
||||
func (lc *LocalConfig) GetConfigObj() *ConfigObj { return lc.configObj }
|
||||
func (lc *LocalConfig) GetCustomerGUID() string { return lc.configObj.CustomerGUID }
|
||||
func (lc *LocalConfig) SetCustomerGUID(customerGUID string) { lc.configObj.CustomerGUID = customerGUID }
|
||||
func (lc *LocalConfig) GetClusterName() string { return lc.configObj.ClusterName }
|
||||
func (lc *LocalConfig) IsConfigFound() bool { return existsConfigFile() }
|
||||
func (lc *LocalConfig) SetTenant() error {
|
||||
// ARMO tenant GUID
|
||||
if err := getTenantConfigFromBE(lc.backendAPI, lc.configObj); err != nil {
|
||||
return err
|
||||
}
|
||||
if c.configObj.CustomerAdminEMail != "" {
|
||||
InfoTextDisplay(os.Stdout, "\n\n"+message+u.String()+"\n\n")
|
||||
return
|
||||
}
|
||||
u.Path = "account/sign-up"
|
||||
q := u.Query()
|
||||
q.Add("invitationToken", c.configObj.Token)
|
||||
q.Add("customerGUID", c.configObj.CustomerGUID)
|
||||
updateConfigFile(lc.configObj)
|
||||
return nil
|
||||
|
||||
u.RawQuery = q.Encode()
|
||||
InfoTextDisplay(os.Stdout, "\n\n"+message+u.String()+"\n\n")
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) GetCustomerGUID() string {
|
||||
if c.configObj != nil {
|
||||
return c.configObj.CustomerGUID
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) SetConfig(customerGUID string) error {
|
||||
if c.configObj == nil {
|
||||
c.configObj = &ConfigObj{}
|
||||
}
|
||||
|
||||
// cluster name
|
||||
if c.GetClusterName() == "" {
|
||||
c.setClusterName(k8sinterface.GetClusterName())
|
||||
}
|
||||
|
||||
// ARMO customer GUID
|
||||
if customerGUID != "" && c.GetCustomerGUID() != customerGUID {
|
||||
c.setCustomerGUID(customerGUID) // override config customerGUID
|
||||
}
|
||||
|
||||
customerGUID = c.GetCustomerGUID()
|
||||
func getTenantConfigFromBE(backendAPI getter.IBackend, configObj *ConfigObj) error {
|
||||
|
||||
// get from armoBE
|
||||
tenantResponse, err := c.backendAPI.GetCustomerGUID(customerGUID)
|
||||
backendAPI.SetCustomerGUID(configObj.CustomerGUID)
|
||||
tenantResponse, err := backendAPI.GetCustomerGUID()
|
||||
if err == nil && tenantResponse != nil {
|
||||
if tenantResponse.AdminMail != "" { // this customer already belongs to some user
|
||||
c.setCustomerAdminEMail(tenantResponse.AdminMail)
|
||||
} else {
|
||||
c.setToken(tenantResponse.Token)
|
||||
c.setCustomerGUID(tenantResponse.TenantID)
|
||||
if tenantResponse.AdminMail != "" { // registered tenant
|
||||
configObj.CustomerAdminEMail = tenantResponse.AdminMail
|
||||
} else { // new tenant
|
||||
configObj.Token = tenantResponse.Token
|
||||
configObj.CustomerGUID = tenantResponse.TenantID
|
||||
}
|
||||
} else {
|
||||
if err != nil && !strings.Contains(err.Error(), "already exists") {
|
||||
@@ -221,44 +140,100 @@ func (c *ClusterConfig) SetConfig(customerGUID string) error {
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// ========================== Cluster Config ============================================
|
||||
// ======================================================================================
|
||||
|
||||
// ClusterConfig configuration of specific cluster
|
||||
/*
|
||||
|
||||
Supported environments variables:
|
||||
KS_DEFAULT_CONFIGMAP_NAME // name of configmap, if not set default is 'kubescape'
|
||||
KS_DEFAULT_CONFIGMAP_NAMESPACE // configmap namespace, if not set default is 'default'
|
||||
|
||||
TODO - supprot:
|
||||
KS_ACCOUNT // Account ID
|
||||
KS_CACHE // path to cached files
|
||||
*/
|
||||
type ClusterConfig struct {
|
||||
k8s *k8sinterface.KubernetesApi
|
||||
configMapName string
|
||||
configMapNamespace string
|
||||
backendAPI getter.IBackend
|
||||
configObj *ConfigObj
|
||||
}
|
||||
|
||||
func NewClusterConfig(k8s *k8sinterface.KubernetesApi, backendAPI getter.IBackend, customerGUID, clusterName string) *ClusterConfig {
|
||||
var configObj *ConfigObj
|
||||
c := &ClusterConfig{
|
||||
k8s: k8s,
|
||||
backendAPI: backendAPI,
|
||||
configObj: &ConfigObj{},
|
||||
configMapName: getConfigMapName(),
|
||||
configMapNamespace: getConfigMapNamespace(),
|
||||
}
|
||||
|
||||
// get from configMap
|
||||
if c.existsConfigMap() {
|
||||
configObj, _ = c.loadConfigFromConfigMap()
|
||||
}
|
||||
if configObj == nil && existsConfigFile() { // get from file
|
||||
configObj, _ = loadConfigFromFile()
|
||||
}
|
||||
if configObj != nil {
|
||||
c.configObj = configObj
|
||||
}
|
||||
if customerGUID != "" {
|
||||
c.configObj.CustomerGUID = customerGUID // override config customerGUID
|
||||
}
|
||||
if clusterName != "" {
|
||||
c.configObj.ClusterName = AdoptClusterName(clusterName) // override config clusterName
|
||||
}
|
||||
if c.configObj.CustomerGUID != "" {
|
||||
if err := c.SetTenant(); err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}
|
||||
if c.configObj.ClusterName == "" {
|
||||
c.configObj.ClusterName = AdoptClusterName(k8sinterface.GetClusterName())
|
||||
} else { // override the cluster name if it has unwanted characters
|
||||
c.configObj.ClusterName = AdoptClusterName(c.configObj.ClusterName)
|
||||
}
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) GetConfigObj() *ConfigObj { return c.configObj }
|
||||
func (c *ClusterConfig) GetDefaultNS() string { return c.configMapNamespace }
|
||||
func (c *ClusterConfig) GetCustomerGUID() string { return c.configObj.CustomerGUID }
|
||||
func (c *ClusterConfig) SetCustomerGUID(customerGUID string) { c.configObj.CustomerGUID = customerGUID }
|
||||
func (c *ClusterConfig) IsConfigFound() bool {
|
||||
return existsConfigFile() || c.existsConfigMap()
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) SetTenant() error {
|
||||
|
||||
// ARMO tenant GUID
|
||||
if err := getTenantConfigFromBE(c.backendAPI, c.configObj); err != nil {
|
||||
return err
|
||||
}
|
||||
// update/create config
|
||||
if c.existsConfigMap() {
|
||||
c.updateConfigMap()
|
||||
} else {
|
||||
c.createConfigMap()
|
||||
}
|
||||
c.updateConfigFile()
|
||||
|
||||
updateConfigFile(c.configObj)
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) setToken(token string) {
|
||||
c.configObj.Token = token
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) setCustomerAdminEMail(customerAdminEMail string) {
|
||||
c.configObj.CustomerAdminEMail = customerAdminEMail
|
||||
}
|
||||
func (c *ClusterConfig) setCustomerGUID(customerGUID string) {
|
||||
c.configObj.CustomerGUID = customerGUID
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) setClusterName(clusterName string) {
|
||||
c.configObj.ClusterName = adoptClusterName(clusterName)
|
||||
}
|
||||
func (c *ClusterConfig) GetClusterName() string {
|
||||
return c.configObj.ClusterName
|
||||
}
|
||||
func (c *ClusterConfig) LoadConfig() {
|
||||
// get from configMap
|
||||
if c.existsConfigMap() {
|
||||
c.configObj, _ = c.loadConfigFromConfigMap()
|
||||
} else if existsConfigFile() { // get from file
|
||||
c.configObj, _ = loadConfigFromFile()
|
||||
} else {
|
||||
c.configObj = &ConfigObj{}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) ToMapString() map[string]interface{} {
|
||||
m := map[string]interface{}{}
|
||||
@@ -268,10 +243,7 @@ func (c *ClusterConfig) ToMapString() map[string]interface{} {
|
||||
return m
|
||||
}
|
||||
func (c *ClusterConfig) loadConfigFromConfigMap() (*ConfigObj, error) {
|
||||
if c.k8s == nil {
|
||||
return nil, nil
|
||||
}
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -283,14 +255,14 @@ func (c *ClusterConfig) loadConfigFromConfigMap() (*ConfigObj, error) {
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) existsConfigMap() bool {
|
||||
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
|
||||
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
|
||||
// TODO - check if has customerGUID
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) GetValueByKeyFromConfigMap(key string) (string, error) {
|
||||
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -342,11 +314,11 @@ func SetKeyValueInConfigJson(key string, value string) error {
|
||||
|
||||
func (c *ClusterConfig) SetKeyValueInConfigmap(key string, value string) error {
|
||||
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
configMap = &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: configMapName,
|
||||
Name: c.configMapName,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -358,9 +330,9 @@ func (c *ClusterConfig) SetKeyValueInConfigmap(key string, value string) error {
|
||||
configMap.Data[key] = value
|
||||
|
||||
if err != nil {
|
||||
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Create(context.Background(), configMap, metav1.CreateOptions{})
|
||||
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Create(context.Background(), configMap, metav1.CreateOptions{})
|
||||
} else {
|
||||
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(configMap.Namespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
|
||||
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
|
||||
}
|
||||
|
||||
return err
|
||||
@@ -377,12 +349,12 @@ func (c *ClusterConfig) createConfigMap() error {
|
||||
}
|
||||
configMap := &corev1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: configMapName,
|
||||
Name: c.configMapName,
|
||||
},
|
||||
}
|
||||
c.updateConfigData(configMap)
|
||||
|
||||
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Create(context.Background(), configMap, metav1.CreateOptions{})
|
||||
_, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Create(context.Background(), configMap, metav1.CreateOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -390,7 +362,7 @@ func (c *ClusterConfig) updateConfigMap() error {
|
||||
if c.k8s == nil {
|
||||
return nil
|
||||
}
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.defaultNS).Get(context.Background(), configMapName, metav1.GetOptions{})
|
||||
configMap, err := c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Get(context.Background(), c.configMapName, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -398,12 +370,12 @@ func (c *ClusterConfig) updateConfigMap() error {
|
||||
|
||||
c.updateConfigData(configMap)
|
||||
|
||||
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(configMap.Namespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
|
||||
_, err = c.k8s.KubernetesClient.CoreV1().ConfigMaps(c.configMapNamespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) updateConfigFile() error {
|
||||
if err := os.WriteFile(ConfigFileFullPath(), c.configObj.Config(), 0664); err != nil {
|
||||
func updateConfigFile(configObj *ConfigObj) error {
|
||||
if err := os.WriteFile(ConfigFileFullPath(), configObj.Config(), 0664); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@@ -434,21 +406,23 @@ func readConfig(dat []byte) (*ConfigObj, error) {
|
||||
return nil, nil
|
||||
}
|
||||
configObj := &ConfigObj{}
|
||||
err := json.Unmarshal(dat, configObj)
|
||||
|
||||
return configObj, err
|
||||
if err := json.Unmarshal(dat, configObj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return configObj, nil
|
||||
}
|
||||
|
||||
// Check if the customer is submitted
|
||||
func IsSubmitted(clusterConfig *ClusterConfig) bool {
|
||||
func (clusterConfig *ClusterConfig) IsSubmitted() bool {
|
||||
return clusterConfig.existsConfigMap() || existsConfigFile()
|
||||
}
|
||||
|
||||
// Check if the customer is registered
|
||||
func IsRegistered(clusterConfig *ClusterConfig) bool {
|
||||
func (clusterConfig *ClusterConfig) IsRegistered() bool {
|
||||
|
||||
// get from armoBE
|
||||
tenantResponse, err := clusterConfig.backendAPI.GetCustomerGUID(clusterConfig.GetCustomerGUID())
|
||||
clusterConfig.backendAPI.SetCustomerGUID(clusterConfig.GetCustomerGUID())
|
||||
tenantResponse, err := clusterConfig.backendAPI.GetCustomerGUID()
|
||||
if err == nil && tenantResponse != nil {
|
||||
if tenantResponse.AdminMail != "" { // this customer already belongs to some user
|
||||
return true
|
||||
@@ -457,8 +431,8 @@ func IsRegistered(clusterConfig *ClusterConfig) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func DeleteConfig(k8s *k8sinterface.KubernetesApi) error {
|
||||
if err := DeleteConfigMap(k8s); err != nil {
|
||||
func (clusterConfig *ClusterConfig) DeleteConfig() error {
|
||||
if err := clusterConfig.DeleteConfigMap(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := DeleteConfigFile(); err != nil {
|
||||
@@ -466,14 +440,28 @@ func DeleteConfig(k8s *k8sinterface.KubernetesApi) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func DeleteConfigMap(k8s *k8sinterface.KubernetesApi) error {
|
||||
return k8s.KubernetesClient.CoreV1().ConfigMaps(k8sinterface.GetDefaultNamespace()).Delete(context.Background(), configMapName, metav1.DeleteOptions{})
|
||||
func (clusterConfig *ClusterConfig) DeleteConfigMap() error {
|
||||
return clusterConfig.k8s.KubernetesClient.CoreV1().ConfigMaps(clusterConfig.configMapNamespace).Delete(context.Background(), clusterConfig.configMapName, metav1.DeleteOptions{})
|
||||
}
|
||||
|
||||
func DeleteConfigFile() error {
|
||||
return os.Remove(ConfigFileFullPath())
|
||||
}
|
||||
|
||||
func adoptClusterName(clusterName string) string {
|
||||
func AdoptClusterName(clusterName string) string {
|
||||
return strings.ReplaceAll(clusterName, "/", "-")
|
||||
}
|
||||
|
||||
func getConfigMapName() string {
|
||||
if n := os.Getenv("KS_DEFAULT_CONFIGMAP_NAME"); n != "" {
|
||||
return n
|
||||
}
|
||||
return "kubescape"
|
||||
}
|
||||
|
||||
func getConfigMapNamespace() string {
|
||||
if n := os.Getenv("KS_DEFAULT_CONFIGMAP_NAMESPACE"); n != "" {
|
||||
return n
|
||||
}
|
||||
return "default"
|
||||
}
|
||||
|
||||
@@ -2,24 +2,33 @@ package cautils
|
||||
|
||||
import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
|
||||
)
|
||||
|
||||
// K8SResources map[<api group>/<api version>/<resource>]<resource object>
|
||||
type K8SResources map[string]interface{}
|
||||
// K8SResources map[<api group>/<api version>/<resource>][]<resourceID>
|
||||
type K8SResources map[string][]string
|
||||
|
||||
type OPASessionObj struct {
|
||||
Frameworks []reporthandling.Framework
|
||||
K8SResources *K8SResources
|
||||
Exceptions []armotypes.PostureExceptionPolicy
|
||||
PostureReport *reporthandling.PostureReport
|
||||
RegoInputData RegoInputData // map[<control name>][<input arguments>]
|
||||
K8SResources *K8SResources // input k8s objects
|
||||
Frameworks []reporthandling.Framework // list of frameworks to scan
|
||||
AllResources map[string]workloadinterface.IMetadata // all scanned resources, map[<rtesource ID>]<resource>
|
||||
ResourcesResult map[string]resourcesresults.Result // resources scan results, map[<rtesource ID>]<resource result>
|
||||
PostureReport *reporthandling.PostureReport // scan results v1
|
||||
Report *reporthandlingv2.PostureReport // scan results v2
|
||||
Exceptions []armotypes.PostureExceptionPolicy // list of exceptions to apply on scan results
|
||||
RegoInputData RegoInputData // input passed to rgo for scanning. map[<control name>][<input arguments>]
|
||||
}
|
||||
|
||||
func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources) *OPASessionObj {
|
||||
return &OPASessionObj{
|
||||
Frameworks: frameworks,
|
||||
K8SResources: k8sResources,
|
||||
Report: &reporthandlingv2.PostureReport{},
|
||||
Frameworks: frameworks,
|
||||
K8SResources: k8sResources,
|
||||
AllResources: make(map[string]workloadinterface.IMetadata),
|
||||
ResourcesResult: make(map[string]resourcesresults.Result),
|
||||
PostureReport: &reporthandling.PostureReport{
|
||||
ClusterName: ClusterName,
|
||||
CustomerGUID: CustomerGUID,
|
||||
@@ -29,8 +38,11 @@ func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SRe
|
||||
|
||||
func NewOPASessionObjMock() *OPASessionObj {
|
||||
return &OPASessionObj{
|
||||
Frameworks: nil,
|
||||
K8SResources: nil,
|
||||
Frameworks: nil,
|
||||
K8SResources: nil,
|
||||
AllResources: make(map[string]workloadinterface.IMetadata),
|
||||
ResourcesResult: make(map[string]resourcesresults.Result),
|
||||
Report: &reporthandlingv2.PostureReport{},
|
||||
PostureReport: &reporthandling.PostureReport{
|
||||
ClusterName: "",
|
||||
CustomerGUID: "",
|
||||
@@ -56,3 +68,8 @@ type RegoInputData struct {
|
||||
// ClusterName string `json:"clusterName"`
|
||||
// K8sConfig RegoK8sConfig `json:"k8sconfig"`
|
||||
}
|
||||
|
||||
type Policies struct {
|
||||
Frameworks []string
|
||||
Controls map[string]reporthandling.Control // map[<control ID>]<control>
|
||||
}
|
||||
|
||||
@@ -1,26 +1,67 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
pkgcautils "github.com/armosec/utils-go/utils"
|
||||
|
||||
"github.com/open-policy-agent/opa/storage"
|
||||
"github.com/open-policy-agent/opa/storage/inmem"
|
||||
"github.com/open-policy-agent/opa/util"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
func (data *RegoInputData) SetControlsInputs(controlsInputs map[string][]string) {
|
||||
data.PostureControlInputs = controlsInputs
|
||||
func NewPolicies() *Policies {
|
||||
return &Policies{
|
||||
Frameworks: make([]string, 0),
|
||||
Controls: make(map[string]reporthandling.Control),
|
||||
}
|
||||
}
|
||||
|
||||
func (data *RegoInputData) TOStorage() (storage.Store, error) {
|
||||
var jsonObj map[string]interface{}
|
||||
bytesData, err := json.Marshal(*data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
func (policies *Policies) Set(frameworks []reporthandling.Framework, version string) {
|
||||
for i := range frameworks {
|
||||
if frameworks[i].Name != "" {
|
||||
policies.Frameworks = append(policies.Frameworks, frameworks[i].Name)
|
||||
}
|
||||
for j := range frameworks[i].Controls {
|
||||
compatibleRules := []reporthandling.PolicyRule{}
|
||||
for r := range frameworks[i].Controls[j].Rules {
|
||||
if !ruleWithArmoOpaDependency(frameworks[i].Controls[j].Rules[r].Attributes) && isRuleKubescapeVersionCompatible(frameworks[i].Controls[j].Rules[r].Attributes, version) {
|
||||
compatibleRules = append(compatibleRules, frameworks[i].Controls[j].Rules[r])
|
||||
}
|
||||
}
|
||||
if len(compatibleRules) > 0 {
|
||||
frameworks[i].Controls[j].Rules = compatibleRules
|
||||
policies.Controls[frameworks[i].Controls[j].ControlID] = frameworks[i].Controls[j]
|
||||
}
|
||||
}
|
||||
}
|
||||
// glog.Infof("RegoDependenciesData: %s", bytesData)
|
||||
if err := util.UnmarshalJSON(bytesData, &jsonObj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return inmem.NewFromObject(jsonObj), nil
|
||||
}
|
||||
|
||||
func ruleWithArmoOpaDependency(attributes map[string]interface{}) bool {
|
||||
if attributes == nil {
|
||||
return false
|
||||
}
|
||||
if s, ok := attributes["armoOpa"]; ok { // TODO - make global
|
||||
return pkgcautils.StringToBool(s.(string))
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Checks that kubescape version is in range of use for this rule
|
||||
// In local build (BuildNumber = ""):
|
||||
// returns true only if rule doesn't have the "until" attribute
|
||||
func isRuleKubescapeVersionCompatible(attributes map[string]interface{}, version string) bool {
|
||||
if from, ok := attributes["useFromKubescapeVersion"]; ok && from != nil {
|
||||
if version != "" {
|
||||
if from.(string) > BuildNumber {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
if until, ok := attributes["useUntilKubescapeVersion"]; ok && until != nil {
|
||||
if version != "" {
|
||||
if until.(string) <= BuildNumber {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -35,15 +35,15 @@ func ScanStartDisplay() {
|
||||
if IsSilent() {
|
||||
return
|
||||
}
|
||||
InfoDisplay(os.Stdout, "ARMO security scanner starting\n")
|
||||
InfoDisplay(os.Stderr, "ARMO security scanner starting\n")
|
||||
}
|
||||
|
||||
func SuccessTextDisplay(str string) {
|
||||
if IsSilent() {
|
||||
return
|
||||
}
|
||||
SuccessDisplay(os.Stdout, "[success] ")
|
||||
SimpleDisplay(os.Stdout, fmt.Sprintf("%s\n", str))
|
||||
SuccessDisplay(os.Stderr, "[success] ")
|
||||
SimpleDisplay(os.Stderr, fmt.Sprintf("%s\n", str))
|
||||
|
||||
}
|
||||
|
||||
@@ -51,8 +51,8 @@ func ErrorDisplay(str string) {
|
||||
if IsSilent() {
|
||||
return
|
||||
}
|
||||
SuccessDisplay(os.Stdout, "[Error] ")
|
||||
SimpleDisplay(os.Stdout, fmt.Sprintf("%s\n", str))
|
||||
FailureDisplay(os.Stderr, "[Error] ")
|
||||
SimpleDisplay(os.Stderr, fmt.Sprintf("%s\n", str))
|
||||
|
||||
}
|
||||
|
||||
@@ -60,8 +60,8 @@ func ProgressTextDisplay(str string) {
|
||||
if IsSilent() {
|
||||
return
|
||||
}
|
||||
InfoDisplay(os.Stdout, "[progress] ")
|
||||
SimpleDisplay(os.Stdout, fmt.Sprintf("%s\n", str))
|
||||
InfoDisplay(os.Stderr, "[progress] ")
|
||||
SimpleDisplay(os.Stderr, fmt.Sprintf("%s\n", str))
|
||||
|
||||
}
|
||||
func StartSpinner() {
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
package cautils
|
||||
|
||||
type DownloadInfo struct {
|
||||
Path string
|
||||
FrameworkName string
|
||||
ControlName string
|
||||
Path string // directory to save artifact. Default is "~/.kubescape/"
|
||||
FileName string // can be empty
|
||||
Target string // type of artifact to download
|
||||
Name string // name of artifact to download
|
||||
Account string // customerGUID
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/opa-utils/gitregostore"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
@@ -26,7 +25,7 @@ var (
|
||||
|
||||
armoDevERURL = "report.eudev3.cyberarmorsoft.com"
|
||||
armoDevBEURL = "eggdashbe.eudev3.cyberarmorsoft.com"
|
||||
armoDevFEURL = "armoui.eudev3.cyberarmorsoft.com"
|
||||
armoDevFEURL = "armoui-dev.eudev3.cyberarmorsoft.com"
|
||||
)
|
||||
|
||||
// Armo API for downloading policies
|
||||
@@ -36,7 +35,6 @@ type ArmoAPI struct {
|
||||
erURL string
|
||||
feURL string
|
||||
customerGUID string
|
||||
gs *gitregostore.GitRegoStore
|
||||
}
|
||||
|
||||
var globalArmoAPIConnecctor *ArmoAPI
|
||||
@@ -85,7 +83,6 @@ func NewARMOAPICustomized(armoERURL, armoBEURL, armoFEURL string) *ArmoAPI {
|
||||
func newArmoAPI() *ArmoAPI {
|
||||
return &ArmoAPI{
|
||||
httpClient: &http.Client{Timeout: time.Duration(61) * time.Second},
|
||||
gs: gitregostore.InitDefaultGitRegoStore(-1),
|
||||
}
|
||||
}
|
||||
func (armoAPI *ArmoAPI) SetCustomerGUID(customerGUID string) {
|
||||
@@ -103,38 +100,41 @@ func (armoAPI *ArmoAPI) GetReportReceiverURL() string {
|
||||
func (armoAPI *ArmoAPI) GetFramework(name string) (*reporthandling.Framework, error) {
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getFrameworkURL(name), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
framework := &reporthandling.Framework{}
|
||||
if err = JSONDecoder(respStr).Decode(framework); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
SaveFrameworkInFile(framework, GetDefaultPath(name+".json"))
|
||||
SaveInFile(framework, GetDefaultPath(name+".json"))
|
||||
|
||||
return framework, err
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetControl(policyName string) (*reporthandling.Control, error) {
|
||||
var control *reporthandling.Control
|
||||
var err error
|
||||
if strings.HasPrefix(policyName, "C-") || strings.HasPrefix(policyName, "c-") {
|
||||
control, err = armoAPI.gs.GetOPAControlByID(policyName)
|
||||
} else {
|
||||
control, err = armoAPI.gs.GetOPAControlByName(policyName)
|
||||
}
|
||||
func (armoAPI *ArmoAPI) GetFrameworks() ([]reporthandling.Framework, error) {
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
frameworks := []reporthandling.Framework{}
|
||||
if err = JSONDecoder(respStr).Decode(&frameworks); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return control, nil
|
||||
// SaveInFile(framework, GetDefaultPath(name+".json"))
|
||||
|
||||
return frameworks, err
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetExceptions(customerGUID, clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
|
||||
func (armoAPI *ArmoAPI) GetControl(policyName string) (*reporthandling.Control, error) {
|
||||
return nil, fmt.Errorf("control api is not public")
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
|
||||
exceptions := []armotypes.PostureExceptionPolicy{}
|
||||
if customerGUID == "" {
|
||||
return exceptions, nil
|
||||
}
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getExceptionsURL(customerGUID, clusterName), nil)
|
||||
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getExceptionsURL(clusterName), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -146,10 +146,10 @@ func (armoAPI *ArmoAPI) GetExceptions(customerGUID, clusterName string) ([]armot
|
||||
return exceptions, nil
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetCustomerGUID(customerGUID string) (*TenantResponse, error) {
|
||||
func (armoAPI *ArmoAPI) GetCustomerGUID() (*TenantResponse, error) {
|
||||
url := armoAPI.getCustomerURL()
|
||||
if customerGUID != "" {
|
||||
url = fmt.Sprintf("%s?customerGUID=%s", url, customerGUID)
|
||||
if armoAPI.customerGUID != "" {
|
||||
url = fmt.Sprintf("%s?customerGUID=%s", url, armoAPI.customerGUID)
|
||||
}
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, url, nil)
|
||||
if err != nil {
|
||||
@@ -164,12 +164,12 @@ func (armoAPI *ArmoAPI) GetCustomerGUID(customerGUID string) (*TenantResponse, e
|
||||
}
|
||||
|
||||
// ControlsInputs // map[<control name>][<input arguments>]
|
||||
func (armoAPI *ArmoAPI) GetAccountConfig(customerGUID, clusterName string) (*armotypes.CustomerConfig, error) {
|
||||
func (armoAPI *ArmoAPI) GetAccountConfig(clusterName string) (*armotypes.CustomerConfig, error) {
|
||||
accountConfig := &armotypes.CustomerConfig{}
|
||||
if customerGUID == "" {
|
||||
if armoAPI.customerGUID == "" {
|
||||
return accountConfig, nil
|
||||
}
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getAccountConfig(customerGUID, clusterName), nil)
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getAccountConfig(clusterName), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -182,15 +182,15 @@ func (armoAPI *ArmoAPI) GetAccountConfig(customerGUID, clusterName string) (*arm
|
||||
}
|
||||
|
||||
// ControlsInputs // map[<control name>][<input arguments>]
|
||||
func (armoAPI *ArmoAPI) GetControlsInputs(customerGUID, clusterName string) (map[string][]string, error) {
|
||||
accountConfig, err := armoAPI.GetAccountConfig(customerGUID, clusterName)
|
||||
func (armoAPI *ArmoAPI) GetControlsInputs(clusterName string) (map[string][]string, error) {
|
||||
accountConfig, err := armoAPI.GetAccountConfig(clusterName)
|
||||
if err == nil {
|
||||
return accountConfig.Settings.PostureControlInputs, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) ListCustomFrameworks(customerGUID string) ([]string, error) {
|
||||
func (armoAPI *ArmoAPI) ListCustomFrameworks() ([]string, error) {
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -210,7 +210,7 @@ func (armoAPI *ArmoAPI) ListCustomFrameworks(customerGUID string) ([]string, err
|
||||
return frameworkList, nil
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) ListFrameworks(customerGUID string) ([]string, error) {
|
||||
func (armoAPI *ArmoAPI) ListFrameworks() ([]string, error) {
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getListFrameworkURL(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -232,6 +232,10 @@ func (armoAPI *ArmoAPI) ListFrameworks(customerGUID string) ([]string, error) {
|
||||
return frameworkList, nil
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) ListControls(l ListType) ([]string, error) {
|
||||
return nil, fmt.Errorf("control api is not public")
|
||||
}
|
||||
|
||||
type TenantResponse struct {
|
||||
TenantID string `json:"tenantId"`
|
||||
Token string `json:"token"`
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
var NativeFrameworks = []string{"nsa", "mitre", "armobest"}
|
||||
var NativeFrameworks = []string{"nsa", "mitre", "armobest", "devopsbest"}
|
||||
|
||||
func (armoAPI *ArmoAPI) getFrameworkURL(frameworkName string) string {
|
||||
u := url.URL{}
|
||||
@@ -36,14 +36,14 @@ func (armoAPI *ArmoAPI) getListFrameworkURL() string {
|
||||
|
||||
return u.String()
|
||||
}
|
||||
func (armoAPI *ArmoAPI) getExceptionsURL(customerGUID, clusterName string) string {
|
||||
func (armoAPI *ArmoAPI) getExceptionsURL(clusterName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/armoPostureExceptions"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", customerGUID)
|
||||
q.Add("customerGUID", armoAPI.customerGUID)
|
||||
// if clusterName != "" { // TODO - fix customer name support in Armo BE
|
||||
// q.Add("clusterName", clusterName)
|
||||
// }
|
||||
@@ -52,14 +52,14 @@ func (armoAPI *ArmoAPI) getExceptionsURL(customerGUID, clusterName string) strin
|
||||
return u.String()
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) getAccountConfig(customerGUID, clusterName string) string {
|
||||
func (armoAPI *ArmoAPI) getAccountConfig(clusterName string) string {
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
u.Host = armoAPI.apiURL
|
||||
u.Path = "api/v1/armoCustomerConfiguration"
|
||||
|
||||
q := u.Query()
|
||||
q.Add("customerGUID", customerGUID)
|
||||
q.Add("customerGUID", armoAPI.customerGUID)
|
||||
if clusterName != "" { // TODO - fix customer name support in Armo BE
|
||||
q.Add("clusterName", clusterName)
|
||||
}
|
||||
|
||||
@@ -18,7 +18,7 @@ type DownloadReleasedPolicy struct {
|
||||
|
||||
func NewDownloadReleasedPolicy() *DownloadReleasedPolicy {
|
||||
return &DownloadReleasedPolicy{
|
||||
gs: gitregostore.InitDefaultGitRegoStore(-1),
|
||||
gs: gitregostore.NewDefaultGitRegoStore(-1),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -41,6 +41,43 @@ func (drp *DownloadReleasedPolicy) GetFramework(name string) (*reporthandling.Fr
|
||||
return framework, err
|
||||
}
|
||||
|
||||
func (drp *DownloadReleasedPolicy) GetFrameworks() ([]reporthandling.Framework, error) {
|
||||
frameworks, err := drp.gs.GetOPAFrameworks()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return frameworks, err
|
||||
}
|
||||
|
||||
func (drp *DownloadReleasedPolicy) ListFrameworks() ([]string, error) {
|
||||
return drp.gs.GetOPAFrameworksNamesList()
|
||||
}
|
||||
|
||||
func (drp *DownloadReleasedPolicy) ListControls(listType ListType) ([]string, error) {
|
||||
switch listType {
|
||||
case ListID:
|
||||
return drp.gs.GetOPAControlsIDsList()
|
||||
default:
|
||||
return drp.gs.GetOPAControlsNamesList()
|
||||
}
|
||||
}
|
||||
|
||||
func (drp *DownloadReleasedPolicy) GetControlsInputs(clusterName string) (map[string][]string, error) {
|
||||
defaultConfigInputs, err := drp.gs.GetDefaultConfigInputs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return defaultConfigInputs.Settings.PostureControlInputs, err
|
||||
}
|
||||
|
||||
func (drp *DownloadReleasedPolicy) SetRegoObjects() error {
|
||||
fwNames, err := drp.gs.GetOPAFrameworksNamesList()
|
||||
if len(fwNames) != 0 && err == nil {
|
||||
return nil
|
||||
}
|
||||
return drp.gs.SetRegoObjects()
|
||||
}
|
||||
|
||||
func isNativeFramework(framework string) bool {
|
||||
return contains(NativeFrameworks, framework)
|
||||
}
|
||||
|
||||
@@ -5,18 +5,29 @@ import (
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// supported listing
|
||||
type ListType string
|
||||
|
||||
const ListID ListType = "id"
|
||||
const ListName ListType = "name"
|
||||
|
||||
type IPolicyGetter interface {
|
||||
GetFramework(name string) (*reporthandling.Framework, error)
|
||||
GetFrameworks() ([]reporthandling.Framework, error)
|
||||
GetControl(name string) (*reporthandling.Control, error)
|
||||
|
||||
ListFrameworks() ([]string, error)
|
||||
ListControls(ListType) ([]string, error)
|
||||
}
|
||||
|
||||
type IExceptionsGetter interface {
|
||||
GetExceptions(customerGUID, clusterName string) ([]armotypes.PostureExceptionPolicy, error)
|
||||
GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error)
|
||||
}
|
||||
type IBackend interface {
|
||||
GetCustomerGUID(customerGUID string) (*TenantResponse, error)
|
||||
GetCustomerGUID() (*TenantResponse, error)
|
||||
SetCustomerGUID(customerGUID string)
|
||||
}
|
||||
|
||||
type IControlsInputsGetter interface {
|
||||
GetControlsInputs(customerGUID, clusterName string) (map[string][]string, error)
|
||||
GetControlsInputs(clusterName string) (map[string][]string, error)
|
||||
}
|
||||
|
||||
@@ -10,8 +10,6 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
func GetDefaultPath(name string) string {
|
||||
@@ -22,33 +20,8 @@ func GetDefaultPath(name string) string {
|
||||
return defaultfilePath
|
||||
}
|
||||
|
||||
// Save control as json in file
|
||||
func SaveControlInFile(control *reporthandling.Control, pathStr string) error {
|
||||
encodedData, err := json.Marshal(control)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
pathDir := path.Dir(pathStr)
|
||||
if err := os.Mkdir(pathDir, 0744); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
|
||||
}
|
||||
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func SaveFrameworkInFile(framework *reporthandling.Framework, pathStr string) error {
|
||||
encodedData, err := json.Marshal(framework)
|
||||
func SaveInFile(policy interface{}, pathStr string) error {
|
||||
encodedData, err := json.Marshal(policy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -78,7 +78,23 @@ func (lp *LoadPolicy) GetFramework(frameworkName string) (*reporthandling.Framew
|
||||
return framework, err
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) GetExceptions(customerGUID, clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
|
||||
func (lp *LoadPolicy) GetFrameworks() ([]reporthandling.Framework, error) {
|
||||
frameworks := []reporthandling.Framework{}
|
||||
var err error
|
||||
return frameworks, err
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) ListFrameworks() ([]string, error) {
|
||||
// TODO - Support
|
||||
return []string{}, fmt.Errorf("loading frameworks list from file is not supported")
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) ListControls(listType ListType) ([]string, error) {
|
||||
// TODO - Support
|
||||
return []string{}, fmt.Errorf("loading controls list from file is not supported")
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) GetExceptions(clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
|
||||
filePath := lp.filePath()
|
||||
exception := []armotypes.PostureExceptionPolicy{}
|
||||
f, err := os.ReadFile(filePath)
|
||||
@@ -90,7 +106,7 @@ func (lp *LoadPolicy) GetExceptions(customerGUID, clusterName string) ([]armotyp
|
||||
return exception, err
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) GetControlsInputs(customerGUID, clusterName string) (map[string][]string, error) {
|
||||
func (lp *LoadPolicy) GetControlsInputs(clusterName string) (map[string][]string, error) {
|
||||
filePath := lp.filePath()
|
||||
accountConfig := &armotypes.CustomerConfig{}
|
||||
f, err := os.ReadFile(filePath)
|
||||
|
||||
13
cautils/getter/loadpolicy_test.go
Normal file
13
cautils/getter/loadpolicy_test.go
Normal file
@@ -0,0 +1,13 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
var mockFrameworkBasePath = filepath.Join("examples", "mocks", "frameworks")
|
||||
|
||||
func MockNewLoadPolicy() *LoadPolicy {
|
||||
return &LoadPolicy{
|
||||
filePaths: []string{""},
|
||||
}
|
||||
}
|
||||
7
cautils/listpolicies.go
Normal file
7
cautils/listpolicies.go
Normal file
@@ -0,0 +1,7 @@
|
||||
package cautils
|
||||
|
||||
type ListPolicies struct {
|
||||
Target string
|
||||
ListIDs bool
|
||||
Account string
|
||||
}
|
||||
126
cautils/rbac.go
Normal file
126
cautils/rbac.go
Normal file
@@ -0,0 +1,126 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/rbac-utils/rbacscanner"
|
||||
"github.com/armosec/rbac-utils/rbacutils"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
)
|
||||
|
||||
type RBACObjects struct {
|
||||
scanner *rbacscanner.RbacScannerFromK8sAPI
|
||||
}
|
||||
|
||||
func NewRBACObjects(scanner *rbacscanner.RbacScannerFromK8sAPI) *RBACObjects {
|
||||
return &RBACObjects{scanner: scanner}
|
||||
}
|
||||
|
||||
func (rbacObjects *RBACObjects) SetResourcesReport() (*reporthandling.PostureReport, error) {
|
||||
return &reporthandling.PostureReport{
|
||||
ReportID: uuid.NewV4().String(),
|
||||
ReportGenerationTime: time.Now().UTC(),
|
||||
CustomerGUID: rbacObjects.scanner.CustomerGUID,
|
||||
ClusterName: rbacObjects.scanner.ClusterName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (rbacObjects *RBACObjects) ListAllResources() (map[string]workloadinterface.IMetadata, error) {
|
||||
resources, err := rbacObjects.scanner.ListResources()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allresources, err := rbacObjects.rbacObjectsToResources(resources)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return allresources, nil
|
||||
}
|
||||
|
||||
func (rbacObjects *RBACObjects) rbacObjectsToResources(resources *rbacutils.RbacObjects) (map[string]workloadinterface.IMetadata, error) {
|
||||
allresources := map[string]workloadinterface.IMetadata{}
|
||||
|
||||
/*
|
||||
************************************************************************************************************************
|
||||
This code is adding a non valid ID ->
|
||||
(github.com/armosec/rbac-utils v0.0.11): "//SA2WLIDmap/SA2WLIDmap"
|
||||
(github.com/armosec/rbac-utils v0.0.12): "armo.rbac.com/v0beta1//SAID2WLIDmap/SAID2WLIDmap"
|
||||
|
||||
Should be investigated
|
||||
************************************************************************************************************************
|
||||
|
||||
// wrap rbac aggregated objects in IMetadata and add to allresources
|
||||
// TODO - DEPRECATE SA2WLIDmap
|
||||
SA2WLIDmapIMeta, err := rbacutils.SA2WLIDmapIMetadataWrapper(resources.SA2WLIDmap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allresources[SA2WLIDmapIMeta.GetID()] = SA2WLIDmapIMeta
|
||||
|
||||
SAID2WLIDmapIMeta, err := rbacutils.SAID2WLIDmapIMetadataWrapper(resources.SAID2WLIDmap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
allresources[SAID2WLIDmapIMeta.GetID()] = SAID2WLIDmapIMeta
|
||||
|
||||
*/
|
||||
|
||||
// convert rbac k8s resources to IMetadata and add to allresources
|
||||
for _, cr := range resources.ClusterRoles.Items {
|
||||
crmap, err := convertToMap(cr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the the correct apiVersion?
|
||||
crIMeta := workloadinterface.NewWorkloadObj(crmap)
|
||||
crIMeta.SetKind("ClusterRole")
|
||||
allresources[crIMeta.GetID()] = crIMeta
|
||||
}
|
||||
for _, cr := range resources.Roles.Items {
|
||||
crmap, err := convertToMap(cr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the the correct apiVersion?
|
||||
crIMeta := workloadinterface.NewWorkloadObj(crmap)
|
||||
crIMeta.SetKind("Role")
|
||||
allresources[crIMeta.GetID()] = crIMeta
|
||||
}
|
||||
for _, cr := range resources.ClusterRoleBindings.Items {
|
||||
crmap, err := convertToMap(cr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the the correct apiVersion?
|
||||
crIMeta := workloadinterface.NewWorkloadObj(crmap)
|
||||
crIMeta.SetKind("ClusterRoleBinding")
|
||||
allresources[crIMeta.GetID()] = crIMeta
|
||||
}
|
||||
for _, cr := range resources.RoleBindings.Items {
|
||||
crmap, err := convertToMap(cr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
crmap["apiVersion"] = "rbac.authorization.k8s.io/v1" // TODO - is the the correct apiVersion?
|
||||
crIMeta := workloadinterface.NewWorkloadObj(crmap)
|
||||
crIMeta.SetKind("RoleBinding")
|
||||
allresources[crIMeta.GetID()] = crIMeta
|
||||
}
|
||||
return allresources, nil
|
||||
}
|
||||
|
||||
func convertToMap(obj interface{}) (map[string]interface{}, error) {
|
||||
var inInterface map[string]interface{}
|
||||
inrec, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = json.Unmarshal(inrec, &inInterface)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return inInterface, nil
|
||||
}
|
||||
154
cautils/reportv2tov1.go
Normal file
154
cautils/reportv2tov1.go
Normal file
@@ -0,0 +1,154 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
helpersv1 "github.com/armosec/opa-utils/reporthandling/helpers/v1"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/armosec/opa-utils/score"
|
||||
)
|
||||
|
||||
func ReportV2ToV1(opaSessionObj *OPASessionObj) {
|
||||
if len(opaSessionObj.PostureReport.FrameworkReports) > 0 {
|
||||
return // report already converted
|
||||
}
|
||||
|
||||
opaSessionObj.PostureReport.ClusterCloudProvider = opaSessionObj.Report.ClusterCloudProvider
|
||||
|
||||
frameworks := []reporthandling.FrameworkReport{}
|
||||
|
||||
if len(opaSessionObj.Report.SummaryDetails.Frameworks) > 0 {
|
||||
for _, fwv2 := range opaSessionObj.Report.SummaryDetails.Frameworks {
|
||||
fwv1 := reporthandling.FrameworkReport{}
|
||||
fwv1.Name = fwv2.GetName()
|
||||
fwv1.Score = fwv2.GetScore()
|
||||
|
||||
fwv1.ControlReports = append(fwv1.ControlReports, controlReportV2ToV1(opaSessionObj, fwv2.GetName(), fwv2.Controls)...)
|
||||
frameworks = append(frameworks, fwv1)
|
||||
|
||||
}
|
||||
} else {
|
||||
fwv1 := reporthandling.FrameworkReport{}
|
||||
fwv1.Name = ""
|
||||
fwv1.Score = 0
|
||||
|
||||
fwv1.ControlReports = append(fwv1.ControlReports, controlReportV2ToV1(opaSessionObj, "", opaSessionObj.Report.SummaryDetails.Controls)...)
|
||||
frameworks = append(frameworks, fwv1)
|
||||
}
|
||||
|
||||
// // remove unused data
|
||||
// opaSessionObj.Report = nil
|
||||
// opaSessionObj.ResourcesResult = nil
|
||||
|
||||
// setup counters and score
|
||||
for f := range frameworks {
|
||||
// // set exceptions
|
||||
// exceptions.SetFrameworkExceptions(frameworks, opap.Exceptions, cautils.ClusterName)
|
||||
|
||||
// set counters
|
||||
reporthandling.SetUniqueResourcesCounter(&frameworks[f])
|
||||
|
||||
// set default score
|
||||
reporthandling.SetDefaultScore(&frameworks[f])
|
||||
}
|
||||
|
||||
// update score
|
||||
scoreutil := score.NewScore(opaSessionObj.AllResources)
|
||||
scoreutil.Calculate(frameworks)
|
||||
|
||||
opaSessionObj.PostureReport.FrameworkReports = frameworks
|
||||
|
||||
// opaSessionObj.Report.SummaryDetails.Score = 0
|
||||
// for i := range frameworks {
|
||||
// for j := range frameworks[i].ControlReports {
|
||||
// // frameworks[i].ControlReports[j].Score
|
||||
// for w := range opaSessionObj.Report.SummaryDetails.Frameworks {
|
||||
// if opaSessionObj.Report.SummaryDetails.Frameworks[w].Name == frameworks[i].Name {
|
||||
// opaSessionObj.Report.SummaryDetails.Frameworks[w].Score = frameworks[i].Score
|
||||
// }
|
||||
// if c, ok := opaSessionObj.Report.SummaryDetails.Frameworks[w].Controls[frameworks[i].ControlReports[j].ControlID]; ok {
|
||||
// c.Score = frameworks[i].ControlReports[j].Score
|
||||
// opaSessionObj.Report.SummaryDetails.Frameworks[w].Controls[frameworks[i].ControlReports[j].ControlID] = c
|
||||
// }
|
||||
// }
|
||||
// if c, ok := opaSessionObj.Report.SummaryDetails.Controls[frameworks[i].ControlReports[j].ControlID]; ok {
|
||||
// c.Score = frameworks[i].ControlReports[j].Score
|
||||
// opaSessionObj.Report.SummaryDetails.Controls[frameworks[i].ControlReports[j].ControlID] = c
|
||||
// }
|
||||
// }
|
||||
// opaSessionObj.Report.SummaryDetails.Score += opaSessionObj.PostureReport.FrameworkReports[i].Score
|
||||
// }
|
||||
// opaSessionObj.Report.SummaryDetails.Score /= float32(len(opaSessionObj.Report.SummaryDetails.Frameworks))
|
||||
}
|
||||
|
||||
func controlReportV2ToV1(opaSessionObj *OPASessionObj, frameworkName string, controls map[string]reportsummary.ControlSummary) []reporthandling.ControlReport {
|
||||
controlRepors := []reporthandling.ControlReport{}
|
||||
for controlID, crv2 := range controls {
|
||||
crv1 := reporthandling.ControlReport{}
|
||||
crv1.ControlID = controlID
|
||||
crv1.BaseScore = crv2.ScoreFactor
|
||||
crv1.Name = crv2.GetName()
|
||||
crv1.Control_ID = controlID
|
||||
// crv1.Attributes = crv2.
|
||||
crv1.Score = crv2.GetScore()
|
||||
|
||||
// TODO - add fields
|
||||
crv1.Description = crv2.Description
|
||||
crv1.Remediation = crv2.Remediation
|
||||
|
||||
rulesv1 := map[string]reporthandling.RuleReport{}
|
||||
|
||||
for _, resourceID := range crv2.ListResourcesIDs().All() {
|
||||
if result, ok := opaSessionObj.ResourcesResult[resourceID]; ok {
|
||||
for _, rulev2 := range result.ListRulesOfControl(crv2.GetID(), "") {
|
||||
|
||||
if _, ok := rulesv1[rulev2.GetName()]; !ok {
|
||||
rulesv1[rulev2.GetName()] = reporthandling.RuleReport{
|
||||
Name: rulev2.GetName(),
|
||||
RuleStatus: reporthandling.RuleStatus{
|
||||
Status: "success",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
rulev1 := rulesv1[rulev2.GetName()]
|
||||
status := rulev2.GetStatus(&helpersv1.Filters{FrameworkNames: []string{frameworkName}})
|
||||
|
||||
if status.IsFailed() || status.IsExcluded() {
|
||||
|
||||
// rule response
|
||||
ruleResponse := reporthandling.RuleResponse{}
|
||||
ruleResponse.Rulename = rulev2.GetName()
|
||||
for i := range rulev2.Paths {
|
||||
ruleResponse.FailedPaths = append(ruleResponse.FailedPaths, rulev2.Paths[i].FailedPath)
|
||||
}
|
||||
ruleResponse.RuleStatus = string(status.Status())
|
||||
if len(rulev2.Exception) > 0 {
|
||||
ruleResponse.Exception = &rulev2.Exception[0]
|
||||
}
|
||||
|
||||
if fullRessource, ok := opaSessionObj.AllResources[resourceID]; ok {
|
||||
tmp := fullRessource.GetObject()
|
||||
workloadinterface.RemoveFromMap(tmp, "spec")
|
||||
ruleResponse.AlertObject.K8SApiObjects = append(ruleResponse.AlertObject.K8SApiObjects, tmp)
|
||||
}
|
||||
rulev1.RuleResponses = append(rulev1.RuleResponses, ruleResponse)
|
||||
}
|
||||
|
||||
rulev1.ListInputKinds = append(rulev1.ListInputKinds, resourceID)
|
||||
rulesv1[rulev2.GetName()] = rulev1
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(rulesv1) > 0 {
|
||||
for i := range rulesv1 {
|
||||
crv1.RuleReports = append(crv1.RuleReports, rulesv1[i])
|
||||
}
|
||||
}
|
||||
if len(crv1.RuleReports) == 0 {
|
||||
crv1.RuleReports = []reporthandling.RuleReport{}
|
||||
}
|
||||
controlRepors = append(controlRepors, crv1)
|
||||
}
|
||||
return controlRepors
|
||||
}
|
||||
@@ -1,31 +1,73 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
const (
|
||||
ScanCluster string = "cluster"
|
||||
ScanLocalFiles string = "yaml"
|
||||
)
|
||||
|
||||
type BoolPtrFlag struct {
|
||||
valPtr *bool
|
||||
}
|
||||
|
||||
func (bpf *BoolPtrFlag) Type() string {
|
||||
return "bool"
|
||||
}
|
||||
|
||||
func (bpf *BoolPtrFlag) String() string {
|
||||
if bpf.valPtr != nil {
|
||||
return fmt.Sprintf("%v", *bpf.valPtr)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func (bpf *BoolPtrFlag) Get() *bool {
|
||||
return bpf.valPtr
|
||||
}
|
||||
|
||||
func (bpf *BoolPtrFlag) SetBool(val bool) {
|
||||
bpf.valPtr = &val
|
||||
}
|
||||
|
||||
func (bpf *BoolPtrFlag) Set(val string) error {
|
||||
switch val {
|
||||
case "true":
|
||||
bpf.SetBool(true)
|
||||
case "false":
|
||||
bpf.SetBool(false)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ScanInfo struct {
|
||||
Getters
|
||||
PolicyIdentifier []reporthandling.PolicyIdentifier
|
||||
UseExceptions string // Load file with exceptions configuration
|
||||
ControlsInputs string // Load file with inputs for controls
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
ExcludedNamespaces string // DEPRECATED?
|
||||
IncludeNamespaces string // DEPRECATED?
|
||||
InputPatterns []string // Yaml files input patterns
|
||||
Silent bool // Silent mode - Do not print progress logs
|
||||
FailThreshold uint16 // Failure score threshold
|
||||
Submit bool // Submit results to Armo BE
|
||||
Local bool // Do not submit results
|
||||
Account string // account ID
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
UseExceptions string // Load file with exceptions configuration
|
||||
ControlsInputs string // Load file with inputs for controls
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
VerboseMode bool // Display all of the input resources and not only failed resources
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
ExcludedNamespaces string // used for host sensor namespace
|
||||
IncludeNamespaces string // DEPRECATED?
|
||||
InputPatterns []string // Yaml files input patterns
|
||||
Silent bool // Silent mode - Do not print progress logs
|
||||
FailThreshold uint16 // Failure score threshold
|
||||
Submit bool // Submit results to Armo BE
|
||||
HostSensor BoolPtrFlag // Deploy ARMO K8s host sensor to collect data from certain controls
|
||||
Local bool // Do not submit results
|
||||
Account string // account ID
|
||||
// ClusterName string // cluster name
|
||||
KubeContext string // context name
|
||||
FrameworkScan bool // false if scanning control
|
||||
ScanAll bool // true if scan all frameworks
|
||||
}
|
||||
|
||||
type Getters struct {
|
||||
@@ -36,29 +78,10 @@ type Getters struct {
|
||||
|
||||
func (scanInfo *ScanInfo) Init() {
|
||||
scanInfo.setUseFrom()
|
||||
scanInfo.setUseExceptions()
|
||||
scanInfo.setAccountConfig()
|
||||
scanInfo.setOutputFile()
|
||||
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) setUseExceptions() {
|
||||
if scanInfo.UseExceptions != "" {
|
||||
// load exceptions from file
|
||||
scanInfo.ExceptionsGetter = getter.NewLoadPolicy([]string{scanInfo.UseExceptions})
|
||||
} else {
|
||||
scanInfo.ExceptionsGetter = getter.GetArmoAPIConnector()
|
||||
}
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) setAccountConfig() {
|
||||
if scanInfo.ControlsInputs != "" {
|
||||
// load account config from file
|
||||
scanInfo.ControlsInputsGetter = getter.NewLoadPolicy([]string{scanInfo.ControlsInputs})
|
||||
} else {
|
||||
scanInfo.ControlsInputsGetter = getter.GetArmoAPIConnector()
|
||||
}
|
||||
}
|
||||
func (scanInfo *ScanInfo) setUseFrom() {
|
||||
if scanInfo.UseDefault {
|
||||
for _, policy := range scanInfo.PolicyIdentifier {
|
||||
@@ -83,8 +106,11 @@ func (scanInfo *ScanInfo) setOutputFile() {
|
||||
}
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) ScanRunningCluster() bool {
|
||||
return len(scanInfo.InputPatterns) == 0
|
||||
func (scanInfo *ScanInfo) GetScanningEnvironment() string {
|
||||
if len(scanInfo.InputPatterns) != 0 {
|
||||
return ScanLocalFiles
|
||||
}
|
||||
return ScanCluster
|
||||
}
|
||||
|
||||
func (scanInfo *ScanInfo) SetPolicyIdentifiers(policies []string, kind reporthandling.NotificationPolicyKind) {
|
||||
|
||||
@@ -14,11 +14,16 @@ const SKIP_VERSION_CHECK = "KUBESCAPE_SKIP_UPDATE_CHECK"
|
||||
|
||||
var BuildNumber string
|
||||
|
||||
const UnknownBuildNumber = "unknown"
|
||||
|
||||
type IVersionCheckHandler interface {
|
||||
CheckLatestVersion(*VersionCheckRequest) error
|
||||
}
|
||||
|
||||
func NewIVersionCheckHandler() IVersionCheckHandler {
|
||||
if BuildNumber == "" {
|
||||
WarningDisplay(os.Stderr, "Warning: unknown build number, this might affect your scan results. Please make sure you are updated to latest version.\n")
|
||||
}
|
||||
if v, ok := os.LookupEnv(SKIP_VERSION_CHECK); ok && pkgutils.StringToBool(v) {
|
||||
return NewVersionCheckHandlerMock()
|
||||
}
|
||||
@@ -58,7 +63,10 @@ func NewVersionCheckHandler() *VersionCheckHandler {
|
||||
}
|
||||
func NewVersionCheckRequest(buildNumber, frameworkName, frameworkVersion, scanningTarget string) *VersionCheckRequest {
|
||||
if buildNumber == "" {
|
||||
buildNumber = "unknown"
|
||||
buildNumber = UnknownBuildNumber
|
||||
}
|
||||
if scanningTarget == "" {
|
||||
scanningTarget = "unknown"
|
||||
}
|
||||
return &VersionCheckRequest{
|
||||
Client: "kubescape",
|
||||
@@ -75,14 +83,21 @@ func (v *VersionCheckHandlerMock) CheckLatestVersion(versionData *VersionCheckRe
|
||||
}
|
||||
|
||||
func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckRequest) error {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
WarningDisplay(os.Stderr, "failed to get latest version\n")
|
||||
}
|
||||
}()
|
||||
|
||||
latestVersion, err := v.getLatestVersion(versionData)
|
||||
if err != nil || latestVersion == nil {
|
||||
return fmt.Errorf("failed to get latest version: %v", err)
|
||||
return fmt.Errorf("failed to get latest version")
|
||||
}
|
||||
|
||||
if latestVersion.ClientUpdate != "" {
|
||||
fmt.Println(warningMessage(latestVersion.Client, latestVersion.ClientUpdate))
|
||||
if BuildNumber != "" && BuildNumber < latestVersion.ClientUpdate {
|
||||
WarningDisplay(os.Stderr, warningMessage(latestVersion.Client, latestVersion.ClientUpdate), "\n")
|
||||
}
|
||||
}
|
||||
|
||||
// TODO - Enable after supporting framework version
|
||||
@@ -91,7 +106,7 @@ func (v *VersionCheckHandler) CheckLatestVersion(versionData *VersionCheckReques
|
||||
// }
|
||||
|
||||
if latestVersion.Message != "" {
|
||||
fmt.Println(latestVersion.Message)
|
||||
InfoDisplay(os.Stderr, latestVersion.Message, "\n")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
176
clihandler/clidownload.go
Normal file
176
clihandler/clidownload.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package clihandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
)
|
||||
|
||||
var downloadFunc = map[string]func(*cautils.DownloadInfo) error{
|
||||
"controls-inputs": downloadConfigInputs,
|
||||
"exceptions": downloadExceptions,
|
||||
"control": downloadControl,
|
||||
"framework": downloadFramework,
|
||||
"artifacts": downloadArtifacts,
|
||||
}
|
||||
|
||||
func DownloadSupportCommands() []string {
|
||||
commands := []string{}
|
||||
for k := range downloadFunc {
|
||||
commands = append(commands, k)
|
||||
}
|
||||
return commands
|
||||
}
|
||||
|
||||
func CliDownload(downloadInfo *cautils.DownloadInfo) error {
|
||||
setPathandFilename(downloadInfo)
|
||||
if err := downloadArtifact(downloadInfo, downloadFunc); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadArtifact(downloadInfo *cautils.DownloadInfo, downloadArtifactFunc map[string]func(*cautils.DownloadInfo) error) error {
|
||||
if f, ok := downloadArtifactFunc[downloadInfo.Target]; ok {
|
||||
if err := f(downloadInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unknown command to download")
|
||||
}
|
||||
|
||||
func setPathandFilename(downloadInfo *cautils.DownloadInfo) {
|
||||
if downloadInfo.Path == "" {
|
||||
downloadInfo.Path = getter.GetDefaultPath("")
|
||||
} else {
|
||||
dir, file := filepath.Split(downloadInfo.Path)
|
||||
if dir == "" {
|
||||
downloadInfo.Path = file
|
||||
} else {
|
||||
downloadInfo.Path = dir
|
||||
downloadInfo.FileName = file
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func downloadArtifacts(downloadInfo *cautils.DownloadInfo) error {
|
||||
downloadInfo.FileName = ""
|
||||
var artifacts = map[string]func(*cautils.DownloadInfo) error{
|
||||
"controls-inputs": downloadConfigInputs,
|
||||
"exceptions": downloadExceptions,
|
||||
"framework": downloadFramework,
|
||||
}
|
||||
for artifact := range artifacts {
|
||||
if err := downloadArtifact(&cautils.DownloadInfo{Target: artifact, Path: downloadInfo.Path, FileName: fmt.Sprintf("%s.json", artifact)}, artifacts); err != nil {
|
||||
fmt.Printf("error downloading %s, error: %s", artifact, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadConfigInputs(downloadInfo *cautils.DownloadInfo) error {
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
controlsInputsGetter := getConfigInputsGetter(downloadInfo.Name, tenant.GetCustomerGUID(), nil)
|
||||
controlInputs, err := controlsInputsGetter.GetControlsInputs(tenant.GetClusterName())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if downloadInfo.FileName == "" {
|
||||
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
|
||||
}
|
||||
// save in file
|
||||
err = getter.SaveInFile(controlInputs, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadExceptions(downloadInfo *cautils.DownloadInfo) error {
|
||||
var err error
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
exceptionsGetter := getExceptionsGetter("")
|
||||
exceptions := []armotypes.PostureExceptionPolicy{}
|
||||
if tenant.GetCustomerGUID() != "" {
|
||||
exceptions, err = exceptionsGetter.GetExceptions(tenant.GetClusterName())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if downloadInfo.FileName == "" {
|
||||
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Target)
|
||||
}
|
||||
// save in file
|
||||
err = getter.SaveInFile(exceptions, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadFramework(downloadInfo *cautils.DownloadInfo) error {
|
||||
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
g := getPolicyGetter(nil, tenant.GetCustomerGUID(), true, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
// if framework name not specified - download all frameworks
|
||||
frameworks, err := g.GetFrameworks()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, fw := range frameworks {
|
||||
err = getter.SaveInFile(fw, filepath.Join(downloadInfo.Path, (strings.ToLower(fw.Name)+".json")))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s': '%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, fw.Name, filepath.Join(downloadInfo.Path, (strings.ToLower(fw.Name)+".json")))
|
||||
}
|
||||
// return fmt.Errorf("missing framework name")
|
||||
} else {
|
||||
if downloadInfo.FileName == "" {
|
||||
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Name)
|
||||
}
|
||||
framework, err := g.GetFramework(downloadInfo.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = getter.SaveInFile(framework, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadControl(downloadInfo *cautils.DownloadInfo) error {
|
||||
|
||||
tenant := getTenantConfig(downloadInfo.Account, "", getKubernetesApi())
|
||||
g := getPolicyGetter(nil, tenant.GetCustomerGUID(), false, nil)
|
||||
|
||||
if downloadInfo.Name == "" {
|
||||
// TODO - support
|
||||
return fmt.Errorf("missing control name")
|
||||
}
|
||||
if downloadInfo.FileName == "" {
|
||||
downloadInfo.FileName = fmt.Sprintf("%s.json", downloadInfo.Name)
|
||||
}
|
||||
controls, err := g.GetControl(downloadInfo.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = getter.SaveInFile(controls, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("'%s' downloaded successfully and saved at: '%s'\n", downloadInfo.Target, filepath.Join(downloadInfo.Path, downloadInfo.FileName))
|
||||
return nil
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package cliinterfaces
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
@@ -8,10 +9,11 @@ import (
|
||||
|
||||
type ISubmitObjects interface {
|
||||
SetResourcesReport() (*reporthandling.PostureReport, error)
|
||||
ListAllResources() (map[string]workloadinterface.IMetadata, error)
|
||||
}
|
||||
|
||||
type SubmitInterfaces struct {
|
||||
SubmitObjects ISubmitObjects
|
||||
Reporter reporter.IReport
|
||||
ClusterConfig cautils.IClusterConfig
|
||||
ClusterConfig cautils.ITenantConfig
|
||||
}
|
||||
|
||||
58
clihandler/clilist.go
Normal file
58
clihandler/clilist.go
Normal file
@@ -0,0 +1,58 @@
|
||||
package clihandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
)
|
||||
|
||||
var listFunc = map[string]func(*cautils.ListPolicies) ([]string, error){
|
||||
"controls": listControls,
|
||||
"frameworks": listFrameworks,
|
||||
}
|
||||
|
||||
func ListSupportCommands() []string {
|
||||
commands := []string{}
|
||||
for k := range listFunc {
|
||||
commands = append(commands, k)
|
||||
}
|
||||
return commands
|
||||
}
|
||||
func CliList(listPolicies *cautils.ListPolicies) error {
|
||||
if f, ok := listFunc[listPolicies.Target]; ok {
|
||||
policies, err := f(listPolicies)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sort.Strings(policies)
|
||||
|
||||
sep := "\n * "
|
||||
usageCmd := strings.TrimSuffix(listPolicies.Target, "s")
|
||||
fmt.Printf("Supported %s:%s%s\n", listPolicies.Target, sep, strings.Join(policies, sep))
|
||||
fmt.Printf("\nUseage:\n")
|
||||
fmt.Printf("$ kubescape scan %s \"name\"\n", usageCmd)
|
||||
fmt.Printf("$ kubescape scan %s \"name-0\",\"name-1\"\n\n", usageCmd)
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("unknown command to download")
|
||||
}
|
||||
|
||||
func listFrameworks(listPolicies *cautils.ListPolicies) ([]string, error) {
|
||||
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
|
||||
g := getPolicyGetter(nil, tenant.GetCustomerGUID(), true, nil)
|
||||
|
||||
return listFrameworksNames(g), nil
|
||||
}
|
||||
|
||||
func listControls(listPolicies *cautils.ListPolicies) ([]string, error) {
|
||||
tenant := getTenantConfig(listPolicies.Account, "", getKubernetesApi()) // change k8sinterface
|
||||
g := getPolicyGetter(nil, tenant.GetCustomerGUID(), false, nil)
|
||||
l := getter.ListName
|
||||
if listPolicies.ListIDs {
|
||||
l = getter.ListID
|
||||
}
|
||||
return g.ListControls(l)
|
||||
}
|
||||
@@ -11,10 +11,9 @@ import (
|
||||
)
|
||||
|
||||
var getCmd = &cobra.Command{
|
||||
Use: "get <key>",
|
||||
Short: "Get configuration in cluster",
|
||||
Long: ``,
|
||||
ValidArgs: getter.NativeFrameworks,
|
||||
Use: "get <key>",
|
||||
Short: "Get configuration in cluster",
|
||||
Long: ``,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 || len(args) > 1 {
|
||||
return fmt.Errorf("requires one argument")
|
||||
@@ -31,7 +30,7 @@ var getCmd = &cobra.Command{
|
||||
key := keyValue[0]
|
||||
|
||||
k8s := k8sinterface.NewKubernetesApi()
|
||||
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector())
|
||||
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), scanInfo.Account, "")
|
||||
val, err := clusterConfig.GetValueByKeyFromConfigMap(key)
|
||||
if err != nil {
|
||||
if err.Error() == "value does not exist." {
|
||||
|
||||
@@ -30,7 +30,7 @@ var setCmd = &cobra.Command{
|
||||
data := keyValue[1]
|
||||
|
||||
k8s := k8sinterface.NewKubernetesApi()
|
||||
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector())
|
||||
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), scanInfo.Account, "")
|
||||
if err := clusterConfig.SetKeyValueInConfigmap(key, data); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -7,16 +7,34 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
controlExample = `
|
||||
# Scan the 'privileged container' control
|
||||
kubescape scan control "privileged container"
|
||||
|
||||
# Scan list of controls separated with a comma
|
||||
kubescape scan control "privileged container","allowed hostpath"
|
||||
|
||||
# Scan list of controls using the control ID separated with a comma
|
||||
kubescape scan control C-0058,C-0057
|
||||
|
||||
Run 'kubescape list controls' for the list of supported controls
|
||||
|
||||
Control documentation:
|
||||
https://hub.armo.cloud/docs/controls
|
||||
`
|
||||
)
|
||||
|
||||
// controlCmd represents the control command
|
||||
var controlCmd = &cobra.Command{
|
||||
Use: "control <control names list>/<control ids list>.\nExamples:\n$ kubescape scan control C-0058,C-0057 [flags]\n$ kubescape scan contol C-0058 [flags]\n$ kubescape scan control 'privileged container,allowed hostpath' [flags]",
|
||||
Short: fmt.Sprintf("The control you wish to use for scan. It must be present in at least one of the folloiwng frameworks: %s", getter.NativeFrameworks),
|
||||
Use: "control <control names list>/<control ids list>",
|
||||
Short: "The controls you wish to use. Run 'kubescape list controls' for the list of supported controls",
|
||||
Example: controlExample,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
controls := strings.Split(args[0], ",")
|
||||
@@ -35,7 +53,7 @@ var controlCmd = &cobra.Command{
|
||||
scanInfo.PolicyIdentifier = []reporthandling.PolicyIdentifier{}
|
||||
|
||||
if len(args) == 0 {
|
||||
scanInfo.SetPolicyIdentifiers(getter.NativeFrameworks, reporthandling.KindFramework)
|
||||
// scanInfo.SetPolicyIdentifiers(getter.NativeFrameworks, reporthandling.KindFramework)
|
||||
scanInfo.ScanAll = true
|
||||
} else { // expected control or list of control sepparated by ","
|
||||
|
||||
|
||||
@@ -2,65 +2,74 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var downloadInfo cautils.DownloadInfo
|
||||
var downloadInfo = cautils.DownloadInfo{}
|
||||
|
||||
var (
|
||||
downloadExample = `
|
||||
# Download all artifacts and save them in the default path (~/.kubescape)
|
||||
kubescape download artifacts
|
||||
|
||||
# Download all artifacts and save them in /tmp path
|
||||
kubescape download artifacts --output /tmp
|
||||
|
||||
# Download the NSA framework. Run 'kubescape list frameworks' for all frameworks names
|
||||
kubescape download frameworks nsa
|
||||
|
||||
# Download the "Allowed hostPath" control. Run 'kubescape list controls' for all controls names
|
||||
kubescape download control "Allowed hostPath"
|
||||
|
||||
# Download the "C-0001" control. Run 'kubescape list controls --id' for all controls ids
|
||||
kubescape download control C-0001
|
||||
|
||||
# Download the configured exceptions
|
||||
kubescape download exceptions
|
||||
|
||||
# Download the configured controls-inputs
|
||||
kubescape download controls-inputs
|
||||
|
||||
`
|
||||
)
|
||||
var downloadCmd = &cobra.Command{
|
||||
Use: fmt.Sprintf("download framework/control <framework-name>/<control-name> [flags]\nSupported frameworks: %s", getter.NativeFrameworks),
|
||||
Short: "Download framework/control",
|
||||
Long: ``,
|
||||
Use: "download <policy> <policy name>",
|
||||
Short: fmt.Sprintf("Download %s", strings.Join(clihandler.DownloadSupportCommands(), ",")),
|
||||
Long: ``,
|
||||
Example: downloadExample,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 2 {
|
||||
return fmt.Errorf("requires two arguments : framework/control <framework-name>/<control-name>")
|
||||
supported := strings.Join(clihandler.DownloadSupportCommands(), ",")
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("policy type required, supported: %v", supported)
|
||||
}
|
||||
if !strings.EqualFold(args[0], "framework") && !strings.EqualFold(args[0], "control") {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: framework, control", args[0])
|
||||
if cautils.StringInSlice(clihandler.DownloadSupportCommands(), args[0]) == cautils.ValueNotFound {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: %s", args[0], supported)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if strings.EqualFold(args[0], "framework") {
|
||||
downloadInfo.FrameworkName = strings.ToLower(args[1])
|
||||
g := getter.NewDownloadReleasedPolicy()
|
||||
if downloadInfo.Path == "" {
|
||||
downloadInfo.Path = getter.GetDefaultPath(downloadInfo.FrameworkName + ".json")
|
||||
}
|
||||
frameworks, err := g.GetFramework(downloadInfo.FrameworkName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = getter.SaveFrameworkInFile(frameworks, downloadInfo.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if strings.EqualFold(args[0], "control") {
|
||||
downloadInfo.ControlName = strings.ToLower(args[1])
|
||||
g := getter.NewDownloadReleasedPolicy()
|
||||
if downloadInfo.Path == "" {
|
||||
downloadInfo.Path = getter.GetDefaultPath(downloadInfo.ControlName + ".json")
|
||||
}
|
||||
controls, err := g.GetControl(downloadInfo.ControlName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = getter.SaveControlInFile(controls, downloadInfo.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
downloadInfo.Target = args[0]
|
||||
if len(args) >= 2 {
|
||||
downloadInfo.Name = args[1]
|
||||
}
|
||||
if err := clihandler.CliDownload(&downloadInfo); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
// cobra.OnInitialize(initConfig)
|
||||
|
||||
rootCmd.AddCommand(downloadCmd)
|
||||
downloadInfo = cautils.DownloadInfo{}
|
||||
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If specified, will store save to `~/.kubescape/<framework name>.json`")
|
||||
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If not specified, will save in `~/.kubescape/<policy name>.json`")
|
||||
downloadCmd.PersistentFlags().StringVarP(&downloadInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
|
||||
}
|
||||
|
||||
@@ -7,17 +7,46 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
frameworkExample = `
|
||||
# Scan all frameworks and submit the results
|
||||
kubescape scan --submit
|
||||
|
||||
# Scan the NSA framework
|
||||
kubescape scan framework nsa
|
||||
|
||||
# Scan the NSA and MITRE framework
|
||||
kubescape scan framework nsa,mitre
|
||||
|
||||
# Scan all frameworks
|
||||
kubescape scan framework all
|
||||
|
||||
# Scan kubernetes YAML manifest files
|
||||
kubescape scan framework nsa *.yaml
|
||||
|
||||
# Scan and save the results in the JSON format
|
||||
kubescape scan --format json --output results.json
|
||||
|
||||
# Save scan results in JSON format
|
||||
kubescape scan --format json --output results.json
|
||||
|
||||
# Display all resources
|
||||
kubescape scan --verbose
|
||||
|
||||
Run 'kubescape list frameworks' for the list of supported frameworks
|
||||
`
|
||||
)
|
||||
var frameworkCmd = &cobra.Command{
|
||||
Use: fmt.Sprintf("framework <framework names list> [`<glob pattern>`/`-`] [flags]\nExamples:\n$ kubescape scan framework nsa [flags]\n$ kubescape scan framework mitre,nsa [flags]\n$ kubescape scan framework 'nsa, mitre' [flags]\nSupported frameworks: %s", getter.NativeFrameworks),
|
||||
Short: fmt.Sprintf("The framework you wish to use. Supported frameworks: %s", strings.Join(getter.NativeFrameworks, ", ")),
|
||||
Long: "Execute a scan on a running Kubernetes cluster or `yaml`/`json` files (use glob) or `-` for stdin",
|
||||
ValidArgs: getter.NativeFrameworks,
|
||||
Use: "framework <framework names list> [`<glob pattern>`/`-`] [flags]",
|
||||
Short: "The framework you wish to use. Run 'kubescape list frameworks' for the list of supported frameworks",
|
||||
Example: frameworkExample,
|
||||
Long: "Execute a scan on a running Kubernetes cluster or `yaml`/`json` files (use glob) or `-` for stdin",
|
||||
// ValidArgs: getter.NativeFrameworks,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
frameworks := strings.Split(args[0], ",")
|
||||
@@ -36,12 +65,15 @@ var frameworkCmd = &cobra.Command{
|
||||
var frameworks []string
|
||||
|
||||
if len(args) == 0 { // scan all frameworks
|
||||
frameworks = getter.NativeFrameworks
|
||||
// frameworks = getter.NativeFrameworks
|
||||
scanInfo.ScanAll = true
|
||||
} else {
|
||||
// Read frameworks from input args
|
||||
frameworks = strings.Split(args[0], ",")
|
||||
|
||||
if cautils.StringInSlice(frameworks, "all") != cautils.ValueNotFound {
|
||||
scanInfo.ScanAll = true
|
||||
frameworks = []string{}
|
||||
}
|
||||
if len(args) > 1 {
|
||||
if len(args[1:]) == 0 || args[1] != "-" {
|
||||
scanInfo.InputPatterns = args[1:]
|
||||
@@ -59,13 +91,16 @@ var frameworkCmd = &cobra.Command{
|
||||
}
|
||||
}
|
||||
}
|
||||
scanInfo.FrameworkScan = true
|
||||
|
||||
scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
|
||||
|
||||
scanInfo.Init()
|
||||
cautils.SetSilentMode(scanInfo.Silent)
|
||||
err := clihandler.ScanCliSetup(&scanInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
|
||||
66
clihandler/cmd/list.go
Normal file
66
clihandler/cmd/list.go
Normal file
@@ -0,0 +1,66 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var (
|
||||
listExample = `
|
||||
# List default supported frameworks names
|
||||
kubescape list frameworks
|
||||
|
||||
# List all supported frameworks names
|
||||
kubescape list frameworks --account <account id>
|
||||
|
||||
# List all supported controls names
|
||||
kubescape list controls
|
||||
|
||||
# List all supported controls ids
|
||||
kubescape list controls --id
|
||||
|
||||
Control documentation:
|
||||
https://hub.armo.cloud/docs/controls
|
||||
`
|
||||
)
|
||||
var listPolicies = cautils.ListPolicies{}
|
||||
|
||||
var listCmd = &cobra.Command{
|
||||
Use: "list <policy> [flags]",
|
||||
Short: "List frameworks/controls will list the supported frameworks and controls",
|
||||
Long: ``,
|
||||
Example: listExample,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
supported := strings.Join(clihandler.ListSupportCommands(), ",")
|
||||
|
||||
if len(args) < 1 {
|
||||
return fmt.Errorf("policy type requeued, supported: %s", supported)
|
||||
}
|
||||
if cautils.StringInSlice(clihandler.ListSupportCommands(), args[0]) == cautils.ValueNotFound {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: %s", args[0], supported)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
listPolicies.Target = args[0]
|
||||
|
||||
if err := clihandler.CliList(&listPolicies); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
// cobra.OnInitialize(initConfig)
|
||||
|
||||
rootCmd.AddCommand(listCmd)
|
||||
listCmd.PersistentFlags().StringVarP(&listPolicies.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
listCmd.PersistentFlags().BoolVarP(&listPolicies.ListIDs, "id", "", false, "List control ID's instead of controls names")
|
||||
}
|
||||
@@ -3,40 +3,16 @@ package cmd
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliinterfaces"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
|
||||
"github.com/armosec/rbac-utils/rbacscanner"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type RBACObjects struct {
|
||||
scanner *rbacscanner.RbacScannerFromK8sAPI
|
||||
}
|
||||
|
||||
func NewRBACObjects(scanner *rbacscanner.RbacScannerFromK8sAPI) *RBACObjects {
|
||||
return &RBACObjects{scanner: scanner}
|
||||
}
|
||||
|
||||
func (rbacObjects *RBACObjects) SetResourcesReport() (*reporthandling.PostureReport, error) {
|
||||
resources, err := rbacObjects.scanner.ListResources()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &reporthandling.PostureReport{
|
||||
ReportID: uuid.NewV4().String(),
|
||||
ReportGenerationTime: time.Now().UTC(),
|
||||
CustomerGUID: rbacObjects.scanner.CustomerGUID,
|
||||
ClusterName: rbacObjects.scanner.ClusterName,
|
||||
RBACObjects: *resources,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// rabcCmd represents the RBAC command
|
||||
var rabcCmd = &cobra.Command{
|
||||
Use: "rbac \nExample:\n$ kubescape submit rbac",
|
||||
@@ -52,14 +28,11 @@ var rabcCmd = &cobra.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
clusterName := clusterConfig.GetClusterName()
|
||||
customerGUID := clusterConfig.GetCustomerGUID()
|
||||
|
||||
// list RBAC
|
||||
rbacObjects := NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, customerGUID, clusterName))
|
||||
rbacObjects := cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, clusterConfig.GetCustomerGUID(), clusterConfig.GetClusterName()))
|
||||
|
||||
// submit resources
|
||||
r := reporter.NewReportEventReceiver(customerGUID, clusterName)
|
||||
r := reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
|
||||
|
||||
submitInterfaces := cliinterfaces.SubmitInterfaces{
|
||||
ClusterConfig: clusterConfig,
|
||||
|
||||
@@ -7,9 +7,10 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/kubescape/clihandler/cliinterfaces"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -45,6 +46,10 @@ func (resultsObject *ResultsObject) SetResourcesReport() (*reporthandling.Postur
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (resultsObject *ResultsObject) ListAllResources() (map[string]workloadinterface.IMetadata, error) {
|
||||
return map[string]workloadinterface.IMetadata{}, nil
|
||||
}
|
||||
|
||||
var resultsCmd = &cobra.Command{
|
||||
Use: "results <json file>\nExample:\n$ kubescape submit results path/to/results.json",
|
||||
Short: "Submit a pre scanned results file. The file must be in json format",
|
||||
@@ -62,13 +67,10 @@ var resultsCmd = &cobra.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
clusterName := clusterConfig.GetClusterName()
|
||||
customerGUID := clusterConfig.GetCustomerGUID()
|
||||
|
||||
resultsObjects := NewResultsObject(customerGUID, clusterName, args[0])
|
||||
resultsObjects := NewResultsObject(clusterConfig.GetCustomerGUID(), clusterConfig.GetClusterName(), args[0])
|
||||
|
||||
// submit resources
|
||||
r := reporter.NewReportEventReceiver(customerGUID, clusterName)
|
||||
r := reporterv1.NewReportEventReceiver(clusterConfig.GetConfigObj())
|
||||
|
||||
submitInterfaces := cliinterfaces.SubmitInterfaces{
|
||||
ClusterConfig: clusterConfig,
|
||||
|
||||
@@ -35,14 +35,9 @@ func init() {
|
||||
flag.CommandLine.StringVar(&armoBEURLs, "environment", "", envFlagUsage)
|
||||
rootCmd.PersistentFlags().StringVar(&armoBEURLs, "environment", "", envFlagUsage)
|
||||
rootCmd.PersistentFlags().MarkHidden("environment")
|
||||
cobra.OnInitialize(initConfig)
|
||||
|
||||
}
|
||||
|
||||
// initConfig reads in config file and ENV variables if set.
|
||||
func initConfig() {
|
||||
}
|
||||
|
||||
func InitArmoBEConnector() {
|
||||
urlSlices := strings.Split(armoBEURLs, ",")
|
||||
if len(urlSlices) > 3 {
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -27,25 +27,37 @@ var scanCmd = &cobra.Command{
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if len(args) == 0 {
|
||||
scanInfo.ScanAll = true
|
||||
frameworks := getter.NativeFrameworks
|
||||
frameworkArgs := []string{strings.Join(frameworks, ",")}
|
||||
frameworkCmd.RunE(cmd, frameworkArgs)
|
||||
// frameworks := getter.NativeFrameworks
|
||||
// frameworkArgs := []string{strings.Join(frameworks, ",")}
|
||||
frameworkCmd.RunE(cmd, []string{"all"})
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(scanCmd)
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to Armo backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system,kube-public")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.IncludeNamespaces, "include-namespaces", "", "scan specific namespaces. e.g: --include-namespaces ns-a,ns-b")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer"/"json"/"junit"/"prometheus"`)
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Silent, "silent", "s", false, "Silent progress messages")
|
||||
scanCmd.PersistentFlags().Uint16VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 0, "Failure threshold is the percent below which the command fails and returns exit code 1")
|
||||
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to an controls-config obj. If not set will download controls-config from ARMO management portal")
|
||||
func frameworkInitConfig() {
|
||||
k8sinterface.SetClusterContextName(scanInfo.KubeContext)
|
||||
}
|
||||
|
||||
func init() {
|
||||
cobra.OnInitialize(frameworkInitConfig)
|
||||
|
||||
rootCmd.AddCommand(scanCmd)
|
||||
rootCmd.PersistentFlags().StringVarP(&scanInfo.KubeContext, "--kube-context", "", "", "Kube context. Default will use the current-context")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.ControlsInputs, "controls-config", "", "Path to an controls-config obj. If not set will download controls-config from ARMO management portal")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from ARMO management portal")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system,kube-public")
|
||||
scanCmd.PersistentFlags().Uint16VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 100, "Failure threshold is the percent above which the command fails and returns exit code 1")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer"/"json"/"junit"/"prometheus"`)
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.IncludeNamespaces, "include-namespaces", "", "scan specific namespaces. e.g: --include-namespaces ns-a,ns-b")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to Armo backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
|
||||
scanCmd.PersistentFlags().BoolVar(&scanInfo.VerboseMode, "verbose", false, "Display all of the input resources and not only failed resources")
|
||||
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Silent, "silent", "s", false, "Silent progress messages")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
|
||||
|
||||
hostF := scanCmd.PersistentFlags().VarPF(&scanInfo.HostSensor, "enable-host-scan", "", "Deploy ARMO K8s host-sensor daemonset in the scanned cluster. Deleting it right after we collecting the data. Required to collect valueable data from cluster nodes for certain controls")
|
||||
hostF.NoOptDefVal = "true"
|
||||
hostF.DefValue = "false, for no TTY in stdin"
|
||||
}
|
||||
|
||||
@@ -20,8 +20,12 @@ func init() {
|
||||
}
|
||||
|
||||
func getSubmittedClusterConfig(k8s *k8sinterface.KubernetesApi) (*cautils.ClusterConfig, error) {
|
||||
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector())
|
||||
clusterConfig.LoadConfig()
|
||||
err := clusterConfig.SetConfig(scanInfo.Account)
|
||||
return clusterConfig, err
|
||||
clusterConfig := cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), scanInfo.Account, scanInfo.KubeContext) // TODO - support none cluster env submit
|
||||
if clusterConfig.GetCustomerGUID() != "" {
|
||||
if err := clusterConfig.SetTenant(); err != nil {
|
||||
return clusterConfig, err
|
||||
}
|
||||
}
|
||||
|
||||
return clusterConfig, nil
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ var versionCmd = &cobra.Command{
|
||||
Long: ``,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, "", "", ""))
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, "", "", "version"))
|
||||
fmt.Println("Your current version is: " + cautils.BuildNumber)
|
||||
return nil
|
||||
},
|
||||
|
||||
@@ -2,110 +2,126 @@ package clihandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
printerv1 "github.com/armosec/kubescape/resultshandling/printer/v1"
|
||||
|
||||
// printerv2 "github.com/armosec/kubescape/resultshandling/printer/v2"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/clihandler/cliinterfaces"
|
||||
"github.com/armosec/kubescape/hostsensorutils"
|
||||
"github.com/armosec/kubescape/opaprocessor"
|
||||
"github.com/armosec/kubescape/policyhandler"
|
||||
"github.com/armosec/kubescape/resourcehandler"
|
||||
"github.com/armosec/kubescape/resultshandling"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/golang/glog"
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
type componentInterfaces struct {
|
||||
clusterConfig cautils.IClusterConfig
|
||||
resourceHandler resourcehandler.IResourceHandler
|
||||
report reporter.IReport
|
||||
printerHandler printer.IPrinter
|
||||
tenantConfig cautils.ITenantConfig
|
||||
resourceHandler resourcehandler.IResourceHandler
|
||||
report reporter.IReport
|
||||
printerHandler printer.IPrinter
|
||||
hostSensorHandler hostsensorutils.IHostSensor
|
||||
}
|
||||
|
||||
func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
var resourceHandler resourcehandler.IResourceHandler
|
||||
var clusterConfig cautils.IClusterConfig
|
||||
var reportHandler reporter.IReport
|
||||
var scanningTarget string
|
||||
|
||||
if !scanInfo.ScanRunningCluster() {
|
||||
k8sinterface.ConnectedToCluster = false
|
||||
clusterConfig = cautils.NewEmptyConfig()
|
||||
|
||||
// load fom file
|
||||
resourceHandler = resourcehandler.NewFileResourceHandler(scanInfo.InputPatterns)
|
||||
|
||||
// set mock report (do not send report)
|
||||
reportHandler = reporter.NewReportMock()
|
||||
scanningTarget = "yaml"
|
||||
} else {
|
||||
k8s := k8sinterface.NewKubernetesApi()
|
||||
resourceHandler = resourcehandler.NewK8sResourceHandler(k8s, getFieldSelector(scanInfo))
|
||||
clusterConfig = cautils.ClusterConfigSetup(scanInfo, k8s, getter.GetArmoAPIConnector())
|
||||
|
||||
// setup reporter
|
||||
reportHandler = getReporter(scanInfo)
|
||||
scanningTarget = "cluster"
|
||||
var k8s *k8sinterface.KubernetesApi
|
||||
if scanInfo.GetScanningEnvironment() == cautils.ScanCluster {
|
||||
k8s = getKubernetesApi()
|
||||
if k8s == nil {
|
||||
fmt.Println("Failed connecting to Kubernetes cluster")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
tenantConfig := getTenantConfig(scanInfo.Account, scanInfo.KubeContext, k8s)
|
||||
|
||||
// Set submit behavior AFTER loading tenant config
|
||||
setSubmitBehavior(scanInfo, tenantConfig)
|
||||
|
||||
hostSensorHandler := getHostSensorHandler(scanInfo, k8s)
|
||||
if err := hostSensorHandler.Init(); err != nil {
|
||||
errMsg := "failed to init host sensor"
|
||||
if scanInfo.VerboseMode {
|
||||
errMsg = fmt.Sprintf("%s: %v", errMsg, err)
|
||||
}
|
||||
cautils.ErrorDisplay(errMsg)
|
||||
hostSensorHandler = &hostsensorutils.HostSensorHandlerMock{}
|
||||
}
|
||||
// excluding hostsensor namespace
|
||||
if len(scanInfo.IncludeNamespaces) == 0 && hostSensorHandler.GetNamespace() != "" {
|
||||
scanInfo.ExcludedNamespaces = fmt.Sprintf("%s,%s", scanInfo.ExcludedNamespaces, hostSensorHandler.GetNamespace())
|
||||
}
|
||||
|
||||
resourceHandler := getResourceHandler(scanInfo, tenantConfig, k8s, hostSensorHandler)
|
||||
|
||||
// reporting behavior - setup reporter
|
||||
reportHandler := getReporter(tenantConfig, scanInfo.Submit)
|
||||
|
||||
v := cautils.NewIVersionCheckHandler()
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanningTarget))
|
||||
v.CheckLatestVersion(cautils.NewVersionCheckRequest(cautils.BuildNumber, policyIdentifierNames(scanInfo.PolicyIdentifier), "", scanInfo.GetScanningEnvironment()))
|
||||
|
||||
// setup printer
|
||||
printerHandler := printer.GetPrinter(scanInfo.Format)
|
||||
printerHandler := printerv1.GetPrinter(scanInfo.Format, scanInfo.VerboseMode)
|
||||
// printerHandler = printerv2.GetPrinter(scanInfo.Format, scanInfo.VerboseMode)
|
||||
printerHandler.SetWriter(scanInfo.Output)
|
||||
|
||||
return componentInterfaces{
|
||||
clusterConfig: clusterConfig,
|
||||
resourceHandler: resourceHandler,
|
||||
report: reportHandler,
|
||||
printerHandler: printerHandler,
|
||||
}
|
||||
}
|
||||
func setPolicyGetter(scanInfo *cautils.ScanInfo, customerGUID string) {
|
||||
if len(scanInfo.UseFrom) > 0 {
|
||||
//load from file
|
||||
scanInfo.PolicyGetter = getter.NewLoadPolicy(scanInfo.UseFrom)
|
||||
} else {
|
||||
if customerGUID == "" || !scanInfo.FrameworkScan {
|
||||
scanInfo.PolicyGetter = getter.NewDownloadReleasedPolicy()
|
||||
} else {
|
||||
g := getter.GetArmoAPIConnector()
|
||||
g.SetCustomerGUID(customerGUID)
|
||||
scanInfo.PolicyGetter = g
|
||||
if scanInfo.ScanAll {
|
||||
frameworks, err := g.ListCustomFrameworks(customerGUID)
|
||||
if err != nil {
|
||||
glog.Error("failed to get custom frameworks") // handle error
|
||||
return
|
||||
}
|
||||
scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
|
||||
}
|
||||
}
|
||||
tenantConfig: tenantConfig,
|
||||
resourceHandler: resourceHandler,
|
||||
report: reportHandler,
|
||||
printerHandler: printerHandler,
|
||||
hostSensorHandler: hostSensorHandler,
|
||||
}
|
||||
}
|
||||
|
||||
func ScanCliSetup(scanInfo *cautils.ScanInfo) error {
|
||||
cautils.ScanStartDisplay()
|
||||
|
||||
interfaces := getInterfaces(scanInfo)
|
||||
|
||||
setPolicyGetter(scanInfo, interfaces.clusterConfig.GetCustomerGUID())
|
||||
// setPolicyGetter(scanInfo, interfaces.clusterConfig.GetCustomerGUID())
|
||||
|
||||
processNotification := make(chan *cautils.OPASessionObj)
|
||||
reportResults := make(chan *cautils.OPASessionObj)
|
||||
|
||||
if err := interfaces.clusterConfig.SetConfig(scanInfo.Account); err != nil {
|
||||
fmt.Println(err)
|
||||
cautils.ClusterName = interfaces.tenantConfig.GetClusterName() // TODO - Deprecated
|
||||
cautils.CustomerGUID = interfaces.tenantConfig.GetCustomerGUID() // TODO - Deprecated
|
||||
interfaces.report.SetClusterName(interfaces.tenantConfig.GetClusterName())
|
||||
interfaces.report.SetCustomerGUID(interfaces.tenantConfig.GetCustomerGUID())
|
||||
|
||||
downloadReleasedPolicy := getter.NewDownloadReleasedPolicy() // download config inputs from github release
|
||||
|
||||
// set policy getter only after setting the customerGUID
|
||||
scanInfo.Getters.PolicyGetter = getPolicyGetter(scanInfo.UseFrom, interfaces.tenantConfig.GetCustomerGUID(), scanInfo.FrameworkScan, downloadReleasedPolicy)
|
||||
scanInfo.Getters.ControlsInputsGetter = getConfigInputsGetter(scanInfo.ControlsInputs, interfaces.tenantConfig.GetCustomerGUID(), downloadReleasedPolicy)
|
||||
scanInfo.Getters.ExceptionsGetter = getExceptionsGetter(scanInfo.UseExceptions)
|
||||
|
||||
// TODO - list supported frameworks/controls
|
||||
if scanInfo.ScanAll {
|
||||
scanInfo.SetPolicyIdentifiers(listFrameworksNames(scanInfo.Getters.PolicyGetter), reporthandling.KindFramework)
|
||||
}
|
||||
|
||||
cautils.ClusterName = interfaces.clusterConfig.GetClusterName() // TODO - Deprecated
|
||||
cautils.CustomerGUID = interfaces.clusterConfig.GetCustomerGUID() // TODO - Deprecated
|
||||
interfaces.report.SetClusterName(interfaces.clusterConfig.GetClusterName())
|
||||
interfaces.report.SetCustomerGUID(interfaces.clusterConfig.GetCustomerGUID())
|
||||
//
|
||||
defer func() {
|
||||
if err := interfaces.hostSensorHandler.TearDown(); err != nil {
|
||||
errMsg := "failed to tear down host sensor"
|
||||
if scanInfo.VerboseMode {
|
||||
errMsg = fmt.Sprintf("%s: %v", errMsg, err)
|
||||
}
|
||||
cautils.ErrorDisplay(errMsg)
|
||||
}
|
||||
}()
|
||||
|
||||
// cli handler setup
|
||||
go func() {
|
||||
// policy handler setup
|
||||
@@ -127,18 +143,16 @@ func ScanCliSetup(scanInfo *cautils.ScanInfo) error {
|
||||
score := resultsHandling.HandleResults(scanInfo)
|
||||
|
||||
// print report url
|
||||
interfaces.clusterConfig.GenerateURL()
|
||||
interfaces.report.DisplayReportURL()
|
||||
|
||||
adjustedFailThreshold := float32(scanInfo.FailThreshold) / 100
|
||||
if score < adjustedFailThreshold {
|
||||
return fmt.Errorf("Scan score is below threshold")
|
||||
if score > float32(scanInfo.FailThreshold) {
|
||||
return fmt.Errorf("scan risk-score %.2f is above permitted threshold %d", score, scanInfo.FailThreshold)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func Scan(policyHandler *policyhandler.PolicyHandler, scanInfo *cautils.ScanInfo) error {
|
||||
cautils.ScanStartDisplay()
|
||||
policyNotification := &reporthandling.PolicyNotification{
|
||||
NotificationType: reporthandling.TypeExecPostureScan,
|
||||
Rules: scanInfo.PolicyIdentifier,
|
||||
@@ -163,13 +177,38 @@ func Submit(submitInterfaces cliinterfaces.SubmitInterfaces) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allresources, err := submitInterfaces.SubmitObjects.ListAllResources()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// report
|
||||
if err := submitInterfaces.Reporter.ActionSendReport(&cautils.OPASessionObj{PostureReport: postureReport}); err != nil {
|
||||
if err := submitInterfaces.Reporter.ActionSendReport(&cautils.OPASessionObj{PostureReport: postureReport, AllResources: allresources}); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("\nData has been submitted successfully")
|
||||
submitInterfaces.ClusterConfig.GenerateURL()
|
||||
submitInterfaces.Reporter.DisplayReportURL()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func askUserForHostSensor() bool {
|
||||
return false
|
||||
|
||||
if !isatty.IsTerminal(os.Stdin.Fd()) {
|
||||
return false
|
||||
}
|
||||
if ssss, err := os.Stdin.Stat(); err == nil {
|
||||
// fmt.Printf("Found stdin type: %s\n", ssss.Mode().Type())
|
||||
if ssss.Mode().Type()&(fs.ModeDevice|fs.ModeCharDevice) > 0 { //has TTY
|
||||
fmt.Printf("Would you like to scan K8s nodes? [y/N]. This is required to collect valuable data for certain controls\n")
|
||||
fmt.Printf("Use --enable-host-scan flag to suppress this message\n")
|
||||
var b []byte = make([]byte, 1)
|
||||
if n, err := os.Stdin.Read(b); err == nil {
|
||||
if n > 0 && len(b) > 0 && (b[0] == 'y' || b[0] == 'Y') {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -1,23 +1,89 @@
|
||||
package clihandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/hostsensorutils"
|
||||
"github.com/armosec/kubescape/resourcehandler"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
reporterv1 "github.com/armosec/kubescape/resultshandling/reporter/v1"
|
||||
reporterv2 "github.com/armosec/kubescape/resultshandling/reporter/v2"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/rbac-utils/rbacscanner"
|
||||
)
|
||||
|
||||
func getReporter(scanInfo *cautils.ScanInfo) reporter.IReport {
|
||||
if !scanInfo.Submit {
|
||||
return reporter.NewReportMock()
|
||||
// getKubernetesApi
|
||||
func getKubernetesApi() *k8sinterface.KubernetesApi {
|
||||
if !k8sinterface.IsConnectedToCluster() {
|
||||
return nil
|
||||
}
|
||||
if !scanInfo.FrameworkScan {
|
||||
return reporter.NewReportMock()
|
||||
return k8sinterface.NewKubernetesApi()
|
||||
}
|
||||
func getTenantConfig(Account, clusterName string, k8s *k8sinterface.KubernetesApi) cautils.ITenantConfig {
|
||||
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
|
||||
return cautils.NewLocalConfig(getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
}
|
||||
|
||||
return reporter.NewReportEventReceiver("", "")
|
||||
return cautils.NewClusterConfig(k8s, getter.GetArmoAPIConnector(), Account, clusterName)
|
||||
}
|
||||
|
||||
func getExceptionsGetter(useExceptions string) getter.IExceptionsGetter {
|
||||
if useExceptions != "" {
|
||||
// load exceptions from file
|
||||
return getter.NewLoadPolicy([]string{useExceptions})
|
||||
} else {
|
||||
return getter.GetArmoAPIConnector()
|
||||
}
|
||||
}
|
||||
|
||||
func getRBACHandler(tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, submit bool) *cautils.RBACObjects {
|
||||
if submit {
|
||||
return cautils.NewRBACObjects(rbacscanner.NewRbacScannerFromK8sAPI(k8s, tenantConfig.GetCustomerGUID(), tenantConfig.GetClusterName()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getReporter(tenantConfig cautils.ITenantConfig, submit bool) reporter.IReport {
|
||||
if submit {
|
||||
// return reporterv1.NewReportEventReceiver(tenantConfig.GetConfigObj())
|
||||
return reporterv2.NewReportEventReceiver(tenantConfig.GetConfigObj())
|
||||
}
|
||||
return reporterv1.NewReportMock()
|
||||
}
|
||||
|
||||
func getResourceHandler(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig, k8s *k8sinterface.KubernetesApi, hostSensorHandler hostsensorutils.IHostSensor) resourcehandler.IResourceHandler {
|
||||
if len(scanInfo.InputPatterns) > 0 || k8s == nil {
|
||||
return resourcehandler.NewFileResourceHandler(scanInfo.InputPatterns)
|
||||
}
|
||||
rbacObjects := getRBACHandler(tenantConfig, k8s, scanInfo.Submit)
|
||||
return resourcehandler.NewK8sResourceHandler(k8s, getFieldSelector(scanInfo), hostSensorHandler, rbacObjects)
|
||||
}
|
||||
|
||||
func getHostSensorHandler(scanInfo *cautils.ScanInfo, k8s *k8sinterface.KubernetesApi) hostsensorutils.IHostSensor {
|
||||
if !k8sinterface.IsConnectedToCluster() || k8s == nil {
|
||||
return &hostsensorutils.HostSensorHandlerMock{}
|
||||
}
|
||||
|
||||
hasHostSensorControls := true
|
||||
// we need to determined which controls needs host sensor
|
||||
if scanInfo.HostSensor.Get() == nil && hasHostSensorControls {
|
||||
scanInfo.HostSensor.SetBool(askUserForHostSensor())
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: Kubernetes cluster nodes scanning is disabled. This is required to collect valuable data for certain controls. You can enable it using the --enable-host-scan flag\n")
|
||||
}
|
||||
if hostSensorVal := scanInfo.HostSensor.Get(); hostSensorVal != nil && *hostSensorVal {
|
||||
hostSensorHandler, err := hostsensorutils.NewHostSensorHandler(k8s)
|
||||
if err != nil {
|
||||
cautils.WarningDisplay(os.Stderr, fmt.Sprintf("Warning: failed to create host sensor: %v\n", err.Error()))
|
||||
return &hostsensorutils.HostSensorHandlerMock{}
|
||||
}
|
||||
return hostSensorHandler
|
||||
}
|
||||
return &hostsensorutils.HostSensorHandlerMock{}
|
||||
}
|
||||
func getFieldSelector(scanInfo *cautils.ScanInfo) resourcehandler.IFieldSelector {
|
||||
if scanInfo.IncludeNamespaces != "" {
|
||||
return resourcehandler.NewIncludeSelector(scanInfo.IncludeNamespaces)
|
||||
@@ -37,5 +103,119 @@ func policyIdentifierNames(pi []reporthandling.PolicyIdentifier) string {
|
||||
policiesNames += ","
|
||||
}
|
||||
}
|
||||
if policiesNames == "" {
|
||||
policiesNames = "all"
|
||||
}
|
||||
return policiesNames
|
||||
}
|
||||
|
||||
// setSubmitBehavior - Setup the desired cluster behavior regarding submittion to the Armo BE
|
||||
func setSubmitBehavior(scanInfo *cautils.ScanInfo, tenantConfig cautils.ITenantConfig) {
|
||||
|
||||
/*
|
||||
|
||||
If "First run (local config not found)" -
|
||||
Default/keep-local - Do not send report
|
||||
Submit - Create tenant & Submit report
|
||||
|
||||
If "Submitted" -
|
||||
keep-local - Do not send report
|
||||
Default/Submit - Submit report
|
||||
|
||||
*/
|
||||
|
||||
// do not submit control scanning
|
||||
if !scanInfo.FrameworkScan {
|
||||
scanInfo.Submit = false
|
||||
return
|
||||
}
|
||||
|
||||
if tenantConfig.IsConfigFound() { // config found in cache (submitted)
|
||||
if !scanInfo.Local {
|
||||
// Submit report
|
||||
scanInfo.Submit = true
|
||||
}
|
||||
} else { // config not found in cache (not submitted)
|
||||
if scanInfo.Submit {
|
||||
// submit - Create tenant & Submit report
|
||||
if err := tenantConfig.SetTenant(); err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setPolicyGetter set the policy getter - local file/github release/ArmoAPI
|
||||
func getPolicyGetter(loadPoliciesFromFile []string, accountID string, frameworkScope bool, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
|
||||
if len(loadPoliciesFromFile) > 0 {
|
||||
return getter.NewLoadPolicy(loadPoliciesFromFile)
|
||||
}
|
||||
if accountID != "" && frameworkScope {
|
||||
g := getter.GetArmoAPIConnector() // download policy from ARMO backend
|
||||
g.SetCustomerGUID(accountID)
|
||||
return g
|
||||
}
|
||||
if downloadReleasedPolicy == nil {
|
||||
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
|
||||
}
|
||||
return getDownloadReleasedPolicy(downloadReleasedPolicy)
|
||||
|
||||
}
|
||||
|
||||
// func setGetArmoAPIConnector(scanInfo *cautils.ScanInfo, customerGUID string) {
|
||||
// g := getter.GetArmoAPIConnector() // download policy from ARMO backend
|
||||
// g.SetCustomerGUID(customerGUID)
|
||||
// scanInfo.PolicyGetter = g
|
||||
// if scanInfo.ScanAll {
|
||||
// frameworks, err := g.ListCustomFrameworks(customerGUID)
|
||||
// if err != nil {
|
||||
// glog.Error("failed to get custom frameworks") // handle error
|
||||
// return
|
||||
// }
|
||||
// scanInfo.SetPolicyIdentifiers(frameworks, reporthandling.KindFramework)
|
||||
// }
|
||||
// }
|
||||
|
||||
// setConfigInputsGetter sets the config input getter - local file/github release/ArmoAPI
|
||||
func getConfigInputsGetter(ControlsInputs string, accountID string, downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IControlsInputsGetter {
|
||||
if len(ControlsInputs) > 0 {
|
||||
return getter.NewLoadPolicy([]string{ControlsInputs})
|
||||
}
|
||||
if accountID != "" {
|
||||
g := getter.GetArmoAPIConnector() // download config from ARMO backend
|
||||
g.SetCustomerGUID(accountID)
|
||||
return g
|
||||
}
|
||||
if downloadReleasedPolicy == nil {
|
||||
downloadReleasedPolicy = getter.NewDownloadReleasedPolicy()
|
||||
}
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull config inputs, fallback to BE
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: failed to get config inputs from github release, this may affect the scanning results\n")
|
||||
}
|
||||
return downloadReleasedPolicy
|
||||
}
|
||||
|
||||
func getDownloadReleasedPolicy(downloadReleasedPolicy *getter.DownloadReleasedPolicy) getter.IPolicyGetter {
|
||||
if err := downloadReleasedPolicy.SetRegoObjects(); err != nil { // if failed to pull policy, fallback to cache
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: failed to get policies from github release, loading policies from cache\n")
|
||||
return getter.NewLoadPolicy(getDefaultFrameworksPaths())
|
||||
} else {
|
||||
return downloadReleasedPolicy
|
||||
}
|
||||
}
|
||||
|
||||
func getDefaultFrameworksPaths() []string {
|
||||
fwPaths := []string{}
|
||||
for i := range getter.NativeFrameworks {
|
||||
fwPaths = append(fwPaths, getter.GetDefaultPath(getter.NativeFrameworks[i]))
|
||||
}
|
||||
return fwPaths
|
||||
}
|
||||
|
||||
func listFrameworksNames(policyGetter getter.IPolicyGetter) []string {
|
||||
fw, err := policyGetter.ListFrameworks()
|
||||
if err != nil {
|
||||
fw = getDefaultFrameworksPaths()
|
||||
}
|
||||
return fw
|
||||
}
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 44 KiB After Width: | Height: | Size: 53 KiB |
176
docs/proposals/container-image-vulnerability-adaptor.md
Normal file
176
docs/proposals/container-image-vulnerability-adaptor.md
Normal file
@@ -0,0 +1,176 @@
|
||||
# Container image vulnerability adaptor interface proposal
|
||||
|
||||
## Rationale
|
||||
|
||||
source #287
|
||||
|
||||
### Big picture
|
||||
|
||||
* The Kubescape team is planning to create controls which take image vulnerabilities into account, for example: looking for public internet-facing workloads with critical vulnerabilities. These seriously affect the security health of a cluster, and therefore we think it is important to cover them. We think that most container registries do or will support image scanning (like Harbor), and therefore the ability to get information from them is important.
|
||||
* There is information in the image repository which is important for existing controls as well; they are incomplete without it — for example, see this issue: Non-root containers check is broken #19. This is not necessarily image-vulnerability related: it can be information in the image manifest (like the issue before), or it can be related to the image BOM.
|
||||
|
||||
### Relation to this proposal
|
||||
|
||||
There are multiple changes and design decisions that need to be made before Kubescape will support the controls outlined above. However, a focal point of the whole picture is the ability to access vulnerability databases of container images. We anticipate that most container image repositories will support image vulnerability scanning; some major players already do. Since there is no single API which all of these data sources support, it is important to create an adaption layer within Kubescape so different data sources can serve Kubescape's goals.
|
||||
|
||||
## High level design of Kubescape
|
||||
|
||||
### Layers
|
||||
|
||||
* Controls and Rules: that actual control logic implementation, the "tests" themselves. Implemented in rego
|
||||
* OPA engine: the [OPA](https://github.com/open-policy-agent/opa) rego interpreter
|
||||
* Rules processor: Kubescape component; it enumerates and runs the controls while also preparing all the input data that the controls need for running
|
||||
* Data sources: set of different modules providing data to the Rules processor so it can run the controls with them. Examples: Kubernetes objects, cloud vendor API objects and, added in this proposal, the vulnerability information
|
||||
* Cloud Image Vulnerability adaption interface: the subject of this proposal, it gives a common interface for different registry/vulnerability vendors to adapt to.
|
||||
* CIV adaptors: specific implementation of the CIV interface, example Harbor adaption
|
||||
```
|
||||
-----------------------
|
||||
| Controls/Rules (rego) |
|
||||
-----------------------
|
||||
|
|
||||
-----------------------
|
||||
| OPA engine |
|
||||
-----------------------
|
||||
|
|
||||
-----------------------
|
||||
| Rules processor |
|
||||
-----------------------
|
||||
|
|
||||
-----------------------
|
||||
| Data sources |
|
||||
-----------------------
|
||||
|
|
||||
=======================
|
||||
| CIV adaption interface| <- Adding this layer in this proposal
|
||||
=======================
|
||||
|
|
||||
-----------------------
|
||||
| Specific CIV adaptors | <- will be implemented based on this proposal
|
||||
-----------------------
|
||||
|
||||
|
||||
|
||||
```
|
||||
|
||||
## Functionalities to cover
|
||||
|
||||
The interface needs to cover the following functionalities:
|
||||
|
||||
* Authentication against the information source (abstracted login)
|
||||
* Triggering image scan (if applicable, the source might store vulnerabilities for images but cannot scan alone)
|
||||
* Reading image scan status (with last scan date and etc.)
|
||||
* Getting vulnerability information for a given image
|
||||
* Getting image information
|
||||
* Image manifests
|
||||
* Image BOMs (bill of material)
|
||||
|
||||
## Go API proposal
|
||||
|
||||
```
|
||||
|
||||
/*type ContainerImageRegistryCredentials struct {
|
||||
map[string]string
|
||||
Password string
|
||||
Tag string
|
||||
Hash string
|
||||
}*/
|
||||
|
||||
type ContainerImageIdentifier struct {
|
||||
Registry string
|
||||
Repository string
|
||||
Tag string
|
||||
Hash string
|
||||
}
|
||||
|
||||
type ContainerImageScanStatus struct {
|
||||
ImageID ContainerImageIdentifier
|
||||
IsScanAvailable bool
|
||||
IsBomAvailable bool
|
||||
LastScanDate time.Time
|
||||
}
|
||||
|
||||
type ContainerImageVulnerability struct {
|
||||
ImageID ContainerImageIdentifier
|
||||
// TBD
|
||||
}
|
||||
|
||||
type ContainerImageInformation struct {
|
||||
ImageID ContainerImageIdentifier
|
||||
Bom []string
|
||||
ImageManifest Manifest // will use here Docker package definition
|
||||
}
|
||||
|
||||
type IContainerImageVulnerabilityAdaptor interface {
|
||||
// Credentials are coming from user input (CLI or configuration file) and they are abstracted at string to string map level
|
||||
// so and example use would be like registry: "simpledockerregistry:80" and credentials like {"username":"joedoe","password":"abcd1234"}
|
||||
Login(registry string, credentials map[string]string) error
|
||||
|
||||
// For "help" purposes
|
||||
DescribeAdaptor() string
|
||||
|
||||
GetImagesScanStatus(imageIDs []ContainerImageIdentifier) ([]ContainerImageScanStatus, error)
|
||||
|
||||
GetImagesVulnerabilties(imageIDs []ContainerImageIdentifier) ([]ContainerImageVulnerability, error)
|
||||
|
||||
GetImagesInformation(imageIDs []ContainerImageIdentifier) ([]ContainerImageInformation, error)
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
|
||||
# Integration
|
||||
|
||||
# Input
|
||||
|
||||
The objects received from the interface will be converted to `IMetadata`-compatible objects as follows
|
||||
|
||||
```
|
||||
{
|
||||
"apiVersion": "image.vulnscan.com/v1",
|
||||
"kind": "VulnScan",
|
||||
"metadata": {
|
||||
"name": "nginx:latest"
|
||||
},
|
||||
"data": {
|
||||
// returned by the adaptor API (structure like our backend gives for an image
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
# Output
|
||||
|
||||
The rego results will be a combination of the k8s artifact and the list of relevant CVEs for the control
|
||||
|
||||
```
|
||||
{
|
||||
"apiVersion": "result.vulnscan.com/v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {
|
||||
"name": "nginx"
|
||||
},
|
||||
"relatedObjects": [
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {
|
||||
"name": "nginx"
|
||||
},
|
||||
"spec": {
|
||||
// podSpec
|
||||
},
|
||||
},
|
||||
{
|
||||
"apiVersion": "container.vulnscan.com/v1",
|
||||
"kind": "VulnScan",
|
||||
"metadata": {
|
||||
"name": "nginx:latest",
|
||||
},
|
||||
"data": {
|
||||
|
||||
// returned by the adaptor API (structure like our backend gives for an image
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
BIN
docs/summary.png
BIN
docs/summary.png
Binary file not shown.
|
Before Width: | Height: | Size: 62 KiB After Width: | Height: | Size: 60 KiB |
4026
examples/output_mocks/prometheus-verbose-flag.txt
Normal file
4026
examples/output_mocks/prometheus-verbose-flag.txt
Normal file
File diff suppressed because it is too large
Load Diff
1326
examples/output_mocks/prometheus.txt
Normal file
1326
examples/output_mocks/prometheus.txt
Normal file
File diff suppressed because it is too large
Load Diff
27
go.mod
27
go.mod
@@ -3,12 +3,12 @@ module github.com/armosec/kubescape
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/armosec/armoapi-go v0.0.23
|
||||
github.com/armosec/k8s-interface v0.0.8
|
||||
github.com/armosec/opa-utils v0.0.42
|
||||
github.com/armosec/rbac-utils v0.0.1
|
||||
github.com/armosec/armoapi-go v0.0.41
|
||||
github.com/armosec/k8s-interface v0.0.54
|
||||
github.com/armosec/opa-utils v0.0.97
|
||||
github.com/armosec/rbac-utils v0.0.12
|
||||
github.com/armosec/utils-go v0.0.3
|
||||
github.com/briandowns/spinner v1.16.0
|
||||
github.com/briandowns/spinner v1.18.0
|
||||
github.com/enescakir/emoji v1.0.0
|
||||
github.com/fatih/color v1.13.0
|
||||
github.com/gofrs/uuid v4.1.0+incompatible
|
||||
@@ -18,10 +18,12 @@ require (
|
||||
github.com/open-policy-agent/opa v0.33.1
|
||||
github.com/satori/go.uuid v1.2.0
|
||||
github.com/spf13/cobra v1.2.1
|
||||
github.com/stretchr/testify v1.7.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
k8s.io/api v0.22.2
|
||||
k8s.io/apimachinery v0.22.2
|
||||
k8s.io/client-go v0.22.2
|
||||
sigs.k8s.io/yaml v1.2.0
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -33,7 +35,9 @@ require (
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/OneOfOne/xxhash v1.2.8 // indirect
|
||||
github.com/armosec/armo-interfaces v0.0.3 // indirect
|
||||
github.com/armosec/utils-k8s-go v0.0.1 // indirect
|
||||
github.com/aws/aws-sdk-go v1.41.11 // indirect
|
||||
github.com/coreos/go-oidc v2.2.1+incompatible // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/docker v20.10.9+incompatible // indirect
|
||||
@@ -46,30 +50,35 @@ require (
|
||||
github.com/go-logr/logr v0.4.0 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-cmp v0.5.5 // indirect
|
||||
github.com/google/gofuzz v1.1.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/json-iterator/go v1.1.11 // indirect
|
||||
github.com/mattn/go-colorable v0.1.9 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.1 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.1 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/pquerna/cachecontrol v0.1.0 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/yashtewari/glob-intersection v0.0.0-20180916065949-5c77d914dd0b // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.19.1 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 // indirect
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect
|
||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 // indirect
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf // indirect
|
||||
@@ -77,7 +86,10 @@ require (
|
||||
golang.org/x/text v0.3.6 // indirect
|
||||
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||
gonum.org/v1/gonum v0.9.1 // indirect
|
||||
google.golang.org/api v0.44.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c // indirect
|
||||
google.golang.org/grpc v1.38.0 // indirect
|
||||
google.golang.org/protobuf v1.27.1 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
|
||||
@@ -86,5 +98,4 @@ require (
|
||||
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a // indirect
|
||||
sigs.k8s.io/controller-runtime v0.10.2 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
|
||||
sigs.k8s.io/yaml v1.2.0 // indirect
|
||||
)
|
||||
|
||||
37
go.sum
37
go.sum
@@ -83,15 +83,23 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
github.com/armosec/armo-interfaces v0.0.3 h1:kG4mJIPgWBJvQFDDy8JzdqX3ASbyl8t32IuJYqB31Pk=
|
||||
github.com/armosec/armo-interfaces v0.0.3/go.mod h1:7XYefhcBCFYoF5LflCZHWuUHu+JrSJbmzk0zoNv2WlU=
|
||||
github.com/armosec/armoapi-go v0.0.2/go.mod h1:vIK17yoKbJRQyZXWWLe3AqfqCRITxW8qmSkApyq5xFs=
|
||||
github.com/armosec/armoapi-go v0.0.23 h1:jqoLIWM5CR7DCD9fpFgN0ePqtHvOCoZv/XzCwsUluJU=
|
||||
github.com/armosec/armoapi-go v0.0.23/go.mod h1:iaVVGyc23QGGzAdv4n+szGQg3Rbpixn9yQTU3qWRpaw=
|
||||
github.com/armosec/k8s-interface v0.0.8 h1:Eo3Qen4yFXxzVem49FNeij2ckyzHSAJ0w6PZMaSEIm8=
|
||||
github.com/armosec/armoapi-go v0.0.41 h1:iMkaCsME+zhE6vnCOMaqfqc0cp7pste8QFHojeGKfGg=
|
||||
github.com/armosec/armoapi-go v0.0.41/go.mod h1:exk1O3rK6V+X8SSyxc06lwb0j9ILQuKAoIdz9hs6Ndw=
|
||||
github.com/armosec/k8s-interface v0.0.8/go.mod h1:xxS+V5QT3gVQTwZyAMMDrYLWGrfKOpiJ7Jfhfa0w9sM=
|
||||
github.com/armosec/opa-utils v0.0.42 h1:7YzQJNVBmM0+1nWOAiUgDt+mvlVEwApg80FjMh4oxXo=
|
||||
github.com/armosec/opa-utils v0.0.42/go.mod h1:OqewZoSqKD5udtQ4lGFixb8yyFNqLq9zqinlAL6KSjM=
|
||||
github.com/armosec/rbac-utils v0.0.1 h1:N2MI98F/0zbDjmRZ29CNElU1AXkFLk5csd/qAHOBdXY=
|
||||
github.com/armosec/k8s-interface v0.0.37/go.mod h1:vHxGWqD/uh6+GQb9Sqv7OGMs+Rvc2dsFVc0XtgRh1ZU=
|
||||
github.com/armosec/k8s-interface v0.0.50/go.mod h1:vHxGWqD/uh6+GQb9Sqv7OGMs+Rvc2dsFVc0XtgRh1ZU=
|
||||
github.com/armosec/k8s-interface v0.0.54 h1:1sQeoEZA5bgpXVibXhEiTSeLd3GKY5NkTOeewdgR0Bs=
|
||||
github.com/armosec/k8s-interface v0.0.54/go.mod h1:vHxGWqD/uh6+GQb9Sqv7OGMs+Rvc2dsFVc0XtgRh1ZU=
|
||||
github.com/armosec/opa-utils v0.0.64/go.mod h1:6tQP8UDq2EvEfSqh8vrUdr/9QVSCG4sJfju1SXQOn4c=
|
||||
github.com/armosec/opa-utils v0.0.97 h1:KPjRZdsAC9EObo17QxiW+s5KWmF6vNFu+VQSOgFv5uk=
|
||||
github.com/armosec/opa-utils v0.0.97/go.mod h1:BNTjeianyXlflJMz3bZM0GimBWqmzirUf1whWR6Os04=
|
||||
github.com/armosec/rbac-utils v0.0.1/go.mod h1:pQ8CBiij8kSKV7aeZm9FMvtZN28VgA7LZcYyTWimq40=
|
||||
github.com/armosec/rbac-utils v0.0.12 h1:uJpMGDyLAX129PrKHp6NPNB6lVRhE0OZIwV6ywzSDrs=
|
||||
github.com/armosec/rbac-utils v0.0.12/go.mod h1:Ex/IdGWhGv9HZq6Hs8N/ApzCKSIvpNe/ETqDfnuyah0=
|
||||
github.com/armosec/utils-go v0.0.2/go.mod h1:itWmRLzRdsnwjpEOomL0mBWGnVNNIxSjDAdyc+b0iUo=
|
||||
github.com/armosec/utils-go v0.0.3 h1:uyQI676yRciQM0sSN9uPoqHkbspTxHO0kmzXhBeE/xU=
|
||||
github.com/armosec/utils-go v0.0.3/go.mod h1:itWmRLzRdsnwjpEOomL0mBWGnVNNIxSjDAdyc+b0iUo=
|
||||
@@ -99,6 +107,7 @@ github.com/armosec/utils-k8s-go v0.0.1 h1:Ay3y7fW+4+FjVc0+obOWm8YsnEvM31vPAVoKTy
|
||||
github.com/armosec/utils-k8s-go v0.0.1/go.mod h1:qrU4pmY2iZsOb39Eltpm0sTTNM3E4pmeyWx4dgDUC2U=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/aws/aws-sdk-go v1.41.1/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/aws/aws-sdk-go v1.41.11 h1:QLouWsiYQ8i22kD8k58Dpdhio1A0MpT7bg9ZNXqEjuI=
|
||||
github.com/aws/aws-sdk-go v1.41.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
@@ -112,8 +121,8 @@ github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqO
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/briandowns/spinner v1.16.0 h1:DFmp6hEaIx2QXXuqSJmtfSBSAjRmpGiKG6ip2Wm/yOs=
|
||||
github.com/briandowns/spinner v1.16.0/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
|
||||
github.com/briandowns/spinner v1.18.0 h1:SJs0maNOs4FqhBwiJ3Gr7Z1D39/rukIVGQvpNZVHVcM=
|
||||
github.com/briandowns/spinner v1.18.0/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ=
|
||||
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
github.com/bytecodealliance/wasmtime-go v0.30.0 h1:WfYpr4WdqInt8m5/HvYinf+HrSEAIhItKIcth+qb1h4=
|
||||
github.com/bytecodealliance/wasmtime-go v0.30.0/go.mod h1:q320gUxqyI8yB+ZqRuaJOEnGkAnHh6WtJjMaT2CW4wI=
|
||||
@@ -331,9 +340,11 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
|
||||
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
|
||||
@@ -378,7 +389,9 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
|
||||
@@ -483,8 +496,9 @@ github.com/open-policy-agent/opa v0.33.1 h1:EJe00U5H82iMsemgxcNm9RFwjW8zPyRMvL+0
|
||||
github.com/open-policy-agent/opa v0.33.1/go.mod h1:Zb+IdRe0s7M++Rv/KgyuB0qvxO3CUpQ+ZW5v+w/cRUo=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
|
||||
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
@@ -685,8 +699,9 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -882,6 +897,7 @@ golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -1008,6 +1024,7 @@ google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34q
|
||||
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
|
||||
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
|
||||
google.golang.org/api v0.44.0 h1:URs6qR1lAxDsqWITsQXI4ZkGiYJ5dHtRNiCpfs2OeKA=
|
||||
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -1065,6 +1082,7 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D
|
||||
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0=
|
||||
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
@@ -1089,6 +1107,7 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
|
||||
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
|
||||
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
|
||||
208
hostsensorutils/hostsensordeploy.go
Normal file
208
hostsensorutils/hostsensordeploy.go
Normal file
@@ -0,0 +1,208 @@
|
||||
package hostsensorutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
appsapplyv1 "k8s.io/client-go/applyconfigurations/apps/v1"
|
||||
coreapplyv1 "k8s.io/client-go/applyconfigurations/core/v1"
|
||||
)
|
||||
|
||||
// HostSensorHandler deploys the kubescape host-sensor DaemonSet into the
// scanned cluster and tracks the sensor pods serving host data.
type HostSensorHandler struct {
	HostSensorPort     int32             // container port exposed by the sensor pods (taken from the applied DaemonSet)
	HostSensorPodNames map[string]string // map from pod names to node names
	IsReady            <-chan bool       // readonly chan
	k8sObj             *k8sinterface.KubernetesApi
	DaemonSet          *appsv1.DaemonSet // the applied DaemonSet; nil until applyYAML succeeds
	podListLock        sync.RWMutex      // guards HostSensorPodNames (updated from a watch goroutine)
	gracePeriod        int64             // grace period (seconds) used when deleting deployed objects
}
|
||||
|
||||
func NewHostSensorHandler(k8sObj *k8sinterface.KubernetesApi) (*HostSensorHandler, error) {
|
||||
|
||||
if k8sObj == nil {
|
||||
return nil, fmt.Errorf("nil k8s interface received")
|
||||
}
|
||||
hsh := &HostSensorHandler{
|
||||
k8sObj: k8sObj,
|
||||
HostSensorPodNames: map[string]string{},
|
||||
gracePeriod: int64(15),
|
||||
}
|
||||
// Don't deploy on cluster with no nodes. Some cloud providers prevents termination of K8s objects for cluster with no nodes!!!
|
||||
if nodeList, err := k8sObj.KubernetesClient.CoreV1().Nodes().List(k8sObj.Context, metav1.ListOptions{}); err != nil || len(nodeList.Items) == 0 {
|
||||
if err == nil {
|
||||
err = fmt.Errorf("no nodes to scan")
|
||||
}
|
||||
return hsh, fmt.Errorf("in NewHostSensorHandler, failed to get nodes list: %v", err)
|
||||
}
|
||||
|
||||
return hsh, nil
|
||||
}
|
||||
|
||||
// Init deploys the host-sensor YAML (namespace + DaemonSet), starts the
// background pod watcher, and waits for a sensor pod on every node.
// A failed readiness check is only logged — deployment still counts as
// successful.
func (hsh *HostSensorHandler) Init() error {
	// deploy the YAML
	// store namespace + port
	// store pod names
	// make sure all pods are running, after X seconds treat has running anyway, and log an error on the pods not running yet
	cautils.ProgressTextDisplay("Installing host sensor")
	cautils.StartSpinner()
	defer cautils.StopSpinner()
	if err := hsh.applyYAML(); err != nil {
		return fmt.Errorf("in HostSensorHandler init failed to apply YAML: %v", err)
	}
	// Start the watcher that keeps HostSensorPodNames current.
	hsh.populatePodNamesToNodeNames()
	if err := hsh.checkPodForEachNode(); err != nil {
		// Best effort: report but do not fail the whole deployment.
		fmt.Printf("failed to validate host-sensor pods status: %v", err)
	}
	return nil
}
|
||||
|
||||
func (hsh *HostSensorHandler) applyYAML() error {
|
||||
dec := yaml.NewDocumentDecoder(io.NopCloser(strings.NewReader(hostSensorYAML)))
|
||||
// apply namespace
|
||||
singleYAMLBytes := make([]byte, 4096)
|
||||
if readLen, err := dec.Read(singleYAMLBytes); err != nil {
|
||||
return fmt.Errorf("failed to read YAML of namespace: %v", err)
|
||||
} else {
|
||||
singleYAMLBytes = singleYAMLBytes[:readLen]
|
||||
}
|
||||
namespaceAC := &coreapplyv1.NamespaceApplyConfiguration{}
|
||||
if err := yaml.Unmarshal(singleYAMLBytes, namespaceAC); err != nil {
|
||||
return fmt.Errorf("failed to Unmarshal YAML of namespace: %v", err)
|
||||
}
|
||||
namespaceName := ""
|
||||
|
||||
if ns, err := hsh.k8sObj.KubernetesClient.CoreV1().Namespaces().Apply(hsh.k8sObj.Context, namespaceAC, metav1.ApplyOptions{
|
||||
FieldManager: "kubescape",
|
||||
}); err != nil {
|
||||
return fmt.Errorf("failed to apply YAML of namespace: %v", err)
|
||||
} else {
|
||||
namespaceName = ns.Name
|
||||
}
|
||||
// apply DaemonSet
|
||||
daemonAC := &appsapplyv1.DaemonSetApplyConfiguration{}
|
||||
singleYAMLBytes = make([]byte, 4096)
|
||||
if readLen, err := dec.Read(singleYAMLBytes); err != nil {
|
||||
if erra := hsh.tearDownNamesapce(namespaceName); erra != nil {
|
||||
err = fmt.Errorf("%v; In addidtion %v", err, erra)
|
||||
}
|
||||
return fmt.Errorf("failed to read YAML of DaemonSet: %v", err)
|
||||
} else {
|
||||
singleYAMLBytes = singleYAMLBytes[:readLen]
|
||||
}
|
||||
if err := yaml.Unmarshal(singleYAMLBytes, daemonAC); err != nil {
|
||||
if erra := hsh.tearDownNamesapce(namespaceName); erra != nil {
|
||||
err = fmt.Errorf("%v; In addidtion %v", err, erra)
|
||||
}
|
||||
return fmt.Errorf("failed to Unmarshal YAML of DaemonSet: %v", err)
|
||||
}
|
||||
daemonAC.Namespace = &namespaceName
|
||||
if ds, err := hsh.k8sObj.KubernetesClient.AppsV1().DaemonSets(namespaceName).Apply(hsh.k8sObj.Context, daemonAC, metav1.ApplyOptions{
|
||||
FieldManager: "kubescape",
|
||||
}); err != nil {
|
||||
if erra := hsh.tearDownNamesapce(namespaceName); erra != nil {
|
||||
err = fmt.Errorf("%v; In addidtion %v", err, erra)
|
||||
}
|
||||
return fmt.Errorf("failed to apply YAML of DaemonSet: %v", err)
|
||||
} else {
|
||||
hsh.HostSensorPort = ds.Spec.Template.Spec.Containers[0].Ports[0].ContainerPort
|
||||
hsh.DaemonSet = ds
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hsh *HostSensorHandler) checkPodForEachNode() error {
|
||||
deadline := time.Now().Add(time.Second * 100)
|
||||
for {
|
||||
nodesList, err := hsh.k8sObj.KubernetesClient.CoreV1().Nodes().List(hsh.k8sObj.Context, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("in checkPodsForEveryNode, failed to get nodes list: %v", nodesList)
|
||||
}
|
||||
hsh.podListLock.RLock()
|
||||
podsNum := len(hsh.HostSensorPodNames)
|
||||
hsh.podListLock.RUnlock()
|
||||
if len(nodesList.Items) == podsNum {
|
||||
break
|
||||
}
|
||||
if time.Now().After(deadline) {
|
||||
return fmt.Errorf("host-sensor pods number (%d) differ than nodes number (%d) after deadline exceded", podsNum, len(nodesList.Items))
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// initiating routine to keep pod list updated
|
||||
func (hsh *HostSensorHandler) populatePodNamesToNodeNames() {
|
||||
|
||||
go func() {
|
||||
watchRes, err := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).Watch(hsh.k8sObj.Context, metav1.ListOptions{
|
||||
Watch: true,
|
||||
LabelSelector: fmt.Sprintf("name=%s", hsh.DaemonSet.Spec.Template.Labels["name"]),
|
||||
})
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to watch over daemonset pods")
|
||||
}
|
||||
for eve := range watchRes.ResultChan() {
|
||||
pod, ok := eve.Object.(*corev1.Pod)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
go hsh.updatePodInListAtomic(eve.Type, pod)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (hsh *HostSensorHandler) updatePodInListAtomic(eventType watch.EventType, podObj *corev1.Pod) {
|
||||
hsh.podListLock.Lock()
|
||||
defer hsh.podListLock.Unlock()
|
||||
|
||||
switch eventType {
|
||||
case watch.Added, watch.Modified:
|
||||
if podObj.Status.Phase == corev1.PodRunning {
|
||||
hsh.HostSensorPodNames[podObj.ObjectMeta.Name] = podObj.Spec.NodeName
|
||||
} else {
|
||||
delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
|
||||
}
|
||||
default:
|
||||
delete(hsh.HostSensorPodNames, podObj.ObjectMeta.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// tearDownNamesapce deletes the given namespace (and everything inside it)
// using the handler's configured grace period.
// NOTE(review): "Namesapce" is a typo in the method name; kept as-is because
// this file's callers reference it by this exact name.
func (hsh *HostSensorHandler) tearDownNamesapce(namespace string) error {

	if err := hsh.k8sObj.KubernetesClient.CoreV1().Namespaces().Delete(hsh.k8sObj.Context, namespace, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
		return fmt.Errorf("failed to delete host-sensor namespace: %v", err)
	}
	return nil
}
|
||||
|
||||
func (hsh *HostSensorHandler) TearDown() error {
|
||||
namespace := hsh.GetNamespace()
|
||||
if err := hsh.k8sObj.KubernetesClient.AppsV1().DaemonSets(hsh.GetNamespace()).Delete(hsh.k8sObj.Context, hsh.DaemonSet.Name, metav1.DeleteOptions{GracePeriodSeconds: &hsh.gracePeriod}); err != nil {
|
||||
return fmt.Errorf("failed to delete host-sensor daemonset: %v", err)
|
||||
}
|
||||
if err := hsh.tearDownNamesapce(namespace); err != nil {
|
||||
return fmt.Errorf("failed to delete host-sensor daemonset: %v", err)
|
||||
}
|
||||
// TODO: wait for termination? may take up to 120 seconds!!!
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (hsh *HostSensorHandler) GetNamespace() string {
|
||||
if hsh.DaemonSet == nil {
|
||||
return ""
|
||||
}
|
||||
return hsh.DaemonSet.Namespace
|
||||
}
|
||||
198
hostsensorutils/hostsensorgetfrompod.go
Normal file
198
hostsensorutils/hostsensorgetfrompod.go
Normal file
@@ -0,0 +1,198 @@
|
||||
package hostsensorutils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
func (hsh *HostSensorHandler) getPodList() (res map[string]string, err error) {
|
||||
hsh.podListLock.RLock()
|
||||
jsonBytes, err := json.Marshal(hsh.HostSensorPodNames)
|
||||
hsh.podListLock.RUnlock()
|
||||
if err != nil {
|
||||
return res, fmt.Errorf("failed to marshal pod list: %v", err)
|
||||
}
|
||||
err = json.Unmarshal(jsonBytes, &res)
|
||||
if err != nil {
|
||||
return res, fmt.Errorf("failed to unmarshal pod list: %v", err)
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// HTTPGetToPod performs an HTTP GET against one host-sensor pod through the
// Kubernetes API-server pod proxy (no direct network path to the pod is
// needed), using the container port recorded at deploy time, and returns the
// raw response body.
func (hsh *HostSensorHandler) HTTPGetToPod(podName, path string) ([]byte, error) {
	// send the request to the port

	restProxy := hsh.k8sObj.KubernetesClient.CoreV1().Pods(hsh.DaemonSet.Namespace).ProxyGet("http", podName, fmt.Sprintf("%d", hsh.HostSensorPort), path, map[string]string{})
	return restProxy.DoRaw(hsh.k8sObj.Context)

}
|
||||
|
||||
// ForwardToPod is an unimplemented placeholder for a port-forwarding based
// transport to a host-sensor pod; it always returns (nil, nil).
// HTTPGetToPod (API-server proxy) is the transport actually in use.
func (hsh *HostSensorHandler) ForwardToPod(podName, path string) ([]byte, error) {
	// NOT IN USE:
	// ---
	// spawn port forwarding
	// req := hsh.k8sObj.KubernetesClient.CoreV1().RESTClient().Post()
	// req = req.Name(podName)
	// req = req.Namespace(hsh.DaemonSet.Namespace)
	// req = req.Resource("pods")
	// req = req.SubResource("portforward")
	// ----
	// https://github.com/gianarb/kube-port-forward
	// fullPath := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward",
	// 	hsh.DaemonSet.Namespace, podName)
	// transport, upgrader, err := spdy.RoundTripperFor(hsh.k8sObj.KubernetesClient.config)
	// if err != nil {
	// 	return nil, err
	// }
	// hostIP := strings.TrimLeft(req.RestConfig.Host, "htps:/")
	// dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, &url.URL{Scheme: "http", Path: path, Host: hostIP})
	return nil, nil
}
|
||||
|
||||
// sendAllPodsHTTPGETRequest fills the raw byte response in the envelope and the node name, but not the GroupVersionKind
|
||||
// so the caller is responsible to convert the raw data to some structured data and add the GroupVersionKind details
|
||||
func (hsh *HostSensorHandler) sendAllPodsHTTPGETRequest(path, requestKind string) ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
podList, err := hsh.getPodList()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to sendAllPodsHTTPGETRequest: %v", err)
|
||||
}
|
||||
res := make([]hostsensor.HostSensorDataEnvelope, 0, len(podList))
|
||||
resLock := sync.Mutex{}
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(len(podList))
|
||||
for podName := range podList {
|
||||
go func(podName, path string) {
|
||||
defer wg.Done()
|
||||
resBytes, err := hsh.HTTPGetToPod(podName, path)
|
||||
if err != nil {
|
||||
fmt.Printf("In sendAllPodsHTTPGETRequest failed to get data '%s' from pod '%s': %v", path, podName, err)
|
||||
} else {
|
||||
resLock.Lock()
|
||||
defer resLock.Unlock()
|
||||
hostSensorDataEnvelope := hostsensor.HostSensorDataEnvelope{}
|
||||
hostSensorDataEnvelope.SetApiVersion(k8sinterface.JoinGroupVersion(hostsensor.GroupHostSensor, hostsensor.Version))
|
||||
hostSensorDataEnvelope.SetKind(requestKind)
|
||||
hostSensorDataEnvelope.SetName(podList[podName])
|
||||
hostSensorDataEnvelope.SetData(resBytes)
|
||||
res = append(res, hostSensorDataEnvelope)
|
||||
}
|
||||
|
||||
}(podName, path)
|
||||
}
|
||||
wg.Wait()
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// GetOpenPortsList queries every host-sensor pod for its open-ports report
// and returns one "OpenPortsList" envelope per responding pod.
func (hsh *HostSensorHandler) GetOpenPortsList() ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and port-forward it to each of them
	return hsh.sendAllPodsHTTPGETRequest("/openedPorts", "OpenPortsList")
}
|
||||
|
||||
// GetLinuxSecurityHardeningStatus queries every host-sensor pod for its Linux
// security hardening report and returns one "LinuxSecurityHardeningStatus"
// envelope per responding pod.
func (hsh *HostSensorHandler) GetLinuxSecurityHardeningStatus() ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and port-forward it to each of them
	return hsh.sendAllPodsHTTPGETRequest("/linuxSecurityHardening", "LinuxSecurityHardeningStatus")
}
|
||||
|
||||
// return list of KubeletCommandLine
|
||||
func (hsh *HostSensorHandler) GetKubeletCommandLine() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
resps, err := hsh.sendAllPodsHTTPGETRequest("/kubeletCommandLine", "KubeletCommandLine")
|
||||
if err != nil {
|
||||
return resps, err
|
||||
}
|
||||
for resp := range resps {
|
||||
var data = make(map[string]interface{})
|
||||
data["fullCommand"] = string(resps[resp].Data)
|
||||
resBytesMarshal, err := json.Marshal(data)
|
||||
// TODO catch error
|
||||
if err == nil {
|
||||
resps[resp].Data = json.RawMessage(resBytesMarshal)
|
||||
}
|
||||
}
|
||||
|
||||
return resps, nil
|
||||
|
||||
}
|
||||
|
||||
// GetKernelVersion queries every host-sensor pod for the node's kernel
// version and returns one "KernelVersion" envelope per responding pod.
func (hsh *HostSensorHandler) GetKernelVersion() ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and port-forward it to each of them
	return hsh.sendAllPodsHTTPGETRequest("/kernelVersion", "KernelVersion")
}
|
||||
|
||||
// GetOsReleaseFile queries every host-sensor pod for the node's os-release
// file and returns one "OsReleaseFile" envelope per responding pod.
func (hsh *HostSensorHandler) GetOsReleaseFile() ([]hostsensor.HostSensorDataEnvelope, error) {
	// loop over pods and port-forward it to each of them
	return hsh.sendAllPodsHTTPGETRequest("/osRelease", "OsReleaseFile")
}
|
||||
|
||||
// return list of
|
||||
func (hsh *HostSensorHandler) GetKubeletConfigurations() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
// loop over pods and port-forward it to each of them
|
||||
res, err := hsh.sendAllPodsHTTPGETRequest("/kubeletConfigurations", "KubeletConfiguration") // empty kind, will be overridden
|
||||
for resIdx := range res {
|
||||
jsonBytes, err := yaml.YAMLToJSON(res[resIdx].Data)
|
||||
if err != nil {
|
||||
fmt.Printf("In GetKubeletConfigurations failed to YAMLToJSON: %v;\n%v", err, res[resIdx])
|
||||
continue
|
||||
}
|
||||
res[resIdx].SetData(jsonBytes)
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (hsh *HostSensorHandler) CollectResources() ([]hostsensor.HostSensorDataEnvelope, error) {
|
||||
res := make([]hostsensor.HostSensorDataEnvelope, 0)
|
||||
if hsh.DaemonSet == nil {
|
||||
return res, nil
|
||||
}
|
||||
cautils.ProgressTextDisplay("Accessing host sensor")
|
||||
cautils.StartSpinner()
|
||||
defer cautils.StopSpinner()
|
||||
kcData, err := hsh.GetKubeletConfigurations()
|
||||
if err != nil {
|
||||
return kcData, err
|
||||
}
|
||||
res = append(res, kcData...)
|
||||
//
|
||||
kcData, err = hsh.GetKubeletCommandLine()
|
||||
if err != nil {
|
||||
return kcData, err
|
||||
}
|
||||
res = append(res, kcData...)
|
||||
//
|
||||
kcData, err = hsh.GetOsReleaseFile()
|
||||
if err != nil {
|
||||
return kcData, err
|
||||
}
|
||||
res = append(res, kcData...)
|
||||
//
|
||||
kcData, err = hsh.GetKernelVersion()
|
||||
if err != nil {
|
||||
return kcData, err
|
||||
}
|
||||
res = append(res, kcData...)
|
||||
//
|
||||
kcData, err = hsh.GetLinuxSecurityHardeningStatus()
|
||||
if err != nil {
|
||||
return kcData, err
|
||||
}
|
||||
res = append(res, kcData...)
|
||||
//
|
||||
kcData, err = hsh.GetOpenPortsList()
|
||||
if err != nil {
|
||||
return kcData, err
|
||||
}
|
||||
res = append(res, kcData...)
|
||||
// finish
|
||||
cautils.SuccessTextDisplay("Read host information from host sensor")
|
||||
return res, nil
|
||||
}
|
||||
10
hostsensorutils/hostsensorinterface.go
Normal file
10
hostsensorutils/hostsensorinterface.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package hostsensorutils
|
||||
|
||||
import "github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
|
||||
|
||||
// IHostSensor abstracts the host-sensor lifecycle and data collection so the
// caller can use either the real in-cluster handler or a no-op mock.
type IHostSensor interface {
	// Init prepares the sensor for use; returns an error on failure.
	Init() error
	// TearDown removes whatever Init set up.
	TearDown() error
	// CollectResources gathers the data envelopes from the sensor.
	CollectResources() ([]hostsensor.HostSensorDataEnvelope, error)
	// GetNamespace reports the namespace the sensor runs in ("" if none).
	GetNamespace() string
}
|
||||
24
hostsensorutils/hostsensormock.go
Normal file
24
hostsensorutils/hostsensormock.go
Normal file
@@ -0,0 +1,24 @@
|
||||
package hostsensorutils
|
||||
|
||||
import (
|
||||
"github.com/armosec/opa-utils/objectsenvelopes/hostsensor"
|
||||
)
|
||||
|
||||
// HostSensorHandlerMock is a no-op implementation of the IHostSensor
// interface: it deploys nothing and collects no data.
type HostSensorHandlerMock struct {
}

// Init does nothing and always succeeds.
func (hshm *HostSensorHandlerMock) Init() error {
	return nil
}

// TearDown does nothing and always succeeds.
func (hshm *HostSensorHandlerMock) TearDown() error {
	return nil
}

// CollectResources always returns an empty (non-nil) envelope slice.
func (hshm *HostSensorHandlerMock) CollectResources() ([]hostsensor.HostSensorDataEnvelope, error) {
	return []hostsensor.HostSensorDataEnvelope{}, nil
}

// GetNamespace returns the empty string, since the mock deploys nothing.
func (hshm *HostSensorHandlerMock) GetNamespace() string {
	return ""
}
|
||||
65
hostsensorutils/hostsensoryamls.go
Normal file
65
hostsensorutils/hostsensoryamls.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package hostsensorutils
|
||||
|
||||
// hostSensorYAML holds the manifests applied by the handler: the dedicated
// armo-kube-host-sensor namespace, and a privileged, host-networked DaemonSet
// running the sensor image with hostPort 7888 and the node's root filesystem
// mounted read-only-root at /host_fs. Do not edit the string's content
// without updating the code that parses/applies it.
const hostSensorYAML = `apiVersion: v1
kind: Namespace
metadata:
  labels:
    app: host-sensor
    kubernetes.io/metadata.name: armo-kube-host-sensor
    tier: armo-kube-host-sensor-control-plane
  name: armo-kube-host-sensor
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: host-sensor
  namespace: armo-kube-host-sensor
  labels:
    k8s-app: armo-kube-host-sensor
spec:
  selector:
    matchLabels:
      name: host-sensor
  template:
    metadata:
      labels:
        name: host-sensor
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      containers:
      - name: host-sensor
        image: quay.io/armosec/kube-host-sensor:latest
        securityContext:
          privileged: true
          readOnlyRootFilesystem: true
          procMount: Unmasked
        ports:
        - name: http
          hostPort: 7888
          containerPort: 7888
        resources:
          limits:
            cpu: 1m
            memory: 200Mi
          requests:
            cpu: 1m
            memory: 200Mi
        volumeMounts:
        - mountPath: /host_fs
          name: host-filesystem
      terminationGracePeriodSeconds: 120
      dnsPolicy: ClusterFirstWithHostNet
      automountServiceAccountToken: false
      volumes:
      - hostPath:
          path: /
          type: Directory
        name: host-filesystem
      hostNetwork: true
      hostPID: true
      hostIPC: true`
|
||||
@@ -53,6 +53,6 @@ echo -e "\033[0m"
|
||||
$KUBESCAPE_EXEC version
|
||||
echo
|
||||
|
||||
echo -e "\033[35mUsage: $ $KUBESCAPE_EXEC scan framework nsa"
|
||||
echo -e "\033[35mUsage: $ $KUBESCAPE_EXEC scan --submit"
|
||||
|
||||
echo -e "\033[0m"
|
||||
|
||||
85
mocks/loadmocks.go
Normal file
85
mocks/loadmocks.go
Normal file
File diff suppressed because one or more lines are too long
@@ -6,16 +6,20 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/exceptions"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
"github.com/open-policy-agent/opa/storage"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
|
||||
"github.com/armosec/opa-utils/resources"
|
||||
"github.com/golang/glog"
|
||||
"github.com/open-policy-agent/opa/ast"
|
||||
"github.com/open-policy-agent/opa/rego"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
)
|
||||
|
||||
const ScoreConfigPath = "/resources/config"
|
||||
@@ -55,171 +59,210 @@ func (opaHandler *OPAProcessorHandler) ProcessRulesListenner() {
|
||||
opaSessionObj := <-*opaHandler.processedPolicy
|
||||
opap := NewOPAProcessor(opaSessionObj, opaHandler.regoDependenciesData)
|
||||
|
||||
policies := ConvertFrameworksToPolicies(opap.Frameworks, cautils.BuildNumber)
|
||||
|
||||
ConvertFrameworksToSummaryDetails(&opap.Report.SummaryDetails, opap.Frameworks, policies)
|
||||
|
||||
// process
|
||||
if err := opap.Process(); err != nil {
|
||||
fmt.Println(err)
|
||||
if err := opap.Process(policies); err != nil {
|
||||
// fmt.Println(err)
|
||||
}
|
||||
|
||||
// edit results
|
||||
opap.updateResults()
|
||||
|
||||
// update score
|
||||
// opap.updateScore()
|
||||
|
||||
// report
|
||||
*opaHandler.reportResults <- opaSessionObj
|
||||
}
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) Process() error {
|
||||
func (opap *OPAProcessor) Process(policies *cautils.Policies) error {
|
||||
// glog.Infof(fmt.Sprintf("Starting 'Process'. reportID: %s", opap.PostureReport.ReportID))
|
||||
cautils.ProgressTextDisplay(fmt.Sprintf("Scanning cluster %s", cautils.ClusterName))
|
||||
cautils.StartSpinner()
|
||||
frameworkReports := []reporthandling.FrameworkReport{}
|
||||
|
||||
var errs error
|
||||
for i := range opap.Frameworks {
|
||||
frameworkReport, err := opap.processFramework(&opap.Frameworks[i])
|
||||
for _, control := range policies.Controls {
|
||||
|
||||
resourcesAssociatedControl, err := opap.processControl(&control)
|
||||
if err != nil {
|
||||
errs = fmt.Errorf("%v\n%s", errs, err.Error())
|
||||
appendError(&errs, err)
|
||||
}
|
||||
// update resources with latest results
|
||||
if len(resourcesAssociatedControl) != 0 {
|
||||
for resourceID, controlResult := range resourcesAssociatedControl {
|
||||
if _, ok := opap.ResourcesResult[resourceID]; !ok {
|
||||
opap.ResourcesResult[resourceID] = resourcesresults.Result{ResourceID: resourceID}
|
||||
}
|
||||
t := opap.ResourcesResult[resourceID]
|
||||
t.AssociatedControls = append(t.AssociatedControls, controlResult)
|
||||
opap.ResourcesResult[resourceID] = t
|
||||
}
|
||||
}
|
||||
frameworkReports = append(frameworkReports, *frameworkReport)
|
||||
}
|
||||
|
||||
opap.PostureReport.FrameworkReports = frameworkReports
|
||||
opap.PostureReport.ReportID = uuid.NewV4().String()
|
||||
opap.PostureReport.ReportGenerationTime = time.Now().UTC()
|
||||
// glog.Infof(fmt.Sprintf("Done 'Process'. reportID: %s", opap.PostureReport.ReportID))
|
||||
opap.Report.ReportGenerationTime = time.Now().UTC()
|
||||
|
||||
cautils.StopSpinner()
|
||||
cautils.SuccessTextDisplay(fmt.Sprintf("Done scanning cluster %s", cautils.ClusterName))
|
||||
return errs
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) processFramework(framework *reporthandling.Framework) (*reporthandling.FrameworkReport, error) {
|
||||
var errs error
|
||||
|
||||
frameworkReport := reporthandling.FrameworkReport{}
|
||||
frameworkReport.Name = framework.Name
|
||||
|
||||
controlReports := []reporthandling.ControlReport{}
|
||||
for i := range framework.Controls {
|
||||
controlReport, err := opap.processControl(&framework.Controls[i])
|
||||
if err != nil {
|
||||
errs = fmt.Errorf("%v\n%s", errs, err.Error())
|
||||
}
|
||||
if controlReport != nil {
|
||||
controlReports = append(controlReports, *controlReport)
|
||||
}
|
||||
func appendError(errs *error, err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
if errs == nil {
|
||||
errs = &err
|
||||
} else {
|
||||
*errs = fmt.Errorf("%v\n%s", *errs, err.Error())
|
||||
}
|
||||
frameworkReport.ControlReports = controlReports
|
||||
return &frameworkReport, errs
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) processControl(control *reporthandling.Control) (*reporthandling.ControlReport, error) {
|
||||
func (opap *OPAProcessor) processControl(control *reporthandling.Control) (map[string]resourcesresults.ResourceAssociatedControl, error) {
|
||||
var errs error
|
||||
|
||||
controlReport := reporthandling.ControlReport{}
|
||||
controlReport.PortalBase = control.PortalBase
|
||||
controlReport.ControlID = control.ControlID
|
||||
controlReport.BaseScore = control.BaseScore
|
||||
resourcesAssociatedControl := make(map[string]resourcesresults.ResourceAssociatedControl)
|
||||
|
||||
controlReport.Control_ID = control.Control_ID // TODO: delete when 'id' is deprecated
|
||||
|
||||
controlReport.Name = control.Name
|
||||
controlReport.Description = control.Description
|
||||
controlReport.Remediation = control.Remediation
|
||||
|
||||
ruleReports := []reporthandling.RuleReport{}
|
||||
// ruleResults := make(map[string][]resourcesresults.ResourceAssociatedRule)
|
||||
for i := range control.Rules {
|
||||
ruleReport, err := opap.processRule(&control.Rules[i])
|
||||
resourceAssociatedRule, err := opap.processRule(&control.Rules[i])
|
||||
if err != nil {
|
||||
errs = fmt.Errorf("%v\n%s", errs, err.Error())
|
||||
appendError(&errs, err)
|
||||
continue
|
||||
}
|
||||
if ruleReport != nil {
|
||||
ruleReports = append(ruleReports, *ruleReport)
|
||||
|
||||
// append failed rules to controls
|
||||
if len(resourceAssociatedRule) != 0 {
|
||||
for resourceID, ruleResponse := range resourceAssociatedRule {
|
||||
|
||||
controlResult := resourcesresults.ResourceAssociatedControl{}
|
||||
controlResult.SetID(control.ControlID)
|
||||
controlResult.SetName(control.Name)
|
||||
|
||||
if _, ok := resourcesAssociatedControl[resourceID]; ok {
|
||||
controlResult.ResourceAssociatedRules = resourcesAssociatedControl[resourceID].ResourceAssociatedRules
|
||||
}
|
||||
if ruleResponse != nil {
|
||||
controlResult.ResourceAssociatedRules = append(controlResult.ResourceAssociatedRules, *ruleResponse)
|
||||
}
|
||||
resourcesAssociatedControl[resourceID] = controlResult
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(ruleReports) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
controlReport.RuleReports = ruleReports
|
||||
return &controlReport, errs
|
||||
|
||||
return resourcesAssociatedControl, errs
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (*reporthandling.RuleReport, error) {
|
||||
if ruleWithArmoOpaDependency(rule.Attributes) {
|
||||
return nil, nil
|
||||
}
|
||||
k8sObjects := getKubernetesObjects(opap.K8SResources, rule.Match)
|
||||
ruleReport, err := opap.runOPAOnSingleRule(rule, k8sObjects)
|
||||
func (opap *OPAProcessor) processRule(rule *reporthandling.PolicyRule) (map[string]*resourcesresults.ResourceAssociatedRule, error) {
|
||||
|
||||
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs) // get store
|
||||
|
||||
inputResources, err := reporthandling.RegoResourcesAggregator(rule, getAllSupportedObjects(opap.K8SResources, opap.AllResources, rule))
|
||||
if err != nil {
|
||||
ruleReport.RuleStatus.Status = "failure"
|
||||
ruleReport.RuleStatus.Message = err.Error()
|
||||
return nil, fmt.Errorf("error getting aggregated k8sObjects: %s", err.Error())
|
||||
}
|
||||
if len(inputResources) == 0 {
|
||||
return nil, nil // no resources found for testing
|
||||
}
|
||||
|
||||
inputRawResources := workloadinterface.ListMetaToMap(inputResources)
|
||||
|
||||
resources := map[string]*resourcesresults.ResourceAssociatedRule{}
|
||||
// the failed resources are a subgroup of the enumeratedData, so we store the enumeratedData like it was the input data
|
||||
enumeratedData, err := opap.enumerateData(rule, inputRawResources)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inputResources = objectsenvelopes.ListMapToMeta(enumeratedData)
|
||||
for i := range inputResources {
|
||||
resources[inputResources[i].GetID()] = &resourcesresults.ResourceAssociatedRule{
|
||||
Name: rule.Name,
|
||||
ControlConfigurations: postureControlInputs,
|
||||
Status: apis.StatusPassed,
|
||||
}
|
||||
opap.AllResources[inputResources[i].GetID()] = inputResources[i]
|
||||
}
|
||||
|
||||
ruleResponses, err := opap.runOPAOnSingleRule(rule, inputRawResources, ruleData, postureControlInputs)
|
||||
if err != nil {
|
||||
// TODO - Handle error
|
||||
glog.Error(err)
|
||||
} else {
|
||||
ruleReport.RuleStatus.Status = "success"
|
||||
// ruleResponse to ruleResult
|
||||
for i := range ruleResponses {
|
||||
failedResources := objectsenvelopes.ListMapToMeta(ruleResponses[i].GetFailedResources())
|
||||
for j := range failedResources {
|
||||
ruleResult := &resourcesresults.ResourceAssociatedRule{}
|
||||
if r, k := resources[failedResources[j].GetID()]; k {
|
||||
ruleResult = r
|
||||
}
|
||||
|
||||
ruleResult.Status = apis.StatusFailed
|
||||
for j := range ruleResponses[i].FailedPaths {
|
||||
ruleResult.Paths = append(ruleResult.Paths, resourcesresults.Path{FailedPath: ruleResponses[i].FailedPaths[j]})
|
||||
}
|
||||
resources[failedResources[j].GetID()] = ruleResult
|
||||
}
|
||||
}
|
||||
}
|
||||
ruleReport.ListInputResources = k8sObjects
|
||||
return &ruleReport, err
|
||||
|
||||
return resources, err
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) runOPAOnSingleRule(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}) (reporthandling.RuleReport, error) {
|
||||
func (opap *OPAProcessor) runOPAOnSingleRule(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, postureControlInputs map[string][]string) ([]reporthandling.RuleResponse, error) {
|
||||
switch rule.RuleLanguage {
|
||||
case reporthandling.RegoLanguage, reporthandling.RegoLanguage2:
|
||||
return opap.runRegoOnK8s(rule, k8sObjects)
|
||||
return opap.runRegoOnK8s(rule, k8sObjects, getRuleData, postureControlInputs)
|
||||
default:
|
||||
return reporthandling.RuleReport{}, fmt.Errorf("rule: '%s', language '%v' not supported", rule.Name, rule.RuleLanguage)
|
||||
return nil, fmt.Errorf("rule: '%s', language '%v' not supported", rule.Name, rule.RuleLanguage)
|
||||
}
|
||||
}
|
||||
func (opap *OPAProcessor) runRegoOnK8s(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}) (reporthandling.RuleReport, error) {
|
||||
|
||||
func (opap *OPAProcessor) runRegoOnK8s(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}, getRuleData func(*reporthandling.PolicyRule) string, postureControlInputs map[string][]string) ([]reporthandling.RuleResponse, error) {
|
||||
var errs error
|
||||
ruleReport := reporthandling.RuleReport{
|
||||
Name: rule.Name,
|
||||
}
|
||||
|
||||
// compile modules
|
||||
modules, err := getRuleDependencies()
|
||||
if err != nil {
|
||||
return ruleReport, fmt.Errorf("rule: '%s', %s", rule.Name, err.Error())
|
||||
return nil, fmt.Errorf("rule: '%s', %s", rule.Name, err.Error())
|
||||
}
|
||||
modules[rule.Name] = rule.Rule
|
||||
modules[rule.Name] = getRuleData(rule)
|
||||
compiled, err := ast.CompileModules(modules)
|
||||
if err != nil {
|
||||
return ruleReport, fmt.Errorf("in 'runRegoOnSingleRule', failed to compile rule, name: %s, reason: %s", rule.Name, err.Error())
|
||||
return nil, fmt.Errorf("in 'runRegoOnSingleRule', failed to compile rule, name: %s, reason: %s", rule.Name, err.Error())
|
||||
}
|
||||
|
||||
store, err := resources.TOStorage(postureControlInputs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Eval
|
||||
results, err := opap.regoEval(k8sObjects, compiled)
|
||||
results, err := opap.regoEval(k8sObjects, compiled, &store)
|
||||
if err != nil {
|
||||
errs = fmt.Errorf("rule: '%s', %s", rule.Name, err.Error())
|
||||
}
|
||||
|
||||
if results != nil {
|
||||
ruleReport.RuleResponses = append(ruleReport.RuleResponses, results...)
|
||||
}
|
||||
return ruleReport, errs
|
||||
return results, errs
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRego *ast.Compiler) ([]reporthandling.RuleResponse, error) {
|
||||
store, err := opap.regoDependenciesData.TOStorage() // get store
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRego *ast.Compiler, store *storage.Store) ([]reporthandling.RuleResponse, error) {
|
||||
// opap.regoDependenciesData.PostureControlInputs
|
||||
|
||||
rego := rego.New(
|
||||
rego.Query("data.armo_builtins"), // get package name from rule
|
||||
rego.Compiler(compiledRego),
|
||||
rego.Input(inputObj),
|
||||
rego.Store(store),
|
||||
rego.Store(*store),
|
||||
)
|
||||
|
||||
// Run evaluation
|
||||
resultSet, err := rego.Eval(context.Background())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("in 'regoEval', failed to evaluate rule, reason: %s", err.Error())
|
||||
return nil, err
|
||||
}
|
||||
results, err := reporthandling.ParseRegoResult(&resultSet)
|
||||
|
||||
// results, err := ParseRegoResult(&resultSet)
|
||||
if err != nil {
|
||||
return results, err
|
||||
}
|
||||
@@ -227,27 +270,20 @@ func (opap *OPAProcessor) regoEval(inputObj []map[string]interface{}, compiledRe
|
||||
return results, nil
|
||||
}
|
||||
|
||||
func (opap *OPAProcessor) updateResults() {
|
||||
for f := range opap.PostureReport.FrameworkReports {
|
||||
// set exceptions
|
||||
exceptions.SetFrameworkExceptions(&opap.PostureReport.FrameworkReports[f], opap.Exceptions, cautils.ClusterName)
|
||||
|
||||
// set counters
|
||||
reporthandling.SetUniqueResourcesCounter(&opap.PostureReport.FrameworkReports[f])
|
||||
|
||||
// set default score
|
||||
reporthandling.SetDefaultScore(&opap.PostureReport.FrameworkReports[f])
|
||||
|
||||
// edit results - remove data
|
||||
|
||||
// TODO - move function to pkg - use RemoveData
|
||||
for c := range opap.PostureReport.FrameworkReports[f].ControlReports {
|
||||
for r, ruleReport := range opap.PostureReport.FrameworkReports[f].ControlReports[c].RuleReports {
|
||||
// editing the responses -> removing duplications, clearing secret data, etc.
|
||||
opap.PostureReport.FrameworkReports[f].ControlReports[c].RuleReports[r].RuleResponses = editRuleResponses(ruleReport.RuleResponses)
|
||||
}
|
||||
}
|
||||
func (opap *OPAProcessor) enumerateData(rule *reporthandling.PolicyRule, k8sObjects []map[string]interface{}) ([]map[string]interface{}, error) {
|
||||
|
||||
if ruleEnumeratorData(rule) == "" {
|
||||
return k8sObjects, nil
|
||||
}
|
||||
postureControlInputs := opap.regoDependenciesData.GetFilteredPostureControlInputs(rule.ConfigInputs)
|
||||
|
||||
ruleResponse, err := opap.runOPAOnSingleRule(rule, k8sObjects, ruleEnumeratorData, postureControlInputs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
failedResources := []map[string]interface{}{}
|
||||
for _, ruleResponse := range ruleResponse {
|
||||
failedResources = append(failedResources, ruleResponse.GetFailedResources()...)
|
||||
}
|
||||
return failedResources, nil
|
||||
}
|
||||
|
||||
@@ -3,11 +3,16 @@ package opaprocessor
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/mocks"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/resources"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
)
|
||||
|
||||
@@ -18,15 +23,23 @@ func TestProcess(t *testing.T) {
|
||||
|
||||
// set k8s
|
||||
k8sResources := make(cautils.K8SResources)
|
||||
k8sResources["/v1/pods"] = k8sinterface.ConvertUnstructuredSliceToMap(k8sinterface.V1KubeSystemNamespaceMock().Items)
|
||||
allResources := make(map[string]workloadinterface.IMetadata)
|
||||
imetaObj := objectsenvelopes.ListMapToMeta(k8sinterface.ConvertUnstructuredSliceToMap(k8sinterface.V1KubeSystemNamespaceMock().Items))
|
||||
for i := range imetaObj {
|
||||
allResources[imetaObj[i].GetID()] = imetaObj[i]
|
||||
}
|
||||
k8sResources["/v1/pods"] = workloadinterface.ListMetaIDs(imetaObj)
|
||||
|
||||
// set opaSessionObj
|
||||
opaSessionObj := cautils.NewOPASessionObjMock()
|
||||
opaSessionObj.Frameworks = []reporthandling.Framework{*reporthandling.MockFrameworkA()}
|
||||
policies := ConvertFrameworksToPolicies(opaSessionObj.Frameworks, "")
|
||||
|
||||
opaSessionObj.K8SResources = &k8sResources
|
||||
opaSessionObj.AllResources = allResources
|
||||
|
||||
opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock())
|
||||
opap.Process()
|
||||
opap.Process(policies)
|
||||
opap.updateResults()
|
||||
for _, f := range opap.PostureReport.FrameworkReports {
|
||||
for _, c := range f.ControlReports {
|
||||
@@ -42,3 +55,85 @@ func TestProcess(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestProcessResourcesResult(t *testing.T) {
|
||||
|
||||
// set k8s
|
||||
k8sResources := make(cautils.K8SResources)
|
||||
|
||||
deployment := mocks.MockDevelopmentWithHostpath()
|
||||
frameworks := []reporthandling.Framework{*mocks.MockFramework_0006_0013()}
|
||||
|
||||
k8sResources["apps/v1/deployments"] = workloadinterface.ListMetaIDs([]workloadinterface.IMetadata{deployment})
|
||||
|
||||
// set opaSessionObj
|
||||
opaSessionObj := cautils.NewOPASessionObjMock()
|
||||
opaSessionObj.Frameworks = frameworks
|
||||
|
||||
policies := ConvertFrameworksToPolicies(opaSessionObj.Frameworks, "")
|
||||
ConvertFrameworksToSummaryDetails(&opaSessionObj.Report.SummaryDetails, opaSessionObj.Frameworks, policies)
|
||||
|
||||
opaSessionObj.K8SResources = &k8sResources
|
||||
opaSessionObj.AllResources[deployment.GetID()] = deployment
|
||||
|
||||
opap := NewOPAProcessor(opaSessionObj, resources.NewRegoDependenciesDataMock())
|
||||
opap.Process(policies)
|
||||
|
||||
assert.Equal(t, 1, len(opaSessionObj.ResourcesResult))
|
||||
res := opaSessionObj.ResourcesResult[deployment.GetID()]
|
||||
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
|
||||
assert.True(t, res.GetStatus(nil).IsFailed())
|
||||
assert.False(t, res.GetStatus(nil).IsPassed())
|
||||
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
|
||||
|
||||
opap.updateResults()
|
||||
res = opaSessionObj.ResourcesResult[deployment.GetID()]
|
||||
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
|
||||
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Failed()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
|
||||
assert.True(t, res.GetStatus(nil).IsFailed())
|
||||
assert.False(t, res.GetStatus(nil).IsPassed())
|
||||
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
|
||||
|
||||
// test resource counters
|
||||
summaryDetails := opaSessionObj.Report.SummaryDetails
|
||||
assert.Equal(t, 1, summaryDetails.NumberOfResources().All())
|
||||
assert.Equal(t, 1, summaryDetails.NumberOfResources().Failed())
|
||||
assert.Equal(t, 0, summaryDetails.NumberOfResources().Excluded())
|
||||
assert.Equal(t, 0, summaryDetails.NumberOfResources().Passed())
|
||||
|
||||
// test resource listing
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().All()))
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Excluded()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
|
||||
|
||||
// test control listing
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).All()), len(summaryDetails.ListControls().All()))
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Passed()), len(summaryDetails.ListControls().Passed()))
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Failed()), len(summaryDetails.ListControls().Failed()))
|
||||
assert.Equal(t, len(res.ListControlsIDs(nil).Excluded()), len(summaryDetails.ListControls().Excluded()))
|
||||
assert.True(t, summaryDetails.GetStatus().IsFailed())
|
||||
|
||||
opaSessionObj.Exceptions = []armotypes.PostureExceptionPolicy{*mocks.MockExceptionAllKinds(&armotypes.PosturePolicy{FrameworkName: frameworks[0].Name})}
|
||||
opap.updateResults()
|
||||
|
||||
res = opaSessionObj.ResourcesResult[deployment.GetID()]
|
||||
assert.Equal(t, 2, len(res.ListControlsIDs(nil).All()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Excluded()))
|
||||
assert.Equal(t, 1, len(res.ListControlsIDs(nil).Passed()))
|
||||
assert.True(t, res.GetStatus(nil).IsExcluded())
|
||||
assert.False(t, res.GetStatus(nil).IsPassed())
|
||||
assert.False(t, res.GetStatus(nil).IsFailed())
|
||||
assert.Equal(t, deployment.GetID(), opaSessionObj.ResourcesResult[deployment.GetID()].ResourceID)
|
||||
|
||||
// test resource listing
|
||||
summaryDetails = opaSessionObj.Report.SummaryDetails
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().All()))
|
||||
assert.Equal(t, 1, len(summaryDetails.ListResourcesIDs().Failed()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Excluded()))
|
||||
assert.Equal(t, 0, len(summaryDetails.ListResourcesIDs().Passed()))
|
||||
}
|
||||
|
||||
@@ -11,11 +11,66 @@ import (
|
||||
resources "github.com/armosec/opa-utils/resources"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
)
|
||||
|
||||
func getKubernetesObjects(k8sResources *cautils.K8SResources, match []reporthandling.RuleMatchObjects) []map[string]interface{} {
|
||||
k8sObjects := []map[string]interface{}{}
|
||||
// updateResults update the results objects and report objects. This is a critical function - DO NOT CHANGE
|
||||
/*
|
||||
- remove sensible data
|
||||
- adding exceptions
|
||||
- summarize results
|
||||
*/
|
||||
func (opap *OPAProcessor) updateResults() {
|
||||
|
||||
// remove data from all objects
|
||||
for i := range opap.AllResources {
|
||||
removeData(opap.AllResources[i])
|
||||
}
|
||||
|
||||
// set exceptions
|
||||
for i := range opap.ResourcesResult {
|
||||
|
||||
t := opap.ResourcesResult[i]
|
||||
|
||||
// first set exceptions
|
||||
if resource, ok := opap.AllResources[i]; ok {
|
||||
t.SetExceptions(resource, opap.Exceptions, cautils.ClusterName)
|
||||
}
|
||||
|
||||
// summarize the resources
|
||||
opap.Report.AppendResourceResultToSummary(&t)
|
||||
|
||||
// Add score
|
||||
// TODO
|
||||
|
||||
// save changes
|
||||
opap.ResourcesResult[i] = t
|
||||
}
|
||||
|
||||
// set result summary
|
||||
opap.Report.SummaryDetails.InitResourcesSummary()
|
||||
|
||||
// for f := range opap.PostureReport.FrameworkReports {
|
||||
// // set exceptions
|
||||
// exceptions.SetFrameworkExceptions(&opap.PostureReport.FrameworkReports[f], opap.Exceptions, cautils.ClusterName)
|
||||
|
||||
// // set counters
|
||||
// reporthandling.SetUniqueResourcesCounter(&opap.PostureReport.FrameworkReports[f])
|
||||
|
||||
// // set default score
|
||||
// // reporthandling.SetDefaultScore(&opap.PostureReport.FrameworkReports[f])
|
||||
// }
|
||||
}
|
||||
|
||||
func getAllSupportedObjects(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, rule *reporthandling.PolicyRule) []workloadinterface.IMetadata {
|
||||
k8sObjects := []workloadinterface.IMetadata{}
|
||||
k8sObjects = append(k8sObjects, getKubernetesObjects(k8sResources, allResources, rule.Match)...)
|
||||
k8sObjects = append(k8sObjects, getKubernetesObjects(k8sResources, allResources, rule.DynamicMatch)...)
|
||||
return k8sObjects
|
||||
}
|
||||
|
||||
func getKubernetesObjects(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
|
||||
k8sObjects := []workloadinterface.IMetadata{}
|
||||
|
||||
for m := range match {
|
||||
for _, groups := range match[m].APIGroups {
|
||||
for _, version := range match[m].APIVersions {
|
||||
@@ -24,15 +79,11 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, match []reporthand
|
||||
for _, groupResource := range groupResources {
|
||||
if k8sObj, ok := (*k8sResources)[groupResource]; ok {
|
||||
if k8sObj == nil {
|
||||
continue
|
||||
// glog.Errorf("Resource '%s' is nil, probably failed to pull the resource", groupResource)
|
||||
} else if v, k := k8sObj.([]map[string]interface{}); k {
|
||||
k8sObjects = append(k8sObjects, v...)
|
||||
} else if v, k := k8sObj.(map[string]interface{}); k {
|
||||
k8sObjects = append(k8sObjects, v)
|
||||
} else if v, k := k8sObj.([]unstructured.Unstructured); k {
|
||||
k8sObjects = append(k8sObjects, k8sinterface.ConvertUnstructuredSliceToMap(v)...) //
|
||||
} else {
|
||||
glog.Errorf("In 'getKubernetesObjects' resource '%s' unknown type", groupResource)
|
||||
}
|
||||
for i := range k8sObj {
|
||||
k8sObjects = append(k8sObjects, allResources[k8sObj[i]])
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -41,9 +92,33 @@ func getKubernetesObjects(k8sResources *cautils.K8SResources, match []reporthand
|
||||
}
|
||||
}
|
||||
|
||||
return k8sObjects
|
||||
return filterOutChildResources(k8sObjects, match)
|
||||
}
|
||||
|
||||
// filterOutChildResources filter out child resources if the parent resource is in the list
|
||||
func filterOutChildResources(objects []workloadinterface.IMetadata, match []reporthandling.RuleMatchObjects) []workloadinterface.IMetadata {
|
||||
response := []workloadinterface.IMetadata{}
|
||||
owners := []string{}
|
||||
for m := range match {
|
||||
for i := range match[m].Resources {
|
||||
owners = append(owners, match[m].Resources[i])
|
||||
}
|
||||
}
|
||||
for i := range objects {
|
||||
if !k8sinterface.IsTypeWorkload(objects[i].GetObject()) {
|
||||
response = append(response, objects[i])
|
||||
continue
|
||||
}
|
||||
w := workloadinterface.NewWorkloadObj(objects[i].GetObject())
|
||||
ownerReferences, err := w.GetOwnerReferences()
|
||||
if err != nil || len(ownerReferences) == 0 {
|
||||
response = append(response, w)
|
||||
} else if !k8sinterface.IsStringInSlice(owners, ownerReferences[0].Kind) {
|
||||
response = append(response, w)
|
||||
}
|
||||
}
|
||||
return response
|
||||
}
|
||||
func getRuleDependencies() (map[string]string, error) {
|
||||
modules := resources.LoadRegoModules()
|
||||
if len(modules) == 0 {
|
||||
@@ -52,28 +127,6 @@ func getRuleDependencies() (map[string]string, error) {
|
||||
return modules, nil
|
||||
}
|
||||
|
||||
//editRuleResponses editing the responses -> removing duplications, clearing secret data, etc.
|
||||
func editRuleResponses(ruleResponses []reporthandling.RuleResponse) []reporthandling.RuleResponse {
|
||||
lenRuleResponses := len(ruleResponses)
|
||||
for i := 0; i < lenRuleResponses; i++ {
|
||||
for j := range ruleResponses[i].AlertObject.K8SApiObjects {
|
||||
w := workloadinterface.NewWorkloadObj(ruleResponses[i].AlertObject.K8SApiObjects[j])
|
||||
if w == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
cleanRuleResponses(w)
|
||||
ruleResponses[i].AlertObject.K8SApiObjects[j] = w.GetWorkload()
|
||||
}
|
||||
}
|
||||
return ruleResponses
|
||||
}
|
||||
func cleanRuleResponses(workload k8sinterface.IWorkload) {
|
||||
if workload.GetKind() == "Secret" {
|
||||
workload.RemoveSecretData()
|
||||
}
|
||||
}
|
||||
|
||||
func ruleWithArmoOpaDependency(annotations map[string]interface{}) bool {
|
||||
if annotations == nil {
|
||||
return false
|
||||
@@ -83,3 +136,89 @@ func ruleWithArmoOpaDependency(annotations map[string]interface{}) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Checks that kubescape version is in range of use for this rule
|
||||
// In local build (BuildNumber = ""):
|
||||
// returns true only if rule doesn't have the "until" attribute
|
||||
func isRuleKubescapeVersionCompatible(rule *reporthandling.PolicyRule) bool {
|
||||
if from, ok := rule.Attributes["useFromKubescapeVersion"]; ok {
|
||||
if cautils.BuildNumber != "" {
|
||||
if from.(string) > cautils.BuildNumber {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
if until, ok := rule.Attributes["useUntilKubescapeVersion"]; ok {
|
||||
if cautils.BuildNumber != "" {
|
||||
if until.(string) <= cautils.BuildNumber {
|
||||
return false
|
||||
}
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func removeData(obj workloadinterface.IMetadata) {
|
||||
if !k8sinterface.IsTypeWorkload(obj.GetObject()) {
|
||||
return // remove data only from kubernetes objects
|
||||
}
|
||||
workload := workloadinterface.NewWorkloadObj(obj.GetObject())
|
||||
switch workload.GetKind() {
|
||||
case "Secret":
|
||||
removeSecretData(workload)
|
||||
case "ConfigMap":
|
||||
removeConfigMapData(workload)
|
||||
default:
|
||||
removePodData(workload)
|
||||
}
|
||||
}
|
||||
|
||||
func removeConfigMapData(workload workloadinterface.IWorkload) {
|
||||
workload.RemoveAnnotation("kubectl.kubernetes.io/last-applied-configuration")
|
||||
workloadinterface.RemoveFromMap(workload.GetObject(), "metadata", "managedFields")
|
||||
overrideSensitiveData(workload)
|
||||
}
|
||||
|
||||
func overrideSensitiveData(workload workloadinterface.IWorkload) {
|
||||
dataInterface, ok := workloadinterface.InspectMap(workload.GetObject(), "data")
|
||||
if ok {
|
||||
data, ok := dataInterface.(map[string]interface{})
|
||||
if ok {
|
||||
for key := range data {
|
||||
workloadinterface.SetInMap(workload.GetObject(), []string{"data"}, key, "XXXXXX")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeSecretData(workload workloadinterface.IWorkload) {
|
||||
workload.RemoveAnnotation("kubectl.kubernetes.io/last-applied-configuration")
|
||||
workloadinterface.RemoveFromMap(workload.GetObject(), "metadata", "managedFields")
|
||||
overrideSensitiveData(workload)
|
||||
}
|
||||
func removePodData(workload workloadinterface.IWorkload) {
|
||||
workload.RemoveAnnotation("kubectl.kubernetes.io/last-applied-configuration")
|
||||
workloadinterface.RemoveFromMap(workload.GetObject(), "metadata", "managedFields")
|
||||
workloadinterface.RemoveFromMap(workload.GetObject(), "status")
|
||||
|
||||
containers, err := workload.GetContainers()
|
||||
if err != nil || len(containers) == 0 {
|
||||
return
|
||||
}
|
||||
for i := range containers {
|
||||
for j := range containers[i].Env {
|
||||
containers[i].Env[j].Value = "XXXXXX"
|
||||
}
|
||||
}
|
||||
workloadinterface.SetInMap(workload.GetObject(), workloadinterface.PodSpec(workload.GetKind()), "containers", containers)
|
||||
}
|
||||
|
||||
func ruleData(rule *reporthandling.PolicyRule) string {
|
||||
return rule.Rule
|
||||
}
|
||||
|
||||
func ruleEnumeratorData(rule *reporthandling.PolicyRule) string {
|
||||
return rule.ResourceEnumerator
|
||||
}
|
||||
|
||||
@@ -2,7 +2,71 @@ package opaprocessor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
func TestGetKubernetesObjects(t *testing.T) {
|
||||
}
|
||||
|
||||
var rule_v1_0_131 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
|
||||
Attributes: map[string]interface{}{"useUntilKubescapeVersion": "v1.0.132"}}}
|
||||
var rule_v1_0_132 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
|
||||
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.132", "useUntilKubescapeVersion": "v1.0.133"}}}
|
||||
var rule_v1_0_133 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
|
||||
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.133", "useUntilKubescapeVersion": "v1.0.134"}}}
|
||||
var rule_v1_0_134 = &reporthandling.PolicyRule{PortalBase: armotypes.PortalBase{
|
||||
Attributes: map[string]interface{}{"useFromKubescapeVersion": "v1.0.134"}}}
|
||||
|
||||
func TestIsRuleKubescapeVersionCompatible(t *testing.T) {
|
||||
// local build- no build number
|
||||
// should use only rules that don't have "until"
|
||||
cautils.BuildNumber = ""
|
||||
if isRuleKubescapeVersionCompatible(rule_v1_0_131) {
|
||||
t.Error("error in isRuleKubescapeVersionCompatible")
|
||||
}
|
||||
if isRuleKubescapeVersionCompatible(rule_v1_0_132) {
|
||||
t.Error("error in isRuleKubescapeVersionCompatible")
|
||||
}
|
||||
if isRuleKubescapeVersionCompatible(rule_v1_0_133) {
|
||||
t.Error("error in isRuleKubescapeVersionCompatible")
|
||||
}
|
||||
if !isRuleKubescapeVersionCompatible(rule_v1_0_134) {
|
||||
t.Error("error in isRuleKubescapeVersionCompatible")
|
||||
}
|
||||
|
||||
// should only use rules that version is in range of use
|
||||
cautils.BuildNumber = "v1.0.133"
|
||||
if isRuleKubescapeVersionCompatible(rule_v1_0_131) {
|
||||
t.Error("error in isRuleKubescapeVersionCompatible")
|
||||
}
|
||||
if isRuleKubescapeVersionCompatible(rule_v1_0_132) {
|
||||
t.Error("error in isRuleKubescapeVersionCompatible")
|
||||
}
|
||||
if !isRuleKubescapeVersionCompatible(rule_v1_0_133) {
|
||||
t.Error("error in isRuleKubescapeVersionCompatible")
|
||||
}
|
||||
if isRuleKubescapeVersionCompatible(rule_v1_0_134) {
|
||||
t.Error("error in isRuleKubescapeVersionCompatible")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveData(t *testing.T) {
|
||||
|
||||
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"demoservice-server"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"demoservice-server"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}}}`
|
||||
obj, _ := workloadinterface.NewWorkload([]byte(w))
|
||||
removeData(obj)
|
||||
|
||||
workload := workloadinterface.NewWorkloadObj(obj.GetObject())
|
||||
c, _ := workload.GetContainers()
|
||||
for i := range c {
|
||||
for _, e := range c[i].Env {
|
||||
assert.Equal(t, "XXXXXX", e.Value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
45
opaprocessor/utils.go
Normal file
45
opaprocessor/utils.go
Normal file
@@ -0,0 +1,45 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
// ConvertFrameworksToPolicies convert list of frameworks to list of policies
|
||||
func ConvertFrameworksToPolicies(frameworks []reporthandling.Framework, version string) *cautils.Policies {
|
||||
policies := cautils.NewPolicies()
|
||||
policies.Set(frameworks, version)
|
||||
return policies
|
||||
}
|
||||
|
||||
// ConvertFrameworksToSummaryDetails initialize the summary details for the report object
|
||||
func ConvertFrameworksToSummaryDetails(summaryDetails *reportsummary.SummaryDetails, frameworks []reporthandling.Framework, policies *cautils.Policies) {
|
||||
if summaryDetails.Controls == nil {
|
||||
summaryDetails.Controls = make(map[string]reportsummary.ControlSummary)
|
||||
}
|
||||
for i := range frameworks {
|
||||
controls := map[string]reportsummary.ControlSummary{}
|
||||
for j := range frameworks[i].Controls {
|
||||
id := frameworks[i].Controls[j].ControlID
|
||||
if _, ok := policies.Controls[id]; ok {
|
||||
c := reportsummary.ControlSummary{
|
||||
Name: frameworks[i].Controls[j].Name,
|
||||
ControlID: id,
|
||||
ScoreFactor: frameworks[i].Controls[j].BaseScore,
|
||||
Description: frameworks[i].Controls[j].Description,
|
||||
Remediation: frameworks[i].Controls[j].Remediation,
|
||||
}
|
||||
controls[frameworks[i].Controls[j].ControlID] = c
|
||||
summaryDetails.Controls[id] = c
|
||||
}
|
||||
}
|
||||
if cautils.StringInSlice(policies.Frameworks, frameworks[i].Name) != cautils.ValueNotFound {
|
||||
summaryDetails.Frameworks = append(summaryDetails.Frameworks, reportsummary.FrameworkSummary{
|
||||
Name: frameworks[i].Name,
|
||||
Controls: controls,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
30
opaprocessor/utils_test.go
Normal file
30
opaprocessor/utils_test.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package opaprocessor
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/kubescape/mocks"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
func TestConvertFrameworksToPolicies(t *testing.T) {
|
||||
fw0 := mocks.MockFramework_0006_0013()
|
||||
fw1 := mocks.MockFramework_0044()
|
||||
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "")
|
||||
assert.Equal(t, 2, len(policies.Frameworks))
|
||||
assert.Equal(t, 3, len(policies.Controls))
|
||||
}
|
||||
func TestInitializeSummaryDetails(t *testing.T) {
|
||||
fw0 := mocks.MockFramework_0006_0013()
|
||||
fw1 := mocks.MockFramework_0044()
|
||||
|
||||
summaryDetails := reportsummary.SummaryDetails{}
|
||||
frameworks := []reporthandling.Framework{*fw0, *fw1}
|
||||
policies := ConvertFrameworksToPolicies([]reporthandling.Framework{*fw0, *fw1}, "")
|
||||
ConvertFrameworksToSummaryDetails(&summaryDetails, frameworks, policies)
|
||||
assert.Equal(t, 2, len(summaryDetails.Frameworks))
|
||||
assert.Equal(t, 3, len(summaryDetails.Controls))
|
||||
}
|
||||
@@ -35,22 +35,29 @@ func (policyHandler *PolicyHandler) HandleNotificationRequest(notification *repo
|
||||
return err
|
||||
}
|
||||
|
||||
k8sResources, err := policyHandler.getResources(notification, opaSessionObj, scanInfo)
|
||||
err := policyHandler.getResources(notification, opaSessionObj, scanInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if k8sResources == nil || len(*k8sResources) == 0 {
|
||||
if opaSessionObj.K8SResources == nil || len(*opaSessionObj.K8SResources) == 0 {
|
||||
return fmt.Errorf("empty list of resources")
|
||||
}
|
||||
opaSessionObj.K8SResources = k8sResources
|
||||
|
||||
// update channel
|
||||
*policyHandler.processPolicy <- opaSessionObj
|
||||
return nil
|
||||
}
|
||||
|
||||
func (policyHandler *PolicyHandler) getResources(notification *reporthandling.PolicyNotification, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) (*cautils.K8SResources, error) {
|
||||
func (policyHandler *PolicyHandler) getResources(notification *reporthandling.PolicyNotification, opaSessionObj *cautils.OPASessionObj, scanInfo *cautils.ScanInfo) error {
|
||||
|
||||
opaSessionObj.PostureReport.ClusterAPIServerInfo = policyHandler.resourceHandler.GetClusterAPIServerInfo()
|
||||
return policyHandler.resourceHandler.GetResources(opaSessionObj.Frameworks, ¬ification.Designators)
|
||||
resourcesMap, allResources, err := policyHandler.resourceHandler.GetResources(opaSessionObj.Frameworks, ¬ification.Designators)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opaSessionObj.K8SResources = resourcesMap
|
||||
opaSessionObj.AllResources = allResources
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package policyhandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
@@ -15,19 +16,19 @@ func (policyHandler *PolicyHandler) getPolicies(notification *reporthandling.Pol
|
||||
return err
|
||||
}
|
||||
if len(frameworks) == 0 {
|
||||
return fmt.Errorf("failed to download policies, please ARMO team for more information")
|
||||
return fmt.Errorf("failed to download policies: '%s'. Make sure the policy exist and you spelled it correctly. For more information, please feel free to contact ARMO team", strings.Join(policyIdentifierToSlice(notification.Rules), ","))
|
||||
}
|
||||
|
||||
policiesAndResources.Frameworks = frameworks
|
||||
|
||||
// get exceptions
|
||||
exceptionPolicies, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.CustomerGUID, cautils.ClusterName)
|
||||
exceptionPolicies, err := policyHandler.getters.ExceptionsGetter.GetExceptions(cautils.ClusterName)
|
||||
if err == nil {
|
||||
policiesAndResources.Exceptions = exceptionPolicies
|
||||
}
|
||||
|
||||
// get account configuration
|
||||
controlsInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.CustomerGUID, cautils.ClusterName)
|
||||
controlsInputs, err := policyHandler.getters.ControlsInputsGetter.GetControlsInputs(cautils.ClusterName)
|
||||
if err == nil {
|
||||
policiesAndResources.RegoInputData.PostureControlInputs = controlsInputs
|
||||
}
|
||||
@@ -70,3 +71,11 @@ func (policyHandler *PolicyHandler) getScanPolicies(notification *reporthandling
|
||||
}
|
||||
return frameworks, nil
|
||||
}
|
||||
|
||||
func policyIdentifierToSlice(rules []reporthandling.PolicyIdentifier) []string {
|
||||
s := []string{}
|
||||
for i := range rules {
|
||||
s = append(s, fmt.Sprintf("%s: %s", rules[i].Kind, rules[i].Name))
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
@@ -9,14 +9,14 @@ import (
|
||||
)
|
||||
|
||||
type IFieldSelector interface {
|
||||
GetNamespacesSelector(*schema.GroupVersionResource) string
|
||||
GetNamespacesSelectors(*schema.GroupVersionResource) []string
|
||||
}
|
||||
|
||||
type EmptySelector struct {
|
||||
}
|
||||
|
||||
func (es *EmptySelector) GetNamespacesSelector(resource *schema.GroupVersionResource) string {
|
||||
return ""
|
||||
func (es *EmptySelector) GetNamespacesSelectors(resource *schema.GroupVersionResource) []string {
|
||||
return []string{""} //
|
||||
}
|
||||
|
||||
type ExcludeSelector struct {
|
||||
@@ -34,28 +34,36 @@ type IncludeSelector struct {
|
||||
func NewIncludeSelector(ns string) *IncludeSelector {
|
||||
return &IncludeSelector{namespace: ns}
|
||||
}
|
||||
func (es *ExcludeSelector) GetNamespacesSelector(resource *schema.GroupVersionResource) string {
|
||||
return getNamespacesSelector(resource, es.namespace, "!=")
|
||||
func (es *ExcludeSelector) GetNamespacesSelectors(resource *schema.GroupVersionResource) []string {
|
||||
fieldSelectors := ""
|
||||
for _, n := range strings.Split(es.namespace, ",") {
|
||||
if n != "" {
|
||||
fieldSelectors += getNamespacesSelector(resource, n, "!=") + ","
|
||||
}
|
||||
}
|
||||
return []string{fieldSelectors}
|
||||
|
||||
}
|
||||
|
||||
func (is *IncludeSelector) GetNamespacesSelector(resource *schema.GroupVersionResource) string {
|
||||
return getNamespacesSelector(resource, is.namespace, "==")
|
||||
func (is *IncludeSelector) GetNamespacesSelectors(resource *schema.GroupVersionResource) []string {
|
||||
fieldSelectors := []string{}
|
||||
for _, n := range strings.Split(is.namespace, ",") {
|
||||
if n != "" {
|
||||
fieldSelectors = append(fieldSelectors, getNamespacesSelector(resource, n, "=="))
|
||||
}
|
||||
}
|
||||
return fieldSelectors
|
||||
}
|
||||
|
||||
func getNamespacesSelector(resource *schema.GroupVersionResource, ns, operator string) string {
|
||||
fieldSelectors := ""
|
||||
fieldSelector := "metadata."
|
||||
if resource.Resource == "namespaces" {
|
||||
fieldSelector += "name"
|
||||
} else if k8sinterface.IsNamespaceScope(resource.Group, resource.Resource) {
|
||||
} else if k8sinterface.IsResourceInNamespaceScope(resource.Resource) {
|
||||
fieldSelector += "namespace"
|
||||
} else {
|
||||
return ""
|
||||
}
|
||||
namespacesSlice := strings.Split(ns, ",")
|
||||
for _, n := range namespacesSlice {
|
||||
fieldSelectors += fmt.Sprintf("%s%s%s,", fieldSelector, operator, n)
|
||||
}
|
||||
return fieldSelectors
|
||||
return fmt.Sprintf("%s%s%s", fieldSelector, operator, ns)
|
||||
|
||||
}
|
||||
|
||||
43
resourcehandler/fieldselector_test.go
Normal file
43
resourcehandler/fieldselector_test.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
func TestGetNamespacesSelector(t *testing.T) {
|
||||
k8sinterface.InitializeMapResourcesMock()
|
||||
assert.Equal(t, "metadata.namespace==default", getNamespacesSelector(&schema.GroupVersionResource{Version: "v1", Resource: "pods"}, "default", "=="))
|
||||
assert.Equal(t, "", getNamespacesSelector(&schema.GroupVersionResource{Version: "v1", Resource: "nodes"}, "default", "=="))
|
||||
}
|
||||
|
||||
func TestExcludedNamespacesSelectors(t *testing.T) {
|
||||
k8sinterface.InitializeMapResourcesMock()
|
||||
|
||||
es := NewExcludeSelector("default,ingress")
|
||||
selectors := es.GetNamespacesSelectors(&schema.GroupVersionResource{Resource: "pods"})
|
||||
assert.Equal(t, 1, len(selectors))
|
||||
assert.Equal(t, "metadata.namespace!=default,metadata.namespace!=ingress,", selectors[0])
|
||||
|
||||
selectors2 := es.GetNamespacesSelectors(&schema.GroupVersionResource{Resource: "namespaces"})
|
||||
assert.Equal(t, 1, len(selectors2))
|
||||
assert.Equal(t, "metadata.name!=default,metadata.name!=ingress,", selectors2[0])
|
||||
}
|
||||
|
||||
func TestIncludeNamespacesSelectors(t *testing.T) {
|
||||
k8sinterface.InitializeMapResourcesMock()
|
||||
|
||||
is := NewIncludeSelector("default,ingress")
|
||||
selectors := is.GetNamespacesSelectors(&schema.GroupVersionResource{Resource: "pods"})
|
||||
assert.Equal(t, 2, len(selectors))
|
||||
assert.Equal(t, "metadata.namespace==default", selectors[0])
|
||||
assert.Equal(t, "metadata.namespace==ingress", selectors[1])
|
||||
|
||||
selectors2 := is.GetNamespacesSelectors(&schema.GroupVersionResource{Resource: "namespaces"})
|
||||
assert.Equal(t, 2, len(selectors2))
|
||||
assert.Equal(t, "metadata.name==default", selectors2[0])
|
||||
assert.Equal(t, "metadata.name==ingress", selectors2[1])
|
||||
}
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
|
||||
"gopkg.in/yaml.v2"
|
||||
@@ -37,18 +38,25 @@ type FileResourceHandler struct {
|
||||
}
|
||||
|
||||
func NewFileResourceHandler(inputPatterns []string) *FileResourceHandler {
|
||||
k8sinterface.InitializeMapResourcesMock() // initialize the resource map
|
||||
return &FileResourceHandler{
|
||||
inputPatterns: inputPatterns,
|
||||
}
|
||||
}
|
||||
|
||||
func (fileHandler *FileResourceHandler) GetResources(frameworks []reporthandling.Framework, designator *armotypes.PortalDesignator) (*cautils.K8SResources, error) {
|
||||
workloads := []k8sinterface.IWorkload{}
|
||||
func (fileHandler *FileResourceHandler) GetResources(frameworks []reporthandling.Framework, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, error) {
|
||||
|
||||
// build resources map
|
||||
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
|
||||
k8sResources := setResourceMap(frameworks)
|
||||
allResources := map[string]workloadinterface.IMetadata{}
|
||||
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
|
||||
// load resource from local file system
|
||||
w, err := loadResourcesFromFiles(fileHandler.inputPatterns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, allResources, err
|
||||
}
|
||||
if w != nil {
|
||||
workloads = append(workloads, w...)
|
||||
@@ -57,31 +65,32 @@ func (fileHandler *FileResourceHandler) GetResources(frameworks []reporthandling
|
||||
// load resources from url
|
||||
w, err = loadResourcesFromUrl(fileHandler.inputPatterns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, allResources, err
|
||||
}
|
||||
if w != nil {
|
||||
workloads = append(workloads, w...)
|
||||
}
|
||||
|
||||
if len(workloads) == 0 {
|
||||
return nil, fmt.Errorf("empty list of workloads - no workloads found")
|
||||
return nil, allResources, fmt.Errorf("empty list of workloads - no workloads found")
|
||||
}
|
||||
|
||||
// map all resources: map["/group/version/kind"][]<k8s workloads>
|
||||
allResources := mapResources(workloads)
|
||||
|
||||
// build resources map
|
||||
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads>
|
||||
k8sResources := setResourceMap(frameworks) // TODO - support designators
|
||||
mappedResources := mapResources(workloads)
|
||||
|
||||
// save only relevant resources
|
||||
for i := range allResources {
|
||||
for i := range mappedResources {
|
||||
if _, ok := (*k8sResources)[i]; ok {
|
||||
(*k8sResources)[i] = allResources[i]
|
||||
ids := []string{}
|
||||
for j := range mappedResources[i] {
|
||||
ids = append(ids, mappedResources[i][j].GetID())
|
||||
allResources[mappedResources[i][j].GetID()] = mappedResources[i][j]
|
||||
}
|
||||
(*k8sResources)[i] = ids
|
||||
}
|
||||
}
|
||||
|
||||
return k8sResources, nil
|
||||
return k8sResources, allResources, nil
|
||||
|
||||
}
|
||||
|
||||
@@ -89,7 +98,7 @@ func (fileHandler *FileResourceHandler) GetClusterAPIServerInfo() *version.Info
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadResourcesFromFiles(inputPatterns []string) ([]k8sinterface.IWorkload, error) {
|
||||
func loadResourcesFromFiles(inputPatterns []string) ([]workloadinterface.IMetadata, error) {
|
||||
files, errs := listFiles(inputPatterns)
|
||||
if len(errs) > 0 {
|
||||
cautils.ErrorDisplay(fmt.Sprintf("%v", errs)) // TODO - print error
|
||||
@@ -106,32 +115,36 @@ func loadResourcesFromFiles(inputPatterns []string) ([]k8sinterface.IWorkload, e
|
||||
}
|
||||
|
||||
// build resources map
|
||||
func mapResources(workloads []k8sinterface.IWorkload) map[string][]map[string]interface{} {
|
||||
allResources := map[string][]map[string]interface{}{}
|
||||
func mapResources(workloads []workloadinterface.IMetadata) map[string][]workloadinterface.IMetadata {
|
||||
|
||||
allResources := map[string][]workloadinterface.IMetadata{}
|
||||
for i := range workloads {
|
||||
groupVersionResource, err := k8sinterface.GetGroupVersionResource(workloads[i].GetKind())
|
||||
if err != nil {
|
||||
// TODO - print warning
|
||||
continue
|
||||
}
|
||||
if groupVersionResource.Group != workloads[i].GetGroup() || groupVersionResource.Version != workloads[i].GetVersion() {
|
||||
// TODO - print warning
|
||||
continue
|
||||
|
||||
if k8sinterface.IsTypeWorkload(workloads[i].GetObject()) {
|
||||
w := workloadinterface.NewWorkloadObj(workloads[i].GetObject())
|
||||
if groupVersionResource.Group != w.GetGroup() || groupVersionResource.Version != w.GetVersion() {
|
||||
// TODO - print warning
|
||||
continue
|
||||
}
|
||||
}
|
||||
resourceTriplets := k8sinterface.JoinResourceTriplets(groupVersionResource.Group, groupVersionResource.Version, groupVersionResource.Resource)
|
||||
if r, ok := allResources[resourceTriplets]; ok {
|
||||
r = append(r, workloads[i].GetWorkload())
|
||||
allResources[resourceTriplets] = r
|
||||
allResources[resourceTriplets] = append(r, workloads[i])
|
||||
} else {
|
||||
allResources[resourceTriplets] = []map[string]interface{}{workloads[i].GetWorkload()}
|
||||
allResources[resourceTriplets] = []workloadinterface.IMetadata{workloads[i]}
|
||||
}
|
||||
}
|
||||
return allResources
|
||||
|
||||
}
|
||||
|
||||
func loadFiles(filePaths []string) ([]k8sinterface.IWorkload, []error) {
|
||||
workloads := []k8sinterface.IWorkload{}
|
||||
func loadFiles(filePaths []string) ([]workloadinterface.IMetadata, []error) {
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
errs := []error{}
|
||||
for i := range filePaths {
|
||||
f, err := loadFile(filePaths[i])
|
||||
@@ -151,7 +164,7 @@ func loadFiles(filePaths []string) ([]k8sinterface.IWorkload, []error) {
|
||||
func loadFile(filePath string) ([]byte, error) {
|
||||
return os.ReadFile(filePath)
|
||||
}
|
||||
func readFile(fileContent []byte, fileFromat FileFormat) ([]k8sinterface.IWorkload, []error) {
|
||||
func readFile(fileContent []byte, fileFromat FileFormat) ([]workloadinterface.IMetadata, []error) {
|
||||
|
||||
switch fileFromat {
|
||||
case YAML_FILE_FORMAT:
|
||||
@@ -185,12 +198,12 @@ func listFiles(patterns []string) ([]string, []error) {
|
||||
return files, errs
|
||||
}
|
||||
|
||||
func readYamlFile(yamlFile []byte) ([]k8sinterface.IWorkload, []error) {
|
||||
func readYamlFile(yamlFile []byte) ([]workloadinterface.IMetadata, []error) {
|
||||
errs := []error{}
|
||||
|
||||
r := bytes.NewReader(yamlFile)
|
||||
dec := yaml.NewDecoder(r)
|
||||
yamlObjs := []k8sinterface.IWorkload{}
|
||||
yamlObjs := []workloadinterface.IMetadata{}
|
||||
|
||||
var t interface{}
|
||||
for dec.Decode(&t) == nil {
|
||||
@@ -199,7 +212,13 @@ func readYamlFile(yamlFile []byte) ([]k8sinterface.IWorkload, []error) {
|
||||
continue
|
||||
}
|
||||
if obj, ok := j.(map[string]interface{}); ok {
|
||||
yamlObjs = append(yamlObjs, workloadinterface.NewWorkloadObj(obj))
|
||||
if o := objectsenvelopes.NewObject(obj); o != nil {
|
||||
if o.GetKind() == "List" {
|
||||
yamlObjs = append(yamlObjs, handleListObject(o)...)
|
||||
} else {
|
||||
yamlObjs = append(yamlObjs, o)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
errs = append(errs, fmt.Errorf("failed to convert yaml file to map[string]interface, file content: %v", j))
|
||||
}
|
||||
@@ -208,8 +227,8 @@ func readYamlFile(yamlFile []byte) ([]k8sinterface.IWorkload, []error) {
|
||||
return yamlObjs, errs
|
||||
}
|
||||
|
||||
func readJsonFile(jsonFile []byte) ([]k8sinterface.IWorkload, []error) {
|
||||
workloads := []k8sinterface.IWorkload{}
|
||||
func readJsonFile(jsonFile []byte) ([]workloadinterface.IMetadata, []error) {
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
var jsonObj interface{}
|
||||
if err := json.Unmarshal(jsonFile, &jsonObj); err != nil {
|
||||
return workloads, []error{err}
|
||||
@@ -219,11 +238,13 @@ func readJsonFile(jsonFile []byte) ([]k8sinterface.IWorkload, []error) {
|
||||
|
||||
return workloads, nil
|
||||
}
|
||||
func convertJsonToWorkload(jsonObj interface{}, workloads *[]k8sinterface.IWorkload) {
|
||||
func convertJsonToWorkload(jsonObj interface{}, workloads *[]workloadinterface.IMetadata) {
|
||||
|
||||
switch x := jsonObj.(type) {
|
||||
case map[string]interface{}:
|
||||
(*workloads) = append(*workloads, workloadinterface.NewWorkloadObj(x))
|
||||
if o := objectsenvelopes.NewObject(x); o != nil {
|
||||
(*workloads) = append(*workloads, o)
|
||||
}
|
||||
case []interface{}:
|
||||
for i := range x {
|
||||
convertJsonToWorkload(x[i], workloads)
|
||||
@@ -286,3 +307,20 @@ func getFileFormat(filePath string) FileFormat {
|
||||
return FileFormat(filePath)
|
||||
}
|
||||
}
|
||||
|
||||
// handleListObject handle a List manifest
|
||||
func handleListObject(obj workloadinterface.IMetadata) []workloadinterface.IMetadata {
|
||||
yamlObjs := []workloadinterface.IMetadata{}
|
||||
if i, ok := workloadinterface.InspectMap(obj.GetObject(), "items"); ok && i != nil {
|
||||
if items, ok := i.([]interface{}); ok && items != nil {
|
||||
for item := range items {
|
||||
if m, ok := items[item].(map[string]interface{}); ok && m != nil {
|
||||
if o := objectsenvelopes.NewObject(m); o != nil {
|
||||
yamlObjs = append(yamlObjs, o)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return yamlObjs
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ func TestLoadFile(t *testing.T) {
|
||||
t.Errorf("%v", err)
|
||||
}
|
||||
}
|
||||
func TestLoadResources(t *testing.T) {
|
||||
func TestMapResources(t *testing.T) {
|
||||
// policyHandler := &PolicyHandler{}
|
||||
// k8sResources, err := policyHandler.loadResources(opaSessionObj.Frameworks, scanInfo)
|
||||
// files, _ := listFiles([]string{onlineBoutiquePath()})
|
||||
|
||||
@@ -3,11 +3,17 @@ package resourcehandler
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/hostsensorutils"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
|
||||
"github.com/armosec/k8s-interface/cloudsupport"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
|
||||
@@ -20,45 +26,67 @@ import (
|
||||
)
|
||||
|
||||
type K8sResourceHandler struct {
|
||||
k8s *k8sinterface.KubernetesApi
|
||||
fieldSelector IFieldSelector
|
||||
k8s *k8sinterface.KubernetesApi
|
||||
hostSensorHandler hostsensorutils.IHostSensor
|
||||
fieldSelector IFieldSelector
|
||||
rbacObjectsAPI *cautils.RBACObjects
|
||||
}
|
||||
|
||||
func NewK8sResourceHandler(k8s *k8sinterface.KubernetesApi, fieldSelector IFieldSelector) *K8sResourceHandler {
|
||||
func NewK8sResourceHandler(k8s *k8sinterface.KubernetesApi, fieldSelector IFieldSelector, hostSensorHandler hostsensorutils.IHostSensor, rbacObjects *cautils.RBACObjects) *K8sResourceHandler {
|
||||
return &K8sResourceHandler{
|
||||
k8s: k8s,
|
||||
fieldSelector: fieldSelector,
|
||||
k8s: k8s,
|
||||
fieldSelector: fieldSelector,
|
||||
hostSensorHandler: hostSensorHandler,
|
||||
rbacObjectsAPI: rbacObjects,
|
||||
}
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) GetResources(frameworks []reporthandling.Framework, designator *armotypes.PortalDesignator) (*cautils.K8SResources, error) {
|
||||
func (k8sHandler *K8sResourceHandler) GetResources(frameworks []reporthandling.Framework, designator *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, error) {
|
||||
allResources := map[string]workloadinterface.IMetadata{}
|
||||
|
||||
// get k8s resources
|
||||
cautils.ProgressTextDisplay("Accessing Kubernetes objects")
|
||||
|
||||
cautils.StartSpinner()
|
||||
|
||||
// build resources map
|
||||
// map resources based on framework required resources: map["/group/version/kind"][]<k8s workloads ids>
|
||||
k8sResourcesMap := setResourceMap(frameworks)
|
||||
|
||||
// get namespace and labels from designator (ignore cluster labels)
|
||||
_, namespace, labels := armotypes.DigestPortalDesignator(designator)
|
||||
|
||||
// pull k8s recourses
|
||||
if err := k8sHandler.pullResources(k8sResourcesMap, namespace, labels); err != nil {
|
||||
return k8sResourcesMap, err
|
||||
if err := k8sHandler.pullResources(k8sResourcesMap, allResources, namespace, labels); err != nil {
|
||||
return k8sResourcesMap, allResources, err
|
||||
}
|
||||
if err := k8sHandler.collectHostResources(allResources, k8sResourcesMap); err != nil {
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: failed to collect host sensor resources\n")
|
||||
}
|
||||
|
||||
cautils.SuccessTextDisplay("Accessed successfully to Kubernetes objects, let’s start!!!")
|
||||
return k8sResourcesMap, nil
|
||||
if err := k8sHandler.collectRbacResources(allResources); err != nil {
|
||||
cautils.WarningDisplay(os.Stderr, "Warning: failed to collect rbac resources\n")
|
||||
}
|
||||
if err := getCloudProviderDescription(allResources, k8sResourcesMap); err != nil {
|
||||
cautils.WarningDisplay(os.Stderr, fmt.Sprintf("Warning: %v\n", err.Error()))
|
||||
}
|
||||
|
||||
cautils.StopSpinner()
|
||||
|
||||
cautils.SuccessTextDisplay("Accessed successfully to Kubernetes objects")
|
||||
return k8sResourcesMap, allResources, nil
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) GetClusterAPIServerInfo() *version.Info {
|
||||
clusterAPIServerInfo, err := k8sHandler.k8s.KubernetesClient.Discovery().ServerVersion()
|
||||
clusterAPIServerInfo, err := k8sHandler.k8s.DiscoveryClient.ServerVersion()
|
||||
if err != nil {
|
||||
cautils.ErrorDisplay(fmt.Sprintf("Failed to discover API server information: %v", err))
|
||||
return nil
|
||||
}
|
||||
return clusterAPIServerInfo
|
||||
}
|
||||
func (k8sHandler *K8sResourceHandler) pullResources(k8sResources *cautils.K8SResources, namespace string, labels map[string]string) error {
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) pullResources(k8sResources *cautils.K8SResources, allResources map[string]workloadinterface.IMetadata, namespace string, labels map[string]string) error {
|
||||
|
||||
var errs error
|
||||
for groupResource := range *k8sResources {
|
||||
@@ -66,46 +94,124 @@ func (k8sHandler *K8sResourceHandler) pullResources(k8sResources *cautils.K8SRes
|
||||
gvr := schema.GroupVersionResource{Group: apiGroup, Version: apiVersion, Resource: resource}
|
||||
result, err := k8sHandler.pullSingleResource(&gvr, namespace, labels)
|
||||
if err != nil {
|
||||
// handle error
|
||||
if errs == nil {
|
||||
errs = err
|
||||
} else {
|
||||
errs = fmt.Errorf("%s\n%s", errs, err.Error())
|
||||
if !strings.Contains(err.Error(), "the server could not find the requested resource") {
|
||||
// handle error
|
||||
if errs == nil {
|
||||
errs = err
|
||||
} else {
|
||||
errs = fmt.Errorf("%s\n%s", errs, err.Error())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// store result as []map[string]interface{}
|
||||
(*k8sResources)[groupResource] = k8sinterface.ConvertUnstructuredSliceToMap(k8sinterface.FilterOutOwneredResources(result))
|
||||
continue
|
||||
}
|
||||
// store result as []map[string]interface{}
|
||||
metaObjs := ConvertMapListToMeta(k8sinterface.ConvertUnstructuredSliceToMap(result))
|
||||
for i := range metaObjs {
|
||||
allResources[metaObjs[i].GetID()] = metaObjs[i]
|
||||
}
|
||||
(*k8sResources)[groupResource] = workloadinterface.ListMetaIDs(metaObjs)
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) pullSingleResource(resource *schema.GroupVersionResource, namespace string, labels map[string]string) ([]unstructured.Unstructured, error) {
|
||||
|
||||
resourceList := []unstructured.Unstructured{}
|
||||
// set labels
|
||||
listOptions := metav1.ListOptions{}
|
||||
fieldSelectors := k8sHandler.fieldSelector.GetNamespacesSelectors(resource)
|
||||
for i := range fieldSelectors {
|
||||
|
||||
listOptions.FieldSelector += k8sHandler.fieldSelector.GetNamespacesSelector(resource)
|
||||
listOptions.FieldSelector = fieldSelectors[i]
|
||||
|
||||
if len(labels) > 0 {
|
||||
set := k8slabels.Set(labels)
|
||||
listOptions.LabelSelector = set.AsSelector().String()
|
||||
}
|
||||
|
||||
// set dynamic object
|
||||
var clientResource dynamic.ResourceInterface
|
||||
if namespace != "" && k8sinterface.IsNamespaceScope(resource) {
|
||||
clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource).Namespace(namespace)
|
||||
} else {
|
||||
clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource)
|
||||
}
|
||||
|
||||
// list resources
|
||||
result, err := clientResource.List(context.Background(), listOptions)
|
||||
if err != nil || result == nil {
|
||||
return nil, fmt.Errorf("failed to get resource: %v, namespace: %s, labelSelector: %v, reason: %v", resource, namespace, listOptions.LabelSelector, err)
|
||||
}
|
||||
|
||||
resourceList = append(resourceList, result.Items...)
|
||||
|
||||
if len(labels) > 0 {
|
||||
set := k8slabels.Set(labels)
|
||||
listOptions.LabelSelector = set.AsSelector().String()
|
||||
}
|
||||
|
||||
// set dynamic object
|
||||
var clientResource dynamic.ResourceInterface
|
||||
if namespace != "" && k8sinterface.IsNamespaceScope(resource.Group, resource.Resource) {
|
||||
clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource).Namespace(namespace)
|
||||
} else {
|
||||
clientResource = k8sHandler.k8s.DynamicClient.Resource(*resource)
|
||||
}
|
||||
|
||||
// list resources
|
||||
result, err := clientResource.List(context.Background(), listOptions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get resource: %v, namespace: %s, labelSelector: %v, reason: %s", resource, namespace, listOptions.LabelSelector, err.Error())
|
||||
}
|
||||
|
||||
return result.Items, nil
|
||||
return resourceList, nil
|
||||
|
||||
}
|
||||
func ConvertMapListToMeta(resourceMap []map[string]interface{}) []workloadinterface.IMetadata {
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
for i := range resourceMap {
|
||||
if w := objectsenvelopes.NewObject(resourceMap[i]); w != nil {
|
||||
workloads = append(workloads, w)
|
||||
}
|
||||
}
|
||||
return workloads
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) collectHostResources(allResources map[string]workloadinterface.IMetadata, resourcesMap *cautils.K8SResources) error {
|
||||
hostResources, err := k8sHandler.hostSensorHandler.CollectResources()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for rscIdx := range hostResources {
|
||||
group, version := getGroupNVersion(hostResources[rscIdx].GetApiVersion())
|
||||
groupResource := k8sinterface.JoinResourceTriplets(group, version, hostResources[rscIdx].GetKind())
|
||||
allResources[hostResources[rscIdx].GetID()] = &hostResources[rscIdx]
|
||||
|
||||
grpResourceList, ok := (*resourcesMap)[groupResource]
|
||||
if !ok {
|
||||
grpResourceList = make([]string, 0)
|
||||
}
|
||||
(*resourcesMap)[groupResource] = append(grpResourceList, hostResources[rscIdx].GetID())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k8sHandler *K8sResourceHandler) collectRbacResources(allResources map[string]workloadinterface.IMetadata) error {
|
||||
if k8sHandler.rbacObjectsAPI == nil {
|
||||
return nil
|
||||
}
|
||||
allRbacResources, err := k8sHandler.rbacObjectsAPI.ListAllResources()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for k, v := range allRbacResources {
|
||||
allResources[k] = v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getCloudProviderDescription(allResources map[string]workloadinterface.IMetadata, k8sResourcesMap *cautils.K8SResources) error {
|
||||
if cloudsupport.IsRunningInCloudProvider() {
|
||||
wl, err := cloudsupport.GetDescriptiveInfoFromCloudProvider()
|
||||
if err != nil {
|
||||
cluster := k8sinterface.GetCurrentContext().Cluster
|
||||
provider := cloudsupport.GetCloudProvider(cluster)
|
||||
// Return error with useful info on how to configure credentials for getting cloud provider info
|
||||
switch provider {
|
||||
case "gke":
|
||||
return fmt.Errorf("could not get descriptive information about gke cluster: %s using sdk client. See https://developers.google.com/accounts/docs/application-default-credentials for more information", cluster)
|
||||
case "eks":
|
||||
return fmt.Errorf("could not get descriptive information about eks cluster: %s using sdk client. Check out how to configure credentials in https://docs.aws.amazon.com/sdk-for-go/api/", cluster)
|
||||
case "aks":
|
||||
return fmt.Errorf("could not get descriptive information about aks cluster: %s. %v", cluster, err.Error())
|
||||
}
|
||||
return err
|
||||
}
|
||||
allResources[wl.GetID()] = wl
|
||||
(*k8sResourcesMap)[fmt.Sprintf("%s/%s", wl.GetApiVersion(), wl.GetKind())] = []string{wl.GetID()}
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package resourcehandler
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
|
||||
@@ -66,3 +68,15 @@ func insertK8sResources(k8sResources map[string]map[string]map[string]interface{
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getGroupNVersion(apiVersion string) (string, string) {
|
||||
gv := strings.Split(apiVersion, "/")
|
||||
group, version := "", ""
|
||||
if len(gv) >= 1 {
|
||||
group = gv[0]
|
||||
}
|
||||
if len(gv) >= 2 {
|
||||
version = gv[1]
|
||||
}
|
||||
return group, version
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ func TestGetK8sResources(t *testing.T) {
|
||||
// getK8sResources
|
||||
}
|
||||
func TestSetResourceMap(t *testing.T) {
|
||||
k8sinterface.InitializeMapResourcesMock()
|
||||
framework := reporthandling.MockFrameworkA()
|
||||
k8sResources := setResourceMap([]reporthandling.Framework{*framework})
|
||||
resources := k8sinterface.ResourceGroupToString("*", "v1", "Pod")
|
||||
|
||||
@@ -2,12 +2,13 @@ package resourcehandler
|
||||
|
||||
import (
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"k8s.io/apimachinery/pkg/version"
|
||||
)
|
||||
|
||||
type IResourceHandler interface {
|
||||
GetResources(frameworks []reporthandling.Framework, designator *armotypes.PortalDesignator) (*cautils.K8SResources, error)
|
||||
GetResources([]reporthandling.Framework, *armotypes.PortalDesignator) (*cautils.K8SResources, map[string]workloadinterface.IMetadata, error)
|
||||
GetClusterAPIServerInfo() *version.Info
|
||||
}
|
||||
|
||||
@@ -7,11 +7,11 @@ import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
)
|
||||
|
||||
func loadResourcesFromUrl(inputPatterns []string) ([]k8sinterface.IWorkload, error) {
|
||||
func loadResourcesFromUrl(inputPatterns []string) ([]workloadinterface.IMetadata, error) {
|
||||
urls := listUrls(inputPatterns)
|
||||
if len(urls) == 0 {
|
||||
return nil, nil
|
||||
@@ -43,8 +43,8 @@ func listUrls(patterns []string) []string {
|
||||
return urls
|
||||
}
|
||||
|
||||
func downloadFiles(urls []string) ([]k8sinterface.IWorkload, []error) {
|
||||
workloads := []k8sinterface.IWorkload{}
|
||||
func downloadFiles(urls []string) ([]workloadinterface.IMetadata, []error) {
|
||||
workloads := []workloadinterface.IMetadata{}
|
||||
errs := []error{}
|
||||
for i := range urls {
|
||||
f, err := downloadFile(urls[i])
|
||||
|
||||
@@ -1,239 +0,0 @@
|
||||
package printer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/enescakir/emoji"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
type PrettyPrinter struct {
|
||||
writer *os.File
|
||||
summary Summary
|
||||
sortedControlNames []string
|
||||
frameworkSummary ControlSummary
|
||||
}
|
||||
|
||||
func NewPrettyPrinter() *PrettyPrinter {
|
||||
return &PrettyPrinter{
|
||||
summary: NewSummary(),
|
||||
}
|
||||
}
|
||||
|
||||
// Initializes empty printer for new table
|
||||
func (printer *PrettyPrinter) init() *PrettyPrinter {
|
||||
printer.frameworkSummary = ControlSummary{}
|
||||
printer.summary = Summary{}
|
||||
printer.sortedControlNames = []string{}
|
||||
return printer
|
||||
}
|
||||
|
||||
func (printer *PrettyPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
// score := calculatePostureScore(opaSessionObj.PostureReport)
|
||||
for _, report := range opaSessionObj.PostureReport.FrameworkReports {
|
||||
// Print summary table together for control scan
|
||||
if report.Name != "" {
|
||||
printer = printer.init()
|
||||
}
|
||||
printer.summarySetup(report)
|
||||
printer.printResults()
|
||||
printer.printSummaryTable(report.Name)
|
||||
}
|
||||
|
||||
// return score
|
||||
}
|
||||
|
||||
func (printer *PrettyPrinter) SetWriter(outputFile string) {
|
||||
printer.writer = getWriter(outputFile)
|
||||
}
|
||||
|
||||
func (printer *PrettyPrinter) Score(score float32) {
|
||||
}
|
||||
|
||||
func (printer *PrettyPrinter) summarySetup(fr reporthandling.FrameworkReport) {
|
||||
printer.frameworkSummary = ControlSummary{
|
||||
TotalResources: fr.GetNumberOfResources(),
|
||||
TotalFailed: fr.GetNumberOfFailedResources(),
|
||||
TotalWarning: fr.GetNumberOfWarningResources(),
|
||||
}
|
||||
for _, cr := range fr.ControlReports {
|
||||
if len(cr.RuleReports) == 0 {
|
||||
continue
|
||||
}
|
||||
workloadsSummary := listResultSummary(cr.RuleReports)
|
||||
|
||||
printer.summary[cr.Name] = ControlSummary{
|
||||
TotalResources: cr.GetNumberOfResources(),
|
||||
TotalFailed: cr.GetNumberOfFailedResources(),
|
||||
TotalWarning: cr.GetNumberOfWarningResources(),
|
||||
FailedWorkloads: groupByNamespace(workloadsSummary, workloadSummaryFailed),
|
||||
ExcludedWorkloads: groupByNamespace(workloadsSummary, workloadSummaryExclude),
|
||||
Description: cr.Description,
|
||||
Remediation: cr.Remediation,
|
||||
ListInputKinds: cr.ListControlsInputKinds(),
|
||||
}
|
||||
}
|
||||
printer.sortedControlNames = printer.getSortedControlsNames()
|
||||
}
|
||||
func (printer *PrettyPrinter) printResults() {
|
||||
for i := 0; i < len(printer.sortedControlNames); i++ {
|
||||
controlSummary := printer.summary[printer.sortedControlNames[i]]
|
||||
printer.printTitle(printer.sortedControlNames[i], &controlSummary)
|
||||
printer.printResources(&controlSummary)
|
||||
if printer.summary[printer.sortedControlNames[i]].TotalResources > 0 {
|
||||
printer.printSummary(printer.sortedControlNames[i], &controlSummary)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (printer *PrettyPrinter) printSummary(controlName string, controlSummary *ControlSummary) {
|
||||
cautils.SimpleDisplay(printer.writer, "Summary - ")
|
||||
cautils.SuccessDisplay(printer.writer, "Passed:%v ", controlSummary.TotalResources-controlSummary.TotalFailed-controlSummary.TotalWarning)
|
||||
cautils.WarningDisplay(printer.writer, "Excluded:%v ", controlSummary.TotalWarning)
|
||||
cautils.FailureDisplay(printer.writer, "Failed:%v ", controlSummary.TotalFailed)
|
||||
cautils.InfoDisplay(printer.writer, "Total:%v\n", controlSummary.TotalResources)
|
||||
if controlSummary.TotalFailed > 0 {
|
||||
cautils.DescriptionDisplay(printer.writer, "Remediation: %v\n", controlSummary.Remediation)
|
||||
}
|
||||
cautils.DescriptionDisplay(printer.writer, "\n")
|
||||
|
||||
}
|
||||
|
||||
func (printer *PrettyPrinter) printTitle(controlName string, controlSummary *ControlSummary) {
|
||||
cautils.InfoDisplay(printer.writer, "[control: %s] ", controlName)
|
||||
if controlSummary.TotalResources == 0 {
|
||||
cautils.InfoDisplay(printer.writer, "resources not found %v\n", emoji.ConfusedFace)
|
||||
} else if controlSummary.TotalFailed != 0 {
|
||||
cautils.FailureDisplay(printer.writer, "failed %v\n", emoji.SadButRelievedFace)
|
||||
} else if controlSummary.TotalWarning != 0 {
|
||||
cautils.WarningDisplay(printer.writer, "excluded %v\n", emoji.NeutralFace)
|
||||
} else {
|
||||
cautils.SuccessDisplay(printer.writer, "passed %v\n", emoji.ThumbsUp)
|
||||
}
|
||||
|
||||
cautils.DescriptionDisplay(printer.writer, "Description: %s\n", controlSummary.Description)
|
||||
|
||||
}
|
||||
func (printer *PrettyPrinter) printResources(controlSummary *ControlSummary) {
|
||||
|
||||
if len(controlSummary.FailedWorkloads) > 0 {
|
||||
cautils.FailureDisplay(printer.writer, "Failed:\n")
|
||||
printer.printGroupedResources(controlSummary.FailedWorkloads)
|
||||
}
|
||||
if len(controlSummary.ExcludedWorkloads) > 0 {
|
||||
cautils.WarningDisplay(printer.writer, "Excluded:\n")
|
||||
printer.printGroupedResources(controlSummary.ExcludedWorkloads)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (printer *PrettyPrinter) printGroupedResources(workloads map[string][]WorkloadSummary) {
|
||||
|
||||
indent := INDENT
|
||||
|
||||
for ns, rsc := range workloads {
|
||||
preIndent := indent
|
||||
if ns != "" {
|
||||
cautils.SimpleDisplay(printer.writer, "%sNamespace %s\n", indent, ns)
|
||||
}
|
||||
preIndent2 := indent
|
||||
for r := range rsc {
|
||||
indent += indent
|
||||
cautils.SimpleDisplay(printer.writer, fmt.Sprintf("%s%s - %s\n", indent, rsc[r].Kind, rsc[r].Name))
|
||||
indent = preIndent2
|
||||
}
|
||||
indent = preIndent
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func generateRow(control string, cs ControlSummary) []string {
|
||||
row := []string{control}
|
||||
row = append(row, cs.ToSlice()...)
|
||||
if cs.TotalResources != 0 {
|
||||
row = append(row, fmt.Sprintf("%d%s", percentage(cs.TotalResources, cs.TotalFailed), "%"))
|
||||
} else {
|
||||
row = append(row, EmptyPercentage)
|
||||
}
|
||||
return row
|
||||
}
|
||||
|
||||
func generateHeader() []string {
|
||||
return []string{"Control Name", "Failed Resources", "Excluded Resources", "All Resources", "% success"}
|
||||
}
|
||||
|
||||
func percentage(big, small int) int {
|
||||
if big == 0 {
|
||||
if small == 0 {
|
||||
return 100
|
||||
}
|
||||
return 0
|
||||
}
|
||||
return int(float64(float64(big-small)/float64(big)) * 100)
|
||||
}
|
||||
func generateFooter(numControlers, sumFailed, sumWarning, sumTotal int) []string {
|
||||
// Control name | # failed resources | all resources | % success
|
||||
row := []string{}
|
||||
row = append(row, "Resource Summary") //fmt.Sprintf(""%d", numControlers"))
|
||||
row = append(row, fmt.Sprintf("%d", sumFailed))
|
||||
row = append(row, fmt.Sprintf("%d", sumWarning))
|
||||
row = append(row, fmt.Sprintf("%d", sumTotal))
|
||||
if sumTotal != 0 {
|
||||
row = append(row, fmt.Sprintf("%d%s", percentage(sumTotal, sumFailed), "%"))
|
||||
} else {
|
||||
row = append(row, EmptyPercentage)
|
||||
}
|
||||
return row
|
||||
}
|
||||
func (printer *PrettyPrinter) printSummaryTable(framework string) {
|
||||
// For control scan framework will be nil
|
||||
printer.printFramework(framework)
|
||||
|
||||
summaryTable := tablewriter.NewWriter(printer.writer)
|
||||
summaryTable.SetAutoWrapText(false)
|
||||
summaryTable.SetHeader(generateHeader())
|
||||
summaryTable.SetHeaderLine(true)
|
||||
alignments := []int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER}
|
||||
summaryTable.SetColumnAlignment(alignments)
|
||||
|
||||
for i := 0; i < len(printer.sortedControlNames); i++ {
|
||||
controlSummary := printer.summary[printer.sortedControlNames[i]]
|
||||
summaryTable.Append(generateRow(printer.sortedControlNames[i], controlSummary))
|
||||
}
|
||||
summaryTable.SetFooter(generateFooter(len(printer.summary), printer.frameworkSummary.TotalFailed, printer.frameworkSummary.TotalWarning, printer.frameworkSummary.TotalResources))
|
||||
summaryTable.Render()
|
||||
}
|
||||
|
||||
func (printer *PrettyPrinter) printFramework(framework string) {
|
||||
if framework != "" {
|
||||
cautils.InfoTextDisplay(printer.writer, fmt.Sprintf("%s FRAMEWORK\n", framework))
|
||||
}
|
||||
}
|
||||
|
||||
func (printer *PrettyPrinter) getSortedControlsNames() []string {
|
||||
controlNames := make([]string, 0, len(printer.summary))
|
||||
for k := range printer.summary {
|
||||
controlNames = append(controlNames, k)
|
||||
}
|
||||
sort.Strings(controlNames)
|
||||
return controlNames
|
||||
}
|
||||
|
||||
func getWriter(outputFile string) *os.File {
|
||||
os.Remove(outputFile)
|
||||
if outputFile != "" {
|
||||
f, err := os.OpenFile(outputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
fmt.Println("Error opening file")
|
||||
return os.Stdout
|
||||
}
|
||||
return f
|
||||
}
|
||||
return os.Stdout
|
||||
|
||||
}
|
||||
@@ -1,18 +1,19 @@
|
||||
package printer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
)
|
||||
|
||||
var INDENT = " "
|
||||
|
||||
const EmptyPercentage = "NaN"
|
||||
|
||||
const (
|
||||
PrettyFormat string = "pretty-printer"
|
||||
JsonFormat string = "json"
|
||||
JunitResultFormat string = "junit"
|
||||
PrometheusFormat string = "prometheus"
|
||||
PrettyFormat string = "pretty-printer"
|
||||
JsonFormat string = "json"
|
||||
JunitResultFormat string = "junit"
|
||||
PrometheusFormat string = "prometheus"
|
||||
)
|
||||
|
||||
type IPrinter interface {
|
||||
@@ -21,15 +22,16 @@ type IPrinter interface {
|
||||
Score(score float32)
|
||||
}
|
||||
|
||||
func GetPrinter(printFormat string) IPrinter {
|
||||
switch printFormat {
|
||||
case JsonFormat:
|
||||
return NewJsonPrinter()
|
||||
case JunitResultFormat:
|
||||
return NewJunitPrinter()
|
||||
case PrometheusFormat:
|
||||
return NewPrometheusPrinter()
|
||||
default:
|
||||
return NewPrettyPrinter()
|
||||
func GetWriter(outputFile string) *os.File {
|
||||
os.Remove(outputFile)
|
||||
if outputFile != "" {
|
||||
f, err := os.OpenFile(outputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
fmt.Println("failed to open file for writing, reason: ", err.Error())
|
||||
return os.Stdout
|
||||
}
|
||||
return f
|
||||
}
|
||||
return os.Stdout
|
||||
|
||||
}
|
||||
|
||||
@@ -1,100 +0,0 @@
|
||||
package printer
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
type PrometheusPrinter struct {
|
||||
writer *os.File
|
||||
}
|
||||
|
||||
func NewPrometheusPrinter() *PrometheusPrinter {
|
||||
return &PrometheusPrinter{}
|
||||
}
|
||||
|
||||
func (prometheusPrinter *PrometheusPrinter) SetWriter(outputFile string) {
|
||||
prometheusPrinter.writer = getWriter(outputFile)
|
||||
}
|
||||
|
||||
func (prometheusPrinter *PrometheusPrinter) Score(score float32) {
|
||||
fmt.Printf("\n# Overall score out of 100\nkubescape_score %f\n", score*100)
|
||||
}
|
||||
|
||||
func (printer *PrometheusPrinter) printDetails(details []reporthandling.RuleResponse, frameworkName string, controlName string) error {
|
||||
objs := make(map[string]map[string]map[string]int)
|
||||
for _, ruleResponses := range details {
|
||||
for _, k8sObj := range ruleResponses.AlertObject.K8SApiObjects {
|
||||
kind, ok := k8sObj[`kind`].(string)
|
||||
if (!ok) {
|
||||
return errors.New("Found object with non string kind")
|
||||
}
|
||||
apiVersion,ok := k8sObj[`apiVersion`].(string)
|
||||
if (!ok) {
|
||||
return errors.New("Found object with non string apiVersion")
|
||||
}
|
||||
gvk := fmt.Sprintf("%s/%s",apiVersion,kind)
|
||||
metadata,ok := k8sObj[`metadata`].(map[string]interface{})
|
||||
if (!ok) {
|
||||
return errors.New("Found object with non convertable metadata")
|
||||
}
|
||||
name,ok := metadata[`name`].(string)
|
||||
if (!ok) {
|
||||
return errors.New("Found metadata with non string name")
|
||||
}
|
||||
namespace,ok := metadata[`namespace`].(string)
|
||||
if (!ok) {
|
||||
namespace = ""
|
||||
}
|
||||
if (objs[gvk] == nil) {
|
||||
objs[gvk] = make(map[string]map[string]int)
|
||||
}
|
||||
if (objs[gvk][namespace] == nil) {
|
||||
objs[gvk][namespace] = make(map[string]int)
|
||||
}
|
||||
objs[gvk][namespace][name]++
|
||||
}
|
||||
}
|
||||
for gvk, namespaces := range objs {
|
||||
for namespace, names := range namespaces {
|
||||
for name, value := range names {
|
||||
fmt.Fprintf(printer.writer, "# Failed object from %s control %s\n", frameworkName, controlName)
|
||||
if namespace != "" {
|
||||
fmt.Fprintf(printer.writer, "kubescape_object_failed_count{framework=\"%s\",control=\"%s\",namespace=\"%s\",name=\"%s\",groupVersionKind=\"%s\"} %d\n", frameworkName, controlName, namespace, name, gvk, value)
|
||||
} else {
|
||||
fmt.Fprintf(printer.writer, "kubescape_object_failed_count{framework=\"%s\",control=\"%s\",name=\"%s\",groupVersionKind=\"%s\"} %d\n", frameworkName, controlName, name, gvk, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (printer *PrometheusPrinter) printReports(frameworks []reporthandling.FrameworkReport) error {
|
||||
for _, framework := range frameworks {
|
||||
for _, controlReports := range framework.ControlReports {
|
||||
if len(controlReports.RuleReports[0].RuleResponses) > 0 {
|
||||
fmt.Fprintf(printer.writer, "# Number of resources found as part of %s control %s\nkubescape_resources_found_count{framework=\"%s\",control=\"%s\"} %d\n", framework.Name, controlReports.Name, framework.Name, controlReports.Name, controlReports.GetNumberOfResources())
|
||||
fmt.Fprintf(printer.writer, "# Number of resources excluded as part of %s control %s\nkubescape_resources_excluded_count{framework=\"%s\",control=\"%s\"} %d\n", framework.Name, controlReports.Name, framework.Name, controlReports.Name, controlReports.GetNumberOfWarningResources())
|
||||
fmt.Fprintf(printer.writer, "# Number of resources failed as part of %s control %s\nkubescape_resources_failed_count{framework=\"%s\",control=\"%s\"} %d\n", framework.Name, controlReports.Name, framework.Name, controlReports.Name, controlReports.GetNumberOfFailedResources())
|
||||
err := printer.printDetails(controlReports.RuleReports[0].RuleResponses, framework.Name, controlReports.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (printer *PrometheusPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
err := printer.printReports(opaSessionObj.PostureReport.FrameworkReports)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -1,75 +0,0 @@
|
||||
package printer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// Group workloads by namespace - return {"namespace": <[]WorkloadSummary>}
|
||||
func groupByNamespace(resources []WorkloadSummary, status func(workloadSummary *WorkloadSummary) bool) map[string][]WorkloadSummary {
|
||||
mapResources := make(map[string][]WorkloadSummary)
|
||||
for i := range resources {
|
||||
if status(&resources[i]) {
|
||||
if r, ok := mapResources[resources[i].Namespace]; ok {
|
||||
r = append(r, resources[i])
|
||||
mapResources[resources[i].Namespace] = r
|
||||
} else {
|
||||
mapResources[resources[i].Namespace] = []WorkloadSummary{resources[i]}
|
||||
}
|
||||
}
|
||||
}
|
||||
return mapResources
|
||||
}
|
||||
func listResultSummary(ruleReports []reporthandling.RuleReport) []WorkloadSummary {
|
||||
workloadsSummary := []WorkloadSummary{}
|
||||
track := map[string]bool{}
|
||||
|
||||
for c := range ruleReports {
|
||||
for _, ruleReport := range ruleReports[c].RuleResponses {
|
||||
resource, err := ruleResultSummary(ruleReport.AlertObject)
|
||||
if err != nil {
|
||||
fmt.Println(err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
// add resource only once
|
||||
for i := range resource {
|
||||
resource[i].Exception = ruleReport.Exception
|
||||
if ok := track[resource[i].ToString()]; !ok {
|
||||
track[resource[i].ToString()] = true
|
||||
workloadsSummary = append(workloadsSummary, resource[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return workloadsSummary
|
||||
}
|
||||
func ruleResultSummary(obj reporthandling.AlertObject) ([]WorkloadSummary, error) {
|
||||
resource := []WorkloadSummary{}
|
||||
|
||||
for i := range obj.K8SApiObjects {
|
||||
r, err := newWorkloadSummary(obj.K8SApiObjects[i])
|
||||
if err != nil {
|
||||
return resource, err
|
||||
}
|
||||
|
||||
resource = append(resource, *r)
|
||||
}
|
||||
|
||||
return resource, nil
|
||||
}
|
||||
|
||||
func newWorkloadSummary(obj map[string]interface{}) (*WorkloadSummary, error) {
|
||||
r := &WorkloadSummary{}
|
||||
|
||||
workload := workloadinterface.NewWorkloadObj(obj)
|
||||
if workload == nil {
|
||||
return r, fmt.Errorf("expecting k8s API object")
|
||||
}
|
||||
r.Kind = workload.GetKind()
|
||||
r.Namespace = workload.GetNamespace()
|
||||
r.Name = workload.GetName()
|
||||
return r, nil
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package printer
|
||||
package v1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
)
|
||||
|
||||
type JsonPrinter struct {
|
||||
@@ -17,14 +18,16 @@ func NewJsonPrinter() *JsonPrinter {
|
||||
}
|
||||
|
||||
func (jsonPrinter *JsonPrinter) SetWriter(outputFile string) {
|
||||
jsonPrinter.writer = getWriter(outputFile)
|
||||
jsonPrinter.writer = printer.GetWriter(outputFile)
|
||||
}
|
||||
|
||||
func (jsonPrinter *JsonPrinter) Score(score float32) {
|
||||
fmt.Printf("\nFinal score: %d", int(score*100))
|
||||
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", int(score))
|
||||
}
|
||||
|
||||
func (jsonPrinter *JsonPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
cautils.ReportV2ToV1(opaSessionObj)
|
||||
|
||||
var postureReportStr []byte
|
||||
var err error
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package printer
|
||||
package v1
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
@@ -18,14 +19,16 @@ func NewJunitPrinter() *JunitPrinter {
|
||||
}
|
||||
|
||||
func (junitPrinter *JunitPrinter) SetWriter(outputFile string) {
|
||||
junitPrinter.writer = getWriter(outputFile)
|
||||
junitPrinter.writer = printer.GetWriter(outputFile)
|
||||
}
|
||||
|
||||
func (junitPrinter *JunitPrinter) Score(score float32) {
|
||||
fmt.Printf("\nFinal score: %d", int(score*100))
|
||||
fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", int(score))
|
||||
}
|
||||
|
||||
func (junitPrinter *JunitPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
cautils.ReportV2ToV1(opaSessionObj)
|
||||
|
||||
junitResult, err := convertPostureReportToJunitResult(opaSessionObj.PostureReport)
|
||||
if err != nil {
|
||||
fmt.Println("Failed to convert posture report object!")
|
||||
@@ -103,7 +106,7 @@ func convertPostureReportToJunitResult(postureResult *reporthandling.PostureRepo
|
||||
testCase := JUnitTestCase{}
|
||||
testCase.Name = controlReports.Name
|
||||
testCase.Classname = "Kubescape"
|
||||
testCase.Time = "0"
|
||||
testCase.Time = postureResult.ReportGenerationTime.String()
|
||||
if 0 < len(controlReports.RuleReports[0].RuleResponses) {
|
||||
|
||||
testCase.Resources = controlReports.GetNumberOfResources()
|
||||
273
resultshandling/printer/v1/prettyprinter.go
Normal file
273
resultshandling/printer/v1/prettyprinter.go
Normal file
@@ -0,0 +1,273 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/enescakir/emoji"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
type PrettyPrinter struct {
|
||||
writer *os.File
|
||||
summary Summary
|
||||
verboseMode bool
|
||||
sortedControlNames []string
|
||||
frameworkSummary ResultSummary
|
||||
}
|
||||
|
||||
func NewPrettyPrinter(verboseMode bool) *PrettyPrinter {
|
||||
return &PrettyPrinter{
|
||||
verboseMode: verboseMode,
|
||||
summary: NewSummary(),
|
||||
}
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
cautils.ReportV2ToV1(opaSessionObj)
|
||||
|
||||
// score := calculatePostureScore(opaSessionObj.PostureReport)
|
||||
failedResources := []string{}
|
||||
warningResources := []string{}
|
||||
allResources := []string{}
|
||||
frameworkNames := []string{}
|
||||
frameworkScores := []float32{}
|
||||
|
||||
var overallRiskScore float32 = 0
|
||||
for _, frameworkReport := range opaSessionObj.PostureReport.FrameworkReports {
|
||||
frameworkNames = append(frameworkNames, frameworkReport.Name)
|
||||
frameworkScores = append(frameworkScores, frameworkReport.Score)
|
||||
failedResources = reporthandling.GetUniqueResourcesIDs(append(failedResources, frameworkReport.ListResourcesIDs().GetFailedResources()...))
|
||||
warningResources = reporthandling.GetUniqueResourcesIDs(append(warningResources, frameworkReport.ListResourcesIDs().GetWarningResources()...))
|
||||
allResources = reporthandling.GetUniqueResourcesIDs(append(allResources, frameworkReport.ListResourcesIDs().GetAllResources()...))
|
||||
prettyPrinter.summarySetup(frameworkReport, opaSessionObj.AllResources)
|
||||
overallRiskScore += frameworkReport.Score
|
||||
}
|
||||
|
||||
overallRiskScore /= float32(len(opaSessionObj.PostureReport.FrameworkReports))
|
||||
|
||||
prettyPrinter.frameworkSummary = ResultSummary{
|
||||
RiskScore: overallRiskScore,
|
||||
TotalResources: len(allResources),
|
||||
TotalFailed: len(failedResources),
|
||||
TotalWarning: len(warningResources),
|
||||
}
|
||||
|
||||
prettyPrinter.printResults()
|
||||
prettyPrinter.printSummaryTable(frameworkNames, frameworkScores)
|
||||
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) SetWriter(outputFile string) {
|
||||
prettyPrinter.writer = printer.GetWriter(outputFile)
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) Score(score float32) {
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) summarySetup(fr reporthandling.FrameworkReport, allResources map[string]workloadinterface.IMetadata) {
|
||||
|
||||
for _, cr := range fr.ControlReports {
|
||||
// if len(cr.RuleReports) == 0 {
|
||||
// continue
|
||||
// }
|
||||
workloadsSummary := listResultSummary(cr.RuleReports, allResources)
|
||||
|
||||
var passedWorkloads map[string][]WorkloadSummary
|
||||
if prettyPrinter.verboseMode {
|
||||
passedWorkloads = groupByNamespaceOrKind(workloadsSummary, workloadSummaryPassed)
|
||||
}
|
||||
|
||||
//controlSummary
|
||||
prettyPrinter.summary[cr.Name] = ResultSummary{
|
||||
ID: cr.ControlID,
|
||||
RiskScore: cr.Score,
|
||||
TotalResources: cr.GetNumberOfResources(),
|
||||
TotalFailed: cr.GetNumberOfFailedResources(),
|
||||
TotalWarning: cr.GetNumberOfWarningResources(),
|
||||
FailedWorkloads: groupByNamespaceOrKind(workloadsSummary, workloadSummaryFailed),
|
||||
ExcludedWorkloads: groupByNamespaceOrKind(workloadsSummary, workloadSummaryExclude),
|
||||
PassedWorkloads: passedWorkloads,
|
||||
Description: cr.Description,
|
||||
Remediation: cr.Remediation,
|
||||
ListInputKinds: cr.ListControlsInputKinds(),
|
||||
}
|
||||
|
||||
}
|
||||
prettyPrinter.sortedControlNames = prettyPrinter.getSortedControlsNames()
|
||||
}
|
||||
func (prettyPrinter *PrettyPrinter) printResults() {
|
||||
for i := 0; i < len(prettyPrinter.sortedControlNames); i++ {
|
||||
controlSummary := prettyPrinter.summary[prettyPrinter.sortedControlNames[i]]
|
||||
prettyPrinter.printTitle(prettyPrinter.sortedControlNames[i], &controlSummary)
|
||||
prettyPrinter.printResources(&controlSummary)
|
||||
if prettyPrinter.summary[prettyPrinter.sortedControlNames[i]].TotalResources > 0 {
|
||||
prettyPrinter.printSummary(prettyPrinter.sortedControlNames[i], &controlSummary)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) printSummary(controlName string, controlSummary *ResultSummary) {
|
||||
cautils.SimpleDisplay(prettyPrinter.writer, "Summary - ")
|
||||
cautils.SuccessDisplay(prettyPrinter.writer, "Passed:%v ", controlSummary.TotalResources-controlSummary.TotalFailed-controlSummary.TotalWarning)
|
||||
cautils.WarningDisplay(prettyPrinter.writer, "Excluded:%v ", controlSummary.TotalWarning)
|
||||
cautils.FailureDisplay(prettyPrinter.writer, "Failed:%v ", controlSummary.TotalFailed)
|
||||
cautils.InfoDisplay(prettyPrinter.writer, "Total:%v\n", controlSummary.TotalResources)
|
||||
if controlSummary.TotalFailed > 0 {
|
||||
cautils.DescriptionDisplay(prettyPrinter.writer, "Remediation: %v\n", controlSummary.Remediation)
|
||||
}
|
||||
cautils.DescriptionDisplay(prettyPrinter.writer, "\n")
|
||||
|
||||
}
|
||||
func (prettyPrinter *PrettyPrinter) printTitle(controlName string, controlSummary *ResultSummary) {
|
||||
cautils.InfoDisplay(prettyPrinter.writer, "[control: %s - %s] ", controlName, getControlURL(controlSummary.ID))
|
||||
if controlSummary.TotalResources == 0 {
|
||||
cautils.InfoDisplay(prettyPrinter.writer, "skipped %v\n", emoji.ConfusedFace)
|
||||
} else if controlSummary.TotalFailed != 0 {
|
||||
cautils.FailureDisplay(prettyPrinter.writer, "failed %v\n", emoji.SadButRelievedFace)
|
||||
} else if controlSummary.TotalWarning != 0 {
|
||||
cautils.WarningDisplay(prettyPrinter.writer, "excluded %v\n", emoji.NeutralFace)
|
||||
} else {
|
||||
cautils.SuccessDisplay(prettyPrinter.writer, "passed %v\n", emoji.ThumbsUp)
|
||||
}
|
||||
|
||||
cautils.DescriptionDisplay(prettyPrinter.writer, "Description: %s\n", controlSummary.Description)
|
||||
|
||||
}
|
||||
func (prettyPrinter *PrettyPrinter) printResources(controlSummary *ResultSummary) {
|
||||
|
||||
if len(controlSummary.FailedWorkloads) > 0 {
|
||||
cautils.FailureDisplay(prettyPrinter.writer, "Failed:\n")
|
||||
prettyPrinter.printGroupedResources(controlSummary.FailedWorkloads)
|
||||
}
|
||||
if len(controlSummary.ExcludedWorkloads) > 0 {
|
||||
cautils.WarningDisplay(prettyPrinter.writer, "Excluded:\n")
|
||||
prettyPrinter.printGroupedResources(controlSummary.ExcludedWorkloads)
|
||||
}
|
||||
if len(controlSummary.PassedWorkloads) > 0 {
|
||||
cautils.SuccessDisplay(prettyPrinter.writer, "Passed:\n")
|
||||
prettyPrinter.printGroupedResources(controlSummary.PassedWorkloads)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) printGroupedResources(workloads map[string][]WorkloadSummary) {
|
||||
indent := INDENT
|
||||
for title, rsc := range workloads {
|
||||
prettyPrinter.printGroupedResource(indent, title, rsc)
|
||||
}
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) printGroupedResource(indent string, title string, rsc []WorkloadSummary) {
|
||||
preIndent := indent
|
||||
if title != "" {
|
||||
cautils.SimpleDisplay(prettyPrinter.writer, "%s%s\n", indent, title)
|
||||
indent += indent
|
||||
}
|
||||
|
||||
for r := range rsc {
|
||||
relatedObjectsStr := generateRelatedObjectsStr(rsc[r])
|
||||
cautils.SimpleDisplay(prettyPrinter.writer, fmt.Sprintf("%s%s - %s %s\n", indent, rsc[r].resource.GetKind(), rsc[r].resource.GetName(), relatedObjectsStr))
|
||||
}
|
||||
indent = preIndent
|
||||
}
|
||||
|
||||
func generateRelatedObjectsStr(workload WorkloadSummary) string {
|
||||
relatedStr := ""
|
||||
if workload.resource.GetObjectType() == workloadinterface.TypeWorkloadObject {
|
||||
relatedObjects := objectsenvelopes.NewRegoResponseVectorObject(workload.resource.GetObject()).GetRelatedObjects()
|
||||
for i, related := range relatedObjects {
|
||||
if ns := related.GetNamespace(); i == 0 && ns != "" {
|
||||
relatedStr += fmt.Sprintf("Namespace - %s, ", ns)
|
||||
}
|
||||
relatedStr += fmt.Sprintf("%s - %s, ", related.GetKind(), related.GetName())
|
||||
}
|
||||
}
|
||||
if relatedStr != "" {
|
||||
relatedStr = fmt.Sprintf(" [%s]", relatedStr[:len(relatedStr)-2])
|
||||
}
|
||||
return relatedStr
|
||||
}
|
||||
|
||||
func generateRow(control string, cs ResultSummary) []string {
|
||||
row := []string{control}
|
||||
row = append(row, cs.ToSlice()...)
|
||||
if cs.TotalResources != 0 {
|
||||
row = append(row, fmt.Sprintf("%d", int(cs.RiskScore))+"%")
|
||||
} else {
|
||||
row = append(row, "skipped")
|
||||
}
|
||||
return row
|
||||
}
|
||||
|
||||
func generateHeader() []string {
|
||||
return []string{"Control Name", "Failed Resources", "Excluded Resources", "All Resources", "% risk-score"}
|
||||
}
|
||||
|
||||
func generateFooter(prettyPrinter *PrettyPrinter) []string {
|
||||
// Control name | # failed resources | all resources | % success
|
||||
row := []string{}
|
||||
row = append(row, "Resource Summary") //fmt.Sprintf(""%d", numControlers"))
|
||||
row = append(row, fmt.Sprintf("%d", prettyPrinter.frameworkSummary.TotalFailed))
|
||||
row = append(row, fmt.Sprintf("%d", prettyPrinter.frameworkSummary.TotalWarning))
|
||||
row = append(row, fmt.Sprintf("%d", prettyPrinter.frameworkSummary.TotalResources))
|
||||
row = append(row, fmt.Sprintf("%.2f%s", prettyPrinter.frameworkSummary.RiskScore, "%"))
|
||||
|
||||
return row
|
||||
}
|
||||
func (prettyPrinter *PrettyPrinter) printSummaryTable(frameworksNames []string, frameworkScores []float32) {
|
||||
// For control scan framework will be nil
|
||||
prettyPrinter.printFramework(frameworksNames, frameworkScores)
|
||||
|
||||
summaryTable := tablewriter.NewWriter(prettyPrinter.writer)
|
||||
summaryTable.SetAutoWrapText(false)
|
||||
summaryTable.SetHeader(generateHeader())
|
||||
summaryTable.SetHeaderLine(true)
|
||||
alignments := []int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER}
|
||||
summaryTable.SetColumnAlignment(alignments)
|
||||
|
||||
for i := 0; i < len(prettyPrinter.sortedControlNames); i++ {
|
||||
controlSummary := prettyPrinter.summary[prettyPrinter.sortedControlNames[i]]
|
||||
summaryTable.Append(generateRow(prettyPrinter.sortedControlNames[i], controlSummary))
|
||||
}
|
||||
|
||||
summaryTable.SetFooter(generateFooter(prettyPrinter))
|
||||
|
||||
// summaryTable.SetFooter(generateFooter())
|
||||
summaryTable.Render()
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) printFramework(frameworksNames []string, frameworkScores []float32) {
|
||||
if len(frameworksNames) == 1 {
|
||||
if frameworksNames[0] != "" {
|
||||
cautils.InfoTextDisplay(prettyPrinter.writer, fmt.Sprintf("FRAMEWORK %s\n", frameworksNames[0]))
|
||||
}
|
||||
} else if len(frameworksNames) > 1 {
|
||||
p := "FRAMEWORKS: "
|
||||
for i := 0; i < len(frameworksNames)-1; i++ {
|
||||
p += fmt.Sprintf("%s (risk: %.2f), ", frameworksNames[i], frameworkScores[i])
|
||||
}
|
||||
p += fmt.Sprintf("%s (risk: %.2f)\n", frameworksNames[len(frameworksNames)-1], frameworkScores[len(frameworkScores)-1])
|
||||
cautils.InfoTextDisplay(prettyPrinter.writer, p)
|
||||
}
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) getSortedControlsNames() []string {
|
||||
controlNames := make([]string, 0, len(prettyPrinter.summary))
|
||||
for k := range prettyPrinter.summary {
|
||||
controlNames = append(controlNames, k)
|
||||
}
|
||||
sort.Strings(controlNames)
|
||||
return controlNames
|
||||
}
|
||||
func getControlURL(controlID string) string {
|
||||
return fmt.Sprintf("https://hub.armo.cloud/docs/%s", strings.ToLower(controlID))
|
||||
}
|
||||
21
resultshandling/printer/v1/printresults.go
Normal file
21
resultshandling/printer/v1/printresults.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/resultshandling/printer/v2/controlmapping"
|
||||
)
|
||||
|
||||
var INDENT = " "
|
||||
|
||||
func GetPrinter(printFormat string, verboseMode bool) printer.IPrinter {
|
||||
switch printFormat {
|
||||
case printer.JsonFormat:
|
||||
return NewJsonPrinter()
|
||||
case printer.JunitResultFormat:
|
||||
return NewJunitPrinter()
|
||||
case printer.PrometheusFormat:
|
||||
return NewPrometheusPrinter(verboseMode)
|
||||
default:
|
||||
return controlmapping.NewPrettyPrinter(verboseMode)
|
||||
}
|
||||
}
|
||||
96
resultshandling/printer/v1/prometheusprinter.go
Normal file
96
resultshandling/printer/v1/prometheusprinter.go
Normal file
@@ -0,0 +1,96 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
type PrometheusPrinter struct {
|
||||
writer *os.File
|
||||
verboseMode bool
|
||||
}
|
||||
|
||||
func NewPrometheusPrinter(verboseMode bool) *PrometheusPrinter {
|
||||
return &PrometheusPrinter{
|
||||
verboseMode: verboseMode,
|
||||
}
|
||||
}
|
||||
|
||||
func (prometheusPrinter *PrometheusPrinter) SetWriter(outputFile string) {
|
||||
prometheusPrinter.writer = printer.GetWriter(outputFile)
|
||||
}
|
||||
|
||||
func (prometheusPrinter *PrometheusPrinter) Score(score float32) {
|
||||
fmt.Printf("\n# Overall risk-score (0- Excellent, 100- All failed)\nkubescape_score %d\n", int(score))
|
||||
}
|
||||
|
||||
func (printer *PrometheusPrinter) printResources(allResources map[string]workloadinterface.IMetadata, resourcesIDs *reporthandling.ResourcesIDs, frameworkName, controlName string) {
|
||||
printer.printDetails(allResources, resourcesIDs.GetFailedResources(), frameworkName, controlName, "failed")
|
||||
printer.printDetails(allResources, resourcesIDs.GetWarningResources(), frameworkName, controlName, "excluded")
|
||||
if printer.verboseMode {
|
||||
printer.printDetails(allResources, resourcesIDs.GetPassedResources(), frameworkName, controlName, "passed")
|
||||
}
|
||||
|
||||
}
|
||||
func (printer *PrometheusPrinter) printDetails(allResources map[string]workloadinterface.IMetadata, resourcesIDs []string, frameworkName, controlName, status string) {
|
||||
objs := make(map[string]map[string]map[string]int)
|
||||
for _, resourceID := range resourcesIDs {
|
||||
resource := allResources[resourceID]
|
||||
|
||||
gvk := fmt.Sprintf("%s/%s", resource.GetApiVersion(), resource.GetKind())
|
||||
|
||||
if objs[gvk] == nil {
|
||||
objs[gvk] = make(map[string]map[string]int)
|
||||
}
|
||||
if objs[gvk][resource.GetNamespace()] == nil {
|
||||
objs[gvk][resource.GetNamespace()] = make(map[string]int)
|
||||
}
|
||||
objs[gvk][resource.GetNamespace()][resource.GetName()]++
|
||||
}
|
||||
for gvk, namespaces := range objs {
|
||||
for namespace, names := range namespaces {
|
||||
for name, value := range names {
|
||||
fmt.Fprintf(printer.writer, "# Failed object from \"%s\" control \"%s\"\n", frameworkName, controlName)
|
||||
if namespace != "" {
|
||||
fmt.Fprintf(printer.writer, "kubescape_object_failed_count{framework=\"%s\",control=\"%s\",namespace=\"%s\",name=\"%s\",groupVersionKind=\"%s\"} %d\n", frameworkName, controlName, namespace, name, gvk, value)
|
||||
} else {
|
||||
fmt.Fprintf(printer.writer, "kubescape_object_failed_count{framework=\"%s\",control=\"%s\",name=\"%s\",groupVersionKind=\"%s\"} %d\n", frameworkName, controlName, name, gvk, value)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (printer *PrometheusPrinter) printReports(allResources map[string]workloadinterface.IMetadata, frameworks []reporthandling.FrameworkReport) error {
|
||||
for _, frameworkReport := range frameworks {
|
||||
for _, controlReport := range frameworkReport.ControlReports {
|
||||
if controlReport.GetNumberOfResources() == 0 {
|
||||
continue // the control did not test any resources
|
||||
}
|
||||
if controlReport.Passed() {
|
||||
continue // control passed, do not print results
|
||||
}
|
||||
fmt.Fprintf(printer.writer, "# Number of resources found as part of %s control %s\nkubescape_resources_found_count{framework=\"%s\",control=\"%s\"} %d\n", frameworkReport.Name, controlReport.Name, frameworkReport.Name, controlReport.Name, controlReport.GetNumberOfResources())
|
||||
fmt.Fprintf(printer.writer, "# Number of resources excluded as part of %s control %s\nkubescape_resources_excluded_count{framework=\"%s\",control=\"%s\"} %d\n", frameworkReport.Name, controlReport.Name, frameworkReport.Name, controlReport.Name, controlReport.GetNumberOfWarningResources())
|
||||
fmt.Fprintf(printer.writer, "# Number of resources failed as part of %s control %s\nkubescape_resources_failed_count{framework=\"%s\",control=\"%s\"} %d\n", frameworkReport.Name, controlReport.Name, frameworkReport.Name, controlReport.Name, controlReport.GetNumberOfFailedResources())
|
||||
|
||||
printer.printResources(allResources, controlReport.ListResourcesIDs(), frameworkReport.Name, controlReport.Name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (printer *PrometheusPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
cautils.ReportV2ToV1(opaSessionObj)
|
||||
|
||||
err := printer.printReports(opaSessionObj.AllResources, opaSessionObj.PostureReport.FrameworkReports)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package printer
|
||||
package v1
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
@@ -1,37 +1,39 @@
|
||||
package printer
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
type Summary map[string]ControlSummary
|
||||
type Summary map[string]ResultSummary
|
||||
|
||||
func NewSummary() Summary {
|
||||
return make(map[string]ControlSummary)
|
||||
return make(map[string]ResultSummary)
|
||||
}
|
||||
|
||||
type ControlSummary struct {
|
||||
type ResultSummary struct {
|
||||
ID string
|
||||
RiskScore float32
|
||||
TotalResources int
|
||||
TotalFailed int
|
||||
TotalWarning int
|
||||
Description string
|
||||
Remediation string
|
||||
Framework []string
|
||||
ListInputKinds []string
|
||||
FailedWorkloads map[string][]WorkloadSummary // <namespace>:[<WorkloadSummary>]
|
||||
ExcludedWorkloads map[string][]WorkloadSummary // <namespace>:[<WorkloadSummary>]
|
||||
PassedWorkloads map[string][]WorkloadSummary // <namespace>:[<WorkloadSummary>]
|
||||
}
|
||||
|
||||
type WorkloadSummary struct {
|
||||
Kind string
|
||||
Name string
|
||||
Namespace string
|
||||
Group string
|
||||
Exception *armotypes.PostureExceptionPolicy
|
||||
resource workloadinterface.IMetadata
|
||||
status string
|
||||
}
|
||||
|
||||
func (controlSummary *ControlSummary) ToSlice() []string {
|
||||
func (controlSummary *ResultSummary) ToSlice() []string {
|
||||
s := []string{}
|
||||
s = append(s, fmt.Sprintf("%d", controlSummary.TotalFailed))
|
||||
s = append(s, fmt.Sprintf("%d", controlSummary.TotalWarning))
|
||||
@@ -39,14 +41,14 @@ func (controlSummary *ControlSummary) ToSlice() []string {
|
||||
return s
|
||||
}
|
||||
|
||||
func (workloadSummary *WorkloadSummary) ToString() string {
|
||||
return fmt.Sprintf("/%s/%s/%s/%s", workloadSummary.Group, workloadSummary.Namespace, workloadSummary.Kind, workloadSummary.Name)
|
||||
}
|
||||
|
||||
func workloadSummaryFailed(workloadSummary *WorkloadSummary) bool {
|
||||
return workloadSummary.Exception == nil
|
||||
return workloadSummary.status == reporthandling.StatusFailed
|
||||
}
|
||||
|
||||
func workloadSummaryExclude(workloadSummary *WorkloadSummary) bool {
|
||||
return workloadSummary.Exception != nil && workloadSummary.Exception.IsAlertOnly()
|
||||
return workloadSummary.status == reporthandling.StatusWarning
|
||||
}
|
||||
|
||||
func workloadSummaryPassed(workloadSummary *WorkloadSummary) bool {
|
||||
return workloadSummary.status == reporthandling.StatusPassed
|
||||
}
|
||||
85
resultshandling/printer/v1/summeryhelpers.go
Normal file
85
resultshandling/printer/v1/summeryhelpers.go
Normal file
@@ -0,0 +1,85 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// Group workloads by namespace - return {"namespace": <[]WorkloadSummary>}
|
||||
func groupByNamespaceOrKind(resources []WorkloadSummary, status func(workloadSummary *WorkloadSummary) bool) map[string][]WorkloadSummary {
|
||||
mapResources := make(map[string][]WorkloadSummary)
|
||||
for i := range resources {
|
||||
if !status(&resources[i]) {
|
||||
continue
|
||||
}
|
||||
t := resources[i].resource.GetObjectType()
|
||||
if t == objectsenvelopes.TypeRegoResponseVectorObject && !isKindToBeGrouped(resources[i].resource.GetKind()) {
|
||||
t = workloadinterface.TypeWorkloadObject
|
||||
}
|
||||
switch t { // TODO - find a better way to defind the groups
|
||||
case workloadinterface.TypeWorkloadObject:
|
||||
ns := ""
|
||||
if resources[i].resource.GetNamespace() != "" {
|
||||
ns = "Namescape " + resources[i].resource.GetNamespace()
|
||||
}
|
||||
if r, ok := mapResources[ns]; ok {
|
||||
r = append(r, resources[i])
|
||||
mapResources[ns] = r
|
||||
} else {
|
||||
mapResources[ns] = []WorkloadSummary{resources[i]}
|
||||
}
|
||||
case objectsenvelopes.TypeRegoResponseVectorObject:
|
||||
group := resources[i].resource.GetKind() + "s"
|
||||
if r, ok := mapResources[group]; ok {
|
||||
r = append(r, resources[i])
|
||||
mapResources[group] = r
|
||||
} else {
|
||||
mapResources[group] = []WorkloadSummary{resources[i]}
|
||||
}
|
||||
default:
|
||||
group, _ := k8sinterface.SplitApiVersion(resources[i].resource.GetApiVersion())
|
||||
if r, ok := mapResources[group]; ok {
|
||||
r = append(r, resources[i])
|
||||
mapResources[group] = r
|
||||
} else {
|
||||
mapResources[group] = []WorkloadSummary{resources[i]}
|
||||
}
|
||||
}
|
||||
}
|
||||
return mapResources
|
||||
}
|
||||
|
||||
// isKindToBeGrouped reports whether resources of the given kind keep their
// own kind-based group ("Groups"/"Users") instead of being listed as workloads.
func isKindToBeGrouped(kind string) bool {
	switch kind {
	case "Group", "User":
		return true
	}
	return false
}
|
||||
|
||||
func listResultSummary(ruleReports []reporthandling.RuleReport, allResources map[string]workloadinterface.IMetadata) []WorkloadSummary {
|
||||
workloadsSummary := []WorkloadSummary{}
|
||||
|
||||
for c := range ruleReports {
|
||||
resourcesIDs := ruleReports[c].ListResourcesIDs()
|
||||
workloadsSummary = append(workloadsSummary, newListWorkloadsSummary(allResources, resourcesIDs.GetFailedResources(), reporthandling.StatusFailed)...)
|
||||
workloadsSummary = append(workloadsSummary, newListWorkloadsSummary(allResources, resourcesIDs.GetWarningResources(), reporthandling.StatusWarning)...)
|
||||
workloadsSummary = append(workloadsSummary, newListWorkloadsSummary(allResources, resourcesIDs.GetPassedResources(), reporthandling.StatusPassed)...)
|
||||
}
|
||||
return workloadsSummary
|
||||
}
|
||||
|
||||
func newListWorkloadsSummary(allResources map[string]workloadinterface.IMetadata, resourcesIDs []string, status string) []WorkloadSummary {
|
||||
workloadsSummary := []WorkloadSummary{}
|
||||
|
||||
for _, i := range resourcesIDs {
|
||||
if r, ok := allResources[i]; ok {
|
||||
workloadsSummary = append(workloadsSummary, WorkloadSummary{
|
||||
resource: r,
|
||||
status: status,
|
||||
})
|
||||
}
|
||||
}
|
||||
return workloadsSummary
|
||||
}
|
||||
244
resultshandling/printer/v2/controlmapping/prettyprinter.go
Normal file
244
resultshandling/printer/v2/controlmapping/prettyprinter.go
Normal file
@@ -0,0 +1,244 @@
|
||||
package controlmapping
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
"github.com/enescakir/emoji"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
// PrettyPrinter renders scan results as human-readable, control-oriented
// console output.
type PrettyPrinter struct {
	writer             *os.File // destination stream, set via SetWriter
	verboseMode        bool     // when true, passed resources are listed as well
	sortedControlNames []string // control names in alphabetical order, filled by ActionPrint
}

// NewPrettyPrinter constructs a PrettyPrinter. The writer must be set with
// SetWriter before printing.
func NewPrettyPrinter(verboseMode bool) *PrettyPrinter {
	return &PrettyPrinter{
		verboseMode: verboseMode,
	}
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
prettyPrinter.sortedControlNames = getSortedControlsNames(opaSessionObj.Report.SummaryDetails.Controls) // ListControls().All())
|
||||
|
||||
prettyPrinter.printResults(&opaSessionObj.Report.SummaryDetails.Controls, opaSessionObj.AllResources)
|
||||
prettyPrinter.printSummaryTable(&opaSessionObj.Report.SummaryDetails)
|
||||
|
||||
}
|
||||
|
||||
// SetWriter resolves outputFile via printer.GetWriter and stores the
// resulting stream for all subsequent output.
func (prettyPrinter *PrettyPrinter) SetWriter(outputFile string) {
	prettyPrinter.writer = printer.GetWriter(outputFile)
}

// Score is intentionally a no-op for this printer.
func (prettyPrinter *PrettyPrinter) Score(score float32) {
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) printResults(controls *reportsummary.ControlSummaries, allResources map[string]workloadinterface.IMetadata) {
|
||||
for i := 0; i < len(prettyPrinter.sortedControlNames); i++ {
|
||||
|
||||
controlSummary := controls.GetControl(reportsummary.EControlCriteriaName, prettyPrinter.sortedControlNames[i]) // summaryDetails.Controls ListControls().All() Controls.GetControl(ca)
|
||||
prettyPrinter.printTitle(controlSummary)
|
||||
prettyPrinter.printResources(controlSummary, allResources)
|
||||
|
||||
if controlSummary.GetStatus().IsSkipped() {
|
||||
prettyPrinter.printSummary(prettyPrinter.sortedControlNames[i], controlSummary)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// printSummary prints the per-control resource counters (passed/excluded/
// failed/total) and, for failed controls, the remediation text.
// Skipped controls produce no summary. controlName is currently unused.
func (prettyPrinter *PrettyPrinter) printSummary(controlName string, controlSummary reportsummary.IControlSummary) {
	if controlSummary.GetStatus().IsSkipped() {
		return
	}
	cautils.SimpleDisplay(prettyPrinter.writer, "Summary - ")
	cautils.SuccessDisplay(prettyPrinter.writer, "Passed:%v ", controlSummary.NumberOfResources().Passed())
	cautils.WarningDisplay(prettyPrinter.writer, "Excluded:%v ", controlSummary.NumberOfResources().Excluded())
	cautils.FailureDisplay(prettyPrinter.writer, "Failed:%v ", controlSummary.NumberOfResources().Failed())
	cautils.InfoDisplay(prettyPrinter.writer, "Total:%v\n", controlSummary.NumberOfResources().All())
	// only failed controls carry an actionable remediation
	if controlSummary.GetStatus().IsFailed() {
		cautils.DescriptionDisplay(prettyPrinter.writer, "Remediation: %v\n", controlSummary.GetRemediation())
	}
	cautils.DescriptionDisplay(prettyPrinter.writer, "\n")
}
|
||||
// printTitle prints the control header line — name, docs URL and a
// color-coded status word with emoji — followed by the control description.
func (prettyPrinter *PrettyPrinter) printTitle(controlSummary reportsummary.IControlSummary) {
	cautils.InfoDisplay(prettyPrinter.writer, "[control: %s - %s] ", controlSummary.GetName(), getControlURL(controlSummary.GetID()))
	switch controlSummary.GetStatus().Status() {
	case apis.StatusSkipped:
		cautils.InfoDisplay(prettyPrinter.writer, "skipped %v\n", emoji.ConfusedFace)
	case apis.StatusFailed:
		cautils.FailureDisplay(prettyPrinter.writer, "failed %v\n", emoji.SadButRelievedFace)
	case apis.StatusExcluded:
		cautils.WarningDisplay(prettyPrinter.writer, "excluded %v\n", emoji.NeutralFace)
	default: // treated as passed
		cautils.SuccessDisplay(prettyPrinter.writer, "passed %v\n", emoji.ThumbsUp)
	}
	cautils.DescriptionDisplay(prettyPrinter.writer, "Description: %s\n", controlSummary.GetDescription())
}
|
||||
func (prettyPrinter *PrettyPrinter) printResources(controlSummary reportsummary.IControlSummary, allResources map[string]workloadinterface.IMetadata) {
|
||||
|
||||
workloadsSummary := listResultSummary(controlSummary, allResources)
|
||||
|
||||
failedWorkloads := groupByNamespaceOrKind(workloadsSummary, workloadSummaryFailed)
|
||||
excludedWorkloads := groupByNamespaceOrKind(workloadsSummary, workloadSummaryExclude)
|
||||
|
||||
var passedWorkloads map[string][]WorkloadSummary
|
||||
if prettyPrinter.verboseMode {
|
||||
passedWorkloads = groupByNamespaceOrKind(workloadsSummary, workloadSummaryPassed)
|
||||
}
|
||||
if len(failedWorkloads) > 0 {
|
||||
cautils.FailureDisplay(prettyPrinter.writer, "Failed:\n")
|
||||
prettyPrinter.printGroupedResources(failedWorkloads)
|
||||
}
|
||||
if len(excludedWorkloads) > 0 {
|
||||
cautils.WarningDisplay(prettyPrinter.writer, "Excluded:\n")
|
||||
prettyPrinter.printGroupedResources(excludedWorkloads)
|
||||
}
|
||||
if len(passedWorkloads) > 0 {
|
||||
cautils.SuccessDisplay(prettyPrinter.writer, "Passed:\n")
|
||||
prettyPrinter.printGroupedResources(passedWorkloads)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) printGroupedResources(workloads map[string][]WorkloadSummary) {
|
||||
indent := " "
|
||||
for title, rsc := range workloads {
|
||||
prettyPrinter.printGroupedResource(indent, title, rsc)
|
||||
}
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) printGroupedResource(indent string, title string, rsc []WorkloadSummary) {
|
||||
preIndent := indent
|
||||
if title != "" {
|
||||
cautils.SimpleDisplay(prettyPrinter.writer, "%s%s\n", indent, title)
|
||||
indent += indent
|
||||
}
|
||||
|
||||
resources := []string{}
|
||||
for r := range rsc {
|
||||
relatedObjectsStr := generateRelatedObjectsStr(rsc[r]) // TODO -
|
||||
resources = append(resources, fmt.Sprintf("%s%s - %s %s", indent, rsc[r].resource.GetKind(), rsc[r].resource.GetName(), relatedObjectsStr))
|
||||
}
|
||||
|
||||
sort.Strings(resources)
|
||||
for i := range resources {
|
||||
cautils.SimpleDisplay(prettyPrinter.writer, resources[i]+"\n")
|
||||
}
|
||||
|
||||
indent = preIndent
|
||||
}
|
||||
|
||||
// generateRelatedObjectsStr formats the objects related to a workload as
// " [Namespace - ns, Kind - name, ...]", or "" when there is nothing to show.
func generateRelatedObjectsStr(workload WorkloadSummary) string {
	relatedStr := ""
	if workload.resource.GetObjectType() == workloadinterface.TypeWorkloadObject {
		// NOTE(review): rego response vector objects are re-typed to workload
		// objects in groupByNamespaceOrKind, so the raw object may still carry
		// related objects — confirm this is the intended source of the data.
		relatedObjects := objectsenvelopes.NewRegoResponseVectorObject(workload.resource.GetObject()).GetRelatedObjects()
		for i, related := range relatedObjects {
			// prefix the namespace once, taken from the first related object
			if ns := related.GetNamespace(); i == 0 && ns != "" {
				relatedStr += fmt.Sprintf("Namespace - %s, ", ns)
			}
			relatedStr += fmt.Sprintf("%s - %s, ", related.GetKind(), related.GetName())
		}
	}
	if relatedStr != "" {
		// strip the trailing ", " and wrap in brackets
		relatedStr = fmt.Sprintf(" [%s]", relatedStr[:len(relatedStr)-2])
	}
	return relatedStr
}
|
||||
|
||||
func generateRow(controlSummary reportsummary.IControlSummary) []string {
|
||||
row := []string{controlSummary.GetName()}
|
||||
row = append(row, fmt.Sprintf("%d", controlSummary.NumberOfResources().Failed()))
|
||||
row = append(row, fmt.Sprintf("%d", controlSummary.NumberOfResources().Excluded()))
|
||||
row = append(row, fmt.Sprintf("%d", controlSummary.NumberOfResources().All()))
|
||||
|
||||
if !controlSummary.GetStatus().IsSkipped() {
|
||||
row = append(row, fmt.Sprintf("%d", int(controlSummary.GetScore()))+"%")
|
||||
} else {
|
||||
row = append(row, "skipped")
|
||||
}
|
||||
return row
|
||||
}
|
||||
|
||||
// generateHeader returns the column titles for the per-control summary table.
func generateHeader() []string {
	header := make([]string, 0, 5)
	header = append(header, "Control Name", "Failed Resources", "Excluded Resources", "All Resources", "% risk-score")
	return header
}
|
||||
|
||||
func generateFooter(summaryDetails *reportsummary.SummaryDetails) []string {
|
||||
// Control name | # failed resources | all resources | % success
|
||||
row := []string{}
|
||||
row = append(row, "Resource Summary") //fmt.Sprintf(""%d", numControlers"))
|
||||
row = append(row, fmt.Sprintf("%d", summaryDetails.NumberOfResources().Failed()))
|
||||
row = append(row, fmt.Sprintf("%d", summaryDetails.NumberOfResources().Excluded()))
|
||||
row = append(row, fmt.Sprintf("%d", summaryDetails.NumberOfResources().All()))
|
||||
row = append(row, fmt.Sprintf("%.2f%s", summaryDetails.Score, "%"))
|
||||
|
||||
return row
|
||||
}
|
||||
// printSummaryTable prints the scanned framework(s) followed by a table with
// one row per control and a totals footer.
func (prettyPrinter *PrettyPrinter) printSummaryTable(summaryDetails *reportsummary.SummaryDetails) {
	// For control scan framework will be nil
	prettyPrinter.printFramework(summaryDetails.ListFrameworks().All())

	summaryTable := tablewriter.NewWriter(prettyPrinter.writer)
	summaryTable.SetAutoWrapText(false)
	summaryTable.SetHeader(generateHeader())
	summaryTable.SetHeaderLine(true)
	alignments := []int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER}
	summaryTable.SetColumnAlignment(alignments)

	// one row per control, in the same sorted order as the detailed output
	for i := 0; i < len(prettyPrinter.sortedControlNames); i++ {
		summaryTable.Append(generateRow(summaryDetails.Controls.GetControl(reportsummary.EControlCriteriaName, prettyPrinter.sortedControlNames[i])))
	}

	summaryTable.SetFooter(generateFooter(summaryDetails))

	summaryTable.Render()
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) printFramework(frameworks []reportsummary.IPolicies) {
|
||||
if len(frameworks) == 1 {
|
||||
if frameworks[0].GetName() != "" {
|
||||
cautils.InfoTextDisplay(prettyPrinter.writer, fmt.Sprintf("FRAMEWORK %s\n", frameworks[0].GetName()))
|
||||
}
|
||||
} else if len(frameworks) > 1 {
|
||||
p := "FRAMEWORKS: "
|
||||
i := 0
|
||||
for ; i < len(frameworks)-1; i++ {
|
||||
p += fmt.Sprintf("%s (risk: %.2f), ", frameworks[i].GetName(), frameworks[i].GetScore())
|
||||
}
|
||||
p += fmt.Sprintf("%s (risk: %.2f)\n", frameworks[i].GetName(), frameworks[i].GetScore())
|
||||
cautils.InfoTextDisplay(prettyPrinter.writer, p)
|
||||
}
|
||||
}
|
||||
func getSortedControlsNames(controls reportsummary.ControlSummaries) []string {
|
||||
controlNames := make([]string, 0, len(controls))
|
||||
for k := range controls {
|
||||
c := controls[k]
|
||||
controlNames = append(controlNames, c.GetName())
|
||||
}
|
||||
sort.Strings(controlNames)
|
||||
return controlNames
|
||||
}
|
||||
|
||||
// func getSortedControlsNames(controls []reportsummary.IPolicies) []string {
|
||||
// controlNames := make([]string, 0, len(controls))
|
||||
// for k := range controls {
|
||||
// controlNames = append(controlNames, controls[k].Get())
|
||||
// }
|
||||
// sort.Strings(controlNames)
|
||||
// return controlNames
|
||||
// }
|
||||
// getControlURL builds the documentation URL for a control ID (lowercased).
func getControlURL(controlID string) string {
	return "https://hub.armo.cloud/docs/" + strings.ToLower(controlID)
}
|
||||
101
resultshandling/printer/v2/controlmapping/summeryhelpers.go
Normal file
101
resultshandling/printer/v2/controlmapping/summeryhelpers.go
Normal file
@@ -0,0 +1,101 @@
|
||||
package controlmapping
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/opa-utils/objectsenvelopes"
|
||||
"github.com/armosec/opa-utils/reporthandling/apis"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/reportsummary"
|
||||
)
|
||||
|
||||
// WorkloadSummary couples a scanned resource with the scanning status
// (failed/excluded/passed) it received for a given control.
type WorkloadSummary struct {
	resource workloadinterface.IMetadata // metadata wrapper of the resource
	status   apis.ScanningStatus         // status of this resource for the control
}
|
||||
|
||||
// workloadSummaryFailed reports whether the workload failed the control.
func workloadSummaryFailed(workloadSummary *WorkloadSummary) bool {
	return workloadSummary.status == apis.StatusFailed
}

// workloadSummaryExclude reports whether the workload was excluded.
func workloadSummaryExclude(workloadSummary *WorkloadSummary) bool {
	return workloadSummary.status == apis.StatusExcluded
}

// workloadSummaryPassed reports whether the workload passed the control.
func workloadSummaryPassed(workloadSummary *WorkloadSummary) bool {
	return workloadSummary.status == apis.StatusPassed
}
|
||||
|
||||
// Group workloads by namespace - return {"namespace": <[]WorkloadSummary>}
|
||||
func groupByNamespaceOrKind(resources []WorkloadSummary, status func(workloadSummary *WorkloadSummary) bool) map[string][]WorkloadSummary {
|
||||
mapResources := make(map[string][]WorkloadSummary)
|
||||
for i := range resources {
|
||||
if !status(&resources[i]) {
|
||||
continue
|
||||
}
|
||||
t := resources[i].resource.GetObjectType()
|
||||
if t == objectsenvelopes.TypeRegoResponseVectorObject && !isKindToBeGrouped(resources[i].resource.GetKind()) {
|
||||
t = workloadinterface.TypeWorkloadObject
|
||||
}
|
||||
switch t { // TODO - find a better way to defind the groups
|
||||
case workloadinterface.TypeWorkloadObject:
|
||||
ns := ""
|
||||
if resources[i].resource.GetNamespace() != "" {
|
||||
ns = "Namescape " + resources[i].resource.GetNamespace()
|
||||
}
|
||||
if r, ok := mapResources[ns]; ok {
|
||||
r = append(r, resources[i])
|
||||
mapResources[ns] = r
|
||||
} else {
|
||||
mapResources[ns] = []WorkloadSummary{resources[i]}
|
||||
}
|
||||
case objectsenvelopes.TypeRegoResponseVectorObject:
|
||||
group := resources[i].resource.GetKind() + "s"
|
||||
if r, ok := mapResources[group]; ok {
|
||||
r = append(r, resources[i])
|
||||
mapResources[group] = r
|
||||
} else {
|
||||
mapResources[group] = []WorkloadSummary{resources[i]}
|
||||
}
|
||||
default:
|
||||
group, _ := k8sinterface.SplitApiVersion(resources[i].resource.GetApiVersion())
|
||||
if r, ok := mapResources[group]; ok {
|
||||
r = append(r, resources[i])
|
||||
mapResources[group] = r
|
||||
} else {
|
||||
mapResources[group] = []WorkloadSummary{resources[i]}
|
||||
}
|
||||
}
|
||||
}
|
||||
return mapResources
|
||||
}
|
||||
|
||||
// isKindToBeGrouped reports whether resources of the given kind keep their
// own kind-based group ("Groups"/"Users") instead of being listed as workloads.
func isKindToBeGrouped(kind string) bool {
	switch kind {
	case "Group", "User":
		return true
	}
	return false
}
|
||||
|
||||
func listResultSummary(controlSummary reportsummary.IControlSummary, allResources map[string]workloadinterface.IMetadata) []WorkloadSummary {
|
||||
workloadsSummary := []WorkloadSummary{}
|
||||
|
||||
workloadsSummary = append(workloadsSummary, newListWorkloadsSummary(allResources, controlSummary.ListResourcesIDs().Failed(), apis.StatusFailed)...)
|
||||
workloadsSummary = append(workloadsSummary, newListWorkloadsSummary(allResources, controlSummary.ListResourcesIDs().Excluded(), apis.StatusExcluded)...)
|
||||
workloadsSummary = append(workloadsSummary, newListWorkloadsSummary(allResources, controlSummary.ListResourcesIDs().Passed(), apis.StatusPassed)...)
|
||||
|
||||
return workloadsSummary
|
||||
}
|
||||
|
||||
func newListWorkloadsSummary(allResources map[string]workloadinterface.IMetadata, resourcesIDs []string, status apis.ScanningStatus) []WorkloadSummary {
|
||||
workloadsSummary := []WorkloadSummary{}
|
||||
|
||||
for _, i := range resourcesIDs {
|
||||
if r, ok := allResources[i]; ok {
|
||||
workloadsSummary = append(workloadsSummary, WorkloadSummary{
|
||||
resource: r,
|
||||
status: status,
|
||||
})
|
||||
}
|
||||
}
|
||||
return workloadsSummary
|
||||
}
|
||||
129
resultshandling/printer/v2/junit.go
Normal file
129
resultshandling/printer/v2/junit.go
Normal file
@@ -0,0 +1,129 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
|
||||
)
|
||||
|
||||
// JunitPrinter emits scan results as JUnit-style XML, suitable for CI systems.
type JunitPrinter struct {
	writer *os.File // destination stream, set via SetWriter
}

// NewJunitPrinter constructs a JunitPrinter. The writer must be set with
// SetWriter before printing.
func NewJunitPrinter() *JunitPrinter {
	return &JunitPrinter{}
}

// SetWriter resolves outputFile via printer.GetWriter and stores the
// resulting stream for all subsequent output.
func (junitPrinter *JunitPrinter) SetWriter(outputFile string) {
	junitPrinter.writer = printer.GetWriter(outputFile)
}

// Score prints the overall risk score to stderr so it does not mix with the
// XML written to the output stream.
func (junitPrinter *JunitPrinter) Score(score float32) {
	fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", int(score))
}

// FinalizeData post-processes the report before printing.
func (junitPrinter *JunitPrinter) FinalizeData(opaSessionObj *cautils.OPASessionObj) {
	finalizeReport(opaSessionObj)
}
|
||||
|
||||
func (junitPrinter *JunitPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
junitResult, err := convertPostureReportToJunitResult(opaSessionObj.Report)
|
||||
if err != nil {
|
||||
fmt.Println("Failed to convert posture report object!")
|
||||
os.Exit(1)
|
||||
}
|
||||
postureReportStr, err := xml.Marshal(junitResult)
|
||||
if err != nil {
|
||||
fmt.Println("Failed to convert posture report object!")
|
||||
os.Exit(1)
|
||||
}
|
||||
junitPrinter.writer.Write(postureReportStr)
|
||||
}
|
||||
|
||||
// JUnitTestSuites is the root element of the JUnit XML document, holding one
// suite per scanned framework.
type JUnitTestSuites struct {
	XMLName xml.Name         `xml:"testsuites"`
	Suites  []JUnitTestSuite `xml:"testsuite"`
}
|
||||
|
||||
// JUnitTestSuite is a single JUnit test suite which may contain many
|
||||
// testcases.
|
||||
type JUnitTestSuite struct {
|
||||
XMLName xml.Name `xml:"testsuite"`
|
||||
Tests int `xml:"tests,attr"`
|
||||
Time string `xml:"time,attr"`
|
||||
Name string `xml:"name,attr"`
|
||||
Resources int `xml:"resources,attr"`
|
||||
Excluded int `xml:"excluded,attr"`
|
||||
Failed int `xml:"filed,attr"`
|
||||
Properties []JUnitProperty `xml:"properties>property,omitempty"`
|
||||
TestCases []JUnitTestCase `xml:"testcase"`
|
||||
}
|
||||
|
||||
// JUnitTestCase is a single test case with its result.
|
||||
type JUnitTestCase struct {
|
||||
XMLName xml.Name `xml:"testcase"`
|
||||
Classname string `xml:"classname,attr"`
|
||||
Name string `xml:"name,attr"`
|
||||
Time string `xml:"time,attr"`
|
||||
Resources int `xml:"resources,attr"`
|
||||
Excluded int `xml:"excluded,attr"`
|
||||
Failed int `xml:"filed,attr"`
|
||||
SkipMessage *JUnitSkipMessage `xml:"skipped,omitempty"`
|
||||
Failure *JUnitFailure `xml:"failure,omitempty"`
|
||||
}
|
||||
|
||||
// JUnitSkipMessage contains the reason why a testcase was skipped.
type JUnitSkipMessage struct {
	Message string `xml:"message,attr"`
}

// JUnitProperty represents a key/value pair used to define properties.
type JUnitProperty struct {
	Name  string `xml:"name,attr"`
	Value string `xml:"value,attr"`
}

// JUnitFailure contains data related to a failed test.
type JUnitFailure struct {
	Message  string `xml:"message,attr"`
	Type     string `xml:"type,attr"`
	Contents string `xml:",chardata"`
}
|
||||
|
||||
// convertPostureReportToJunitResult maps the posture report onto JUnit
// testsuites: one suite per framework, one testcase per control.
// It currently always returns a nil error.
func convertPostureReportToJunitResult(postureResult *reporthandlingv2.PostureReport) (*JUnitTestSuites, error) {
	juResult := JUnitTestSuites{XMLName: xml.Name{Local: "Kubescape scan results"}}
	for _, framework := range postureResult.ListFrameworks().All() {
		suite := JUnitTestSuite{
			Name:      framework.GetName(),
			Resources: framework.NumberOfResources().All(),
			Excluded:  framework.NumberOfResources().Excluded(),
			Failed:    framework.NumberOfResources().Failed(),
		}
		// NOTE(review): this iterates ALL controls of the report for every
		// framework, so each suite repeats the same testcases — confirm this
		// is intended rather than iterating the framework's own controls.
		for _, controlReports := range postureResult.ListControls().All() {
			suite.Tests = suite.Tests + 1
			testCase := JUnitTestCase{}
			testCase.Name = controlReports.GetName()
			testCase.Classname = "Kubescape"
			testCase.Time = postureResult.ReportGenerationTime.String()
			// Per-testcase resource counters and failure details are not yet
			// populated for v2 reports:
			// if 0 < len(controlReports.RuleReports[0].RuleResponses) {
			// 	testCase.Resources = controlReports.NumberOfResources().All()
			// 	testCase.Excluded = controlReports.NumberOfResources().Excluded()
			// 	testCase.Failed = controlReports.NumberOfResources().Failed()
			// 	failure := JUnitFailure{}
			// 	failure.Message = fmt.Sprintf("%d resources failed", testCase.Failed)
			// 	for _, ruleResponses := range controlReports.RuleReports[0].RuleResponses {
			// 		failure.Contents = fmt.Sprintf("%s\n%s", failure.Contents, ruleResponses.AlertMessage)
			// 	}
			// 	testCase.Failure = &failure
			// }
			suite.TestCases = append(suite.TestCases, testCase)
		}
		juResult.Suites = append(juResult.Suites, suite)
	}
	return &juResult, nil
}
|
||||
21
resultshandling/printer/v2/printresults.go
Normal file
21
resultshandling/printer/v2/printresults.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/resultshandling/printer/v2/resourcemapping"
|
||||
)
|
||||
|
||||
var INDENT = " "
|
||||
|
||||
func GetPrinter(printFormat string, verboseMode bool) printer.IPrinter {
|
||||
switch printFormat {
|
||||
case printer.JsonFormat:
|
||||
return resourcemapping.NewJsonPrinter()
|
||||
case printer.JunitResultFormat:
|
||||
return NewJunitPrinter()
|
||||
// case printer.PrometheusFormat:
|
||||
// return NewPrometheusPrinter(verboseMode)
|
||||
default:
|
||||
return resourcemapping.NewPrettyPrinter(verboseMode)
|
||||
}
|
||||
}
|
||||
41
resultshandling/printer/v2/resourcemapping/jsonprinter.go
Normal file
41
resultshandling/printer/v2/resourcemapping/jsonprinter.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package resourcemapping
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
)
|
||||
|
||||
// JsonPrinter serializes the full scan report as JSON.
type JsonPrinter struct {
	writer *os.File // destination stream, set via SetWriter
}

// NewJsonPrinter constructs a JsonPrinter. The writer must be set with
// SetWriter before printing.
func NewJsonPrinter() *JsonPrinter {
	return &JsonPrinter{}
}

// SetWriter resolves outputFile via printer.GetWriter and stores the
// resulting stream for all subsequent output.
func (jsonPrinter *JsonPrinter) SetWriter(outputFile string) {
	jsonPrinter.writer = printer.GetWriter(outputFile)
}

// Score prints the overall risk score to stderr so it does not mix with the
// JSON written to the output stream.
func (jsonPrinter *JsonPrinter) Score(score float32) {
	fmt.Fprintf(os.Stderr, "\nOverall risk-score (0- Excellent, 100- All failed): %d\n", int(score))
}
|
||||
|
||||
func (jsonPrinter *JsonPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
|
||||
postureReportStr, err := json.Marshal(opaSessionObj.Report)
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("Failed to convert posture report object!")
|
||||
os.Exit(1)
|
||||
}
|
||||
jsonPrinter.writer.Write(postureReportStr)
|
||||
}
|
||||
|
||||
// FinalizeData is currently a no-op; report finalization is disabled for the
// JSON printer.
func (jsonPrinter *JsonPrinter) FinalizeData(opaSessionObj *cautils.OPASessionObj) {
	// finalizeReport(opaSessionObj)
}
|
||||
215
resultshandling/printer/v2/resourcemapping/prettyprinter.go
Normal file
215
resultshandling/printer/v2/resourcemapping/prettyprinter.go
Normal file
@@ -0,0 +1,215 @@
|
||||
package resourcemapping
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
)
|
||||
|
||||
// PrettyPrinter renders scan results as a resource-oriented console table.
type PrettyPrinter struct {
	writer      *os.File // destination stream, set via SetWriter
	verboseMode bool     // when true, passed controls are listed per resource as well
}

// NewPrettyPrinter constructs a PrettyPrinter. The writer must be set with
// SetWriter before printing.
func NewPrettyPrinter(verboseMode bool) *PrettyPrinter {
	return &PrettyPrinter{
		verboseMode: verboseMode,
	}
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) printResourceTable(results []resourcesresults.Result) {
|
||||
|
||||
summaryTable := tablewriter.NewWriter(prettyPrinter.writer)
|
||||
summaryTable.SetAutoWrapText(true)
|
||||
summaryTable.SetAutoMergeCells(false)
|
||||
// summaryTable.SetCenterSeparator("=")
|
||||
// summaryTable.SetRowSeparator("*")
|
||||
// summaryTable.
|
||||
summaryTable.SetHeader(generateResourceHeader())
|
||||
summaryTable.SetHeaderLine(true)
|
||||
|
||||
// For control scan framework will be nil
|
||||
for i := range results {
|
||||
// status := result.GetStatus(nil).Status()
|
||||
resourceID := results[i].GetResourceID()
|
||||
control := results[i].ListControls()
|
||||
if raw := generateResourceRow(resourceID, control, prettyPrinter.verboseMode); len(raw) > 0 {
|
||||
summaryTable.Append(raw)
|
||||
}
|
||||
}
|
||||
|
||||
// alignments := []int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER}
|
||||
// summaryTable.SetColumnAlignment(alignments)
|
||||
|
||||
// for i := 0; i < len(prettyPrinter.sortedControlNames); i++ {
|
||||
// controlSummary := prettyPrinter.summary[prettyPrinter.sortedControlNames[i]]
|
||||
// summaryTable.Append(generateRow(prettyPrinter.sortedControlNames[i], controlSummary))
|
||||
// }
|
||||
|
||||
// summaryTable.SetFooter(generateFooter(prettyPrinter))
|
||||
|
||||
summaryTable.Render()
|
||||
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
|
||||
|
||||
prettyPrinter.printResourceTable(opaSessionObj.Report.Results)
|
||||
// var overallRiskScore float32 = 0
|
||||
// for _, frameworkReport := range opaSessionObj.PostureReport.FrameworkReports {
|
||||
// frameworkNames = append(frameworkNames, frameworkReport.Name)
|
||||
// frameworkScores = append(frameworkScores, frameworkReport.Score)
|
||||
// failedResources = reporthandling.GetUniqueResourcesIDs(append(failedResources, frameworkReport.ListResourcesIDs().GetFailedResources()...))
|
||||
// warningResources = reporthandling.GetUniqueResourcesIDs(append(warningResources, frameworkReport.ListResourcesIDs().GetWarningResources()...))
|
||||
// allResources = reporthandling.GetUniqueResourcesIDs(append(allResources, frameworkReport.ListResourcesIDs().GetAllResources()...))
|
||||
// prettyPrinter.summarySetup(frameworkReport, opaSessionObj.AllResources)
|
||||
// overallRiskScore += frameworkReport.Score
|
||||
// }
|
||||
|
||||
// overallRiskScore /= float32(len(opaSessionObj.PostureReport.FrameworkReports))
|
||||
|
||||
// prettyPrinter.frameworkSummary = ResultSummary{
|
||||
// RiskScore: overallRiskScore,
|
||||
// TotalResources: len(allResources),
|
||||
// TotalFailed: len(failedResources),
|
||||
// TotalWarning: len(warningResources),
|
||||
// }
|
||||
|
||||
// prettyPrinter.printResults()
|
||||
// prettyPrinter.printSummaryTable(frameworkNames, frameworkScores)
|
||||
|
||||
}
|
||||
|
||||
// SetWriter resolves outputFile via printer.GetWriter and stores the
// resulting stream for all subsequent output.
func (prettyPrinter *PrettyPrinter) SetWriter(outputFile string) {
	prettyPrinter.writer = printer.GetWriter(outputFile)
}

// FinalizeData is currently a no-op; report finalization is disabled for
// this printer.
func (prettyPrinter *PrettyPrinter) FinalizeData(opaSessionObj *cautils.OPASessionObj) {
	// finalizeReport(opaSessionObj)
}

// Score is intentionally a no-op for this printer.
func (prettyPrinter *PrettyPrinter) Score(score float32) {
}
|
||||
|
||||
// func (prettyPrinter *PrettyPrinter) printSummary(controlName string, controlSummary *ResultSummary) {
|
||||
// // cautils.SimpleDisplay(prettyPrinter.writer, "Summary - ")
|
||||
// // cautils.SuccessDisplay(prettyPrinter.writer, "Passed:%v ", controlSummary.TotalResources-controlSummary.TotalFailed-controlSummary.TotalWarning)
|
||||
// // cautils.WarningDisplay(prettyPrinter.writer, "Excluded:%v ", controlSummary.TotalWarning)
|
||||
// // cautils.FailureDisplay(prettyPrinter.writer, "Failed:%v ", controlSummary.TotalFailed)
|
||||
// // cautils.InfoDisplay(prettyPrinter.writer, "Total:%v\n", controlSummary.TotalResources)
|
||||
// // if controlSummary.TotalFailed > 0 {
|
||||
// // cautils.DescriptionDisplay(prettyPrinter.writer, "Remediation: %v\n", controlSummary.Remediation)
|
||||
// // }
|
||||
// // cautils.DescriptionDisplay(prettyPrinter.writer, "\n")
|
||||
|
||||
// }
|
||||
|
||||
func generateResourceRow(resourceID string, controls []resourcesresults.ResourceAssociatedControl, verboseMode bool) []string {
|
||||
row := []string{}
|
||||
|
||||
controlsNames := []string{}
|
||||
statuses := []string{}
|
||||
|
||||
for i := range controls {
|
||||
if !verboseMode && controls[i].GetStatus(nil).IsPassed() {
|
||||
continue
|
||||
}
|
||||
if controls[i].GetName() == "" {
|
||||
continue
|
||||
}
|
||||
controlsNames = append(controlsNames, controls[i].GetName())
|
||||
statuses = append(statuses, string(controls[i].GetStatus(nil).Status()))
|
||||
}
|
||||
|
||||
splitted := strings.Split(resourceID, "/")
|
||||
if len(splitted) < 5 || len(controlsNames) == 0 {
|
||||
return row
|
||||
}
|
||||
|
||||
row = append(row, splitted[3])
|
||||
row = append(row, splitted[4])
|
||||
row = append(row, splitted[2])
|
||||
|
||||
row = append(row, strings.Join(controlsNames, "\n"))
|
||||
row = append(row, strings.Join(statuses, "\n"))
|
||||
|
||||
return row
|
||||
}
|
||||
|
||||
// func generateRow(control string, cs ResultSummary) []string {
|
||||
// row := []string{control}
|
||||
// row = append(row, cs.ToSlice()...)
|
||||
// if cs.TotalResources != 0 {
|
||||
// row = append(row, fmt.Sprintf("%d", int(cs.RiskScore))+"%")
|
||||
// } else {
|
||||
// row = append(row, "skipped")
|
||||
// }
|
||||
// return row
|
||||
// }
|
||||
|
||||
// generateResourceHeader returns the column titles for the per-resource table.
func generateResourceHeader() []string {
	// BUGFIX: the last column title was misspelled "Statues".
	return []string{"Kind", "Name", "Namespace", "Controls", "Statuses"}
}
|
||||
// generateHeader returns the column titles for the per-control summary table.
func generateHeader() []string {
	header := make([]string, 0, 5)
	header = append(header, "Control Name", "Failed Resources", "Excluded Resources", "All Resources", "% risk-score")
	return header
}
|
||||
|
||||
func generateFooter(prettyPrinter *PrettyPrinter) []string {
|
||||
// Control name | # failed resources | all resources | % success
|
||||
row := []string{}
|
||||
// row = append(row, "Resource Summary") //fmt.Sprintf(""%d", numControlers"))
|
||||
// row = append(row, fmt.Sprintf("%d", prettyPrinter.frameworkSummary.TotalFailed))
|
||||
// row = append(row, fmt.Sprintf("%d", prettyPrinter.frameworkSummary.TotalWarning))
|
||||
// row = append(row, fmt.Sprintf("%d", prettyPrinter.frameworkSummary.TotalResources))
|
||||
// row = append(row, fmt.Sprintf("%.2f%s", prettyPrinter.frameworkSummary.RiskScore, "%"))
|
||||
|
||||
return row
|
||||
}
|
||||
// printSummaryTable renders the summary section of the scan output: the
// framework name(s) with their risk scores, followed by a summary table
// whose per-control rows are currently disabled.
func (prettyPrinter *PrettyPrinter) printSummaryTable(frameworksNames []string, frameworkScores []float32) {
	// For control scan framework will be nil
	prettyPrinter.printFramework(frameworksNames, frameworkScores)

	summaryTable := tablewriter.NewWriter(prettyPrinter.writer)
	// NOTE(review): header, alignment, rows and footer are all commented out,
	// so the rendered table is currently empty; the writer is still created
	// and rendered to keep the output layout stable.
	// summaryTable.SetAutoWrapText(false)
	// summaryTable.SetHeader(generateHeader())
	// summaryTable.SetHeaderLine(true)
	// alignments := []int{tablewriter.ALIGN_LEFT, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER, tablewriter.ALIGN_CENTER}
	// summaryTable.SetColumnAlignment(alignments)

	// for i := 0; i < len(prettyPrinter.sortedControlNames); i++ {
	// 	controlSummary := prettyPrinter.summary[prettyPrinter.sortedControlNames[i]]
	// 	summaryTable.Append(generateRow(prettyPrinter.sortedControlNames[i], controlSummary))
	// }

	// summaryTable.SetFooter(generateFooter(prettyPrinter))

	summaryTable.Render()
}
|
||||
|
||||
func (prettyPrinter *PrettyPrinter) printFramework(frameworksNames []string, frameworkScores []float32) {
|
||||
if len(frameworksNames) == 1 {
|
||||
cautils.InfoTextDisplay(prettyPrinter.writer, fmt.Sprintf("FRAMEWORK %s\n", frameworksNames[0]))
|
||||
} else if len(frameworksNames) > 1 {
|
||||
p := "FRAMEWORKS: "
|
||||
for i := 0; i < len(frameworksNames)-1; i++ {
|
||||
p += fmt.Sprintf("%s (risk: %.2f), ", frameworksNames[i], frameworkScores[i])
|
||||
}
|
||||
p += fmt.Sprintf("%s (risk: %.2f)\n", frameworksNames[len(frameworksNames)-1], frameworkScores[len(frameworkScores)-1])
|
||||
cautils.InfoTextDisplay(prettyPrinter.writer, p)
|
||||
}
|
||||
}
|
||||
|
||||
// func (prettyPrinter *PrettyPrinter) getSortedControlsNames() []string {
|
||||
// controlNames := make([]string, 0, len(prettyPrinter.summary))
|
||||
// for k := range prettyPrinter.summary {
|
||||
// controlNames = append(controlNames, k)
|
||||
// }
|
||||
// sort.Strings(controlNames)
|
||||
// return controlNames
|
||||
// }
|
||||
// func getControlURL(controlID string) string {
|
||||
// return fmt.Sprintf("https://hub.armo.cloud/docs/%s", strings.ToLower(controlID))
|
||||
// }
|
||||
11
resultshandling/printer/v2/silentprinter.go
Normal file
11
resultshandling/printer/v2/silentprinter.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
)
|
||||
|
||||
// SilentPrinter is a no-op implementation of the printer interface, used
// when results should not be written to the output.
type SilentPrinter struct {
}

// ActionPrint implements the printer interface but intentionally prints nothing.
func (silentPrinter *SilentPrinter) ActionPrint(opaSessionObj *cautils.OPASessionObj) {
}
|
||||
1
resultshandling/printer/v2/summary.go
Normal file
1
resultshandling/printer/v2/summary.go
Normal file
@@ -0,0 +1 @@
|
||||
package v2
|
||||
44
resultshandling/printer/v2/utils.go
Normal file
44
resultshandling/printer/v2/utils.go
Normal file
@@ -0,0 +1,44 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
)
|
||||
|
||||
// finalizeV2Report finalize the results objects by copying data from map to lists
|
||||
// finalizeReport finalizes the v2 results object by copying data from the
// session's maps into the report's list fields and then releasing the maps.
// Each field is only filled if it is still empty, so the function is safe to
// call more than once.
func finalizeReport(opaSessionObj *cautils.OPASessionObj) {
	if len(opaSessionObj.Report.Results) == 0 {
		opaSessionObj.Report.Results = make([]resourcesresults.Result, len(opaSessionObj.ResourcesResult))
		finalizeResults(opaSessionObj.Report.Results, opaSessionObj.ResourcesResult)
		opaSessionObj.ResourcesResult = nil // the report now owns the data; free the map
	}

	if len(opaSessionObj.Report.Resources) == 0 {
		opaSessionObj.Report.Resources = make([]reporthandling.Resource, len(opaSessionObj.AllResources))
		finalizeResources(opaSessionObj.Report.Resources, opaSessionObj.AllResources)
		opaSessionObj.AllResources = nil // the report now owns the data; free the map
	}

}
|
||||
func finalizeResults(results []resourcesresults.Result, resourcesResult map[string]resourcesresults.Result) {
|
||||
index := 0
|
||||
for resourceID := range resourcesResult {
|
||||
results[index] = resourcesResult[resourceID]
|
||||
index++
|
||||
}
|
||||
}
|
||||
|
||||
func finalizeResources(resources []reporthandling.Resource, allResources map[string]workloadinterface.IMetadata) {
|
||||
index := 0
|
||||
for resourceID := range allResources {
|
||||
if obj, ok := allResources[resourceID]; ok {
|
||||
r := *reporthandling.NewResource(obj.GetObject())
|
||||
r.ResourceID = resourceID
|
||||
resources[index] = r
|
||||
}
|
||||
|
||||
index++
|
||||
}
|
||||
}
|
||||
10
resultshandling/reporter/interface.go
Normal file
10
resultshandling/reporter/interface.go
Normal file
@@ -0,0 +1,10 @@
|
||||
package reporter
|
||||
|
||||
import "github.com/armosec/kubescape/cautils"
|
||||
|
||||
// IReport is the contract for publishing scan results to a backend and
// telling the user where the published report can be viewed.
type IReport interface {
	// ActionSendReport submits the results of the given scan session.
	ActionSendReport(opaSessionObj *cautils.OPASessionObj) error
	// SetCustomerGUID sets the account (tenant) the report is attributed to.
	SetCustomerGUID(customerGUID string)
	// SetClusterName sets the cluster name attached to the report.
	SetClusterName(clusterName string)
	// DisplayReportURL prints where the submitted report can be viewed.
	DisplayReportURL()
}
|
||||
@@ -1,19 +0,0 @@
|
||||
package reporter
|
||||
|
||||
import "github.com/armosec/kubescape/cautils"
|
||||
|
||||
type ReportMock struct {
|
||||
}
|
||||
|
||||
func NewReportMock() *ReportMock {
|
||||
return &ReportMock{}
|
||||
}
|
||||
func (reportMock *ReportMock) ActionSendReport(opaSessionObj *cautils.OPASessionObj) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (reportMock *ReportMock) SetCustomerGUID(customerGUID string) {
|
||||
}
|
||||
|
||||
func (reportMock *ReportMock) SetClusterName(clusterName string) {
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
package reporter
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
type IReport interface {
|
||||
ActionSendReport(opaSessionObj *cautils.OPASessionObj) error
|
||||
SetCustomerGUID(customerGUID string)
|
||||
SetClusterName(clusterName string)
|
||||
}
|
||||
|
||||
type ReportEventReceiver struct {
|
||||
httpClient *http.Client
|
||||
clusterName string
|
||||
customerGUID string
|
||||
}
|
||||
|
||||
func NewReportEventReceiver(customerGUID, clusterName string) *ReportEventReceiver {
|
||||
return &ReportEventReceiver{
|
||||
httpClient: &http.Client{},
|
||||
clusterName: clusterName,
|
||||
customerGUID: customerGUID,
|
||||
}
|
||||
}
|
||||
|
||||
func (report *ReportEventReceiver) ActionSendReport(opaSessionObj *cautils.OPASessionObj) error {
|
||||
// Remove data before reporting
|
||||
keepFields := []string{"kind", "apiVersion", "metadata"}
|
||||
keepMetadataFields := []string{"name", "namespace", "labels"}
|
||||
opaSessionObj.PostureReport.RemoveData(keepFields, keepMetadataFields)
|
||||
|
||||
if err := report.send(opaSessionObj.PostureReport); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (report *ReportEventReceiver) SetCustomerGUID(customerGUID string) {
|
||||
report.customerGUID = customerGUID
|
||||
}
|
||||
|
||||
func (report *ReportEventReceiver) SetClusterName(clusterName string) {
|
||||
report.clusterName = clusterName
|
||||
}
|
||||
|
||||
func (report *ReportEventReceiver) send(postureReport *reporthandling.PostureReport) error {
|
||||
|
||||
reqBody, err := json.Marshal(*postureReport)
|
||||
if err != nil {
|
||||
return fmt.Errorf("in 'Send' failed to json.Marshal, reason: %v", err)
|
||||
}
|
||||
host := hostToString(report.initEventReceiverURL(), postureReport.ReportID)
|
||||
|
||||
msg, err := getter.HttpPost(report.httpClient, host, nil, reqBody)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s, %v:%s", host, err, msg)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
package reporter
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/gofrs/uuid"
|
||||
)
|
||||
|
||||
func (report *ReportEventReceiver) initEventReceiverURL() *url.URL {
|
||||
urlObj := url.URL{}
|
||||
|
||||
urlObj.Scheme = "https"
|
||||
urlObj.Host = getter.GetArmoAPIConnector().GetReportReceiverURL()
|
||||
urlObj.Path = "/k8s/postureReport"
|
||||
q := urlObj.Query()
|
||||
q.Add("customerGUID", uuid.FromStringOrNil(report.customerGUID).String())
|
||||
q.Add("clusterName", report.clusterName)
|
||||
|
||||
urlObj.RawQuery = q.Encode()
|
||||
|
||||
return &urlObj
|
||||
}
|
||||
|
||||
func hostToString(host *url.URL, reportID string) string {
|
||||
q := host.Query()
|
||||
q.Add("reportID", reportID) // TODO - do we add the reportID?
|
||||
host.RawQuery = q.Encode()
|
||||
return host.String()
|
||||
}
|
||||
30
resultshandling/reporter/v1/mockreporter.go
Normal file
30
resultshandling/reporter/v1/mockreporter.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
)
|
||||
|
||||
// ReportMock is a no-op IReport implementation used when results should not
// be sent to the backend.
type ReportMock struct {
}

// NewReportMock returns a new no-op reporter.
func NewReportMock() *ReportMock {
	return &ReportMock{}
}

// ActionSendReport does nothing and always succeeds.
func (reportMock *ReportMock) ActionSendReport(opaSessionObj *cautils.OPASessionObj) error {
	return nil
}

// SetCustomerGUID is a no-op.
func (reportMock *ReportMock) SetCustomerGUID(customerGUID string) {
}

// SetClusterName is a no-op.
func (reportMock *ReportMock) SetClusterName(clusterName string) {
}

// DisplayReportURL prints a sign-up hint to stderr, since no report was
// uploaded that the user could browse.
func (reportMock *ReportMock) DisplayReportURL() {
	message := fmt.Sprintf("\nYou can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here: https://%s/cli-signup \n", getter.GetArmoAPIConnector().GetFrontendURL())
	cautils.InfoTextDisplay(os.Stderr, fmt.Sprintf("\n%s\n", message))
}
|
||||
176
resultshandling/reporter/v1/reporteventreceiver.go
Normal file
176
resultshandling/reporter/v1/reporteventreceiver.go
Normal file
@@ -0,0 +1,176 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
)
|
||||
|
||||
const MAX_REPORT_SIZE = 2097152 // 2 MB
|
||||
|
||||
// ReportEventReceiver uploads v1 posture reports to the ARMO event receiver
// and remembers a user-facing message describing the outcome.
type ReportEventReceiver struct {
	httpClient         *http.Client
	clusterName        string
	customerGUID       string
	eventReceiverURL   *url.URL
	token              string
	customerAdminEMail string
	message            string // shown later by DisplayReportURL
}

// NewReportEventReceiver builds a reporter from the tenant configuration.
func NewReportEventReceiver(tenantConfig *cautils.ConfigObj) *ReportEventReceiver {
	return &ReportEventReceiver{
		httpClient:         &http.Client{},
		clusterName:        tenantConfig.ClusterName,
		customerGUID:       tenantConfig.CustomerGUID,
		token:              tenantConfig.Token,
		customerAdminEMail: tenantConfig.CustomerAdminEMail,
	}
}
|
||||
|
||||
func (report *ReportEventReceiver) ActionSendReport(opaSessionObj *cautils.OPASessionObj) error {
|
||||
cautils.ReportV2ToV1(opaSessionObj)
|
||||
|
||||
if report.customerGUID == "" {
|
||||
report.message = "WARNING: Failed to publish results. Reason: Unknown accout ID. Run kubescape with the '--account <account ID>' flag. Contact ARMO team for more details"
|
||||
return nil
|
||||
}
|
||||
if report.clusterName == "" {
|
||||
report.message = "WARNING: Failed to publish results. Reason: Unknown cluster name. Run kubescape with the '--kube-context <cluster name>' flag"
|
||||
return nil
|
||||
}
|
||||
|
||||
opaSessionObj.PostureReport.ReportID = uuid.NewV4().String()
|
||||
opaSessionObj.PostureReport.CustomerGUID = report.customerGUID
|
||||
opaSessionObj.PostureReport.ClusterName = report.clusterName
|
||||
|
||||
if err := report.prepareReport(opaSessionObj.PostureReport, opaSessionObj.AllResources); err != nil {
|
||||
report.message = err.Error()
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetCustomerGUID overrides the account GUID the report is attributed to.
func (report *ReportEventReceiver) SetCustomerGUID(customerGUID string) {
	report.customerGUID = customerGUID
}

// SetClusterName overrides the cluster name after normalizing it.
func (report *ReportEventReceiver) SetClusterName(clusterName string) {
	report.clusterName = cautils.AdoptClusterName(clusterName) // clean cluster name
}
|
||||
|
||||
// prepareReport uploads the posture report in two stages — the framework
// results first, then the raw resources (chunked by sendResources) — and
// builds the user-facing message on success. A spinner is shown for the
// duration of the upload.
func (report *ReportEventReceiver) prepareReport(postureReport *reporthandling.PostureReport, allResources map[string]workloadinterface.IMetadata) error {
	report.initEventReceiverURL()
	host := hostToString(report.eventReceiverURL, postureReport.ReportID)

	cautils.StartSpinner()
	defer cautils.StopSpinner()

	// send framework results
	if err := report.sendReport(host, postureReport); err != nil {
		return err
	}

	// send resources
	if err := report.sendResources(host, postureReport, allResources); err != nil {
		return err
	}
	// Success: prepare the "view your results" message for DisplayReportURL.
	report.generateMessage()

	return nil
}
|
||||
|
||||
func (report *ReportEventReceiver) sendResources(host string, postureReport *reporthandling.PostureReport, allResources map[string]workloadinterface.IMetadata) error {
|
||||
splittedPostureReport := setPaginationReport(postureReport)
|
||||
counter := 0
|
||||
|
||||
for _, v := range allResources {
|
||||
r, err := json.Marshal(*iMetaToResource(v))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal resource '%s', reason: %v", v.GetID(), err)
|
||||
}
|
||||
|
||||
if counter+len(r) >= MAX_REPORT_SIZE && len(splittedPostureReport.Resources) > 0 {
|
||||
|
||||
// send report
|
||||
if err := report.sendReport(host, splittedPostureReport); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// delete resources
|
||||
splittedPostureReport.Resources = []reporthandling.Resource{}
|
||||
|
||||
// restart counter
|
||||
counter = 0
|
||||
}
|
||||
|
||||
counter += len(r)
|
||||
splittedPostureReport.Resources = append(splittedPostureReport.Resources, *iMetaToResource(v))
|
||||
}
|
||||
|
||||
return report.sendReport(host, splittedPostureReport)
|
||||
}
|
||||
// sendReport POSTs a single posture-report payload to the event receiver.
func (report *ReportEventReceiver) sendReport(host string, postureReport *reporthandling.PostureReport) error {
	reqBody, err := json.Marshal(postureReport)
	if err != nil {
		return fmt.Errorf("in 'sendReport' failed to json.Marshal, reason: %v", err)
	}
	// fmt.Printf("\n\n%s\n\n", reqBody)

	msg, err := getter.HttpPost(report.httpClient, host, nil, reqBody)
	if err != nil {
		// Include the server's response body (msg) to aid debugging.
		return fmt.Errorf("%s, %v:%s", host, err, msg)
	}
	return nil
}
|
||||
|
||||
// generateMessage builds the "view your results in the UI" message. Accounts
// that already have an admin e-mail get a direct link to the cluster's risk
// page (with a masked account GUID); other accounts get a sign-up link
// carrying the invitation token.
func (report *ReportEventReceiver) generateMessage() {
	message := "You can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here:"

	u := url.URL{}
	u.Scheme = "https"
	u.Host = getter.GetArmoAPIConnector().GetFrontendURL()

	if report.customerAdminEMail != "" {
		// Registered account: link straight to the results page.
		report.message = fmt.Sprintf("%s %s/risk/%s\n(Account: %s)", message, u.String(), report.clusterName, maskID(report.customerGUID))
		return
	}
	u.Path = "account/sign-up"
	q := u.Query()
	q.Add("invitationToken", report.token)
	q.Add("customerGUID", report.customerGUID)

	u.RawQuery = q.Encode()
	report.message = fmt.Sprintf("%s %s", message, u.String())
}

// DisplayReportURL prints the message prepared by ActionSendReport to stderr.
func (report *ReportEventReceiver) DisplayReportURL() {
	cautils.InfoTextDisplay(os.Stderr, fmt.Sprintf("\n\n%s\n\n", report.message))
}
|
||||
|
||||
// maskID masks an account GUID for display: the first four characters are
// kept, every other character becomes 'X', and the dash layout is preserved
// (e.g. "5d81XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"). Malformed IDs yield "".
func maskID(id string) string {
	sep := "-"
	splitted := strings.Split(id, sep)
	if len(splitted) != 5 {
		return ""
	}
	// Robustness fix: guard against a first segment shorter than the four
	// characters we keep, which previously caused an index-out-of-range panic.
	if len(splitted[0]) < 4 {
		return ""
	}
	str := splitted[0][:4]
	splitted[0] = splitted[0][4:]
	for i := range splitted {
		str += strings.Repeat("X", len(splitted[i]))
		str += sep
	}

	return strings.TrimSuffix(str, sep)
}
|
||||
47
resultshandling/reporter/v1/reporteventreceiverutils.go
Normal file
47
resultshandling/reporter/v1/reporteventreceiverutils.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/gofrs/uuid"
|
||||
)
|
||||
|
||||
// initEventReceiverURL builds the v1 event-receiver endpoint URL, carrying
// the customer GUID (normalized via uuid parsing) and the cluster name as
// query parameters, and stores it on the receiver.
func (report *ReportEventReceiver) initEventReceiverURL() {
	urlObj := url.URL{}

	urlObj.Scheme = "https"
	urlObj.Host = getter.GetArmoAPIConnector().GetReportReceiverURL()
	urlObj.Path = "/k8s/postureReport"
	q := urlObj.Query()
	// FromStringOrNil yields the zero-UUID string for an invalid GUID.
	q.Add("customerGUID", uuid.FromStringOrNil(report.customerGUID).String())
	q.Add("clusterName", report.clusterName)

	urlObj.RawQuery = q.Encode()

	report.eventReceiverURL = &urlObj
}
|
||||
|
||||
func hostToString(host *url.URL, reportID string) string {
|
||||
q := host.Query()
|
||||
q.Add("reportID", reportID) // TODO - do we add the reportID?
|
||||
host.RawQuery = q.Encode()
|
||||
return host.String()
|
||||
}
|
||||
|
||||
// setPaginationReport returns a new report carrying only the identifying
// metadata of the original — the per-chunk skeleton used when resources are
// uploaded across several requests.
func setPaginationReport(postureReport *reporthandling.PostureReport) *reporthandling.PostureReport {
	return &reporthandling.PostureReport{
		CustomerGUID:         postureReport.CustomerGUID,
		ClusterName:          postureReport.ClusterName,
		ReportID:             postureReport.ReportID,
		ReportGenerationTime: postureReport.ReportGenerationTime,
	}
}

// iMetaToResource wraps a raw workload object as a report Resource.
func iMetaToResource(obj workloadinterface.IMetadata) *reporthandling.Resource {
	return &reporthandling.Resource{
		ResourceID: obj.GetID(),
		Object:     obj.GetObject(),
	}
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package reporter
|
||||
package v1
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
197
resultshandling/reporter/v2/reporteventreceiver.go
Normal file
197
resultshandling/reporter/v2/reporteventreceiver.go
Normal file
@@ -0,0 +1,197 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
|
||||
)
|
||||
|
||||
const MAX_REPORT_SIZE = 2097152 // 2 MB
|
||||
|
||||
// ReportEventReceiver uploads v2 posture reports to the ARMO event receiver
// and remembers a user-facing message describing the outcome.
type ReportEventReceiver struct {
	httpClient         *http.Client
	clusterName        string
	customerGUID       string
	eventReceiverURL   *url.URL
	token              string
	customerAdminEMail string
	message            string // shown later by DisplayReportURL
}

// NewReportEventReceiver builds a reporter from the tenant configuration.
func NewReportEventReceiver(tenantConfig *cautils.ConfigObj) *ReportEventReceiver {
	return &ReportEventReceiver{
		httpClient:         &http.Client{},
		clusterName:        tenantConfig.ClusterName,
		customerGUID:       tenantConfig.CustomerGUID,
		token:              tenantConfig.Token,
		customerAdminEMail: tenantConfig.CustomerAdminEMail,
	}
}
|
||||
|
||||
func (report *ReportEventReceiver) ActionSendReport(opaSessionObj *cautils.OPASessionObj) error {
|
||||
finalizeReport(opaSessionObj)
|
||||
|
||||
if report.customerGUID == "" {
|
||||
report.message = "WARNING: Failed to publish results. Reason: Unknown accout ID. Run kubescape with the '--account <account ID>' flag. Contact ARMO team for more details"
|
||||
return nil
|
||||
}
|
||||
if report.clusterName == "" {
|
||||
report.message = "WARNING: Failed to publish results. Reason: Unknown cluster name. Run kubescape with the '--kube-context <cluster name>' flag"
|
||||
return nil
|
||||
}
|
||||
opaSessionObj.Report.ReportID = uuid.NewV4().String()
|
||||
opaSessionObj.Report.CustomerGUID = report.customerGUID
|
||||
opaSessionObj.Report.ClusterName = report.clusterName
|
||||
|
||||
if err := report.prepareReport(opaSessionObj.Report); err != nil {
|
||||
report.message = err.Error()
|
||||
} else {
|
||||
report.generateMessage()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetCustomerGUID overrides the account GUID the report is attributed to.
func (report *ReportEventReceiver) SetCustomerGUID(customerGUID string) {
	report.customerGUID = customerGUID
}

// SetClusterName overrides the cluster name after normalizing it.
func (report *ReportEventReceiver) SetClusterName(clusterName string) {
	report.clusterName = cautils.AdoptClusterName(clusterName) // clean cluster name
}
|
||||
|
||||
// prepareReport uploads the v2 report in chunks: resources first, then
// results, with the final results chunk flagged as the last report of the
// sequence. A spinner is shown for the duration of the upload.
func (report *ReportEventReceiver) prepareReport(postureReport *reporthandlingv2.PostureReport) error {
	report.initEventReceiverURL()
	host := hostToString(report.eventReceiverURL, postureReport.ReportID)

	cautils.StartSpinner()
	defer cautils.StopSpinner()

	// Chunks are numbered consecutively across both stages.
	reportCounter := 0

	// send resources
	if err := report.sendResources(host, postureReport, &reportCounter, false); err != nil {
		return err
	}
	reportCounter++

	// send results
	if err := report.sendResults(host, postureReport, &reportCounter, true); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
func (report *ReportEventReceiver) sendResources(host string, postureReport *reporthandlingv2.PostureReport, reportCounter *int, isLastReport bool) error {
|
||||
splittedPostureReport := setSubReport(postureReport)
|
||||
counter := 0
|
||||
|
||||
for _, v := range postureReport.Resources {
|
||||
r, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal resource '%s', reason: %v", v.ResourceID, err)
|
||||
}
|
||||
|
||||
if counter+len(r) >= MAX_REPORT_SIZE && len(splittedPostureReport.Resources) > 0 {
|
||||
|
||||
// send report
|
||||
if err := report.sendReport(host, splittedPostureReport, *reportCounter, false); err != nil {
|
||||
return err
|
||||
}
|
||||
*reportCounter++
|
||||
|
||||
// delete resources
|
||||
splittedPostureReport.Resources = []reporthandling.Resource{}
|
||||
|
||||
// restart counter
|
||||
counter = 0
|
||||
}
|
||||
|
||||
counter += len(r)
|
||||
splittedPostureReport.Resources = append(splittedPostureReport.Resources, v)
|
||||
}
|
||||
|
||||
return report.sendReport(host, splittedPostureReport, *reportCounter, isLastReport)
|
||||
}
|
||||
|
||||
func (report *ReportEventReceiver) sendResults(host string, postureReport *reporthandlingv2.PostureReport, reportCounter *int, isLastReport bool) error {
|
||||
splittedPostureReport := setSubReport(postureReport)
|
||||
counter := 0
|
||||
|
||||
for _, v := range postureReport.Results {
|
||||
r, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal resource '%s', reason: %v", v.GetResourceID(), err)
|
||||
}
|
||||
|
||||
if counter+len(r) >= MAX_REPORT_SIZE && len(splittedPostureReport.Resources) > 0 {
|
||||
|
||||
// send report
|
||||
if err := report.sendReport(host, splittedPostureReport, *reportCounter, false); err != nil {
|
||||
return err
|
||||
}
|
||||
*reportCounter++
|
||||
|
||||
// delete results
|
||||
splittedPostureReport.Results = []resourcesresults.Result{}
|
||||
|
||||
// restart counter
|
||||
counter = 0
|
||||
}
|
||||
|
||||
counter += len(r)
|
||||
splittedPostureReport.Results = append(splittedPostureReport.Results, v)
|
||||
}
|
||||
|
||||
return report.sendReport(host, splittedPostureReport, *reportCounter, isLastReport)
|
||||
}
|
||||
|
||||
func (report *ReportEventReceiver) sendReport(host string, postureReport *reporthandlingv2.PostureReport, counter int, isLastReport bool) error {
|
||||
postureReport.PaginationInfo = reporthandlingv2.PaginationMarks{
|
||||
ReportNumber: counter,
|
||||
IsLastReport: isLastReport,
|
||||
}
|
||||
reqBody, err := json.Marshal(postureReport)
|
||||
if err != nil {
|
||||
return fmt.Errorf("in 'sendReport' failed to json.Marshal, reason: %v", err)
|
||||
}
|
||||
msg, err := getter.HttpPost(report.httpClient, host, nil, reqBody)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s, %v:%s", host, err, msg)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// generateMessage builds the "view your results in the UI" message. Accounts
// that already have an admin e-mail get a direct link to the cluster's risk
// page (with a masked account GUID); other accounts get a sign-up link
// carrying the invitation token.
func (report *ReportEventReceiver) generateMessage() {
	message := "You can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here:"

	u := url.URL{}
	u.Scheme = "https"
	u.Host = getter.GetArmoAPIConnector().GetFrontendURL()

	if report.customerAdminEMail != "" {
		// Registered account: link straight to the results page.
		report.message = fmt.Sprintf("%s %s/risk/%s\n(Account: %s)", message, u.String(), report.clusterName, maskID(report.customerGUID))
		return
	}
	u.Path = "account/sign-up"
	q := u.Query()
	q.Add("invitationToken", report.token)
	q.Add("customerGUID", report.customerGUID)

	u.RawQuery = q.Encode()
	report.message = fmt.Sprintf("%s %s", message, u.String())
}

// DisplayReportURL prints the message prepared by ActionSendReport to stderr.
func (report *ReportEventReceiver) DisplayReportURL() {
	cautils.InfoTextDisplay(os.Stderr, fmt.Sprintf("\n\n%s\n\n", report.message))
}
|
||||
54
resultshandling/reporter/v2/reporteventreceiverutils.go
Normal file
54
resultshandling/reporter/v2/reporteventreceiverutils.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
reporthandlingv2 "github.com/armosec/opa-utils/reporthandling/v2"
|
||||
"github.com/gofrs/uuid"
|
||||
)
|
||||
|
||||
// initEventReceiverURL builds the v2 event-receiver endpoint URL (note the
// /v2/ path, unlike the v1 reporter), carrying the customer GUID (normalized
// via uuid parsing) and the cluster name as query parameters.
func (report *ReportEventReceiver) initEventReceiverURL() {
	urlObj := url.URL{}

	urlObj.Scheme = "https"
	urlObj.Host = getter.GetArmoAPIConnector().GetReportReceiverURL()
	urlObj.Path = "/k8s/v2/postureReport"

	q := urlObj.Query()
	// FromStringOrNil yields the zero-UUID string for an invalid GUID.
	q.Add("customerGUID", uuid.FromStringOrNil(report.customerGUID).String())
	q.Add("clusterName", report.clusterName)

	urlObj.RawQuery = q.Encode()

	report.eventReceiverURL = &urlObj
}
|
||||
|
||||
func hostToString(host *url.URL, reportID string) string {
|
||||
q := host.Query()
|
||||
q.Add("reportGUID", reportID) // TODO - do we add the reportID?
|
||||
host.RawQuery = q.Encode()
|
||||
return host.String()
|
||||
}
|
||||
|
||||
// setSubReport returns a new report carrying the metadata and summary of the
// original but none of its resources/results — the per-chunk skeleton used
// when uploading in several requests.
func setSubReport(postureReport *reporthandlingv2.PostureReport) *reporthandlingv2.PostureReport {
	return &reporthandlingv2.PostureReport{
		CustomerGUID:         postureReport.CustomerGUID,
		ClusterName:          postureReport.ClusterName,
		ReportID:             postureReport.ReportID,
		ReportGenerationTime: postureReport.ReportGenerationTime,
		SummaryDetails:       postureReport.SummaryDetails,
		Attributes:           postureReport.Attributes,
		ClusterCloudProvider: postureReport.ClusterCloudProvider,
		JobID:                postureReport.JobID,
		ClusterAPIServerInfo: postureReport.ClusterAPIServerInfo,
	}
}

// iMetaToResource wraps a raw workload object as a report Resource.
func iMetaToResource(obj workloadinterface.IMetadata) *reporthandling.Resource {
	return &reporthandling.Resource{
		ResourceID: obj.GetID(),
		Object:     obj.GetObject(),
	}
}
|
||||
20
resultshandling/reporter/v2/reporteventreceiverutils_test.go
Normal file
20
resultshandling/reporter/v2/reporteventreceiverutils_test.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestHostToString(t *testing.T) {
|
||||
host := url.URL{
|
||||
Scheme: "https",
|
||||
Host: "report.eudev3.cyberarmorsoft.com",
|
||||
Path: "k8srestapi/v1/postureReport",
|
||||
RawQuery: "cluster=openrasty_seal-7fvz&customerGUID=5d817063-096f-4d91-b39b-8665240080af",
|
||||
}
|
||||
expectedHost := "https://report.eudev3.cyberarmorsoft.com/k8srestapi/v1/postureReport?cluster=openrasty_seal-7fvz&customerGUID=5d817063-096f-4d91-b39b-8665240080af&reportID=ffdd2a00-4dc8-4bf3-b97a-a6d4fd198a41"
|
||||
receivedHost := hostToString(&host, "ffdd2a00-4dc8-4bf3-b97a-a6d4fd198a41")
|
||||
if receivedHost != expectedHost {
|
||||
t.Errorf("%s != %s", receivedHost, expectedHost)
|
||||
}
|
||||
}
|
||||
65
resultshandling/reporter/v2/utils.go
Normal file
65
resultshandling/reporter/v2/utils.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/workloadinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/armosec/opa-utils/reporthandling/results/v1/resourcesresults"
|
||||
)
|
||||
|
||||
// finalizeV2Report finalize the results objects by copying data from map to lists
|
||||
// finalizeReport finalizes the v2 results object before upload: the legacy
// v1 posture report is dropped, and the session's result/resource maps are
// copied into the report's list fields and then released. Each field is only
// filled if it is still empty, so the function is safe to call repeatedly.
func finalizeReport(opaSessionObj *cautils.OPASessionObj) {
	// The v2 sender only uploads the v2 report; the v1 report is not needed.
	opaSessionObj.PostureReport = nil
	if len(opaSessionObj.Report.Results) == 0 {
		opaSessionObj.Report.Results = make([]resourcesresults.Result, len(opaSessionObj.ResourcesResult))
		finalizeResults(opaSessionObj.Report.Results, opaSessionObj.ResourcesResult)
		opaSessionObj.ResourcesResult = nil // the report now owns the data; free the map
	}

	if len(opaSessionObj.Report.Resources) == 0 {
		opaSessionObj.Report.Resources = make([]reporthandling.Resource, len(opaSessionObj.AllResources))
		finalizeResources(opaSessionObj.Report.Resources, opaSessionObj.AllResources)
		opaSessionObj.AllResources = nil // the report now owns the data; free the map
	}

}
|
||||
func finalizeResults(results []resourcesresults.Result, resourcesResult map[string]resourcesresults.Result) {
|
||||
index := 0
|
||||
for resourceID := range resourcesResult {
|
||||
results[index] = resourcesResult[resourceID]
|
||||
index++
|
||||
}
|
||||
}
|
||||
|
||||
func finalizeResources(resources []reporthandling.Resource, allResources map[string]workloadinterface.IMetadata) {
|
||||
index := 0
|
||||
for resourceID := range allResources {
|
||||
if obj, ok := allResources[resourceID]; ok {
|
||||
r := *reporthandling.NewResource(obj.GetObject())
|
||||
r.ResourceID = resourceID
|
||||
resources[index] = r
|
||||
}
|
||||
|
||||
index++
|
||||
}
|
||||
}
|
||||
|
||||
// maskID masks an account GUID for display: the first four characters are
// kept, every other character becomes 'X', and the dash layout is preserved
// (e.g. "5d81XXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"). Malformed IDs yield "".
func maskID(id string) string {
	sep := "-"
	splitted := strings.Split(id, sep)
	if len(splitted) != 5 {
		return ""
	}
	// Robustness fix: guard against a first segment shorter than the four
	// characters we keep, which previously caused an index-out-of-range panic.
	if len(splitted[0]) < 4 {
		return ""
	}
	str := splitted[0][:4]
	splitted[0] = splitted[0][4:]
	for i := range splitted {
		str += strings.Repeat("X", len(splitted[i]))
		str += sep
	}

	return strings.TrimSuffix(str, sep)
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user