mirror of
https://github.com/kubescape/kubescape.git
synced 2026-03-03 02:00:27 +00:00
Compare commits
115 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ca927dec30 | ||
|
|
3a78ef46a3 | ||
|
|
bdb1cd0905 | ||
|
|
ffb556a637 | ||
|
|
40acfb5e9d | ||
|
|
de8bcfa0d2 | ||
|
|
9439f407da | ||
|
|
5095e62961 | ||
|
|
3301907864 | ||
|
|
151175c40f | ||
|
|
234d4fa537 | ||
|
|
66068757e1 | ||
|
|
8a7cda5dd1 | ||
|
|
a0ca68cc41 | ||
|
|
41cae0bc93 | ||
|
|
b4198fde8c | ||
|
|
bd24f35738 | ||
|
|
6fcbb757b5 | ||
|
|
3b8825e5d2 | ||
|
|
5cf3244918 | ||
|
|
934c9ccc8b | ||
|
|
41dfdfd1e8 | ||
|
|
427fb59c99 | ||
|
|
ae825800f6 | ||
|
|
d72700acf6 | ||
|
|
3310a6a26f | ||
|
|
740b5aa772 | ||
|
|
04b55e764a | ||
|
|
beb4062bb1 | ||
|
|
5d4cd4acdc | ||
|
|
aec8198131 | ||
|
|
0a850e47df | ||
|
|
4f466d517a | ||
|
|
548201c256 | ||
|
|
a54e5d9f8b | ||
|
|
536257afa1 | ||
|
|
d194dd173f | ||
|
|
a90177e7c0 | ||
|
|
be9e8ca47d | ||
|
|
eb9fe85c75 | ||
|
|
47183c405f | ||
|
|
2725923b9b | ||
|
|
f6c03ed7a2 | ||
|
|
76b5548216 | ||
|
|
cc57a34a32 | ||
|
|
f7099b62e6 | ||
|
|
093ee8916e | ||
|
|
ac0259157b | ||
|
|
9cb937798f | ||
|
|
11d4926c85 | ||
|
|
836211ae2b | ||
|
|
fddf3d3f58 | ||
|
|
b036d1079e | ||
|
|
137c39e918 | ||
|
|
8778d022cf | ||
|
|
043bdbacec | ||
|
|
e0e19b0258 | ||
|
|
c72e0f790a | ||
|
|
87e79110a2 | ||
|
|
faeae1af60 | ||
|
|
b371fbad01 | ||
|
|
90831e153d | ||
|
|
009d8275c1 | ||
|
|
05c88e0ffc | ||
|
|
7d12552932 | ||
|
|
b761505bb1 | ||
|
|
63367f4f31 | ||
|
|
6f9d6b4af3 | ||
|
|
6ed8287b01 | ||
|
|
d948e20682 | ||
|
|
42929dac58 | ||
|
|
74449c64a2 | ||
|
|
0bd164c69e | ||
|
|
d44c082134 | ||
|
|
d756b9bfe4 | ||
|
|
6144050212 | ||
|
|
b5fe456b0d | ||
|
|
37791ff391 | ||
|
|
c2d99163a6 | ||
|
|
d948353b99 | ||
|
|
2649cb75f6 | ||
|
|
7c8da4a4b9 | ||
|
|
b72f5d75f7 | ||
|
|
947826a764 | ||
|
|
906c69a86d | ||
|
|
9c65aadcc7 | ||
|
|
09879e00ba | ||
|
|
5cbb7d940d | ||
|
|
050976d7a3 | ||
|
|
6a96d9f8b5 | ||
|
|
8c55dfbcf6 | ||
|
|
78c5b49b5a | ||
|
|
c33ca04a4f | ||
|
|
f12e66d315 | ||
|
|
8e4bb36df8 | ||
|
|
034412f6fe | ||
|
|
88d83ba72d | ||
|
|
829e7a33aa | ||
|
|
1dcde1538e | ||
|
|
b876cd0975 | ||
|
|
a06d11dc17 | ||
|
|
b1f5cd45c4 | ||
|
|
2c44c0e1f0 | ||
|
|
37c429b264 | ||
|
|
82994ce754 | ||
|
|
621f64c363 | ||
|
|
419a77f144 | ||
|
|
f043358a59 | ||
|
|
24b17a8c27 | ||
|
|
26a64b788b | ||
|
|
718d549bb3 | ||
|
|
54a6a8324a | ||
|
|
ea42d9a061 | ||
|
|
1d1344ebc1 | ||
|
|
b053b84197 |
31
.github/workflows/build.yaml
vendored
31
.github/workflows/build.yaml
vendored
@@ -29,7 +29,6 @@ jobs:
|
||||
os: [ubuntu-latest, macos-latest, windows-latest]
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
@@ -57,3 +56,33 @@ jobs:
|
||||
asset_path: build/${{ matrix.os }}/kubescape
|
||||
asset_name: kubescape-${{ matrix.os }}
|
||||
asset_content_type: application/octet-stream
|
||||
|
||||
|
||||
build-docker:
|
||||
name: Build docker container, tag and upload to registry
|
||||
needs: build
|
||||
if: ${{ github.repository == 'armosec/kubescape' }}
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set name
|
||||
run: echo quay.io/armosec/kubescape:v1.0.${{ github.run_number }} > build_tag.txt
|
||||
|
||||
- name: Build the Docker image
|
||||
run: docker build . --file build/Dockerfile --tag $(cat build_tag.txt)
|
||||
|
||||
- name: Re-Tag Image to latest
|
||||
run: docker tag $(cat build_tag.txt) quay.io/armosec/kubescape:latest
|
||||
|
||||
- name: Login to Quay.io
|
||||
env: # Or as an environment variable
|
||||
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
|
||||
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
|
||||
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
|
||||
- name: Push Docker image
|
||||
run: |
|
||||
docker push $(cat build_tag.txt)
|
||||
docker push quay.io/armosec/kubescape:latest
|
||||
|
||||
|
||||
31
.github/workflows/build_dev.yaml
vendored
31
.github/workflows/build_dev.yaml
vendored
@@ -3,9 +3,6 @@ name: build-dev
|
||||
on:
|
||||
push:
|
||||
branches: [ dev ]
|
||||
pull_request:
|
||||
branches: [ dev ]
|
||||
types: [ closed ]
|
||||
jobs:
|
||||
build:
|
||||
name: Create cross-platform dev build
|
||||
@@ -38,3 +35,31 @@ jobs:
|
||||
with:
|
||||
name: kubescape-${{ matrix.os }}
|
||||
path: build/${{ matrix.os }}/kubescape
|
||||
|
||||
|
||||
build-docker:
|
||||
name: Build docker container, tag and upload to registry
|
||||
needs: build
|
||||
if: ${{ github.repository == 'armosec/kubescape' }}
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Set name
|
||||
run: echo quay.io/armosec/kubescape:dev-v1.0.${{ github.run_number }} > build_tag.txt
|
||||
|
||||
- name: Build the Docker image
|
||||
run: docker build . --file build/Dockerfile --tag $(cat build_tag.txt)
|
||||
|
||||
- name: Login to Quay.io
|
||||
env: # Or as an environment variable
|
||||
QUAY_PASSWORD: ${{ secrets.QUAYIO_REGISTRY_PASSWORD }}
|
||||
QUAY_USERNAME: ${{ secrets.QUAYIO_REGISTRY_USERNAME }}
|
||||
run: docker login -u="${QUAY_USERNAME}" -p="${QUAY_PASSWORD}" quay.io
|
||||
|
||||
- name: Push Docker image
|
||||
run: |
|
||||
docker push $(cat build_tag.txt)
|
||||
|
||||
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,4 +1,5 @@
|
||||
*.vs*
|
||||
*kubescape*
|
||||
*debug*
|
||||
*vender*
|
||||
.idea
|
||||
45
README.md
45
README.md
@@ -3,9 +3,12 @@
|
||||
[](https://github.com/armosec/kubescape/actions/workflows/build.yaml)
|
||||
[](https://goreportcard.com/report/github.com/armosec/kubescape)
|
||||
|
||||
Kubescape is the first tool for testing if Kubernetes is deployed securely as defined in [Kubernetes Hardening Guidance by NSA and CISA](https://www.nsa.gov/Press-Room/News-Highlights/Article/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/)
|
||||
Kubescape is the first open-source tool for testing if Kubernetes is deployed securely according to multiple frameworks:
|
||||
regulatory, customized company policies and DevSecOps best practices, such as the [NSA-CISA](https://www.armosec.io/blog/kubernetes-hardening-guidance-summary-by-armo) and the [MITRE ATT&CK®](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) .
|
||||
Kubescape scans K8s clusters, YAML files, and HELM charts, and detect misconfigurations and software vulnerabilities at early stages of the CI/CD pipeline and provides a risk score instantly and risk trends over time.
|
||||
Kubescape integrates natively with other DevOps tools, including Jenkins, CircleCI and Github workflows.
|
||||
|
||||
Use Kubescape to test clusters or scan single YAML files and integrate it to your processes.
|
||||
</br>
|
||||
|
||||
<img src="docs/demo.gif">
|
||||
|
||||
@@ -21,11 +24,9 @@ curl -s https://raw.githubusercontent.com/armosec/kubescape/master/install.sh |
|
||||
|
||||
## Run:
|
||||
```
|
||||
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
|
||||
kubescape scan framework nsa
|
||||
```
|
||||
|
||||
If you wish to scan all namespaces in your cluster, remove the `--exclude-namespaces` flag.
|
||||
|
||||
<img src="docs/summary.png">
|
||||
|
||||
### Click [👍](https://github.com/armosec/kubescape/stargazers) if you want us to continue to develop and improve Kubescape 😀
|
||||
@@ -86,15 +87,21 @@ Set-ExecutionPolicy RemoteSigned -scope CurrentUser
|
||||
|
||||
### Examples
|
||||
|
||||
* Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/News-Features/Feature-Stories/Article-View/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework and submit results to [Armo portal](https://portal.armo.cloud/)
|
||||
* Scan a running Kubernetes cluster with [`nsa`](https://www.nsa.gov/News-Features/Feature-Stories/Article-View/Article/2716980/nsa-cisa-release-kubernetes-hardening-guidance/) framework and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
|
||||
```
|
||||
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --submit
|
||||
kubescape scan framework nsa --submit
|
||||
```
|
||||
|
||||
|
||||
* Scan a running Kubernetes cluster with [`mitre`](https://www.microsoft.com/security/blog/2020/04/02/attack-matrix-kubernetes/) framework and submit results to [Armo portal](https://portal.armo.cloud/)
|
||||
* Scan a running Kubernetes cluster with [`MITRE ATT&CK®`](https://www.microsoft.com/security/blog/2021/03/23/secure-containerized-environments-with-updated-threat-matrix-for-kubernetes/) framework and submit results to the [Kubescape SaaS version](https://portal.armo.cloud/)
|
||||
```
|
||||
kubescape scan framework mitre --exclude-namespaces kube-system,kube-public --submit
|
||||
kubescape scan framework mitre --submit
|
||||
```
|
||||
|
||||
|
||||
* Scan a running Kubernetes cluster with a specific control using the control name or control ID. [List of controls](https://hub.armo.cloud/docs/controls)
|
||||
```
|
||||
kubescape scan control "Privileged container"
|
||||
```
|
||||
|
||||
* Scan local `yaml`/`json` files before deploying. [Take a look at the demonstration](https://youtu.be/Ox6DaR7_4ZI)
|
||||
@@ -103,9 +110,9 @@ kubescape scan framework nsa *.yaml
|
||||
```
|
||||
|
||||
|
||||
* Scan `yaml`/`json` files from url
|
||||
* Scan kubernetes manifest files from a public github repository
|
||||
```
|
||||
kubescape scan framework nsa https://raw.githubusercontent.com/GoogleCloudPlatform/microservices-demo/master/release/kubernetes-manifests.yaml
|
||||
kubescape scan framework nsa https://github.com/armosec/kubescape
|
||||
```
|
||||
|
||||
* Output in `json` format
|
||||
@@ -118,11 +125,15 @@ kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --form
|
||||
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format junit --output results.xml
|
||||
```
|
||||
|
||||
* Scan with exceptions, objects with exceptions will be presented as `warning` and not `fail` <img src="docs/new-feature.svg">
|
||||
* Scan with exceptions, objects with exceptions will be presented as `exclude` and not `fail`
|
||||
```
|
||||
kubescape scan framework nsa --exceptions examples/exceptions.json
|
||||
```
|
||||
|
||||
### Repeatedly Kubescape Scanning using a CronJob
|
||||
|
||||
For setting up a cronJob please follow the [instructions](examples/cronJob-support/README.md)
|
||||
|
||||
### Helm Support
|
||||
|
||||
* Render the helm chart using [`helm template`](https://helm.sh/docs/helm/helm_template/) and pass to stdout
|
||||
@@ -187,12 +198,18 @@ go build -o kubescape .
|
||||
|
||||
3. Run
|
||||
```
|
||||
./kubescape scan framework nsa --exclude-namespaces kube-system,kube-public
|
||||
./kubescape scan framework nsa
|
||||
```
|
||||
|
||||
4. Enjoy :zany_face:
|
||||
|
||||
## How to build in Docker
|
||||
## Docker Support
|
||||
|
||||
### Official Docker image
|
||||
```
|
||||
quay.io/armosec/kubescape
|
||||
```
|
||||
### Build your own Docker image
|
||||
|
||||
1. Clone Project
|
||||
```
|
||||
|
||||
2
build.py
2
build.py
@@ -41,7 +41,7 @@ def main():
|
||||
|
||||
# Set some variables
|
||||
packageName = getPackageName()
|
||||
buildUrl = "github.com/armosec/kubescape/cmd.BuildNumber"
|
||||
buildUrl = "github.com/armosec/kubescape/clihandler/cmd.BuildNumber"
|
||||
releaseVersion = os.getenv("RELEASE")
|
||||
ArmoBEServer = os.getenv("ArmoBEServer")
|
||||
ArmoERServer = os.getenv("ArmoERServer")
|
||||
|
||||
@@ -1,15 +1,27 @@
|
||||
FROM golang:1.17-alpine as builder
|
||||
ENV GOPROXY=https://goproxy.io,direct
|
||||
ENV GO111MODULE=on
|
||||
#ENV GOPROXY=https://goproxy.io,direct
|
||||
ENV GO111MODULE=
|
||||
|
||||
ENV CGO_ENABLED=0
|
||||
|
||||
# Install required python/pip
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
RUN apk add --update --no-cache python3 && ln -sf python3 /usr/bin/python
|
||||
RUN python3 -m ensurepip
|
||||
RUN pip3 install --no-cache --upgrade pip setuptools
|
||||
|
||||
WORKDIR /work
|
||||
ADD . .
|
||||
RUN GOOS=linux CGO_ENABLED=0 go build -ldflags="-s -w " -installsuffix cgo -o kubescape .
|
||||
|
||||
RUN python build.py
|
||||
|
||||
RUN ls -ltr build/ubuntu-latest
|
||||
RUN cat /work/build/ubuntu-latest/kubescape.sha1
|
||||
|
||||
FROM alpine
|
||||
COPY --from=builder /work/kubescape /usr/bin/kubescape
|
||||
COPY --from=builder /work/build/ubuntu-latest/kubescape /usr/bin/kubescape
|
||||
|
||||
# # Download the frameworks. Use the "--use-default" flag when running kubescape
|
||||
# RUN kubescape download framework nsa && kubescape download framework mitre
|
||||
|
||||
CMD ["kubescape"]
|
||||
ENTRYPOINT ["kubescape"]
|
||||
|
||||
@@ -1,101 +0,0 @@
|
||||
package apis
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// HTTPReqFunc allows you to insert query params and more to aggregation message while using update aggregator
|
||||
type HTTPReqFunc func(req *http.Request, qryData interface{})
|
||||
|
||||
func BasicBEQuery(req *http.Request, qryData interface{}) {
|
||||
|
||||
q := req.URL.Query()
|
||||
|
||||
if notificationData, isok := qryData.(*LoginObject); isok {
|
||||
q.Add("customerGUID", notificationData.GUID)
|
||||
}
|
||||
|
||||
req.URL.RawQuery = q.Encode()
|
||||
}
|
||||
|
||||
func EmptyQuery(req *http.Request, qryData interface{}) {
|
||||
q := req.URL.Query()
|
||||
req.URL.RawQuery = q.Encode()
|
||||
}
|
||||
|
||||
func MapQuery(req *http.Request, qryData interface{}) {
|
||||
q := req.URL.Query()
|
||||
if qryMap, isok := qryData.(map[string]string); isok {
|
||||
for k, v := range qryMap {
|
||||
q.Add(k, v)
|
||||
}
|
||||
|
||||
}
|
||||
req.URL.RawQuery = q.Encode()
|
||||
}
|
||||
|
||||
func BEHttpRequest(loginobj *LoginObject, beURL,
|
||||
httpverb string,
|
||||
endpoint string,
|
||||
payload []byte,
|
||||
f HTTPReqFunc,
|
||||
qryData interface{}) ([]byte, error) {
|
||||
client := &http.Client{}
|
||||
|
||||
beURL = fmt.Sprintf("%v/%v", beURL, endpoint)
|
||||
req, err := http.NewRequest(httpverb, beURL, bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.Header.Set("Authorization", loginobj.Authorization)
|
||||
f(req, qryData)
|
||||
|
||||
for _, cookie := range loginobj.Cookies {
|
||||
req.AddCookie(cookie)
|
||||
}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
fmt.Printf("req:\n%v\nresp:%v\n", req, resp)
|
||||
return nil, fmt.Errorf("Error #%v Due to: %v", resp.StatusCode, resp.Status)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return body, nil
|
||||
}
|
||||
|
||||
type BELoginResponse struct {
|
||||
Name string `json:"name"`
|
||||
PreferredUsername string `json:"preferred_username"`
|
||||
Email string `json:"email"`
|
||||
CustomerGuid string `json:"customerGuid"`
|
||||
Expires string `json:"expires"`
|
||||
Authorization string `json:"authorization"`
|
||||
Cookies []*http.Cookie
|
||||
}
|
||||
|
||||
func (r *BELoginResponse) ToLoginObject() *LoginObject {
|
||||
l := &LoginObject{}
|
||||
l.Authorization = r.Authorization
|
||||
l.Cookies = r.Cookies
|
||||
l.Expires = r.Expires
|
||||
l.GUID = r.CustomerGuid
|
||||
|
||||
return l
|
||||
}
|
||||
|
||||
type BackendConnector struct {
|
||||
BaseURL string
|
||||
BELoginResponse *BELoginResponse
|
||||
Credentials *CustomerLoginDetails
|
||||
HTTPClient *http.Client
|
||||
}
|
||||
@@ -1,128 +0,0 @@
|
||||
package apis
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func MakeBackendConnector(client *http.Client, baseURL string, loginDetails *CustomerLoginDetails) (*BackendConnector, error) {
|
||||
if err := ValidateBEConnectorMakerInput(client, baseURL, loginDetails); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn := &BackendConnector{BaseURL: baseURL, Credentials: loginDetails, HTTPClient: client}
|
||||
err := conn.Login()
|
||||
|
||||
return conn, err
|
||||
}
|
||||
|
||||
func ValidateBEConnectorMakerInput(client *http.Client, baseURL string, loginDetails *CustomerLoginDetails) error {
|
||||
if client == nil {
|
||||
return fmt.Errorf("You must provide an initialized httpclient")
|
||||
}
|
||||
if len(baseURL) == 0 {
|
||||
return fmt.Errorf("you must provide a valid backend url")
|
||||
}
|
||||
|
||||
if loginDetails == nil || (len(loginDetails.Email) == 0 && len(loginDetails.Password) == 0) {
|
||||
return fmt.Errorf("you must provide valid login details")
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (r *BackendConnector) Login() error {
|
||||
if !r.IsExpired() {
|
||||
return nil
|
||||
}
|
||||
|
||||
loginInfoBytes, err := json.Marshal(r.Credentials)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to marshal credentials properly")
|
||||
}
|
||||
|
||||
beURL := fmt.Sprintf("%v/%v", r.BaseURL, "login")
|
||||
|
||||
req, err := http.NewRequest("POST", beURL, bytes.NewReader(loginInfoBytes))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.Header.Set("Referer", strings.Replace(beURL, "dashbe", "cpanel", 1))
|
||||
resp, err := r.HTTPClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to read login response")
|
||||
}
|
||||
|
||||
loginS := &BELoginResponse{}
|
||||
json.Unmarshal(body, &loginS)
|
||||
|
||||
loginS.Cookies = resp.Cookies()
|
||||
r.BELoginResponse = loginS
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *BackendConnector) IsExpired() bool {
|
||||
return r.BELoginResponse == nil || r.BELoginResponse.ToLoginObject().IsExpired()
|
||||
}
|
||||
|
||||
func (r *BackendConnector) GetBaseURL() string {
|
||||
return r.BaseURL
|
||||
}
|
||||
func (r *BackendConnector) GetLoginObj() *LoginObject {
|
||||
return r.BELoginResponse.ToLoginObject()
|
||||
}
|
||||
func (r *BackendConnector) GetClient() *http.Client {
|
||||
return r.HTTPClient
|
||||
}
|
||||
|
||||
func (r *BackendConnector) HTTPSend(httpverb string,
|
||||
endpoint string,
|
||||
payload []byte,
|
||||
f HTTPReqFunc,
|
||||
qryData interface{}) ([]byte, error) {
|
||||
|
||||
beURL := fmt.Sprintf("%v/%v", r.GetBaseURL(), endpoint)
|
||||
req, err := http.NewRequest(httpverb, beURL, bytes.NewReader(payload))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if r.IsExpired() {
|
||||
r.Login()
|
||||
}
|
||||
|
||||
loginobj := r.GetLoginObj()
|
||||
req.Header.Set("Authorization", loginobj.Authorization)
|
||||
f(req, qryData)
|
||||
q := req.URL.Query()
|
||||
q.Set("customerGUID", loginobj.GUID)
|
||||
req.URL.RawQuery = q.Encode()
|
||||
|
||||
for _, cookie := range loginobj.Cookies {
|
||||
req.AddCookie(cookie)
|
||||
}
|
||||
resp, err := r.GetClient().Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
fmt.Printf("req:\n%v\nresp:%v\n", req, resp)
|
||||
return nil, fmt.Errorf("Error #%v Due to: %v", resp.StatusCode, resp.Status)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return body, nil
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
package apis
|
||||
|
||||
// WebsocketScanCommand api
|
||||
const (
|
||||
WebsocketScanCommandVersion string = "v1"
|
||||
WebsocketScanCommandPath string = "scanImage"
|
||||
)
|
||||
|
||||
// commands send via websocket
|
||||
const (
|
||||
UPDATE string = "update"
|
||||
ATTACH string = "Attach"
|
||||
REMOVE string = "remove"
|
||||
DETACH string = "Detach"
|
||||
INCOMPATIBLE string = "Incompatible"
|
||||
REPLACE_HEADERS string = "ReplaceHeaders"
|
||||
IMAGE_UNREACHABLE string = "ImageUnreachable"
|
||||
SIGN string = "sign"
|
||||
UNREGISTERED string = "unregistered"
|
||||
INJECT string = "inject"
|
||||
RESTART string = "restart"
|
||||
ENCRYPT string = "encryptSecret"
|
||||
DECRYPT string = "decryptSecret"
|
||||
SCAN string = "scan"
|
||||
)
|
||||
@@ -1,78 +0,0 @@
|
||||
package apis
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
)
|
||||
|
||||
// WebsocketScanCommand trigger scan thru the websocket
|
||||
type WebsocketScanCommand struct {
|
||||
// CustomerGUID string `json:"customerGUID"`
|
||||
ImageTag string `json:"imageTag"`
|
||||
Wlid string `json:"wlid"`
|
||||
IsScanned bool `json:"isScanned"`
|
||||
ContainerName string `json:"containerName"`
|
||||
JobID string `json:"jobID,omitempty"`
|
||||
LastAction int `json:"actionIDN"`
|
||||
// ImageHash string `json:"imageHash"`
|
||||
Credentials *types.AuthConfig `json:"credentials,omitempty"`
|
||||
}
|
||||
|
||||
//taken from BE
|
||||
// ElasticRespTotal holds the total struct in Elastic array response
|
||||
type ElasticRespTotal struct {
|
||||
Value int `json:"value"`
|
||||
Relation string `json:"relation"`
|
||||
}
|
||||
|
||||
// V2ListResponse holds the response of some list request with some metadata
|
||||
type V2ListResponse struct {
|
||||
Total ElasticRespTotal `json:"total"`
|
||||
Response interface{} `json:"response"`
|
||||
// Cursor for quick access to the next page. Not supported yet
|
||||
Cursor string `json:"cursor"`
|
||||
}
|
||||
|
||||
// Oauth2Customer returns inside the "ca_groups" field in claims section of
|
||||
// Oauth2 verification process
|
||||
type Oauth2Customer struct {
|
||||
CustomerName string `json:"customerName"`
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
}
|
||||
|
||||
type LoginObject struct {
|
||||
Authorization string `json:"authorization"`
|
||||
GUID string
|
||||
Cookies []*http.Cookie
|
||||
Expires string
|
||||
}
|
||||
|
||||
type SafeMode struct {
|
||||
Reporter string `json:"reporter"` // "Agent"
|
||||
Action string `json:"action,omitempty"` // "action"
|
||||
Wlid string `json:"wlid"` // CAA_WLID
|
||||
PodName string `json:"podName"` // CAA_POD_NAME
|
||||
InstanceID string `json:"instanceID"` // CAA_POD_NAME
|
||||
ContainerName string `json:"containerName,omitempty"` // CAA_CONTAINER_NAME
|
||||
ProcessName string `json:"processName,omitempty"`
|
||||
ProcessID int `json:"processID,omitempty"`
|
||||
ProcessCMD string `json:"processCMD,omitempty"`
|
||||
ComponentGUID string `json:"componentGUID,omitempty"` // CAA_GUID
|
||||
StatusCode int `json:"statusCode"` // 0/1/2
|
||||
ProcessExitCode int `json:"processExitCode"` // 0 +
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
Message string `json:"message,omitempty"` // any string
|
||||
JobID string `json:"jobID,omitempty"` // any string
|
||||
Compatible *bool `json:"compatible,omitempty"`
|
||||
}
|
||||
|
||||
func (safeMode *SafeMode) Json() string {
|
||||
b, err := json.Marshal(*safeMode)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("%s", b)
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
package apis
|
||||
|
||||
// import (
|
||||
// "fmt"
|
||||
// "net/http"
|
||||
// "testing"
|
||||
// )
|
||||
|
||||
// func TestAuditStructure(t *testing.T) {
|
||||
// c := http.Client{}
|
||||
// be, err := MakeBackendConnector(&c, "https://dashbe.eudev3.cyberarmorsoft.com", &CustomerLoginDetails{Email: "lalafi@cyberarmor.io", Password: "*", CustomerName: "CyberArmorTests"})
|
||||
// if err != nil {
|
||||
// t.Errorf("sad1")
|
||||
|
||||
// }
|
||||
|
||||
// b, err := be.HTTPSend("GET", "v1/microservicesOverview", nil, MapQuery, map[string]string{"wlid": "wlid://cluster-childrenofbodom/namespace-default/deployment-pos"})
|
||||
// if err != nil {
|
||||
// t.Errorf("sad2")
|
||||
|
||||
// }
|
||||
// fmt.Printf("%v", string(b))
|
||||
|
||||
// t.Errorf("sad")
|
||||
|
||||
// }
|
||||
@@ -1,27 +0,0 @@
|
||||
package apis
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// type Dashboard interface {
|
||||
// OPAFRAMEWORKGet(string, bool) ([]opapolicy.Framework, error)
|
||||
// }
|
||||
|
||||
// Connector - interface for any connector (BE/Portal and so on)
|
||||
type Connector interface {
|
||||
|
||||
//may used for a more generic httpsend interface based method
|
||||
GetBaseURL() string
|
||||
GetLoginObj() *LoginObject
|
||||
GetClient() *http.Client
|
||||
|
||||
Login() error
|
||||
IsExpired() bool
|
||||
|
||||
HTTPSend(httpverb string,
|
||||
endpoint string,
|
||||
payload []byte,
|
||||
f HTTPReqFunc,
|
||||
qryData interface{}) ([]byte, error)
|
||||
}
|
||||
@@ -1,255 +0,0 @@
|
||||
package apis
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
oidc "github.com/coreos/go-oidc"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
|
||||
// "go.uber.org/zap"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
func GetOauth2TokenURL() string {
|
||||
return "https://idens.eudev3.cyberarmorsoft.com/auth/realms/CyberArmorSites"
|
||||
}
|
||||
|
||||
func GetLoginStruct() (LoginAux, error) {
|
||||
|
||||
return LoginAux{Referer: "https://cpanel.eudev3.cyberarmorsoft.com/login", Url: "https://cpanel.eudev3.cyberarmorsoft.com/login"}, nil
|
||||
}
|
||||
|
||||
func LoginWithKeycloak(loginDetails CustomerLoginDetails) ([]uuid.UUID, *oidc.IDToken, error) {
|
||||
// var custGUID uuid.UUID
|
||||
// config.Oauth2TokenURL
|
||||
if GetOauth2TokenURL() == "" {
|
||||
return nil, nil, fmt.Errorf("missing oauth2 token URL")
|
||||
}
|
||||
urlaux, _ := GetLoginStruct()
|
||||
conf, err := getOauth2Config(urlaux)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ctx := context.Background()
|
||||
provider, err := oidc.NewProvider(ctx, GetOauth2TokenURL())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// "Oauth2ClientID": "golang-client"
|
||||
oidcConfig := &oidc.Config{
|
||||
ClientID: "golang-client",
|
||||
SkipClientIDCheck: true,
|
||||
}
|
||||
|
||||
verifier := provider.Verifier(oidcConfig)
|
||||
ouToken, err := conf.PasswordCredentialsToken(ctx, loginDetails.Email, loginDetails.Password)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
// "Authorization",
|
||||
authorization := fmt.Sprintf("%s %s", ouToken.Type(), ouToken.AccessToken)
|
||||
// oidc.IDTokenVerifier
|
||||
tkn, err := verifier.Verify(ctx, ouToken.AccessToken)
|
||||
if err != nil {
|
||||
return nil, tkn, err
|
||||
}
|
||||
tkn.Nonce = authorization
|
||||
if loginDetails.CustomerName == "" {
|
||||
customers, err := getCustomersNames(tkn)
|
||||
if err != nil {
|
||||
return nil, tkn, err
|
||||
}
|
||||
if len(customers) == 1 {
|
||||
loginDetails.CustomerName = customers[0]
|
||||
} else {
|
||||
return nil, tkn, fmt.Errorf("login with one of the following customers: %v", customers)
|
||||
}
|
||||
}
|
||||
custGUID, err := getCustomerGUID(tkn, &loginDetails)
|
||||
if err != nil {
|
||||
return nil, tkn, err
|
||||
}
|
||||
return []uuid.UUID{custGUID}, tkn, nil
|
||||
}
|
||||
|
||||
func getOauth2Config(urlaux LoginAux) (*oauth2.Config, error) {
|
||||
reURLSlices := strings.Split(urlaux.Referer, "/")
|
||||
if len(reURLSlices) == 0 {
|
||||
reURLSlices = strings.Split(urlaux.Url, "/")
|
||||
}
|
||||
// zapLogger.With(zap.Strings("referer", reURLSlices)).Info("Searching oauth2Config for")
|
||||
if len(reURLSlices) < 3 {
|
||||
reURLSlices = []string{reURLSlices[0], reURLSlices[0], reURLSlices[0]}
|
||||
}
|
||||
lg, _ := GetLoginStruct()
|
||||
provider, _ := oidc.NewProvider(context.Background(), GetOauth2TokenURL())
|
||||
//provider.Endpoint {"AuthURL":"https://idens.eudev3.cyberarmorsoft.com/auth/realms/CyberArmorSites/protocol/openid-connect/auth","TokenURL":"https://idens.eudev3.cyberarmorsoft.com/auth/realms/CyberArmorSites/protocol/openid-connect/token","AuthStyle":0}
|
||||
conf := oauth2.Config{
|
||||
ClientID: "golang-client",
|
||||
ClientSecret: "4e33bad2-3491-41a6-b486-93c492cfb4a2",
|
||||
RedirectURL: lg.Referer,
|
||||
// Discovery returns the OAuth2 endpoints.
|
||||
Endpoint: provider.Endpoint(),
|
||||
// "openid" is a required scope for OpenID Connect flows.
|
||||
Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
|
||||
}
|
||||
return &conf, nil
|
||||
// return nil, fmt.Errorf("canno't find oauth2Config for referer '%+v'.\nPlease set referer or origin headers", reURLSlices)
|
||||
}
|
||||
|
||||
func getCustomersNames(oauth2Details *oidc.IDToken) ([]string, error) {
|
||||
var claimsJSON Oauth2Claims
|
||||
if err := oauth2Details.Claims(&claimsJSON); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
customersList := make([]string, 0, len(claimsJSON.CAGroups))
|
||||
for _, v := range claimsJSON.CAGroups {
|
||||
var caCustomer Oauth2Customer
|
||||
if err := json.Unmarshal([]byte(v), &caCustomer); err == nil {
|
||||
customersList = append(customersList, caCustomer.CustomerName)
|
||||
}
|
||||
}
|
||||
return customersList, nil
|
||||
}
|
||||
|
||||
func getCustomerGUID(tkn *oidc.IDToken, loginDetails *CustomerLoginDetails) (uuid.UUID, error) {
|
||||
|
||||
customers, err := getCustomersList(tkn)
|
||||
if err != nil {
|
||||
return uuid.UUID{}, err
|
||||
}
|
||||
|
||||
// if customer name not provided - use default customer
|
||||
if loginDetails.CustomerName == "" && len(customers) > 0 {
|
||||
return uuid.FromString(customers[0].CustomerGUID)
|
||||
}
|
||||
|
||||
for _, i := range customers {
|
||||
if i.CustomerName == loginDetails.CustomerName {
|
||||
return uuid.FromString(i.CustomerGUID)
|
||||
}
|
||||
}
|
||||
return uuid.UUID{}, fmt.Errorf("customer name not found in customer list")
|
||||
}
|
||||
|
||||
func getCustomersList(oauth2Details *oidc.IDToken) ([]Oauth2Customer, error) {
|
||||
var claimsJSON Oauth2Claims
|
||||
if err := oauth2Details.Claims(&claimsJSON); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
customersList := make([]Oauth2Customer, 0, len(claimsJSON.CAGroups))
|
||||
for _, v := range claimsJSON.CAGroups {
|
||||
var caCustomer Oauth2Customer
|
||||
if err := json.Unmarshal([]byte(v), &caCustomer); err == nil {
|
||||
customersList = append(customersList, caCustomer)
|
||||
}
|
||||
}
|
||||
return customersList, nil
|
||||
}
|
||||
|
||||
// func MakeAuthCookies(custGUID uuid.UUID, ouToken *oidc.IDToken) (*http.Cookie, error) {
|
||||
// var ccc http.Cookie
|
||||
// var responseData AuthenticationCookie
|
||||
// expireDate := time.Now().UTC().Add(time.Duration(config.CookieExpirationHours) * time.Hour)
|
||||
// if ouToken != nil {
|
||||
// expireDate = ouToken.Expiry
|
||||
// }
|
||||
// ccc.Expires = expireDate
|
||||
// responseData.CustomerGUID = custGUID
|
||||
// responseData.Expires = ccc.Expires
|
||||
// responseData.Version = 0
|
||||
// authorizationStr := ""
|
||||
// if ouToken != nil {
|
||||
// authorizationStr = ouToken.Nonce
|
||||
// if err := ouToken.Claims(&responseData.Oauth2Claims); err != nil {
|
||||
// errStr := fmt.Sprintf("failed to get claims from JWT")
|
||||
// return nil, fmt.Errorf("%v", errStr)
|
||||
// }
|
||||
// }
|
||||
// jsonBytes, err := json.Marshal(responseData)
|
||||
// if err != nil {
|
||||
// errStr := fmt.Sprintf("failed to get claims from JWT")
|
||||
// return nil, fmt.Errorf("%v", errStr)
|
||||
// }
|
||||
// ccc.Name = "auth"
|
||||
// ccc.Value = hex.EncodeToString(jsonBytes) + "." + cacheaccess.CalcHmac256(jsonBytes)
|
||||
// // TODO: HttpOnly for security...
|
||||
// ccc.HttpOnly = false
|
||||
// ccc.Path = "/"
|
||||
// ccc.Secure = true
|
||||
// ccc.SameSite = http.SameSiteNoneMode
|
||||
// http.SetCookie(w, &ccc)
|
||||
// responseData.Authorization = authorizationStr
|
||||
// jsonBytes, err = json.Marshal(responseData)
|
||||
// if err != nil {
|
||||
// w.WriteHeader(http.StatusInternalServerError)
|
||||
// fmt.Fprintf(w, "error while marshaling response(2) %s", err)
|
||||
// return
|
||||
// }
|
||||
// w.Write(jsonBytes)
|
||||
// }
|
||||
|
||||
func Login(loginDetails CustomerLoginDetails) (*LoginObject, error) {
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func GetBEInfo(cfgFile string) string {
|
||||
return "https://dashbe.eudev3.cyberarmorsoft.com"
|
||||
}
|
||||
|
||||
func BELogin(loginDetails *CustomerLoginDetails, login string, cfg string) (*BELoginResponse, error) {
|
||||
client := &http.Client{}
|
||||
|
||||
basebeURL := GetBEInfo(cfg)
|
||||
beURL := fmt.Sprintf("%v/%v", basebeURL, login)
|
||||
|
||||
loginInfoBytes, err := json.Marshal(loginDetails)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
req, err := http.NewRequest("POST", beURL, bytes.NewReader(loginInfoBytes))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.Header.Set("Referer", strings.Replace(beURL, "dashbe", "cpanel", 1))
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
loginS := &BELoginResponse{}
|
||||
json.Unmarshal(body, &loginS)
|
||||
|
||||
loginS.Cookies = resp.Cookies()
|
||||
return loginS, nil
|
||||
}
|
||||
|
||||
func (r *LoginObject) IsExpired() bool {
|
||||
if r == nil {
|
||||
return true
|
||||
}
|
||||
t, err := time.Parse(time.RFC3339, r.Expires)
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
|
||||
return t.UTC().Before(time.Now().UTC())
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
package apis
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/gofrs/uuid"
|
||||
)
|
||||
|
||||
// AuthenticationCookie is what it is
|
||||
type AuthenticationCookie struct {
|
||||
Oauth2Claims `json:",inline"`
|
||||
CustomerGUID uuid.UUID `json:"customerGuid"`
|
||||
Expires time.Time `json:"expires"`
|
||||
Version int `json:"version"`
|
||||
Authorization string `json:"authorization,omitempty"`
|
||||
}
|
||||
|
||||
type LoginAux struct {
|
||||
Referer string
|
||||
Url string
|
||||
}
|
||||
|
||||
// CustomerLoginDetails is what it is
|
||||
type CustomerLoginDetails struct {
|
||||
Email string `json:"email"`
|
||||
Password string `json:"password"`
|
||||
CustomerName string `json:"customer,omitempty"`
|
||||
CustomerGUID uuid.UUID `json:"customerGuid,omitempty"`
|
||||
}
|
||||
|
||||
// Oauth2Claims returns in claims section of Oauth2 verification process
|
||||
type Oauth2Claims struct {
|
||||
Sub string `json:"sub"`
|
||||
Name string `json:"name"`
|
||||
PreferredUserName string `json:"preferred_username"`
|
||||
CAGroups []string `json:"ca_groups"`
|
||||
Email string `json:"email"`
|
||||
}
|
||||
@@ -1,132 +0,0 @@
|
||||
package apis
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Commands list of commands received from websocket
|
||||
type Commands struct {
|
||||
Commands []Command `json:"commands"`
|
||||
}
|
||||
|
||||
// Command structure of command received from websocket
|
||||
type Command struct {
|
||||
CommandName string `json:"commandName"`
|
||||
ResponseID string `json:"responseID"`
|
||||
Wlid string `json:"wlid,omitempty"`
|
||||
WildWlid string `json:"wildWlid,omitempty"`
|
||||
Sid string `json:"sid,omitempty"`
|
||||
WildSid string `json:"wildSid,omitempty"`
|
||||
JobTracking JobTracking `json:"jobTracking"`
|
||||
Args map[string]interface{} `json:"args,omitempty"`
|
||||
}
|
||||
|
||||
type JobTracking struct {
|
||||
JobID string `json:"jobID,omitempty"`
|
||||
ParentID string `json:"parentAction,omitempty"`
|
||||
LastActionNumber int `json:"numSeq,omitempty"`
|
||||
}
|
||||
|
||||
func (c *Command) DeepCopy() *Command {
|
||||
newCommand := &Command{}
|
||||
newCommand.CommandName = c.CommandName
|
||||
newCommand.ResponseID = c.ResponseID
|
||||
newCommand.Wlid = c.Wlid
|
||||
newCommand.WildWlid = c.WildWlid
|
||||
if c.Args != nil {
|
||||
newCommand.Args = make(map[string]interface{})
|
||||
for i, j := range c.Args {
|
||||
newCommand.Args[i] = j
|
||||
}
|
||||
}
|
||||
return newCommand
|
||||
}
|
||||
|
||||
func (c *Command) GetLabels() map[string]string {
|
||||
if c.Args != nil {
|
||||
if ilabels, ok := c.Args["labels"]; ok {
|
||||
labels := map[string]string{}
|
||||
if b, e := json.Marshal(ilabels); e == nil {
|
||||
if e = json.Unmarshal(b, &labels); e == nil {
|
||||
return labels
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return map[string]string{}
|
||||
}
|
||||
|
||||
func (c *Command) SetLabels(labels map[string]string) {
|
||||
if c.Args == nil {
|
||||
c.Args = make(map[string]interface{})
|
||||
}
|
||||
c.Args["labels"] = labels
|
||||
}
|
||||
|
||||
func (c *Command) GetFieldSelector() map[string]string {
|
||||
if c.Args != nil {
|
||||
if ilabels, ok := c.Args["fieldSelector"]; ok {
|
||||
labels := map[string]string{}
|
||||
if b, e := json.Marshal(ilabels); e == nil {
|
||||
if e = json.Unmarshal(b, &labels); e == nil {
|
||||
return labels
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return map[string]string{}
|
||||
}
|
||||
|
||||
func (c *Command) SetFieldSelector(labels map[string]string) {
|
||||
if c.Args == nil {
|
||||
c.Args = make(map[string]interface{})
|
||||
}
|
||||
c.Args["fieldSelector"] = labels
|
||||
}
|
||||
|
||||
func (c *Command) GetID() string {
|
||||
if c.WildWlid != "" {
|
||||
return c.WildWlid
|
||||
}
|
||||
if c.WildSid != "" {
|
||||
return c.WildSid
|
||||
}
|
||||
if c.Wlid != "" {
|
||||
return c.Wlid
|
||||
}
|
||||
if c.Sid != "" {
|
||||
return c.Sid
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (c *Command) Json() string {
|
||||
b, _ := json.Marshal(*c)
|
||||
return fmt.Sprintf("%s", b)
|
||||
}
|
||||
|
||||
func SIDFallback(c *Command) {
|
||||
if c.GetID() == "" {
|
||||
sid, err := getSIDFromArgs(c.Args)
|
||||
if err != nil || sid == "" {
|
||||
return
|
||||
}
|
||||
c.Sid = sid
|
||||
}
|
||||
}
|
||||
|
||||
func getSIDFromArgs(args map[string]interface{}) (string, error) {
|
||||
sidInterface, ok := args["sid"]
|
||||
if !ok {
|
||||
return "", nil
|
||||
}
|
||||
sid, ok := sidInterface.(string)
|
||||
if !ok || sid == "" {
|
||||
return "", fmt.Errorf("sid found in args but empty")
|
||||
}
|
||||
// if _, err := secrethandling.SplitSecretID(sid); err != nil {
|
||||
// return "", err
|
||||
// }
|
||||
return sid, nil
|
||||
}
|
||||
@@ -1,16 +0,0 @@
|
||||
package armotypes
|
||||
|
||||
type EnforcementsRule struct {
|
||||
MonitoredObject []string `json:"monitoredObject"`
|
||||
MonitoredObjectExistence []string `json:"objectExistence"`
|
||||
MonitoredObjectEvent []string `json:"event"`
|
||||
Action []string `json:"action"`
|
||||
}
|
||||
|
||||
type ExecutionPolicy struct {
|
||||
PortalBase `json:",inline"`
|
||||
Designators []PortalDesignator `json:"designators"`
|
||||
PolicyType string `json:"policyType"`
|
||||
CreationTime string `json:"creation_time"`
|
||||
ExecutionEnforcementsRule []EnforcementsRule `json:"enforcementRules"`
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
package armotypes
|
||||
|
||||
import "strings"
|
||||
|
||||
const (
|
||||
CostumerGuidQuery = "costumerGUID"
|
||||
ClusterNameQuery = "cluster"
|
||||
DatacenterNameQuery = "datacenter"
|
||||
NamespaceQuery = "namespace"
|
||||
ProjectQuery = "project"
|
||||
WlidQuery = "wlid"
|
||||
SidQuery = "sid"
|
||||
)
|
||||
|
||||
// PortalBase holds basic items data from portal BE
|
||||
type PortalBase struct {
|
||||
GUID string `json:"guid"`
|
||||
Name string `json:"name"`
|
||||
Attributes map[string]interface{} `json:"attributes,omitempty"` // could be string
|
||||
}
|
||||
|
||||
type DesignatorType string
|
||||
|
||||
// Supported designators
|
||||
const (
|
||||
DesignatorAttributes DesignatorType = "Attributes"
|
||||
DesignatorAttribute DesignatorType = "Attribute" // Deprecated
|
||||
/*
|
||||
WorkloadID format.
|
||||
k8s format: wlid://cluster-<cluster>/namespace-<namespace>/<kind>-<name>
|
||||
native format: wlid://datacenter-<datacenter>/project-<project>/native-<name>
|
||||
*/
|
||||
DesignatorWlid DesignatorType = "Wlid"
|
||||
/*
|
||||
Wild card - subset of wlid. e.g.
|
||||
1. Include cluster:
|
||||
wlid://cluster-<cluster>/
|
||||
2. Include cluster and namespace (filter out all other namespaces):
|
||||
wlid://cluster-<cluster>/namespace-<namespace>/
|
||||
*/
|
||||
DesignatorWildWlid DesignatorType = "WildWlid"
|
||||
DesignatorWlidContainer DesignatorType = "WlidContainer"
|
||||
DesignatorWlidProcess DesignatorType = "WlidProcess"
|
||||
DesignatorSid DesignatorType = "Sid" // secret id
|
||||
)
|
||||
|
||||
func (dt DesignatorType) ToLower() DesignatorType {
|
||||
return DesignatorType(strings.ToLower(string(dt)))
|
||||
}
|
||||
|
||||
// attributes
|
||||
const (
|
||||
AttributeCluster = "cluster"
|
||||
AttributeNamespace = "namespace"
|
||||
AttributeKind = "kind"
|
||||
AttributeName = "name"
|
||||
)
|
||||
|
||||
// PortalDesignator represented single designation options
|
||||
type PortalDesignator struct {
|
||||
DesignatorType DesignatorType `json:"designatorType"`
|
||||
WLID string `json:"wlid"`
|
||||
WildWLID string `json:"wildwlid"`
|
||||
SID string `json:"sid"`
|
||||
Attributes map[string]string `json:"attributes"`
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
package armotypes
|
||||
|
||||
func MockPortalBase(customerGUID, name string, attributes map[string]interface{}) *PortalBase {
|
||||
if customerGUID == "" {
|
||||
customerGUID = "36b6f9e1-3b63-4628-994d-cbe16f81e9c7"
|
||||
}
|
||||
if name == "" {
|
||||
name = "portalbase-a"
|
||||
}
|
||||
if attributes == nil {
|
||||
attributes = make(map[string]interface{})
|
||||
}
|
||||
return &PortalBase{
|
||||
GUID: customerGUID,
|
||||
Name: name,
|
||||
Attributes: attributes,
|
||||
}
|
||||
}
|
||||
@@ -1,113 +0,0 @@
|
||||
package armotypes
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/cautils/cautils"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
var IgnoreLabels = []string{AttributeCluster, AttributeNamespace}
|
||||
|
||||
func (designator *PortalDesignator) GetCluster() string {
|
||||
cluster, _, _, _, _ := designator.DigestPortalDesignator()
|
||||
return cluster
|
||||
}
|
||||
|
||||
func (designator *PortalDesignator) GetNamespace() string {
|
||||
_, namespace, _, _, _ := designator.DigestPortalDesignator()
|
||||
return namespace
|
||||
}
|
||||
|
||||
func (designator *PortalDesignator) GetKind() string {
|
||||
_, _, kind, _, _ := designator.DigestPortalDesignator()
|
||||
return kind
|
||||
}
|
||||
|
||||
func (designator *PortalDesignator) GetName() string {
|
||||
_, _, _, name, _ := designator.DigestPortalDesignator()
|
||||
return name
|
||||
}
|
||||
func (designator *PortalDesignator) GetLabels() map[string]string {
|
||||
_, _, _, _, labels := designator.DigestPortalDesignator()
|
||||
return labels
|
||||
}
|
||||
|
||||
// DigestPortalDesignator - get cluster namespace and labels from designator
|
||||
func (designator *PortalDesignator) DigestPortalDesignator() (string, string, string, string, map[string]string) {
|
||||
switch designator.DesignatorType.ToLower() {
|
||||
case DesignatorAttributes.ToLower(), DesignatorAttribute.ToLower():
|
||||
return designator.DigestAttributesDesignator()
|
||||
case DesignatorWlid.ToLower(), DesignatorWildWlid.ToLower():
|
||||
return cautils.GetClusterFromWlid(designator.WLID), cautils.GetNamespaceFromWlid(designator.WLID), cautils.GetKindFromWlid(designator.WLID), cautils.GetNameFromWlid(designator.WLID), map[string]string{}
|
||||
// case DesignatorSid: // TODO
|
||||
default:
|
||||
glog.Warningf("in 'digestPortalDesignator' designator type: '%v' not yet supported. please contact Armo team", designator.DesignatorType)
|
||||
}
|
||||
return "", "", "", "", nil
|
||||
}
|
||||
|
||||
func (designator *PortalDesignator) DigestAttributesDesignator() (string, string, string, string, map[string]string) {
|
||||
cluster := ""
|
||||
namespace := ""
|
||||
kind := ""
|
||||
name := ""
|
||||
labels := map[string]string{}
|
||||
attributes := designator.Attributes
|
||||
if attributes == nil {
|
||||
return cluster, namespace, kind, name, labels
|
||||
}
|
||||
for k, v := range attributes {
|
||||
labels[k] = v
|
||||
}
|
||||
if v, ok := attributes[AttributeNamespace]; ok {
|
||||
namespace = v
|
||||
delete(labels, AttributeNamespace)
|
||||
}
|
||||
if v, ok := attributes[AttributeCluster]; ok {
|
||||
cluster = v
|
||||
delete(labels, AttributeCluster)
|
||||
}
|
||||
if v, ok := attributes[AttributeKind]; ok {
|
||||
kind = v
|
||||
delete(labels, AttributeKind)
|
||||
}
|
||||
if v, ok := attributes[AttributeName]; ok {
|
||||
name = v
|
||||
delete(labels, AttributeName)
|
||||
}
|
||||
return cluster, namespace, kind, name, labels
|
||||
}
|
||||
|
||||
// DigestPortalDesignator DEPRECATED. use designator.DigestPortalDesignator() - get cluster namespace and labels from designator
|
||||
func DigestPortalDesignator(designator *PortalDesignator) (string, string, map[string]string) {
|
||||
switch designator.DesignatorType {
|
||||
case DesignatorAttributes, DesignatorAttribute:
|
||||
return DigestAttributesDesignator(designator.Attributes)
|
||||
case DesignatorWlid, DesignatorWildWlid:
|
||||
return cautils.GetClusterFromWlid(designator.WLID), cautils.GetNamespaceFromWlid(designator.WLID), map[string]string{}
|
||||
// case DesignatorSid: // TODO
|
||||
default:
|
||||
glog.Warningf("in 'digestPortalDesignator' designator type: '%v' not yet supported. please contact Armo team", designator.DesignatorType)
|
||||
}
|
||||
return "", "", nil
|
||||
}
|
||||
func DigestAttributesDesignator(attributes map[string]string) (string, string, map[string]string) {
|
||||
cluster := ""
|
||||
namespace := ""
|
||||
labels := map[string]string{}
|
||||
if attributes == nil {
|
||||
return cluster, namespace, labels
|
||||
}
|
||||
for k, v := range attributes {
|
||||
labels[k] = v
|
||||
}
|
||||
if v, ok := attributes[AttributeNamespace]; ok {
|
||||
namespace = v
|
||||
delete(labels, AttributeNamespace)
|
||||
}
|
||||
if v, ok := attributes[AttributeCluster]; ok {
|
||||
cluster = v
|
||||
delete(labels, AttributeCluster)
|
||||
}
|
||||
|
||||
return cluster, namespace, labels
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package armotypes
|
||||
|
||||
type PostureExceptionPolicyActions string
|
||||
|
||||
const AlertOnly PostureExceptionPolicyActions = "alertOnly"
|
||||
const Disable PostureExceptionPolicyActions = "disable"
|
||||
|
||||
type PostureExceptionPolicy struct {
|
||||
PortalBase `json:",inline"`
|
||||
PolicyType string `json:"policyType"`
|
||||
CreationTime string `json:"creationTime"`
|
||||
Actions []PostureExceptionPolicyActions `json:"actions"`
|
||||
Resources []PortalDesignator `json:"resources"`
|
||||
PosturePolicies []PosturePolicy `json:"posturePolicies"`
|
||||
}
|
||||
|
||||
type PosturePolicy struct {
|
||||
FrameworkName string `json:"frameworkName"`
|
||||
ControlName string `json:"controlName"`
|
||||
RuleName string `json:"ruleName"`
|
||||
}
|
||||
|
||||
func (exceptionPolicy *PostureExceptionPolicy) IsAlertOnly() bool {
|
||||
if exceptionPolicy.IsDisable() {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := range exceptionPolicy.Actions {
|
||||
if exceptionPolicy.Actions[i] == AlertOnly {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (exceptionPolicy *PostureExceptionPolicy) IsDisable() bool {
|
||||
for i := range exceptionPolicy.Actions {
|
||||
if exceptionPolicy.Actions[i] == Disable {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
package armotypes
|
||||
@@ -1,196 +0,0 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// labels added to the workload
|
||||
const (
|
||||
ArmoPrefix string = "armo"
|
||||
ArmoAttach string = ArmoPrefix + ".attach"
|
||||
ArmoInitialSecret string = ArmoPrefix + ".initial"
|
||||
ArmoSecretStatus string = ArmoPrefix + ".secret"
|
||||
ArmoCompatibleLabel string = ArmoPrefix + ".compatible"
|
||||
|
||||
ArmoSecretProtectStatus string = "protect"
|
||||
ArmoSecretClearStatus string = "clear"
|
||||
)
|
||||
|
||||
// annotations added to the workload
|
||||
const (
|
||||
ArmoUpdate string = ArmoPrefix + ".last-update"
|
||||
ArmoWlid string = ArmoPrefix + ".wlid"
|
||||
ArmoSid string = ArmoPrefix + ".sid"
|
||||
ArmoJobID string = ArmoPrefix + ".job"
|
||||
ArmoJobIDPath string = ArmoJobID + "/id"
|
||||
ArmoJobParentPath string = ArmoJobID + "/parent"
|
||||
ArmoJobActionPath string = ArmoJobID + "/action"
|
||||
ArmoCompatibleAnnotation string = ArmoAttach + "/compatible"
|
||||
ArmoReplaceheaders string = ArmoAttach + "/replaceheaders"
|
||||
)
|
||||
|
||||
const ( // DEPRECATED
|
||||
|
||||
CAAttachLabel string = "cyberarmor"
|
||||
Patched string = "Patched"
|
||||
Done string = "Done"
|
||||
Encrypted string = "Protected"
|
||||
|
||||
CAInjectOld = "injectCyberArmor"
|
||||
|
||||
CAPrefix string = "cyberarmor"
|
||||
CAProtectedSecret string = CAPrefix + ".secret"
|
||||
CAInitialSecret string = CAPrefix + ".initial"
|
||||
CAInject string = CAPrefix + ".inject"
|
||||
CAIgnore string = CAPrefix + ".ignore"
|
||||
CAReplaceHeaders string = CAPrefix + ".removeSecurityHeaders"
|
||||
)
|
||||
|
||||
const ( // DEPRECATED
|
||||
CAUpdate string = CAPrefix + ".last-update"
|
||||
CAStatus string = CAPrefix + ".status"
|
||||
CAWlid string = CAPrefix + ".wlid"
|
||||
)
|
||||
|
||||
type ClusterConfig struct {
|
||||
EventReceiverREST string `json:"eventReceiverREST"`
|
||||
EventReceiverWS string `json:"eventReceiverWS"`
|
||||
MaserNotificationServer string `json:"maserNotificationServer"`
|
||||
Postman string `json:"postman"`
|
||||
Dashboard string `json:"dashboard"`
|
||||
Portal string `json:"portal"`
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
ClusterGUID string `json:"clusterGUID"`
|
||||
ClusterName string `json:"clusterName"`
|
||||
OciImageURL string `json:"ociImageURL"`
|
||||
NotificationWSURL string `json:"notificationWSURL"`
|
||||
NotificationRestURL string `json:"notificationRestURL"`
|
||||
VulnScanURL string `json:"vulnScanURL"`
|
||||
OracleURL string `json:"oracleURL"`
|
||||
ClairURL string `json:"clairURL"`
|
||||
}
|
||||
|
||||
// represents workload basic info
|
||||
type SpiffeBasicInfo struct {
|
||||
//cluster/datacenter
|
||||
Level0 string `json:"level0"`
|
||||
Level0Type string `json:"level0Type"`
|
||||
|
||||
//namespace/project
|
||||
Level1 string `json:"level0"`
|
||||
Level1Type string `json:"level0Type"`
|
||||
|
||||
Kind string `json:"kind"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
type ImageInfo struct {
|
||||
Registry string `json:"registry"`
|
||||
VersionImage string `json:"versionImage"`
|
||||
}
|
||||
|
||||
func IsAttached(labels map[string]string) *bool {
|
||||
attach := false
|
||||
if labels == nil {
|
||||
return nil
|
||||
}
|
||||
if attached, ok := labels[ArmoAttach]; ok {
|
||||
if strings.ToLower(attached) == "true" {
|
||||
attach = true
|
||||
return &attach
|
||||
} else {
|
||||
return &attach
|
||||
}
|
||||
}
|
||||
|
||||
// deprecated
|
||||
if _, ok := labels[CAAttachLabel]; ok {
|
||||
attach = true
|
||||
return &attach
|
||||
}
|
||||
|
||||
// deprecated
|
||||
if inject, ok := labels[CAInject]; ok {
|
||||
if strings.ToLower(inject) == "true" {
|
||||
attach = true
|
||||
return &attach
|
||||
}
|
||||
}
|
||||
|
||||
// deprecated
|
||||
if ignore, ok := labels[CAIgnore]; ok {
|
||||
if strings.ToLower(ignore) == "true" {
|
||||
return &attach
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func IsSecretProtected(labels map[string]string) *bool {
|
||||
protect := false
|
||||
if labels == nil {
|
||||
return nil
|
||||
}
|
||||
if protected, ok := labels[ArmoSecretStatus]; ok {
|
||||
if strings.ToLower(protected) == ArmoSecretProtectStatus {
|
||||
protect = true
|
||||
return &protect
|
||||
} else {
|
||||
return &protect
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func LoadConfig(configPath string, loadToEnv bool) (*ClusterConfig, error) {
|
||||
if configPath == "" {
|
||||
configPath = "/etc/config/clusterData.json"
|
||||
}
|
||||
|
||||
dat, err := os.ReadFile(configPath)
|
||||
if err != nil || len(dat) == 0 {
|
||||
return nil, fmt.Errorf("Config empty or not found. path: %s", configPath)
|
||||
}
|
||||
componentConfig := &ClusterConfig{}
|
||||
if err := json.Unmarshal(dat, componentConfig); err != nil {
|
||||
return componentConfig, fmt.Errorf("Failed to read component config, path: %s, reason: %s", configPath, err.Error())
|
||||
}
|
||||
if loadToEnv {
|
||||
componentConfig.LoadConfigToEnv()
|
||||
}
|
||||
return componentConfig, nil
|
||||
}
|
||||
|
||||
func (clusterConfig *ClusterConfig) LoadConfigToEnv() {
|
||||
|
||||
SetEnv("CA_CLUSTER_NAME", clusterConfig.ClusterName)
|
||||
SetEnv("CA_CLUSTER_GUID", clusterConfig.ClusterGUID)
|
||||
SetEnv("CA_ORACLE_SERVER", clusterConfig.OracleURL)
|
||||
SetEnv("CA_CUSTOMER_GUID", clusterConfig.CustomerGUID)
|
||||
SetEnv("CA_DASHBOARD_BACKEND", clusterConfig.Dashboard)
|
||||
SetEnv("CA_NOTIFICATION_SERVER_REST", clusterConfig.NotificationWSURL)
|
||||
SetEnv("CA_NOTIFICATION_SERVER_WS", clusterConfig.NotificationWSURL)
|
||||
SetEnv("CA_NOTIFICATION_SERVER_REST", clusterConfig.NotificationRestURL)
|
||||
SetEnv("CA_OCIMAGE_URL", clusterConfig.OciImageURL)
|
||||
SetEnv("CA_K8S_REPORT_URL", clusterConfig.EventReceiverWS)
|
||||
SetEnv("CA_EVENT_RECEIVER_HTTP", clusterConfig.EventReceiverREST)
|
||||
SetEnv("CA_VULNSCAN", clusterConfig.VulnScanURL)
|
||||
SetEnv("CA_POSTMAN", clusterConfig.Postman)
|
||||
SetEnv("MASTER_NOTIFICATION_SERVER_HOST", clusterConfig.MaserNotificationServer)
|
||||
SetEnv("CLAIR_URL", clusterConfig.ClairURL)
|
||||
|
||||
}
|
||||
|
||||
func SetEnv(key, value string) {
|
||||
if e := os.Getenv(key); e == "" {
|
||||
if err := os.Setenv(key, value); err != nil {
|
||||
glog.Warningf("%s: %s", key, err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// tests wlid parse
|
||||
|
||||
func TestSpiffeWLIDToInfoSuccess(t *testing.T) {
|
||||
|
||||
WLID := "wlid://cluster-HipsterShopCluster2/namespace-prod/deployment-cartservice"
|
||||
ms, er := SpiffeToSpiffeInfo(WLID)
|
||||
|
||||
if er != nil || ms.Level0 != "HipsterShopCluster2" || ms.Level0Type != "cluster" || ms.Level1 != "prod" || ms.Level1Type != "namespace" ||
|
||||
ms.Kind != "deployment" || ms.Name != "cartservice" {
|
||||
t.Errorf("TestSpiffeWLIDToInfoSuccess failed to parse %v", WLID)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSpiffeSIDInfoSuccess(t *testing.T) {
|
||||
|
||||
SID := "sid://cluster-HipsterShopCluster2/namespace-dev/secret-caregcred"
|
||||
ms, er := SpiffeToSpiffeInfo(SID)
|
||||
|
||||
if er != nil || ms.Level0 != "HipsterShopCluster2" || ms.Level0Type != "cluster" || ms.Level1 != "dev" || ms.Level1Type != "namespace" ||
|
||||
ms.Kind != "secret" || ms.Name != "caregcred" {
|
||||
t.Errorf("TestSpiffeSIDInfoSuccess failed to parse %v", SID)
|
||||
}
|
||||
}
|
||||
@@ -1,118 +0,0 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// wlid/ sid utils
|
||||
const (
|
||||
SpiffePrefix = "://"
|
||||
)
|
||||
|
||||
// wlid/ sid utils
|
||||
const (
|
||||
PackagePath = "vendor/github.com/armosec/capacketsgo"
|
||||
)
|
||||
|
||||
//AsSHA256 takes anything turns it into string :) https://blog.8bitzen.com/posts/22-08-2019-how-to-hash-a-struct-in-go
|
||||
func AsSHA256(v interface{}) string {
|
||||
h := sha256.New()
|
||||
h.Write([]byte(fmt.Sprintf("%v", v)))
|
||||
|
||||
return fmt.Sprintf("%x", h.Sum(nil))
|
||||
}
|
||||
|
||||
func SpiffeToSpiffeInfo(spiffe string) (*SpiffeBasicInfo, error) {
|
||||
basicInfo := &SpiffeBasicInfo{}
|
||||
|
||||
pos := strings.Index(spiffe, SpiffePrefix)
|
||||
if pos < 0 {
|
||||
return nil, fmt.Errorf("invalid spiffe %s", spiffe)
|
||||
}
|
||||
|
||||
pos += len(SpiffePrefix)
|
||||
spiffeNoPrefix := spiffe[pos:]
|
||||
splits := strings.Split(spiffeNoPrefix, "/")
|
||||
if len(splits) < 3 {
|
||||
return nil, fmt.Errorf("invalid spiffe %s", spiffe)
|
||||
}
|
||||
|
||||
p0 := strings.Index(splits[0], "-")
|
||||
p1 := strings.Index(splits[1], "-")
|
||||
p2 := strings.Index(splits[2], "-")
|
||||
if p0 == -1 || p1 == -1 || p2 == -1 {
|
||||
return nil, fmt.Errorf("invalid spiffe %s", spiffe)
|
||||
}
|
||||
basicInfo.Level0Type = splits[0][:p0]
|
||||
basicInfo.Level0 = splits[0][p0+1:]
|
||||
basicInfo.Level1Type = splits[1][:p1]
|
||||
basicInfo.Level1 = splits[1][p1+1:]
|
||||
basicInfo.Kind = splits[2][:p2]
|
||||
basicInfo.Name = splits[2][p2+1:]
|
||||
|
||||
return basicInfo, nil
|
||||
}
|
||||
|
||||
func ImageTagToImageInfo(imageTag string) (*ImageInfo, error) {
|
||||
ImageInfo := &ImageInfo{}
|
||||
spDelimiter := "/"
|
||||
pos := strings.Index(imageTag, spDelimiter)
|
||||
if pos < 0 {
|
||||
ImageInfo.Registry = ""
|
||||
ImageInfo.VersionImage = imageTag
|
||||
return ImageInfo, nil
|
||||
}
|
||||
|
||||
splits := strings.Split(imageTag, spDelimiter)
|
||||
if len(splits) == 0 {
|
||||
|
||||
return nil, fmt.Errorf("Invalid image info %s", imageTag)
|
||||
}
|
||||
|
||||
ImageInfo.Registry = splits[0]
|
||||
if len(splits) > 1 {
|
||||
ImageInfo.VersionImage = splits[len(splits)-1]
|
||||
} else {
|
||||
ImageInfo.VersionImage = ""
|
||||
}
|
||||
|
||||
return ImageInfo, nil
|
||||
}
|
||||
|
||||
func BoolPointer(b bool) *bool { return &b }
|
||||
|
||||
func BoolToString(b bool) string {
|
||||
if b {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
}
|
||||
|
||||
func BoolPointerToString(b *bool) string {
|
||||
if b == nil {
|
||||
return ""
|
||||
}
|
||||
if *b {
|
||||
return "true"
|
||||
}
|
||||
return "false"
|
||||
}
|
||||
|
||||
func StringToBool(s string) bool {
|
||||
if strings.ToLower(s) == "true" || strings.ToLower(s) == "1" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func StringToBoolPointer(s string) *bool {
|
||||
if strings.ToLower(s) == "true" || strings.ToLower(s) == "1" {
|
||||
return BoolPointer(true)
|
||||
}
|
||||
if strings.ToLower(s) == "false" || strings.ToLower(s) == "0" {
|
||||
return BoolPointer(false)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"strings"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
var NamespacesListToIgnore = make([]string, 0)
|
||||
var KubeNamespaces = []string{metav1.NamespaceSystem, metav1.NamespacePublic}
|
||||
|
||||
// NamespacesListToIgnore namespaces to ignore if a pod
|
||||
func InitNamespacesListToIgnore(caNamespace string) {
|
||||
if len(NamespacesListToIgnore) > 0 {
|
||||
return
|
||||
}
|
||||
NamespacesListToIgnore = append(NamespacesListToIgnore, KubeNamespaces...)
|
||||
NamespacesListToIgnore = append(NamespacesListToIgnore, caNamespace)
|
||||
}
|
||||
|
||||
func IfIgnoreNamespace(ns string) bool {
|
||||
for i := range NamespacesListToIgnore {
|
||||
if NamespacesListToIgnore[i] == ns {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func IfKubeNamespace(ns string) bool {
|
||||
for i := range KubeNamespaces {
|
||||
if NamespacesListToIgnore[i] == ns {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func hash(s string) string {
|
||||
h := fnv.New32a()
|
||||
h.Write([]byte(s))
|
||||
return fmt.Sprintf("%d", h.Sum32())
|
||||
}
|
||||
func GenarateConfigMapName(wlid string) string {
|
||||
name := strings.ToLower(fmt.Sprintf("ca-%s-%s-%s", GetNamespaceFromWlid(wlid), GetKindFromWlid(wlid), GetNameFromWlid(wlid)))
|
||||
if len(name) >= 63 {
|
||||
name = hash(name)
|
||||
}
|
||||
return name
|
||||
}
|
||||
@@ -1,238 +0,0 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// API fields
|
||||
var (
|
||||
WlidPrefix = "wlid://"
|
||||
SidPrefix = "sid://"
|
||||
ClusterWlidPrefix = "cluster-"
|
||||
NamespaceWlidPrefix = "namespace-"
|
||||
DataCenterWlidPrefix = "datacenter-"
|
||||
ProjectWlidPrefix = "project-"
|
||||
SecretSIDPrefix = "secret-"
|
||||
SubSecretSIDPrefix = "subsecret-"
|
||||
K8SKindsList = []string{"ComponentStatus", "ConfigMap", "ControllerRevision", "CronJob",
|
||||
"CustomResourceDefinition", "DaemonSet", "Deployment", "Endpoints", "Event", "HorizontalPodAutoscaler",
|
||||
"Ingress", "Job", "Lease", "LimitRange", "LocalSubjectAccessReview", "MutatingWebhookConfiguration",
|
||||
"Namespace", "NetworkPolicy", "Node", "PersistentVolume", "PersistentVolumeClaim", "Pod",
|
||||
"PodDisruptionBudget", "PodSecurityPolicy", "PodTemplate", "PriorityClass", "ReplicaSet",
|
||||
"ReplicationController", "ResourceQuota", "Role", "RoleBinding", "Secret", "SelfSubjectAccessReview",
|
||||
"SelfSubjectRulesReview", "Service", "ServiceAccount", "StatefulSet", "StorageClass",
|
||||
"SubjectAccessReview", "TokenReview", "ValidatingWebhookConfiguration", "VolumeAttachment"}
|
||||
NativeKindsList = []string{"Dockerized", "Native"}
|
||||
KindReverseMap = map[string]string{}
|
||||
dataImagesList = []string{}
|
||||
)
|
||||
|
||||
func IsWlid(id string) bool {
|
||||
return strings.HasPrefix(id, WlidPrefix)
|
||||
}
|
||||
|
||||
func IsSid(id string) bool {
|
||||
return strings.HasPrefix(id, SidPrefix)
|
||||
}
|
||||
|
||||
// GetK8SKindFronList get the calculated wlid
|
||||
func GetK8SKindFronList(kind string) string { // TODO GetK8SKindFromList
|
||||
for i := range K8SKindsList {
|
||||
if strings.ToLower(kind) == strings.ToLower(K8SKindsList[i]) {
|
||||
return K8SKindsList[i]
|
||||
}
|
||||
}
|
||||
return kind
|
||||
}
|
||||
|
||||
// IsK8SKindInList Check if the kind is a known kind
|
||||
func IsK8SKindInList(kind string) bool {
|
||||
for i := range K8SKindsList {
|
||||
if strings.ToLower(kind) == strings.ToLower(K8SKindsList[i]) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// generateWLID
|
||||
func generateWLID(pLevel0, level0, pLevel1, level1, k, name string) string {
|
||||
kind := strings.ToLower(k)
|
||||
kind = strings.Replace(kind, "-", "", -1)
|
||||
|
||||
wlid := WlidPrefix
|
||||
wlid += fmt.Sprintf("%s%s", pLevel0, level0)
|
||||
if level1 == "" {
|
||||
return wlid
|
||||
}
|
||||
wlid += fmt.Sprintf("/%s%s", pLevel1, level1)
|
||||
|
||||
if kind == "" {
|
||||
return wlid
|
||||
}
|
||||
wlid += fmt.Sprintf("/%s", kind)
|
||||
|
||||
if name == "" {
|
||||
return wlid
|
||||
}
|
||||
wlid += fmt.Sprintf("-%s", name)
|
||||
|
||||
return wlid
|
||||
}
|
||||
|
||||
// GetWLID get the calculated wlid
|
||||
func GetWLID(level0, level1, k, name string) string {
|
||||
return generateWLID(ClusterWlidPrefix, level0, NamespaceWlidPrefix, level1, k, name)
|
||||
}
|
||||
|
||||
// GetK8sWLID get the k8s calculated wlid
|
||||
func GetK8sWLID(level0, level1, k, name string) string {
|
||||
return generateWLID(ClusterWlidPrefix, level0, NamespaceWlidPrefix, level1, k, name)
|
||||
}
|
||||
|
||||
// GetNativeWLID get the native calculated wlid
|
||||
func GetNativeWLID(level0, level1, k, name string) string {
|
||||
return generateWLID(DataCenterWlidPrefix, level0, ProjectWlidPrefix, level1, k, name)
|
||||
}
|
||||
|
||||
// WildWlidContainsWlid does WildWlid contains Wlid
|
||||
func WildWlidContainsWlid(wildWlid, wlid string) bool { // TODO- test
|
||||
if wildWlid == wlid {
|
||||
return true
|
||||
}
|
||||
wildWlidR, _ := RestoreMicroserviceIDsFromSpiffe(wildWlid)
|
||||
wlidR, _ := RestoreMicroserviceIDsFromSpiffe(wlid)
|
||||
if len(wildWlidR) > len(wildWlidR) {
|
||||
// invalid wlid
|
||||
return false
|
||||
}
|
||||
|
||||
for i := range wildWlidR {
|
||||
if wildWlidR[i] != wlidR[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func restoreInnerIdentifiersFromID(spiffeSlices []string) []string {
|
||||
if len(spiffeSlices) >= 1 && strings.HasPrefix(spiffeSlices[0], ClusterWlidPrefix) {
|
||||
spiffeSlices[0] = spiffeSlices[0][len(ClusterWlidPrefix):]
|
||||
}
|
||||
if len(spiffeSlices) >= 2 && strings.HasPrefix(spiffeSlices[1], NamespaceWlidPrefix) {
|
||||
spiffeSlices[1] = spiffeSlices[1][len(NamespaceWlidPrefix):]
|
||||
}
|
||||
if len(spiffeSlices) >= 3 && strings.Contains(spiffeSlices[2], "-") {
|
||||
dashIdx := strings.Index(spiffeSlices[2], "-")
|
||||
spiffeSlices = append(spiffeSlices, spiffeSlices[2][dashIdx+1:])
|
||||
spiffeSlices[2] = spiffeSlices[2][:dashIdx]
|
||||
if val, ok := KindReverseMap[spiffeSlices[2]]; ok {
|
||||
spiffeSlices[2] = val
|
||||
}
|
||||
}
|
||||
return spiffeSlices
|
||||
}
|
||||
|
||||
// RestoreMicroserviceIDsFromSpiffe -
|
||||
func RestoreMicroserviceIDsFromSpiffe(spiffe string) ([]string, error) {
|
||||
if spiffe == "" {
|
||||
return nil, fmt.Errorf("in RestoreMicroserviceIDsFromSpiffe, expecting valid wlid recieved empty string")
|
||||
}
|
||||
|
||||
if StringHasWhitespace(spiffe) {
|
||||
return nil, fmt.Errorf("wlid %s invalid. whitespace found", spiffe)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(spiffe, WlidPrefix) {
|
||||
spiffe = spiffe[len(WlidPrefix):]
|
||||
} else if strings.HasPrefix(spiffe, SidPrefix) {
|
||||
spiffe = spiffe[len(SidPrefix):]
|
||||
}
|
||||
spiffeSlices := strings.Split(spiffe, "/")
|
||||
// The documented WLID format (https://cyberarmorio.sharepoint.com/sites/development2/Shared%20Documents/kubernetes_design1.docx?web=1)
|
||||
if len(spiffeSlices) <= 3 {
|
||||
spiffeSlices = restoreInnerIdentifiersFromID(spiffeSlices)
|
||||
}
|
||||
if len(spiffeSlices) != 4 { // first used WLID, deprecated since 24.10.2019
|
||||
return spiffeSlices, fmt.Errorf("invalid WLID format. format received: %v", spiffeSlices)
|
||||
}
|
||||
|
||||
for i := range spiffeSlices {
|
||||
if spiffeSlices[i] == "" {
|
||||
return spiffeSlices, fmt.Errorf("one or more entities are empty, spiffeSlices: %v", spiffeSlices)
|
||||
}
|
||||
}
|
||||
|
||||
return spiffeSlices, nil
|
||||
}
|
||||
|
||||
// RestoreMicroserviceIDs parses a wlid/sid string into its identifier
// segments (cluster, namespace, kind, name), returning however many segments
// could be recovered. Unlike RestoreMicroserviceIDsFromSpiffe it never
// returns an error: empty or whitespace-containing input yields an empty
// slice. (Original header comment named the wrong function.)
func RestoreMicroserviceIDs(spiffe string) []string {
	// Empty input: nothing to parse.
	if spiffe == "" {
		return []string{}
	}

	// Whitespace is never legal inside a wlid/sid.
	if StringHasWhitespace(spiffe) {
		return []string{}
	}

	// Drop the scheme prefix; wlid and sid share the same inner layout.
	if strings.HasPrefix(spiffe, WlidPrefix) {
		spiffe = spiffe[len(WlidPrefix):]
	} else if strings.HasPrefix(spiffe, SidPrefix) {
		spiffe = spiffe[len(SidPrefix):]
	}
	spiffeSlices := strings.Split(spiffe, "/")

	// Strip per-segment prefixes and split a fused "<kind>-<name>" segment.
	return restoreInnerIdentifiersFromID(spiffeSlices)
}
|
||||
|
||||
// GetClusterFromWlid parse wlid and get cluster
|
||||
func GetClusterFromWlid(wlid string) string {
|
||||
r := RestoreMicroserviceIDs(wlid)
|
||||
if len(r) >= 1 {
|
||||
return r[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetNamespaceFromWlid parse wlid and get Namespace
|
||||
func GetNamespaceFromWlid(wlid string) string {
|
||||
r := RestoreMicroserviceIDs(wlid)
|
||||
if len(r) >= 2 {
|
||||
return r[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetKindFromWlid parse wlid and get kind
|
||||
func GetKindFromWlid(wlid string) string {
|
||||
r := RestoreMicroserviceIDs(wlid)
|
||||
if len(r) >= 3 {
|
||||
return GetK8SKindFronList(r[2])
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetNameFromWlid parse wlid and get name
|
||||
func GetNameFromWlid(wlid string) string {
|
||||
r := RestoreMicroserviceIDs(wlid)
|
||||
if len(r) >= 4 {
|
||||
return GetK8SKindFronList(r[3])
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// IsWlidValid test if wlid is a valid wlid
|
||||
func IsWlidValid(wlid string) error {
|
||||
_, err := RestoreMicroserviceIDsFromSpiffe(wlid)
|
||||
return err
|
||||
}
|
||||
|
||||
// StringHasWhitespace reports whether str contains a space character.
// NOTE(review): despite the name, only the ASCII space ' ' is detected —
// tabs/newlines pass through; confirm intent before widening the check.
func StringHasWhitespace(str string) bool {
	return strings.Contains(str, " ")
}
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/k8sinterface"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
@@ -30,6 +30,7 @@ type ConfigObj struct {
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
Token string `json:"invitationParam"`
|
||||
CustomerAdminEMail string `json:"adminMail"`
|
||||
ClusterName string `json:"clusterName"`
|
||||
}
|
||||
|
||||
func (co *ConfigObj) Json() []byte {
|
||||
@@ -39,14 +40,30 @@ func (co *ConfigObj) Json() []byte {
|
||||
return []byte{}
|
||||
}
|
||||
|
||||
// Config - convert ConfigObj to config file
|
||||
func (co *ConfigObj) Config() []byte {
|
||||
clusterName := co.ClusterName
|
||||
co.ClusterName = "" // remove cluster name before saving to file
|
||||
b, err := json.Marshal(co)
|
||||
co.ClusterName = clusterName
|
||||
|
||||
if err == nil {
|
||||
return b
|
||||
}
|
||||
|
||||
return []byte{}
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
// =============================== interface ============================================
|
||||
// ======================================================================================
|
||||
type IClusterConfig interface {
|
||||
// setters
|
||||
SetCustomerGUID(customerGUID string) error
|
||||
|
||||
// set
|
||||
SetConfig(customerGUID string) error
|
||||
|
||||
// getters
|
||||
GetClusterName() string
|
||||
GetCustomerGUID() string
|
||||
GetConfigObj() *ConfigObj
|
||||
GetK8sAPI() *k8sinterface.KubernetesApi
|
||||
@@ -92,8 +109,10 @@ func ClusterConfigSetup(scanInfo *ScanInfo, k8s *k8sinterface.KubernetesApi, beA
|
||||
return NewEmptyConfig() // local - Delete local config & Do not send report
|
||||
}
|
||||
if scanInfo.Local {
|
||||
scanInfo.Submit = false
|
||||
return NewEmptyConfig() // local - Do not send report
|
||||
}
|
||||
scanInfo.Submit = true
|
||||
return clusterConfig // submit/default - Submit report
|
||||
}
|
||||
|
||||
@@ -103,16 +122,17 @@ func ClusterConfigSetup(scanInfo *ScanInfo, k8s *k8sinterface.KubernetesApi, beA
|
||||
type EmptyConfig struct {
|
||||
}
|
||||
|
||||
func NewEmptyConfig() *EmptyConfig { return &EmptyConfig{} }
|
||||
func (c *EmptyConfig) GetConfigObj() *ConfigObj { return &ConfigObj{} }
|
||||
func (c *EmptyConfig) SetCustomerGUID(customerGUID string) error { return nil }
|
||||
func (c *EmptyConfig) GetCustomerGUID() string { return "" }
|
||||
func (c *EmptyConfig) GetK8sAPI() *k8sinterface.KubernetesApi { return nil } // TODO: return mock obj
|
||||
func (c *EmptyConfig) GetDefaultNS() string { return k8sinterface.GetDefaultNamespace() }
|
||||
func (c *EmptyConfig) GetBackendAPI() getter.IBackend { return nil } // TODO: return mock obj
|
||||
func NewEmptyConfig() *EmptyConfig { return &EmptyConfig{} }
|
||||
func (c *EmptyConfig) SetConfig(customerGUID string) error { return nil }
|
||||
func (c *EmptyConfig) GetConfigObj() *ConfigObj { return &ConfigObj{} }
|
||||
func (c *EmptyConfig) GetCustomerGUID() string { return "" }
|
||||
func (c *EmptyConfig) GetK8sAPI() *k8sinterface.KubernetesApi { return nil } // TODO: return mock obj
|
||||
func (c *EmptyConfig) GetDefaultNS() string { return k8sinterface.GetDefaultNamespace() }
|
||||
func (c *EmptyConfig) GetBackendAPI() getter.IBackend { return nil } // TODO: return mock obj
|
||||
func (c *EmptyConfig) GetClusterName() string { return adoptClusterName(k8sinterface.GetClusterName()) }
|
||||
func (c *EmptyConfig) GenerateURL() {
|
||||
message := fmt.Sprintf("You can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here: https://%s", getter.GetArmoAPIConnector().GetFrontendURL())
|
||||
InfoTextDisplay(os.Stdout, message+"\n")
|
||||
message := fmt.Sprintf("\nCheckout for more cool features: https://%s\n", getter.GetArmoAPIConnector().GetFrontendURL())
|
||||
InfoTextDisplay(os.Stdout, fmt.Sprintf("\n%s\n", message))
|
||||
}
|
||||
|
||||
// ======================================================================================
|
||||
@@ -140,6 +160,7 @@ func (c *ClusterConfig) GetDefaultNS() string { return c.defau
|
||||
func (c *ClusterConfig) GetBackendAPI() getter.IBackend { return c.backendAPI }
|
||||
|
||||
func (c *ClusterConfig) GenerateURL() {
|
||||
message := "Checkout for more cool features: "
|
||||
|
||||
u := url.URL{}
|
||||
u.Scheme = "https"
|
||||
@@ -147,9 +168,8 @@ func (c *ClusterConfig) GenerateURL() {
|
||||
if c.configObj == nil {
|
||||
return
|
||||
}
|
||||
message := fmt.Sprintf("You can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more by registering here: %s", u.String())
|
||||
if c.configObj.CustomerAdminEMail != "" {
|
||||
InfoTextDisplay(os.Stdout, message+"\n")
|
||||
InfoTextDisplay(os.Stdout, "\n\n"+message+u.String()+"\n\n")
|
||||
return
|
||||
}
|
||||
u.Path = "account/sign-up"
|
||||
@@ -158,8 +178,7 @@ func (c *ClusterConfig) GenerateURL() {
|
||||
q.Add("customerGUID", c.configObj.CustomerGUID)
|
||||
|
||||
u.RawQuery = q.Encode()
|
||||
InfoTextDisplay(os.Stdout, message+"\n")
|
||||
|
||||
InfoTextDisplay(os.Stdout, "\n\n"+message+u.String()+"\n\n")
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) GetCustomerGUID() string {
|
||||
@@ -169,10 +188,19 @@ func (c *ClusterConfig) GetCustomerGUID() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) SetCustomerGUID(customerGUID string) error {
|
||||
func (c *ClusterConfig) SetConfig(customerGUID string) error {
|
||||
if c.configObj == nil {
|
||||
c.configObj = &ConfigObj{}
|
||||
}
|
||||
|
||||
// cluster name
|
||||
if c.GetClusterName() == "" {
|
||||
c.setClusterName(k8sinterface.GetClusterName())
|
||||
}
|
||||
|
||||
// ARMO customer GUID
|
||||
if customerGUID != "" && c.GetCustomerGUID() != customerGUID {
|
||||
c.configObj.CustomerGUID = customerGUID // override config customerGUID
|
||||
c.setCustomerGUID(customerGUID) // override config customerGUID
|
||||
}
|
||||
|
||||
customerGUID = c.GetCustomerGUID()
|
||||
@@ -181,10 +209,10 @@ func (c *ClusterConfig) SetCustomerGUID(customerGUID string) error {
|
||||
tenantResponse, err := c.backendAPI.GetCustomerGUID(customerGUID)
|
||||
if err == nil && tenantResponse != nil {
|
||||
if tenantResponse.AdminMail != "" { // this customer already belongs to some user
|
||||
c.configObj.CustomerAdminEMail = tenantResponse.AdminMail
|
||||
c.setCustomerAdminEMail(tenantResponse.AdminMail)
|
||||
} else {
|
||||
c.configObj.Token = tenantResponse.Token
|
||||
c.configObj.CustomerGUID = tenantResponse.TenantID
|
||||
c.setToken(tenantResponse.Token)
|
||||
c.setCustomerGUID(tenantResponse.TenantID)
|
||||
}
|
||||
} else {
|
||||
if err != nil && !strings.Contains(err.Error(), "already exists") {
|
||||
@@ -198,15 +226,28 @@ func (c *ClusterConfig) SetCustomerGUID(customerGUID string) error {
|
||||
} else {
|
||||
c.createConfigMap()
|
||||
}
|
||||
if existsConfigFile() {
|
||||
c.updateConfigFile()
|
||||
} else {
|
||||
c.createConfigFile()
|
||||
}
|
||||
c.updateConfigFile()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) setToken(token string) {
|
||||
c.configObj.Token = token
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) setCustomerAdminEMail(customerAdminEMail string) {
|
||||
c.configObj.CustomerAdminEMail = customerAdminEMail
|
||||
}
|
||||
func (c *ClusterConfig) setCustomerGUID(customerGUID string) {
|
||||
c.configObj.CustomerGUID = customerGUID
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) setClusterName(clusterName string) {
|
||||
c.configObj.ClusterName = adoptClusterName(clusterName)
|
||||
}
|
||||
func (c *ClusterConfig) GetClusterName() string {
|
||||
return c.configObj.ClusterName
|
||||
}
|
||||
func (c *ClusterConfig) LoadConfig() {
|
||||
// get from configMap
|
||||
if c.existsConfigMap() {
|
||||
@@ -220,8 +261,9 @@ func (c *ClusterConfig) LoadConfig() {
|
||||
|
||||
func (c *ClusterConfig) ToMapString() map[string]interface{} {
|
||||
m := map[string]interface{}{}
|
||||
bc, _ := json.Marshal(c.configObj)
|
||||
json.Unmarshal(bc, &m)
|
||||
if bc, err := json.Marshal(c.configObj); err == nil {
|
||||
json.Unmarshal(bc, &m)
|
||||
}
|
||||
return m
|
||||
}
|
||||
func (c *ClusterConfig) loadConfigFromConfigMap() (*ConfigObj, error) {
|
||||
@@ -360,14 +402,7 @@ func (c *ClusterConfig) updateConfigMap() error {
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) updateConfigFile() error {
|
||||
if err := os.WriteFile(ConfigFileFullPath(), c.configObj.Json(), 0664); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ClusterConfig) createConfigFile() error {
|
||||
if err := os.WriteFile(ConfigFileFullPath(), c.configObj.Json(), 0664); err != nil {
|
||||
if err := os.WriteFile(ConfigFileFullPath(), c.configObj.Config(), 0664); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
@@ -437,3 +472,7 @@ func DeleteConfigMap(k8s *k8sinterface.KubernetesApi) error {
|
||||
func DeleteConfigFile() error {
|
||||
return os.Remove(ConfigFileFullPath())
|
||||
}
|
||||
|
||||
func adoptClusterName(clusterName string) string {
|
||||
return strings.ReplaceAll(clusterName, "/", "-")
|
||||
}
|
||||
|
||||
@@ -1,25 +1,25 @@
|
||||
package cautils
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/cautils/armotypes"
|
||||
"github.com/armosec/kubescape/cautils/opapolicy"
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// K8SResources map[<api group>/<api version>/<resource>]<resource object>
|
||||
type K8SResources map[string]interface{}
|
||||
|
||||
type OPASessionObj struct {
|
||||
Frameworks []opapolicy.Framework
|
||||
Frameworks []reporthandling.Framework
|
||||
K8SResources *K8SResources
|
||||
Exceptions []armotypes.PostureExceptionPolicy
|
||||
PostureReport *opapolicy.PostureReport
|
||||
PostureReport *reporthandling.PostureReport
|
||||
}
|
||||
|
||||
func NewOPASessionObj(frameworks []opapolicy.Framework, k8sResources *K8SResources) *OPASessionObj {
|
||||
func NewOPASessionObj(frameworks []reporthandling.Framework, k8sResources *K8SResources) *OPASessionObj {
|
||||
return &OPASessionObj{
|
||||
Frameworks: frameworks,
|
||||
K8SResources: k8sResources,
|
||||
PostureReport: &opapolicy.PostureReport{
|
||||
PostureReport: &reporthandling.PostureReport{
|
||||
ClusterName: ClusterName,
|
||||
CustomerGUID: CustomerGUID,
|
||||
},
|
||||
@@ -30,7 +30,7 @@ func NewOPASessionObjMock() *OPASessionObj {
|
||||
return &OPASessionObj{
|
||||
Frameworks: nil,
|
||||
K8SResources: nil,
|
||||
PostureReport: &opapolicy.PostureReport{
|
||||
PostureReport: &reporthandling.PostureReport{
|
||||
ClusterName: "",
|
||||
CustomerGUID: "",
|
||||
ReportID: "",
|
||||
@@ -44,8 +44,8 @@ type ComponentConfig struct {
|
||||
}
|
||||
|
||||
type Exception struct {
|
||||
Ignore *bool `json:"ignore"` // ignore test results
|
||||
MultipleScore *opapolicy.AlertScore `json:"multipleScore"` // MultipleScore number - float32
|
||||
Namespaces []string `json:"namespaces"`
|
||||
Regex string `json:"regex"` // not supported
|
||||
Ignore *bool `json:"ignore"` // ignore test results
|
||||
MultipleScore *reporthandling.AlertScore `json:"multipleScore"` // MultipleScore number - float32
|
||||
Namespaces []string `json:"namespaces"`
|
||||
Regex string `json:"regex"` // not supported
|
||||
}
|
||||
|
||||
@@ -3,4 +3,5 @@ package cautils
|
||||
type DownloadInfo struct {
|
||||
Path string
|
||||
FrameworkName string
|
||||
ControlName string
|
||||
}
|
||||
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/armotypes"
|
||||
"github.com/armosec/kubescape/cautils/opapolicy"
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
@@ -91,13 +91,13 @@ func (armoAPI *ArmoAPI) GetReportReceiverURL() string {
|
||||
return armoAPI.erURL
|
||||
}
|
||||
|
||||
func (armoAPI *ArmoAPI) GetFramework(name string) (*opapolicy.Framework, error) {
|
||||
func (armoAPI *ArmoAPI) GetFramework(name string) (*reporthandling.Framework, error) {
|
||||
respStr, err := HttpGetter(armoAPI.httpClient, armoAPI.getFrameworkURL(name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
framework := &opapolicy.Framework{}
|
||||
framework := &reporthandling.Framework{}
|
||||
if err = JSONDecoder(respStr).Decode(framework); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -1,13 +1,10 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/opapolicy"
|
||||
"github.com/armosec/opa-utils/gitregostore"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// =======================================================================================================================
|
||||
@@ -16,71 +13,34 @@ import (
|
||||
|
||||
// Download released version
|
||||
type DownloadReleasedPolicy struct {
|
||||
hostURL string
|
||||
httpClient *http.Client
|
||||
gs *gitregostore.GitRegoStore
|
||||
}
|
||||
|
||||
func NewDownloadReleasedPolicy() *DownloadReleasedPolicy {
|
||||
return &DownloadReleasedPolicy{
|
||||
hostURL: "",
|
||||
httpClient: &http.Client{Timeout: 61 * time.Second},
|
||||
gs: gitregostore.InitDefaultGitRegoStore(-1),
|
||||
}
|
||||
}
|
||||
|
||||
func (drp *DownloadReleasedPolicy) GetFramework(name string) (*opapolicy.Framework, error) {
|
||||
if err := drp.setURL(name); err != nil {
|
||||
return nil, err
|
||||
// Return control per name/id using ARMO api
|
||||
func (drp *DownloadReleasedPolicy) GetControl(policyName string) (*reporthandling.Control, error) {
|
||||
var control *reporthandling.Control
|
||||
var err error
|
||||
if strings.HasPrefix(policyName, "C-") || strings.HasPrefix(policyName, "c-") {
|
||||
control, err = drp.gs.GetOPAControlByID(policyName)
|
||||
} else {
|
||||
control, err = drp.gs.GetOPAControlByName(policyName)
|
||||
}
|
||||
respStr, err := HttpGetter(drp.httpClient, drp.hostURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return control, nil
|
||||
}
|
||||
|
||||
framework := &opapolicy.Framework{}
|
||||
if err = JSONDecoder(respStr).Decode(framework); err != nil {
|
||||
return framework, err
|
||||
func (drp *DownloadReleasedPolicy) GetFramework(name string) (*reporthandling.Framework, error) {
|
||||
framework, err := drp.gs.GetOPAFrameworkByName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
SaveFrameworkInFile(framework, GetDefaultPath(name+".json"))
|
||||
return framework, err
|
||||
}
|
||||
|
||||
func (drp *DownloadReleasedPolicy) setURL(frameworkName string) error {
|
||||
|
||||
latestReleases := "https://api.github.com/repos/armosec/regolibrary/releases/latest"
|
||||
resp, err := http.Get(latestReleases)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get latest releases from '%s', reason: %s", latestReleases, err.Error())
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode < 200 || 301 < resp.StatusCode {
|
||||
return fmt.Errorf("failed to download file, status code: %s", resp.Status)
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read response body from '%s', reason: %s", latestReleases, err.Error())
|
||||
}
|
||||
var data map[string]interface{}
|
||||
err = json.Unmarshal(body, &data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to unmarshal response body from '%s', reason: %s", latestReleases, err.Error())
|
||||
}
|
||||
|
||||
if assets, ok := data["assets"].([]interface{}); ok {
|
||||
for i := range assets {
|
||||
if asset, ok := assets[i].(map[string]interface{}); ok {
|
||||
if name, ok := asset["name"].(string); ok {
|
||||
if name == frameworkName {
|
||||
if url, ok := asset["browser_download_url"].(string); ok {
|
||||
drp.hostURL = url
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("failed to download '%s' - not found", frameworkName)
|
||||
|
||||
}
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
package getter
|
||||
|
||||
import (
|
||||
"github.com/armosec/kubescape/cautils/armotypes"
|
||||
"github.com/armosec/kubescape/cautils/opapolicy"
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
type IPolicyGetter interface {
|
||||
GetFramework(name string) (*opapolicy.Framework, error)
|
||||
GetFramework(name string) (*reporthandling.Framework, error)
|
||||
GetControl(policyName string) (*reporthandling.Control, error)
|
||||
}
|
||||
|
||||
type IExceptionsGetter interface {
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/opapolicy"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
func GetDefaultPath(name string) string {
|
||||
@@ -21,7 +21,32 @@ func GetDefaultPath(name string) string {
|
||||
return defaultfilePath
|
||||
}
|
||||
|
||||
func SaveFrameworkInFile(framework *opapolicy.Framework, pathStr string) error {
|
||||
// Save control as json in file
|
||||
func SaveControlInFile(control *reporthandling.Control, pathStr string) error {
|
||||
encodedData, err := json.Marshal(control)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
pathDir := path.Dir(pathStr)
|
||||
if err := os.Mkdir(pathDir, 0744); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
return err
|
||||
|
||||
}
|
||||
err = os.WriteFile(pathStr, []byte(fmt.Sprintf("%v", string(encodedData))), 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func SaveFrameworkInFile(framework *reporthandling.Framework, pathStr string) error {
|
||||
encodedData, err := json.Marshal(framework)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/armotypes"
|
||||
"github.com/armosec/kubescape/cautils/opapolicy"
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
// =======================================================================================================================
|
||||
@@ -17,34 +17,71 @@ const DefaultLocalStore = ".kubescape"
|
||||
|
||||
// Load policies from a local repository
|
||||
type LoadPolicy struct {
|
||||
filePath string
|
||||
filePaths []string
|
||||
}
|
||||
|
||||
func NewLoadPolicy(filePath string) *LoadPolicy {
|
||||
func NewLoadPolicy(filePaths []string) *LoadPolicy {
|
||||
return &LoadPolicy{
|
||||
filePath: filePath,
|
||||
filePaths: filePaths,
|
||||
}
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) GetFramework(frameworkName string) (*opapolicy.Framework, error) {
|
||||
// Return control from file
|
||||
func (lp *LoadPolicy) GetControl(controlName string) (*reporthandling.Control, error) {
|
||||
|
||||
framework := &opapolicy.Framework{}
|
||||
f, err := os.ReadFile(lp.filePath)
|
||||
control := &reporthandling.Control{}
|
||||
filePath := lp.getFileForControl()
|
||||
f, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(f, framework)
|
||||
if err = json.Unmarshal(f, control); err != nil {
|
||||
return control, err
|
||||
}
|
||||
if controlName != "" && !strings.EqualFold(controlName, control.Name) && !strings.EqualFold(controlName, control.ControlID) {
|
||||
framework, err := lp.GetFramework(controlName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("control from file not matching")
|
||||
} else {
|
||||
for _, ctrl := range framework.Controls {
|
||||
if strings.EqualFold(ctrl.Name, controlName) || strings.EqualFold(ctrl.ControlID, controlName) {
|
||||
control = &ctrl
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return control, err
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) GetFramework(frameworkName string) (*reporthandling.Framework, error) {
|
||||
framework := &reporthandling.Framework{}
|
||||
var err error
|
||||
for _, filePath := range lp.filePaths {
|
||||
f, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(f, framework); err != nil {
|
||||
return framework, err
|
||||
}
|
||||
if strings.EqualFold(frameworkName, framework.Name) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if frameworkName != "" && !strings.EqualFold(frameworkName, framework.Name) {
|
||||
|
||||
return nil, fmt.Errorf("framework from file not matching")
|
||||
}
|
||||
return framework, err
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) GetExceptions(customerGUID, clusterName string) ([]armotypes.PostureExceptionPolicy, error) {
|
||||
|
||||
filePath := lp.getFileForException()
|
||||
exception := []armotypes.PostureExceptionPolicy{}
|
||||
f, err := os.ReadFile(lp.filePath)
|
||||
f, err := os.ReadFile(filePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -52,3 +89,11 @@ func (lp *LoadPolicy) GetExceptions(customerGUID, clusterName string) ([]armotyp
|
||||
err = json.Unmarshal(f, &exception)
|
||||
return exception, err
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) getFileForException() string {
|
||||
return lp.filePaths[0]
|
||||
}
|
||||
|
||||
func (lp *LoadPolicy) getFileForControl() string {
|
||||
return lp.filePaths[0]
|
||||
}
|
||||
|
||||
@@ -1,265 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/ecr"
|
||||
"github.com/docker/docker/api/types"
|
||||
)
|
||||
|
||||
// For GCR there are some permissions one need to assign in order to allow ARMO to pull images:
|
||||
// https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity
|
||||
// gcloud iam service-accounts create armo-controller-sa
|
||||
// gcloud projects add-iam-policy-binding <PROJECT_NAME> --role roles/storage.objectViewer --member "serviceAccount:armo-controller-sa@<PROJECT_NAME>.iam.gserviceaccount.com"
|
||||
// gcloud iam service-accounts add-iam-policy-binding --role roles/iam.workloadIdentityUser --member "serviceAccount:<PROJECT_NAME>.svc.id.goog[cyberarmor-system/ca-controller-service-account]" armo-controller-sa@<PROJECT_NAME>.iam.gserviceaccount.com
|
||||
// kubectl annotate serviceaccount --overwrite --namespace cyberarmor-system ca-controller-service-account iam.gke.io/gcp-service-account=armo-controller-sa@<PROJECT_NAME>.iam.gserviceaccount.com
|
||||
|
||||
const (
|
||||
gcrDefaultServiceAccountName = "default"
|
||||
// armoServiceAccountName = "ca-controller-service-account"
|
||||
)
|
||||
|
||||
var (
|
||||
httpClient = http.Client{Timeout: 5 * time.Second}
|
||||
)
|
||||
|
||||
// CheckIsECRImage check if this image is suspected as ECR hosted image
|
||||
func CheckIsECRImage(imageTag string) bool {
|
||||
return strings.Contains(imageTag, "dkr.ecr")
|
||||
}
|
||||
|
||||
// GetLoginDetailsForECR return user name + password using the default iam-role OR ~/.aws/config of the machine
|
||||
func GetLoginDetailsForECR(imageTag string) (string, string, error) {
|
||||
// imageTag := "015253967648.dkr.ecr.eu-central-1.amazonaws.com/armo:1"
|
||||
imageTagSlices := strings.Split(imageTag, ".")
|
||||
repo := imageTagSlices[0]
|
||||
region := imageTagSlices[3]
|
||||
mySession := session.Must(session.NewSession())
|
||||
ecrClient := ecr.New(mySession, aws.NewConfig().WithRegion(region))
|
||||
input := &ecr.GetAuthorizationTokenInput{
|
||||
RegistryIds: []*string{&repo},
|
||||
}
|
||||
res, err := ecrClient.GetAuthorizationToken(input)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("in PullFromECR, failed to GetAuthorizationToken: %v", err)
|
||||
}
|
||||
res64 := (*res.AuthorizationData[0].AuthorizationToken)
|
||||
resB, err := base64.StdEncoding.DecodeString(res64)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("in PullFromECR, failed to DecodeString: %v", err)
|
||||
}
|
||||
delimiterIdx := bytes.IndexByte(resB, ':')
|
||||
// userName := resB[:delimiterIdx]
|
||||
// resB = resB[delimiterIdx+1:]
|
||||
// resB, err = base64.StdEncoding.DecodeString(string(resB))
|
||||
// if err != nil {
|
||||
// t.Errorf("failed to DecodeString #2: %v\n\n", err)
|
||||
// }
|
||||
return string(resB[:delimiterIdx]), string(resB[delimiterIdx+1:]), nil
|
||||
}
|
||||
|
||||
func CheckIsACRImage(imageTag string) bool {
|
||||
// atest1.azurecr.io/go-inf:1
|
||||
return strings.Contains(imageTag, ".azurecr.io/")
|
||||
}
|
||||
|
||||
type azureADDResponseJson struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn string `json:"expires_in"`
|
||||
ExpiresOn string `json:"expires_on"`
|
||||
NotBefore string `json:"not_before"`
|
||||
Resource string `json:"resource"`
|
||||
TokenType string `json:"token_type"`
|
||||
}
|
||||
|
||||
// getAzureAADAccessToken fetches an Azure AD access token from the Azure
// Instance Metadata Service (IMDS) at 169.254.169.254, using the managed
// identity of the host. This only succeeds when running on an Azure VM/pod
// that has a managed identity attached.
func getAzureAADAccessToken() (string, error) {
	msi_endpoint, err := url.Parse("http://169.254.169.254/metadata/identity/oauth2/token?api-version=2018-02-01")
	if err != nil {
		return "", fmt.Errorf("creating URL : %v", err)
	}
	msi_parameters := url.Values{}
	// Request a token scoped to the Azure management API.
	msi_parameters.Add("resource", "https://management.azure.com/")
	msi_parameters.Add("api-version", "2018-02-01")
	msi_endpoint.RawQuery = msi_parameters.Encode()
	req, err := http.NewRequest("GET", msi_endpoint.String(), nil)
	if err != nil {
		return "", fmt.Errorf("creating HTTP request : %v", err)
	}
	// "Metadata: true" is required by IMDS to prevent SSRF-style requests.
	req.Header.Add("Metadata", "true")

	// Call managed services for Azure resources token endpoint
	resp, err := httpClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("calling token endpoint : %v", err)
	}

	// Pull out response body
	responseBytes, err := io.ReadAll(resp.Body)
	defer resp.Body.Close()
	if err != nil {
		return "", fmt.Errorf("reading response body : %v", err)
	}
	// Any non-2xx status is an error; include the body for diagnostics.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return "", fmt.Errorf("azure ActiveDirectory AT resp: %v, %v", resp.Status, string(responseBytes))
	}

	// Unmarshall response body into struct
	var r azureADDResponseJson
	err = json.Unmarshal(responseBytes, &r)
	if err != nil {
		return "", fmt.Errorf("unmarshalling the response: %v", err)
	}
	return r.AccessToken, nil
}
|
||||
|
||||
// GetLoginDetailsForAzurCR return user name + password to use
|
||||
func GetLoginDetailsForAzurCR(imageTag string) (string, string, error) {
|
||||
// imageTag := "atest1.azurecr.io/go-inf:1"
|
||||
imageTagSlices := strings.Split(imageTag, "/")
|
||||
azureIdensAT, err := getAzureAADAccessToken()
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
atMap := make(map[string]interface{})
|
||||
azureIdensATSlices := strings.Split(azureIdensAT, ".")
|
||||
if len(azureIdensATSlices) < 2 {
|
||||
return "", "", fmt.Errorf("len(azureIdensATSlices) < 2")
|
||||
}
|
||||
resB, err := base64.RawStdEncoding.DecodeString(azureIdensATSlices[1])
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("in GetLoginDetailsForAzurCR, failed to DecodeString: %v, %s", err, azureIdensATSlices[1])
|
||||
}
|
||||
if err := json.Unmarshal(resB, &atMap); err != nil {
|
||||
return "", "", fmt.Errorf("failed to unmarshal azureIdensAT: %v, %s", err, string(resB))
|
||||
}
|
||||
// excahnging AAD for ACR refresh token
|
||||
refreshToken, err := excahngeAzureAADAccessTokenForACRRefreshToken(imageTagSlices[0], fmt.Sprintf("%v", atMap["tid"]), azureIdensAT)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("failed to excahngeAzureAADAccessTokenForACRRefreshToken: %v, registry: %s, tenantID: %s, azureAADAT: %s", err, imageTagSlices[0], fmt.Sprintf("%v", atMap["tid"]), azureIdensAT)
|
||||
}
|
||||
|
||||
return "00000000-0000-0000-0000-000000000000", refreshToken, nil
|
||||
}
|
||||
|
||||
// excahngeAzureAADAccessTokenForACRRefreshToken exchanges an Azure AD
// access token for an ACR refresh token via the registry's
// /oauth2/exchange endpoint.
// (The typo in the name is kept — renaming would break existing callers.)
func excahngeAzureAADAccessTokenForACRRefreshToken(registry, tenantID, azureAADAT string) (string, error) {
	msi_parameters := url.Values{}
	msi_parameters.Add("service", registry)
	msi_parameters.Add("grant_type", "access_token")
	msi_parameters.Add("tenant", tenantID)
	msi_parameters.Add("access_token", azureAADAT)
	postBodyStr := msi_parameters.Encode()
	req, err := http.NewRequest("POST", fmt.Sprintf("https://%v/oauth2/exchange", registry), strings.NewReader(postBodyStr))
	if err != nil {
		return "", fmt.Errorf("creating HTTP request : %v", err)
	}
	req.Header.Add("Metadata", "true")
	// Parameters are sent form-encoded in the POST body.
	req.Header.Add("Content-Type", "application/x-www-form-urlencoded")

	// Call the registry's token-exchange endpoint.
	resp, err := httpClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("calling token endpoint : %v", err)
	}

	// Pull out response body
	responseBytes, err := io.ReadAll(resp.Body)
	defer resp.Body.Close()
	if err != nil {
		return "", fmt.Errorf("reading response body : %v", err)
	}
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		return "", fmt.Errorf("azure exchange AT resp: %v, %v", resp.Status, string(responseBytes))
	}
	// The exchange endpoint answers with {"refresh_token": "..."}.
	resultMap := make(map[string]string)
	err = json.Unmarshal(responseBytes, &resultMap)
	if err != nil {
		return "", fmt.Errorf("unmarshalling the response: %v", err)
	}
	return resultMap["refresh_token"], nil
}
|
||||
|
||||
// CheckIsGCRImage reports whether imageTag points at a Google Container
// Registry, e.g. "gcr.io/elated-pottery-310110/golang-inf:2".
func CheckIsGCRImage(imageTag string) bool {
	const gcrDomainMarker = "gcr.io/"
	return strings.Contains(imageTag, gcrDomainMarker)
}
|
||||
|
||||
// GetLoginDetailsForGCR return user name + password to use
|
||||
func GetLoginDetailsForGCR(imageTag string) (string, string, error) {
|
||||
msi_endpoint, err := url.Parse(fmt.Sprintf("http://169.254.169.254/computeMetadata/v1/instance/service-accounts/%s/token", gcrDefaultServiceAccountName))
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("creating URL : %v", err)
|
||||
}
|
||||
req, err := http.NewRequest("GET", msi_endpoint.String(), nil)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("creating HTTP request : %v", err)
|
||||
}
|
||||
req.Header.Add("Metadata-Flavor", "Google")
|
||||
|
||||
// Call managed services for Azure resources token endpoint
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf("calling token endpoint : %v", err)
|
||||
}
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
return "", "", fmt.Errorf("HTTP Status : %v, make sure the '%s' service account is configured for ARMO pod", resp.Status, gcrDefaultServiceAccountName)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
respMap := make(map[string]interface{})
|
||||
if err := json.NewDecoder(resp.Body).Decode(&respMap); err != nil {
|
||||
return "", "", fmt.Errorf("json Decode : %v", err)
|
||||
}
|
||||
return "oauth2accesstoken", fmt.Sprintf("%v", respMap["access_token"]), nil
|
||||
}
|
||||
|
||||
func GetCloudVendorRegistryCredentials(imageTag string) (map[string]types.AuthConfig, error) {
|
||||
secrets := map[string]types.AuthConfig{}
|
||||
var errRes error
|
||||
if CheckIsACRImage(imageTag) {
|
||||
userName, password, err := GetLoginDetailsForAzurCR(imageTag)
|
||||
if err != nil {
|
||||
errRes = fmt.Errorf("failed to GetLoginDetailsForACR(%s): %v", imageTag, err)
|
||||
} else {
|
||||
secrets[imageTag] = types.AuthConfig{
|
||||
Username: userName,
|
||||
Password: password,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if CheckIsECRImage(imageTag) {
|
||||
userName, password, err := GetLoginDetailsForECR(imageTag)
|
||||
if err != nil {
|
||||
errRes = fmt.Errorf("failed to GetLoginDetailsForECR(%s): %v", imageTag, err)
|
||||
} else {
|
||||
secrets[imageTag] = types.AuthConfig{
|
||||
Username: userName,
|
||||
Password: password,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if CheckIsGCRImage(imageTag) {
|
||||
userName, password, err := GetLoginDetailsForGCR(imageTag)
|
||||
if err != nil {
|
||||
errRes = fmt.Errorf("failed to GetLoginDetailsForGCR(%s): %v", imageTag, err)
|
||||
} else {
|
||||
secrets[imageTag] = types.AuthConfig{
|
||||
Username: userName,
|
||||
Password: password,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return secrets, errRes
|
||||
}
|
||||
@@ -1,121 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
|
||||
// DO NOT REMOVE - load cloud providers auth
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client/config"
|
||||
)
|
||||
|
||||
// ConnectedToCluster caches whether a kubernetes configuration could be
// loaded; IsConnectedToCluster flips it to false when loading fails.
var ConnectedToCluster = true

// K8SConfig pointer to k8s config; populated lazily by LoadK8sConfig.
var K8SConfig *restclient.Config
|
||||
|
||||
// KubernetesApi bundles the typed client, the dynamic (unstructured)
// client, and the context used for all API calls made through it.
type KubernetesApi struct {
	KubernetesClient kubernetes.Interface
	DynamicClient    dynamic.Interface
	Context          context.Context
}
|
||||
|
||||
// NewKubernetesApi builds a KubernetesApi with typed and dynamic clients.
// NOTE: this constructor terminates the process (os.Exit(1)) when no
// cluster configuration can be loaded or a client cannot be created —
// callers cannot recover from a missing kubeconfig.
func NewKubernetesApi() *KubernetesApi {
	var kubernetesClient *kubernetes.Clientset
	var err error

	if !IsConnectedToCluster() {
		fmt.Println(fmt.Errorf("failed to load kubernetes config: no configuration has been provided, try setting KUBECONFIG environment variable"))
		os.Exit(1)
	}

	kubernetesClient, err = kubernetes.NewForConfig(GetK8sConfig())
	if err != nil {
		fmt.Printf("Failed to load config file, reason: %s", err.Error())
		os.Exit(1)
	}
	// K8SConfig was populated above via IsConnectedToCluster -> LoadK8sConfig.
	dynamicClient, err := dynamic.NewForConfig(K8SConfig)
	if err != nil {
		fmt.Printf("Failed to load config file, reason: %s", err.Error())
		os.Exit(1)
	}

	return &KubernetesApi{
		KubernetesClient: kubernetesClient,
		DynamicClient:    dynamicClient,
		Context:          context.Background(),
	}
}
|
||||
|
||||
// RunningIncluster whether running in cluster (set by LoadK8sConfig when an
// in-cluster service-account config is available).
var RunningIncluster bool

// LoadK8sConfig load config from local file or from cluster. On success the
// package-level K8SConfig is set; RunningIncluster is updated as a side
// effect.
func LoadK8sConfig() error {
	kubeconfig, err := config.GetConfig()
	if err != nil {
		// controller-runtime reports the KUBERNETES_MASTER env var in its
		// error, but users of this tool are told to set KUBECONFIG — rewrite
		// the message accordingly.
		return fmt.Errorf("failed to load kubernetes config: %s", strings.ReplaceAll(err.Error(), "KUBERNETES_MASTER", "KUBECONFIG"))
	}
	// Probe the in-cluster config to detect in-cluster execution.
	if _, err := restclient.InClusterConfig(); err == nil {
		RunningIncluster = true
	}

	K8SConfig = kubeconfig
	return nil
}
|
||||
|
||||
// GetK8sConfig get config. load if not loaded yet.
// Returns nil when no cluster configuration is reachable.
func GetK8sConfig() *restclient.Config {
	if !IsConnectedToCluster() {
		return nil
	}
	return K8SConfig
}
|
||||
|
||||
func IsConnectedToCluster() bool {
|
||||
if K8SConfig == nil {
|
||||
if err := LoadK8sConfig(); err != nil {
|
||||
ConnectedToCluster = false
|
||||
}
|
||||
}
|
||||
return ConnectedToCluster
|
||||
}
|
||||
// GetClusterName returns the current kubeconfig context name as the cluster
// name, or "" when not connected or when the config cannot be read.
func GetClusterName() string {
	if !ConnectedToCluster {
		return ""
	}

	kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(clientcmd.NewDefaultClientConfigLoadingRules(), &clientcmd.ConfigOverrides{})
	config, err := kubeConfig.RawConfig()
	if err != nil {
		return ""
	}
	// TODO - Handle if empty
	return config.CurrentContext
}
|
||||
|
||||
func GetDefaultNamespace() string {
|
||||
defaultNamespace := "default"
|
||||
clientCfg, err := clientcmd.NewDefaultClientConfigLoadingRules().Load()
|
||||
if err != nil {
|
||||
return defaultNamespace
|
||||
}
|
||||
apiContext, ok := clientCfg.Contexts[clientCfg.CurrentContext]
|
||||
if !ok || apiContext == nil {
|
||||
return defaultNamespace
|
||||
}
|
||||
namespace := apiContext.Namespace
|
||||
if apiContext.Namespace == "" {
|
||||
namespace = defaultNamespace
|
||||
}
|
||||
return namespace
|
||||
}
|
||||
@@ -1,34 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/cautils"
|
||||
)
|
||||
|
||||
// TestGetGroupVersionResource checks the kind -> GroupVersionResource
// mapping for a kind extracted from a wlid (Deployment -> apps/v1) and for
// a kind that needs "y" -> "ies" pluralization (NetworkPolicy).
func TestGetGroupVersionResource(t *testing.T) {
	wlid := "wlid://cluster-david-v1/namespace-default/deployment-nginx-deployment"
	r, err := GetGroupVersionResource(cautils.GetKindFromWlid(wlid))
	if err != nil {
		t.Error(err)
		return
	}
	if r.Group != "apps" {
		t.Errorf("wrong group")
	}
	if r.Version != "v1" {
		t.Errorf("wrong Version")
	}
	if r.Resource != "deployments" {
		t.Errorf("wrong Resource")
	}

	r2, err := GetGroupVersionResource("NetworkPolicy")
	if err != nil {
		t.Error(err)
		return
	}
	if r2.Resource != "networkpolicies" {
		t.Errorf("wrong Resource")
	}
}
|
||||
@@ -1,145 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/cautils"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/dynamic"
|
||||
//
|
||||
// Uncomment to load all auth plugins
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth
|
||||
//
|
||||
// Or uncomment to load specific auth plugins
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
|
||||
)
|
||||
|
||||
// GetWorkloadByWlid fetches the workload addressed by a wlid
// ("wlid://cluster-<c>/namespace-<ns>/<kind>-<name>"), delegating the
// namespace/kind/name extraction to the cautils wlid helpers.
func (k8sAPI *KubernetesApi) GetWorkloadByWlid(wlid string) (*Workload, error) {
	return k8sAPI.GetWorkload(cautils.GetNamespaceFromWlid(wlid), cautils.GetKindFromWlid(wlid), cautils.GetNameFromWlid(wlid))
}
|
||||
|
||||
// GetWorkload GETs a single resource by namespace/kind/name via the dynamic
// client and wraps the unstructured object as a *Workload.
func (k8sAPI *KubernetesApi) GetWorkload(namespace, kind, name string) (*Workload, error) {
	groupVersionResource, err := GetGroupVersionResource(kind)
	if err != nil {
		return nil, err
	}

	w, err := k8sAPI.ResourceInterface(&groupVersionResource, namespace).Get(k8sAPI.Context, name, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to GET resource, kind: '%s', namespace: '%s', name: '%s', reason: %s", kind, namespace, name, err.Error())
	}
	return NewWorkloadObj(w.Object), nil
}
|
||||
|
||||
func (k8sAPI *KubernetesApi) ListWorkloads(groupVersionResource *schema.GroupVersionResource, namespace string, podLabels, fieldSelector map[string]string) ([]Workload, error) {
|
||||
listOptions := metav1.ListOptions{}
|
||||
if podLabels != nil && len(podLabels) > 0 {
|
||||
set := labels.Set(podLabels)
|
||||
listOptions.LabelSelector = SelectorToString(set)
|
||||
}
|
||||
if fieldSelector != nil && len(fieldSelector) > 0 {
|
||||
set := labels.Set(fieldSelector)
|
||||
listOptions.FieldSelector = SelectorToString(set)
|
||||
}
|
||||
uList, err := k8sAPI.ResourceInterface(groupVersionResource, namespace).List(k8sAPI.Context, listOptions)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to LIST resources, reason: %s", err.Error())
|
||||
}
|
||||
workloads := make([]Workload, len(uList.Items))
|
||||
for i := range uList.Items {
|
||||
workloads[i] = *NewWorkloadObj(uList.Items[i].Object)
|
||||
}
|
||||
return workloads, nil
|
||||
}
|
||||
|
||||
// DeleteWorkloadByWlid deletes the resource addressed by the wlid
// ("wlid://cluster-<c>/namespace-<ns>/<kind>-<name>").
func (k8sAPI *KubernetesApi) DeleteWorkloadByWlid(wlid string) error {
	groupVersionResource, err := GetGroupVersionResource(cautils.GetKindFromWlid(wlid))
	if err != nil {
		return err
	}
	err = k8sAPI.ResourceInterface(&groupVersionResource, cautils.GetNamespaceFromWlid(wlid)).Delete(k8sAPI.Context, cautils.GetNameFromWlid(wlid), metav1.DeleteOptions{})
	if err != nil {
		return fmt.Errorf("failed to DELETE resource, workloadID: '%s', reason: %s", wlid, err.Error())
	}
	return nil
}
|
||||
|
||||
// CreateWorkload CREATEs the given workload in its own namespace via the
// dynamic client and returns the server-side result as a *Workload.
func (k8sAPI *KubernetesApi) CreateWorkload(workload *Workload) (*Workload, error) {
	groupVersionResource, err := GetGroupVersionResource(workload.GetKind())
	if err != nil {
		return nil, err
	}
	// Convert to unstructured form for the dynamic client.
	obj, err := workload.ToUnstructured()
	if err != nil {
		return nil, err
	}
	w, err := k8sAPI.ResourceInterface(&groupVersionResource, workload.GetNamespace()).Create(k8sAPI.Context, obj, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to CREATE resource, workload: '%s', reason: %s", workload.Json(), err.Error())
	}
	return NewWorkloadObj(w.Object), nil
}
|
||||
|
||||
// UpdateWorkload UPDATEs the given workload in its own namespace via the
// dynamic client and returns the server-side result as a *Workload.
func (k8sAPI *KubernetesApi) UpdateWorkload(workload *Workload) (*Workload, error) {
	groupVersionResource, err := GetGroupVersionResource(workload.GetKind())
	if err != nil {
		return nil, err
	}

	// Convert to unstructured form for the dynamic client.
	obj, err := workload.ToUnstructured()
	if err != nil {
		return nil, err
	}

	w, err := k8sAPI.ResourceInterface(&groupVersionResource, workload.GetNamespace()).Update(k8sAPI.Context, obj, metav1.UpdateOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to UPDATE resource, workload: '%s', reason: %s", workload.Json(), err.Error())
	}
	return NewWorkloadObj(w.Object), nil
}
|
||||
|
||||
// GetNamespace fetches a namespace object by name.
// Namespaces are cluster-scoped, so the dynamic client is used directly
// here (no .Namespace(...) scoping).
func (k8sAPI *KubernetesApi) GetNamespace(ns string) (*Workload, error) {
	groupVersionResource, err := GetGroupVersionResource("namespace")
	if err != nil {
		return nil, err
	}
	w, err := k8sAPI.DynamicClient.Resource(groupVersionResource).Get(k8sAPI.Context, ns, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to get namespace: '%s', reason: %s", ns, err.Error())
	}
	return NewWorkloadObj(w.Object), nil
}
|
||||
|
||||
// ResourceInterface returns a dynamic-client handle for the resource,
// scoped to namespace only when the resource is namespace-scoped.
func (k8sAPI *KubernetesApi) ResourceInterface(resource *schema.GroupVersionResource, namespace string) dynamic.ResourceInterface {
	if IsNamespaceScope(resource.Group, resource.Resource) {
		return k8sAPI.DynamicClient.Resource(*resource).Namespace(namespace)
	}
	return k8sAPI.DynamicClient.Resource(*resource)
}
|
||||
|
||||
// CalculateWorkloadParentRecursive walks up the ownerReferences chain and
// returns the kind and name of the top-most owner. Only the FIRST owner
// reference at each level is followed.
// NOTE(review): matching on the "not found in resourceMap" error text is
// brittle — it couples this function to GetGroupVersionResource's exact
// error message; confirm before changing either.
func (k8sAPI *KubernetesApi) CalculateWorkloadParentRecursive(workload *Workload) (string, string, error) {
	ownerReferences, err := workload.GetOwnerReferences() // OwnerReferences in workload
	if err != nil {
		return workload.GetKind(), workload.GetName(), err
	}
	if len(ownerReferences) == 0 {
		return workload.GetKind(), workload.GetName(), nil // parent found
	}
	ownerReference := ownerReferences[0]

	parentWorkload, err := k8sAPI.GetWorkload(workload.GetNamespace(), ownerReference.Kind, ownerReference.Name)
	if err != nil {
		if strings.Contains(err.Error(), "not found in resourceMap") { // if parent is RCD
			return workload.GetKind(), workload.GetName(), nil // parent found
		}
		return workload.GetKind(), workload.GetName(), err
	}
	return k8sAPI.CalculateWorkloadParentRecursive(parentWorkload)
}
|
||||
@@ -1,43 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
dynamicfake "k8s.io/client-go/dynamic/fake"
|
||||
kubernetesfake "k8s.io/client-go/kubernetes/fake"
|
||||
//
|
||||
// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
// Uncomment to load all auth plugins
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth
|
||||
//
|
||||
// Or uncomment to load specific auth plugins
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
|
||||
)
|
||||
|
||||
// NewKubernetesApi -
|
||||
func NewKubernetesApiMock() *KubernetesApi {
|
||||
|
||||
return &KubernetesApi{
|
||||
KubernetesClient: kubernetesfake.NewSimpleClientset(),
|
||||
DynamicClient: dynamicfake.NewSimpleDynamicClient(&runtime.Scheme{}),
|
||||
Context: context.Background(),
|
||||
}
|
||||
}
|
||||
|
||||
// func TestListDynamic(t *testing.T) {
|
||||
// k8s := NewKubernetesApi()
|
||||
// resource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
|
||||
// clientResource, err := k8s.DynamicClient.Resource(resource).Namespace("default").List(k8s.Context, metav1.ListOptions{})
|
||||
// if err != nil {
|
||||
// t.Errorf("err: %v", err)
|
||||
// } else {
|
||||
// bla, _ := json.Marshal(clientResource)
|
||||
// // t.Errorf("BearerToken: %v", *K8SConfig)
|
||||
// // os.WriteFile("bla.json", bla, 777)
|
||||
// t.Errorf("clientResource: %s", string(bla))
|
||||
// }
|
||||
// }
|
||||
@@ -1,66 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
)
|
||||
|
||||
//
|
||||
// Uncomment to load all auth plugins
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth
|
||||
//
|
||||
// Or uncomment to load specific auth plugins
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
|
||||
// _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
|
||||
|
||||
func ConvertUnstructuredSliceToMap(unstructuredSlice []unstructured.Unstructured) []map[string]interface{} {
|
||||
converted := make([]map[string]interface{}, len(unstructuredSlice))
|
||||
for i := range unstructuredSlice {
|
||||
converted[i] = unstructuredSlice[i].Object
|
||||
}
|
||||
return converted
|
||||
}
|
||||
|
||||
func FilterOutOwneredResources(result []unstructured.Unstructured) []unstructured.Unstructured {
|
||||
response := []unstructured.Unstructured{}
|
||||
recognizedOwners := []string{"Deployment", "ReplicaSet", "DaemonSet", "StatefulSet", "Job", "CronJob"}
|
||||
for i := range result {
|
||||
ownerReferences := result[i].GetOwnerReferences()
|
||||
if len(ownerReferences) == 0 {
|
||||
response = append(response, result[i])
|
||||
} else if !IsStringInSlice(recognizedOwners, ownerReferences[0].Kind) {
|
||||
response = append(response, result[i])
|
||||
}
|
||||
}
|
||||
return response
|
||||
}
|
||||
|
||||
// IsStringInSlice reports whether val appears in slice.
func IsStringInSlice(slice []string, val string) bool {
	for i := range slice {
		if slice[i] == val {
			return true
		}
	}
	return false
}
|
||||
|
||||
// String returns all labels listed as a human readable string.
|
||||
// Conveniently, exactly the format that ParseSelector takes.
|
||||
func SelectorToString(ls labels.Set) string {
|
||||
selector := make([]string, 0, len(ls))
|
||||
for key, value := range ls {
|
||||
if value != "" {
|
||||
selector = append(selector, key+"="+value)
|
||||
} else {
|
||||
selector = append(selector, key)
|
||||
}
|
||||
}
|
||||
// Sort for determinism.
|
||||
sort.StringSlice(selector).Sort()
|
||||
return strings.Join(selector, ",")
|
||||
}
|
||||
@@ -1,10 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestConvertUnstructuredSliceToMap sanity-checks that the kube-system
// namespace mock converts to a non-empty slice of object maps.
func TestConvertUnstructuredSliceToMap(t *testing.T) {
	converted := ConvertUnstructuredSliceToMap(V1KubeSystemNamespaceMock().Items)
	if len(converted) == 0 { // != 7
		t.Errorf("len(converted) == 0")
	}
}
|
||||
@@ -1,71 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/cautils"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
)
|
||||
|
||||
// IsAttached reports the tri-state value of the ARMO attach label:
// nil when the label is absent, otherwise a pointer to its boolean value.
func IsAttached(labels map[string]string) *bool {
	return IsLabel(labels, cautils.ArmoAttach)
}
|
||||
|
||||
// IsAgentCompatibleLabel reports the tri-state value of the ARMO
// compatibility label (nil when the label is absent).
func IsAgentCompatibleLabel(labels map[string]string) *bool {
	return IsLabel(labels, cautils.ArmoCompatibleLabel)
}

// IsAgentCompatibleAnnotation is the annotation counterpart of
// IsAgentCompatibleLabel.
func IsAgentCompatibleAnnotation(annotations map[string]string) *bool {
	return IsLabel(annotations, cautils.ArmoCompatibleAnnotation)
}

// SetAgentCompatibleLabel writes the compatibility label as "true"/"false".
func SetAgentCompatibleLabel(labels map[string]string, val bool) {
	SetLabel(labels, cautils.ArmoCompatibleLabel, val)
}

// SetAgentCompatibleAnnotation writes the compatibility annotation as
// "true"/"false".
func SetAgentCompatibleAnnotation(annotations map[string]string, val bool) {
	SetLabel(annotations, cautils.ArmoCompatibleAnnotation, val)
}
|
||||
// IsLabel returns a tri-state reading of labels[key]: nil when the map is
// empty/nil or the key is absent; otherwise a pointer to true iff the value
// is the string "true" (any other value, including "false", yields false —
// matching the original behavior).
func IsLabel(labels map[string]string, key string) *bool {
	// len() of a nil map is 0, so the previous explicit nil check was
	// redundant.
	if len(labels) == 0 {
		return nil
	}
	if l, ok := labels[key]; ok {
		k := l == "true"
		return &k
	}
	return nil
}
|
||||
// SetLabel stores val under key as the string "true"/"false"; a nil map is
// left untouched.
func SetLabel(labels map[string]string, key string, val bool) {
	if labels == nil {
		return
	}
	value := "false"
	if val {
		value = "true"
	}
	labels[key] = value
}
|
||||
// ListAttachedPods lists the pods in namespace labeled as ARMO-attached.
func (k8sAPI *KubernetesApi) ListAttachedPods(namespace string) ([]corev1.Pod, error) {
	return k8sAPI.ListPods(namespace, map[string]string{cautils.ArmoAttach: cautils.BoolToString(true)})
}
|
||||
|
||||
func (k8sAPI *KubernetesApi) ListPods(namespace string, podLabels map[string]string) ([]corev1.Pod, error) {
|
||||
listOptions := metav1.ListOptions{}
|
||||
if podLabels != nil && len(podLabels) > 0 {
|
||||
set := labels.Set(podLabels)
|
||||
listOptions.LabelSelector = set.AsSelector().String()
|
||||
}
|
||||
pods, err := k8sAPI.KubernetesClient.CoreV1().Pods(namespace).List(context.Background(), listOptions)
|
||||
if err != nil {
|
||||
return []corev1.Pod{}, err
|
||||
}
|
||||
return pods.Items, nil
|
||||
}
|
||||
File diff suppressed because one or more lines are too long
@@ -1,142 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// ValueNotFound is the sentinel index returned by StringInSlice when the
// value is absent.
const ValueNotFound = -1

// ResourceGroupMapping maps a plural resource name to its "group/version"
// string (the core API group is the empty string, hence the leading "/").
// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#-strong-api-groups-strong-
var ResourceGroupMapping = map[string]string{
	"services":                        "/v1",
	"pods":                            "/v1",
	"replicationcontrollers":          "/v1",
	"podtemplates":                    "/v1",
	"namespaces":                      "/v1",
	"nodes":                           "/v1",
	"configmaps":                      "/v1",
	"secrets":                         "/v1",
	"serviceaccounts":                 "/v1",
	"persistentvolumeclaims":          "/v1",
	"limitranges":                     "/v1",
	"resourcequotas":                  "/v1",
	"daemonsets":                      "apps/v1",
	"deployments":                     "apps/v1",
	"replicasets":                     "apps/v1",
	"statefulsets":                    "apps/v1",
	"controllerrevisions":             "apps/v1",
	"jobs":                            "batch/v1",
	"cronjobs":                        "batch/v1beta1",
	"horizontalpodautoscalers":        "autoscaling/v1",
	"ingresses":                       "extensions/v1beta1",
	"networkpolicies":                 "networking.k8s.io/v1",
	"clusterroles":                    "rbac.authorization.k8s.io/v1",
	"clusterrolebindings":             "rbac.authorization.k8s.io/v1",
	"roles":                           "rbac.authorization.k8s.io/v1",
	"rolebindings":                    "rbac.authorization.k8s.io/v1",
	"mutatingwebhookconfigurations":   "admissionregistration.k8s.io/v1",
	"validatingwebhookconfigurations": "admissionregistration.k8s.io/v1",
}

// GroupsClusterScope lists API groups that are entirely cluster-scoped
// (currently none).
var GroupsClusterScope = []string{}

// ResourceClusterScope lists individual resources that are cluster-scoped.
var ResourceClusterScope = []string{"nodes", "namespaces", "clusterroles", "clusterrolebindings"}
|
||||
|
||||
// GetGroupVersionResource maps a resource kind (singular or plural, any
// case) to its schema.GroupVersionResource using ResourceGroupMapping.
// Returns an error for kinds absent from the mapping.
func GetGroupVersionResource(resource string) (schema.GroupVersionResource, error) {
	resource = updateResourceKind(resource)
	if r, ok := ResourceGroupMapping[resource]; ok {
		// Mapping values are "group/version"; the core group is "".
		gv := strings.Split(r, "/")
		return schema.GroupVersionResource{Group: gv[0], Version: gv[1], Resource: resource}, nil
	}
	return schema.GroupVersionResource{}, fmt.Errorf("resource '%s' not found in resourceMap", resource)
}
|
||||
|
||||
// IsNamespaceScope reports whether the given apiGroup/resource pair is
// namespace-scoped, i.e. appears in neither cluster-scope list.
func IsNamespaceScope(apiGroup, resource string) bool {
	return StringInSlice(GroupsClusterScope, apiGroup) == ValueNotFound &&
		StringInSlice(ResourceClusterScope, resource) == ValueNotFound
}
|
||||
|
||||
func StringInSlice(strSlice []string, str string) int {
|
||||
for i := range strSlice {
|
||||
if strSlice[i] == str {
|
||||
return i
|
||||
}
|
||||
}
|
||||
return ValueNotFound
|
||||
}
|
||||
|
||||
// JoinResourceTriplets renders a "group/version/resource" identifier.
func JoinResourceTriplets(group, version, resource string) string {
	return strings.Join([]string{group, version, resource}, "/")
}
|
||||
// GetResourceTriplets expands a possibly-partial (group, version, resource)
// query into concrete "group/version/resource" strings, filling missing
// parts from ResourceGroupMapping. An empty resource expands to the full
// map; an unknown resource is logged and yields no triplet.
func GetResourceTriplets(group, version, resource string) []string {
	resourceTriplets := []string{}
	if resource == "" {
		// load full map
		for k, v := range ResourceGroupMapping {
			g := strings.Split(v, "/")
			resourceTriplets = append(resourceTriplets, JoinResourceTriplets(g[0], g[1], k))
		}
	} else if version == "" {
		// load by resource; fall back to the mapped group when none given
		if v, ok := ResourceGroupMapping[resource]; ok {
			g := strings.Split(v, "/")
			if group == "" {
				group = g[0]
			}
			resourceTriplets = append(resourceTriplets, JoinResourceTriplets(group, g[1], resource))
		} else {
			glog.Errorf("Resource '%s' unknown", resource)
		}
	} else if group == "" {
		// load by resource and version
		if v, ok := ResourceGroupMapping[resource]; ok {
			g := strings.Split(v, "/")
			resourceTriplets = append(resourceTriplets, JoinResourceTriplets(g[0], version, resource))
		} else {
			glog.Errorf("Resource '%s' unknown", resource)
		}
	} else {
		// fully-specified query — pass through unchanged
		resourceTriplets = append(resourceTriplets, JoinResourceTriplets(group, version, resource))
	}
	return resourceTriplets
}
|
||||
// ResourceGroupToString expands a (group, version, resource) query — where
// "*" means "any" — into concrete "group/version/resource" triplets, after
// normalizing the resource kind to its lowercase plural form.
func ResourceGroupToString(group, version, resource string) []string {
	if group == "*" {
		group = ""
	}
	if version == "*" {
		version = ""
	}
	if resource == "*" {
		resource = ""
	}
	resource = updateResourceKind(resource)
	return GetResourceTriplets(group, version, resource)
}
|
||||
|
||||
// StringToResourceGroup splits a "group/version/resource" string (as built
// by JoinResourceTriplets) back into its three parts, mapping "*" wildcards
// to the empty string. Missing trailing segments are returned as "" instead
// of panicking.
func StringToResourceGroup(str string) (string, string, string) {
	splitted := strings.Split(str, "/")
	// Pad short input — previously fewer than three segments caused an
	// index-out-of-range panic.
	for len(splitted) < 3 {
		splitted = append(splitted, "")
	}
	for i := range splitted {
		if splitted[i] == "*" {
			splitted[i] = ""
		}
	}
	return splitted[0], splitted[1], splitted[2]
}
|
||||
|
||||
// updateResourceKind normalizes a kubernetes kind to its lowercase plural
// resource name, e.g. "NetworkPolicy" -> "networkpolicies", "Pod" -> "pods".
// Inputs already ending in "s" — and the empty string — pass through
// unchanged (after lowercasing).
func updateResourceKind(resource string) string {
	resource = strings.ToLower(resource)
	if resource == "" || strings.HasSuffix(resource, "s") {
		return resource
	}
	if strings.HasSuffix(resource, "y") {
		// e.g. networkpolicy -> networkpolicies
		return strings.TrimSuffix(resource, "y") + "ies"
	}
	// add 's' at the end of the resource
	return resource + "s"
}
|
||||
@@ -1,22 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
import "testing"
|
||||
|
||||
// TestResourceGroupToString verifies the wildcard expansion: "*"/"*"/"*"
// yields the full mapping, and singular kinds resolve to their
// group/version/plural triplet.
func TestResourceGroupToString(t *testing.T) {
	allResources := ResourceGroupToString("*", "*", "*")
	if len(allResources) != len(ResourceGroupMapping) {
		t.Errorf("Expected len: %d, received: %d", len(ResourceGroupMapping), len(allResources))
	}
	pod := ResourceGroupToString("*", "*", "Pod")
	if len(pod) == 0 || pod[0] != "/v1/pods" {
		t.Errorf("pod: %v", pod)
	}
	deployments := ResourceGroupToString("*", "*", "Deployment")
	if len(deployments) == 0 || deployments[0] != "apps/v1/deployments" {
		t.Errorf("deployments: %v", deployments)
	}
	cronjobs := ResourceGroupToString("*", "*", "cronjobs")
	if len(cronjobs) == 0 || cronjobs[0] != "batch/v1beta1/cronjobs" {
		t.Errorf("cronjobs: %v", cronjobs)
	}
}
|
||||
@@ -1,161 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/apis"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
)
|
||||
|
||||
// IWorkload extends IBasicWorkload with ARMO-specific metadata handling:
// wlid addressing, job tracking, and inject/ignore/compatibility markers.
type IWorkload interface {
	IBasicWorkload

	// Convert
	ToUnstructured() (*unstructured.Unstructured, error)
	ToString() string
	Json() string // DEPRECATED

	// GET
	GetWlid() string
	GetJobID() *apis.JobTracking
	GetVersion() string
	GetGroup() string

	// SET
	SetWlid(string)
	SetInject()
	SetIgnore()
	SetUpdateTime()
	SetJobID(apis.JobTracking)
	SetCompatible()
	SetIncompatible()
	SetReplaceheaders()

	// EXIST
	IsIgnore() bool
	IsInject() bool
	IsAttached() bool
	IsCompatible() bool
	IsIncompatible() bool

	// REMOVE
	RemoveWlid()
	RemoveSecretData()
	RemoveInject()
	RemoveIgnore()
	RemoveUpdateTime()
	RemoveJobID()
	RemoveCompatible()
	RemoveArmoMetadata()
	RemoveArmoLabels()
	RemoveArmoAnnotations()
}
|
||||
// IBasicWorkload is the vendor-neutral accessor surface over an
// unstructured kubernetes object: metadata getters/setters, pod-spec
// accessors, and metadata removal helpers.
type IBasicWorkload interface {

	// Set
	SetKind(string)
	SetWorkload(map[string]interface{})
	SetLabel(key, value string)
	SetAnnotation(key, value string)
	SetNamespace(string)
	SetName(string)

	// Get
	GetNamespace() string
	GetName() string
	GetGenerateName() string
	GetApiVersion() string
	GetKind() string
	GetInnerAnnotation(string) (string, bool)
	GetPodAnnotation(string) (string, bool)
	GetAnnotation(string) (string, bool)
	GetLabel(string) (string, bool)
	GetAnnotations() map[string]string
	GetInnerAnnotations() map[string]string
	GetPodAnnotations() map[string]string
	GetLabels() map[string]string
	GetInnerLabels() map[string]string
	GetPodLabels() map[string]string
	GetVolumes() ([]corev1.Volume, error)
	GetReplicas() int
	GetContainers() ([]corev1.Container, error)
	GetInitContainers() ([]corev1.Container, error)
	GetOwnerReferences() ([]metav1.OwnerReference, error)
	GetImagePullSecret() ([]corev1.LocalObjectReference, error)
	GetServiceAccountName() string
	GetSelector() (*metav1.LabelSelector, error)
	GetResourceVersion() string
	GetUID() string
	GetPodSpec() (*corev1.PodSpec, error)

	// Raw access to the underlying unstructured object map.
	GetWorkload() map[string]interface{}

	// REMOVE
	RemoveLabel(string)
	RemoveAnnotation(string)
	RemovePodStatus()
	RemoveResourceVersion()
}
|
||||
|
||||
// Workload wraps a Kubernetes manifest decoded into a generic JSON map and
// implements the accessor interfaces above.
type Workload struct {
	workload map[string]interface{} // the raw manifest, as decoded JSON
}
|
||||
|
||||
func NewWorkload(bWorkload []byte) (*Workload, error) {
|
||||
workload := make(map[string]interface{})
|
||||
if bWorkload != nil {
|
||||
if err := json.Unmarshal(bWorkload, &workload); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &Workload{
|
||||
workload: workload,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func NewWorkloadObj(workload map[string]interface{}) *Workload {
|
||||
return &Workload{
|
||||
workload: workload,
|
||||
}
|
||||
}
|
||||
|
||||
// Json returns the manifest serialized as JSON; alias of ToString.
func (w *Workload) Json() string {
	return w.ToString()
}
|
||||
func (w *Workload) ToString() string {
|
||||
if w.GetWorkload() == nil {
|
||||
return ""
|
||||
}
|
||||
bWorkload, err := json.Marshal(w.GetWorkload())
|
||||
if err != nil {
|
||||
return err.Error()
|
||||
}
|
||||
return string(bWorkload)
|
||||
}
|
||||
|
||||
func (workload *Workload) DeepCopy(w map[string]interface{}) {
|
||||
workload.workload = make(map[string]interface{})
|
||||
byt, _ := json.Marshal(w)
|
||||
json.Unmarshal(byt, &workload.workload)
|
||||
}
|
||||
|
||||
func (w *Workload) ToUnstructured() (*unstructured.Unstructured, error) {
|
||||
obj := &unstructured.Unstructured{}
|
||||
if w.workload == nil {
|
||||
return obj, nil
|
||||
}
|
||||
bWorkload, err := json.Marshal(w.workload)
|
||||
if err != nil {
|
||||
return obj, err
|
||||
}
|
||||
if err := json.Unmarshal(bWorkload, obj); err != nil {
|
||||
return obj, err
|
||||
|
||||
}
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
@@ -1,642 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/apis"
|
||||
"github.com/armosec/kubescape/cautils/cautils"
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// ======================================= DELETE ========================================
|
||||
|
||||
func (w *Workload) RemoveInject() {
|
||||
w.RemovePodLabel(cautils.CAInject) // DEPRECATED
|
||||
w.RemovePodLabel(cautils.CAAttachLabel) // DEPRECATED
|
||||
w.RemovePodLabel(cautils.ArmoAttach)
|
||||
|
||||
w.RemoveLabel(cautils.CAInject) // DEPRECATED
|
||||
w.RemoveLabel(cautils.CAAttachLabel) // DEPRECATED
|
||||
w.RemoveLabel(cautils.ArmoAttach)
|
||||
}
|
||||
|
||||
// RemoveIgnore clears the "ignore" marking. Since ignore is encoded as the
// attach label set to false (see SetIgnore), this also removes the attach
// label itself, from both the pod template and the workload metadata.
func (w *Workload) RemoveIgnore() {
	w.RemovePodLabel(cautils.CAIgnore) // DEPRECATED
	w.RemovePodLabel(cautils.ArmoAttach)

	w.RemoveLabel(cautils.CAIgnore) // DEPRECATED
	w.RemoveLabel(cautils.ArmoAttach)
}
|
||||
|
||||
func (w *Workload) RemoveWlid() {
|
||||
w.RemovePodAnnotation(cautils.CAWlid) // DEPRECATED
|
||||
w.RemovePodAnnotation(cautils.ArmoWlid)
|
||||
|
||||
w.RemoveAnnotation(cautils.CAWlid) // DEPRECATED
|
||||
w.RemoveAnnotation(cautils.ArmoWlid)
|
||||
}
|
||||
|
||||
// RemoveCompatible removes the compatibility annotation from the pod template.
// NOTE(review): unlike RemoveJobID/RemoveWlid, this does not also remove the
// workload-level annotation, even though IsCompatible checks both levels —
// confirm whether that asymmetry is intentional.
func (w *Workload) RemoveCompatible() {
	w.RemovePodAnnotation(cautils.ArmoCompatibleAnnotation)
}
|
||||
func (w *Workload) RemoveJobID() {
|
||||
w.RemovePodAnnotation(cautils.ArmoJobIDPath)
|
||||
w.RemovePodAnnotation(cautils.ArmoJobParentPath)
|
||||
w.RemovePodAnnotation(cautils.ArmoJobActionPath)
|
||||
|
||||
w.RemoveAnnotation(cautils.ArmoJobIDPath)
|
||||
w.RemoveAnnotation(cautils.ArmoJobParentPath)
|
||||
w.RemoveAnnotation(cautils.ArmoJobActionPath)
|
||||
}
|
||||
func (w *Workload) RemoveArmoMetadata() {
|
||||
w.RemoveArmoLabels()
|
||||
w.RemoveArmoAnnotations()
|
||||
}
|
||||
|
||||
func (w *Workload) RemoveArmoAnnotations() {
|
||||
l := w.GetAnnotations()
|
||||
if l != nil {
|
||||
for k := range l {
|
||||
if strings.HasPrefix(k, cautils.ArmoPrefix) {
|
||||
w.RemoveAnnotation(k)
|
||||
}
|
||||
if strings.HasPrefix(k, cautils.CAPrefix) { // DEPRECATED
|
||||
w.RemoveAnnotation(k)
|
||||
}
|
||||
}
|
||||
}
|
||||
lp := w.GetPodAnnotations()
|
||||
if lp != nil {
|
||||
for k := range lp {
|
||||
if strings.HasPrefix(k, cautils.ArmoPrefix) {
|
||||
w.RemovePodAnnotation(k)
|
||||
}
|
||||
if strings.HasPrefix(k, cautils.CAPrefix) { // DEPRECATED
|
||||
w.RemovePodAnnotation(k)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
func (w *Workload) RemoveArmoLabels() {
|
||||
l := w.GetLabels()
|
||||
if l != nil {
|
||||
for k := range l {
|
||||
if strings.HasPrefix(k, cautils.ArmoPrefix) {
|
||||
w.RemoveLabel(k)
|
||||
}
|
||||
if strings.HasPrefix(k, cautils.CAPrefix) { // DEPRECATED
|
||||
w.RemoveLabel(k)
|
||||
}
|
||||
}
|
||||
}
|
||||
lp := w.GetPodLabels()
|
||||
if lp != nil {
|
||||
for k := range lp {
|
||||
if strings.HasPrefix(k, cautils.ArmoPrefix) {
|
||||
w.RemovePodLabel(k)
|
||||
}
|
||||
if strings.HasPrefix(k, cautils.CAPrefix) { // DEPRECATED
|
||||
w.RemovePodLabel(k)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
func (w *Workload) RemoveUpdateTime() {
|
||||
|
||||
// remove from pod
|
||||
w.RemovePodAnnotation(cautils.CAUpdate) // DEPRECATED
|
||||
w.RemovePodAnnotation(cautils.ArmoUpdate)
|
||||
|
||||
// remove from workload
|
||||
w.RemoveAnnotation(cautils.CAUpdate) // DEPRECATED
|
||||
w.RemoveAnnotation(cautils.ArmoUpdate)
|
||||
}
|
||||
// RemoveSecretData strips secret payloads: the top-level "data" field and the
// kubectl last-applied-configuration annotation (which may embed the original
// manifest, data included).
func (w *Workload) RemoveSecretData() {
	w.RemoveAnnotation("kubectl.kubernetes.io/last-applied-configuration")
	delete(w.workload, "data")
}
|
||||
|
||||
func (w *Workload) RemovePodStatus() {
|
||||
delete(w.workload, "status")
|
||||
}
|
||||
|
||||
func (w *Workload) RemoveResourceVersion() {
|
||||
if _, ok := w.workload["metadata"]; !ok {
|
||||
return
|
||||
}
|
||||
meta, _ := w.workload["metadata"].(map[string]interface{})
|
||||
delete(meta, "resourceVersion")
|
||||
}
|
||||
|
||||
// RemoveLabel deletes a workload-level label (metadata.labels).
func (w *Workload) RemoveLabel(key string) {
	w.RemoveMetadata([]string{"metadata"}, "labels", key)
}

// RemoveAnnotation deletes a workload-level annotation (metadata.annotations).
func (w *Workload) RemoveAnnotation(key string) {
	w.RemoveMetadata([]string{"metadata"}, "annotations", key)
}

// RemovePodAnnotation deletes an annotation from the pod template; the
// template path depends on the workload kind (PodMetadata).
func (w *Workload) RemovePodAnnotation(key string) {
	w.RemoveMetadata(PodMetadata(w.GetKind()), "annotations", key)
}

// RemovePodLabel deletes a label from the pod template.
func (w *Workload) RemovePodLabel(key string) {
	w.RemoveMetadata(PodMetadata(w.GetKind()), "labels", key)
}
|
||||
|
||||
// RemoveMetadata deletes workload[scope...][metadata][key]; e.g.
// scope=["metadata"], metadata="labels" deletes metadata.labels[key].
// Any missing path segment makes this a no-op.
func (w *Workload) RemoveMetadata(scope []string, metadata, key string) {

	workload := w.workload
	for i := range scope {
		if _, ok := workload[scope[i]]; !ok {
			return
		}
		// A non-map value yields a nil map here; reads on a nil map return
		// the zero value and delete on a nil map is a no-op, so this is safe.
		workload, _ = workload[scope[i]].(map[string]interface{})
	}

	if _, ok := workload[metadata]; !ok {
		return
	}

	labels, _ := workload[metadata].(map[string]interface{})
	delete(labels, key)

}
|
||||
|
||||
// ========================================= SET =========================================
|
||||
|
||||
// SetWorkload replaces the whole underlying manifest map (no copy is made).
func (w *Workload) SetWorkload(workload map[string]interface{}) {
	w.workload = workload
}

// SetKind sets the top-level "kind" field.
func (w *Workload) SetKind(kind string) {
	w.workload["kind"] = kind
}
|
||||
|
||||
// SetInject marks the pod template for agent attachment (attach label "true").
func (w *Workload) SetInject() {
	w.SetPodLabel(cautils.ArmoAttach, cautils.BoolToString(true))
}
|
||||
|
||||
func (w *Workload) SetJobID(jobTracking apis.JobTracking) {
|
||||
w.SetPodAnnotation(cautils.ArmoJobIDPath, jobTracking.JobID)
|
||||
w.SetPodAnnotation(cautils.ArmoJobParentPath, jobTracking.ParentID)
|
||||
w.SetPodAnnotation(cautils.ArmoJobActionPath, fmt.Sprintf("%d", jobTracking.LastActionNumber))
|
||||
}
|
||||
|
||||
// SetIgnore marks the pod template as ignored (attach label "false").
func (w *Workload) SetIgnore() {
	w.SetPodLabel(cautils.ArmoAttach, cautils.BoolToString(false))
}

// SetCompatible marks the pod template as compatible ("true").
func (w *Workload) SetCompatible() {
	w.SetPodAnnotation(cautils.ArmoCompatibleAnnotation, cautils.BoolToString(true))
}

// SetIncompatible marks the pod template as incompatible ("false").
func (w *Workload) SetIncompatible() {
	w.SetPodAnnotation(cautils.ArmoCompatibleAnnotation, cautils.BoolToString(false))
}

// SetReplaceheaders enables the replace-headers annotation on the pod template.
func (w *Workload) SetReplaceheaders() {
	w.SetPodAnnotation(cautils.ArmoReplaceheaders, cautils.BoolToString(true))
}

// SetWlid stamps the workload-ID annotation on the pod template.
func (w *Workload) SetWlid(wlid string) {
	w.SetPodAnnotation(cautils.ArmoWlid, wlid)
}
|
||||
|
||||
func (w *Workload) SetUpdateTime() {
|
||||
w.SetPodAnnotation(cautils.ArmoUpdate, string(time.Now().UTC().Format("02-01-2006 15:04:05")))
|
||||
}
|
||||
|
||||
// SetNamespace sets metadata.namespace.
func (w *Workload) SetNamespace(namespace string) {
	w.SetMetadata([]string{"metadata"}, "namespace", namespace)
}

// SetName sets metadata.name.
func (w *Workload) SetName(name string) {
	w.SetMetadata([]string{"metadata"}, "name", name)
}

// SetLabel sets a workload-level label (metadata.labels).
func (w *Workload) SetLabel(key, value string) {
	w.SetMetadata([]string{"metadata", "labels"}, key, value)
}

// SetPodLabel sets a label on the pod template; the template path depends on
// the workload kind (PodMetadata).
func (w *Workload) SetPodLabel(key, value string) {
	w.SetMetadata(append(PodMetadata(w.GetKind()), "labels"), key, value)
}

// SetAnnotation sets a workload-level annotation (metadata.annotations).
func (w *Workload) SetAnnotation(key, value string) {
	w.SetMetadata([]string{"metadata", "annotations"}, key, value)
}

// SetPodAnnotation sets an annotation on the pod template.
func (w *Workload) SetPodAnnotation(key, value string) {
	w.SetMetadata(append(PodMetadata(w.GetKind()), "annotations"), key, value)
}
|
||||
|
||||
func (w *Workload) SetMetadata(scope []string, key string, val interface{}) {
|
||||
workload := w.workload
|
||||
for i := range scope {
|
||||
if _, ok := workload[scope[i]]; !ok {
|
||||
workload[scope[i]] = make(map[string]interface{})
|
||||
}
|
||||
workload, _ = workload[scope[i]].(map[string]interface{})
|
||||
}
|
||||
|
||||
workload[key] = val
|
||||
}
|
||||
|
||||
// ========================================= GET =========================================
|
||||
// GetWorkload returns the underlying manifest map itself (not a copy);
// mutations by the caller are visible to the Workload.
func (w *Workload) GetWorkload() map[string]interface{} {
	return w.workload
}
|
||||
func (w *Workload) GetNamespace() string {
|
||||
if v, ok := InspectWorkload(w.workload, "metadata", "namespace"); ok {
|
||||
return v.(string)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (w *Workload) GetName() string {
|
||||
if v, ok := InspectWorkload(w.workload, "metadata", "name"); ok {
|
||||
return v.(string)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (w *Workload) GetApiVersion() string {
|
||||
if v, ok := InspectWorkload(w.workload, "apiVersion"); ok {
|
||||
return v.(string)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (w *Workload) GetVersion() string {
|
||||
apiVersion := w.GetApiVersion()
|
||||
splitted := strings.Split(apiVersion, "/")
|
||||
if len(splitted) == 1 {
|
||||
return splitted[0]
|
||||
} else if len(splitted) == 2 {
|
||||
return splitted[1]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (w *Workload) GetGroup() string {
|
||||
apiVersion := w.GetApiVersion()
|
||||
splitted := strings.Split(apiVersion, "/")
|
||||
if len(splitted) == 2 {
|
||||
return splitted[0]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (w *Workload) GetGenerateName() string {
|
||||
if v, ok := InspectWorkload(w.workload, "metadata", "generateName"); ok {
|
||||
return v.(string)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (w *Workload) GetReplicas() int {
|
||||
if v, ok := InspectWorkload(w.workload, "spec", "replicas"); ok {
|
||||
replicas, isok := v.(float64)
|
||||
if isok {
|
||||
return int(replicas)
|
||||
}
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func (w *Workload) GetKind() string {
|
||||
if v, ok := InspectWorkload(w.workload, "kind"); ok {
|
||||
return v.(string)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func (w *Workload) GetSelector() (*metav1.LabelSelector, error) {
|
||||
selector := &metav1.LabelSelector{}
|
||||
if v, ok := InspectWorkload(w.workload, "spec", "selector", "matchLabels"); ok && v != nil {
|
||||
b, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
return selector, err
|
||||
}
|
||||
if err := json.Unmarshal(b, selector); err != nil {
|
||||
return selector, err
|
||||
}
|
||||
return selector, nil
|
||||
}
|
||||
return selector, nil
|
||||
}
|
||||
|
||||
func (w *Workload) GetAnnotation(annotation string) (string, bool) {
|
||||
if v, ok := InspectWorkload(w.workload, "metadata", "annotations", annotation); ok {
|
||||
return v.(string), ok
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
func (w *Workload) GetLabel(label string) (string, bool) {
|
||||
if v, ok := InspectWorkload(w.workload, "metadata", "labels", label); ok {
|
||||
return v.(string), ok
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
func (w *Workload) GetPodLabel(label string) (string, bool) {
|
||||
if v, ok := InspectWorkload(w.workload, append(PodMetadata(w.GetKind()), "labels", label)...); ok && v != nil {
|
||||
return v.(string), ok
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
func (w *Workload) GetLabels() map[string]string {
|
||||
if v, ok := InspectWorkload(w.workload, "metadata", "labels"); ok && v != nil {
|
||||
labels := make(map[string]string)
|
||||
for k, i := range v.(map[string]interface{}) {
|
||||
labels[k] = i.(string)
|
||||
}
|
||||
return labels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetInnerLabels - DEPRECATED
|
||||
// GetInnerLabels - DEPRECATED: alias of GetPodLabels.
func (w *Workload) GetInnerLabels() map[string]string {
	return w.GetPodLabels()
}
|
||||
|
||||
func (w *Workload) GetPodLabels() map[string]string {
|
||||
if v, ok := InspectWorkload(w.workload, append(PodMetadata(w.GetKind()), "labels")...); ok && v != nil {
|
||||
labels := make(map[string]string)
|
||||
for k, i := range v.(map[string]interface{}) {
|
||||
labels[k] = i.(string)
|
||||
}
|
||||
return labels
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetInnerAnnotations - DEPRECATED
|
||||
// GetInnerAnnotations - DEPRECATED: alias of GetPodAnnotations.
func (w *Workload) GetInnerAnnotations() map[string]string {
	return w.GetPodAnnotations()
}
|
||||
|
||||
// GetPodAnnotations
|
||||
func (w *Workload) GetPodAnnotations() map[string]string {
|
||||
if v, ok := InspectWorkload(w.workload, append(PodMetadata(w.GetKind()), "annotations")...); ok && v != nil {
|
||||
annotations := make(map[string]string)
|
||||
for k, i := range v.(map[string]interface{}) {
|
||||
annotations[k] = fmt.Sprintf("%v", i)
|
||||
}
|
||||
return annotations
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetInnerAnnotation DEPRECATED
|
||||
// GetInnerAnnotation - DEPRECATED: alias of GetPodAnnotation.
func (w *Workload) GetInnerAnnotation(annotation string) (string, bool) {
	return w.GetPodAnnotation(annotation)
}
|
||||
|
||||
func (w *Workload) GetPodAnnotation(annotation string) (string, bool) {
|
||||
if v, ok := InspectWorkload(w.workload, append(PodMetadata(w.GetKind()), "annotations", annotation)...); ok && v != nil {
|
||||
return v.(string), ok
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
func (w *Workload) GetAnnotations() map[string]string {
|
||||
if v, ok := InspectWorkload(w.workload, "metadata", "annotations"); ok && v != nil {
|
||||
annotations := make(map[string]string)
|
||||
for k, i := range v.(map[string]interface{}) {
|
||||
annotations[k] = fmt.Sprintf("%v", i)
|
||||
}
|
||||
return annotations
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetVolumes -
|
||||
func (w *Workload) GetVolumes() ([]corev1.Volume, error) {
|
||||
volumes := []corev1.Volume{}
|
||||
|
||||
interVolumes, _ := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "volumes")...)
|
||||
if interVolumes == nil {
|
||||
return volumes, nil
|
||||
}
|
||||
volumesBytes, err := json.Marshal(interVolumes)
|
||||
if err != nil {
|
||||
return volumes, err
|
||||
}
|
||||
err = json.Unmarshal(volumesBytes, &volumes)
|
||||
|
||||
return volumes, err
|
||||
}
|
||||
|
||||
func (w *Workload) GetServiceAccountName() string {
|
||||
|
||||
if v, ok := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "serviceAccountName")...); ok && v != nil {
|
||||
return v.(string)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (w *Workload) GetPodSpec() (*corev1.PodSpec, error) {
|
||||
podSpec := &corev1.PodSpec{}
|
||||
podSepcRaw, _ := InspectWorkload(w.workload, PodSpec(w.GetKind())...)
|
||||
if podSepcRaw == nil {
|
||||
return podSpec, fmt.Errorf("no PodSpec for workload: %v", w)
|
||||
}
|
||||
b, err := json.Marshal(podSepcRaw)
|
||||
if err != nil {
|
||||
return podSpec, err
|
||||
}
|
||||
err = json.Unmarshal(b, podSpec)
|
||||
|
||||
return podSpec, err
|
||||
}
|
||||
|
||||
func (w *Workload) GetImagePullSecret() ([]corev1.LocalObjectReference, error) {
|
||||
imgPullSecrets := []corev1.LocalObjectReference{}
|
||||
|
||||
iImgPullSecrets, _ := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "imagePullSecrets")...)
|
||||
b, err := json.Marshal(iImgPullSecrets)
|
||||
if err != nil {
|
||||
return imgPullSecrets, err
|
||||
}
|
||||
err = json.Unmarshal(b, &imgPullSecrets)
|
||||
|
||||
return imgPullSecrets, err
|
||||
}
|
||||
|
||||
// GetContainers -
|
||||
func (w *Workload) GetContainers() ([]corev1.Container, error) {
|
||||
containers := []corev1.Container{}
|
||||
|
||||
interContainers, _ := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "containers")...)
|
||||
if interContainers == nil {
|
||||
return containers, nil
|
||||
}
|
||||
containersBytes, err := json.Marshal(interContainers)
|
||||
if err != nil {
|
||||
return containers, err
|
||||
}
|
||||
err = json.Unmarshal(containersBytes, &containers)
|
||||
|
||||
return containers, err
|
||||
}
|
||||
|
||||
// GetInitContainers -
|
||||
func (w *Workload) GetInitContainers() ([]corev1.Container, error) {
|
||||
containers := []corev1.Container{}
|
||||
|
||||
interContainers, _ := InspectWorkload(w.workload, append(PodSpec(w.GetKind()), "initContainers")...)
|
||||
if interContainers == nil {
|
||||
return containers, nil
|
||||
}
|
||||
containersBytes, err := json.Marshal(interContainers)
|
||||
if err != nil {
|
||||
return containers, err
|
||||
}
|
||||
err = json.Unmarshal(containersBytes, &containers)
|
||||
|
||||
return containers, err
|
||||
}
|
||||
|
||||
// GetOwnerReferences -
|
||||
func (w *Workload) GetOwnerReferences() ([]metav1.OwnerReference, error) {
|
||||
ownerReferences := []metav1.OwnerReference{}
|
||||
interOwnerReferences, ok := InspectWorkload(w.workload, "metadata", "ownerReferences")
|
||||
if !ok {
|
||||
return ownerReferences, nil
|
||||
}
|
||||
|
||||
ownerReferencesBytes, err := json.Marshal(interOwnerReferences)
|
||||
if err != nil {
|
||||
return ownerReferences, err
|
||||
}
|
||||
err = json.Unmarshal(ownerReferencesBytes, &ownerReferences)
|
||||
if err != nil {
|
||||
return ownerReferences, err
|
||||
|
||||
}
|
||||
return ownerReferences, nil
|
||||
}
|
||||
func (w *Workload) GetResourceVersion() string {
|
||||
if v, ok := InspectWorkload(w.workload, "metadata", "resourceVersion"); ok {
|
||||
return v.(string)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
func (w *Workload) GetUID() string {
|
||||
if v, ok := InspectWorkload(w.workload, "metadata", "uid"); ok {
|
||||
return v.(string)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
// GetWlid returns the workload-ID annotation, or "" when absent.
// NOTE(review): reads only the workload-level annotation, while SetWlid
// writes the pod-template annotation — confirm which level callers rely on.
func (w *Workload) GetWlid() string {
	if wlid, ok := w.GetAnnotation(cautils.ArmoWlid); ok {
		return wlid
	}
	return ""
}
|
||||
|
||||
func (w *Workload) GetJobID() *apis.JobTracking {
|
||||
jobTracking := apis.JobTracking{}
|
||||
if job, ok := w.GetPodAnnotation(cautils.ArmoJobIDPath); ok {
|
||||
jobTracking.JobID = job
|
||||
}
|
||||
if parent, ok := w.GetPodAnnotation(cautils.ArmoJobParentPath); ok {
|
||||
jobTracking.ParentID = parent
|
||||
}
|
||||
if action, ok := w.GetPodAnnotation(cautils.ArmoJobActionPath); ok {
|
||||
if i, err := strconv.Atoi(action); err == nil {
|
||||
jobTracking.LastActionNumber = i
|
||||
}
|
||||
}
|
||||
if jobTracking.LastActionNumber == 0 { // start the counter at 1
|
||||
jobTracking.LastActionNumber = 1
|
||||
}
|
||||
return &jobTracking
|
||||
}
|
||||
|
||||
// func (w *Workload) GetJobID() string {
|
||||
// if status, ok := w.GetAnnotation(cautils.ArmoJobID); ok {
|
||||
// return status
|
||||
// }
|
||||
// return ""
|
||||
// }
|
||||
|
||||
// ========================================= IS =========================================
|
||||
|
||||
// IsInject reports whether the workload is marked for attachment; alias of
// IsAttached.
func (w *Workload) IsInject() bool {
	return w.IsAttached()
}
|
||||
|
||||
func (w *Workload) IsIgnore() bool {
|
||||
if attach := cautils.IsAttached(w.GetPodLabels()); attach != nil {
|
||||
return !(*attach)
|
||||
}
|
||||
if attach := cautils.IsAttached(w.GetLabels()); attach != nil {
|
||||
return !(*attach)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (w *Workload) IsCompatible() bool {
|
||||
if c, ok := w.GetPodAnnotation(cautils.ArmoCompatibleAnnotation); ok {
|
||||
return cautils.StringToBool(c)
|
||||
|
||||
}
|
||||
if c, ok := w.GetAnnotation(cautils.ArmoCompatibleAnnotation); ok {
|
||||
return cautils.StringToBool(c)
|
||||
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (w *Workload) IsIncompatible() bool {
|
||||
if c, ok := w.GetPodAnnotation(cautils.ArmoCompatibleAnnotation); ok {
|
||||
return !cautils.StringToBool(c)
|
||||
}
|
||||
if c, ok := w.GetAnnotation(cautils.ArmoCompatibleAnnotation); ok {
|
||||
return !cautils.StringToBool(c)
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (w *Workload) IsAttached() bool {
|
||||
if attach := cautils.IsAttached(w.GetPodLabels()); attach != nil {
|
||||
return *attach
|
||||
}
|
||||
if attach := cautils.IsAttached(w.GetLabels()); attach != nil {
|
||||
return *attach
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsReplaceheaders reports whether the replace-headers annotation on the pod
// template is "true". Only the pod template is checked (unlike IsCompatible,
// which also falls back to the workload-level annotation).
func (w *Workload) IsReplaceheaders() bool {
	if c, ok := w.GetPodAnnotation(cautils.ArmoReplaceheaders); ok {
		return cautils.StringToBool(c)
	}
	return false
}
|
||||
|
||||
// ======================================= UTILS =========================================
|
||||
|
||||
// InspectWorkload -
|
||||
// InspectWorkload walks nested map[string]interface{} values following the
// given key path and returns (value, true) when every key resolves, or
// (nil, false) otherwise. With no scopes it returns the node itself, which
// resolves unless the node is nil.
func InspectWorkload(workload interface{}, scopes ...string) (interface{}, bool) {
	if len(scopes) == 0 {
		if workload == nil {
			return nil, false
		}
		return workload, true
	}
	node, ok := workload.(map[string]interface{})
	if !ok {
		// Path continues but the current node is not a map: lookup fails.
		return nil, false
	}
	return InspectWorkload(node[scopes[0]], scopes[1:]...)
}
|
||||
@@ -1,155 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
// ========================================= IS =========================================
|
||||
|
||||
func TestLabels(t *testing.T) {
|
||||
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"labels":{"app":"demoservice-server","cyberarmor.inject":"true"},"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":
{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
|
||||
workload, err := NewWorkload([]byte(w))
|
||||
if err != nil {
|
||||
t.Errorf(err.Error())
|
||||
}
|
||||
if workload.GetKind() != "Deployment" {
|
||||
t.Errorf("wrong kind")
|
||||
}
|
||||
if workload.GetNamespace() != "default" {
|
||||
t.Errorf("wrong namespace")
|
||||
}
|
||||
if workload.GetName() != "demoservice-server" {
|
||||
t.Errorf("wrong name")
|
||||
}
|
||||
if !workload.IsInject() {
|
||||
t.Errorf("expect to find inject label")
|
||||
}
|
||||
if workload.IsIgnore() {
|
||||
t.Errorf("expect to find ignore label")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetNamespace(t *testing.T) {
|
||||
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"demoservice-server"},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"demoservice-server"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}}}`
|
||||
workload, err := NewWorkload([]byte(w))
|
||||
if err != nil {
|
||||
t.Errorf(err.Error())
|
||||
}
|
||||
workload.SetNamespace("default")
|
||||
if workload.GetNamespace() != "default" {
|
||||
t.Errorf("wrong namespace")
|
||||
}
|
||||
}
|
||||
func TestSetLabels(t *testing.T) {
|
||||
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":
{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
|
||||
workload, err := NewWorkload([]byte(w))
|
||||
if err != nil {
|
||||
t.Errorf(err.Error())
|
||||
}
|
||||
workload.SetLabel("bla", "daa")
|
||||
v, ok := workload.GetLabel("bla")
|
||||
if !ok || v != "daa" {
|
||||
t.Errorf("expect to find label")
|
||||
}
|
||||
workload.RemoveLabel("bla")
|
||||
v2, ok2 := workload.GetLabel("bla")
|
||||
if ok2 || v2 == "daa" {
|
||||
t.Errorf("label not deleted")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSetAnnotations(t *testing.T) {
|
||||
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":
{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
|
||||
workload, err := NewWorkload([]byte(w))
|
||||
if err != nil {
|
||||
t.Errorf(err.Error())
|
||||
}
|
||||
workload.SetAnnotation("bla", "daa")
|
||||
v, ok := workload.GetAnnotation("bla")
|
||||
if !ok || v != "daa" {
|
||||
t.Errorf("expect to find annotation")
|
||||
}
|
||||
workload.RemoveAnnotation("bla")
|
||||
v2, ok2 := workload.GetAnnotation("bla")
|
||||
if ok2 || v2 == "daa" {
|
||||
t.Errorf("annotation not deleted")
|
||||
}
|
||||
}
|
||||
func TestSetPodLabels(t *testing.T) {
|
||||
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":
{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
|
||||
workload, err := NewWorkload([]byte(w))
|
||||
if err != nil {
|
||||
t.Errorf(err.Error())
|
||||
}
|
||||
workload.SetPodLabel("bla", "daa")
|
||||
v, ok := workload.GetPodLabel("bla")
|
||||
if !ok || v != "daa" {
|
||||
t.Errorf("expect to find label")
|
||||
}
|
||||
workload.RemovePodLabel("bla")
|
||||
v2, ok2 := workload.GetPodLabel("bla")
|
||||
if ok2 || v2 == "daa" {
|
||||
t.Errorf("label not deleted")
|
||||
}
|
||||
}
|
||||
func TestRemoveArmo(t *testing.T) {
|
||||
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":
{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server", "armo.attach": "true"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
|
||||
workload, err := NewWorkload([]byte(w))
|
||||
if err != nil {
|
||||
t.Errorf(err.Error())
|
||||
}
|
||||
if !workload.IsAttached() {
|
||||
t.Errorf("expect to be attached")
|
||||
}
|
||||
workload.RemoveArmoMetadata()
|
||||
if workload.IsAttached() {
|
||||
t.Errorf("expect to be clear")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestSetWlid(t *testing.T) {
|
||||
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":
{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
|
||||
workload, err := NewWorkload([]byte(w))
|
||||
if err != nil {
|
||||
t.Errorf(err.Error())
|
||||
}
|
||||
workload.SetWlid("wlid://bla")
|
||||
// t.Errorf(workload.Json())
|
||||
|
||||
}
|
||||
|
||||
func TestGetResourceVersion(t *testing.T) {
|
||||
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":
{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
|
||||
workload, err := NewWorkload([]byte(w))
|
||||
if err != nil {
|
||||
t.Errorf(err.Error())
|
||||
}
|
||||
if workload.GetResourceVersion() != "1016043" {
|
||||
t.Errorf("wrong resourceVersion")
|
||||
}
|
||||
|
||||
}
|
||||
func TestGetUID(t *testing.T) {
|
||||
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"1"},"creationTimestamp":"2021-05-03T13:10:32Z","generation":1,"managedFields":[{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:labels":{".":{},"f:app":{},"f:cyberarmor.inject":{}}},"f:spec":{"f:progressDeadlineSeconds":{},"f:replicas":{},"f:revisionHistoryLimit":{},"f:selector":{},"f:strategy":{"f:rollingUpdate":{".":{},"f:maxSurge":{},"f:maxUnavailable":{}},"f:type":{}},"f:template":{"f:metadata":{"f:labels":{".":{},"f:app":{}}},"f:spec":{"f:containers":{"k:{\"name\":\"demoservice\"}":{".":{},"f:env":{".":{},"k:{\"name\":\"ARMO_TEST_NAME\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"CAA_ENABLE_CRASH_REPORTER\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"DEMO_FOLDERS\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SERVER_PORT\"}":{".":{},"f:name":{},"f:value":{}},"k:{\"name\":\"SLEEP_DURATION\"}":{".":{},"f:name":{},"f:value":{}}},"f:image":{},"f:imagePullPolicy":{},"f:name":{},"f:ports":{".":{},"k:{\"containerPort\":8089,\"protocol\":\"TCP\"}":{".":{},"f:containerPort":{},"f:protocol":{}}},"f:resources":{},"f:terminationMessagePath":{},"f:terminationMessagePolicy":{}}},"f:dnsPolicy":{},"f:restartPolicy":{},"f:schedulerName":{},"f:securityContext":{},"f:terminationGracePeriodSeconds":{}}}}},"manager":"OpenAPI-Generator","operation":"Update","time":"2021-05-03T13:10:32Z"},{"apiVersion":"apps/v1","fieldsType":"FieldsV1","fieldsV1":{"f:metadata":{"f:annotations":{".":{},"f:deployment.kubernetes.io/revision":{}}},"f:status":{"f:availableReplicas":{},"f:conditions":{".":{},"k:{\"type\":\"Available\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"Progressing\"}":{".":{},"f:lastTransitionTime":{},"f:lastUpdateTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}},"f:observedGeneration":{},"f:readyReplicas":{},"f:replicas":
{},"f:updatedReplicas":{}}},"manager":"kube-controller-manager","operation":"Update","time":"2021-05-03T13:52:58Z"}],"name":"demoservice-server","namespace":"default","resourceVersion":"1016043","uid":"e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"demoservice-server"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"demoservice-server"}},"spec":{"containers":[{"env":[{"name":"SERVER_PORT","value":"8089"},{"name":"SLEEP_DURATION","value":"1"},{"name":"DEMO_FOLDERS","value":"/app"},{"name":"ARMO_TEST_NAME","value":"auto_attach_deployment"},{"name":"CAA_ENABLE_CRASH_REPORTER","value":"1"}],"image":"quay.io/armosec/demoservice:v25","imagePullPolicy":"IfNotPresent","name":"demoservice","ports":[{"containerPort":8089,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"terminationGracePeriodSeconds":30}}},"status":{"availableReplicas":1,"conditions":[{"lastTransitionTime":"2021-05-03T13:10:32Z","lastUpdateTime":"2021-05-03T13:10:37Z","message":"ReplicaSet \"demoservice-server-7d478b6998\" has successfully progressed.","reason":"NewReplicaSetAvailable","status":"True","type":"Progressing"},{"lastTransitionTime":"2021-05-03T13:52:58Z","lastUpdateTime":"2021-05-03T13:52:58Z","message":"Deployment has minimum availability.","reason":"MinimumReplicasAvailable","status":"True","type":"Available"}],"observedGeneration":1,"readyReplicas":1,"replicas":1,"updatedReplicas":1}}`
|
||||
workload, err := NewWorkload([]byte(w))
|
||||
if err != nil {
|
||||
t.Errorf(err.Error())
|
||||
}
|
||||
if workload.GetUID() != "e9e8a3e9-6cb4-4301-ace1-2c0cef3bd61e" {
|
||||
t.Errorf("wrong UID")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestIsAttached(t *testing.T) {
|
||||
w := `{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{"deployment.kubernetes.io/revision":"3"},"creationTimestamp":"2021-06-21T04:52:05Z","generation":3,"name":"emailservice","namespace":"default"},"spec":{"progressDeadlineSeconds":600,"replicas":1,"revisionHistoryLimit":10,"selector":{"matchLabels":{"app":"emailservice"}},"strategy":{"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"type":"RollingUpdate"},"template":{"metadata":{"annotations":{"armo.last-update":"21-06-2021 06:40:42","armo.wlid":"wlid://cluster-david-demo/namespace-default/deployment-emailservice"},"creationTimestamp":null,"labels":{"app":"emailservice","armo.attach":"true"}},"spec":{"containers":[{"env":[{"name":"PORT","value":"8080"},{"name":"DISABLE_PROFILER","value":"1"}],"image":"gcr.io/google-samples/microservices-demo/emailservice:v0.2.3","imagePullPolicy":"IfNotPresent","livenessProbe":{"exec":{"command":["/bin/grpc_health_probe","-addr=:8080"]},"failureThreshold":3,"periodSeconds":5,"successThreshold":1,"timeoutSeconds":1},"name":"server","ports":[{"containerPort":8080,"protocol":"TCP"}],"readinessProbe":{"exec":{"command":["/bin/grpc_health_probe","-addr=:8080"]},"failureThreshold":3,"periodSeconds":5,"successThreshold":1,"timeoutSeconds":1},"resources":{"limits":{"cpu":"200m","memory":"128Mi"},"requests":{"cpu":"100m","memory":"64Mi"}},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File"}],"dnsPolicy":"ClusterFirst","restartPolicy":"Always","schedulerName":"default-scheduler","securityContext":{},"serviceAccount":"default","serviceAccountName":"default","terminationGracePeriodSeconds":5}}}}`
|
||||
workload, err := NewWorkload([]byte(w))
|
||||
if err != nil {
|
||||
t.Errorf(err.Error())
|
||||
}
|
||||
if !workload.IsAttached() {
|
||||
t.Errorf("expected attached")
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,23 +0,0 @@
|
||||
package k8sinterface
|
||||
|
||||
func PodSpec(kind string) []string {
|
||||
switch kind {
|
||||
case "Pod", "Namespace":
|
||||
return []string{"spec"}
|
||||
case "CronJob":
|
||||
return []string{"spec", "jobTemplate", "spec", "template", "spec"}
|
||||
default:
|
||||
return []string{"spec", "template", "spec"}
|
||||
}
|
||||
}
|
||||
|
||||
func PodMetadata(kind string) []string {
|
||||
switch kind {
|
||||
case "Pod", "Namespace", "Secret":
|
||||
return []string{"metadata"}
|
||||
case "CronJob":
|
||||
return []string{"spec", "jobTemplate", "spec", "template", "metadata"}
|
||||
default:
|
||||
return []string{"spec", "template", "metadata"}
|
||||
}
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
package opapolicy
|
||||
|
||||
const (
|
||||
PostureRestAPIPathV1 = "/v1/posture"
|
||||
PostureRedisPrefix = "_postureReportv1"
|
||||
K8sPostureNotification = "/k8srestapi/v1/newPostureReport"
|
||||
)
|
||||
@@ -1,163 +0,0 @@
|
||||
package opapolicy
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
armotypes "github.com/armosec/kubescape/cautils/armotypes"
|
||||
)
|
||||
|
||||
type AlertScore float32
|
||||
type RuleLanguages string
|
||||
|
||||
const (
|
||||
RegoLanguage RuleLanguages = "Rego"
|
||||
RegoLanguage2 RuleLanguages = "rego"
|
||||
)
|
||||
|
||||
// RegoResponse the expected response of single run of rego policy
|
||||
type RuleResponse struct {
|
||||
AlertMessage string `json:"alertMessage"`
|
||||
RuleStatus string `json:"ruleStatus"`
|
||||
PackageName string `json:"packagename"`
|
||||
AlertScore AlertScore `json:"alertScore"`
|
||||
AlertObject AlertObject `json:"alertObject"`
|
||||
Context []string `json:"context,omitempty"` // TODO - Remove
|
||||
Rulename string `json:"rulename,omitempty"` // TODO - Remove
|
||||
ExceptionName string `json:"exceptionName,omitempty"` // Not in use
|
||||
Exception *armotypes.PostureExceptionPolicy `json:"exception,omitempty"`
|
||||
}
|
||||
|
||||
type AlertObject struct {
|
||||
K8SApiObjects []map[string]interface{} `json:"k8sApiObjects,omitempty"`
|
||||
ExternalObjects map[string]interface{} `json:"externalObjects,omitempty"`
|
||||
}
|
||||
|
||||
type FrameworkReport struct {
|
||||
Name string `json:"name"`
|
||||
ControlReports []ControlReport `json:"controlReports"`
|
||||
Score float32 `json:"score,omitempty"`
|
||||
ARMOImprovement float32 `json:"ARMOImprovement,omitempty"`
|
||||
WCSScore float32 `json:"wcsScore,omitempty"`
|
||||
}
|
||||
type ControlReport struct {
|
||||
armotypes.PortalBase `json:",inline"`
|
||||
Control_ID string `json:"id,omitempty"` // to be Deprecated
|
||||
ControlID string `json:"controlID"`
|
||||
Name string `json:"name"`
|
||||
RuleReports []RuleReport `json:"ruleReports"`
|
||||
Remediation string `json:"remediation"`
|
||||
Description string `json:"description"`
|
||||
Score float32 `json:"score"`
|
||||
BaseScore float32 `json:"baseScore,omitempty"`
|
||||
ARMOImprovement float32 `json:"ARMOImprovement,omitempty"`
|
||||
}
|
||||
type RuleReport struct {
|
||||
Name string `json:"name"`
|
||||
Remediation string `json:"remediation"`
|
||||
RuleStatus RuleStatus `json:"ruleStatus"` // did we run the rule or not (if there where compile errors, the value will be failed)
|
||||
RuleResponses []RuleResponse `json:"ruleResponses"`
|
||||
ListInputResources []map[string]interface{} `json:"-"`
|
||||
ListInputKinds []string `json:"-"`
|
||||
}
|
||||
type RuleStatus struct {
|
||||
Status string `json:"status"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// PostureReport
|
||||
type PostureReport struct {
|
||||
CustomerGUID string `json:"customerGUID"`
|
||||
ClusterName string `json:"clusterName"`
|
||||
ReportID string `json:"reportID"`
|
||||
JobID string `json:"jobID"`
|
||||
ReportGenerationTime time.Time `json:"generationTime"`
|
||||
FrameworkReports []FrameworkReport `json:"frameworks"`
|
||||
}
|
||||
|
||||
// RuleMatchObjects defines which objects this rule applied on
|
||||
type RuleMatchObjects struct {
|
||||
APIGroups []string `json:"apiGroups"` // apps
|
||||
APIVersions []string `json:"apiVersions"` // v1/ v1beta1 / *
|
||||
Resources []string `json:"resources"` // dep.., pods,
|
||||
}
|
||||
|
||||
// RuleMatchObjects defines which objects this rule applied on
|
||||
type RuleDependency struct {
|
||||
PackageName string `json:"packageName"` // package name
|
||||
}
|
||||
|
||||
// PolicyRule represents single rule, the fundamental executable block of policy
|
||||
type PolicyRule struct {
|
||||
armotypes.PortalBase `json:",inline"`
|
||||
CreationTime string `json:"creationTime"`
|
||||
Rule string `json:"rule"` // multiline string!
|
||||
RuleLanguage RuleLanguages `json:"ruleLanguage"`
|
||||
Match []RuleMatchObjects `json:"match"`
|
||||
RuleDependencies []RuleDependency `json:"ruleDependencies"`
|
||||
Description string `json:"description"`
|
||||
Remediation string `json:"remediation"`
|
||||
RuleQuery string `json:"ruleQuery"` // default "armo_builtins" - DEPRECATED
|
||||
}
|
||||
|
||||
// Control represents a collection of rules which are combined together to single purpose
|
||||
type Control struct {
|
||||
armotypes.PortalBase `json:",inline"`
|
||||
Control_ID string `json:"id,omitempty"` // to be Deprecated
|
||||
ControlID string `json:"controlID"`
|
||||
CreationTime string `json:"creationTime"`
|
||||
Description string `json:"description"`
|
||||
Remediation string `json:"remediation"`
|
||||
Rules []PolicyRule `json:"rules"`
|
||||
// for new list of rules in POST/UPADTE requests
|
||||
RulesIDs *[]string `json:"rulesIDs,omitempty"`
|
||||
}
|
||||
|
||||
type UpdatedControl struct {
|
||||
Control `json:",inline"`
|
||||
Rules []interface{} `json:"rules"`
|
||||
}
|
||||
|
||||
// Framework represents a collection of controls which are combined together to expose comprehensive behavior
|
||||
type Framework struct {
|
||||
armotypes.PortalBase `json:",inline"`
|
||||
CreationTime string `json:"creationTime"`
|
||||
Description string `json:"description"`
|
||||
Controls []Control `json:"controls"`
|
||||
// for new list of controls in POST/UPADTE requests
|
||||
ControlsIDs *[]string `json:"controlsIDs,omitempty"`
|
||||
}
|
||||
|
||||
type UpdatedFramework struct {
|
||||
Framework `json:",inline"`
|
||||
Controls []interface{} `json:"controls"`
|
||||
}
|
||||
|
||||
type NotificationPolicyType string
|
||||
type NotificationPolicyKind string
|
||||
|
||||
// Supported NotificationTypes
|
||||
const (
|
||||
TypeValidateRules NotificationPolicyType = "validateRules"
|
||||
TypeExecPostureScan NotificationPolicyType = "execPostureScan"
|
||||
TypeUpdateRules NotificationPolicyType = "updateRules"
|
||||
)
|
||||
|
||||
// Supported NotificationKinds
|
||||
const (
|
||||
KindFramework NotificationPolicyKind = "Framework"
|
||||
KindControl NotificationPolicyKind = "Control"
|
||||
KindRule NotificationPolicyKind = "Rule"
|
||||
)
|
||||
|
||||
type PolicyNotification struct {
|
||||
NotificationType NotificationPolicyType `json:"notificationType"`
|
||||
Rules []PolicyIdentifier `json:"rules"`
|
||||
ReportID string `json:"reportID"`
|
||||
JobID string `json:"jobID"`
|
||||
Designators armotypes.PortalDesignator `json:"designators"`
|
||||
}
|
||||
|
||||
type PolicyIdentifier struct {
|
||||
Kind NotificationPolicyKind `json:"kind"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
@@ -1,301 +0,0 @@
|
||||
package opapolicy
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
armotypes "github.com/armosec/kubescape/cautils/armotypes"
|
||||
)
|
||||
|
||||
// Mock A
|
||||
var (
|
||||
AMockCustomerGUID = "5d817063-096f-4d91-b39b-8665240080af"
|
||||
AMockJobID = "36b6f9e1-3b63-4628-994d-cbe16f81e9c7"
|
||||
AMockReportID = "2c31e4da-c6fe-440d-9b8a-785b80c8576a"
|
||||
AMockClusterName = "clusterA"
|
||||
AMockFrameworkName = "testFrameworkA"
|
||||
AMockControlName = "testControlA"
|
||||
AMockRuleName = "testRuleA"
|
||||
AMockPortalBase = *armotypes.MockPortalBase(AMockCustomerGUID, "", nil)
|
||||
)
|
||||
|
||||
func MockRuleResponseA() *RuleResponse {
|
||||
return &RuleResponse{
|
||||
AlertMessage: "test alert message A",
|
||||
AlertScore: 0,
|
||||
Rulename: AMockRuleName,
|
||||
PackageName: "test.package.name.A",
|
||||
Context: []string{},
|
||||
}
|
||||
}
|
||||
|
||||
func MockFrameworkReportA() *FrameworkReport {
|
||||
return &FrameworkReport{
|
||||
Name: AMockFrameworkName,
|
||||
ControlReports: []ControlReport{
|
||||
{
|
||||
ControlID: "C-0010",
|
||||
Name: AMockControlName,
|
||||
RuleReports: []RuleReport{
|
||||
{
|
||||
Name: AMockRuleName,
|
||||
Remediation: "remove privilegedContainer: True flag from your pod spec",
|
||||
RuleResponses: []RuleResponse{
|
||||
*MockRuleResponseA(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func MockPostureReportA() *PostureReport {
|
||||
return &PostureReport{
|
||||
CustomerGUID: AMockCustomerGUID,
|
||||
ClusterName: AMockClusterName,
|
||||
ReportID: AMockReportID,
|
||||
JobID: AMockJobID,
|
||||
ReportGenerationTime: time.Now().UTC(),
|
||||
FrameworkReports: []FrameworkReport{*MockFrameworkReportA()},
|
||||
}
|
||||
}
|
||||
|
||||
func MockFrameworkA() *Framework {
|
||||
return &Framework{
|
||||
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-096f-4d91-b39b-8665240080af", AMockFrameworkName, nil),
|
||||
CreationTime: "",
|
||||
Description: "mock framework descryption",
|
||||
Controls: []Control{
|
||||
{
|
||||
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-aaaa-4d91-b39b-8665240080af", AMockControlName, nil),
|
||||
Rules: []PolicyRule{
|
||||
*MockRuleA(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func MockRuleUntrustedRegistries() *PolicyRule {
|
||||
return &PolicyRule{
|
||||
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-aaaa-aaaa-b39b-8665240080af", AMockControlName, nil),
|
||||
Rule: `
|
||||
package armo_builtins
|
||||
# Check for images from blacklisted repos
|
||||
|
||||
untrusted_registries(z) = x {
|
||||
x := ["015253967648.dkr.ecr.eu-central-1.amazonaws.com/"]
|
||||
}
|
||||
|
||||
public_registries(z) = y{
|
||||
y := ["quay.io/kiali/","quay.io/datawire/","quay.io/keycloak/","quay.io/bitnami/"]
|
||||
}
|
||||
|
||||
untrustedImageRepo[msga] {
|
||||
pod := input[_]
|
||||
k := pod.kind
|
||||
k == "Pod"
|
||||
container := pod.spec.containers[_]
|
||||
image := container.image
|
||||
repo_prefix := untrusted_registries(image)[_]
|
||||
startswith(image, repo_prefix)
|
||||
selfLink := pod.metadata.selfLink
|
||||
containerName := container.name
|
||||
|
||||
msga := {
|
||||
"alertMessage": sprintf("image '%v' in container '%s' in [%s] comes from untrusted registry", [image, containerName, selfLink]),
|
||||
"alert": true,
|
||||
"prevent": false,
|
||||
"alertScore": 2,
|
||||
"alertObject": [{"pod":pod}]
|
||||
}
|
||||
}
|
||||
|
||||
untrustedImageRepo[msga] {
|
||||
pod := input[_]
|
||||
k := pod.kind
|
||||
k == "Pod"
|
||||
container := pod.spec.containers[_]
|
||||
image := container.image
|
||||
repo_prefix := public_registries(image)[_]
|
||||
startswith(pod, repo_prefix)
|
||||
selfLink := input.metadata.selfLink
|
||||
containerName := container.name
|
||||
|
||||
msga := {
|
||||
"alertMessage": sprintf("image '%v' in container '%s' in [%s] comes from public registry", [image, containerName, selfLink]),
|
||||
"alert": true,
|
||||
"prevent": false,
|
||||
"alertScore": 1,
|
||||
"alertObject": [{"pod":pod}]
|
||||
}
|
||||
}
|
||||
`,
|
||||
RuleLanguage: RegoLanguage,
|
||||
Match: []RuleMatchObjects{
|
||||
{
|
||||
APIVersions: []string{"v1"},
|
||||
APIGroups: []string{"*"},
|
||||
Resources: []string{"pods"},
|
||||
},
|
||||
},
|
||||
RuleDependencies: []RuleDependency{
|
||||
{
|
||||
PackageName: "kubernetes.api.client",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func MockRuleA() *PolicyRule {
|
||||
return &PolicyRule{
|
||||
PortalBase: *armotypes.MockPortalBase("aaaaaaaa-aaaa-aaaa-b39b-8665240080af", AMockControlName, nil),
|
||||
Rule: MockRegoPrivilegedPods(), //
|
||||
RuleLanguage: RegoLanguage,
|
||||
Match: []RuleMatchObjects{
|
||||
{
|
||||
APIVersions: []string{"v1"},
|
||||
APIGroups: []string{"*"},
|
||||
Resources: []string{"pods"},
|
||||
},
|
||||
},
|
||||
RuleDependencies: []RuleDependency{
|
||||
{
|
||||
PackageName: "kubernetes.api.client",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func MockRuleB() *PolicyRule {
|
||||
return &PolicyRule{
|
||||
PortalBase: *armotypes.MockPortalBase("bbbbbbbb-aaaa-aaaa-b39b-8665240080af", AMockControlName, nil),
|
||||
Rule: MockExternalFacingService(), //
|
||||
RuleLanguage: RegoLanguage,
|
||||
Match: []RuleMatchObjects{
|
||||
{
|
||||
APIVersions: []string{"v1"},
|
||||
APIGroups: []string{""},
|
||||
Resources: []string{"pods"},
|
||||
},
|
||||
},
|
||||
RuleDependencies: []RuleDependency{
|
||||
{
|
||||
PackageName: "kubernetes.api.client",
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func MockPolicyNotificationA() *PolicyNotification {
|
||||
return &PolicyNotification{
|
||||
NotificationType: TypeExecPostureScan,
|
||||
ReportID: AMockReportID,
|
||||
JobID: AMockJobID,
|
||||
Designators: armotypes.PortalDesignator{},
|
||||
Rules: []PolicyIdentifier{
|
||||
{
|
||||
Kind: KindFramework,
|
||||
Name: AMockFrameworkName,
|
||||
}},
|
||||
}
|
||||
}
|
||||
|
||||
func MockTemp() string {
|
||||
return `
|
||||
package armo_builtins
|
||||
import data.kubernetes.api.client as client
|
||||
deny[msga] {
|
||||
#object := input[_]
|
||||
object := client.query_all("pods")
|
||||
obj := object.body.items[_]
|
||||
msga := {
|
||||
"packagename": "armo_builtins",
|
||||
"alertMessage": "found object",
|
||||
"alertScore": 3,
|
||||
"alertObject": {"object": obj},
|
||||
}
|
||||
}
|
||||
`
|
||||
}
|
||||
|
||||
func MockRegoPrivilegedPods() string {
|
||||
return `package armo_builtins
|
||||
|
||||
import data.kubernetes.api.client as client
|
||||
|
||||
# Deny mutating action unless user is in group owning the resource
|
||||
|
||||
#privileged pods
|
||||
deny[msga] {
|
||||
|
||||
pod := input[_]
|
||||
containers := pod.spec.containers[_]
|
||||
containers.securityContext.privileged == true
|
||||
msga := {
|
||||
"packagename": "armo_builtins",
|
||||
"alertMessage": sprintf("the following pods are defined as privileged: %v", [pod]),
|
||||
"alertScore": 3,
|
||||
"alertObject": pod,
|
||||
}
|
||||
}
|
||||
|
||||
#handles majority of workload resources
|
||||
deny[msga] {
|
||||
wl := input[_]
|
||||
spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"}
|
||||
spec_template_spec_patterns[wl.kind]
|
||||
containers := wl.spec.template.spec.containers[_]
|
||||
containers.securityContext.privileged == true
|
||||
msga := {
|
||||
"packagename": "armo_builtins",
|
||||
"alertMessage": sprintf("the following workloads are defined as privileged: %v", [wl]),
|
||||
"alertScore": 3,
|
||||
"alertObject": wl,
|
||||
}
|
||||
}
|
||||
|
||||
#handles cronjob
|
||||
deny[msga] {
|
||||
wl := input[_]
|
||||
wl.kind == "CronJob"
|
||||
containers := wl.spec.jobTemplate.spec.template.spec.containers[_]
|
||||
containers.securityContext.privileged == true
|
||||
msga := {
|
||||
"packagename": "armo_builtins",
|
||||
"alertMessage": sprintf("the following cronjobs are defined as privileged: %v", [wl]),
|
||||
"alertScore": 3,
|
||||
"alertObject": wl,
|
||||
}
|
||||
}
|
||||
`
|
||||
}
|
||||
|
||||
func MockExternalFacingService() string {
|
||||
return "\n\tpackage armo_builtins\n\n\timport data.kubernetes.api.client as client\n\timport data.cautils as cautils\n\ndeny[msga] {\n\n\twl := input[_]\n\tcluster_resource := client.query_all(\n\t\t\"services\"\n\t)\n\n\tlabels := wl.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n \n#service := cluster_resource.body.items[i]\nservices := [svc | cluster_resource.body.items[i].metadata.namespace == wl.metadata.namespace; svc := cluster_resource.body.items[i]]\nservice := services[_]\nnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\nnp_or_lb[service.spec.type]\ncautils.is_subobject(service.spec.selector,filtered_labels)\n\n msga := {\n\t\t\"alertMessage\": sprintf(\"%v pod %v expose external facing service: %v\",[wl.metadata.namespace, wl.metadata.name, service.metadata.name]),\n\t\t\"alertScore\": 2,\n\t\t\"packagename\": \"armo_builtins\",\n\t\t\"alertObject\": {\"srvc\":service}\n\t}\n}\n\t"
|
||||
}
|
||||
func GetRuntimePods() string {
|
||||
return `
|
||||
package armo_builtins
|
||||
|
||||
import data.kubernetes.api.client as client
|
||||
|
||||
|
||||
deny[msga] {
|
||||
|
||||
|
||||
cluster_resource := client.query_all(
|
||||
"pods"
|
||||
)
|
||||
|
||||
pod := cluster_resource.body.items[i]
|
||||
msga := {
|
||||
"alertMessage": "got something",
|
||||
"alertScore": 2,
|
||||
"packagename": "armo_builtins",
|
||||
"alertObject": {"pod": pod}
|
||||
}
|
||||
}
|
||||
|
||||
`
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package opapolicy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMockPolicyNotificationA(t *testing.T) {
|
||||
policy := MockPolicyNotificationA()
|
||||
bp, err := json.Marshal(policy)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
t.Logf("%s\n", string(bp))
|
||||
// t.Errorf("%s\n", string(bp))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestMockFrameworkA(t *testing.T) {
|
||||
policy := MockFrameworkA()
|
||||
bp, err := json.Marshal(policy)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
t.Logf("%s\n", string(bp))
|
||||
// t.Errorf("%s\n", string(bp))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestMockPostureReportA(t *testing.T) {
|
||||
policy := MockPostureReportA()
|
||||
bp, err := json.Marshal(policy)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
} else {
|
||||
// t.Errorf("%s\n", string(bp))
|
||||
t.Logf("%s\n", string(bp))
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,198 +0,0 @@
|
||||
package opapolicy
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
func (pn *PolicyNotification) ToJSONBytesBuffer() (*bytes.Buffer, error) {
|
||||
res, err := json.Marshal(pn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bytes.NewBuffer(res), err
|
||||
}
|
||||
|
||||
func (RuleResponse *RuleResponse) GetSingleResultStatus() string {
|
||||
if RuleResponse.Exception != nil {
|
||||
if RuleResponse.Exception.IsAlertOnly() {
|
||||
return "warning"
|
||||
}
|
||||
if RuleResponse.Exception.IsDisable() {
|
||||
return "ignore"
|
||||
}
|
||||
}
|
||||
return "failed"
|
||||
|
||||
}
|
||||
|
||||
func (ruleReport *RuleReport) GetRuleStatus() (string, []RuleResponse, []RuleResponse) {
|
||||
if len(ruleReport.RuleResponses) == 0 {
|
||||
return "success", nil, nil
|
||||
}
|
||||
exceptions := make([]RuleResponse, 0)
|
||||
failed := make([]RuleResponse, 0)
|
||||
|
||||
for _, rule := range ruleReport.RuleResponses {
|
||||
if rule.ExceptionName != "" {
|
||||
exceptions = append(exceptions, rule)
|
||||
} else if rule.Exception != nil {
|
||||
exceptions = append(exceptions, rule)
|
||||
} else {
|
||||
failed = append(failed, rule)
|
||||
}
|
||||
}
|
||||
|
||||
status := "failed"
|
||||
if len(failed) == 0 && len(exceptions) > 0 {
|
||||
status = "warning"
|
||||
}
|
||||
return status, failed, exceptions
|
||||
}
|
||||
|
||||
func (controlReport *ControlReport) GetNumberOfResources() int {
|
||||
sum := 0
|
||||
for i := range controlReport.RuleReports {
|
||||
sum += controlReport.RuleReports[i].GetNumberOfResources()
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
func (controlReport *ControlReport) GetNumberOfFailedResources() int {
|
||||
sum := 0
|
||||
for i := range controlReport.RuleReports {
|
||||
sum += controlReport.RuleReports[i].GetNumberOfFailedResources()
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
func (controlReport *ControlReport) GetNumberOfWarningResources() int {
|
||||
sum := 0
|
||||
for i := range controlReport.RuleReports {
|
||||
sum += controlReport.RuleReports[i].GetNumberOfWarningResources()
|
||||
}
|
||||
return sum
|
||||
}
|
||||
func (controlReport *ControlReport) ListControlsInputKinds() []string {
|
||||
listControlsInputKinds := []string{}
|
||||
for i := range controlReport.RuleReports {
|
||||
listControlsInputKinds = append(listControlsInputKinds, controlReport.RuleReports[i].ListInputKinds...)
|
||||
}
|
||||
return listControlsInputKinds
|
||||
}
|
||||
|
||||
func (controlReport *ControlReport) Passed() bool {
|
||||
for i := range controlReport.RuleReports {
|
||||
if len(controlReport.RuleReports[i].RuleResponses) != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (controlReport *ControlReport) Warning() bool {
|
||||
if controlReport.Passed() || controlReport.Failed() {
|
||||
return false
|
||||
}
|
||||
for i := range controlReport.RuleReports {
|
||||
if status, _, _ := controlReport.RuleReports[i].GetRuleStatus(); status == "warning" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (controlReport *ControlReport) Failed() bool {
|
||||
if controlReport.Passed() {
|
||||
return false
|
||||
}
|
||||
for i := range controlReport.RuleReports {
|
||||
if status, _, _ := controlReport.RuleReports[i].GetRuleStatus(); status == "failed" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (ruleReport *RuleReport) GetNumberOfResources() int {
|
||||
return len(ruleReport.ListInputResources)
|
||||
}
|
||||
|
||||
func (ruleReport *RuleReport) GetNumberOfFailedResources() int {
|
||||
sum := 0
|
||||
for i := len(ruleReport.RuleResponses) - 1; i >= 0; i-- {
|
||||
if ruleReport.RuleResponses[i].GetSingleResultStatus() == "failed" {
|
||||
sum += len(ruleReport.RuleResponses[i].AlertObject.K8SApiObjects)
|
||||
}
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
func (ruleReport *RuleReport) GetNumberOfWarningResources() int {
|
||||
sum := 0
|
||||
for i := range ruleReport.RuleResponses {
|
||||
if ruleReport.RuleResponses[i].GetSingleResultStatus() == "warning" {
|
||||
sum += len(ruleReport.RuleResponses[i].AlertObject.K8SApiObjects)
|
||||
}
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
func (postureReport *PostureReport) RemoveData() {
|
||||
for i := range postureReport.FrameworkReports {
|
||||
postureReport.FrameworkReports[i].RemoveData()
|
||||
}
|
||||
}
|
||||
func (frameworkReport *FrameworkReport) RemoveData() {
|
||||
for i := range frameworkReport.ControlReports {
|
||||
frameworkReport.ControlReports[i].RemoveData()
|
||||
}
|
||||
}
|
||||
func (controlReport *ControlReport) RemoveData() {
|
||||
for i := range controlReport.RuleReports {
|
||||
controlReport.RuleReports[i].RemoveData()
|
||||
}
|
||||
}
|
||||
|
||||
func (ruleReport *RuleReport) RemoveData() {
|
||||
for i := range ruleReport.RuleResponses {
|
||||
ruleReport.RuleResponses[i].RemoveData()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RuleResponse) RemoveData() {
|
||||
r.AlertObject.ExternalObjects = nil
|
||||
|
||||
keepFields := []string{"kind", "apiVersion", "metadata"}
|
||||
keepMetadataFields := []string{"name", "namespace", "labels"}
|
||||
|
||||
for i := range r.AlertObject.K8SApiObjects {
|
||||
deleteFromMap(r.AlertObject.K8SApiObjects[i], keepFields)
|
||||
for k := range r.AlertObject.K8SApiObjects[i] {
|
||||
if k == "metadata" {
|
||||
if b, ok := r.AlertObject.K8SApiObjects[i][k].(map[string]interface{}); ok {
|
||||
deleteFromMap(b, keepMetadataFields)
|
||||
r.AlertObject.K8SApiObjects[i][k] = b
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func deleteFromMap(m map[string]interface{}, keepFields []string) {
|
||||
for k := range m {
|
||||
if StringInSlice(keepFields, k) {
|
||||
continue
|
||||
}
|
||||
delete(m, k)
|
||||
}
|
||||
}
|
||||
|
||||
func StringInSlice(strSlice []string, str string) bool {
|
||||
for i := range strSlice {
|
||||
if strSlice[i] == str {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package opapolicy
|
||||
|
||||
import (
|
||||
"github.com/francoispqt/gojay"
|
||||
"time"
|
||||
)
|
||||
|
||||
/*
|
||||
responsible on fast unmarshaling of various COMMON containerscan structures and substructures
|
||||
|
||||
*/
|
||||
// UnmarshalJSONObject - File inside a pkg
|
||||
func (r *PostureReport) UnmarshalJSONObject(dec *gojay.Decoder, key string) (err error) {
|
||||
|
||||
switch key {
|
||||
case "customerGUID":
|
||||
err = dec.String(&(r.CustomerGUID))
|
||||
|
||||
case "clusterName":
|
||||
err = dec.String(&(r.ClusterName))
|
||||
|
||||
case "reportID":
|
||||
err = dec.String(&(r.ReportID))
|
||||
case "jobID":
|
||||
err = dec.String(&(r.JobID))
|
||||
case "generationTime":
|
||||
err = dec.Time(&(r.ReportGenerationTime), time.RFC3339)
|
||||
r.ReportGenerationTime = r.ReportGenerationTime.Local()
|
||||
}
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
// func (files *PkgFiles) UnmarshalJSONArray(dec *gojay.Decoder) error {
|
||||
// lae := PackageFile{}
|
||||
// if err := dec.Object(&lae); err != nil {
|
||||
// return err
|
||||
// }
|
||||
|
||||
// *files = append(*files, lae)
|
||||
// return nil
|
||||
// }
|
||||
|
||||
func (file *PostureReport) NKeys() int {
|
||||
return 0
|
||||
}
|
||||
//------------------------
|
||||
@@ -1,219 +0,0 @@
|
||||
package resources
|
||||
|
||||
var RegoCAUtils = `
|
||||
package cautils
|
||||
|
||||
list_contains(lista,element) {
|
||||
some i
|
||||
lista[i] == element
|
||||
}
|
||||
|
||||
# getPodName(metadata) = name {
|
||||
# name := metadata.generateName
|
||||
#}
|
||||
getPodName(metadata) = name {
|
||||
name := metadata.name
|
||||
}
|
||||
|
||||
#returns subobject ,sub1 is partial to parent, e.g parent = {a:a,b:b,c:c,d:d}
|
||||
# sub1 = {b:b,c:c} - result is {b:b,c:c}, if sub1={b:b,e:f} returns {b:b}
|
||||
object_intersection(parent,sub1) = r{
|
||||
|
||||
r := {k:p | p := sub1[k]
|
||||
parent[k]== p
|
||||
}
|
||||
}
|
||||
|
||||
#returns if parent contains sub(both are objects not sets!!)
|
||||
is_subobject(sub,parent) {
|
||||
object_intersection(sub,parent) == sub
|
||||
}
|
||||
`
|
||||
|
||||
var RegoDesignators = `
|
||||
package designators
|
||||
|
||||
import data.cautils
|
||||
#functions that related to designators
|
||||
|
||||
#allowed_namespace
|
||||
#@input@: receive as part of the input object "included_namespaces" list
|
||||
#@input@: item's namespace as "namespace"
|
||||
#returns true if namespace exists in that list
|
||||
included_namespaces(namespace){
|
||||
cautils.list_contains(["default"],namespace)
|
||||
}
|
||||
|
||||
#forbidden_namespaces
|
||||
#@input@: receive as part of the input object "forbidden_namespaces" list
|
||||
#@input@: item's namespace as "namespace"
|
||||
#returns true if namespace exists in that list
|
||||
excluded_namespaces(namespace){
|
||||
not cautils.list_contains(["excluded"],namespace)
|
||||
}
|
||||
|
||||
forbidden_wlids(wlid){
|
||||
input.forbidden_wlids[_] == wlid
|
||||
}
|
||||
|
||||
filter_k8s_object(obj) = filtered {
|
||||
#put
|
||||
filtered := obj
|
||||
#filtered := [ x | cautils.list_contains(["default"],obj[i].metadata.namespace) ; x := obj[i] ]
|
||||
# filtered := [ x | not cautils.list_contains([],filter1Set[i].metadata.namespace); x := filter1Set[i]]
|
||||
|
||||
}
|
||||
`
|
||||
var RegoKubernetesApiClient = `
|
||||
package kubernetes.api.client
|
||||
|
||||
# service account token
|
||||
token := data.k8sconfig.token
|
||||
|
||||
# Cluster host
|
||||
host := data.k8sconfig.host
|
||||
|
||||
# default certificate path
|
||||
# crt_file := "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
|
||||
crt_file := data.k8sconfig.crtfile
|
||||
|
||||
client_crt_file := data.k8sconfig.clientcrtfile
|
||||
client_key_file := data.k8sconfig.clientkeyfile
|
||||
|
||||
|
||||
# This information could be retrieved from the kubernetes API
|
||||
# too, but would essentially require a request per API group,
|
||||
# so for now use a lookup table for the most common resources.
|
||||
resource_group_mapping := {
|
||||
"services": "api/v1",
|
||||
"pods": "api/v1",
|
||||
"configmaps": "api/v1",
|
||||
"secrets": "api/v1",
|
||||
"persistentvolumeclaims": "api/v1",
|
||||
"daemonsets": "apis/apps/v1",
|
||||
"deployments": "apis/apps/v1",
|
||||
"statefulsets": "apis/apps/v1",
|
||||
"horizontalpodautoscalers": "api/autoscaling/v1",
|
||||
"jobs": "apis/batch/v1",
|
||||
"cronjobs": "apis/batch/v1beta1",
|
||||
"ingresses": "api/extensions/v1beta1",
|
||||
"replicasets": "apis/apps/v1",
|
||||
"networkpolicies": "apis/networking.k8s.io/v1",
|
||||
"clusterroles": "apis/rbac.authorization.k8s.io/v1",
|
||||
"clusterrolebindings": "apis/rbac.authorization.k8s.io/v1",
|
||||
"roles": "apis/rbac.authorization.k8s.io/v1",
|
||||
"rolebindings": "apis/rbac.authorization.k8s.io/v1",
|
||||
"serviceaccounts": "api/v1"
|
||||
}
|
||||
|
||||
# Query for given resource/name in provided namespace
|
||||
# Example: query_ns("deployments", "my-app", "default")
|
||||
query_name_ns(resource, name, namespace) = http.send({
|
||||
"url": sprintf("%v/%v/namespaces/%v/%v/%v", [
|
||||
host,
|
||||
resource_group_mapping[resource],
|
||||
namespace,
|
||||
resource,
|
||||
name,
|
||||
]),
|
||||
"method": "get",
|
||||
"headers": {"authorization": token},
|
||||
"tls_client_cert_file": client_crt_file,
|
||||
"tls_client_key_file": client_key_file,
|
||||
"tls_ca_cert_file": crt_file,
|
||||
"raise_error": true,
|
||||
})
|
||||
|
||||
# Query for given resource type using label selectors
|
||||
# https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api
|
||||
# Example: query_label_selector_ns("deployments", {"app": "opa-kubernetes-api-client"}, "default")
|
||||
query_label_selector_ns(resource, selector, namespace) = http.send({
|
||||
"url": sprintf("%v/%v/namespaces/%v/%v?labelSelector=%v", [
|
||||
host,
|
||||
resource_group_mapping[resource],
|
||||
namespace,
|
||||
resource,
|
||||
label_map_to_query_string(selector),
|
||||
]),
|
||||
"method": "get",
|
||||
"headers": {"authorization": token},
|
||||
"tls_client_cert_file": client_crt_file,
|
||||
"tls_client_key_file": client_key_file,
|
||||
"tls_ca_cert_file": crt_file,
|
||||
"raise_error": true,
|
||||
})
|
||||
|
||||
# x := field_transform_to_qry_param("spec.selector",input)
|
||||
# input = {"app": "acmefit", "service": "catalog-db"}
|
||||
# result: "spec.selector.app%3Dacmefit,spec.selector.service%3Dcatalog-db"
|
||||
|
||||
|
||||
query_field_selector_ns(resource, field, selector, namespace) = http.send({
|
||||
"url": sprintf("%v/%v/namespaces/%v/%v?fieldSelector=%v", [
|
||||
host,
|
||||
resource_group_mapping[resource],
|
||||
namespace,
|
||||
resource,
|
||||
field_transform_to_qry_param(field,selector),
|
||||
]),
|
||||
"method": "get",
|
||||
"headers": {"authorization": token},
|
||||
"tls_client_cert_file": client_crt_file,
|
||||
"tls_client_key_file": client_key_file,
|
||||
"tls_ca_cert_file": crt_file,
|
||||
"raise_error": true,
|
||||
|
||||
})
|
||||
|
||||
# # Query for all resources of type resource in all namespaces
|
||||
# # Example: query_all("deployments")
|
||||
# query_all(resource) = http.send({
|
||||
# "url": sprintf("https://%v:%v/%v/%v", [
|
||||
# ip,
|
||||
# port,
|
||||
# resource_group_mapping[resource],
|
||||
# resource,
|
||||
# ]),
|
||||
# "method": "get",
|
||||
# "headers": {"authorization": sprintf("Bearer %v", [token])},
|
||||
# "tls_client_cert_file": crt_file,
|
||||
# "raise_error": true,
|
||||
# })
|
||||
|
||||
# Query for all resources of type resource in all namespaces
|
||||
# Example: query_all("deployments")
|
||||
query_all(resource) = http.send({
|
||||
"url": sprintf("%v/%v/%v", [
|
||||
host,
|
||||
resource_group_mapping[resource],
|
||||
resource,
|
||||
]),
|
||||
"method": "get",
|
||||
"headers": {"authorization": token},
|
||||
"tls_client_cert_file": client_crt_file,
|
||||
"tls_client_key_file": client_key_file,
|
||||
"tls_ca_cert_file": crt_file,
|
||||
"raise_error": true,
|
||||
})
|
||||
|
||||
|
||||
|
||||
# Query for all resources of type resource in all namespaces - without authentication
|
||||
# Example: query_all("deployments")
|
||||
query_all_no_auth(resource) = http.send({
|
||||
"url": sprintf("%v/%v/namespaces/default/%v", [
|
||||
host,
|
||||
resource_group_mapping[resource],
|
||||
resource,
|
||||
]),
|
||||
"method": "get",
|
||||
"raise_error": true,
|
||||
"tls_insecure_skip_verify" : true,
|
||||
})
|
||||
|
||||
field_transform_to_qry_param(field,map) = finala {
|
||||
mid := {concat(".",[field,key]): val | val := map[key]}
|
||||
finala := label_map_to_query_string(mid)
|
||||
}
|
||||
label_map_to_query_string(map) = concat(",", [str | val := map[key]; str := concat("%3D", [key, val])])
|
||||
`
|
||||
@@ -1,20 +0,0 @@
|
||||
package armo_builtins
|
||||
|
||||
# import data.kubernetes.api.client as client
|
||||
import data.cautils as cautils
|
||||
|
||||
|
||||
# alert cronjobs
|
||||
|
||||
#handles cronjob
|
||||
deny[msga] {
|
||||
|
||||
wl := input[_]
|
||||
wl.kind == "CronJob"
|
||||
msga := {
|
||||
"alertMessage": sprintf("the following cronjobs are defined: %v", [wl]),
|
||||
"alertScore": 2,
|
||||
"packagename": "armo_builtins",
|
||||
"alertObject": wl
|
||||
}
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
package armo_builtins
|
||||
|
||||
import data.kubernetes.api.client as client
|
||||
|
||||
|
||||
# input: pod
|
||||
# apiversion: v1
|
||||
# does:
|
||||
# returns the external facing services of that pod
|
||||
#
|
||||
#
|
||||
deny[msga] {
|
||||
pod := input[_]
|
||||
podns := pod.metadata.namespace
|
||||
podname := getName(pod.metadata)
|
||||
# pod := client.query_name_ns("pods","frontend-86c5ffb485-kfp9d", "default")
|
||||
labels := pod.body.metadata.labels
|
||||
filtered_labels := json.remove(labels, ["pod-template-hash"])
|
||||
|
||||
cluster_resource := client.query_all(
|
||||
"services"
|
||||
)
|
||||
|
||||
|
||||
services := [svc | cluster_resource.body.items[i].metadata.namespace == podns; svc := cluster_resource.body.items[i]]
|
||||
service := services[_]
|
||||
np_or_lb := {"NodePort", "LoadBalancer"}
|
||||
np_or_lb[service.spec.type]
|
||||
service.spec.selector == filtered_labels
|
||||
|
||||
msga := {
|
||||
"packagename": "armo_builtins",
|
||||
"alertMessage": sprintf("pod %v/%v exposed services: %v\n", [podns,podname,service]),
|
||||
"alertScore": 7,
|
||||
"alertObject": {"service":service,"labels":filtered_labels, "podname":podname,"namespace":podns}
|
||||
}
|
||||
}
|
||||
|
||||
getName(metadata) = name {
|
||||
name := metadata.generateName
|
||||
}
|
||||
getName(metadata) = name {
|
||||
name := metadata.name
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
package armo_builtins
|
||||
#import data.kubernetes.api.client as client
|
||||
import data.cautils as cautils
|
||||
|
||||
# input: pod
|
||||
# apiversion: v1
|
||||
# does:
|
||||
# returns hostPath volumes
|
||||
#
|
||||
#
|
||||
deny[msga] {
|
||||
pod := input[_]
|
||||
pod.kind == "Pod"
|
||||
volumes := pod.spec.volumes
|
||||
volume := volumes[_]
|
||||
# crsrcs.body.spec.containers[_].volumeMounts[_].name = volume.name
|
||||
volume.hostPath
|
||||
podname := cautils.getPodName(pod.metadata)
|
||||
obj := {"volume":volume,"podname": podname}
|
||||
|
||||
msga := {
|
||||
"packagename": "armo_builtins",
|
||||
"alertMessage": sprintf("pod: %v has {%v,%v} ashostPath volume \n\n\n", [podname, volume]),
|
||||
"alertScore": 7,
|
||||
"alertObject": [obj]
|
||||
}
|
||||
}
|
||||
|
||||
isRWMount(mount) {
|
||||
not mount.readOnly
|
||||
}
|
||||
isRWMount(mount) {
|
||||
mount.readOnly == false
|
||||
}
|
||||
|
||||
|
||||
#handles majority of workload resources
|
||||
deny[msga] {
|
||||
|
||||
wl := input[_]
|
||||
spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"}
|
||||
spec_template_spec_patterns[wl.kind]
|
||||
volumes := wl.spec.template.spec.volumes
|
||||
volume := volumes[_]
|
||||
volume.hostPath
|
||||
wlname := cautils.getPodName(wl.metadata)
|
||||
obj := {"volume":volume,"podname": wlname}
|
||||
|
||||
msga := {
|
||||
"packagename": "armo_builtins",
|
||||
"alertMessage": sprintf("%v: %v has {%v,%v} as hostPath volume\n\n\n", [wl.kind,wlname, volume]),
|
||||
"alertScore": 7,
|
||||
"alertObject": [obj]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,56 +0,0 @@
|
||||
package armo_builtins
|
||||
|
||||
#import data.kubernetes.api.client as client
|
||||
|
||||
|
||||
# Deny mutating action unless user is in group owning the resource
|
||||
|
||||
|
||||
#privileged pods
|
||||
deny[msga] {
|
||||
|
||||
|
||||
pod := input[_]
|
||||
containers := pod.spec.containers[_]
|
||||
containers.securityContext.privileged == true
|
||||
msga := {
|
||||
"packagename": "armo_builtins",
|
||||
"alertMessage": sprintf("the following pods are defined as privileged: %v", [pod]),
|
||||
"alertScore": 3,
|
||||
"alertObject": pod,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#handles majority of workload resources
|
||||
deny[msga] {
|
||||
|
||||
wl := input[_]
|
||||
spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"}
|
||||
spec_template_spec_patterns[wl.kind]
|
||||
containers := wl.spec.template.spec.containers[_]
|
||||
containers.securityContext.privileged == true
|
||||
msga := {
|
||||
"packagename": "armo_builtins",
|
||||
"alertMessage": sprintf("the following workloads are defined as privileged: %v", [wl]),
|
||||
"alertScore": 3,
|
||||
"alertObject": wl,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
#handles cronjob
|
||||
deny[msga] {
|
||||
|
||||
wl := input[_]
|
||||
wl.kind == "CronJob"
|
||||
containers := wl.spec.jobTemplate.spec.template.spec.containers[_]
|
||||
containers.securityContext.privileged == true
|
||||
msga := {
|
||||
"packagename": "armo_builtins",
|
||||
"alertMessage": sprintf("the following cronjobs are defined as privileged: %v", [wl]),
|
||||
"alertScore": 3,
|
||||
"alertObject": wl,
|
||||
}
|
||||
}
|
||||
@@ -1,98 +0,0 @@
|
||||
package armo_builtins
|
||||
import data.kubernetes.api.client as client
|
||||
import data.cautils as cautils
|
||||
|
||||
|
||||
# input: None
|
||||
# apiversion: v1
|
||||
# does:
|
||||
# returns roles+ related subjects in rolebinding
|
||||
|
||||
|
||||
deny[msga] {
|
||||
# rsrc := client.query_all("roles")
|
||||
# role := rsrc.body.items[_]
|
||||
role := input[_]
|
||||
role.kind == "Role"
|
||||
rule := role.rules[_]
|
||||
cautils.list_contains(rule.resources,"secrets")
|
||||
canViewSecrets(rule)
|
||||
rbsrc := client.query_all("rolebindings")
|
||||
rolebinding := rbsrc.body.items[_]
|
||||
rolebinding.roleRef.kind == "Role"
|
||||
rolebinding.roleRef.name == role.metadata.name
|
||||
|
||||
|
||||
msga := {
|
||||
"alertMessage": sprintf("the following users: %v , got read secret access roles", [rolebinding.subjects]),
|
||||
"alertScore": 9,
|
||||
"packagename": "armo_builtins",
|
||||
"alertObject": {"role":role,"users":rolebinding.subjects}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
# input: None
|
||||
# apiversion: v1
|
||||
# does:
|
||||
# returns clusterroles+ related subjects in rolebinding
|
||||
|
||||
|
||||
deny[msga] {
|
||||
# rsrc := client.query_all("clusterroles")
|
||||
# role := rsrc.body.items[_]
|
||||
role := input[_]
|
||||
role.kind == "ClusterRole"
|
||||
rule := role.rules[_]
|
||||
cautils.list_contains(rule.resources,"secrets")
|
||||
canViewSecrets(rule)
|
||||
rbsrc := client.query_all("rolebindings")
|
||||
rolebinding := rbsrc.body.items[_]
|
||||
rolebinding.roleRef.kind == "ClusterRole"
|
||||
rolebinding.roleRef.name == role.metadata.name
|
||||
|
||||
|
||||
msga := {
|
||||
"alertMessage": sprintf("the following users: %v , got read secret access roles", [rolebinding.subjects]),
|
||||
"alertScore": 9,
|
||||
"packagename": "armo_builtins",
|
||||
"alertObject": {"clusterrole":role,"users":rolebinding.subjects}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
# input: None
|
||||
# apiversion: v1
|
||||
# does:
|
||||
# returns clusterroles+ related subjects in clusterrolebinding
|
||||
#
|
||||
#
|
||||
deny[msga] {
|
||||
# rsrc := client.query_all("clusterroles")
|
||||
# role := rsrc.body.items[_]
|
||||
role := input[_]
|
||||
role.kind == "ClusterRole"
|
||||
rule := role.rules[_]
|
||||
cautils.list_contains(rule.resources,"secrets")
|
||||
canViewSecrets(rule)
|
||||
rbsrc := client.query_all("clusterrolebindings")
|
||||
rolebinding := rbsrc.body.items[_]
|
||||
rolebinding.roleRef.kind == "ClusterRole"
|
||||
rolebinding.roleRef.name == role.metadata.name
|
||||
|
||||
|
||||
msga := {
|
||||
"alertMessage": sprintf("the following users: %v , got read secret access roles", [rolebinding.subjects]),
|
||||
"alertScore": 9,
|
||||
"packagename": "armo_builtins",
|
||||
"alertObject": {"clusterrole":role,"users":rolebinding.subjects}
|
||||
}
|
||||
}
|
||||
|
||||
canViewSecrets(rule) {
|
||||
cautils.list_contains(rule.verbs,"get")
|
||||
}
|
||||
canViewSecrets(rule) {
|
||||
cautils.list_contains(rule.verbs,"watch")
|
||||
}
|
||||
@@ -1,64 +0,0 @@
|
||||
package armo_builtins
|
||||
#import data.kubernetes.api.client as client
|
||||
import data.cautils as cautils
|
||||
|
||||
# input: pod
|
||||
# apiversion: v1
|
||||
# does:
|
||||
# returns rw hostpath volumes of that pod
|
||||
#
|
||||
#
|
||||
deny[msga] {
|
||||
pod := input[_]
|
||||
pod.kind == "Pod"
|
||||
volumes := pod.spec.volumes
|
||||
volume := volumes[_]
|
||||
# crsrcs.body.spec.containers[_].volumeMounts[_].name = volume.name
|
||||
mount := pod.spec.containers[_].volumeMounts[_]
|
||||
mount.name == volume.name
|
||||
volume.hostPath
|
||||
isRWMount(mount)
|
||||
podname := cautils.getPodName(pod.metadata)
|
||||
obj := {"volume":volume,"mount":mount,"podname": podname}
|
||||
|
||||
msga := {
|
||||
"packagename": "armo_builtins",
|
||||
"alertMessage": sprintf("pod: %v has {%v,%v} as rw hostPath volume and volumemount pair\n\n\n", [podname, volume,mount]),
|
||||
"alertScore": 7,
|
||||
"alertObject": [obj],
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
isRWMount(mount) {
|
||||
not mount.readOnly
|
||||
}
|
||||
isRWMount(mount) {
|
||||
mount.readOnly == false
|
||||
}
|
||||
|
||||
|
||||
#handles majority of workload resources
|
||||
deny[msga] {
|
||||
|
||||
wl := input[_]
|
||||
spec_template_spec_patterns := {"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job"}
|
||||
spec_template_spec_patterns[wl.kind]
|
||||
volumes := wl.spec.template.spec.volumes
|
||||
volume := volumes[_]
|
||||
mount := wl.spec.template.spec.containers[_].volumeMounts[_]
|
||||
mount.name == volume.name
|
||||
volume.hostPath
|
||||
isRWMount(mount)
|
||||
wlname := cautils.getPodName(wl.metadata)
|
||||
obj := {"volume":volume,"mount":mount,"podname": wlname}
|
||||
|
||||
msga := {
|
||||
"packagename": "armo_builtins",
|
||||
"alertMessage": sprintf("%v: %v has {%v,%v} as rw hostPath volume and volumemount pair\n\n\n", [wl.kind,wlname, volume,mount]),
|
||||
"alertScore": 7,
|
||||
"alertObject": [obj],
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,57 +0,0 @@
|
||||
package armo_builtins
|
||||
import data.kubernetes.api.client as client
|
||||
import data.cautils as cautils
|
||||
|
||||
# input: pod
|
||||
# apiversion: v1
|
||||
# does:
|
||||
# returns the external facing services of that pod
|
||||
#
|
||||
#
|
||||
deny[msga] {
|
||||
pod := input[_]
|
||||
podns := pod.metadata.namespace
|
||||
podname := cautils.getPodName(pod.metadata)
|
||||
# pod := client.query_name_ns("pods", "catalog-mongo-6f468d99b4-pn242", "default")
|
||||
labels := pod.body.metadata.labels
|
||||
filtered_labels := json.remove(labels, ["pod-template-hash"])
|
||||
|
||||
cluster_resource := client.query_all(
|
||||
"services"
|
||||
)
|
||||
|
||||
services := [svc | cluster_resource.body.items[i].metadata.namespace == podns; svc := cluster_resource.body.items[i]]
|
||||
service := services[_]
|
||||
service.spec.selector == filtered_labels
|
||||
|
||||
hasSSHPorts(service)
|
||||
|
||||
msga := {
|
||||
"alertMessage": sprintf("pod %v/%v exposed by SSH services: %v\n", [podns,podname,service]),
|
||||
"packagename": "armo_builtins",
|
||||
"alertScore": 7,
|
||||
"alertObject": [{"pod":pod,"service":{service}}]
|
||||
}
|
||||
}
|
||||
|
||||
hasSSHPorts(service) {
|
||||
port := service.spec.ports[_]
|
||||
port.port == 22
|
||||
}
|
||||
|
||||
|
||||
hasSSHPorts(service) {
|
||||
port := service.spec.ports[_]
|
||||
port.port == 2222
|
||||
}
|
||||
|
||||
hasSSHPorts(service) {
|
||||
port := service.spec.ports[_]
|
||||
port.targetPort == 22
|
||||
}
|
||||
|
||||
|
||||
hasSSHPorts(service) {
|
||||
port := service.spec.ports[_]
|
||||
port.targetPort == 2222
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
{
|
||||
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
|
||||
"name": "[Builtin] rule-deny-access-to-secrets",
|
||||
"creationTime": "2019-09-04T12:04:58.461455",
|
||||
"description": "determines which users can get/list/watch secrets",
|
||||
"attributes": {
|
||||
"m$K8sThreatMatrix": "Credential Access::List k8s Secrets"
|
||||
},
|
||||
"ruleDependencies": [
|
||||
{
|
||||
"packageName":"cautils"
|
||||
},
|
||||
{
|
||||
"packageName":"kubernetes.api.client"
|
||||
}
|
||||
],
|
||||
"remediation": "",
|
||||
"match": [
|
||||
{
|
||||
"resources": [
|
||||
"Role","ClusterRole"
|
||||
],
|
||||
"apiVersions": [
|
||||
"v1"
|
||||
],
|
||||
"apiGroups": [
|
||||
"rbac.authorization.k8s.io"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ruleLanguage": "Rego",
|
||||
"rule": "\npackage armo_builtins\nimport data.kubernetes.api.client as client\nimport data.cautils as cautils\n\n\n# input: None\n# apiversion: v1\n# does: \n#\treturns roles+ related subjects in rolebinding\n\n\ndeny[msga] {\n\t# rsrc := client.query_all(\"roles\")\n\t# role := rsrc.body.items[_]\n\trole := input[_]\n\trole.kind == \"Role\"\n\trule := role.rules[_]\n\tcautils.list_contains(rule.resources,\"secrets\")\n\tcanViewSecrets(rule)\n\trbsrc := client.query_all(\"rolebindings\")\n\trolebinding := rbsrc.body.items[_]\n\trolebinding.roleRef.kind == \"Role\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following users: %v , got read secret access roles\", [rolebinding.subjects]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 9,\n\t\t\"alertObject\": {\"role\":role,\"users\":rolebinding.subjects}\n\t\n\t}\n}\n\n\n\n# input: None\n# apiversion: v1\n# does: \n#\treturns clusterroles+ related subjects in rolebinding\n\n\ndeny[msga] {\n\t# rsrc := client.query_all(\"clusterroles\")\n\t# role := rsrc.body.items[_]\n\trole := input[_]\n\trole.kind == \"ClusterRole\"\n\trule := role.rules[_]\n\tcautils.list_contains(rule.resources,\"secrets\")\n\tcanViewSecrets(rule)\n\trbsrc := client.query_all(\"rolebindings\")\n\trolebinding := rbsrc.body.items[_]\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following users: %v , got read secret access roles\", [rolebinding.subjects]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 9,\n\t\t\"alertObject\": {\"clusterrole\":role,\"users\":rolebinding.subjects}\n\t\n\t}\n}\n\n\n# input: None\n# apiversion: v1\n# does: \n#\treturns clusterroles+ related subjects in clusterrolebinding\n#\n#\ndeny[msga] {\n\t# rsrc := client.query_all(\"clusterroles\")\n\t# role := rsrc.body.items[_]\n\trole := input[_]\n\trole.kind == 
\"ClusterRole\"\n\trule := role.rules[_]\n\tcautils.list_contains(rule.resources,\"secrets\")\n\tcanViewSecrets(rule)\n\trbsrc := client.query_all(\"clusterrolebindings\")\n\trolebinding := rbsrc.body.items[_]\n\trolebinding.roleRef.kind == \"ClusterRole\"\n\trolebinding.roleRef.name == role.metadata.name\n\t\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"the following users: %v , got read secret access roles\", [rolebinding.subjects]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 9,\n\t\t\"alertObject\": {\"clusterrole\":role,\"users\":rolebinding.subjects}\n\t\n\t}\n}\n\ncanViewSecrets(rule) {\n\tcautils.list_contains(rule.verbs,\"get\")\n}\ncanViewSecrets(rule) {\n\tcautils.list_contains(rule.verbs,\"watch\")\n}\n"
|
||||
}
|
||||
@@ -1,34 +0,0 @@
|
||||
{
|
||||
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
|
||||
"name": "[Builtin] rule-can-ssh-to-pod",
|
||||
"creationTime": "2019-09-04T12:04:58.461455",
|
||||
"description": "denies pods with SSH ports opened(22/222)",
|
||||
"attributes": {
|
||||
"microsoftK8sThreatMatrix": "val1"
|
||||
},
|
||||
"ruleDependencies": [
|
||||
{
|
||||
"packageName":"cautils"
|
||||
},
|
||||
{
|
||||
"packageName":"kubernetes.api.client"
|
||||
}
|
||||
],
|
||||
"remediation": "create a network policy that protects SSH ports",
|
||||
"match": [
|
||||
{
|
||||
"resources": [
|
||||
"Pods"
|
||||
],
|
||||
"apiVersions": [
|
||||
"v1"
|
||||
],
|
||||
"apiGroups": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ruleLanguage": "Rego",
|
||||
"rule": "\npackage armo_builtins\nimport data.kubernetes.api.client as client\nimport data.cautils as cautils\n\n# input: pod\n# apiversion: v1\n# does: \n#\treturns the external facing services of that pod\n#\n#\ndeny[msga] {\n\tpod := input[_]\n\tpodns := pod.metadata.namespace\n\tpodname := cautils.getPodName(pod.metadata)\n\t# pod := client.query_name_ns(\"pods\", \"catalog-mongo-6f468d99b4-pn242\", \"default\")\n\tlabels := pod.body.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n \n\t cluster_resource := client.query_all(\n\t \t\"services\"\n\t )\n\n\tservices := [svc | cluster_resource.body.items[i].metadata.namespace == podns; svc := cluster_resource.body.items[i]]\n\tservice := \tservices[_]\n\tservice.spec.selector == filtered_labels\n \n\thasSSHPorts(service)\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed by SSH services: %v\n\", [podns,podname,service]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": [{\"pod\":pod,\"service\":{service}}]\n\t\n\t}\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.port == 2222\n}\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 22\n}\n\n\nhasSSHPorts(service) {\n\tport := service.spec.ports[_]\n\tport.targetPort == 2222\n}\n"
|
||||
|
||||
}
|
||||
@@ -1,33 +0,0 @@
|
||||
{
|
||||
"guid": "",
|
||||
"name": "[Builtin] rule-identify-blacklisted-image-registries",
|
||||
"creationTime": "",
|
||||
"description": "Identifying if pod container images are from unallowed registries",
|
||||
"attributes": {
|
||||
"m$K8sThreatMatrix": "Initial Access::Compromised images in registry"
|
||||
},
|
||||
"ruleDependencies": [
|
||||
{
|
||||
"packageName": "cautils"
|
||||
},
|
||||
{
|
||||
"packageName": "kubernetes.api.client"
|
||||
}
|
||||
],
|
||||
"remediation": "Use images from safe registry",
|
||||
"match": [
|
||||
{
|
||||
"resources": [
|
||||
"Pods"
|
||||
],
|
||||
"apiVersions": [
|
||||
"v1"
|
||||
],
|
||||
"apiGroups": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ruleLanguage": "Rego",
|
||||
"rule": "\npackage armo_builtins\n# Check for images from blacklisted repos\n\nuntrusted_registries(z) = x {\n\tx := [\"015253967648.dkr.ecr.eu-central-1.amazonaws.com/\"]\t\n}\n\npublic_registries(z) = y{\n\ty := [\"quay.io/kiali/\",\"quay.io/datawire/\",\"quay.io/keycloak/\",\"quay.io/bitnami/\"]\n}\n\nuntrustedImageRepo[msga] {\n\tpod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[_]\n\timage := container.image\n repo_prefix := untrusted_registries(image)[_]\n\tstartswith(image, repo_prefix)\n\tselfLink := pod.metadata.selfLink\n\tcontainerName := container.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' in [%s] comes from untrusted registry\", [image, containerName, selfLink]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 2,\n\t\t\"alertObject\": [{\"pod\":pod}]\n\t}\n}\n\nuntrustedImageRepo[msga] {\n pod := input[_]\n\tk := pod.kind\n\tk == \"Pod\"\n\tcontainer := pod.spec.containers[_]\n\timage := container.image\n repo_prefix := public_registries(image)[_]\n\tstartswith(pod, repo_prefix)\n\tselfLink := input.metadata.selfLink\n\tcontainerName := container.name\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"image '%v' in container '%s' in [%s] comes from public registry\", [image, containerName, selfLink]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 1,\n\t\t\"alertObject\": [{\"pod\":pod}]\n\t}\n}"
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
{
|
||||
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
|
||||
"name": "[Builtin] rule-pod-external-facing",
|
||||
"creationTime": "2019-09-04T12:04:58.461455",
|
||||
"description": "denies pods with external facing services, grabs related services",
|
||||
"attributes": {
|
||||
"microsoftK8sThreatMatrix": "val1"
|
||||
},
|
||||
"ruleDependencies": [
|
||||
{
|
||||
"packageName":"kubernetes.api.client"
|
||||
}
|
||||
],
|
||||
"remediation": "create a network policy that controls which protect your cluster from unwanted connections and the outside world",
|
||||
"match": [
|
||||
{
|
||||
"resources": [
|
||||
"Pods"
|
||||
],
|
||||
"apiVersions": [
|
||||
"v1"
|
||||
],
|
||||
"apiGroups": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ruleLanguage": "Rego",
|
||||
"rule": "\npackage armo_builtins\n\nimport data.kubernetes.api.client as client\n\n\n# input: pod\n# apiversion: v1\n# does: \n#\treturns the external facing services of that pod\n#\n#\ndeny[msga] {\n\tpod := input[_]\n\tpodns := pod.metadata.namespace\n\tpodname := getName(pod.metadata)\n\t# pod := client.query_name_ns(\"pods\",\"frontend-86c5ffb485-kfp9d\", \"default\")\n\tlabels := pod.body.metadata.labels\n\tfiltered_labels := json.remove(labels, [\"pod-template-hash\"])\n \n\t cluster_resource := client.query_all(\n\t \t\"services\"\n\t )\n\n\n\tservices := [svc | cluster_resource.body.items[i].metadata.namespace == podns; svc := cluster_resource.body.items[i]]\n\tservice := \tservices[_]\n\tnp_or_lb := {\"NodePort\", \"LoadBalancer\"}\n\tnp_or_lb[service.spec.type]\n\tservice.spec.selector == filtered_labels\n \n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod %v/%v exposed services: %v\n\", [podns,podname,service]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": {\"service\":service,\"labels\":filtered_labels, \"podname\":podname,\"namespace\":podns}\n\t\n\t}\n}\n\ngetName(metadata) = name {\n\tname := metadata.generateName\n}\ngetName(metadata) = name {\n\tname := metadata.name\n}\n"
|
||||
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
{
|
||||
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
|
||||
"name": "[Builtin] alert-any-hostpath",
|
||||
"creationTime": "2019-09-04T12:04:58.461455",
|
||||
"description": "determines if any workload contains a hostPath volume",
|
||||
"attributes": {
|
||||
"m$K8sThreatMatrix": "Privilege Escalation::hostPath mount"
|
||||
},
|
||||
"ruleDependencies": [
|
||||
{
|
||||
"packageName":"cautils"
|
||||
}
|
||||
],
|
||||
"remediation": "consider if hostPath is really necessary - reading sensitive data like hostPath credentials might endanger cluster, if so consider encrypting the data",
|
||||
|
||||
"match": [
|
||||
{
|
||||
"resources": [
|
||||
"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod"
|
||||
],
|
||||
"apiVersions": [
|
||||
"v1"
|
||||
],
|
||||
"apiGroups": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ruleLanguage": "Rego",
|
||||
"rule": "\npackage armo_builtins\nimport data.kubernetes.api.client as client\nimport data.cautils as cautils\n\n# input: pod\n# apiversion: v1\n# does: \n#\treturns hostPath volumes\n#\n#\ndeny[msga] {\n pod := input[_]\n pod.kind == \"Pod\"\n volumes := pod.spec.volumes\n volume := volumes[_]\n # crsrcs.body.spec.containers[_].volumeMounts[_].name = volume.name\n volume.hostPath\n podname := cautils.getPodName(pod.metadata)\n obj := {\"volume\":volume,\"podname\": podname}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"pod: %v has {%v,%v} ashostPath volume \n\n\n\", [podname, volume]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": [obj],\n\t\n\t}\n}\n\nisRWMount(mount) {\n not mount.readOnly\n}\nisRWMount(mount) {\n mount.readOnly == false\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n volumes := wl.spec.template.spec.volumes\n volume := volumes[_]\n volume.hostPath\n wlname := cautils.getPodName(wl.metadata)\n obj := {\"volume\":volume,\"podname\": wlname}\n\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"%v: %v has {%v,%v} as hostPath volume\n\n\n\", [wl.kind,wlname, volume]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 7,\n\t\t\"alertObject\": [obj],\n\t\n\t}\n}\n\n\n"
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
{
|
||||
"guid": "82f19070-2826-4fe4-a079-f5f7e7a1b04d",
|
||||
"name": "[Builtin] instance-metadata-api-access",
|
||||
"attributes": {
|
||||
"m$K8sThreatMatrix": "Credential Access::Instance Metadata API"
|
||||
},
|
||||
"creationTime": "2021-04-25T10:48:48.861806",
|
||||
"rule": "package armo_builtins\n# Check for images from blacklisted repos\n\nmetadata_azure(z) = http.send({\n\t\"url\": \"http://169.254.169.254/metadata/instance?api-version=2020-09-01\",\n\t\"method\": \"get\",\n\t\"headers\": {\"Metadata\": \"true\"},\n\t\"raise_error\": true,\t\n})\n\nmetadata_gcp(z) = http.send({\n\t\"url\": \"http://169.254.169.254/computeMetadata/v1/?alt=json&recursive=true\",\n\t\"method\": \"get\",\n\t\"headers\": {\"Metadata-Flavor\": \"Google\"},\n\t\"raise_error\": true,\t\n})\n\nmetadata_aws(z) = metadata_object { \n\thostname := http.send({\n\t\"url\": \"http://169.254.169.254/latest/meta-data/local-hostname\",\n\t\"method\": \"get\",\n\t\"raise_error\": true,\t\n })\n\tmetadata_object := {\n\t\t\"raw_body\": hostname.raw_body,\n\t\t\"hostname\" : hostname.raw_body,\n\t\t\"status_code\" : hostname.status_code\n\t}\n}\n\nazure_metadata[msga] {\t\n\tmetadata_object := metadata_azure(\"aaa\")\n\tmetadata_object.status_code == 200\n\tnode_name := metadata_object.body.compute.name\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of Azure.\", [node_name]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 1,\n\t\t\"alertObject\": [{\"nodeMetadata\":metadata_object.body}]\n\t}\n}\n\ngcp_metadata[msga] {\t\n\tmetadata_object := metadata_gcp(\"aaa\")\n\tmetadata_object.status_code == 200\n\tnode_name := metadata_object.body.instance.hostname\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of GCP.\", [node_name]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 1,\n\t\t\"alertObject\": [{\"nodeMetadata\": metadata_object.raw_body}]\n\t}\n}\n\naws_metadata[msga] {\t\n\tmetadata_object := metadata_aws(\"aaa\")\n\tmetadata_object.status_code == 200\n\tnode_name := metadata_object.hostname\n\tmsga := {\n\t\t\"alertMessage\": sprintf(\"Node '%s' has access to Instance Metadata Services of AWS.\", 
[node_name]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 1,\n\t\t\"alertObject\": [{\"nodeMetadata\": metadata_object.raw_body}]\n\t}\n}",
|
||||
"ruleLanguage": "Rego",
|
||||
"match": [
|
||||
{
|
||||
"apiGroups": [
|
||||
"*"
|
||||
],
|
||||
"apiVersions": [
|
||||
"*"
|
||||
],
|
||||
"resources": [
|
||||
"nodes"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ruleDependencies": [],
|
||||
"description": "Checks if there is access from the nodes to cloud prividers instance metadata services",
|
||||
"remediation": "From https://attack.mitre.org/techniques/T1552/005/ :Option A: Disable or Remove Feature or Program, Option B: Filter Network Traffic",
|
||||
"ruleQuery": ""
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
{
|
||||
"guid": "[Builtin] 3b0467c9-488d-c244-99d0-90fbf600aaff",
|
||||
"name": "rule-deny-cronjobs",
|
||||
"creationTime": "2019-09-04T12:04:58.461455",
|
||||
"description": "determines if it's cronjob",
|
||||
"attributes": {
|
||||
"m$K8sThreatMatrix": "Persistence::Cronjob"
|
||||
},
|
||||
"ruleDependencies": [
|
||||
{
|
||||
"packageName":"cautils"
|
||||
}
|
||||
],
|
||||
"remediation": "",
|
||||
"match": [
|
||||
{
|
||||
"resources": [
|
||||
"CronJob"
|
||||
],
|
||||
"apiVersions": [
|
||||
"v1beta1"
|
||||
],
|
||||
"apiGroups": [
|
||||
"batch"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ruleLanguage": "Rego",
|
||||
"rule": "\npackage armo_builtins\n\n# import data.kubernetes.api.client as client\nimport data.cautils as cautils\n\n\n# alert cronjobs\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined: %v\", [wl]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 2,\n\t\t\"alertObject\": wl\n\t\n\t}\n}\n"
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
{
|
||||
"guid": "",
|
||||
"name": "[Builtin] rule-privilege-escalation",
|
||||
"creationTime": "2019-09-04T12:04:58.461455",
|
||||
"description": "determines if pods/deployments defined as privileged true",
|
||||
"attributes": {
|
||||
"mitre": "Privilege Escalation",
|
||||
"mitreCode": "TA0004",
|
||||
"m$K8sThreatMatrix": "Privilege Escalation::privileged container"
|
||||
},
|
||||
"ruleDependencies": [
|
||||
],
|
||||
"remediation": "avoid defining pods as privilleged",
|
||||
"match": [
|
||||
{
|
||||
"resources": [
|
||||
"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod","CronJob"
|
||||
],
|
||||
"apiVersions": [
|
||||
"v1"
|
||||
],
|
||||
"apiGroups": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ruleLanguage": "Rego",
|
||||
"rule": "\npackage armo_builtins\n\nimport data.kubernetes.api.client as client\nimport data.designators as scope\nimport data.cautils as cautils\n\n\n# Deny mutating action unless user is in group owning the resource\n\n\n#privileged pods\ndeny[msga] {\n\n \n\tpod := input[_]\n\tcontainers := pod.spec.containers[_]\n\tcontainers.securityContext.privileged == true\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following pods are defined as privileged: %v\", [pod]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 3,\n\t\t\"alertObject\": pod,\n\t\n\t}\n}\n\n\n#handles majority of workload resources\ndeny[msga] {\n\n\twl := input[_]\n\tspec_template_spec_patterns := {\"Deployment\",\"ReplicaSet\",\"DaemonSet\",\"StatefulSet\",\"Job\"}\n\tspec_template_spec_patterns[wl.kind]\n\tcontainers := wl.spec.template.spec.containers[_]\n\tcontainers.securityContext.privileged == true\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following workloads are defined as privileged: %v\", [wl]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 3,\n\t\t\"alertObject\": wl,\n\t\n\t}\n}\n\n\n\n#handles cronjob\ndeny[msga] {\n\n\twl := input[_]\n\twl.kind == \"CronJob\"\n\tcontainers := wl.spec.jobTemplate.spec.template.spec.containers[_]\n\tcontainers.securityContext.privileged == true\n msga := {\n\t\t\"alertMessage\": sprintf(\"the following cronjobs are defined as privileged: %v\", [wl]),\n\t\t\"alert\": true,\n\t\t\"prevent\": false,\n\t\t\"alertScore\": 3,\n\t\t\"alertObject\": wl,\n\t\n\t}\n}\n\n"
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
{
|
||||
"guid": "3b0467c9-488d-c244-99d0-90fbf600aaff",
|
||||
"name": "[Builtin] alert-rw-hostpath",
|
||||
"creationTime": "2019-09-04T12:04:58.461455",
|
||||
"description": "determines if any workload contains a hostPath volume with rw permissions",
|
||||
"attributes": {
|
||||
"m$K8sThreatMatrix": "Persistance::Writable hostPath mount"
|
||||
},
|
||||
"ruleDependencies": [
|
||||
{
|
||||
"packageName":"cautils"
|
||||
}
|
||||
],
|
||||
"remediation": "consider if hostPath is really necessary- sensitive data like hostPath credentials might endanger cluster, if so consider encrypting the data",
|
||||
|
||||
"match": [
|
||||
{
|
||||
"resources": [
|
||||
"Deployment","ReplicaSet","DaemonSet","StatefulSet","Job","Pod"
|
||||
],
|
||||
"apiVersions": [
|
||||
"v1"
|
||||
],
|
||||
"apiGroups": [
|
||||
"*"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ruleLanguage": "Rego",
|
||||
"rule": "\"\\npackage armo_builtins\\nimport data.kubernetes.api.client as client\\nimport data.cautils as cautils\\n\\n# input: pod\\n# apiversion: v1\\n# does: \\n#\\treturns hostPath volumes\\n#\\n#\\ndeny[msga] {\\n pod := input[_]\\n pod.kind == \\\"Pod\\\"\\n volumes := pod.spec.volumes\\n volume := volumes[_]\\n # crsrcs.body.spec.containers[_].volumeMounts[_].name = volume.name\\n volume.hostPath\\n podname := cautils.getPodName(pod.metadata)\\n obj := {\\\"volume\\\":volume,\\\"podname\\\": podname}\\n\\n\\tmsga := {\\n\\t\\t\\\"alertMessage\\\": sprintf(\\\"pod: %v has {%v,%v} ashostPath volume \\n\\n\\n\\\", [podname, volume]),\\n\\t\\t\\\"alert\\\": true,\\n\\t\\t\\\"prevent\\\": false,\\n\\t\\t\\\"alertScore\\\": 7,\\n\\t\\t\\\"alertObject\\\": [obj],\\n\\t\\n\\t}\\n}\\n\\nisRWMount(mount) {\\n not mount.readOnly\\n}\\nisRWMount(mount) {\\n mount.readOnly == false\\n}\\n\\n\\n#handles majority of workload resources\\ndeny[msga] {\\n\\n\\twl := input[_]\\n\\tspec_template_spec_patterns := {\\\"Deployment\\\",\\\"ReplicaSet\\\",\\\"DaemonSet\\\",\\\"StatefulSet\\\",\\\"Job\\\"}\\n\\tspec_template_spec_patterns[wl.kind]\\n volumes := wl.spec.template.spec.volumes\\n volume := volumes[_]\\n volume.hostPath\\n wlname := cautils.getPodName(wl.metadata)\\n obj := {\\\"volume\\\":volume,\\\"podname\\\": wlname}\\n\\n\\tmsga := {\\n\\t\\t\\\"alertMessage\\\": sprintf(\\\"%v: %v has {%v,%v} as hostPath volume\\n\\n\\n\\\", [wl.kind,wlname, volume]),\\n\\t\\t\\\"alert\\\": true,\\n\\t\\t\\\"prevent\\\": false,\\n\\t\\t\\\"alertScore\\\": 7,\\n\\t\\t\\\"alertObject\\\": [obj],\\n\\t\\n\\t}\\n}\\n\\n\\n\""
|
||||
}
|
||||
@@ -1,118 +0,0 @@
|
||||
package resources
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/k8sinterface"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/open-policy-agent/opa/storage"
|
||||
"github.com/open-policy-agent/opa/storage/inmem"
|
||||
"github.com/open-policy-agent/opa/util"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
var (
|
||||
RegoDependenciesPath = "/resources/rego/dependencies"
|
||||
)
|
||||
|
||||
type RegoDependenciesData struct {
|
||||
K8sConfig RegoK8sConfig `json:"k8sconfig"`
|
||||
}
|
||||
|
||||
type RegoK8sConfig struct {
|
||||
Token string `json:"token"`
|
||||
IP string `json:"ip"`
|
||||
Host string `json:"host"`
|
||||
Port string `json:"port"`
|
||||
CrtFile string `json:"crtfile"`
|
||||
ClientCrtFile string `json:"clientcrtfile"`
|
||||
ClientKeyFile string `json:"clientkeyfile"`
|
||||
// ClientKeyFile string `json:"crtfile"`
|
||||
}
|
||||
|
||||
func NewRegoDependenciesDataMock() *RegoDependenciesData {
|
||||
return NewRegoDependenciesData(k8sinterface.GetK8sConfig())
|
||||
}
|
||||
|
||||
func NewRegoDependenciesData(k8sConfig *rest.Config) *RegoDependenciesData {
|
||||
|
||||
regoDependenciesData := RegoDependenciesData{}
|
||||
|
||||
if k8sConfig != nil {
|
||||
regoDependenciesData.K8sConfig = *NewRegoK8sConfig(k8sConfig)
|
||||
}
|
||||
|
||||
return ®oDependenciesData
|
||||
}
|
||||
func NewRegoK8sConfig(k8sConfig *rest.Config) *RegoK8sConfig {
|
||||
|
||||
host := k8sConfig.Host
|
||||
if host == "" {
|
||||
ip := os.Getenv("KUBERNETES_SERVICE_HOST")
|
||||
port := os.Getenv("KUBERNETES_SERVICE_PORT")
|
||||
host = fmt.Sprintf("https://%s:%s", ip, port)
|
||||
}
|
||||
|
||||
token := ""
|
||||
if k8sConfig.BearerToken != "" {
|
||||
token = fmt.Sprintf("Bearer %s", k8sConfig.BearerToken)
|
||||
}
|
||||
|
||||
regoK8sConfig := RegoK8sConfig{
|
||||
Token: token,
|
||||
Host: host,
|
||||
CrtFile: k8sConfig.CAFile,
|
||||
ClientCrtFile: k8sConfig.CertFile,
|
||||
ClientKeyFile: k8sConfig.KeyFile,
|
||||
}
|
||||
return ®oK8sConfig
|
||||
}
|
||||
func (data *RegoDependenciesData) TOStorage() (storage.Store, error) {
|
||||
var jsonObj map[string]interface{}
|
||||
bytesData, err := json.Marshal(*data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// glog.Infof("RegoDependenciesData: %s", bytesData)
|
||||
if err := util.UnmarshalJSON(bytesData, &jsonObj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return inmem.NewFromObject(jsonObj), nil
|
||||
}
|
||||
|
||||
// LoadRegoDependenciesFromDir loads the policies list from *.rego file in given directory
|
||||
func LoadRegoFiles(dir string) map[string]string {
|
||||
|
||||
modules := make(map[string]string)
|
||||
|
||||
// Compile the module. The keys are used as identifiers in error messages.
|
||||
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
|
||||
if err == nil && strings.HasSuffix(path, ".rego") && !info.IsDir() {
|
||||
content, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
glog.Errorf("LoadRegoFiles, Failed to load: %s: %v", path, err)
|
||||
} else {
|
||||
modules[strings.Trim(filepath.Base(path), ".rego")] = string(content)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return modules
|
||||
}
|
||||
|
||||
// LoadRegoModules loads the policies from variables
|
||||
func LoadRegoModules() map[string]string {
|
||||
|
||||
modules := make(map[string]string)
|
||||
modules["cautils"] = RegoCAUtils
|
||||
modules["designators"] = RegoDesignators
|
||||
modules["kubernetes.api.client"] = RegoKubernetesApiClient
|
||||
|
||||
return modules
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
package resources
|
||||
@@ -4,14 +4,14 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/opapolicy"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
type ScanInfo struct {
|
||||
Getters
|
||||
PolicyIdentifier opapolicy.PolicyIdentifier
|
||||
PolicyIdentifier []reporthandling.PolicyIdentifier
|
||||
UseExceptions string // Load exceptions configuration
|
||||
UseFrom string // Load framework from local file (instead of download). Use when running offline
|
||||
UseFrom []string // Load framework from local file (instead of download). Use when running offline
|
||||
UseDefault bool // Load framework from cached file (instead of download). Use when running offline
|
||||
Format string // Format results (table, json, junit ...)
|
||||
Output string // Store results in an output file, Output file name
|
||||
@@ -19,10 +19,10 @@ type ScanInfo struct {
|
||||
InputPatterns []string // Yaml files input patterns
|
||||
Silent bool // Silent mode - Do not print progress logs
|
||||
FailThreshold uint16 // Failure score threshold
|
||||
DoNotSendResults bool // DEPRECATED
|
||||
Submit bool // Submit results to Armo BE
|
||||
Local bool // Do not submit results
|
||||
Account string // account ID
|
||||
FrameworkScan bool // false if scanning control
|
||||
}
|
||||
|
||||
type Getters struct {
|
||||
@@ -41,23 +41,21 @@ func (scanInfo *ScanInfo) Init() {
|
||||
func (scanInfo *ScanInfo) setUseExceptions() {
|
||||
if scanInfo.UseExceptions != "" {
|
||||
// load exceptions from file
|
||||
scanInfo.ExceptionsGetter = getter.NewLoadPolicy(scanInfo.UseExceptions)
|
||||
scanInfo.ExceptionsGetter = getter.NewLoadPolicy([]string{scanInfo.UseExceptions})
|
||||
} else {
|
||||
scanInfo.ExceptionsGetter = getter.GetArmoAPIConnector()
|
||||
}
|
||||
|
||||
}
|
||||
func (scanInfo *ScanInfo) setUseFrom() {
|
||||
if scanInfo.UseFrom != "" {
|
||||
return
|
||||
}
|
||||
if scanInfo.UseDefault {
|
||||
scanInfo.UseFrom = getter.GetDefaultPath(scanInfo.PolicyIdentifier.Name + ".json")
|
||||
for _, policy := range scanInfo.PolicyIdentifier {
|
||||
scanInfo.UseFrom = append(scanInfo.UseFrom, getter.GetDefaultPath(policy.Name+".json"))
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
func (scanInfo *ScanInfo) setGetter() {
|
||||
if scanInfo.UseFrom != "" {
|
||||
if len(scanInfo.UseFrom) > 0 {
|
||||
// load from file
|
||||
scanInfo.PolicyGetter = getter.NewLoadPolicy(scanInfo.UseFrom)
|
||||
} else {
|
||||
@@ -84,8 +82,3 @@ func (scanInfo *ScanInfo) setOutputFile() {
|
||||
func (scanInfo *ScanInfo) ScanRunningCluster() bool {
|
||||
return len(scanInfo.InputPatterns) == 0
|
||||
}
|
||||
|
||||
// func (scanInfo *ScanInfo) ConnectedToCluster(k8s k8sinterface.) bool {
|
||||
// _, err := k8s.KubernetesClient.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{})
|
||||
// return err == nil
|
||||
// }
|
||||
|
||||
@@ -4,9 +4,10 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/k8sinterface"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
@@ -14,7 +15,7 @@ var getCmd = &cobra.Command{
|
||||
Use: "get <key>",
|
||||
Short: "Get configuration in cluster",
|
||||
Long: ``,
|
||||
ValidArgs: supportedFrameworks,
|
||||
ValidArgs: clihandler.SupportedFrameworks,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 || len(args) > 1 {
|
||||
return fmt.Errorf("requires one argument")
|
||||
@@ -4,9 +4,9 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/k8sinterface"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
90
clihandler/cmd/control.go
Normal file
90
clihandler/cmd/control.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// controlCmd represents the control command
|
||||
var controlCmd = &cobra.Command{
|
||||
Use: "control <control names list>/<control ids list>.\nExamples:\n$ kubescape scan control C-0058,C-0057 [flags]\n$ kubescape scan contol C-0058 [flags]\n$ kubescape scan control 'privileged container,allowed hostpath' [flags]",
|
||||
Short: fmt.Sprintf("The control you wish to use for scan. It must be present in at least one of the folloiwng frameworks: %s", clihandler.ValidFrameworks),
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
controls := strings.Split(args[0], ",")
|
||||
if len(controls) > 1 {
|
||||
if controls[1] == "" {
|
||||
return fmt.Errorf("usage: <control_one>,<control_two>")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("requires at least one control name")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
flagValidationControl()
|
||||
scanInfo.PolicyIdentifier = []reporthandling.PolicyIdentifier{}
|
||||
|
||||
if len(args) < 1 {
|
||||
scanInfo.PolicyIdentifier = SetScanForGivenFrameworks(clihandler.SupportedFrameworks)
|
||||
} else {
|
||||
var controls []string
|
||||
if len(args) > 0 {
|
||||
controls = strings.Split(args[0], ",")
|
||||
scanInfo.PolicyIdentifier = []reporthandling.PolicyIdentifier{}
|
||||
scanInfo.PolicyIdentifier = setScanForFirstControl(controls)
|
||||
}
|
||||
|
||||
if len(controls) > 1 {
|
||||
scanInfo.PolicyIdentifier = SetScanForGivenControls(controls[1:])
|
||||
}
|
||||
}
|
||||
scanInfo.FrameworkScan = false
|
||||
scanInfo.Init()
|
||||
cautils.SetSilentMode(scanInfo.Silent)
|
||||
err := clihandler.CliSetup(&scanInfo)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
scanInfo = cautils.ScanInfo{}
|
||||
scanCmd.AddCommand(controlCmd)
|
||||
}
|
||||
|
||||
func flagValidationControl() {
|
||||
if 100 < scanInfo.FailThreshold {
|
||||
fmt.Println("bad argument: out of range threshold")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func setScanForFirstControl(controls []string) []reporthandling.PolicyIdentifier {
|
||||
newPolicy := reporthandling.PolicyIdentifier{}
|
||||
newPolicy.Kind = reporthandling.KindControl
|
||||
newPolicy.Name = controls[0]
|
||||
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
|
||||
return scanInfo.PolicyIdentifier
|
||||
}
|
||||
|
||||
func SetScanForGivenControls(controls []string) []reporthandling.PolicyIdentifier {
|
||||
for _, control := range controls {
|
||||
control := strings.TrimLeft(control, " ")
|
||||
newPolicy := reporthandling.PolicyIdentifier{}
|
||||
newPolicy.Kind = reporthandling.KindControl
|
||||
newPolicy.Name = control
|
||||
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
|
||||
}
|
||||
return scanInfo.PolicyIdentifier
|
||||
}
|
||||
67
clihandler/cmd/download.go
Normal file
67
clihandler/cmd/download.go
Normal file
@@ -0,0 +1,67 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var downloadInfo cautils.DownloadInfo
|
||||
|
||||
var downloadCmd = &cobra.Command{
|
||||
Use: fmt.Sprintf("download framework/control <framework-name>/<control-name> [flags]\nSupported frameworks: %s", clihandler.ValidFrameworks),
|
||||
Short: "Download framework/control",
|
||||
Long: ``,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 2 {
|
||||
return fmt.Errorf("requires two arguments : framework/control <framework-name>/<control-name>")
|
||||
}
|
||||
if !strings.EqualFold(args[0], "framework") && !strings.EqualFold(args[0], "control") {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: framework, control", args[0])
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if strings.EqualFold(args[0], "framework") {
|
||||
downloadInfo.FrameworkName = strings.ToLower(args[1])
|
||||
g := getter.NewDownloadReleasedPolicy()
|
||||
if downloadInfo.Path == "" {
|
||||
downloadInfo.Path = getter.GetDefaultPath(downloadInfo.FrameworkName + ".json")
|
||||
}
|
||||
frameworks, err := g.GetFramework(downloadInfo.FrameworkName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = getter.SaveFrameworkInFile(frameworks, downloadInfo.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if strings.EqualFold(args[0], "control") {
|
||||
downloadInfo.ControlName = strings.ToLower(args[1])
|
||||
g := getter.NewDownloadReleasedPolicy()
|
||||
if downloadInfo.Path == "" {
|
||||
downloadInfo.Path = getter.GetDefaultPath(downloadInfo.ControlName + ".json")
|
||||
}
|
||||
controls, err := g.GetControl(downloadInfo.ControlName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = getter.SaveControlInFile(controls, downloadInfo.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(downloadCmd)
|
||||
downloadInfo = cautils.DownloadInfo{}
|
||||
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If specified, will store save to `~/.kubescape/<framework name>.json`")
|
||||
}
|
||||
115
clihandler/cmd/framework.go
Normal file
115
clihandler/cmd/framework.go
Normal file
@@ -0,0 +1,115 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var frameworkCmd = &cobra.Command{
|
||||
Use: fmt.Sprintf("framework <framework names list> [`<glob pattern>`/`-`] [flags]\nExamples:\n$ kubescape scan framework nsa [flags]\n$ kubescape scan framework mitre,nsa [flags]\n$ kubescape scan framework 'nsa, mitre' [flags]\nSupported frameworks: %s", clihandler.ValidFrameworks),
|
||||
Short: fmt.Sprintf("The framework you wish to use. Supported frameworks: %s", strings.Join(clihandler.SupportedFrameworks, ", ")),
|
||||
Long: "Execute a scan on a running Kubernetes cluster or `yaml`/`json` files (use glob) or `-` for stdin",
|
||||
ValidArgs: clihandler.SupportedFrameworks,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
// "nsa, mitre" -> ["nsa", "mitre"] and nsa,mitre -> ["nsa", "mitre"]
|
||||
frameworks := strings.Split(strings.Join(strings.Fields(args[0]), ""), ",")
|
||||
for _, framework := range frameworks {
|
||||
if !isValidFramework(strings.ToLower(framework)) {
|
||||
return fmt.Errorf(fmt.Sprintf("supported frameworks: %s", strings.Join(clihandler.SupportedFrameworks, ", ")))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("requires at least one framework name")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
flagValidationFramework()
|
||||
scanInfo.PolicyIdentifier = []reporthandling.PolicyIdentifier{}
|
||||
// If no framework provided, use all
|
||||
if len(args) < 1 {
|
||||
scanInfo.PolicyIdentifier = SetScanForGivenFrameworks(clihandler.SupportedFrameworks)
|
||||
} else {
|
||||
// Read frameworks from input args
|
||||
scanInfo.PolicyIdentifier = []reporthandling.PolicyIdentifier{}
|
||||
frameworks := strings.Split(strings.Join(strings.Fields(args[0]), ""), ",")
|
||||
scanInfo.PolicyIdentifier = SetScanForFirstFramework(frameworks)
|
||||
if len(frameworks) > 1 {
|
||||
scanInfo.PolicyIdentifier = SetScanForGivenFrameworks(frameworks[1:])
|
||||
}
|
||||
}
|
||||
if len(args) > 0 {
|
||||
if len(args[1:]) == 0 || args[1] != "-" {
|
||||
scanInfo.InputPatterns = args[1:]
|
||||
} else { // store stout to file
|
||||
tempFile, err := os.CreateTemp(".", "tmp-kubescape*.yaml")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.Remove(tempFile.Name())
|
||||
|
||||
if _, err := io.Copy(tempFile, os.Stdin); err != nil {
|
||||
return err
|
||||
}
|
||||
scanInfo.InputPatterns = []string{tempFile.Name()}
|
||||
}
|
||||
}
|
||||
scanInfo.Init()
|
||||
cautils.SetSilentMode(scanInfo.Silent)
|
||||
err := clihandler.CliSetup(&scanInfo)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func isValidFramework(framework string) bool {
|
||||
return cautils.StringInSlice(clihandler.SupportedFrameworks, framework) != cautils.ValueNotFound
|
||||
}
|
||||
|
||||
func init() {
|
||||
scanCmd.AddCommand(frameworkCmd)
|
||||
scanInfo = cautils.ScanInfo{}
|
||||
scanInfo.FrameworkScan = true
|
||||
frameworkCmd.Flags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
|
||||
frameworkCmd.Flags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to Armo backend. Use this flag if you ran with the '--submit' flag in the past and you do not want to submit your current scan results")
|
||||
frameworkCmd.Flags().StringVarP(&scanInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
}
|
||||
func SetScanForGivenFrameworks(frameworks []string) []reporthandling.PolicyIdentifier {
|
||||
for _, framework := range frameworks {
|
||||
newPolicy := reporthandling.PolicyIdentifier{}
|
||||
newPolicy.Kind = reporthandling.KindFramework
|
||||
newPolicy.Name = framework
|
||||
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
|
||||
}
|
||||
return scanInfo.PolicyIdentifier
|
||||
}
|
||||
|
||||
func SetScanForFirstFramework(frameworks []string) []reporthandling.PolicyIdentifier {
|
||||
newPolicy := reporthandling.PolicyIdentifier{}
|
||||
newPolicy.Kind = reporthandling.KindFramework
|
||||
newPolicy.Name = frameworks[0]
|
||||
scanInfo.PolicyIdentifier = append(scanInfo.PolicyIdentifier, newPolicy)
|
||||
return scanInfo.PolicyIdentifier
|
||||
}
|
||||
|
||||
func flagValidationFramework() {
|
||||
if scanInfo.Submit && scanInfo.Local {
|
||||
fmt.Println("You can use `keep-local` or `submit`, but not both")
|
||||
os.Exit(1)
|
||||
}
|
||||
if 100 < scanInfo.FailThreshold {
|
||||
fmt.Println("bad argument: out of range threshold")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
46
clihandler/cmd/scan.go
Normal file
46
clihandler/cmd/scan.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/clihandler"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var scanInfo cautils.ScanInfo
|
||||
|
||||
// scanCmd represents the scan command
|
||||
var scanCmd = &cobra.Command{
|
||||
Use: "scan <command>",
|
||||
Short: "Scan the current running cluster or yaml files",
|
||||
Long: `The action you want to perform`,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
if !strings.EqualFold(args[0], "framework") && !strings.EqualFold(args[0], "control") {
|
||||
return fmt.Errorf("invalid parameter '%s'. Supported parameters: framework, control", args[0])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if len(args) == 0 {
|
||||
frameworkArgs := []string{clihandler.ValidFrameworks}
|
||||
frameworkArgs = append(frameworkArgs, args...)
|
||||
frameworkCmd.RunE(cmd, frameworkArgs)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(scanCmd)
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system, kube-public")
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer"/"json"/"junit"`)
|
||||
scanCmd.PersistentFlags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
|
||||
scanCmd.PersistentFlags().BoolVarP(&scanInfo.Silent, "silent", "s", false, "Silent progress messages")
|
||||
scanCmd.PersistentFlags().Uint16VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 0, "Failure threshold is the percent bellow which the command fails and returns exit code 1")
|
||||
scanCmd.PersistentFlags().StringSliceVar(&scanInfo.UseFrom, "use-from", nil, "Load local policy object from specified path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local policy object from default path. If not used will download latest")
|
||||
scanCmd.PersistentFlags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from Armo management portal")
|
||||
}
|
||||
@@ -25,21 +25,21 @@ func GetLatestVersion() (string, error) {
|
||||
latestVersion := "https://api.github.com/repos/armosec/kubescape/releases/latest"
|
||||
resp, err := http.Get(latestVersion)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get latest releases from '%s', reason: %s", latestVersion, err.Error())
|
||||
return "unknown", fmt.Errorf("failed to get latest releases from '%s', reason: %s", latestVersion, err.Error())
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode < 200 || 301 < resp.StatusCode {
|
||||
return "", fmt.Errorf("failed to download file, status code: %s", resp.Status)
|
||||
return "unknown", nil
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read response body from '%s', reason: %s", latestVersion, err.Error())
|
||||
return "unknown", fmt.Errorf("failed to read response body from '%s', reason: %s", latestVersion, err.Error())
|
||||
}
|
||||
var data map[string]interface{}
|
||||
err = json.Unmarshal(body, &data)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to unmarshal response body from '%s', reason: %s", latestVersion, err.Error())
|
||||
return "unknown", fmt.Errorf("failed to unmarshal response body from '%s', reason: %s", latestVersion, err.Error())
|
||||
}
|
||||
return fmt.Sprintf("%v", data["tag_name"]), nil
|
||||
}
|
||||
151
clihandler/initcli.go
Normal file
151
clihandler/initcli.go
Normal file
@@ -0,0 +1,151 @@
|
||||
package clihandler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/armoapi-go/armotypes"
|
||||
"github.com/armosec/k8s-interface/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/opaprocessor"
|
||||
"github.com/armosec/kubescape/policyhandler"
|
||||
"github.com/armosec/kubescape/resourcehandler"
|
||||
"github.com/armosec/kubescape/resultshandling"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
"github.com/armosec/opa-utils/reporthandling"
|
||||
)
|
||||
|
||||
type CLIHandler struct {
|
||||
policyHandler *policyhandler.PolicyHandler
|
||||
scanInfo *cautils.ScanInfo
|
||||
}
|
||||
|
||||
var SupportedFrameworks = []string{"nsa", "mitre"}
|
||||
var ValidFrameworks = strings.Join(SupportedFrameworks, ", ")
|
||||
|
||||
type componentInterfaces struct {
|
||||
clusterConfig cautils.IClusterConfig
|
||||
resourceHandler resourcehandler.IResourceHandler
|
||||
report reporter.IReport
|
||||
printerHandler printer.IPrinter
|
||||
}
|
||||
|
||||
func getReporter(scanInfo *cautils.ScanInfo) reporter.IReport {
|
||||
if !scanInfo.Submit {
|
||||
return reporter.NewReportMock()
|
||||
}
|
||||
if !scanInfo.FrameworkScan {
|
||||
return reporter.NewReportMock()
|
||||
}
|
||||
|
||||
return reporter.NewReportEventReceiver()
|
||||
}
|
||||
func getInterfaces(scanInfo *cautils.ScanInfo) componentInterfaces {
|
||||
var resourceHandler resourcehandler.IResourceHandler
|
||||
var clusterConfig cautils.IClusterConfig
|
||||
var reportHandler reporter.IReport
|
||||
|
||||
if !scanInfo.ScanRunningCluster() {
|
||||
k8sinterface.ConnectedToCluster = false
|
||||
clusterConfig = cautils.NewEmptyConfig()
|
||||
|
||||
// load fom file
|
||||
resourceHandler = resourcehandler.NewFileResourceHandler(scanInfo.InputPatterns)
|
||||
|
||||
// set mock report (do not send report)
|
||||
reportHandler = reporter.NewReportMock()
|
||||
} else {
|
||||
k8s := k8sinterface.NewKubernetesApi()
|
||||
resourceHandler = resourcehandler.NewK8sResourceHandler(k8s, scanInfo.ExcludedNamespaces)
|
||||
clusterConfig = cautils.ClusterConfigSetup(scanInfo, k8s, getter.GetArmoAPIConnector())
|
||||
|
||||
// setup reporter
|
||||
reportHandler = getReporter(scanInfo)
|
||||
}
|
||||
|
||||
// setup printer
|
||||
printerHandler := printer.GetPrinter(scanInfo.Format)
|
||||
printerHandler.SetWriter(scanInfo.Output)
|
||||
|
||||
return componentInterfaces{
|
||||
clusterConfig: clusterConfig,
|
||||
resourceHandler: resourceHandler,
|
||||
report: reportHandler,
|
||||
printerHandler: printerHandler,
|
||||
}
|
||||
}
|
||||
|
||||
func CliSetup(scanInfo *cautils.ScanInfo) error {
|
||||
|
||||
interfaces := getInterfaces(scanInfo)
|
||||
|
||||
processNotification := make(chan *cautils.OPASessionObj)
|
||||
reportResults := make(chan *cautils.OPASessionObj)
|
||||
|
||||
if err := interfaces.clusterConfig.SetConfig(scanInfo.Account); err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
|
||||
cautils.ClusterName = interfaces.clusterConfig.GetClusterName() // TODO - Deprecated
|
||||
cautils.CustomerGUID = interfaces.clusterConfig.GetCustomerGUID() // TODO - Deprecated
|
||||
interfaces.report.SetClusterName(interfaces.clusterConfig.GetClusterName())
|
||||
interfaces.report.SetCustomerGUID(interfaces.clusterConfig.GetCustomerGUID())
|
||||
// cli handler setup
|
||||
go func() {
|
||||
// policy handler setup
|
||||
policyHandler := policyhandler.NewPolicyHandler(&processNotification, interfaces.resourceHandler)
|
||||
cli := NewCLIHandler(policyHandler, scanInfo)
|
||||
if err := cli.Scan(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
|
||||
// processor setup - rego run
|
||||
go func() {
|
||||
opaprocessorObj := opaprocessor.NewOPAProcessorHandler(&processNotification, &reportResults)
|
||||
opaprocessorObj.ProcessRulesListenner()
|
||||
}()
|
||||
|
||||
resultsHandling := resultshandling.NewResultsHandler(&reportResults, interfaces.report, interfaces.printerHandler)
|
||||
score := resultsHandling.HandleResults(scanInfo)
|
||||
|
||||
// print report url
|
||||
interfaces.clusterConfig.GenerateURL()
|
||||
|
||||
adjustedFailThreshold := float32(scanInfo.FailThreshold) / 100
|
||||
if score < adjustedFailThreshold {
|
||||
return fmt.Errorf("Scan score is bellow threshold")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewCLIHandler(policyHandler *policyhandler.PolicyHandler, scanInfo *cautils.ScanInfo) *CLIHandler {
|
||||
return &CLIHandler{
|
||||
scanInfo: scanInfo,
|
||||
policyHandler: policyHandler,
|
||||
}
|
||||
}
|
||||
|
||||
func (clihandler *CLIHandler) Scan() error {
|
||||
cautils.ScanStartDisplay()
|
||||
policyNotification := &reporthandling.PolicyNotification{
|
||||
NotificationType: reporthandling.TypeExecPostureScan,
|
||||
Rules: clihandler.scanInfo.PolicyIdentifier,
|
||||
Designators: armotypes.PortalDesignator{},
|
||||
}
|
||||
switch policyNotification.NotificationType {
|
||||
case reporthandling.TypeExecPostureScan:
|
||||
if err := clihandler.policyHandler.HandleNotificationRequest(policyNotification, clihandler.scanInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("notification type '%s' Unknown", policyNotification.NotificationType)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var downloadInfo cautils.DownloadInfo
|
||||
|
||||
var downloadCmd = &cobra.Command{
|
||||
Use: fmt.Sprintf("download framework <framework-name> [flags]\nSupported frameworks: %s", validFrameworks),
|
||||
Short: "Download framework controls",
|
||||
Long: ``,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) != 2 {
|
||||
return fmt.Errorf("requires two arguments : framework <framework-name>")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
downloadInfo.FrameworkName = args[1]
|
||||
g := getter.NewDownloadReleasedPolicy()
|
||||
if downloadInfo.Path == "" {
|
||||
downloadInfo.Path = getter.GetDefaultPath(downloadInfo.FrameworkName + ".json")
|
||||
}
|
||||
frameworks, err := g.GetFramework(downloadInfo.FrameworkName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = getter.SaveFrameworkInFile(frameworks, downloadInfo.Path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(downloadCmd)
|
||||
downloadInfo = cautils.DownloadInfo{}
|
||||
downloadCmd.Flags().StringVarP(&downloadInfo.Path, "output", "o", "", "Output file. If specified, will store save to `~/.kubescape/<framework name>.json`")
|
||||
}
|
||||
201
cmd/framework.go
201
cmd/framework.go
@@ -1,201 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/armosec/kubescape/cautils"
|
||||
"github.com/armosec/kubescape/cautils/armotypes"
|
||||
"github.com/armosec/kubescape/cautils/getter"
|
||||
"github.com/armosec/kubescape/cautils/k8sinterface"
|
||||
"github.com/armosec/kubescape/cautils/opapolicy"
|
||||
"github.com/armosec/kubescape/opaprocessor"
|
||||
"github.com/armosec/kubescape/policyhandler"
|
||||
"github.com/armosec/kubescape/resultshandling"
|
||||
"github.com/armosec/kubescape/resultshandling/printer"
|
||||
"github.com/armosec/kubescape/resultshandling/reporter"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var scanInfo cautils.ScanInfo
|
||||
var supportedFrameworks = []string{"nsa", "mitre"}
|
||||
var validFrameworks = strings.Join(supportedFrameworks, ", ")
|
||||
|
||||
type CLIHandler struct {
|
||||
policyHandler *policyhandler.PolicyHandler
|
||||
scanInfo *cautils.ScanInfo
|
||||
}
|
||||
|
||||
var frameworkCmd = &cobra.Command{
|
||||
|
||||
Use: fmt.Sprintf("framework <framework name> [`<glob pattern>`/`-`] [flags]\nSupported frameworks: %s", validFrameworks),
|
||||
Short: fmt.Sprintf("The framework you wish to use. Supported frameworks: %s", strings.Join(supportedFrameworks, ", ")),
|
||||
Long: "Execute a scan on a running Kubernetes cluster or `yaml`/`json` files (use glob) or `-` for stdin",
|
||||
ValidArgs: supportedFrameworks,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 1 && !(cmd.Flags().Lookup("use-from").Changed) {
|
||||
return fmt.Errorf("requires at least one argument")
|
||||
} else if len(args) > 0 {
|
||||
if !isValidFramework(strings.ToLower(args[0])) {
|
||||
return fmt.Errorf(fmt.Sprintf("supported frameworks: %s", strings.Join(supportedFrameworks, ", ")))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
scanInfo.PolicyIdentifier = opapolicy.PolicyIdentifier{}
|
||||
scanInfo.PolicyIdentifier.Kind = opapolicy.KindFramework
|
||||
|
||||
if !(cmd.Flags().Lookup("use-from").Changed) {
|
||||
scanInfo.PolicyIdentifier.Name = strings.ToLower(args[0])
|
||||
}
|
||||
if len(args) > 0 {
|
||||
if len(args[1:]) == 0 || args[1] != "-" {
|
||||
scanInfo.InputPatterns = args[1:]
|
||||
} else { // store stout to file
|
||||
tempFile, err := os.CreateTemp(".", "tmp-kubescape*.yaml")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.Remove(tempFile.Name())
|
||||
|
||||
if _, err := io.Copy(tempFile, os.Stdin); err != nil {
|
||||
return err
|
||||
}
|
||||
scanInfo.InputPatterns = []string{tempFile.Name()}
|
||||
}
|
||||
}
|
||||
scanInfo.Init()
|
||||
cautils.SetSilentMode(scanInfo.Silent)
|
||||
err := CliSetup()
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func isValidFramework(framework string) bool {
|
||||
return cautils.StringInSlice(supportedFrameworks, framework) != cautils.ValueNotFound
|
||||
}
|
||||
|
||||
func init() {
|
||||
scanCmd.AddCommand(frameworkCmd)
|
||||
scanInfo = cautils.ScanInfo{}
|
||||
frameworkCmd.Flags().StringVar(&scanInfo.UseFrom, "use-from", "", "Load local framework object from specified path. If not used will download latest")
|
||||
frameworkCmd.Flags().BoolVar(&scanInfo.UseDefault, "use-default", false, "Load local framework object from default path. If not used will download latest")
|
||||
frameworkCmd.Flags().StringVar(&scanInfo.UseExceptions, "exceptions", "", "Path to an exceptions obj. If not set will download exceptions from Armo management portal")
|
||||
frameworkCmd.Flags().StringVarP(&scanInfo.ExcludedNamespaces, "exclude-namespaces", "e", "", "Namespaces to exclude from scanning. Recommended: kube-system, kube-public")
|
||||
frameworkCmd.Flags().StringVarP(&scanInfo.Format, "format", "f", "pretty-printer", `Output format. Supported formats: "pretty-printer"/"json"/"junit"`)
|
||||
frameworkCmd.Flags().StringVarP(&scanInfo.Output, "output", "o", "", "Output file. Print output to file and not stdout")
|
||||
frameworkCmd.Flags().BoolVarP(&scanInfo.Silent, "silent", "s", false, "Silent progress messages")
|
||||
frameworkCmd.Flags().Uint16VarP(&scanInfo.FailThreshold, "fail-threshold", "t", 0, "Failure threshold is the percent bellow which the command fails and returns exit code 1")
|
||||
frameworkCmd.Flags().BoolVarP(&scanInfo.DoNotSendResults, "results-locally", "", false, "Deprecated. Please use `--keep-local` instead")
|
||||
frameworkCmd.Flags().BoolVarP(&scanInfo.Submit, "submit", "", false, "Send the scan results to Armo management portal where you can see the results in a user-friendly UI, choose your preferred compliance framework, check risk results history and trends, manage exceptions, get remediation recommendations and much more. By default the results are not submitted")
|
||||
frameworkCmd.Flags().BoolVarP(&scanInfo.Local, "keep-local", "", false, "If you do not want your Kubescape results reported to Armo backend. Use this flag if you ran with the `--submit` flag in the past and you do not want to submit your current scan results")
|
||||
frameworkCmd.Flags().StringVarP(&scanInfo.Account, "account", "", "", "Armo portal account ID. Default will load account ID from configMap or config file")
|
||||
|
||||
}
|
||||
|
||||
func CliSetup() error {
|
||||
flagValidation()
|
||||
|
||||
var k8s *k8sinterface.KubernetesApi
|
||||
var clusterConfig cautils.IClusterConfig
|
||||
if !scanInfo.ScanRunningCluster() {
|
||||
k8sinterface.ConnectedToCluster = false
|
||||
clusterConfig = cautils.NewEmptyConfig()
|
||||
} else {
|
||||
k8s = k8sinterface.NewKubernetesApi()
|
||||
// setup cluster config
|
||||
clusterConfig = cautils.ClusterConfigSetup(&scanInfo, k8s, getter.GetArmoAPIConnector())
|
||||
}
|
||||
|
||||
processNotification := make(chan *cautils.OPASessionObj)
|
||||
reportResults := make(chan *cautils.OPASessionObj)
|
||||
|
||||
// policy handler setup
|
||||
policyHandler := policyhandler.NewPolicyHandler(&processNotification, k8s)
|
||||
|
||||
if err := clusterConfig.SetCustomerGUID(scanInfo.Account); err != nil {
|
||||
fmt.Println(err)
|
||||
}
|
||||
|
||||
cautils.CustomerGUID = clusterConfig.GetCustomerGUID()
|
||||
cautils.ClusterName = k8sinterface.GetClusterName()
|
||||
|
||||
// cli handler setup
|
||||
go func() {
|
||||
cli := NewCLIHandler(policyHandler)
|
||||
if err := cli.Scan(); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
|
||||
// processor setup - rego run
|
||||
go func() {
|
||||
opaprocessorObj := opaprocessor.NewOPAProcessorHandler(&processNotification, &reportResults)
|
||||
opaprocessorObj.ProcessRulesListenner()
|
||||
}()
|
||||
|
||||
resultsHandling := resultshandling.NewResultsHandler(&reportResults, reporter.NewReportEventReceiver(), printer.NewPrinter(scanInfo.Format, scanInfo.Output))
|
||||
score := resultsHandling.HandleResults()
|
||||
|
||||
// print report url
|
||||
clusterConfig.GenerateURL()
|
||||
|
||||
adjustedFailThreshold := float32(scanInfo.FailThreshold) / 100
|
||||
if score < adjustedFailThreshold {
|
||||
return fmt.Errorf("Scan score is bellow threshold")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewCLIHandler(policyHandler *policyhandler.PolicyHandler) *CLIHandler {
|
||||
return &CLIHandler{
|
||||
scanInfo: &scanInfo,
|
||||
policyHandler: policyHandler,
|
||||
}
|
||||
}
|
||||
|
||||
func (clihandler *CLIHandler) Scan() error {
|
||||
cautils.ScanStartDisplay()
|
||||
policyNotification := &opapolicy.PolicyNotification{
|
||||
NotificationType: opapolicy.TypeExecPostureScan,
|
||||
Rules: []opapolicy.PolicyIdentifier{
|
||||
clihandler.scanInfo.PolicyIdentifier,
|
||||
},
|
||||
Designators: armotypes.PortalDesignator{},
|
||||
}
|
||||
switch policyNotification.NotificationType {
|
||||
case opapolicy.TypeExecPostureScan:
|
||||
//
|
||||
if err := clihandler.policyHandler.HandleNotificationRequest(policyNotification, clihandler.scanInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("notification type '%s' Unknown", policyNotification.NotificationType)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func flagValidation() {
|
||||
if scanInfo.DoNotSendResults {
|
||||
fmt.Println("Deprecated. Please use `--keep-local` instead")
|
||||
}
|
||||
|
||||
if scanInfo.Submit && scanInfo.Local {
|
||||
fmt.Println("You can use `keep-local` or `submit`, but not both")
|
||||
os.Exit(1)
|
||||
}
|
||||
if 100 < scanInfo.FailThreshold {
|
||||
fmt.Println("bad argument: out of range threshold")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
18
cmd/scan.go
18
cmd/scan.go
@@ -1,18 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
// scanCmd represents the scan command. It is the parent `scan` verb; the
// actual work happens in its subcommands (e.g. `scan framework`), so Run is
// intentionally a no-op.
var scanCmd = &cobra.Command{
	Use:   "scan",
	Short: "Scan the current running cluster or yaml files",
	Long:  `The action you want to perform`,
	Run: func(cmd *cobra.Command, args []string) {
	},
}
|
||||
|
||||
// init attaches the `scan` verb to the root command.
func init() {
	rootCmd.AddCommand(scanCmd)
}
|
||||
@@ -51,7 +51,7 @@ kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --form
|
||||
kubescape scan framework nsa --exclude-namespaces kube-system,kube-public --format junit --output results.xml
|
||||
```
|
||||
|
||||
* Scan with exceptions, objects with exceptions will be presented as `warning` and not `fail` <img src="docs/new-feature.svg">
|
||||
* Scan with exceptions, objects with exceptions will be presented as `warning` and not `fail`
|
||||
```
|
||||
kubescape scan framework nsa --exceptions examples/exceptions.json
|
||||
```
|
||||
|
||||
83
examples/cronJob-support/README.md
Normal file
83
examples/cronJob-support/README.md
Normal file
@@ -0,0 +1,83 @@
|
||||
# Repeated Kubescape Scanning
|
||||
|
||||
You can scan your cluster on a schedule by adding a `CronJob` that repeatedly triggers Kubescape
|
||||
|
||||
* Setup [scanning & submitting](#scanning--submitting)
|
||||
* Setup [scanning without submitting](#scanning-without-submitting)
|
||||
|
||||
## Scanning & Submitting
|
||||
|
||||
If you wish to repeatedly scan and submit the results to the [Kubescape SaaS version](https://portal.armo.cloud/), where you can benefit from the features the SaaS version provides, please follow these instructions:
|
||||
|
||||
1. Apply kubescape namespace
|
||||
```
|
||||
kubectl apply -f ks-namespace.yaml
|
||||
```
|
||||
|
||||
2. Apply serviceAccount and roles
|
||||
```
|
||||
kubectl apply -f ks-serviceAccount.yaml
|
||||
```
|
||||
|
||||
3. Setup and apply configMap
|
||||
|
||||
Before you apply the configMap you need to set the account ID and cluster name in the `ks-configMap.yaml` file.
|
||||
|
||||
* Set cluster name:
|
||||
Run `kubectl config current-context` and set the result in the `data.clusterName` field
|
||||
* Set account ID:
|
||||
1. Navigate to the [Kubescape SaaS version](https://portal.armo.cloud/) and login/sign up for free
|
||||
2. Click the `Add Cluster` button on the top right of the page
|
||||
<img src="screenshots/add-cluster.png" alt="add-cluster">
|
||||
3. Copy the value of `--account` and set it in the `data.customerGUID` field
|
||||
<img src="screenshots/account.png" alt="account">
|
||||
|
||||
Make sure the configMap looks as follows:
|
||||
```
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kubescape
|
||||
labels:
|
||||
app: kubescape
|
||||
namespace: kubescape
|
||||
data:
|
||||
config.json: |
|
||||
{
|
||||
"customerGUID": "XXXXXXXX-XXXX-XXXX-XXXXXXXXXXXX",
|
||||
"clusterName": "my-awesome-cluster-name"
|
||||
}
|
||||
```
|
||||
|
||||
Finally, apply the configMap
|
||||
```
|
||||
kubectl apply -f ks-configMap.yaml
|
||||
```
|
||||
|
||||
4. Apply CronJob
|
||||
|
||||
Before you apply the cronJob, make sure the scanning frequency suits your needs
|
||||
```
|
||||
kubectl apply -f ks-cronJob-submit.yaml
|
||||
```
|
||||
|
||||
## Scanning Without Submitting
|
||||
|
||||
If you wish to repeatedly scan but not submit the scan results, follow these instructions:
|
||||
|
||||
1. Apply kubescape namespace
|
||||
```
|
||||
kubectl apply -f ks-namespace.yaml
|
||||
```
|
||||
|
||||
2. Apply serviceAccount and roles
|
||||
```
|
||||
kubectl apply -f ks-serviceAccount.yaml
|
||||
```
|
||||
|
||||
3. Apply CronJob
|
||||
|
||||
Before you apply the cronJob, make sure the scanning frequency suits your needs
|
||||
```
|
||||
kubectl apply -f ks-cronJob-non-submit.yaml
|
||||
```
|
||||
14
examples/cronJob-support/ks-configMap.yaml
Normal file
14
examples/cronJob-support/ks-configMap.yaml
Normal file
@@ -0,0 +1,14 @@
|
||||
# ------------------- Kubescape User/Customer ID ------------------- #
# Mounted into the kubescape container as /root/.kubescape/config.json
# (see ks-cronJob-submit.yaml). Replace <ID> with the account ID from the
# Kubescape SaaS portal and <cluster name> with the output of
# `kubectl config current-context`.
kind: ConfigMap
apiVersion: v1
metadata:
  name: kubescape
  labels:
    app: kubescape
  namespace: kubescape
data:
  config.json: |
    {
        "customerGUID": "<ID>",
        "clusterName": "<cluster name>"
    }
|
||||
32
examples/cronJob-support/ks-cronJob-non-submit.yaml
Normal file
32
examples/cronJob-support/ks-cronJob-non-submit.yaml
Normal file
@@ -0,0 +1,32 @@
|
||||
# CronJob that runs a kubescape scan on a schedule without submitting the
# results anywhere (no config volume mounted).
apiVersion: batch/v1
kind: CronJob
metadata:
  name: kubescape
  labels:
    app: kubescape
  namespace: kubescape
spec:
  # ┌────────────────── timezone (optional)
  # | ┌───────────── minute (0 - 59)
  # | │ ┌───────────── hour (0 - 23)
  # | │ │ ┌───────────── day of the month (1 - 31)
  # | │ │ │ ┌───────────── month (1 - 12)
  # | │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
  # | │ │ │ │ │ 7 is also Sunday on some systems)
  # | │ │ │ │ │
  # | │ │ │ │ │
  # CRON_TZ=UTC * * * * *
  # "0 0 1 * *" runs at 00:00 on day 1 of every month — adjust the frequency
  # to your needs.
  schedule: "0 0 1 * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: kubescape
              image: quay.io/armosec/kubescape:latest
              imagePullPolicy: IfNotPresent
              command: ["/bin/sh","-c"]
              args:
                - kubescape scan framework nsa
          restartPolicy: OnFailure
          serviceAccountName: kubescape-discovery
|
||||
40
examples/cronJob-support/ks-cronJob-submit.yaml
Normal file
40
examples/cronJob-support/ks-cronJob-submit.yaml
Normal file
@@ -0,0 +1,40 @@
|
||||
# CronJob that runs a kubescape scan on a schedule and submits the results
# (--submit) using the account/cluster identity mounted from the "kubescape"
# ConfigMap (see ks-configMap.yaml).
apiVersion: batch/v1
kind: CronJob
metadata:
  name: kubescape
  labels:
    app: kubescape
  namespace: kubescape
spec:
  # ┌────────────────── timezone (optional)
  # | ┌───────────── minute (0 - 59)
  # | │ ┌───────────── hour (0 - 23)
  # | │ │ ┌───────────── day of the month (1 - 31)
  # | │ │ │ ┌───────────── month (1 - 12)
  # | │ │ │ │ ┌───────────── day of the week (0 - 6) (Sunday to Saturday;
  # | │ │ │ │ │ 7 is also Sunday on some systems)
  # | │ │ │ │ │
  # | │ │ │ │ │
  # CRON_TZ=UTC * * * * *
  # "0 0 1 * *" runs at 00:00 on day 1 of every month — adjust the frequency
  # to your needs.
  schedule: "0 0 1 * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: kubescape
              image: quay.io/armosec/kubescape:latest
              imagePullPolicy: IfNotPresent
              command: ["/bin/sh","-c"]
              args:
                - kubescape scan framework nsa --submit
              volumeMounts:
                # Account ID / cluster name read by kubescape at startup.
                - name: kubescape-config-volume
                  mountPath: /root/.kubescape/config.json
                  subPath: config.json
          restartPolicy: OnFailure
          serviceAccountName: kubescape-discovery
          volumes:
            - name: kubescape-config-volume
              configMap:
                name: kubescape
|
||||
7
examples/cronJob-support/ks-namespace.yaml
Normal file
7
examples/cronJob-support/ks-namespace.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
# ------------------- Kubescape Namespace ------------------- #
# Dedicated namespace for the Kubescape CronJob and its resources.
# (The original header said "User/Customer ID" — copied from
# ks-configMap.yaml; this file defines the namespace.)
kind: Namespace
apiVersion: v1
metadata:
  name: kubescape
  labels:
    app: kubescape
|
||||
61
examples/cronJob-support/ks-serviceAccount.yaml
Normal file
61
examples/cronJob-support/ks-serviceAccount.yaml
Normal file
@@ -0,0 +1,61 @@
|
||||
---
# ------------------- Kubescape Service Account ------------------- #
# Identity used by the kubescape CronJob pods (serviceAccountName:
# kubescape-discovery in the CronJob specs).
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: kubescape
  name: kubescape-discovery
  namespace: kubescape

---
# ------------------- Kubescape Role & Role Binding ------------------- #
# Read access to all resources within the kubescape namespace.
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubescape-discovery-role
  namespace: kubescape
rules:
  - apiGroups: ["*"]
    resources: ["*"]
    # NOTE(review): "describe" is not a standard RBAC verb (kubectl describe
    # is built on get/list); it is ignored by the API server — "watch" may
    # have been intended. Confirm before changing.
    verbs: ["get", "list", "describe"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kubescape-discovery-binding
  namespace: kubescape
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubescape-discovery-role
subjects:
  - kind: ServiceAccount
    name: kubescape-discovery

---
# ------------------- Kubescape Cluster Role & Cluster Role Binding ------------------- #
# Cluster-wide read access so kubescape can scan resources in all namespaces.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubescape-discovery-clusterroles
  # "namespace" omitted since ClusterRoles are not namespaced
rules:
  - apiGroups: ["*"]
    resources: ["*"]
    # NOTE(review): see the Role above — "describe" is not a standard RBAC verb.
    verbs: ["get", "list", "describe"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kubescape-discovery-role-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubescape-discovery-clusterroles
subjects:
  - kind: ServiceAccount
    name: kubescape-discovery
    namespace: kubescape
|
||||
BIN
examples/cronJob-support/screenshots/account.png
Normal file
BIN
examples/cronJob-support/screenshots/account.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 41 KiB |
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user