Mirror of https://github.com/kubeshark/kubeshark.git, synced 2026-02-15 02:19:54 +00:00

Compare commits (22 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | d844d6eb04 |  |
|  | 6979441422 |  |
|  | 9ec8347c6c |  |
|  | 617fb89ca5 |  |
|  | 1cbd9cb199 |  |
|  | 23c1b66855 |  |
|  | f5fa9ff270 |  |
|  | 4159938cea |  |
|  | 5614e153f3 |  |
|  | 5e90d67b0e |  |
|  | dd430c31d5 |  |
|  | c1d774e53c |  |
|  | c671bc6655 |  |
|  | 2c1aa9022b |  |
|  | 5af0c5a9e9 |  |
|  | 8217ac3ed0 |  |
|  | de769131de |  |
|  | 5f8a5a3a29 |  |
|  | 7f4cb6dfd4 |  |
|  | 3b063c3bb5 |  |
|  | b9f5475e3a |  |
|  | 346e904e77 |  |
.github/workflows/acceptance_tests.yml (vendored, 2 changed lines)

@@ -22,7 +22,7 @@ jobs:
uses: actions/checkout@v2

- name: Setup acceptance test
run: source ./acceptanceTests/setup.sh
run: ./acceptanceTests/setup.sh

- name: Create k8s users and change context
env:
.github/workflows/build-custom-branch.yml (vendored, new file, 44 lines)

@@ -0,0 +1,44 @@
name: Build Custom Branch

on: push

concurrency:
group: custom-branch-build-${{ github.ref }}
cancel-in-progress: true

jobs:
build:
name: Push custom branch image to GCR
runs-on: ubuntu-latest
if: ${{ contains(github.event.head_commit.message, '#build_and_publish_custom_image') }}

steps:
- name: Check out the repo
uses: actions/checkout@v2

- id: 'auth'
uses: 'google-github-actions/auth@v0'
with:
credentials_json: '${{ secrets.GCR_JSON_KEY }}'

- name: 'Set up Cloud SDK'
uses: 'google-github-actions/setup-gcloud@v0'

- name: Get base image name
shell: bash
run: echo "##[set-output name=image;]$(echo gcr.io/up9-docker-hub/mizu/${GITHUB_REF#refs/heads/})"
id: base_image_step

- name: Login to GCR
uses: docker/login-action@v1
with:
registry: gcr.io
username: _json_key
password: ${{ secrets.GCR_JSON_KEY }}

- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: ${{ steps.base_image_step.outputs.image }}:latest
.github/workflows/build.yml (vendored, 35 changed lines)

@@ -15,15 +15,23 @@ jobs:
name: CLI executable build
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
with:
fetch-depth: 2

- name: Check modified files
id: modified_files
run: devops/check_modified_files.sh cli/

- name: Set up Go 1.17
if: steps.modified_files.outputs.matched == 'true'
uses: actions/setup-go@v2
with:
go-version: '1.17'

- name: Check out code into the Go module directory
uses: actions/checkout@v2

- name: Build CLI
if: steps.modified_files.outputs.matched == 'true'
run: make cli

build-agent:

@@ -32,6 +40,23 @@ jobs:
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
with:
fetch-depth: 2

- name: Check modified files
id: modified_files
run: devops/check_modified_files.sh agent/ shared/ tap/ ui/ Dockerfile

- name: Build Agent
run: make agent-docker
- name: Set up Docker Buildx
if: steps.modified_files.outputs.matched == 'true'
uses: docker/setup-buildx-action@v1

- name: Build
uses: docker/build-push-action@v2
if: steps.modified_files.outputs.matched == 'true'
with:
context: .
push: false
tags: up9inc/mizu:devlatest
cache-from: type=gha
cache-to: type=gha,mode=max
.github/workflows/release.yml (vendored, 4 changed lines)

@@ -59,6 +59,7 @@ jobs:
tags: |
type=raw,${{ steps.versioning.outputs.version }}
type=raw,value=latest,enable=${{ steps.condval.outputs.value == 'stable' }}
type=raw,value=dev-latest,enable=${{ steps.condval.outputs.value == 'dev' }}
flavor: |
latest=auto
prefix=

@@ -145,6 +146,7 @@ jobs:
tags: |
type=raw,${{ steps.versioning.outputs.version }}
type=raw,value=latest,enable=${{ steps.condval.outputs.value == 'stable' }}
type=raw,value=dev-latest,enable=${{ steps.condval.outputs.value == 'dev' }}
flavor: |
latest=auto
prefix=

@@ -208,7 +210,7 @@ jobs:
tags: |
type=raw,${{ steps.versioning.outputs.version }}
type=raw,value=latest,enable=${{ steps.condval.outputs.value == 'stable' }}

type=raw,value=dev-latest,enable=${{ steps.condval.outputs.value == 'dev' }}
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
.github/workflows/test.yml (vendored, 23 changed lines)

@@ -19,14 +19,16 @@ jobs:
name: Unit Tests
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2
with:
fetch-depth: 2

- name: Set up Go 1.17
uses: actions/setup-go@v2
with:
go-version: '^1.17'

- name: Check out code into the Go module directory
uses: actions/checkout@v2

- name: Install libpcap
shell: bash
run: |

@@ -40,16 +42,31 @@ jobs:
- name: 'Set up Cloud SDK'
uses: 'google-github-actions/setup-gcloud@v0'

- name: Check CLI modified files
id: cli_modified_files
run: devops/check_modified_files.sh cli/

- name: CLI Test
if: github.event_name == 'push' || steps.cli_modified_files.outputs.matched == 'true'
run: make test-cli

- name: Check Agent modified files
id: agent_modified_files
run: devops/check_modified_files.sh agent/

- name: Agent Test
if: github.event_name == 'push' || steps.agent_modified_files.outputs.matched == 'true'
run: make test-agent

- name: Shared Test
run: make test-shared

- name: Check extensions modified files
id: ext_modified_files
run: devops/check_modified_files.sh tap/extensions/

- name: Extensions Test
if: github.event_name == 'push' || steps.ext_modified_files.outputs.matched == 'true'
run: make test-extensions

- name: Upload coverage to Codecov
.gitignore (vendored, 2 changed lines)

@@ -30,7 +30,7 @@ build
pprof/*

# Database Files
*.bin
*.db
*.gob

# Nohup Files - https://man7.org/linux/man-pages/man1/nohup.1p.html
Dockerfile

@@ -78,8 +78,8 @@ RUN go build -ldflags="-extldflags=-static -s -w \
-X 'github.com/up9inc/mizu/agent/pkg/version.Ver=${VER}'" -o mizuagent .

# Download Basenine executable, verify the sha1sum
ADD https://github.com/up9inc/basenine/releases/download/v0.4.17/basenine_linux_${GOARCH} ./basenine_linux_${GOARCH}
ADD https://github.com/up9inc/basenine/releases/download/v0.4.17/basenine_linux_${GOARCH}.sha256 ./basenine_linux_${GOARCH}.sha256
ADD https://github.com/up9inc/basenine/releases/download/v0.6.3/basenine_linux_${GOARCH} ./basenine_linux_${GOARCH}
ADD https://github.com/up9inc/basenine/releases/download/v0.6.3/basenine_linux_${GOARCH}.sha256 ./basenine_linux_${GOARCH}.sha256
RUN shasum -a 256 -c basenine_linux_${GOARCH}.sha256
RUN chmod +x ./basenine_linux_${GOARCH}
RUN mv ./basenine_linux_${GOARCH} ./basenine
Makefile (7 changed lines)

@@ -31,9 +31,6 @@ cli: ## Build CLI.
cli-debug: ## Build CLI.
@echo "building cli"; cd cli && $(MAKE) build-debug

build-cli-ci: ## Build CLI for CI.
@echo "building cli for ci"; cd cli && $(MAKE) build GIT_BRANCH=ci SUFFIX=ci

agent: ## Build agent.
@(echo "building mizu agent .." )
@(cd agent; go build -o build/mizuagent main.go)

@@ -57,10 +54,6 @@ push-docker: ## Build and publish agent docker image.
@echo "publishing Docker image .. "
devops/build-push-featurebranch.sh

build-docker-ci: ## Build agent docker image for CI.
@echo "building docker image for ci"
devops/build-agent-ci.sh

push-cli: ## Build and publish CLI.
@echo "publishing CLI .. "
@cd cli; $(MAKE) build-all
README.md (177 changed lines)

@@ -26,178 +26,17 @@ Think TCPDump and Wireshark re-invented for Kubernetes.

![Simple UI](assets/mizu-ui.png)

## Features
## Quickstart and documentation

- Simple and powerful CLI
- Monitoring network traffic in real-time. Supported protocols:
- [HTTP/1.x](https://datatracker.ietf.org/doc/html/rfc2616) (REST, GraphQL, SOAP, etc.)
- [HTTP/2](https://datatracker.ietf.org/doc/html/rfc7540) (gRPC)
- [AMQP](https://www.rabbitmq.com/amqp-0-9-1-reference.html) (RabbitMQ, Apache Qpid, etc.)
- [Apache Kafka](https://kafka.apache.org/protocol)
- [Redis](https://redis.io/topics/protocol)
- Works with Kubernetes APIs. No installation or code instrumentation
- Rich filtering
You can run Mizu on any Kubernetes cluster (version of 1.16.0 or higher) in a matter of seconds. See the [Mizu Getting Started Guide](https://getmizu.io/docs/) for how.

## Requirements
For more comprehensive documentation, start with the [docs](https://getmizu.io/docs/mizu/mizu-cli).

A Kubernetes server version of 1.16.0 or higher is required.
## Working in this repo

## Download
We ❤️ pull requests! See [CONTRIBUTING.md](docs/CONTRIBUTING.md) for info on contributing changes. <br />
In the wiki you can find an intorduction to [mizu components](https://github.com/up9inc/mizu/wiki/Introduction-to-Mizu), and [development workflows](https://github.com/up9inc/mizu/wiki/Development-Workflows).

Download Mizu for your platform and operating system
## Code of Conduct

### Latest Stable Release

* for MacOS - Intel
```
curl -Lo mizu \
https://github.com/up9inc/mizu/releases/latest/download/mizu_darwin_amd64 \
&& chmod 755 mizu
```

* for Linux - Intel 64bit
```
curl -Lo mizu \
https://github.com/up9inc/mizu/releases/latest/download/mizu_linux_amd64 \
&& chmod 755 mizu
```

SHA256 checksums are available on the [Releases](https://github.com/up9inc/mizu/releases) page

### Development (unstable) Build
Pick one from the [Releases](https://github.com/up9inc/mizu/releases) page

## How to Run

1. Find pods you'd like to tap to in your Kubernetes cluster
2. Run `mizu tap` or `mizu tap PODNAME`
3. Open browser on `http://localhost:8899` **or** as instructed in the CLI
4. Watch the API traffic flowing
5. Type ^C to stop

## Examples

Run `mizu help` for usage options

To tap all pods in current namespace -
```
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
carts-66c77f5fbb-fq65r 2/2 Running 0 20m
catalogue-5f4cb7cf5-7zrmn 2/2 Running 0 20m
front-end-649fc5fd6-kqbtn 2/2 Running 0 20m
..

$ mizu tap
+carts-66c77f5fbb-fq65r
+catalogue-5f4cb7cf5-7zrmn
+front-end-649fc5fd6-kqbtn
Web interface is now available at http://localhost:8899
^C
```

### To tap specific pod
```bash
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
front-end-649fc5fd6-kqbtn 2/2 Running 0 7m
..

$ mizu tap front-end-649fc5fd6-kqbtn
+front-end-649fc5fd6-kqbtn
Web interface is now available at http://localhost:8899
^C
```

### To tap multiple pods using regex
```bash
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
carts-66c77f5fbb-fq65r 2/2 Running 0 20m
catalogue-5f4cb7cf5-7zrmn 2/2 Running 0 20m
front-end-649fc5fd6-kqbtn 2/2 Running 0 20m
..

$ mizu tap "^ca.*"
+carts-66c77f5fbb-fq65r
+catalogue-5f4cb7cf5-7zrmn
Web interface is now available at http://localhost:8899
^C
```

## Configuration

Mizu can optionally work with a config file that can be provided as a CLI argument (using `--set config-path=<PATH>`) or if not provided, will be stored at ${HOME}/.mizu/config.yaml
In case of partial configuration defined, all other fields will be used with defaults <br />
You can always override the defaults or config file with CLI flags

To get the default config params run `mizu config` <br />
To generate a new config file with default values use `mizu config -r`

## Advanced Usage

### Kubeconfig

It is possible to change the kubeconfig path using `KUBECONFIG` environment variable or the command like flag
with `--set kube-config-path=<PATH>`. </br >
If both are not set - Mizu assumes that configuration is at `${HOME}/.kube/config`

### Namespace-Restricted Mode

Some users have permission to only manage resources in one particular namespace assigned to them
By default `mizu tap` creates a new namespace `mizu` for all of its Kubernetes resources. In order to instead install
Mizu in an existing namespace, set the `mizu-resources-namespace` config option

If `mizu-resources-namespace` is set to a value other than the default `mizu`, Mizu will operate in a
Namespace-Restricted mode. It will only tap pods in `mizu-resources-namespace`. This way Mizu only requires permissions
to the namespace set by `mizu-resources-namespace`. The user must set the tapped namespace to the same namespace by
using the `--namespace` flag or by setting `tap.namespaces` in the config file

Setting `mizu-resources-namespace=mizu` resets Mizu to its default behavior

For detailed list of k8s permissions see [PERMISSIONS](docs/PERMISSIONS.md) document

### User agent filtering

User-agent filtering (like health checks) - can be configured using command-line options:

```shell
$ mizu tap "^ca.*" --set tap.ignored-user-agents=kube-probe --set tap.ignored-user-agents=prometheus
+carts-66c77f5fbb-fq65r
+catalogue-5f4cb7cf5-7zrmn
Web interface is now available at http://localhost:8899
^C

```
Any request that contains `User-Agent` header with one of the specified values (`kube-probe` or `prometheus`) will not be captured

### Traffic validation rules

This feature allows you to define set of simple rules, and test the traffic against them.
Such validation may test response for specific JSON fields, headers, etc.

Please see [TRAFFIC RULES](docs/POLICY_RULES.md) page for more details and syntax.

### OpenAPI Specification (OAS) Contract Monitoring

An OAS/Swagger file can contain schemas under `parameters` and `responses` fields. With `--contract catalogue.yaml`
CLI option, you can pass your API description to Mizu and the traffic will automatically be validated
against the contracts.

Please see [CONTRACT MONITORING](docs/CONTRACT_MONITORING.md) page for more details and syntax.

### Configure proxy host

By default, mizu will be accessible via local host: 'http://localhost:8899', it is possible to change the host, for
instance, to '0.0.0.0' which can grant access via machine IP address. This setting can be changed via command line
flag `--set tap.proxy-host=<value>` or via config file:
tap proxy-host: 0.0.0.0 and when changed it will support accessing by IP

### Install Mizu standalone

Mizu can be run detached from the cli using the install command: `mizu install`. This type of mizu instance will run
indefinitely in the cluster.

For more information please refer to [INSTALL STANDALONE](docs/INSTALL_STANDALONE.md)
This project is for everyone. We ask that our users and contributors take a few minutes to review our [Code of Conduct](docs/CODE_OF_CONDUCT.md).
@@ -65,14 +65,14 @@ export function checkThatAllEntriesShown() {
}

export function checkFilterByMethod(funcDict) {
const {protocol, method, summary, hugeMizu} = funcDict;
const summaryDict = getSummeryDict(summary);
const methodDict = getMethodDict(method);
const {protocol, method, methodQuery, summary, summaryQuery} = funcDict;
const summaryDict = getSummaryDict(summary, summaryQuery);
const methodDict = getMethodDict(method, methodQuery);
const protocolDict = getProtocolDict(protocol.name, protocol.text);

it(`Testing the method: ${method}`, function () {
// applying filter
cy.get('.w-tc-editor-text').clear().type(`method == "${method}"`);
cy.get('.w-tc-editor-text').clear().type(methodQuery);
cy.get('[type="submit"]').click();
cy.get('.w-tc-editor').should('have.attr', 'style').and('include', Cypress.env('greenFilterColor'));

@@ -121,7 +121,7 @@ function resizeIfNeeded(entriesLen) {
function deepCheck(generalDict, protocolDict, methodDict, entry) {
const entryNum = getEntryNumById(entry.id);
const {summary, value} = generalDict;
const summaryDict = getSummeryDict(summary);
const summaryDict = getSummaryDict(summary);

leftOnHoverCheck(entryNum, methodDict.pathLeft, methodDict.expectedOnHover);
leftOnHoverCheck(entryNum, protocolDict.pathLeft, protocolDict.expectedOnHover);

@@ -149,13 +149,13 @@ function deepCheck(generalDict, protocolDict, methodDict, entry) {
}
}

function getSummeryDict(summary) {
if (summary) {
function getSummaryDict(value, query) {
if (value) {
return {
pathLeft: '> :nth-child(2) > :nth-child(1) > :nth-child(2) > :nth-child(2)',
pathRight: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(2) > :nth-child(2)',
expectedText: summary,
expectedOnHover: `summary == "${summary}"`
expectedText: value,
expectedOnHover: query
};
}
else {

@@ -163,12 +163,12 @@ function getSummeryDict(summary) {
}
}

function getMethodDict(method) {
function getMethodDict(value, query) {
return {
pathLeft: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(2)',
pathRight: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(1) > :nth-child(2)',
expectedText: method,
expectedOnHover: `method == "${method}"`
expectedText: value,
expectedOnHover: query
};
}
@@ -9,41 +9,53 @@ const rabbitProtocolDetails = {name: 'AMQP', text: 'Advanced Message Queuing Pro
checkFilterByMethod({
protocol: rabbitProtocolDetails,
method: 'exchange declare',
methodQuery: 'request.method == "exchange declare"',
summary: 'exchange',
summaryQuery: 'request.exchange == "exchange"',
value: null
});

checkFilterByMethod({
protocol: rabbitProtocolDetails,
method: 'queue declare',
methodQuery: 'request.method == "queue declare"',
summary: 'queue',
summaryQuery: 'request.queue == "queue"',
value: null
});

checkFilterByMethod({
protocol: rabbitProtocolDetails,
method: 'queue bind',
methodQuery: 'request.method == "queue bind"',
summary: 'queue',
summaryQuery: 'request.queue == "queue"',
value: null
});

checkFilterByMethod({
protocol: rabbitProtocolDetails,
method: 'basic publish',
methodQuery: 'request.method == "basic publish"',
summary: 'exchange',
summaryQuery: 'request.exchange == "exchange"',
value: {tab: valueTabs.request, regex: /^message$/mg}
});

checkFilterByMethod({
protocol: rabbitProtocolDetails,
method: 'basic consume',
methodQuery: 'request.method == "basic consume"',
summary: 'queue',
summaryQuery: 'request.queue == "queue"',
value: null
});

checkFilterByMethod({
protocol: rabbitProtocolDetails,
method: 'basic deliver',
methodQuery: 'request.method == "basic deliver"',
summary: 'exchange',
summaryQuery: 'request.queue == "exchange"',
value: {tab: valueTabs.request, regex: /^message$/mg}
});
@@ -9,34 +9,44 @@ const redisProtocolDetails = {name: 'redis', text: 'Redis Serialization Protocol
checkFilterByMethod({
protocol: redisProtocolDetails,
method: 'PING',
methodQuery: 'request.command == "PING"',
summary: null,
summaryQuery: '',
value: null
})

checkFilterByMethod({
protocol: redisProtocolDetails,
method: 'SET',
methodQuery: 'request.command == "SET"',
summary: 'key',
summaryQuery: 'request.key == "key"',
value: {tab: valueTabs.request, regex: /^\[value, keepttl]$/mg}
})

checkFilterByMethod({
protocol: redisProtocolDetails,
method: 'EXISTS',
methodQuery: 'request.command == "EXISTS"',
summary: 'key',
summaryQuery: 'request.key == "key"',
value: {tab: valueTabs.response, regex: /^1$/mg}
})

checkFilterByMethod({
protocol: redisProtocolDetails,
method: 'GET',
methodQuery: 'request.command == "GET"',
summary: 'key',
summaryQuery: 'request.key == "key"',
value: {tab: valueTabs.response, regex: /^value$/mg}
})

checkFilterByMethod({
protocol: redisProtocolDetails,
method: 'DEL',
methodQuery: 'request.command == "DEL"',
summary: 'key',
summaryQuery: 'request.key == "key"',
value: {tab: valueTabs.response, regex: /^1$|^0$/mg}
})
@@ -113,7 +113,7 @@ if (Cypress.env('shouldCheckSrcAndDest')) {
}

checkFilter({
name: 'method == "GET"',
name: 'request.method == "GET"',
leftSidePath: '> :nth-child(3) > :nth-child(1) > :nth-child(1) > :nth-child(2)',
leftSideExpectedText: 'GET',
rightSidePath: '> :nth-child(2) > :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(2)',

@@ -122,7 +122,7 @@ checkFilter({
});

checkFilter({
name: 'summary == "/get"',
name: 'request.path == "/get"',
leftSidePath: '> :nth-child(3) > :nth-child(1) > :nth-child(2) > :nth-child(2)',
leftSideExpectedText: '/get',
rightSidePath: '> :nth-child(2) > :nth-child(2) > :nth-child(1) > :nth-child(2) > :nth-child(2)',

@@ -139,7 +139,7 @@ checkFilter({
applyByEnter: false
});

checkFilterNoResults('method == "POST"');
checkFilterNoResults('request.method == "POST"');

function checkFilterNoResults(filterName) {
it(`checking the filter: ${filterName}. Expecting no results`, function () {
@@ -40,7 +40,7 @@ func TestRedis(t *testing.T) {
return
}

apiServerUrl := GetApiServerUrl(defaultApiServerPort)
apiServerUrl := GetApiServerUrl(DefaultApiServerPort)

if err := WaitTapPodsReady(apiServerUrl); err != nil {
t.Errorf("failed to start tap pods on time, err: %v", err)

@@ -55,7 +55,7 @@ func TestRedis(t *testing.T) {
return
}

redisExternalIp, err := kubernetesProvider.GetServiceExternalIp(ctx, defaultNamespaceName, "redis")
redisExternalIp, err := kubernetesProvider.GetServiceExternalIp(ctx, DefaultNamespaceName, "redis")
if err != nil {
t.Errorf("failed to get redis external ip, err: %v", err)
return

@@ -65,7 +65,7 @@ func TestRedis(t *testing.T) {
Addr: fmt.Sprintf("%v:6379", redisExternalIp),
})

for i := 0; i < defaultEntriesCount/5; i++ {
for i := 0; i < DefaultEntriesCount/5; i++ {
requestErr := rdb.Ping(ctx).Err()
if requestErr != nil {
t.Errorf("failed to send redis request, err: %v", requestErr)

@@ -73,7 +73,7 @@ func TestRedis(t *testing.T) {
}
}

for i := 0; i < defaultEntriesCount/5; i++ {
for i := 0; i < DefaultEntriesCount/5; i++ {
requestErr := rdb.Set(ctx, "key", "value", -1).Err()
if requestErr != nil {
t.Errorf("failed to send redis request, err: %v", requestErr)

@@ -81,7 +81,7 @@ func TestRedis(t *testing.T) {
}
}

for i := 0; i < defaultEntriesCount/5; i++ {
for i := 0; i < DefaultEntriesCount/5; i++ {
requestErr := rdb.Exists(ctx, "key").Err()
if requestErr != nil {
t.Errorf("failed to send redis request, err: %v", requestErr)

@@ -89,7 +89,7 @@ func TestRedis(t *testing.T) {
}
}

for i := 0; i < defaultEntriesCount/5; i++ {
for i := 0; i < DefaultEntriesCount/5; i++ {
requestErr := rdb.Get(ctx, "key").Err()
if requestErr != nil {
t.Errorf("failed to send redis request, err: %v", requestErr)

@@ -97,7 +97,7 @@ func TestRedis(t *testing.T) {
}
}

for i := 0; i < defaultEntriesCount/5; i++ {
for i := 0; i < DefaultEntriesCount/5; i++ {
requestErr := rdb.Del(ctx, "key").Err()
if requestErr != nil {
t.Errorf("failed to send redis request, err: %v", requestErr)

@@ -138,7 +138,7 @@ func TestAmqp(t *testing.T) {
return
}

apiServerUrl := GetApiServerUrl(defaultApiServerPort)
apiServerUrl := GetApiServerUrl(DefaultApiServerPort)

if err := WaitTapPodsReady(apiServerUrl); err != nil {
t.Errorf("failed to start tap pods on time, err: %v", err)

@@ -153,7 +153,7 @@ func TestAmqp(t *testing.T) {
return
}

rabbitmqExternalIp, err := kubernetesProvider.GetServiceExternalIp(ctx, defaultNamespaceName, "rabbitmq")
rabbitmqExternalIp, err := kubernetesProvider.GetServiceExternalIp(ctx, DefaultNamespaceName, "rabbitmq")
if err != nil {
t.Errorf("failed to get RabbitMQ external ip, err: %v", err)
return

@@ -169,7 +169,7 @@ func TestAmqp(t *testing.T) {
// Temporary fix for missing amqp entries
time.Sleep(10 * time.Second)

for i := 0; i < defaultEntriesCount/5; i++ {
for i := 0; i < DefaultEntriesCount/5; i++ {
ch, err := conn.Channel()
if err != nil {
t.Errorf("failed to open a channel, err: %v", err)

@@ -36,7 +36,7 @@ func TestLogs(t *testing.T) {
return
}

apiServerUrl := GetApiServerUrl(defaultApiServerPort)
apiServerUrl := GetApiServerUrl(DefaultApiServerPort)

if err := WaitTapPodsReady(apiServerUrl); err != nil {
t.Errorf("failed to start tap pods on time, err: %v", err)

@@ -137,7 +137,7 @@ func TestLogsPath(t *testing.T) {
return
}

apiServerUrl := GetApiServerUrl(defaultApiServerPort)
apiServerUrl := GetApiServerUrl(DefaultApiServerPort)

if err := WaitTapPodsReady(apiServerUrl); err != nil {
t.Errorf("failed to start tap pods on time, err: %v", err)
acceptanceTests/setup.sh (mode changed: Normal file → Executable file, 4 changed lines)

@@ -61,10 +61,10 @@ echo "Setting minikube docker env"
eval $(minikube docker-env)

echo "Build agent image"
make build-docker-ci
docker build -t mizu/ci:0.0 .

echo "Build cli"
make build-cli-ci
cd cli && make build GIT_BRANCH=ci SUFFIX=ci

echo "Starting tunnel"
minikube tunnel &
@@ -53,14 +53,14 @@ func basicTapTest(t *testing.T, shouldCheckSrcAndDest bool, extraArgs... string)
return
}

apiServerUrl := GetApiServerUrl(defaultApiServerPort)
apiServerUrl := GetApiServerUrl(DefaultApiServerPort)

if err := WaitTapPodsReady(apiServerUrl); err != nil {
t.Errorf("failed to start tap pods on time, err: %v", err)
return
}

proxyUrl := GetProxyUrl(defaultNamespaceName, defaultServiceName)
proxyUrl := GetProxyUrl(DefaultNamespaceName, DefaultServiceName)
for i := 0; i < entriesCount; i++ {
if _, requestErr := ExecuteHttpGetRequest(fmt.Sprintf("%v/get", proxyUrl)); requestErr != nil {
t.Errorf("failed to send proxy request, err: %v", requestErr)

@@ -127,8 +127,8 @@ func TestTapGuiPort(t *testing.T) {
return
}

proxyUrl := GetProxyUrl(defaultNamespaceName, defaultServiceName)
for i := 0; i < defaultEntriesCount; i++ {
proxyUrl := GetProxyUrl(DefaultNamespaceName, DefaultServiceName)
for i := 0; i < DefaultEntriesCount; i++ {
if _, requestErr := ExecuteHttpGetRequest(fmt.Sprintf("%v/get", proxyUrl)); requestErr != nil {
t.Errorf("failed to send proxy request, err: %v", requestErr)
return

@@ -175,7 +175,7 @@ func TestTapAllNamespaces(t *testing.T) {
return
}

apiServerUrl := GetApiServerUrl(defaultApiServerPort)
apiServerUrl := GetApiServerUrl(DefaultApiServerPort)

if err := WaitTapPodsReady(apiServerUrl); err != nil {
t.Errorf("failed to start tap pods on time, err: %v", err)

@@ -224,7 +224,7 @@ func TestTapMultipleNamespaces(t *testing.T) {
return
}

apiServerUrl := GetApiServerUrl(defaultApiServerPort)
apiServerUrl := GetApiServerUrl(DefaultApiServerPort)

if err := WaitTapPodsReady(apiServerUrl); err != nil {
t.Errorf("failed to start tap pods on time, err: %v", err)

@@ -270,7 +270,7 @@ func TestTapRegex(t *testing.T) {
return
}

apiServerUrl := GetApiServerUrl(defaultApiServerPort)
apiServerUrl := GetApiServerUrl(DefaultApiServerPort)

if err := WaitTapPodsReady(apiServerUrl); err != nil {
t.Errorf("failed to start tap pods on time, err: %v", err)

@@ -318,7 +318,7 @@ func TestTapDryRun(t *testing.T) {
}()

go func() {
time.Sleep(shortRetriesCount * time.Second)
time.Sleep(ShortRetriesCount * time.Second)
resultChannel <- "fail"
}()

@@ -358,17 +358,17 @@ func TestTapRedact(t *testing.T) {
return
}

apiServerUrl := GetApiServerUrl(defaultApiServerPort)
apiServerUrl := GetApiServerUrl(DefaultApiServerPort)

if err := WaitTapPodsReady(apiServerUrl); err != nil {
t.Errorf("failed to start tap pods on time, err: %v", err)
return
}

proxyUrl := GetProxyUrl(defaultNamespaceName, defaultServiceName)
proxyUrl := GetProxyUrl(DefaultNamespaceName, DefaultServiceName)
requestHeaders := map[string]string{"User-Header": "Mizu"}
requestBody := map[string]string{"User": "Mizu"}
for i := 0; i < defaultEntriesCount; i++ {
for i := 0; i < DefaultEntriesCount; i++ {
if _, requestErr := ExecuteHttpPostRequestWithHeaders(fmt.Sprintf("%v/post", proxyUrl), requestHeaders, requestBody); requestErr != nil {
t.Errorf("failed to send proxy request, err: %v", requestErr)
return

@@ -410,17 +410,17 @@ func TestTapNoRedact(t *testing.T) {
return
}

apiServerUrl := GetApiServerUrl(defaultApiServerPort)
apiServerUrl := GetApiServerUrl(DefaultApiServerPort)

if err := WaitTapPodsReady(apiServerUrl); err != nil {
t.Errorf("failed to start tap pods on time, err: %v", err)
return
}

proxyUrl := GetProxyUrl(defaultNamespaceName, defaultServiceName)
proxyUrl := GetProxyUrl(DefaultNamespaceName, DefaultServiceName)
requestHeaders := map[string]string{"User-Header": "Mizu"}
requestBody := map[string]string{"User": "Mizu"}
for i := 0; i < defaultEntriesCount; i++ {
for i := 0; i < DefaultEntriesCount; i++ {
if _, requestErr := ExecuteHttpPostRequestWithHeaders(fmt.Sprintf("%v/post", proxyUrl), requestHeaders, requestBody); requestErr != nil {
t.Errorf("failed to send proxy request, err: %v", requestErr)
return

@@ -462,15 +462,15 @@ func TestTapRegexMasking(t *testing.T) {
return
}

apiServerUrl := GetApiServerUrl(defaultApiServerPort)
apiServerUrl := GetApiServerUrl(DefaultApiServerPort)

if err := WaitTapPodsReady(apiServerUrl); err != nil {
t.Errorf("failed to start tap pods on time, err: %v", err)
return
}

proxyUrl := GetProxyUrl(defaultNamespaceName, defaultServiceName)
for i := 0; i < defaultEntriesCount; i++ {
proxyUrl := GetProxyUrl(DefaultNamespaceName, DefaultServiceName)
for i := 0; i < DefaultEntriesCount; i++ {
response, requestErr := http.Post(fmt.Sprintf("%v/post", proxyUrl), "text/plain", bytes.NewBufferString("Mizu"))
if _, requestErr = ExecuteHttpRequest(response, requestErr); requestErr != nil {
t.Errorf("failed to send proxy request, err: %v", requestErr)

@@ -515,25 +515,25 @@ func TestTapIgnoredUserAgents(t *testing.T) {
return
}

apiServerUrl := GetApiServerUrl(defaultApiServerPort)
apiServerUrl := GetApiServerUrl(DefaultApiServerPort)

if err := WaitTapPodsReady(apiServerUrl); err != nil {
t.Errorf("failed to start tap pods on time, err: %v", err)
return
}

proxyUrl := GetProxyUrl(defaultNamespaceName, defaultServiceName)
proxyUrl := GetProxyUrl(DefaultNamespaceName, DefaultServiceName)

ignoredUserAgentCustomHeader := "Ignored-User-Agent"
headers := map[string]string{"User-Agent": ignoredUserAgentValue, ignoredUserAgentCustomHeader: ""}
for i := 0; i < defaultEntriesCount; i++ {
for i := 0; i < DefaultEntriesCount; i++ {
if _, requestErr := ExecuteHttpGetRequestWithHeaders(fmt.Sprintf("%v/get", proxyUrl), headers); requestErr != nil {
t.Errorf("failed to send proxy request, err: %v", requestErr)
return
}
}

for i := 0; i < defaultEntriesCount; i++ {
for i := 0; i < DefaultEntriesCount; i++ {
if _, requestErr := ExecuteHttpGetRequest(fmt.Sprintf("%v/get", proxyUrl)); requestErr != nil {
t.Errorf("failed to send proxy request, err: %v", requestErr)
return

@@ -569,7 +569,7 @@ func TestTapDumpLogs(t *testing.T) {
return
}

apiServerUrl := GetApiServerUrl(defaultApiServerPort)
apiServerUrl := GetApiServerUrl(DefaultApiServerPort)

if err := WaitTapPodsReady(apiServerUrl); err != nil {
t.Errorf("failed to start tap pods on time, err: %v", err)

@@ -651,7 +651,7 @@ func TestTapDumpLogs(t *testing.T) {
}

func TestIpResolving(t *testing.T) {
namespace := allNamespaces
namespace := AllNamespaces

t.Log("add permissions for ip-resolution for current user")
if err := ApplyKubeFilesForTest(
@@ -24,14 +24,14 @@ import (
)

const (
longRetriesCount = 100
shortRetriesCount = 10
defaultApiServerPort = shared.DefaultApiServerPort
defaultNamespaceName = "mizu-tests"
defaultServiceName = "httpbin"
defaultEntriesCount = 50
waitAfterTapPodsReady = 3 * time.Second
allNamespaces = ""
LongRetriesCount = 100
ShortRetriesCount = 10
DefaultApiServerPort = shared.DefaultApiServerPort
DefaultNamespaceName = "mizu-tests"
DefaultServiceName = "httpbin"
DefaultEntriesCount = 50
WaitAfterTapPodsReady = 3 * time.Second
AllNamespaces = ""
)

type PodDescriptor struct {

@@ -181,7 +181,7 @@ func ApplyKubeFile(kubeContext string, namespace string, filename string) (error
"--context", kubeContext,
"-f", filename,
}
if namespace != allNamespaces {
if namespace != AllNamespaces {
cmdArgs = append(cmdArgs, "-n", namespace)
}
cmd := exec.Command("kubectl", cmdArgs...)

@@ -199,7 +199,7 @@ func DeleteKubeFile(kubeContext string, namespace string, filename string) error
"--context", kubeContext,
"-f", filename,
}
if namespace != allNamespaces {
if namespace != AllNamespaces {
cmdArgs = append(cmdArgs, "-n", namespace)
}
cmd := exec.Command("kubectl", cmdArgs...)

@@ -214,7 +214,7 @@ func DeleteKubeFile(kubeContext string, namespace string, filename string) error
func getDefaultCommandArgs() []string {
setFlag := "--set"
telemetry := "telemetry=false"
agentImage := "agent-image=gcr.io/up9-docker-hub/mizu/ci:0.0"
agentImage := "agent-image=mizu/ci:0.0"
imagePullPolicy := "image-pull-policy=IfNotPresent"
headless := "headless=true"

@@ -265,11 +265,11 @@ func RunCypressTests(t *testing.T, cypressRunCmd string) {
t.Logf("%s", out)
}

func retriesExecute(retriesCount int, executeFunc func() error) error {
func RetriesExecute(retriesCount int, executeFunc func() error) error {
var lastError interface{}

for i := 0; i < retriesCount; i++ {
if err := tryExecuteFunc(executeFunc); err != nil {
if err := TryExecuteFunc(executeFunc); err != nil {
lastError = err

time.Sleep(1 * time.Second)

@@ -282,7 +282,7 @@ func retriesExecute(retriesCount int, executeFunc func() error) error {
return fmt.Errorf("reached max retries count, retries count: %v, last err: %v", retriesCount, lastError)
}

func tryExecuteFunc(executeFunc func() error) (err interface{}) {
func TryExecuteFunc(executeFunc func() error) (err interface{}) {
defer func() {
if panicErr := recover(); panicErr != nil {
err = panicErr

@@ -304,14 +304,14 @@ func WaitTapPodsReady(apiServerUrl string) error {
if connectedTappersCount == 0 {
return fmt.Errorf("no connected tappers running")
}
time.Sleep(waitAfterTapPodsReady)
time.Sleep(WaitAfterTapPodsReady)
return nil
}

return retriesExecute(longRetriesCount, tapPodsReadyFunc)
return RetriesExecute(LongRetriesCount, tapPodsReadyFunc)
}

func jsonBytesToInterface(jsonBytes []byte) (interface{}, error) {
func JsonBytesToInterface(jsonBytes []byte) (interface{}, error) {
var result interface{}
if parseErr := json.Unmarshal(jsonBytes, &result); parseErr != nil {
return nil, parseErr

@@ -334,7 +334,7 @@ func ExecuteHttpRequest(response *http.Response, requestErr error) (interface{},
return nil, readErr
}

return jsonBytesToInterface(data)
return JsonBytesToInterface(data)
}

func ExecuteHttpGetRequestWithHeaders(url string, headers map[string]string) (interface{}, error) {
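These hunks rename the acceptance-test helpers and constants from unexported (`retriesExecute`, `defaultApiServerPort`, ...) to exported (`RetriesExecute`, `DefaultApiServerPort`, ...), presumably so they can be reused outside the file they are defined in. A minimal usage sketch under that assumption; the import path is hypothetical and not shown in this diff:

```go
package acceptance_test

import (
	"fmt"
	"testing"

	// assumed import path for the shared acceptance-test utilities; adjust to the real layout
	testsUtils "github.com/up9inc/mizu/acceptanceTests"
)

func TestApiServerReachable(t *testing.T) {
	apiServerUrl := testsUtils.GetApiServerUrl(testsUtils.DefaultApiServerPort)

	// RetriesExecute retries the callback up to the given count, sleeping one
	// second between attempts and recovering from panics via TryExecuteFunc.
	err := testsUtils.RetriesExecute(testsUtils.ShortRetriesCount, func() error {
		if _, requestErr := testsUtils.ExecuteHttpGetRequest(fmt.Sprintf("%v/echo", apiServerUrl)); requestErr != nil {
			return fmt.Errorf("api server not reachable yet: %v", requestErr)
		}
		return nil
	})
	if err != nil {
		t.Errorf("api server never became reachable: %v", err)
	}
}
```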
@@ -22,7 +22,7 @@ require (
github.com/ory/kratos-client-go v0.8.2-alpha.1
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/stretchr/testify v1.7.0
github.com/up9inc/basenine/client/go v0.0.0-20220220204122-0ef8cb24fab1
github.com/up9inc/basenine/client/go v0.0.0-20220315070758-3a76cfc4378e
github.com/up9inc/mizu/shared v0.0.0
github.com/up9inc/mizu/tap v0.0.0
github.com/up9inc/mizu/tap/api v0.0.0

@@ -853,8 +853,8 @@ github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ=
github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw=
github.com/up9inc/basenine/client/go v0.0.0-20220220204122-0ef8cb24fab1 h1:0XN8s3HtwUBr9hbWRAFulFMsu1f2cabfJbwpz/sOoLA=
github.com/up9inc/basenine/client/go v0.0.0-20220220204122-0ef8cb24fab1/go.mod h1:SvJGPoa/6erhUQV7kvHBwM/0x5LyO6XaG2lUaCaKiUI=
github.com/up9inc/basenine/client/go v0.0.0-20220315070758-3a76cfc4378e h1:/9dFXqvRDHcwPQdIGHP6iz6M0iAWBPOxYf6C+Ntq5w0=
github.com/up9inc/basenine/client/go v0.0.0-20220315070758-3a76cfc4378e/go.mod h1:SvJGPoa/6erhUQV7kvHBwM/0x5LyO6XaG2lUaCaKiUI=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg=
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
@@ -86,7 +86,7 @@ func hostApi(socketHarOutputChannel chan<- *tapApi.OutputChannelItem) *gin.Engin
app := gin.Default()

app.GET("/echo", func(c *gin.Context) {
c.String(http.StatusOK, "Here is Mizu agent")
c.JSON(http.StatusOK, "Here is Mizu agent")
})

eventHandlers := api.RoutesEventHandlers{

@@ -140,7 +140,7 @@ func runInApiServerMode(namespace string) *gin.Engine {
if err := config.LoadConfig(); err != nil {
logger.Log.Fatalf("Error loading config file %v", err)
}
app.ConfigureBasenineServer(shared.BasenineHost, shared.BaseninePort, config.Config.MaxDBSizeBytes, config.Config.LogLevel)
app.ConfigureBasenineServer(shared.BasenineHost, shared.BaseninePort, config.Config.MaxDBSizeBytes, config.Config.LogLevel, config.Config.InsertionFilter)
startTime = time.Now().UnixNano() / int64(time.Millisecond)
api.StartResolving(namespace)
@@ -17,6 +17,12 @@ import (
tapApi "github.com/up9inc/mizu/tap/api"
)

var extensionsMap map[string]*tapApi.Extension // global

func InitExtensionsMap(ref map[string]*tapApi.Extension) {
extensionsMap = ref
}

type EventHandlers interface {
WebSocketConnect(socketId int, isTapper bool)
WebSocketDisconnect(socketId int, isTapper bool)

@@ -165,7 +171,8 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even
if params.EnableFullEntries {
message, _ = models.CreateFullEntryWebSocketMessage(entry)
} else {
base := tapApi.Summarize(entry)
extension := extensionsMap[entry.Protocol.Name]
base := extension.Dissector.Summarize(entry)
message, _ = models.CreateBaseEntryWebSocketMessage(base)
}
@@ -60,9 +60,10 @@ func LoadExtensions() {
})

controllers.InitExtensionsMap(ExtensionsMap)
api.InitExtensionsMap(ExtensionsMap)
}

func ConfigureBasenineServer(host string, port string, dbSize int64, logLevel logging.Level) {
func ConfigureBasenineServer(host string, port string, dbSize int64, logLevel logging.Level, insertionFilter string) {
if !wait.New(
wait.WithProto("tcp"),
wait.WithWait(200*time.Millisecond),

@@ -86,6 +87,11 @@ func ConfigureBasenineServer(host string, port string, dbSize int64, logLevel lo
}
}
}

// Set the insertion filter that comes from the config
if err := basenine.InsertionFilter(host, port, insertionFilter); err != nil {
logger.Log.Errorf("Error while setting the insertion filter: %v", err)
}
}

func GetEntryInputChannel() chan *tapApi.OutputChannelItem {
@@ -77,7 +77,8 @@ func GetEntries(c *gin.Context) {
return // exit
}

base := tapApi.Summarize(entry)
extension := extensionsMap[entry.Protocol.Name]
base := extension.Dissector.Summarize(entry)

dataSlice = append(dataSlice, base)
}

@@ -123,6 +124,7 @@ func GetEntry(c *gin.Context) {
}

extension := extensionsMap[entry.Protocol.Name]
base := extension.Dissector.Summarize(entry)
representation, bodySize, _ := extension.Dissector.Represent(entry.Request, entry.Response)

var rules []map[string]interface{}

@@ -142,6 +144,7 @@ func GetEntry(c *gin.Context) {
Representation: string(representation),
BodySize: bodySize,
Data: entry,
Base: base,
Rules: rules,
IsRulesEnabled: isRulesEnabled,
})
@@ -4,13 +4,14 @@ import (
"bytes"
"crypto/tls"
"encoding/json"
"net/http"
"sync"
"time"

"github.com/elastic/go-elasticsearch/v7"
"github.com/up9inc/mizu/shared"
"github.com/up9inc/mizu/shared/logger"
"github.com/up9inc/mizu/tap/api"
"net/http"
"sync"
"time"
)

type client struct {

@@ -31,6 +32,9 @@ func GetInstance() *client {

func (client *client) Configure(config shared.ElasticConfig) {
if config.Url == "" || config.User == "" || config.Password == "" {
if client.es != nil {
client.es = nil
}
logger.Log.Infof("No elastic configuration was supplied, elastic exporter disabled")
return
}

@@ -46,13 +50,13 @@ func (client *client) Configure(config shared.ElasticConfig) {

es, err := elasticsearch.NewClient(cfg)
if err != nil {
logger.Log.Fatalf("Failed to initialize elastic client %v", err)
logger.Log.Errorf("Failed to initialize elastic client %v", err)
}

// Have the client instance return a response
res, err := es.Info()
if err != nil {
logger.Log.Fatalf("Elastic client.Info() ERROR: %v", err)
logger.Log.Errorf("Elastic client.Info() ERROR: %v", err)
} else {
client.es = es
client.index = "mizu_traffic_http_" + time.Now().Format("2006_01_02_15_04")

@@ -76,11 +80,7 @@ type httpEntry struct {
CreatedAt time.Time `json:"createdAt"`
Request map[string]interface{} `json:"request"`
Response map[string]interface{} `json:"response"`
Summary string `json:"summary"`
Method string `json:"method"`
Status int `json:"status"`
ElapsedTime int64 `json:"elapsedTime"`
Path string `json:"path"`
}

func (client *client) PushEntry(entry *api.Entry) {

@@ -99,11 +99,7 @@ func (client *client) PushEntry(entry *api.Entry) {
CreatedAt: entry.StartTime,
Request: entry.Request,
Response: entry.Response,
Summary: entry.Summary,
Method: entry.Method,
Status: entry.Status,
ElapsedTime: entry.ElapsedTime,
Path: entry.Path,
}

entryJson, err := json.Marshal(entryToPush)
@@ -12,10 +12,6 @@ import (
"github.com/up9inc/mizu/tap"
)

func GetEntry(r *tapApi.Entry, v tapApi.DataUnmarshaler) error {
return v.UnmarshalData(r)
}

type TapConfig struct {
TappedNamespaces map[string]bool `json:"tappedNamespaces"`
}
@@ -33,10 +33,23 @@ func (g *oasGenerator) Start() {
g.entriesChan = make(chan EntryWithSource, 100) // buffer up to 100 entries for OAS processing
g.ServiceSpecs = &sync.Map{}
g.started = true
go instance.runGeneretor()
go instance.runGenerator()
}

func (g *oasGenerator) runGeneretor() {
func (g *oasGenerator) Stop() {
if !g.started {
return
}
g.cancel()
g.Reset()
g.started = false
}

func (g *oasGenerator) IsStarted() bool {
return g.started
}

func (g *oasGenerator) runGenerator() {
for {
select {
case <-g.ctx.Done():
@@ -32,6 +32,7 @@ type serviceMap struct {

type ServiceMap interface {
Enable()
Disable()
IsEnabled() bool
NewTCPEntry(source *tapApi.TCP, destination *tapApi.TCP, protocol *tapApi.Protocol)
GetStatus() ServiceMapStatus

@@ -159,6 +160,11 @@ func (s *serviceMap) Enable() {
s.enabled = true
}

func (s *serviceMap) Disable() {
s.Reset()
s.enabled = false
}

func (s *serviceMap) IsEnabled() bool {
return s.enabled
}
@@ -4,11 +4,10 @@ import (
"bytes"
"encoding/json"
"fmt"
"io"
"github.com/up9inc/mizu/cli/utils"
"io/ioutil"
"net/http"
"net/url"
"strings"
"time"

"github.com/up9inc/mizu/shared/kubernetes"

@@ -59,7 +58,7 @@ func (provider *Provider) TestConnection() error {

func (provider *Provider) isReachable() (bool, error) {
echoUrl := fmt.Sprintf("%s/echo", provider.url)
if _, err := provider.get(echoUrl); err != nil {
if _, err := utils.Get(echoUrl, provider.client); err != nil {
return false, err
} else {
return true, nil

@@ -72,7 +71,7 @@ func (provider *Provider) ReportTapperStatus(tapperStatus shared.TapperStatus) e
if jsonValue, err := json.Marshal(tapperStatus); err != nil {
return fmt.Errorf("failed Marshal the tapper status %w", err)
} else {
if _, err := provider.post(tapperStatusUrl, "application/json", bytes.NewBuffer(jsonValue)); err != nil {
if _, err := utils.Post(tapperStatusUrl, "application/json", bytes.NewBuffer(jsonValue), provider.client); err != nil {
return fmt.Errorf("failed sending to API server the tapped pods %w", err)
} else {
logger.Log.Debugf("Reported to server API about tapper status: %v", tapperStatus)

@@ -89,7 +88,7 @@ func (provider *Provider) ReportTappedPods(pods []core.Pod) error {
if jsonValue, err := json.Marshal(podInfos); err != nil {
return fmt.Errorf("failed Marshal the tapped pods %w", err)
} else {
if _, err := provider.post(tappedPodsUrl, "application/json", bytes.NewBuffer(jsonValue)); err != nil {
if _, err := utils.Post(tappedPodsUrl, "application/json", bytes.NewBuffer(jsonValue), provider.client); err != nil {
return fmt.Errorf("failed sending to API server the tapped pods %w", err)
} else {
logger.Log.Debugf("Reported to server API about %d taped pods successfully", len(podInfos))

@@ -101,7 +100,7 @@ func (provider *Provider) ReportTappedPods(pods []core.Pod) error {
func (provider *Provider) GetGeneralStats() (map[string]interface{}, error) {
generalStatsUrl := fmt.Sprintf("%s/status/general", provider.url)

response, requestErr := provider.get(generalStatsUrl)
response, requestErr := utils.Get(generalStatsUrl, provider.client)
if requestErr != nil {
return nil, fmt.Errorf("failed to get general stats for telemetry, err: %w", requestErr)
}

@@ -126,7 +125,7 @@ func (provider *Provider) GetVersion() (string, error) {
Method: http.MethodGet,
URL: versionUrl,
}
statusResp, err := provider.do(req)
statusResp, err := utils.Do(req, provider.client)
if err != nil {
return "", err
}

@@ -139,40 +138,3 @@ func (provider *Provider) GetVersion() (string, error) {

return versionResponse.Ver, nil
}

// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func (provider *Provider) get(url string) (*http.Response, error) {
return provider.checkError(provider.client.Get(url))
}

// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func (provider *Provider) post(url, contentType string, body io.Reader) (*http.Response, error) {
return provider.checkError(provider.client.Post(url, contentType, body))
}

// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func (provider *Provider) do(req *http.Request) (*http.Response, error) {
return provider.checkError(provider.client.Do(req))
}

func (provider *Provider) checkError(response *http.Response, errInOperation error) (*http.Response, error) {
if (errInOperation != nil) {
return response, errInOperation
// Check only if status != 200 (and not status >= 300). Agent APIs return only 200 on success.
} else if response.StatusCode != http.StatusOK {
body, err := ioutil.ReadAll(response.Body)
response.Body.Close()
response.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
if err != nil {
return response, err
}

errorMsg := strings.ReplaceAll((string(body)), "\n", ";")
return response, fmt.Errorf("got response with status code: %d, body: %s", response.StatusCode, errorMsg)
}

return response, nil
}
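The provider's private get/post/do wrappers and their checkError status check are dropped in favor of shared helpers in `cli/utils` (`utils.Get`, `utils.Post`, `utils.Do`), which this comparison does not show. A plausible sketch of those helpers, reconstructed from the call sites above and the deleted methods; the names and exact behavior are assumptions, not the verified implementation:

```go
package utils

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"strings"
)

// Get, Post, and Do wrap the standard http.Client calls and funnel every
// response through the same status-code check the provider used to perform.
func Get(url string, client *http.Client) (*http.Response, error) {
	return checkError(client.Get(url))
}

func Post(url, contentType string, body io.Reader, client *http.Client) (*http.Response, error) {
	return checkError(client.Post(url, contentType, body))
}

func Do(req *http.Request, client *http.Client) (*http.Response, error) {
	return checkError(client.Do(req))
}

func checkError(response *http.Response, errInOperation error) (*http.Response, error) {
	if errInOperation != nil {
		return response, errInOperation
	}
	// Agent APIs return only 200 on success, so anything else is treated as an error.
	if response.StatusCode != http.StatusOK {
		body, err := ioutil.ReadAll(response.Body)
		response.Body.Close()
		response.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind for the caller
		if err != nil {
			return response, err
		}
		errorMsg := strings.ReplaceAll(string(body), "\n", ";")
		return response, fmt.Errorf("got response with status code: %d, body: %s", response.StatusCode, errorMsg)
	}
	return response, nil
}
```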
cli/bucket/provider.go (new file, 42 lines)

@@ -0,0 +1,42 @@
package bucket

import (
"fmt"
"github.com/up9inc/mizu/cli/utils"
"io/ioutil"
"net/http"
"time"
)

type Provider struct {
url string
client *http.Client
}

const DefaultTimeout = 2 * time.Second

func NewProvider(url string, timeout time.Duration) *Provider {
return &Provider{
url: url,
client: &http.Client{
Timeout: timeout,
},
}
}

func (provider *Provider) GetInstallTemplate(templateName string) (string, error) {
url := fmt.Sprintf("%s/%v", provider.url, templateName)
response, err := utils.Get(url, provider.client)
if err != nil {
return "", err
}

defer response.Body.Close()

installTemplate, err := ioutil.ReadAll(response.Body)
if err != nil {
return "", err
}

return string(installTemplate), nil
}
@@ -65,7 +65,7 @@ func runMizuCheck() {
func checkKubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
logger.Log.Infof("\nkubernetes-api\n--------------------")

kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath())
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.KubeContext)
if err != nil {
logger.Log.Errorf("%v can't initialize the client, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return nil, nil, false
@@ -36,8 +36,8 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
return
}

apiProvider = apiserver.NewProvider(GetApiServerUrl(port), apiserver.DefaultRetries, apiserver.DefaultTimeout)
if err := apiProvider.TestConnection(); err != nil {
provider := apiserver.NewProvider(GetApiServerUrl(port), apiserver.DefaultRetries, apiserver.DefaultTimeout)
if err := provider.TestConnection(); err != nil {
logger.Log.Debugf("Couldn't connect using proxy, stopping proxy and trying to create port-forward")
if err := httpServer.Shutdown(ctx); err != nil {
logger.Log.Debugf("Error occurred while stopping proxy %v", errormessage.FormatError(err))

@@ -51,8 +51,8 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
return
}

apiProvider = apiserver.NewProvider(GetApiServerUrl(port), apiserver.DefaultRetries, apiserver.DefaultTimeout)
if err := apiProvider.TestConnection(); err != nil {
provider = apiserver.NewProvider(GetApiServerUrl(port), apiserver.DefaultRetries, apiserver.DefaultTimeout)
if err := provider.TestConnection(); err != nil {
logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Couldn't connect to API server, for more info check logs at %s", fsUtils.GetLogFilePath()))
cancel()
return

@@ -61,7 +61,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
}

func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath())
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.KubeContext)
if err != nil {
handleKubernetesProviderError(err)
return nil, err
@@ -3,7 +3,6 @@ package cmd
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/up9inc/mizu/cli/telemetry"
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
)
|
||||
|
||||
var installCmd = &cobra.Command{
|
||||
@@ -11,14 +10,7 @@ var installCmd = &cobra.Command{
|
||||
Short: "Installs mizu components",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
go telemetry.ReportRun("install", nil)
|
||||
logger.Log.Infof("This command has been deprecated, please use helm as described below.\n\n")
|
||||
|
||||
logger.Log.Infof("To install stable build of Mizu on your cluster using helm, run the following command:")
|
||||
logger.Log.Infof(" helm install mizu up9mizu --repo https://static.up9.com/mizu/helm --namespace=mizu --create-namespace\n\n")
|
||||
|
||||
logger.Log.Infof("To install development build of Mizu on your cluster using helm, run the following command:")
|
||||
logger.Log.Infof(" helm install mizu up9mizu --repo https://static.up9.com/mizu/helm-develop --namespace=mizu --create-namespace\n")
|
||||
|
||||
runMizuInstall()
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
19
cli/cmd/installRunner.go
Normal file
@@ -0,0 +1,19 @@
package cmd

import (
    "fmt"
    "github.com/up9inc/mizu/cli/bucket"
    "github.com/up9inc/mizu/cli/config"
    "github.com/up9inc/mizu/shared/logger"
)

func runMizuInstall() {
    bucketProvider := bucket.NewProvider(config.Config.Install.TemplateUrl, bucket.DefaultTimeout)
    installTemplate, err := bucketProvider.GetInstallTemplate(config.Config.Install.TemplateName)
    if err != nil {
        logger.Log.Errorf("Failed getting install template, err: %v", err)
        return
    }

    fmt.Print(installTemplate)
}
@@ -3,9 +3,10 @@ package cmd
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/cli/up9"
|
||||
"os"
|
||||
|
||||
"github.com/up9inc/mizu/cli/up9"
|
||||
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/up9inc/mizu/cli/auth"
|
||||
@@ -115,6 +116,7 @@ func init() {
|
||||
tapCmd.Flags().StringSliceP(configStructs.PlainTextFilterRegexesTapName, "r", defaultTapConfig.PlainTextFilterRegexes, "List of regex expressions that are used to filter matching values from text/plain http bodies")
|
||||
tapCmd.Flags().Bool(configStructs.DisableRedactionTapName, defaultTapConfig.DisableRedaction, "Disables redaction of potentially sensitive request/response headers and body values")
|
||||
tapCmd.Flags().String(configStructs.HumanMaxEntriesDBSizeTapName, defaultTapConfig.HumanMaxEntriesDBSize, "Override the default max entries db size")
|
||||
tapCmd.Flags().String(configStructs.InsertionFilterName, defaultTapConfig.InsertionFilter, "Set the insertion filter. Accepts string or a file path.")
|
||||
tapCmd.Flags().Bool(configStructs.DryRunTapName, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
|
||||
tapCmd.Flags().StringP(configStructs.WorkspaceTapName, "w", defaultTapConfig.Workspace, "Uploads traffic to your UP9 workspace for further analysis (requires auth)")
|
||||
tapCmd.Flags().String(configStructs.EnforcePolicyFile, defaultTapConfig.EnforcePolicyFile, "Yaml file path with policy rules")
|
||||
|
||||
@@ -154,6 +154,7 @@ func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
|
||||
func getTapMizuAgentConfig() *shared.MizuAgentConfig {
|
||||
mizuAgentConfig := shared.MizuAgentConfig{
|
||||
MaxDBSizeBytes: config.Config.Tap.MaxEntriesDBSizeBytes(),
|
||||
InsertionFilter: config.Config.Tap.GetInsertionFilter(),
|
||||
AgentImage: config.Config.AgentImage,
|
||||
PullPolicy: config.Config.ImagePullPolicyStr,
|
||||
LogLevel: config.Config.LogLevel(),
|
||||
|
||||
@@ -23,6 +23,7 @@ const (
|
||||
type ConfigStruct struct {
|
||||
Tap configStructs.TapConfig `yaml:"tap"`
|
||||
Check configStructs.CheckConfig `yaml:"check"`
|
||||
Install configStructs.InstallConfig `yaml:"install"`
|
||||
Version configStructs.VersionConfig `yaml:"version"`
|
||||
View configStructs.ViewConfig `yaml:"view"`
|
||||
Logs configStructs.LogsConfig `yaml:"logs"`
|
||||
@@ -36,6 +37,7 @@ type ConfigStruct struct {
|
||||
Telemetry bool `yaml:"telemetry" default:"true"`
|
||||
DumpLogs bool `yaml:"dump-logs" default:"false"`
|
||||
KubeConfigPathStr string `yaml:"kube-config-path"`
|
||||
KubeContext string `yaml:"kube-context"`
|
||||
ConfigFilePath string `yaml:"config-path,omitempty" readonly:""`
|
||||
HeadlessMode bool `yaml:"headless" default:"false"`
|
||||
LogLevelStr string `yaml:"log-level,omitempty" default:"INFO" readonly:""`
|
||||
|
||||
6
cli/config/configStructs/installConfig.go
Normal file
@@ -0,0 +1,6 @@
package configStructs

type InstallConfig struct {
    TemplateUrl  string `yaml:"template-url" default:"https://storage.googleapis.com/static.up9.io/mizu/helm-template"`
    TemplateName string `yaml:"template-name" default:"helm-template.yaml"`
}
@@ -3,10 +3,16 @@ package configStructs
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"regexp"
|
||||
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"github.com/up9inc/mizu/shared"
|
||||
|
||||
basenine "github.com/up9inc/basenine/server/lib"
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
"github.com/up9inc/mizu/shared/units"
|
||||
)
|
||||
|
||||
@@ -18,6 +24,7 @@ const (
|
||||
PlainTextFilterRegexesTapName = "regex-masking"
|
||||
DisableRedactionTapName = "no-redact"
|
||||
HumanMaxEntriesDBSizeTapName = "max-entries-db-size"
|
||||
InsertionFilterName = "insertion-filter"
|
||||
DryRunTapName = "dry-run"
|
||||
WorkspaceTapName = "workspace"
|
||||
EnforcePolicyFile = "traffic-validation-file"
|
||||
@@ -27,26 +34,27 @@ const (
|
||||
)
|
||||
|
||||
type TapConfig struct {
|
||||
UploadIntervalSec int `yaml:"upload-interval" default:"10"`
|
||||
PodRegexStr string `yaml:"regex" default:".*"`
|
||||
GuiPort uint16 `yaml:"gui-port" default:"8899"`
|
||||
ProxyHost string `yaml:"proxy-host" default:"127.0.0.1"`
|
||||
Namespaces []string `yaml:"namespaces"`
|
||||
Analysis bool `yaml:"analysis" default:"false"`
|
||||
AllNamespaces bool `yaml:"all-namespaces" default:"false"`
|
||||
PlainTextFilterRegexes []string `yaml:"regex-masking"`
|
||||
IgnoredUserAgents []string `yaml:"ignored-user-agents"`
|
||||
DisableRedaction bool `yaml:"no-redact" default:"false"`
|
||||
HumanMaxEntriesDBSize string `yaml:"max-entries-db-size" default:"200MB"`
|
||||
DryRun bool `yaml:"dry-run" default:"false"`
|
||||
Workspace string `yaml:"workspace"`
|
||||
EnforcePolicyFile string `yaml:"traffic-validation-file"`
|
||||
ContractFile string `yaml:"contract"`
|
||||
AskUploadConfirmation bool `yaml:"ask-upload-confirmation" default:"true"`
|
||||
ApiServerResources shared.Resources `yaml:"api-server-resources"`
|
||||
TapperResources shared.Resources `yaml:"tapper-resources"`
|
||||
ServiceMesh bool `yaml:"service-mesh" default:"false"`
|
||||
Tls bool `yaml:"tls" default:"false"`
|
||||
UploadIntervalSec int `yaml:"upload-interval" default:"10"`
|
||||
PodRegexStr string `yaml:"regex" default:".*"`
|
||||
GuiPort uint16 `yaml:"gui-port" default:"8899"`
|
||||
ProxyHost string `yaml:"proxy-host" default:"127.0.0.1"`
|
||||
Namespaces []string `yaml:"namespaces"`
|
||||
Analysis bool `yaml:"analysis" default:"false"`
|
||||
AllNamespaces bool `yaml:"all-namespaces" default:"false"`
|
||||
PlainTextFilterRegexes []string `yaml:"regex-masking"`
|
||||
IgnoredUserAgents []string `yaml:"ignored-user-agents"`
|
||||
DisableRedaction bool `yaml:"no-redact" default:"false"`
|
||||
HumanMaxEntriesDBSize string `yaml:"max-entries-db-size" default:"200MB"`
|
||||
InsertionFilter string `yaml:"insertion-filter" default:""`
|
||||
DryRun bool `yaml:"dry-run" default:"false"`
|
||||
Workspace string `yaml:"workspace"`
|
||||
EnforcePolicyFile string `yaml:"traffic-validation-file"`
|
||||
ContractFile string `yaml:"contract"`
|
||||
AskUploadConfirmation bool `yaml:"ask-upload-confirmation" default:"true"`
|
||||
ApiServerResources shared.Resources `yaml:"api-server-resources"`
|
||||
TapperResources shared.Resources `yaml:"tapper-resources"`
|
||||
ServiceMesh bool `yaml:"service-mesh" default:"false"`
|
||||
Tls bool `yaml:"tls" default:"false"`
|
||||
}
|
||||
|
||||
func (config *TapConfig) PodRegex() *regexp.Regexp {
|
||||
@@ -59,6 +67,25 @@ func (config *TapConfig) MaxEntriesDBSizeBytes() int64 {
    return maxEntriesDBSizeBytes
}

func (config *TapConfig) GetInsertionFilter() string {
    insertionFilter := config.InsertionFilter
    if fs.ValidPath(insertionFilter) {
        if _, err := os.Stat(insertionFilter); err == nil {
            b, err := ioutil.ReadFile(insertionFilter)
            if err != nil {
                logger.Log.Warningf(uiUtils.Warning, fmt.Sprintf("Couldn't read the file on path: %s, err: %v", insertionFilter, err))
            } else {
                insertionFilter = string(b)
            }
        }
    }
    _, err := basenine.Parse(insertionFilter)
    if err != nil {
        logger.Log.Warningf(uiUtils.Warning, fmt.Sprintf("Insertion filter syntax error: %v", err))
    }
    return insertionFilter
}

func (config *TapConfig) Validate() error {
    _, compileErr := regexp.Compile(config.PodRegexStr)
    if compileErr != nil {
@@ -11,6 +11,7 @@ require (
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
|
||||
github.com/spf13/cobra v1.3.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/up9inc/basenine/server/lib v0.0.0-20220315070758-3a76cfc4378e
|
||||
github.com/up9inc/mizu/shared v0.0.0
|
||||
github.com/up9inc/mizu/tap/api v0.0.0
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
|
||||
@@ -32,8 +33,11 @@ require (
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/alecthomas/participle/v2 v2.0.0-alpha7 // indirect
|
||||
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
|
||||
github.com/clbanning/mxj/v2 v2.5.5 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dlclark/regexp2 v1.4.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
@@ -68,6 +72,7 @@ require (
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/ohler55/ojg v1.12.13 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
|
||||
12
cli/go.sum
12
cli/go.sum
@@ -83,6 +83,10 @@ github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tN
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/alecthomas/participle/v2 v2.0.0-alpha7 h1:cK4vjj0VSgb3lN1nuKA5F7dw+1s1pWBe5bx7nNCnN+c=
|
||||
github.com/alecthomas/participle/v2 v2.0.0-alpha7/go.mod h1:NumScqsC42o9x+dGj8/YqsIfhrIQjFEOFovxotbBirA=
|
||||
github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1 h1:GDQdwm/gAcJcLAKQQZGOJ4knlw+7rfEQQcmwTbt4p5E=
|
||||
github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
@@ -116,6 +120,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/clbanning/mxj/v2 v2.5.5 h1:oT81vUeEiQQ/DcHbzSytRngP6Ky9O+L+0Bw0zSJag9E=
|
||||
github.com/clbanning/mxj/v2 v2.5.5/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
@@ -149,6 +155,8 @@ github.com/denisbrodbeck/machineid v1.0.1 h1:geKr9qtkB876mXguW2X6TU4ZynleN6ezuMS
|
||||
github.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dlclark/regexp2 v1.4.0 h1:F1rxgk7p4uKjwIQxBs9oAXe5CqrXlCduYEJvrF4u93E=
|
||||
github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
@@ -479,6 +487,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/ohler55/ojg v1.12.13 h1:FvfVpYzLgMraLcg3rrXiRXaihOP6fnzQNEU9YyZ/AmM=
|
||||
github.com/ohler55/ojg v1.12.13/go.mod h1:LBbIVRAgoFbYBXQhRhuEpaJIqq+goSO63/FQ+nyJU88=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
@@ -590,6 +600,8 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/up9inc/basenine/server/lib v0.0.0-20220315070758-3a76cfc4378e h1:reG/QwyxdfvGObfdrae7DZc3rTMiGwQ6S/4PRkwtBoE=
|
||||
github.com/up9inc/basenine/server/lib v0.0.0-20220315070758-3a76cfc4378e/go.mod h1:ZIkxWiJm65jYQIso9k+OZKhR7gQ1we2jNyE2kQX9IQI=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
|
||||
github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk=
|
||||
|
||||
47
cli/utils/httpUtils.go
Normal file
@@ -0,0 +1,47 @@
package utils

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "strings"
)

// Get - When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func Get(url string, client *http.Client) (*http.Response, error) {
    return checkError(client.Get(url))
}

// Post - When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func Post(url, contentType string, body io.Reader, client *http.Client) (*http.Response, error) {
    return checkError(client.Post(url, contentType, body))
}

// Do - When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func Do(req *http.Request, client *http.Client) (*http.Response, error) {
    return checkError(client.Do(req))
}

func checkError(response *http.Response, errInOperation error) (*http.Response, error) {
    if errInOperation != nil {
        return response, errInOperation
        // Check only if status != 200 (and not status >= 300). Agent APIs return only 200 on success.
    } else if response.StatusCode != http.StatusOK {
        body, err := ioutil.ReadAll(response.Body)
        response.Body.Close()
        response.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
        if err != nil {
            return response, err
        }

        errorMsg := strings.ReplaceAll(string(body), "\n", ";")
        return response, fmt.Errorf("got response with status code: %d, body: %s", response.StatusCode, errorMsg)
    }

    return response, nil
}
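A short, hypothetical usage sketch of these helpers, illustrating the rewind behaviour of `checkError`: even when a non-200 status is turned into an error, the returned response still carries a readable body because it was re-wrapped with `io.NopCloser`. The URL here is only a placeholder.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"time"

	"github.com/up9inc/mizu/cli/utils"
)

func main() {
	client := &http.Client{Timeout: 2 * time.Second}

	resp, err := utils.Get("http://example.com/status", client)
	if err != nil {
		// resp is non-nil when the error came from a non-200 status;
		// its body was rewound by checkError and can still be inspected.
		if resp != nil {
			body, _ := ioutil.ReadAll(resp.Body)
			resp.Body.Close()
			fmt.Printf("request failed: %v (body: %s)\n", err, body)
		}
		return
	}

	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}
```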
@@ -1,23 +0,0 @@
|
||||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
@@ -1,7 +0,0 @@
|
||||
apiVersion: v2
|
||||
name: mizuhelm
|
||||
description: Mizu helm chart for Kubernetes
|
||||
type: application
|
||||
version: 0.1.1
|
||||
kubeVersion: ">= 1.16.0-0"
|
||||
appVersion: "0.21.29"
|
||||
@@ -1,13 +0,0 @@
|
||||
kind: PersistentVolumeClaim
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: {{ .Values.volumeClaim.name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
limits:
|
||||
storage: 700M
|
||||
requests:
|
||||
storage: 700M
|
||||
@@ -1,30 +0,0 @@
|
||||
{{- if .Values.rbac.create -}}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: {{ .Values.rbac.name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
mizu-cli-version: {{ .Chart.AppVersion }}
|
||||
heritage: {{ .Release.Service }}
|
||||
release: {{ .Release.Name }}
|
||||
rules:
|
||||
- apiGroups: [ "", "extensions", "apps" ]
|
||||
resources: [ "endpoints", "pods", "services", "namespaces" ]
|
||||
verbs: [ "get", "list", "watch" ]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: {{ .Values.rbac.roleBindingName }}
|
||||
labels:
|
||||
mizu-cli-version: {{ .Chart.AppVersion }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: {{ .Values.rbac.name }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ .Values.serviceAccountName }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- end -}}
|
||||
@@ -1,8 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ .Values.configMap.name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
data:
|
||||
mizu-config.json: >-
|
||||
{"maxDBSizeBytes":200000000,"agentImage":"{{ .Values.container.tapper.image.repository }}:{{ .Values.container.tapper.image.tag }}","pullPolicy":"Always","logLevel":4,"tapperResources":{"CpuLimit":"750m","MemoryLimit":"1Gi","CpuRequests":"50m","MemoryRequests":"50Mi"},"mizuResourceNamespace":"{{ .Release.Namespace }}","agentDatabasePath":"/app/data/","standaloneMode":true}
|
||||
@@ -1,128 +0,0 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ .Values.pod.name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app: {{ .Values.pod.name }}
|
||||
spec:
|
||||
replicas: {{ .Values.deployment.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {{ .Values.pod.name }}
|
||||
template:
|
||||
metadata:
|
||||
name: {{ .Values.pod.name }}
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
app: {{ .Values.pod.name }}
|
||||
spec:
|
||||
volumes:
|
||||
- name: {{ .Values.configMap.name }}
|
||||
configMap:
|
||||
name: {{ .Values.configMap.name }}
|
||||
defaultMode: 420
|
||||
- name: {{ .Values.volumeClaim.name }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ .Values.volumeClaim.name }}
|
||||
containers:
|
||||
- name: {{ .Values.pod.name }}
|
||||
image: "{{ .Values.container.mizuAgent.image.repository }}:{{ .Values.container.mizuAgent.image.tag | default .Chart.AppVersion }}"
|
||||
command:
|
||||
- ./mizuagent
|
||||
- '--api-server'
|
||||
env:
|
||||
- name: SYNC_ENTRIES_CONFIG
|
||||
- name: LOG_LEVEL
|
||||
value: INFO
|
||||
resources:
|
||||
limits:
|
||||
cpu: 750m
|
||||
memory: 1Gi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 50Mi
|
||||
volumeMounts:
|
||||
- name: {{ .Values.configMap.name }}
|
||||
mountPath: /app/config/
|
||||
- name: {{ .Values.volumeClaim.name }}
|
||||
mountPath: /app/data/
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /echo
|
||||
port: {{ .Values.pod.port }}
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 1
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: Always
|
||||
- name: {{ .Values.container.basenine.name }}
|
||||
image: "{{ .Values.container.basenine.image.repository }}:{{ .Values.container.basenine.image.tag | default .Chart.AppVersion }}"
|
||||
command:
|
||||
- /basenine
|
||||
args:
|
||||
- '-addr'
|
||||
- 0.0.0.0
|
||||
- '-port'
|
||||
- '9099'
|
||||
- '-persistent'
|
||||
workingDir: /app/data/
|
||||
resources:
|
||||
limits:
|
||||
cpu: 750m
|
||||
memory: 1Gi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 50Mi
|
||||
volumeMounts:
|
||||
- name: {{ .Values.configMap.name }}
|
||||
mountPath: /app/config/
|
||||
- name: {{ .Values.volumeClaim.name }}
|
||||
mountPath: /app/data/
|
||||
readinessProbe:
|
||||
tcpSocket:
|
||||
port: 9099
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 1
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: Always
|
||||
- name: kratos
|
||||
image: "{{ .Values.container.kratos.image.repository }}:{{ .Values.container.kratos.image.tag | default .Chart.AppVersion }}"
|
||||
resources:
|
||||
limits:
|
||||
cpu: 750m
|
||||
memory: 1Gi
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 50Mi
|
||||
volumeMounts:
|
||||
- name: {{ .Values.configMap.name }}
|
||||
mountPath: /app/config/
|
||||
- name: {{ .Values.volumeClaim.name }}
|
||||
mountPath: /app/data/
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health/ready
|
||||
port: 4433
|
||||
scheme: HTTP
|
||||
timeoutSeconds: 1
|
||||
periodSeconds: 1
|
||||
successThreshold: 1
|
||||
failureThreshold: 3
|
||||
terminationMessagePath: /dev/termination-log
|
||||
terminationMessagePolicy: File
|
||||
imagePullPolicy: Always
|
||||
restartPolicy: Always
|
||||
terminationGracePeriodSeconds: 0
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
serviceAccountName: {{ .Values.serviceAccountName }}
|
||||
serviceAccount: {{ .Values.serviceAccountName }}
|
||||
securityContext: { }
|
||||
schedulerName: default-scheduler
|
||||
@@ -1,29 +0,0 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: {{ .Values.roleName }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
mizu-cli-version: {{ .Chart.AppVersion }}
|
||||
rules:
|
||||
- apiGroups: [ "apps" ]
|
||||
resources: [ "daemonsets" ]
|
||||
verbs: [ "patch", "get", "list", "create", "delete" ]
|
||||
- apiGroups: [ "events.k8s.i" ]
|
||||
resources: [ "events" ]
|
||||
verbs: [ "list", "watch" ]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: {{ .Values.roleBindingName }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: {{ .Values.roleName }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ .Values.serviceAccountName }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
---
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ .Values.service.name }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
- name: api
|
||||
port: {{ .Values.service.port }}
|
||||
targetPort: {{ .Values.pod.port }}
|
||||
protocol: TCP
|
||||
selector:
|
||||
app: {{ .Values.pod.name }}
|
||||
@@ -1,7 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ .Values.serviceAccountName }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
mizu-cli-version: {{ .Chart.AppVersion }}
|
||||
@@ -1,51 +0,0 @@
|
||||
# Default values for mizu.
|
||||
rbac:
|
||||
create: true
|
||||
name: "mizu-cluster-role"
|
||||
roleBindingName: "mizu-role-binding"
|
||||
|
||||
serviceAccountName: "mizu-service-account"
|
||||
|
||||
roleName: "mizu-role-daemon"
|
||||
roleBindingName: "mizu-role-binding-daemon"
|
||||
|
||||
service:
|
||||
name: "mizu-api-server"
|
||||
type: ClusterIP
|
||||
port: 80
|
||||
|
||||
pod:
|
||||
name: "mizu-api-server"
|
||||
port: 8899
|
||||
|
||||
container:
|
||||
mizuAgent:
|
||||
image:
|
||||
repository: "gcr.io/up9-docker-hub/mizu/main"
|
||||
tag: "0.22.0"
|
||||
tapper:
|
||||
image:
|
||||
repository: "gcr.io/up9-docker-hub/mizu/main"
|
||||
tag: "0.22.0"
|
||||
basenine:
|
||||
name: "basenine"
|
||||
port: 9099
|
||||
image:
|
||||
repository: "ghcr.io/up9inc/basenine"
|
||||
tag: "v0.3.0"
|
||||
kratos:
|
||||
name: "kratos"
|
||||
port: 4433
|
||||
image:
|
||||
repository: "gcr.io/up9-docker-hub/mizu-kratos/stable"
|
||||
tag: "0.0.0"
|
||||
|
||||
deployment:
|
||||
replicaCount: 1
|
||||
|
||||
configMap:
|
||||
name: "mizu-config"
|
||||
|
||||
volumeClaim:
|
||||
create: true
|
||||
name: "mizu-volume-claim"
|
||||
@@ -1,15 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
GCP_PROJECT=up9-docker-hub
|
||||
REPOSITORY=gcr.io/$GCP_PROJECT
|
||||
SERVER_NAME=mizu
|
||||
GIT_BRANCH=ci
|
||||
|
||||
DOCKER_REPO=$REPOSITORY/$SERVER_NAME/$GIT_BRANCH
|
||||
VER=${VER=0.0}
|
||||
|
||||
DOCKER_TAGGED_BUILD="$DOCKER_REPO:$VER"
|
||||
|
||||
echo "building $DOCKER_TAGGED_BUILD"
|
||||
docker build -t ${DOCKER_TAGGED_BUILD} --build-arg VER=${VER} --build-arg BUILD_TIMESTAMP=${BUILD_TIMESTAMP} --build-arg GIT_BRANCH=${GIT_BRANCH} --build-arg COMMIT_HASH=${COMMIT_HASH} .
|
||||
45
devops/check_modified_files.sh
Executable file
@@ -0,0 +1,45 @@
#!/bin/bash
paths_arr=( "$@" )

printf "\n========== List modified files ==========\n"
echo "$(git diff --name-only HEAD^ HEAD)"

printf "\n========== List paths to match and check existence ==========\n"
for path in ${paths_arr[*]}
do
  if [ -f "$path" ] || [ -d "$path" ]; then
    echo "$path - found"
  else
    echo "$path - not found - exiting with failure"
    exit 1
  fi
done

printf "\n========== Check paths of modified files ==========\n"
git diff --name-only HEAD^ HEAD > files.txt
matched=false
while IFS= read -r file
do
  for path in ${paths_arr[*]}
  do
    if [[ $file == $path* ]]; then
      echo "$file - match path: $path"
      matched=true
      break
    fi
  done
  if [[ $matched == true ]]; then
    break
  else
    echo "$file - does not match any given path"
  fi
done < files.txt

printf "\n========== Result ==========\n"
if [[ $matched = true ]]; then
  echo "match found"
  echo "::set-output name=matched::true"
else
  echo "no match found"
  echo "::set-output name=matched::false"
fi
@@ -1,89 +0,0 @@
|
||||

|
||||
# Configuration options for Mizu
|
||||
|
||||
Mizu has many configuration options and flags that affect its behavior. Their values can be modified via command-line interface or via configuration file.
|
||||
|
||||
The list below covers most useful configuration options.
|
||||
|
||||
### Config file
|
||||
Mizu behaviour can be modified via YAML configuration file located at `$HOME/.mizu/config.yaml`.
|
||||
|
||||
Default values for the file can be viewed via `mizu config` command.
|
||||
|
||||
### Applying config options via command line
|
||||
To apply any configuration option via command line, use `--set` followed by the config option name and value, as in the following example:
|
||||
|
||||
```
|
||||
mizu tap --set tap.dry-run=true
|
||||
```
|
||||
|
||||
Please make sure to use the full option name (`tap.dry-run` as opposed to `dry-run` only), including the section (`tap`, in this example).
|
||||
|
||||
## General section
|
||||
|
||||
* `agent-image` - full path to Mizu container image, in format `full.path.to/your/image:tag`. Default value is set at compilation time to `gcr.io/up9-docker-hub/mizu/<branch>:<version>`
|
||||
|
||||
* `dump-logs` - if set to `true`, saves log files for all Mizu components (tapper, api-server, CLI) in a zip file under `$HOME/.mizu`. Default value is `false`
|
||||
|
||||
* `image-pull-policy` - container image pull policy for Kubernetes, default value `Always`. Other accepted values are `Never` or `IfNotPresent`. Please mind the implications when changing this.
|
||||
|
||||
* `kube-config-path` - path to alternative kubeconfig file to use for all interactions with Kubernetes cluster. By default - `$HOME/.kubeconfig`
|
||||
|
||||
* `mizu-resources-namespace` - Kubernetes namespace where all Mizu-related resources are created. Default value `mizu`
|
||||
|
||||
* `telemetry` - report anonymous usage statistics. Default value `true`
|
||||
|
||||
## section `tap`
|
||||
* `namespaces` - list of namespace names, in which pods are tapped. Default value is empty, meaning only pods in the current namespace are tapped. Typically supplied as command line options.
|
||||
|
||||
* `all-namespaces` - special flag indicating whether Mizu should search and tap pods, matching the regex, in all namespaces. Default is `false`. Please use with caution, tapping too many pods can affect resource consumption.
|
||||
|
||||
* `dry-run` - if true, Mizu will print list of pods matching the supplied (or default) regex and exit without actually tapping the traffic. Default value is `false`. Typically supplied as command-line option `--dry-run`
|
||||
|
||||
* `proxy-host` - IP address on which proxy to Mizu API service is launched; should be accessible at `proxy-host:gui-port`. Default value is `127.0.0.1`
|
||||
|
||||
* `gui-port` - port on which Mizu GUI is accessible, default value is `8899` (stands for `8899/tcp`)
|
||||
|
||||
* `regex` - regular expression used to match pods to tap, when no regex is given in the command line; default value is `.*`, which means `mizu tap` with no additional arguments runs as `mizu tap .*` (i.e. tap all pods found in current workspace)
|
||||
|
||||
* `no-redact` - instructs Mizu whether to redact certain sensitive fields in the collected traffic. Default value is `false`, i.e. Mizu will replace sensitive data values with a *REDACTED* placeholder.
|
||||
|
||||
* `ignored-user-agents` - array of strings, describing HTTP *User-Agent* header values to be ignored. Useful to ignore Kubernetes healthcheck and other similar noisy periodic probes. Default value is empty.
|
||||
|
||||
* `max-entries-db-size` - maximal size of traffic stored locally in the `mizu-api-server` pod. When this size is reached, older traffic is overwritten with new entries. Default value is `200MB`
|
||||
|
||||
|
||||
### section `tap.api-server-resources`
|
||||
Kubernetes request and limit values for the `mizu-api-server` pod.
|
||||
Parameters and their default values are the same as used natively in Kubernetes pods:
|
||||
|
||||
```
|
||||
cpu-limit: 750m
|
||||
memory-limit: 1Gi
|
||||
cpu-requests: 50m
|
||||
memory-requests: 50Mi
|
||||
```
|
||||
|
||||
### section `tap.tapper-resources`
|
||||
Kubernetes request and limit values for the `mizu-tapper` pods (launched via daemonset).
|
||||
Parameters and their default values are the same as used natively in Kubernetes pods:
|
||||
|
||||
```
|
||||
cpu-limit: 750m
|
||||
memory-limit: 1Gi
|
||||
cpu-requests: 50m
|
||||
memory-requests: 50Mi
|
||||
```
|
||||
|
||||
|
||||
--
|
||||
|
||||
* `analysis` - enables advanced analysis of the collected traffic in the UP9 cloud platform. Default value is `false`
|
||||
|
||||
* `upload-interval` - in the *analysis* mode, push traffic to UP9 cloud every `upload-interval` seconds. Default value is `10` seconds
|
||||
|
||||
* `ask-upload-confirmation` - request user confirmation when uploading tapped data to UP9 cloud
|
||||
|
||||
|
||||
## section `version`
|
||||
* `debug` - print additional version and build information when `mizu version` command is invoked. Default value is `false`.
|
||||
@@ -1,172 +0,0 @@
|
||||
# OpenAPI Specification (OAS) Contract Monitoring
|
||||
|
||||
An OAS/Swagger file can contain schemas under `parameters` and `responses` fields. With `--contract catalogue.yaml`
|
||||
CLI option, you can pass your API description to Mizu and the traffic will automatically be validated
|
||||
against the contracts.
|
||||
|
||||
Below is an example of an OAS/Swagger file from [Sock Shop](https://microservices-demo.github.io/) microservice demo
|
||||
that contains a bunch of contracts:
|
||||
|
||||
```yaml
|
||||
openapi: 3.0.1
|
||||
info:
|
||||
title: Catalogue resources
|
||||
version: 1.0.0
|
||||
description: ""
|
||||
license:
|
||||
name: MIT
|
||||
url: http://github.com/gruntjs/grunt/blob/master/LICENSE-MIT
|
||||
paths:
|
||||
/catalogue:
|
||||
get:
|
||||
description: Catalogue API
|
||||
operationId: List catalogue
|
||||
responses:
|
||||
200:
|
||||
description: ""
|
||||
content:
|
||||
application/json;charset=UTF-8:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/Listresponse'
|
||||
/catalogue/{id}:
|
||||
get:
|
||||
operationId: Get an item
|
||||
parameters:
|
||||
- name: id
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
example: a0a4f044-b040-410d-8ead-4de0446aec7e
|
||||
responses:
|
||||
200:
|
||||
description: ""
|
||||
content:
|
||||
application/json; charset=UTF-8:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Getanitemresponse'
|
||||
/catalogue/size:
|
||||
get:
|
||||
operationId: Get size
|
||||
responses:
|
||||
200:
|
||||
description: ""
|
||||
content:
|
||||
application/json;charset=UTF-8:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Getsizeresponse'
|
||||
/tags:
|
||||
get:
|
||||
operationId: List_
|
||||
responses:
|
||||
200:
|
||||
description: ""
|
||||
content:
|
||||
application/json;charset=UTF-8:
|
||||
schema:
|
||||
$ref: '#/components/schemas/Listresponse3'
|
||||
components:
|
||||
schemas:
|
||||
Listresponse:
|
||||
title: List response
|
||||
required:
|
||||
- count
|
||||
- description
|
||||
- id
|
||||
- imageUrl
|
||||
- name
|
||||
- price
|
||||
- tag
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
description:
|
||||
type: string
|
||||
imageUrl:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
price:
|
||||
type: number
|
||||
format: double
|
||||
count:
|
||||
type: integer
|
||||
format: int32
|
||||
tag:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
Getanitemresponse:
|
||||
title: Get an item response
|
||||
required:
|
||||
- count
|
||||
- description
|
||||
- id
|
||||
- imageUrl
|
||||
- name
|
||||
- price
|
||||
- tag
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
description:
|
||||
type: string
|
||||
imageUrl:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
price:
|
||||
type: number
|
||||
format: double
|
||||
count:
|
||||
type: integer
|
||||
format: int32
|
||||
tag:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
Getsizeresponse:
|
||||
title: Get size response
|
||||
required:
|
||||
- size
|
||||
type: object
|
||||
properties:
|
||||
size:
|
||||
type: integer
|
||||
format: int32
|
||||
Listresponse3:
|
||||
title: List response3
|
||||
required:
|
||||
- tags
|
||||
type: object
|
||||
properties:
|
||||
tags:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
```
|
||||
|
||||
Pass it to Mizu through the CLI option: `mizu tap -n sock-shop --contract catalogue.yaml`
|
||||
|
||||
Now Mizu will monitor the traffic against these contracts.
|
||||
|
||||
If an entry fails to comply with the contract, it's marked with a `Breach` notice in the UI.
|
||||
The reason for the failure can be seen under the `CONTRACT` tab in the details layout.
|
||||
|
||||
### Notes
|
||||
|
||||
Make sure that you:
|
||||
|
||||
- specified the `openapi` version
|
||||
- specified the `info.version` version in the YAML
|
||||
- and removed the `servers` field from the YAML
|
||||
|
||||
Otherwise the OAS file cannot be recognized. (see [this issue](https://github.com/getkin/kin-openapi/issues/356))
|
||||
@@ -1,74 +0,0 @@
|
||||
# Mizu install standalone
|
||||
|
||||
Mizu can be run detached from the CLI using the install command: `mizu install`. This type of Mizu instance will run
|
||||
indefinitely in the cluster.
|
||||
|
||||
Please note that install standalone requires you to have RBAC creation permissions, see the [permissions](PERMISSIONS.md)
|
||||
doc for more details.
|
||||
|
||||
```bash
|
||||
$ mizu install
|
||||
```
|
||||
|
||||
## Stop mizu install
|
||||
|
||||
To stop the detached mizu instance and clean all cluster side resources, run `mizu clean`
|
||||
|
||||
```bash
|
||||
$ mizu clean # mizu will continue running in cluster until clean is executed
|
||||
Removing mizu resources
|
||||
```
|
||||
|
||||
## Expose mizu web app
|
||||
|
||||
Mizu can be exposed at a later stage in any of the following ways:
|
||||
|
||||
### Using mizu view command
|
||||
|
||||
In a machine that can access both the cluster and a browser, you can run `mizu view` command which creates a proxy.
|
||||
Besides that, all the regular ways to expose k8s pods are valid.
|
||||
|
||||
```bash
|
||||
$ mizu view
|
||||
Establishing connection to k8s cluster...
|
||||
Mizu is available at http://localhost:8899
|
||||
^C
|
||||
..
|
||||
```
|
||||
|
||||
### Port Forward
|
||||
|
||||
```bash
|
||||
$ kubectl port-forward -n mizu deployment/mizu-api-server 8899:8899
|
||||
```
|
||||
|
||||
### NodePort
|
||||
|
||||
```bash
|
||||
$ kubectl expose -n mizu deployment mizu-api-server --name mizu-node-port --type NodePort --port 80 --target-port 8899
|
||||
```
|
||||
|
||||
Mizu's IP is the IP of any node (get the IP with `kubectl get nodes -o wide`) and the port is the target port of the new
|
||||
service (`kubectl get services -n mizu mizu-node-port`). Note that this method will expose Mizu to public access if your
|
||||
nodes are public.
|
||||
|
||||
### LoadBalancer
|
||||
|
||||
```bash
|
||||
$ kubectl expose deployment -n mizu --port 80 --target-port 8899 mizu-api-server --type=LoadBalancer --name=mizu-lb
|
||||
service/mizu-lb exposed
|
||||
..
|
||||
|
||||
$ kubectl get services -n mizu
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
mizu-api-server ClusterIP 10.107.200.100 <none> 80/TCP 5m5s
|
||||
mizu-lb LoadBalancer 10.107.200.101 34.77.120.116 80:30141/TCP 76s
|
||||
```
|
||||
|
||||
Note that `LoadBalancer` services only work on supported clusters (usually cloud providers) and might incur extra costs
|
||||
|
||||
If you changed the `mizu-resources-namespace` value, make sure the `-n mizu` flag of the `kubectl expose` command is
|
||||
changed to the value of `mizu-resources-namespace`
|
||||
|
||||
Mizu will now be available either by running `mizu view` or by accessing the `EXTERNAL-IP` of the `mizu-lb` service
through your browser.
|
||||
@@ -1,88 +0,0 @@
|
||||

|
||||
|
||||
# Kubernetes permissions for MIZU
|
||||
|
||||
This document describes in detail all permissions required for full and correct operation of Mizu.
|
||||
|
||||
## Editing permissions
|
||||
|
||||
During installation, Mizu creates a `ServiceAccount` and the roles it requires. No further action is required.
|
||||
However, if there is a need, it is possible to make changes to Mizu permissions.
|
||||
|
||||
### Adding permissions on top of Mizu's defaults
|
||||
|
||||
Mizu pods use the `ServiceAccount` `mizu-service-account`. Permissions can be added to Mizu by creating `ClusterRoleBindings` and `RoleBindings` that target that `ServiceAccount`.
|
||||
|
||||
For example, in order to add a `PodSecurityPolicy` which allows Mizu to run `hostNetwork` and `privileged` pods, create the following resources:
|
||||
|
||||
```yaml
|
||||
apiVersion: policy/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: my-mizu-psp
|
||||
spec:
|
||||
hostNetwork: true
|
||||
privileged: true
|
||||
allowedCapabilities:
|
||||
- "*"
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
seLinux:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
volumes:
|
||||
- "*"
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: my-mizu-clusterrole
|
||||
rules:
|
||||
- apiGroups:
|
||||
- policy
|
||||
resources:
|
||||
- podsecuritypolicies
|
||||
verbs:
|
||||
- use
|
||||
resourceNames:
|
||||
- my-mizu-psp
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: my-mizu-clusterrolebinding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: my-mizu-clusterrole
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: mizu-service-account # The service account used by Mizu
|
||||
namespace: mizu
|
||||
```
|
||||
|
||||
With this setup, when Mizu starts and creates `mizu-service-account`, this account will be subject to `my-mizu-psp` via `my-mizu-clusterrolebinding`.
|
||||
When Mizu cleans up resources, the above resources will remain available for future executions.
|
||||
|
||||
### Replacing Mizu's default permissions with custom permissions
|
||||
|
||||
Mizu does not create its `ServiceAccounts`, `ClusterRoles`, `ClusterRoleBindings`, `Roles` or `RoleBindings` if resources by the same name already exist. In order to replace Mizu's defaults, simply create your resources before running Mizu.
|
||||
|
||||
For example, creating a `ClusterRole` by the name of `mizu-cluster-role` before running Mizu will cause Mizu to use that `ClusterRole` instead of the default one created by Mizu.
|
||||
|
||||
Notes:
|
||||
|
||||
1. The resource names must match Mizu's default names.
|
||||
2. User-managed resources must not have the label `app.kubernetes.io/managed-by=mizu`. Remove the label or set it to another value.
|
||||
|
||||
## List of permissions
|
||||
|
||||
The permissions that are required to run Mizu depend on the configuration.
|
||||
By default Mizu requires cluster-wide permissions.
|
||||
If these are not available to the user, it is possible to run Mizu in namespace-restricted mode which has a reduced set of requirements.
|
||||
This is done by setting the `mizu-resources-namespace` config option. See [configuration](CONFIGURATION.md) for instructions.
|
||||
|
||||
The different requirements are listed in [the permission templates dir](../cli/cmd/permissionFiles)
|
||||
@@ -1,75 +0,0 @@
|
||||
|
||||
# Traffic validation rules
|
||||
|
||||
This feature allows you to define a set of simple rules and test the traffic against them.
Such validation may test responses for specific JSON fields, headers, etc.
|
||||
|
||||
## Examples
|
||||
|
||||
Example 1: HTTP request (REST API call) that didn't pass validation is highlighted in red
|
||||
|
||||

|
||||
|
||||
- - -
|
||||
|
||||
Example 2: Details pane shows the validation rule details and whether it passed or failed
|
||||
|
||||

|
||||
|
||||
|
||||
## How to use
|
||||
|
||||
To use this feature - create simple rules file (see details below) and pass this file as parameter to `mizu tap` command. For example, if rules are stored in file named `rules.yaml` — run the following command:
|
||||
|
||||
```shell
|
||||
mizu tap --traffic-validation-file rules.yaml
|
||||
```
|
||||
|
||||
|
||||
## Rules file structure
|
||||
|
||||
The structure of the traffic-validation-file is:
|
||||
|
||||
* `name`: string, name of the rule
|
||||
* `type`: string, type of the rule, must be `json` or `header` or `slo`
|
||||
* `key`: string, [jsonpath](https://code.google.com/archive/p/jsonpath/wikis/Javascript.wiki) used only in `json` or `header` type
|
||||
* `value`: string, [regex](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) used only in `json` or `header` type
|
||||
* `service`: string, [regex](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) service name to filter
|
||||
* `path`: string, [regex](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions) URL path to filter
|
||||
* `response-time`: integer, time in ms of the expected latency.
|
||||
|
||||
|
||||
### For example:
|
||||
|
||||
```yaml
|
||||
rules:
|
||||
- name: holy-in-name-property
|
||||
type: json
|
||||
key: "$.name"
|
||||
value: "Holy"
|
||||
service: "catalogue.*"
|
||||
path: "catalogue.*"
|
||||
- name: content-length-header
|
||||
type: header
|
||||
key: "Content-Le.*"
|
||||
value: "(\\d+(?:\\.\\d+)?)"
|
||||
- name: latency-test
|
||||
type: slo
|
||||
response-time: 1
|
||||
service: "carts.*"
|
||||
```
|
||||
|
||||
|
||||
### Explanation:
|
||||
|
||||
* First rule `holy-in-name-property`:
|
||||
|
||||
> This rule is applied to all requests made to `catalogue.*` services with `catalogue.*` in the URL path and a JSON response containing a `$.name` field. If the value of `$.name` is `Holy`, the entry is marked as success; otherwise it is marked as failure.
|
||||
|
||||
* Second rule `content-length-header`:
|
||||
|
||||
> This rule is applied to all requests that have a header matching `Content-Le.*`. If its value matches `(\\d+(?:\\.\\d+)?)` (a number), the entry is marked as success; otherwise it is marked as failure.
|
||||
|
||||
* Third rule `latency-test`:
|
||||
|
||||
> This rule is applied to all requests made to `carts.*` services. If the latency of the response is greater than `1ms`, the entry is marked as failure; otherwise it is marked as success.
|
||||
@@ -1,95 +0,0 @@
|
||||

|
||||
|
||||
# Service mesh mutual tls (mtls) with Mizu
|
||||
|
||||
This document describes how the Mizu tapper handles workloads configured with mtls, which encrypts the internal traffic between services in a cluster.
|
||||
|
||||
The list of service meshes supported by Mizu includes:
|
||||
|
||||
- Istio
|
||||
- Linkerd (beta)
|
||||
|
||||
## Installation
|
||||
|
||||
### Optional: Allow source IP resolving in Istio
|
||||
|
||||
When using Istio, in order to enable Mizu to resolve source IPs to names, turn on the [use_remote_address](https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#x-forwarded-for) option in Istio sidecar Envoys.
|
||||
This setting causes the Envoys to append to the `X-Forwarded-For` request header. Mizu in turn uses the `X-Forwarded-For` header to determine the true source IPs.
|
||||
One way to turn on the `use_remote_address` HTTP connection manager option is by applying an `EnvoyFilter`:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: EnvoyFilter
|
||||
metadata:
|
||||
name: mizu-xff
|
||||
namespace: istio-system # as defined in meshConfig resource.
|
||||
spec:
|
||||
configPatches:
|
||||
- applyTo: NETWORK_FILTER
|
||||
match:
|
||||
context: SIDECAR_OUTBOUND # will match outbound listeners in all sidecars
|
||||
patch:
|
||||
operation: MERGE
|
||||
value:
|
||||
typed_config:
|
||||
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager"
|
||||
use_remote_address: true
|
||||
```
|
||||
|
||||
Save the above text to `mizu-xff-envoyfilter.yaml` and run `kubectl apply -f mizu-xff-envoyfilter.yaml`.
|
||||
|
||||
With Istio, Mizu does not resolve source IPs for non-HTTP traffic.
|
||||
|
||||
## Implementation
|
||||
|
||||
### Istio support
|
||||
|
||||
#### The connection between Istio and Envoy
|
||||
|
||||
In order to implement its service mesh capabilities, [Istio](https://istio.io) uses an [Envoy](https://www.envoyproxy.io) sidecar in front of every pod in the cluster. The Envoy is responsible for the mtls communication, and that's why we are focusing on Envoy proxy.
|
||||
|
||||
In the future we might see more players in that field, then we'll have to either add support for each of them or go with a unified eBPF solution.
|
||||
|
||||
#### Network namespaces
|
||||
|
||||
A [linux network namespace](https://man7.org/linux/man-pages/man7/network_namespaces.7.html) is an isolation mechanism that limits a process's view of the network. In the container world it is used to isolate one container from another; in the Kubernetes world it is used to isolate one pod from another. That means two containers running in the same pod share the same network namespace, and a container can reach another container in the same pod by accessing `localhost`.
|
||||
|
||||
An Envoy proxy configured with mtls receives the inbound traffic directed to the pod, decrypts it and sends it via `localhost` to the target container.
|
||||
|
||||
#### Tapping mtls traffic
|
||||
|
||||
In order for Mizu to see the decrypted traffic, it needs to listen in the same network namespace as the target pod. Multiple threads of the same process can have different network namespaces.
|
||||
|
||||
[gopacket](https://github.com/google/gopacket) uses [libpcap](https://github.com/the-tcpdump-group/libpcap) by default for capturing the traffic. Libpcap doesn't support network namespaces, so we can't ask it to listen to traffic in a different namespace. However, we can change the network namespace of the calling thread and then start libpcap to see the traffic in that namespace.
|
||||
|
||||
#### Finding the network namespace of a running process
|
||||
|
||||
The network namespace of a running process can be found in `/proc/PID/ns/net` link. Once we have this link, we can ask Linux to change the network namespace of a thread to this one.
|
||||
|
||||
This means that Mizu needs to have access to the `/proc` filesystem (procfs) of the node it runs on; a minimal sketch of this namespace switch is shown below.
|
||||
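The following is a minimal sketch of that thread-level namespace switch, assuming the third-party `github.com/vishvananda/netns` and `github.com/google/gopacket/pcap` packages. It illustrates the technique described above, not necessarily the exact code path the tapper uses.

```go
package main

import (
	"fmt"
	"runtime"
	"time"

	"github.com/google/gopacket/pcap"
	"github.com/vishvananda/netns"
)

// captureInPodNamespace switches the calling OS thread into the network
// namespace found at nsPath (e.g. /proc/<PID>/ns/net) and opens a libpcap
// handle there, so the capture sees the pod's decrypted localhost traffic.
func captureInPodNamespace(nsPath string) error {
	// Pin the goroutine to one OS thread; setns affects threads, not processes.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	podNs, err := netns.GetFromPath(nsPath)
	if err != nil {
		return fmt.Errorf("open netns %s: %w", nsPath, err)
	}
	defer podNs.Close()

	if err := netns.Set(podNs); err != nil {
		return fmt.Errorf("enter netns %s: %w", nsPath, err)
	}

	// libpcap itself is namespace-unaware; it simply captures in whatever
	// namespace the current thread happens to be in.
	handle, err := pcap.OpenLive("any", 65536, true, time.Second)
	if err != nil {
		return err
	}
	defer handle.Close()

	packet, _, err := handle.ReadPacketData()
	if err != nil {
		return err
	}
	fmt.Printf("captured %d bytes inside %s\n", len(packet), nsPath)
	return nil
}
```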
|
||||
#### Finding the network namespace of a running pod
|
||||
|
||||
In order for Mizu to be able to listen to mtls traffic, it needs to get the PIDs of the running pods, filter them according to the user filters and then start listening to the traffic inside their network namespaces.
|
||||
|
||||
There is no official way in Kubernetes to get from a pod to a PID. The CRI implementation purposefully doesn't force a pod to be a process on the host; it can just as well be a virtual machine, like [Kata Containers](https://katacontainers.io).
|
||||
|
||||
While we could provide a solution for each CRI (like Docker, Containerd and CRI-O), it's better to have a unified one. To achieve that, Mizu scans all the processes on the host and finds the Envoy processes using their `/proc/PID/exe` link.
|
||||
|
||||
Once Mizu detects an Envoy process, it needs to check whether this specific Envoy process is relevant according to the user filters. The user filters are a list of `CLUSTER_IPS`; the tapper gets them via the `TapOpts.FilterAuthorities` list.
|
||||
|
||||
Istio passes an `INSTANCE_IP` environment variable to every Envoy proxy process. By examining the Envoy process's environment variables we can see whether it's relevant or not. Examining a process's environment variables is done by reading the `/proc/PID/environ` file; a sketch of this scan is shown below.
|
||||
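A hedged sketch of that scan using only the Go standard library; names such as `findEnvoyPIDs` are illustrative, and the real tapper logic may differ in its details.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// findEnvoyPIDs walks /proc, keeps processes whose executable looks like an
// Envoy binary, and returns the PIDs whose INSTANCE_IP matches one of the
// cluster IPs the tapper was asked to tap.
func findEnvoyPIDs(clusterIPs map[string]bool) ([]string, error) {
	entries, err := os.ReadDir("/proc")
	if err != nil {
		return nil, err
	}

	var pids []string
	for _, entry := range entries {
		pid := entry.Name()
		if !entry.IsDir() || pid[0] < '0' || pid[0] > '9' {
			continue // not a /proc/<PID> directory
		}

		// /proc/PID/exe is a symlink to the running binary.
		exe, err := os.Readlink(filepath.Join("/proc", pid, "exe"))
		if err != nil || !strings.Contains(filepath.Base(exe), "envoy") {
			continue
		}

		// /proc/PID/environ holds NUL-separated KEY=VALUE pairs.
		environ, err := os.ReadFile(filepath.Join("/proc", pid, "environ"))
		if err != nil {
			continue
		}
		for _, kv := range strings.Split(string(environ), "\x00") {
			if strings.HasPrefix(kv, "INSTANCE_IP=") && clusterIPs[strings.TrimPrefix(kv, "INSTANCE_IP=")] {
				pids = append(pids, pid)
				break
			}
		}
	}
	return pids, nil
}

func main() {
	pids, err := findEnvoyPIDs(map[string]bool{"10.0.0.12": true})
	if err != nil {
		fmt.Println("scan failed:", err)
		return
	}
	fmt.Println("relevant Envoy PIDs:", pids)
}
```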
|
||||
#### Edge cases
|
||||
|
||||
The method we use to find Envoy processes and correlate them to the cluster IPs may be inaccurate in certain situations. If, for example, a user runs an Envoy process manually and sets its `INSTANCE_IP` environment variable to one of the `CLUSTER_IPS` the tapper gets, then Mizu will capture traffic for it.
|
||||
|
||||
## Development
|
||||
|
||||
In order to create a service mesh setup for development, follow those steps:
|
||||
|
||||
1. Deploy a sample application to a Kubernetes cluster; the sample application needs to make internal service-to-service calls
2. SSH to one of the nodes and run `tcpdump`
3. Make sure you can see the internal service-to-service calls in plain text
4. Deploy a service mesh (Istio, Linkerd) to the cluster - make sure it is attached to all pods of the sample application, and that it is configured with mtls (the default)
5. Run `tcpdump` again and make sure you no longer see the internal service-to-service calls in plain text
|
||||
@@ -325,15 +325,22 @@ func (tapperSyncer *MizuTapperSyncer) updateMizuTappers() error {
|
||||
tapperSyncer.config.MizuApiFilteringOptions,
|
||||
tapperSyncer.config.LogLevel,
|
||||
tapperSyncer.config.ServiceMesh,
|
||||
tapperSyncer.config.Tls,
|
||||
); err != nil {
|
||||
tapperSyncer.config.Tls); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Log.Debugf("Successfully created %v tappers", len(tapperSyncer.nodeToTappedPodMap))
|
||||
} else {
|
||||
if err := tapperSyncer.kubernetesProvider.RemoveDaemonSet(tapperSyncer.context, tapperSyncer.config.MizuResourcesNamespace, TapperDaemonSetName); err != nil {
|
||||
if err := tapperSyncer.kubernetesProvider.ResetMizuTapperDaemonSet(
|
||||
tapperSyncer.context,
|
||||
tapperSyncer.config.MizuResourcesNamespace,
|
||||
TapperDaemonSetName,
|
||||
tapperSyncer.config.AgentImage,
|
||||
TapperPodName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Log.Debugf("Successfully reset tapper daemon set")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
@@ -56,8 +56,8 @@ const (
|
||||
sysfsMountPath = "/sys"
|
||||
)
|
||||
|
||||
func NewProvider(kubeConfigPath string) (*Provider, error) {
|
||||
kubernetesConfig := loadKubernetesConfiguration(kubeConfigPath)
|
||||
func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) {
|
||||
kubernetesConfig := loadKubernetesConfiguration(kubeConfigPath, contextName)
|
||||
restClientConfig, err := kubernetesConfig.ClientConfig()
|
||||
if err != nil {
|
||||
if clientcmd.IsEmptyConfig(err) {
|
||||
@@ -449,9 +449,9 @@ func (provider *Provider) CanI(ctx context.Context, namespace string, resource s
|
||||
Spec: auth.SelfSubjectAccessReviewSpec{
|
||||
ResourceAttributes: &auth.ResourceAttributes{
|
||||
Namespace: namespace,
|
||||
Resource: resource,
|
||||
Verb: verb,
|
||||
Group: group,
|
||||
Resource: resource,
|
||||
Verb: verb,
|
||||
Group: group,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -995,6 +995,55 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
|
||||
return err
|
||||
}
|
||||
|
||||
func (provider *Provider) ResetMizuTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string) error {
|
||||
agentContainer := applyconfcore.Container()
|
||||
agentContainer.WithName(tapperPodName)
|
||||
agentContainer.WithImage(podImage)
|
||||
|
||||
nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
|
||||
nodeSelectorRequirement.WithKey("mizu-non-existing-label")
|
||||
nodeSelectorRequirement.WithOperator(core.NodeSelectorOpExists)
|
||||
nodeSelectorTerm := applyconfcore.NodeSelectorTerm()
|
||||
nodeSelectorTerm.WithMatchExpressions(nodeSelectorRequirement)
|
||||
nodeSelector := applyconfcore.NodeSelector()
|
||||
nodeSelector.WithNodeSelectorTerms(nodeSelectorTerm)
|
||||
nodeAffinity := applyconfcore.NodeAffinity()
|
||||
nodeAffinity.WithRequiredDuringSchedulingIgnoredDuringExecution(nodeSelector)
|
||||
affinity := applyconfcore.Affinity()
|
||||
affinity.WithNodeAffinity(nodeAffinity)
|
||||
|
||||
podSpec := applyconfcore.PodSpec()
|
||||
podSpec.WithContainers(agentContainer)
|
||||
podSpec.WithAffinity(affinity)
|
||||
|
||||
podTemplate := applyconfcore.PodTemplateSpec()
|
||||
podTemplate.WithLabels(map[string]string{
|
||||
"app": tapperPodName,
|
||||
LabelManagedBy: provider.managedBy,
|
||||
LabelCreatedBy: provider.createdBy,
|
||||
})
|
||||
podTemplate.WithSpec(podSpec)
|
||||
|
||||
labelSelector := applyconfmeta.LabelSelector()
|
||||
labelSelector.WithMatchLabels(map[string]string{"app": tapperPodName})
|
||||
|
||||
applyOptions := metav1.ApplyOptions{
|
||||
Force: true,
|
||||
FieldManager: fieldManagerName,
|
||||
}
|
||||
|
||||
daemonSet := applyconfapp.DaemonSet(daemonSetName, namespace)
|
||||
daemonSet.
|
||||
WithLabels(map[string]string{
|
||||
LabelManagedBy: provider.managedBy,
|
||||
LabelCreatedBy: provider.createdBy,
|
||||
}).
|
||||
WithSpec(applyconfapp.DaemonSetSpec().WithSelector(labelSelector).WithTemplate(podTemplate))
|
||||
|
||||
_, err := provider.clientSet.AppsV1().DaemonSets(namespace).Apply(ctx, daemonSet, applyOptions)
|
||||
return err
|
||||
}
|
||||
|
||||
func (provider *Provider) listPodsImpl(ctx context.Context, regex *regexp.Regexp, namespaces []string, listOptions metav1.ListOptions) ([]core.Pod, error) {
|
||||
var pods []core.Pod
|
||||
for _, namespace := range namespaces {
|
||||
@@ -1038,7 +1087,7 @@ func (provider *Provider) ListAllRunningPodsMatchingRegex(ctx context.Context, r
|
||||
return matchingPods, nil
|
||||
}
|
||||
|
||||
func(provider *Provider) ListPodsByAppLabel(ctx context.Context, namespaces string, labelName string) ([]core.Pod, error) {
|
||||
func (provider *Provider) ListPodsByAppLabel(ctx context.Context, namespaces string, labelName string) ([]core.Pod, error) {
|
||||
pods, err := provider.clientSet.CoreV1().Pods(namespaces).List(ctx, metav1.ListOptions{LabelSelector: fmt.Sprintf("app=%s", labelName)})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -1212,7 +1261,7 @@ func ValidateKubernetesVersion(serverVersionSemVer *semver.SemVersion) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadKubernetesConfiguration(kubeConfigPath string) clientcmd.ClientConfig {
|
||||
func loadKubernetesConfiguration(kubeConfigPath string, context string) clientcmd.ClientConfig {
|
||||
logger.Log.Debugf("Using kube config %s", kubeConfigPath)
|
||||
configPathList := filepath.SplitList(kubeConfigPath)
|
||||
configLoadingRules := &clientcmd.ClientConfigLoadingRules{}
|
||||
@@ -1221,7 +1270,7 @@ func loadKubernetesConfiguration(kubeConfigPath string) clientcmd.ClientConfig {
|
||||
} else {
|
||||
configLoadingRules.Precedence = configPathList
|
||||
}
|
||||
contextName := ""
|
||||
contextName := context
|
||||
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
|
||||
configLoadingRules,
|
||||
&clientcmd.ConfigOverrides{
|
||||
|
||||
@@ -35,6 +35,7 @@ type Resources struct {
|
||||
|
||||
type MizuAgentConfig struct {
|
||||
MaxDBSizeBytes int64 `json:"maxDBSizeBytes"`
|
||||
InsertionFilter string `json:"insertionFilter"`
|
||||
AgentImage string `json:"agentImage"`
|
||||
PullPolicy string `json:"pullPolicy"`
|
||||
LogLevel logging.Level `json:"logLevel"`
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
@@ -18,6 +19,9 @@ import (
|
||||
|
||||
const mizuTestEnvVar = "MIZU_TEST"
|
||||
|
||||
var UnknownIp net.IP = net.IP{0, 0, 0, 0}
|
||||
var UnknownPort uint16 = 0
|
||||
|
||||
type Protocol struct {
|
||||
Name string `json:"name"`
|
||||
LongName string `json:"longName"`
|
||||
@@ -100,6 +104,7 @@ type Dissector interface {
|
||||
Ping()
|
||||
Dissect(b *bufio.Reader, isClient bool, tcpID *TcpID, counterPair *CounterPair, superTimer *SuperTimer, superIdentifier *SuperIdentifier, emitter Emitter, options *TrafficFilteringOptions, reqResMatcher RequestResponseMatcher) error
|
||||
Analyze(item *OutputChannelItem, resolvedSource string, resolvedDestination string, namespace string) *Entry
|
||||
Summarize(entry *Entry) *BaseEntry
|
||||
Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error)
|
||||
Macros() map[string]string
|
||||
NewResponseRequestMatcher() RequestResponseMatcher
|
||||
@@ -135,12 +140,7 @@ type Entry struct {
|
||||
StartTime time.Time `json:"startTime"`
|
||||
Request map[string]interface{} `json:"request"`
|
||||
Response map[string]interface{} `json:"response"`
|
||||
Summary string `json:"summary"`
|
||||
Method string `json:"method"`
|
||||
Status int `json:"status"`
|
||||
ElapsedTime int64 `json:"elapsedTime"`
|
||||
Path string `json:"path"`
|
||||
IsOutgoing bool `json:"isOutgoing,omitempty"`
|
||||
Rules ApplicableRules `json:"rules,omitempty"`
|
||||
ContractStatus ContractStatus `json:"contractStatus,omitempty"`
|
||||
ContractRequestReason string `json:"contractRequestReason,omitempty"`
|
||||
@@ -154,6 +154,7 @@ type EntryWrapper struct {
|
||||
Representation string `json:"representation"`
|
||||
BodySize int64 `json:"bodySize"`
|
||||
Data *Entry `json:"data"`
|
||||
Base *BaseEntry `json:"base"`
|
||||
Rules []map[string]interface{} `json:"rulesMatched,omitempty"`
|
||||
IsRulesEnabled bool `json:"isRulesEnabled"`
|
||||
}
|
||||
@@ -161,11 +162,12 @@ type EntryWrapper struct {
|
||||
type BaseEntry struct {
|
||||
Id uint `json:"id"`
|
||||
Protocol Protocol `json:"proto,omitempty"`
|
||||
Url string `json:"url,omitempty"`
|
||||
Path string `json:"path,omitempty"`
|
||||
Summary string `json:"summary,omitempty"`
|
||||
StatusCode int `json:"status"`
|
||||
SummaryQuery string `json:"summaryQuery,omitempty"`
|
||||
Status int `json:"status"`
|
||||
StatusQuery string `json:"statusQuery"`
|
||||
Method string `json:"method,omitempty"`
|
||||
MethodQuery string `json:"methodQuery,omitempty"`
|
||||
Timestamp int64 `json:"timestamp,omitempty"`
|
||||
Source *TCP `json:"src"`
|
||||
Destination *TCP `json:"dst"`
|
||||
@@ -190,44 +192,6 @@ type Contract struct {
|
||||
Content string `json:"content"`
|
||||
}
|
||||
|
||||
func Summarize(entry *Entry) *BaseEntry {
|
||||
return &BaseEntry{
|
||||
Id: entry.Id,
|
||||
Protocol: entry.Protocol,
|
||||
Path: entry.Path,
|
||||
Summary: entry.Summary,
|
||||
StatusCode: entry.Status,
|
||||
Method: entry.Method,
|
||||
Timestamp: entry.Timestamp,
|
||||
Source: entry.Source,
|
||||
Destination: entry.Destination,
|
||||
IsOutgoing: entry.IsOutgoing,
|
||||
Latency: entry.ElapsedTime,
|
||||
Rules: entry.Rules,
|
||||
ContractStatus: entry.ContractStatus,
|
||||
}
|
||||
}
|
||||
|
||||
type DataUnmarshaler interface {
|
||||
UnmarshalData(*Entry) error
|
||||
}
|
||||
|
||||
func (bed *BaseEntry) UnmarshalData(entry *Entry) error {
|
||||
bed.Protocol = entry.Protocol
|
||||
bed.Id = entry.Id
|
||||
bed.Path = entry.Path
|
||||
bed.Summary = entry.Summary
|
||||
bed.StatusCode = entry.Status
|
||||
bed.Method = entry.Method
|
||||
bed.Timestamp = entry.Timestamp
|
||||
bed.Source = entry.Source
|
||||
bed.Destination = entry.Destination
|
||||
bed.IsOutgoing = entry.IsOutgoing
|
||||
bed.Latency = entry.ElapsedTime
|
||||
bed.ContractStatus = entry.ContractStatus
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
TABLE string = "table"
|
||||
BODY string = "body"
|
||||
|
||||
@@ -13,4 +13,4 @@ test-pull-bin:
|
||||
|
||||
test-pull-expect:
|
||||
@mkdir -p expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect/amqp/\* expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect3/amqp/\* expect
|
||||
|
||||
@@ -219,31 +219,6 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
|
||||
request := item.Pair.Request.Payload.(map[string]interface{})
|
||||
reqDetails := request["details"].(map[string]interface{})
|
||||
|
||||
summary := ""
|
||||
switch request["method"] {
|
||||
case basicMethodMap[40]:
|
||||
summary = reqDetails["exchange"].(string)
|
||||
case basicMethodMap[60]:
|
||||
summary = reqDetails["exchange"].(string)
|
||||
case exchangeMethodMap[10]:
|
||||
summary = reqDetails["exchange"].(string)
|
||||
case queueMethodMap[10]:
|
||||
summary = reqDetails["queue"].(string)
|
||||
case connectionMethodMap[10]:
|
||||
summary = fmt.Sprintf(
|
||||
"%s.%s",
|
||||
strconv.Itoa(int(reqDetails["versionMajor"].(float64))),
|
||||
strconv.Itoa(int(reqDetails["versionMinor"].(float64))),
|
||||
)
|
||||
case connectionMethodMap[50]:
|
||||
summary = reqDetails["replyText"].(string)
|
||||
case queueMethodMap[20]:
|
||||
summary = reqDetails["queue"].(string)
|
||||
case basicMethodMap[20]:
|
||||
summary = reqDetails["queue"].(string)
|
||||
}
|
||||
|
||||
request["url"] = summary
|
||||
reqDetails["method"] = request["method"]
|
||||
return &api.Entry{
|
||||
Protocol: protocol,
|
||||
@@ -260,17 +235,70 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
|
||||
Namespace: namespace,
|
||||
Outgoing: item.ConnectionInfo.IsOutgoing,
|
||||
Request: reqDetails,
|
||||
Method: request["method"].(string),
|
||||
Status: 0,
|
||||
Timestamp: item.Timestamp,
|
||||
StartTime: item.Pair.Request.CaptureTime,
|
||||
ElapsedTime: 0,
|
||||
Summary: summary,
|
||||
IsOutgoing: item.ConnectionInfo.IsOutgoing,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
|
||||
summary := ""
|
||||
summaryQuery := ""
|
||||
method := entry.Request["method"].(string)
|
||||
methodQuery := fmt.Sprintf(`request.method == "%s"`, method)
|
||||
switch method {
|
||||
case basicMethodMap[40]:
|
||||
summary = entry.Request["exchange"].(string)
|
||||
summaryQuery = fmt.Sprintf(`request.exchange == "%s"`, summary)
|
||||
case basicMethodMap[60]:
|
||||
summary = entry.Request["exchange"].(string)
|
||||
summaryQuery = fmt.Sprintf(`request.exchange == "%s"`, summary)
|
||||
case exchangeMethodMap[10]:
|
||||
summary = entry.Request["exchange"].(string)
|
||||
summaryQuery = fmt.Sprintf(`request.exchange == "%s"`, summary)
|
||||
case queueMethodMap[10]:
|
||||
summary = entry.Request["queue"].(string)
|
||||
summaryQuery = fmt.Sprintf(`request.queue == "%s"`, summary)
|
||||
case connectionMethodMap[10]:
|
||||
versionMajor := int(entry.Request["versionMajor"].(float64))
|
||||
versionMinor := int(entry.Request["versionMinor"].(float64))
|
||||
summary = fmt.Sprintf(
|
||||
"%s.%s",
|
||||
strconv.Itoa(versionMajor),
|
||||
strconv.Itoa(versionMinor),
|
||||
)
|
||||
summaryQuery = fmt.Sprintf(`request.versionMajor == %d and request.versionMinor == %d`, versionMajor, versionMinor)
|
||||
case connectionMethodMap[50]:
|
||||
summary = entry.Request["replyText"].(string)
|
||||
summaryQuery = fmt.Sprintf(`request.replyText == "%s"`, summary)
|
||||
case queueMethodMap[20]:
|
||||
summary = entry.Request["queue"].(string)
|
||||
summaryQuery = fmt.Sprintf(`request.queue == "%s"`, summary)
|
||||
case basicMethodMap[20]:
|
||||
summary = entry.Request["queue"].(string)
|
||||
summaryQuery = fmt.Sprintf(`request.queue == "%s"`, summary)
|
||||
}
|
||||
|
||||
return &api.BaseEntry{
|
||||
Id: entry.Id,
|
||||
Protocol: entry.Protocol,
|
||||
Summary: summary,
|
||||
SummaryQuery: summaryQuery,
|
||||
Status: 0,
|
||||
StatusQuery: "",
|
||||
Method: method,
|
||||
MethodQuery: methodQuery,
|
||||
Timestamp: entry.Timestamp,
|
||||
Source: entry.Source,
|
||||
Destination: entry.Destination,
|
||||
IsOutgoing: entry.Outgoing,
|
||||
Latency: entry.ElapsedTime,
|
||||
Rules: entry.Rules,
|
||||
ContractStatus: entry.ContractStatus,
|
||||
}
|
||||
}
|
||||
|
||||
func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error) {
|
||||
bodySize = 0
|
||||
representation := make(map[string]interface{})
|
||||
|
||||
@@ -21,14 +21,16 @@ import (
|
||||
const (
|
||||
binDir = "bin"
|
||||
patternBin = "*_req.bin"
|
||||
patternDissect = "*.json"
|
||||
patternExpect = "*.json"
|
||||
msgDissecting = "Dissecting:"
|
||||
msgAnalyzing = "Analyzing:"
|
||||
msgSummarizing = "Summarizing:"
|
||||
msgRepresenting = "Representing:"
|
||||
respSuffix = "_res.bin"
|
||||
expectDir = "expect"
|
||||
dissectDir = "dissect"
|
||||
analyzeDir = "analyze"
|
||||
summarizeDir = "summarize"
|
||||
representDir = "represent"
|
||||
testUpdate = "TEST_UPDATE"
|
||||
)
|
||||
@@ -186,7 +188,7 @@ func TestAnalyze(t *testing.T) {
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirDissect, patternDissect))
|
||||
paths, err := filepath.Glob(path.Join(expectDirDissect, patternExpect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -230,6 +232,63 @@ func TestAnalyze(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSummarize(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
expectDirAnalyze := path.Join(expectDir, analyzeDir)
|
||||
expectDirSummarize := path.Join(expectDir, summarizeDir)
|
||||
|
||||
if testUpdateEnabled {
|
||||
os.RemoveAll(expectDirSummarize)
|
||||
err := os.MkdirAll(expectDirSummarize, 0775)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternExpect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, _path := range paths {
|
||||
fmt.Printf("%s %s\n", msgSummarizing, _path)
|
||||
|
||||
bytes, err := ioutil.ReadFile(_path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var entries []*api.Entry
|
||||
err = json.Unmarshal(bytes, &entries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var baseEntries []*api.BaseEntry
|
||||
for _, entry := range entries {
|
||||
baseEntry := dissector.Summarize(entry)
|
||||
baseEntries = append(baseEntries, baseEntry)
|
||||
}
|
||||
|
||||
pathExpect := path.Join(expectDirSummarize, filepath.Base(_path))
|
||||
|
||||
marshaled, err := json.Marshal(baseEntries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if testUpdateEnabled {
|
||||
if len(baseEntries) > 0 {
|
||||
err = os.WriteFile(pathExpect, marshaled, 0644)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
|
||||
assert.Len(t, entries, 0)
|
||||
} else {
|
||||
expectedBytes, err := ioutil.ReadFile(pathExpect)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.JSONEq(t, string(expectedBytes), string(marshaled))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepresent(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
@@ -243,7 +302,7 @@ func TestRepresent(t *testing.T) {
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternDissect))
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternExpect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -13,4 +13,4 @@ test-pull-bin:
|
||||
|
||||
test-pull-expect:
|
||||
@mkdir -p expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect2/http/\* expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect3/http/\* expect
|
||||
|
||||
@@ -231,7 +231,6 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
|
||||
reqDetails["targetUri"] = reqDetails["url"]
|
||||
reqDetails["path"] = path
|
||||
reqDetails["pathSegments"] = strings.Split(path, "/")[1:]
|
||||
reqDetails["summary"] = path
|
||||
|
||||
// Rearrange the maps for the querying
|
||||
reqDetails["_headers"] = reqDetails["headers"]
|
||||
@@ -248,17 +247,11 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
|
||||
reqDetails["_queryStringMerged"] = mapSliceMergeRepeatedKeys(reqDetails["_queryString"].([]interface{}))
|
||||
reqDetails["queryString"] = mapSliceRebuildAsMap(reqDetails["_queryStringMerged"].([]interface{}))
|
||||
|
||||
method := reqDetails["method"].(string)
|
||||
statusCode := int(resDetails["status"].(float64))
|
||||
if item.Protocol.Abbreviation == "gRPC" {
|
||||
resDetails["statusText"] = grpcStatusCodes[statusCode]
|
||||
}
|
||||
|
||||
if item.Protocol.Version == "2.0" && !isRequestUpgradedH2C {
|
||||
reqDetails["url"] = path
|
||||
request["url"] = path
|
||||
}
|
||||
|
||||
elapsedTime := item.Pair.Response.CaptureTime.Sub(item.Pair.Request.CaptureTime).Round(time.Millisecond).Milliseconds()
|
||||
if elapsedTime < 0 {
|
||||
elapsedTime = 0
|
||||
@@ -280,17 +273,40 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
|
||||
Outgoing: item.ConnectionInfo.IsOutgoing,
|
||||
Request: reqDetails,
|
||||
Response: resDetails,
|
||||
Method: method,
|
||||
Status: statusCode,
|
||||
Timestamp: item.Timestamp,
|
||||
StartTime: item.Pair.Request.CaptureTime,
|
||||
ElapsedTime: elapsedTime,
|
||||
Summary: path,
|
||||
IsOutgoing: item.ConnectionInfo.IsOutgoing,
|
||||
HTTPPair: string(httpPair),
|
||||
}
|
||||
}
|
||||
|
||||
func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
|
||||
summary := entry.Request["path"].(string)
|
||||
summaryQuery := fmt.Sprintf(`request.path == "%s"`, summary)
|
||||
method := entry.Request["method"].(string)
|
||||
methodQuery := fmt.Sprintf(`request.method == "%s"`, method)
|
||||
status := int(entry.Response["status"].(float64))
|
||||
statusQuery := fmt.Sprintf(`response.status == %d`, status)
|
||||
|
||||
return &api.BaseEntry{
|
||||
Id: entry.Id,
|
||||
Protocol: entry.Protocol,
|
||||
Summary: summary,
|
||||
SummaryQuery: summaryQuery,
|
||||
Status: status,
|
||||
StatusQuery: statusQuery,
|
||||
Method: method,
|
||||
MethodQuery: methodQuery,
|
||||
Timestamp: entry.Timestamp,
|
||||
Source: entry.Source,
|
||||
Destination: entry.Destination,
|
||||
IsOutgoing: entry.Outgoing,
|
||||
Latency: entry.ElapsedTime,
|
||||
Rules: entry.Rules,
|
||||
ContractStatus: entry.ContractStatus,
|
||||
}
|
||||
}
|
||||
|
||||
func representRequest(request map[string]interface{}) (repRequest []interface{}) {
|
||||
details, _ := json.Marshal([]api.TableData{
|
||||
{
|
||||
|
||||
@@ -21,14 +21,16 @@ import (
|
||||
const (
|
||||
binDir = "bin"
|
||||
patternBin = "*_req.bin"
|
||||
patternDissect = "*.json"
|
||||
patternExpect = "*.json"
|
||||
msgDissecting = "Dissecting:"
|
||||
msgAnalyzing = "Analyzing:"
|
||||
msgSummarizing = "Summarizing:"
|
||||
msgRepresenting = "Representing:"
|
||||
respSuffix = "_res.bin"
|
||||
expectDir = "expect"
|
||||
dissectDir = "dissect"
|
||||
analyzeDir = "analyze"
|
||||
summarizeDir = "summarize"
|
||||
representDir = "represent"
|
||||
testUpdate = "TEST_UPDATE"
|
||||
)
|
||||
@@ -188,7 +190,7 @@ func TestAnalyze(t *testing.T) {
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirDissect, patternDissect))
|
||||
paths, err := filepath.Glob(path.Join(expectDirDissect, patternExpect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -232,6 +234,63 @@ func TestAnalyze(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSummarize(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
expectDirAnalyze := path.Join(expectDir, analyzeDir)
|
||||
expectDirSummarize := path.Join(expectDir, summarizeDir)
|
||||
|
||||
if testUpdateEnabled {
|
||||
os.RemoveAll(expectDirSummarize)
|
||||
err := os.MkdirAll(expectDirSummarize, 0775)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternExpect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, _path := range paths {
|
||||
fmt.Printf("%s %s\n", msgSummarizing, _path)
|
||||
|
||||
bytes, err := ioutil.ReadFile(_path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var entries []*api.Entry
|
||||
err = json.Unmarshal(bytes, &entries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var baseEntries []*api.BaseEntry
|
||||
for _, entry := range entries {
|
||||
baseEntry := dissector.Summarize(entry)
|
||||
baseEntries = append(baseEntries, baseEntry)
|
||||
}
|
||||
|
||||
pathExpect := path.Join(expectDirSummarize, filepath.Base(_path))
|
||||
|
||||
marshaled, err := json.Marshal(baseEntries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if testUpdateEnabled {
|
||||
if len(baseEntries) > 0 {
|
||||
err = os.WriteFile(pathExpect, marshaled, 0644)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
|
||||
assert.Len(t, entries, 0)
|
||||
} else {
|
||||
expectedBytes, err := ioutil.ReadFile(pathExpect)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.JSONEq(t, string(expectedBytes), string(marshaled))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepresent(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
@@ -245,7 +304,7 @@ func TestRepresent(t *testing.T) {
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternDissect))
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternExpect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -13,4 +13,4 @@ test-pull-bin:
|
||||
|
||||
test-pull-expect:
|
||||
@mkdir -p expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect/kafka/\* expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect3/kafka/\* expect
|
||||
|
||||
@@ -61,83 +61,7 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
|
||||
func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string, resolvedDestination string, namespace string) *api.Entry {
|
||||
request := item.Pair.Request.Payload.(map[string]interface{})
|
||||
reqDetails := request["details"].(map[string]interface{})
|
||||
apiKey := ApiKey(reqDetails["apiKey"].(float64))
|
||||
|
||||
summary := ""
|
||||
switch apiKey {
|
||||
case Metadata:
|
||||
_topics := reqDetails["payload"].(map[string]interface{})["topics"]
|
||||
if _topics == nil {
|
||||
break
|
||||
}
|
||||
topics := _topics.([]interface{})
|
||||
for _, topic := range topics {
|
||||
summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["name"].(string))
|
||||
}
|
||||
if len(summary) > 0 {
|
||||
summary = summary[:len(summary)-2]
|
||||
}
|
||||
case ApiVersions:
|
||||
summary = reqDetails["clientID"].(string)
|
||||
case Produce:
|
||||
_topics := reqDetails["payload"].(map[string]interface{})["topicData"]
|
||||
if _topics == nil {
|
||||
break
|
||||
}
|
||||
topics := _topics.([]interface{})
|
||||
for _, topic := range topics {
|
||||
summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["topic"].(string))
|
||||
}
|
||||
if len(summary) > 0 {
|
||||
summary = summary[:len(summary)-2]
|
||||
}
|
||||
case Fetch:
|
||||
_topics := reqDetails["payload"].(map[string]interface{})["topics"]
|
||||
if _topics == nil {
|
||||
break
|
||||
}
|
||||
topics := _topics.([]interface{})
|
||||
for _, topic := range topics {
|
||||
summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["topic"].(string))
|
||||
}
|
||||
if len(summary) > 0 {
|
||||
summary = summary[:len(summary)-2]
|
||||
}
|
||||
case ListOffsets:
|
||||
_topics := reqDetails["payload"].(map[string]interface{})["topics"]
|
||||
if _topics == nil {
|
||||
break
|
||||
}
|
||||
topics := _topics.([]interface{})
|
||||
for _, topic := range topics {
|
||||
summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["name"].(string))
|
||||
}
|
||||
if len(summary) > 0 {
|
||||
summary = summary[:len(summary)-2]
|
||||
}
|
||||
case CreateTopics:
|
||||
_topics := reqDetails["payload"].(map[string]interface{})["topics"]
|
||||
if _topics == nil {
|
||||
break
|
||||
}
|
||||
topics := _topics.([]interface{})
|
||||
for _, topic := range topics {
|
||||
summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["name"].(string))
|
||||
}
|
||||
if len(summary) > 0 {
|
||||
summary = summary[:len(summary)-2]
|
||||
}
|
||||
case DeleteTopics:
|
||||
if reqDetails["topicNames"] == nil {
|
||||
break
|
||||
}
|
||||
topicNames := reqDetails["topicNames"].([]string)
|
||||
for _, name := range topicNames {
|
||||
summary += fmt.Sprintf("%s, ", name)
|
||||
}
|
||||
}
|
||||
|
||||
request["url"] = summary
|
||||
elapsedTime := item.Pair.Response.CaptureTime.Sub(item.Pair.Request.CaptureTime).Round(time.Millisecond).Milliseconds()
|
||||
if elapsedTime < 0 {
|
||||
elapsedTime = 0
|
||||
@@ -158,13 +82,127 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
|
||||
Outgoing: item.ConnectionInfo.IsOutgoing,
|
||||
Request: reqDetails,
|
||||
Response: item.Pair.Response.Payload.(map[string]interface{})["details"].(map[string]interface{}),
|
||||
Method: apiNames[apiKey],
|
||||
Status: 0,
|
||||
Timestamp: item.Timestamp,
|
||||
StartTime: item.Pair.Request.CaptureTime,
|
||||
ElapsedTime: elapsedTime,
|
||||
Summary: summary,
|
||||
IsOutgoing: item.ConnectionInfo.IsOutgoing,
|
||||
}
|
||||
}
|
||||
|
||||
func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
|
||||
status := 0
|
||||
statusQuery := ""
|
||||
|
||||
apiKey := ApiKey(entry.Request["apiKey"].(float64))
|
||||
method := apiNames[apiKey]
|
||||
methodQuery := fmt.Sprintf("request.apiKey == %d", int(entry.Request["apiKey"].(float64)))
|
||||
|
||||
summary := ""
|
||||
summaryQuery := ""
|
||||
switch apiKey {
|
||||
case Metadata:
|
||||
_topics := entry.Request["payload"].(map[string]interface{})["topics"]
|
||||
if _topics == nil {
|
||||
break
|
||||
}
|
||||
topics := _topics.([]interface{})
|
||||
for i, topic := range topics {
|
||||
summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["name"].(string))
|
||||
summaryQuery += fmt.Sprintf(`request.payload.topics[%d].name == "%s" and`, i, summary)
|
||||
}
|
||||
if len(summary) > 0 {
|
||||
summary = summary[:len(summary)-2]
|
||||
summaryQuery = summaryQuery[:len(summaryQuery)-4]
|
||||
}
|
||||
case ApiVersions:
|
||||
summary = entry.Request["clientID"].(string)
|
||||
summaryQuery = fmt.Sprintf(`request.clientID == "%s"`, summary)
|
||||
case Produce:
|
||||
_topics := entry.Request["payload"].(map[string]interface{})["topicData"]
|
||||
if _topics == nil {
|
||||
break
|
||||
}
|
||||
topics := _topics.([]interface{})
|
||||
for i, topic := range topics {
|
||||
summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["topic"].(string))
|
||||
summaryQuery += fmt.Sprintf(`request.payload.topicData[%d].topic == "%s" and`, i, summary)
|
||||
}
|
||||
if len(summary) > 0 {
|
||||
summary = summary[:len(summary)-2]
|
||||
summaryQuery = summaryQuery[:len(summaryQuery)-4]
|
||||
}
|
||||
case Fetch:
|
||||
_topics := entry.Request["payload"].(map[string]interface{})["topics"]
|
||||
if _topics == nil {
|
||||
break
|
||||
}
|
||||
topics := _topics.([]interface{})
|
||||
for i, topic := range topics {
|
||||
summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["topic"].(string))
|
||||
summaryQuery += fmt.Sprintf(`request.payload.topics[%d].topic == "%s" and`, i, summary)
|
||||
}
|
||||
if len(summary) > 0 {
|
||||
summary = summary[:len(summary)-2]
|
||||
summaryQuery = summaryQuery[:len(summaryQuery)-4]
|
||||
}
|
||||
case ListOffsets:
|
||||
_topics := entry.Request["payload"].(map[string]interface{})["topics"]
|
||||
if _topics == nil {
|
||||
break
|
||||
}
|
||||
topics := _topics.([]interface{})
|
||||
for i, topic := range topics {
|
||||
summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["name"].(string))
|
||||
summaryQuery += fmt.Sprintf(`request.payload.topics[%d].name == "%s" and`, i, summary)
|
||||
}
|
||||
if len(summary) > 0 {
|
||||
summary = summary[:len(summary)-2]
|
||||
summaryQuery = summaryQuery[:len(summaryQuery)-4]
|
||||
}
|
||||
case CreateTopics:
|
||||
_topics := entry.Request["payload"].(map[string]interface{})["topics"]
|
||||
if _topics == nil {
|
||||
break
|
||||
}
|
||||
topics := _topics.([]interface{})
|
||||
for i, topic := range topics {
|
||||
summary += fmt.Sprintf("%s, ", topic.(map[string]interface{})["name"].(string))
|
||||
summaryQuery += fmt.Sprintf(`request.payload.topics[%d].name == "%s" and`, i, summary)
|
||||
}
|
||||
if len(summary) > 0 {
|
||||
summary = summary[:len(summary)-2]
|
||||
summaryQuery = summaryQuery[:len(summaryQuery)-4]
|
||||
}
|
||||
case DeleteTopics:
|
||||
if entry.Request["topicNames"] == nil {
|
||||
break
|
||||
}
|
||||
topicNames := entry.Request["topicNames"].([]string)
|
||||
for i, name := range topicNames {
|
||||
summary += fmt.Sprintf("%s, ", name)
|
||||
summaryQuery += fmt.Sprintf(`request.topicNames[%d] == "%s" and`, i, summary)
|
||||
}
|
||||
if len(summary) > 0 {
|
||||
summary = summary[:len(summary)-2]
|
||||
summaryQuery = summaryQuery[:len(summaryQuery)-4]
|
||||
}
|
||||
}
|
||||
|
||||
return &api.BaseEntry{
|
||||
Id: entry.Id,
|
||||
Protocol: entry.Protocol,
|
||||
Summary: summary,
|
||||
SummaryQuery: summaryQuery,
|
||||
Status: status,
|
||||
StatusQuery: statusQuery,
|
||||
Method: method,
|
||||
MethodQuery: methodQuery,
|
||||
Timestamp: entry.Timestamp,
|
||||
Source: entry.Source,
|
||||
Destination: entry.Destination,
|
||||
IsOutgoing: entry.Outgoing,
|
||||
Latency: entry.ElapsedTime,
|
||||
Rules: entry.Rules,
|
||||
ContractStatus: entry.ContractStatus,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -21,14 +21,16 @@ import (
|
||||
const (
|
||||
binDir = "bin"
|
||||
patternBin = "*_req.bin"
|
||||
patternDissect = "*.json"
|
||||
patternExpect = "*.json"
|
||||
msgDissecting = "Dissecting:"
|
||||
msgAnalyzing = "Analyzing:"
|
||||
msgSummarizing = "Summarizing:"
|
||||
msgRepresenting = "Representing:"
|
||||
respSuffix = "_res.bin"
|
||||
expectDir = "expect"
|
||||
dissectDir = "dissect"
|
||||
analyzeDir = "analyze"
|
||||
summarizeDir = "summarize"
|
||||
representDir = "represent"
|
||||
testUpdate = "TEST_UPDATE"
|
||||
)
|
||||
@@ -187,7 +189,7 @@ func TestAnalyze(t *testing.T) {
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirDissect, patternDissect))
|
||||
paths, err := filepath.Glob(path.Join(expectDirDissect, patternExpect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -231,6 +233,63 @@ func TestAnalyze(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSummarize(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
expectDirAnalyze := path.Join(expectDir, analyzeDir)
|
||||
expectDirSummarize := path.Join(expectDir, summarizeDir)
|
||||
|
||||
if testUpdateEnabled {
|
||||
os.RemoveAll(expectDirSummarize)
|
||||
err := os.MkdirAll(expectDirSummarize, 0775)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternExpect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, _path := range paths {
|
||||
fmt.Printf("%s %s\n", msgSummarizing, _path)
|
||||
|
||||
bytes, err := ioutil.ReadFile(_path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var entries []*api.Entry
|
||||
err = json.Unmarshal(bytes, &entries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var baseEntries []*api.BaseEntry
|
||||
for _, entry := range entries {
|
||||
baseEntry := dissector.Summarize(entry)
|
||||
baseEntries = append(baseEntries, baseEntry)
|
||||
}
|
||||
|
||||
pathExpect := path.Join(expectDirSummarize, filepath.Base(_path))
|
||||
|
||||
marshaled, err := json.Marshal(baseEntries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if testUpdateEnabled {
|
||||
if len(baseEntries) > 0 {
|
||||
err = os.WriteFile(pathExpect, marshaled, 0644)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
|
||||
assert.Len(t, entries, 0)
|
||||
} else {
|
||||
expectedBytes, err := ioutil.ReadFile(pathExpect)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.JSONEq(t, string(expectedBytes), string(marshaled))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepresent(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
@@ -244,7 +303,7 @@ func TestRepresent(t *testing.T) {
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternDissect))
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternExpect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -13,4 +13,4 @@ test-pull-bin:
|
||||
|
||||
test-pull-expect:
|
||||
@mkdir -p expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect/redis/\* expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect3/redis/\* expect
|
||||
|
||||
@@ -65,17 +65,6 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
|
||||
reqDetails := request["details"].(map[string]interface{})
|
||||
resDetails := response["details"].(map[string]interface{})
|
||||
|
||||
method := ""
|
||||
if reqDetails["command"] != nil {
|
||||
method = reqDetails["command"].(string)
|
||||
}
|
||||
|
||||
summary := ""
|
||||
if reqDetails["key"] != nil {
|
||||
summary = reqDetails["key"].(string)
|
||||
}
|
||||
|
||||
request["url"] = summary
|
||||
elapsedTime := item.Pair.Response.CaptureTime.Sub(item.Pair.Request.CaptureTime).Round(time.Millisecond).Milliseconds()
|
||||
if elapsedTime < 0 {
|
||||
elapsedTime = 0
|
||||
@@ -96,17 +85,50 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
|
||||
Outgoing: item.ConnectionInfo.IsOutgoing,
|
||||
Request: reqDetails,
|
||||
Response: resDetails,
|
||||
Method: method,
|
||||
Status: 0,
|
||||
Timestamp: item.Timestamp,
|
||||
StartTime: item.Pair.Request.CaptureTime,
|
||||
ElapsedTime: elapsedTime,
|
||||
Summary: summary,
|
||||
IsOutgoing: item.ConnectionInfo.IsOutgoing,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
|
||||
status := 0
|
||||
statusQuery := ""
|
||||
|
||||
method := ""
|
||||
methodQuery := ""
|
||||
if entry.Request["command"] != nil {
|
||||
method = entry.Request["command"].(string)
|
||||
methodQuery = fmt.Sprintf(`request.command == "%s"`, method)
|
||||
}
|
||||
|
||||
summary := ""
|
||||
summaryQuery := ""
|
||||
if entry.Request["key"] != nil {
|
||||
summary = entry.Request["key"].(string)
|
||||
summaryQuery = fmt.Sprintf(`request.key == "%s"`, summary)
|
||||
}
|
||||
|
||||
return &api.BaseEntry{
|
||||
Id: entry.Id,
|
||||
Protocol: entry.Protocol,
|
||||
Summary: summary,
|
||||
SummaryQuery: summaryQuery,
|
||||
Status: status,
|
||||
StatusQuery: statusQuery,
|
||||
Method: method,
|
||||
MethodQuery: methodQuery,
|
||||
Timestamp: entry.Timestamp,
|
||||
Source: entry.Source,
|
||||
Destination: entry.Destination,
|
||||
IsOutgoing: entry.Outgoing,
|
||||
Latency: entry.ElapsedTime,
|
||||
Rules: entry.Rules,
|
||||
ContractStatus: entry.ContractStatus,
|
||||
}
|
||||
}
|
||||
|
||||
func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error) {
|
||||
bodySize = 0
|
||||
representation := make(map[string]interface{})
|
||||
|
||||
@@ -22,14 +22,16 @@ import (
|
||||
const (
|
||||
binDir = "bin"
|
||||
patternBin = "*_req.bin"
|
||||
patternDissect = "*.json"
|
||||
patternExpect = "*.json"
|
||||
msgDissecting = "Dissecting:"
|
||||
msgAnalyzing = "Analyzing:"
|
||||
msgSummarizing = "Summarizing:"
|
||||
msgRepresenting = "Representing:"
|
||||
respSuffix = "_res.bin"
|
||||
expectDir = "expect"
|
||||
dissectDir = "dissect"
|
||||
analyzeDir = "analyze"
|
||||
summarizeDir = "summarize"
|
||||
representDir = "represent"
|
||||
testUpdate = "TEST_UPDATE"
|
||||
)
|
||||
@@ -187,7 +189,7 @@ func TestAnalyze(t *testing.T) {
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirDissect, patternDissect))
|
||||
paths, err := filepath.Glob(path.Join(expectDirDissect, patternExpect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -231,6 +233,63 @@ func TestAnalyze(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestSummarize(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
expectDirAnalyze := path.Join(expectDir, analyzeDir)
|
||||
expectDirSummarize := path.Join(expectDir, summarizeDir)
|
||||
|
||||
if testUpdateEnabled {
|
||||
os.RemoveAll(expectDirSummarize)
|
||||
err := os.MkdirAll(expectDirSummarize, 0775)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternExpect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, _path := range paths {
|
||||
fmt.Printf("%s %s\n", msgSummarizing, _path)
|
||||
|
||||
bytes, err := ioutil.ReadFile(_path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var entries []*api.Entry
|
||||
err = json.Unmarshal(bytes, &entries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var baseEntries []*api.BaseEntry
|
||||
for _, entry := range entries {
|
||||
baseEntry := dissector.Summarize(entry)
|
||||
baseEntries = append(baseEntries, baseEntry)
|
||||
}
|
||||
|
||||
pathExpect := path.Join(expectDirSummarize, filepath.Base(_path))
|
||||
|
||||
marshaled, err := json.Marshal(baseEntries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if testUpdateEnabled {
|
||||
if len(baseEntries) > 0 {
|
||||
err = os.WriteFile(pathExpect, marshaled, 0644)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
|
||||
assert.Len(t, entries, 0)
|
||||
} else {
|
||||
expectedBytes, err := ioutil.ReadFile(pathExpect)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.JSONEq(t, string(expectedBytes), string(marshaled))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepresent(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
@@ -244,7 +303,7 @@ func TestRepresent(t *testing.T) {
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternDissect))
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternExpect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
@@ -52,7 +52,6 @@ var snaplen = flag.Int("s", 65536, "Snap length (number of bytes max to read per
|
||||
var tstype = flag.String("timestamp_type", "", "Type of timestamps to use")
|
||||
var promisc = flag.Bool("promisc", true, "Set promiscuous mode")
|
||||
var staleTimeoutSeconds = flag.Int("staletimout", 120, "Max time in seconds to keep connections which don't transmit data")
|
||||
var pids = flag.String("pids", "", "A comma separated list of PIDs to capture their network namespaces")
|
||||
var servicemesh = flag.Bool("servicemesh", false, "Record decrypted traffic if the cluster is configured with a service mesh and with mtls")
|
||||
var tls = flag.Bool("tls", false, "Enable TLS tapper")
|
||||
|
||||
@@ -190,7 +189,7 @@ func initializePacketSources() error {
|
||||
}
|
||||
|
||||
var err error
|
||||
if packetSourceManager, err = source.NewPacketSourceManager(*procfs, *pids, *fname, *iface, *servicemesh, tapTargets, behaviour); err != nil {
|
||||
if packetSourceManager, err = source.NewPacketSourceManager(*procfs, *fname, *iface, *servicemesh, tapTargets, behaviour); err != nil {
|
||||
return err
|
||||
} else {
|
||||
packetSourceManager.ReadPackets(!*nodefrag, mainPacketInputChan)
|
||||
@@ -248,7 +247,7 @@ func startTlsTapper(extension *api.Extension, outputItems chan *api.OutputChanne
|
||||
tls := tlstapper.TlsTapper{}
|
||||
tlsPerfBufferSize := os.Getpagesize() * 100
|
||||
|
||||
if err := tls.Init(tlsPerfBufferSize); err != nil {
|
||||
if err := tls.Init(tlsPerfBufferSize, *procfs, extension); err != nil {
|
||||
tlstapper.LogError(err)
|
||||
return
|
||||
}
|
||||
@@ -272,6 +271,5 @@ func startTlsTapper(extension *api.Extension, outputItems chan *api.OutputChanne
|
||||
OutputChannel: outputItems,
|
||||
}
|
||||
|
||||
poller := tlstapper.NewTlsPoller(&tls, extension)
|
||||
go poller.Poll(extension, emitter, options)
|
||||
go tls.Poll(emitter, options)
|
||||
}
|
||||
|
||||
83
tap/source/netns_packet_source.go
Normal file
@@ -0,0 +1,83 @@
|
||||
package source
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
"github.com/vishvananda/netns"
|
||||
)
|
||||
|
||||
func newNetnsPacketSource(procfs string, pid string,
|
||||
interfaceName string, behaviour TcpPacketSourceBehaviour) (*tcpPacketSource, error) {
|
||||
nsh, err := netns.GetFromPath(fmt.Sprintf("%s/%s/ns/net", procfs, pid))
|
||||
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Unable to get netns of pid %s - %w", pid, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
src, err := newPacketSourceFromNetnsHandle(pid, nsh, interfaceName, behaviour)
|
||||
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Error starting netns packet source for %s - %w", pid, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return src, nil
|
||||
}
|
||||
|
||||
func newPacketSourceFromNetnsHandle(pid string, nsh netns.NsHandle, interfaceName string,
|
||||
behaviour TcpPacketSourceBehaviour) (*tcpPacketSource, error) {
|
||||
|
||||
done := make(chan *tcpPacketSource)
|
||||
errors := make(chan error)
|
||||
|
||||
go func(done chan<- *tcpPacketSource) {
|
||||
// Setting a netns should be done from a dedicated OS thread.
|
||||
//
|
||||
// goroutines are not really OS threads, we try to mimic the issue by
|
||||
// locking the OS thread to this goroutine
|
||||
//
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
oldnetns, err := netns.Get()
|
||||
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Unable to get netns of current thread %w", err)
|
||||
errors <- err
|
||||
return
|
||||
}
|
||||
|
||||
if err := netns.Set(nsh); err != nil {
|
||||
logger.Log.Errorf("Unable to set netns of pid %s - %w", pid, err)
|
||||
errors <- err
|
||||
return
|
||||
}
|
||||
|
||||
name := fmt.Sprintf("netns-%s-%s", pid, interfaceName)
|
||||
src, err := newTcpPacketSource(name, "", interfaceName, behaviour)
|
||||
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Error listening to PID %s - %w", pid, err)
|
||||
errors <- err
|
||||
return
|
||||
}
|
||||
|
||||
if err := netns.Set(oldnetns); err != nil {
|
||||
logger.Log.Errorf("Unable to set back netns of current thread %w", err)
|
||||
errors <- err
|
||||
return
|
||||
}
|
||||
|
||||
done <- src
|
||||
}(done)
|
||||
|
||||
select {
|
||||
case err := <-errors:
|
||||
return nil, err
|
||||
case source := <-done:
|
||||
return source, nil
|
||||
}
|
||||
}
|
||||
@@ -2,109 +2,46 @@ package source
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
"github.com/vishvananda/netns"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
const bpfFilterMaxPods = 150
|
||||
const hostSourcePid = "0"
|
||||
|
||||
type PacketSourceManager struct {
|
||||
sources []*tcpPacketSource
|
||||
sources map[string]*tcpPacketSource
|
||||
}
|
||||
|
||||
func NewPacketSourceManager(procfs string, pids string, filename string, interfaceName string,
|
||||
func NewPacketSourceManager(procfs string, filename string, interfaceName string,
|
||||
mtls bool, pods []v1.Pod, behaviour TcpPacketSourceBehaviour) (*PacketSourceManager, error) {
|
||||
sources := make([]*tcpPacketSource, 0)
|
||||
sources, err := createHostSource(sources, filename, interfaceName, behaviour)
|
||||
|
||||
hostSource, err := newHostPacketSource(filename, interfaceName, behaviour)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sources = createSourcesFromPids(sources, procfs, pids, interfaceName, behaviour)
|
||||
sources = createSourcesFromEnvoy(sources, mtls, procfs, pods, interfaceName, behaviour)
|
||||
sources = createSourcesFromLinkerd(sources, mtls, procfs, pods, interfaceName, behaviour)
|
||||
|
||||
return &PacketSourceManager{
|
||||
sources: sources,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func createHostSource(sources []*tcpPacketSource, filename string, interfaceName string,
|
||||
behaviour TcpPacketSourceBehaviour) ([]*tcpPacketSource, error) {
|
||||
hostSource, err := newHostPacketSource(filename, interfaceName, behaviour)
|
||||
|
||||
if err != nil {
|
||||
return sources, err
|
||||
sourceManager := &PacketSourceManager{
|
||||
sources: map[string]*tcpPacketSource{
|
||||
hostSourcePid: hostSource,
|
||||
},
|
||||
}
|
||||
|
||||
return append(sources, hostSource), nil
|
||||
}
|
||||
|
||||
func createSourcesFromPids(sources []*tcpPacketSource, procfs string, pids string,
|
||||
interfaceName string, behaviour TcpPacketSourceBehaviour) []*tcpPacketSource {
|
||||
if pids == "" {
|
||||
return sources
|
||||
}
|
||||
|
||||
netnsSources := newNetnsPacketSources(procfs, strings.Split(pids, ","), interfaceName, behaviour)
|
||||
sources = append(sources, netnsSources...)
|
||||
return sources
|
||||
}
|
||||
|
||||
func createSourcesFromEnvoy(sources []*tcpPacketSource, mtls bool, procfs string, pods []v1.Pod,
|
||||
interfaceName string, behaviour TcpPacketSourceBehaviour) []*tcpPacketSource {
|
||||
if !mtls {
|
||||
return sources
|
||||
}
|
||||
|
||||
envoyPids, err := discoverRelevantEnvoyPids(procfs, pods)
|
||||
|
||||
if err != nil {
|
||||
logger.Log.Warningf("Unable to discover envoy pids - %v", err)
|
||||
return sources
|
||||
}
|
||||
|
||||
netnsSources := newNetnsPacketSources(procfs, envoyPids, interfaceName, behaviour)
|
||||
sources = append(sources, netnsSources...)
|
||||
|
||||
return sources
|
||||
}
|
||||
|
||||
func createSourcesFromLinkerd(sources []*tcpPacketSource, mtls bool, procfs string, pods []v1.Pod,
|
||||
interfaceName string, behaviour TcpPacketSourceBehaviour) []*tcpPacketSource {
|
||||
if !mtls {
|
||||
return sources
|
||||
}
|
||||
|
||||
linkerdPids, err := discoverRelevantLinkerdPids(procfs, pods)
|
||||
|
||||
if err != nil {
|
||||
logger.Log.Warningf("Unable to discover linkerd pids - %v", err)
|
||||
return sources
|
||||
}
|
||||
|
||||
netnsSources := newNetnsPacketSources(procfs, linkerdPids, interfaceName, behaviour)
|
||||
sources = append(sources, netnsSources...)
|
||||
|
||||
return sources
|
||||
sourceManager.UpdatePods(mtls, procfs, pods, interfaceName, behaviour)
|
||||
return sourceManager, nil
|
||||
}
|
||||
|
||||
func newHostPacketSource(filename string, interfaceName string,
|
||||
behaviour TcpPacketSourceBehaviour) (*tcpPacketSource, error) {
|
||||
var name string
|
||||
|
||||
if filename == "" {
|
||||
name = fmt.Sprintf("host-%v", interfaceName)
|
||||
name = fmt.Sprintf("host-%s", interfaceName)
|
||||
} else {
|
||||
name = fmt.Sprintf("file-%v", filename)
|
||||
name = fmt.Sprintf("file-%s", filename)
|
||||
}
|
||||
|
||||
source, err := newTcpPacketSource(name, filename, interfaceName, behaviour)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -112,90 +49,93 @@ func newHostPacketSource(filename string, interfaceName string,
|
||||
return source, nil
|
||||
}
|
||||
|
||||
func newNetnsPacketSources(procfs string, pids []string, interfaceName string,
|
||||
behaviour TcpPacketSourceBehaviour) []*tcpPacketSource {
|
||||
result := make([]*tcpPacketSource, 0)
|
||||
|
||||
for _, pidstr := range pids {
|
||||
pid, err := strconv.Atoi(pidstr)
|
||||
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Invalid PID: %v - %v", pid, err)
|
||||
continue
|
||||
}
|
||||
|
||||
nsh, err := netns.GetFromPath(fmt.Sprintf("%v/%v/ns/net", procfs, pid))
|
||||
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Unable to get netns of pid %v - %v", pid, err)
|
||||
continue
|
||||
}
|
||||
|
||||
src, err := newNetnsPacketSource(pid, nsh, interfaceName, behaviour)
|
||||
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Error starting netns packet source for %v - %v", pid, err)
|
||||
continue
|
||||
}
|
||||
|
||||
result = append(result, src)
|
||||
func (m *PacketSourceManager) UpdatePods(mtls bool, procfs string, pods []v1.Pod,
|
||||
interfaceName string, behaviour TcpPacketSourceBehaviour) {
|
||||
if mtls {
|
||||
m.updateMtlsPods(procfs, pods, interfaceName, behaviour)
|
||||
}
|
||||
|
||||
return result
|
||||
m.setBPFFilter(pods)
|
||||
}
|
||||
|
||||
func newNetnsPacketSource(pid int, nsh netns.NsHandle, interfaceName string,
|
||||
behaviour TcpPacketSourceBehaviour) (*tcpPacketSource, error) {
|
||||
func (m *PacketSourceManager) updateMtlsPods(procfs string, pods []v1.Pod,
|
||||
interfaceName string, behaviour TcpPacketSourceBehaviour) {
|
||||
|
||||
done := make(chan *tcpPacketSource)
|
||||
errors := make(chan error)
|
||||
relevantPids := m.getRelevantPids(procfs, pods)
|
||||
logger.Log.Infof("Updating mtls pods (new: %v) (current: %v)", relevantPids, m.sources)
|
||||
|
||||
go func(done chan<- *tcpPacketSource) {
|
||||
// Setting a netns should be done from a dedicated OS thread.
|
||||
//
|
||||
// goroutines are not really OS threads, we try to mimic the issue by
|
||||
// locking the OS thread to this goroutine
|
||||
//
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
oldnetns, err := netns.Get()
|
||||
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Unable to get netns of current thread %v", err)
|
||||
errors <- err
|
||||
return
|
||||
for pid, src := range m.sources {
|
||||
if _, ok := relevantPids[pid]; !ok {
|
||||
src.close()
|
||||
delete(m.sources, pid)
|
||||
}
|
||||
}
|
||||
|
||||
if err := netns.Set(nsh); err != nil {
|
||||
logger.Log.Errorf("Unable to set netns of pid %v - %v", pid, err)
|
||||
errors <- err
|
||||
return
|
||||
for pid := range relevantPids {
|
||||
if _, ok := m.sources[pid]; !ok {
|
||||
source, err := newNetnsPacketSource(procfs, pid, interfaceName, behaviour)
|
||||
|
||||
if err == nil {
|
||||
m.sources[pid] = source
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
name := fmt.Sprintf("netns-%v-%v", pid, interfaceName)
|
||||
src, err := newTcpPacketSource(name, "", interfaceName, behaviour)
|
||||
func (m *PacketSourceManager) getRelevantPids(procfs string, pods []v1.Pod) map[string]bool {
|
||||
relevantPids := make(map[string]bool)
|
||||
relevantPids[hostSourcePid] = true
|
||||
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Error listening to PID %v - %v", pid, err)
|
||||
errors <- err
|
||||
return
|
||||
if envoyPids, err := discoverRelevantEnvoyPids(procfs, pods); err != nil {
|
||||
logger.Log.Warningf("Unable to discover envoy pids - %w", err)
|
||||
} else {
|
||||
for _, pid := range envoyPids {
|
||||
relevantPids[pid] = true
|
||||
}
|
||||
}
|
||||
|
||||
if err := netns.Set(oldnetns); err != nil {
|
||||
logger.Log.Errorf("Unable to set back netns of current thread %v", err)
|
||||
errors <- err
|
||||
return
|
||||
if linkerdPids, err := discoverRelevantLinkerdPids(procfs, pods); err != nil {
|
||||
logger.Log.Warningf("Unable to discover linkerd pids - %w", err)
|
||||
} else {
|
||||
for _, pid := range linkerdPids {
|
||||
relevantPids[pid] = true
|
||||
}
|
||||
}
|
||||
|
||||
done <- src
|
||||
}(done)
|
||||
return relevantPids
|
||||
}
|
||||
|
||||
select {
|
||||
case err := <-errors:
|
||||
return nil, err
|
||||
case source := <-done:
|
||||
return source, nil
|
||||
func buildBPFExpr(pods []v1.Pod) string {
|
||||
hostsFilter := make([]string, 0)
|
||||
|
||||
for _, pod := range pods {
|
||||
hostsFilter = append(hostsFilter, fmt.Sprintf("host %s", pod.Status.PodIP))
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s and port not 443", strings.Join(hostsFilter, " or "))
|
||||
}
|
||||
|
||||
func (m *PacketSourceManager) setBPFFilter(pods []v1.Pod) {
|
||||
if len(pods) == 0 {
|
||||
logger.Log.Info("No pods provided, skipping pcap bpf filter")
|
||||
return
|
||||
}
|
||||
|
||||
var expr string
|
||||
|
||||
if len(pods) > bpfFilterMaxPods {
|
||||
logger.Log.Info("Too many pods for setting ebpf filter %d, setting just not 443", len(pods))
|
||||
expr = "port not 443"
|
||||
} else {
|
||||
expr = buildBPFExpr(pods)
|
||||
}
|
||||
|
||||
logger.Log.Infof("Setting pcap bpf filter %s", expr)
|
||||
|
||||
for pid, src := range m.sources {
|
||||
if err := src.setBPFFilter(expr); err != nil {
|
||||
logger.Log.Warningf("Error setting bpf filter for %s %v - %w", pid, src, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -98,6 +98,14 @@ func newTcpPacketSource(name, filename string, interfaceName string,
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (source *tcpPacketSource) String() string {
|
||||
return source.name
|
||||
}
|
||||
|
||||
func (source *tcpPacketSource) setBPFFilter(expr string) (err error) {
|
||||
return source.handle.SetBPFFilter(expr)
|
||||
}
|
||||
|
||||
func (source *tcpPacketSource) close() {
|
||||
if source.handle != nil {
|
||||
source.handle.Close()
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM alpine:3
|
||||
FROM alpine:3.14
|
||||
|
||||
RUN apk --no-cache update && apk --no-cache add clang llvm libbpf-dev go linux-headers
|
||||
|
||||
|
||||
@@ -10,7 +10,7 @@ Copyright (C) UP9 Inc.
#define FLAGS_IS_CLIENT_BIT (1 << 0)
#define FLAGS_IS_READ_BIT (1 << 1)

// The same struct can be found in Chunk.go
// The same struct can be found in chunk.go
//
// Be careful when editing, alignment and padding should be exactly the same in go/c.
//

@@ -8,20 +8,20 @@ import (
	"github.com/go-errors/errors"
)

const FLAGS_IS_CLIENT_BIT int32 = (1 << 0)
const FLAGS_IS_READ_BIT int32 = (1 << 1)
const FLAGS_IS_CLIENT_BIT uint32 = (1 << 0)
const FLAGS_IS_READ_BIT uint32 = (1 << 1)

// The same struct can be found in maps.h
//
// Be careful when editing, alignment and padding should be exactly the same in go/c.
//
type tlsChunk struct {
	Pid      int32
	Tgid     int32
	Len      int32
	Recorded int32
	Fd       int32
	Flags    int32
	Pid      uint32
	Tgid     uint32
	Len      uint32
	Recorded uint32
	Fd       uint32
	Flags    uint32
	Address  [16]byte
	Data     [4096]byte
}
@@ -68,3 +68,7 @@ func (c *tlsChunk) isWrite() bool {
func (c *tlsChunk) getRecordedData() []byte {
	return c.Data[:c.Recorded]
}

func (c *tlsChunk) isRequest() bool {
	return (c.isClient() && c.isWrite()) || (c.isServer() && c.isRead())
}
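As a quick illustration (not part of the diff) of how the two flag bits above are read, mirroring the isClient/isRead helpers and the isRequest rule; the sample flag value is invented:

package main

import "fmt"

const FLAGS_IS_CLIENT_BIT uint32 = 1 << 0
const FLAGS_IS_READ_BIT uint32 = 1 << 1

func main() {
	// Example chunk flags: client side, write direction (read bit not set).
	flags := FLAGS_IS_CLIENT_BIT

	isClient := flags&FLAGS_IS_CLIENT_BIT != 0
	isRead := flags&FLAGS_IS_READ_BIT != 0

	// Same rule as tlsChunk.isRequest(): a client write or a server read is a request.
	isRequest := (isClient && !isRead) || (!isClient && isRead)

	fmt.Println(isClient, isRead, isRequest) // true false true
}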
102	tap/tlstapper/sockfd_info.go	Normal file
@@ -0,0 +1,102 @@
package tlstapper

import (
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"regexp"
	"strconv"
	"strings"

	"github.com/go-errors/errors"
)

var socketInodeRegex = regexp.MustCompile(`socket:\[(\d+)\]`)

const (
	SRC_ADDRESS_FILED_INDEX = 1
	DST_ADDRESS_FILED_INDEX = 2
	INODE_FILED_INDEX       = 9
)

// This file helps to extract Ip and Port out of a Socket file descriptor.
//
// The equivalent bash commands are:
//
// > ls -l /proc/<pid>/fd/<fd>
//   Output something like "socket:[1234]" for sockets - 1234 is the inode of the socket
// > cat /proc/<pid>/net/tcp | grep <inode>
//   Output a line per ipv4 socket, the 9th field is the inode of the socket
//   The 1st and 2nd fields are the source and dest ip and ports in a Hex format
//   0100007F:50 is 127.0.0.1:80

func getAddressBySockfd(procfs string, pid uint32, fd uint32, src bool) (net.IP, uint16, error) {
	inode, err := getSocketInode(procfs, pid, fd)

	if err != nil {
		return nil, 0, err
	}

	tcppath := fmt.Sprintf("%s/%d/net/tcp", procfs, pid)
	tcp, err := ioutil.ReadFile(tcppath)

	if err != nil {
		return nil, 0, errors.Wrap(err, 0)
	}

	for _, line := range strings.Split(string(tcp), "\n") {
		parts := strings.Fields(line)

		if len(parts) < 10 {
			continue
		}

		if inode == parts[INODE_FILED_INDEX] {
			if src {
				return parseHexAddress(parts[SRC_ADDRESS_FILED_INDEX])
			} else {
				return parseHexAddress(parts[DST_ADDRESS_FILED_INDEX])
			}
		}
	}

	return nil, 0, errors.Errorf("address not found [pid: %d] [sockfd: %d] [inode: %s]", pid, fd, inode)
}

func getSocketInode(procfs string, pid uint32, fd uint32) (string, error) {
	fdlinkPath := fmt.Sprintf("%s/%d/fd/%d", procfs, pid, fd)
	fdlink, err := os.Readlink(fdlinkPath)

	if err != nil {
		return "", errors.Wrap(err, 0)
	}

	tokens := socketInodeRegex.FindStringSubmatch(fdlink)

	if tokens == nil || len(tokens) < 1 {
		return "", errors.Errorf("socket inode not found [pid: %d] [sockfd: %d] [link: %s]", pid, fd, fdlink)
	}

	return tokens[1], nil
}

// Format looks like 0100007F:50 for 127.0.0.1:80
//
func parseHexAddress(addr string) (net.IP, uint16, error) {
	addrParts := strings.Split(addr, ":")

	port, err := strconv.ParseUint(addrParts[1], 16, 16)

	if err != nil {
		return nil, 0, errors.Wrap(err, 0)
	}

	ip, err := strconv.ParseUint(addrParts[0], 16, 32)

	if err != nil {
		return nil, 0, errors.Wrap(err, 0)
	}

	return net.IP{uint8(ip), uint8(ip >> 8), uint8(ip >> 16), uint8(ip >> 24)}, uint16(port), nil
}
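A worked example (not part of the diff) of the hex address format that parseHexAddress handles: /proc/<pid>/net/tcp encodes the IPv4 bytes little-endian, so 0100007F:0050 decodes to 127.0.0.1:80. Minimal standalone sketch of the same decoding:

package main

import (
	"fmt"
	"net"
	"strconv"
	"strings"
)

func main() {
	// Sample local_address field from /proc/net/tcp.
	addr := "0100007F:0050"
	parts := strings.Split(addr, ":")

	ipBits, _ := strconv.ParseUint(parts[0], 16, 32)
	port, _ := strconv.ParseUint(parts[1], 16, 16)

	// Little-endian: the lowest byte is the first octet.
	ip := net.IP{uint8(ipBits), uint8(ipBits >> 8), uint8(ipBits >> 16), uint8(ipBits >> 24)}

	fmt.Printf("%v:%v\n", ip, port) // 127.0.0.1:80
}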
@@ -2,7 +2,6 @@ package tlstapper

import (
	"debug/elf"
	"fmt"

	"github.com/go-errors/errors"
	"github.com/up9inc/mizu/shared/logger"
@@ -47,7 +46,7 @@ func findBaseAddress(sslElf *elf.File, sslLibraryPath string) (uint64, error) {
		}
	}

	return 0, errors.New(fmt.Sprintf("Program header not found in %v", sslLibraryPath))
	return 0, errors.Errorf("Program header not found in %v", sslLibraryPath)
}

func findSslOffsets(sslElf *elf.File, base uint64) (sslOffsets, error) {
@@ -2,43 +2,64 @@ package tlstapper

import (
	"bufio"
	"bytes"
	"fmt"
	"net"

	"encoding/binary"
	"encoding/hex"
	"os"
	"strconv"
	"strings"

	"github.com/cilium/ebpf/perf"
	"github.com/go-errors/errors"
	"github.com/up9inc/mizu/shared/logger"
	"github.com/up9inc/mizu/tap/api"
)

const UNKNOWN_PORT uint16 = 80
const UNKNOWN_HOST string = "127.0.0.1"

type tlsPoller struct {
	tls           *TlsTapper
	readers       map[string]*tlsReader
	closedReaders chan string
	reqResMatcher api.RequestResponseMatcher
	chunksReader  *perf.Reader
	extension     *api.Extension
	procfs        string
}

func NewTlsPoller(tls *TlsTapper, extension *api.Extension) *tlsPoller {
func newTlsPoller(tls *TlsTapper, extension *api.Extension, procfs string) *tlsPoller {
	return &tlsPoller{
		tls:           tls,
		readers:       make(map[string]*tlsReader),
		closedReaders: make(chan string, 100),
		reqResMatcher: extension.Dissector.NewResponseRequestMatcher(),
		extension:     extension,
		chunksReader:  nil,
		procfs:        procfs,
	}
}

func (p *tlsPoller) Poll(extension *api.Extension,
	emitter api.Emitter, options *api.TrafficFilteringOptions) {
func (p *tlsPoller) init(bpfObjects *tlsTapperObjects, bufferSize int) error {
	var err error

	p.chunksReader, err = perf.NewReader(bpfObjects.ChunksBuffer, bufferSize)

	if err != nil {
		return errors.Wrap(err, 0)
	}

	return nil
}

func (p *tlsPoller) close() error {
	return p.chunksReader.Close()
}

func (p *tlsPoller) poll(emitter api.Emitter, options *api.TrafficFilteringOptions) {
	chunks := make(chan *tlsChunk)

	go p.tls.pollPerf(chunks)
	go p.pollChunksPerfBuffer(chunks)

	for {
		select {
@@ -47,7 +68,7 @@ func (p *tlsPoller) Poll(extension *api.Extension,
				return
			}

			if err := p.handleTlsChunk(chunk, extension, emitter, options); err != nil {
			if err := p.handleTlsChunk(chunk, p.extension, emitter, options); err != nil {
				LogError(err)
			}
		case key := <-p.closedReaders:
@@ -56,6 +77,41 @@ func (p *tlsPoller) Poll(extension *api.Extension,
	}
}

func (p *tlsPoller) pollChunksPerfBuffer(chunks chan<- *tlsChunk) {
	logger.Log.Infof("Start polling for tls events")

	for {
		record, err := p.chunksReader.Read()

		if err != nil {
			close(chunks)

			if errors.Is(err, perf.ErrClosed) {
				return
			}

			LogError(errors.Errorf("Error reading chunks from tls perf, aborting TLS! %v", err))
			return
		}

		if record.LostSamples != 0 {
			logger.Log.Infof("Buffer is full, dropped %d chunks", record.LostSamples)
			continue
		}

		buffer := bytes.NewReader(record.RawSample)

		var chunk tlsChunk

		if err := binary.Read(buffer, binary.LittleEndian, &chunk); err != nil {
			LogError(errors.Errorf("Error parsing chunk %v", err))
			continue
		}

		chunks <- &chunk
	}
}

func (p *tlsPoller) handleTlsChunk(chunk *tlsChunk, extension *api.Extension,
	emitter api.Emitter, options *api.TrafficFilteringOptions) error {
	ip, port, err := chunk.getAddress()
@@ -75,7 +131,7 @@ func (p *tlsPoller) handleTlsChunk(chunk *tlsChunk, extension *api.Extension,
	reader.chunks <- chunk

	if os.Getenv("MIZU_VERBOSE_TLS_TAPPER") == "true" {
		logTls(chunk, ip, port)
		p.logTls(chunk, ip, port)
	}

	return nil
@@ -92,10 +148,9 @@ func (p *tlsPoller) startNewTlsReader(chunk *tlsChunk, ip net.IP, port uint16, k
		},
	}

	isRequest := (chunk.isClient() && chunk.isWrite()) || (chunk.isServer() && chunk.isRead())
	tcpid := buildTcpId(isRequest, ip, port)
	tcpid := p.buildTcpId(chunk, ip, port)

	go dissect(extension, reader, isRequest, &tcpid, emitter, options, p.reqResMatcher)
	go dissect(extension, reader, chunk.isRequest(), &tcpid, emitter, options, p.reqResMatcher)
	return reader
}

@@ -120,27 +175,36 @@ func buildTlsKey(chunk *tlsChunk, ip net.IP, port uint16) string {
	return fmt.Sprintf("%v:%v-%v:%v", chunk.isClient(), chunk.isRead(), ip, port)
}

func buildTcpId(isRequest bool, ip net.IP, port uint16) api.TcpID {
	if isRequest {
func (p *tlsPoller) buildTcpId(chunk *tlsChunk, ip net.IP, port uint16) api.TcpID {
	myIp, myPort, err := getAddressBySockfd(p.procfs, chunk.Pid, chunk.Fd, chunk.isClient())

	if err != nil {
		// May happen if the socket already closed, very likely to happen for localhost
		//
		myIp = api.UnknownIp
		myPort = api.UnknownPort
	}

	if chunk.isRequest() {
		return api.TcpID{
			SrcIP:   UNKNOWN_HOST,
			SrcIP:   myIp.String(),
			DstIP:   ip.String(),
			SrcPort: strconv.Itoa(int(UNKNOWN_PORT)),
			DstPort: strconv.FormatInt(int64(port), 10),
			SrcPort: strconv.FormatUint(uint64(myPort), 10),
			DstPort: strconv.FormatUint(uint64(port), 10),
			Ident:   "",
		}
	} else {
		return api.TcpID{
			SrcIP:   ip.String(),
			DstIP:   UNKNOWN_HOST,
			SrcPort: strconv.FormatInt(int64(port), 10),
			DstPort: strconv.Itoa(int(UNKNOWN_PORT)),
			DstIP:   myIp.String(),
			SrcPort: strconv.FormatUint(uint64(port), 10),
			DstPort: strconv.FormatUint(uint64(myPort), 10),
			Ident:   "",
		}
	}
}

func logTls(chunk *tlsChunk, ip net.IP, port uint16) {
func (p *tlsPoller) logTls(chunk *tlsChunk, ip net.IP, port uint16) {
	var flagsStr string

	if chunk.isClient() {
@@ -155,8 +219,13 @@ func logTls(chunk *tlsChunk, ip net.IP, port uint16) {
		flagsStr += "W"
	}

	srcIp, srcPort, _ := getAddressBySockfd(p.procfs, chunk.Pid, chunk.Fd, true)
	dstIp, dstPort, _ := getAddressBySockfd(p.procfs, chunk.Pid, chunk.Fd, false)

	str := strings.ReplaceAll(strings.ReplaceAll(string(chunk.Data[0:chunk.Recorded]), "\n", " "), "\r", "")

	logger.Log.Infof("PID: %v (tid: %v) (fd: %v) (client: %v) (addr: %v:%v) (recorded %v out of %v) - %v - %v",
		chunk.Pid, chunk.Tgid, chunk.Fd, flagsStr, ip, port, chunk.Recorded, chunk.Len, str, hex.EncodeToString(chunk.Data[0:chunk.Recorded]))
	logger.Log.Infof("PID: %v (tid: %v) (fd: %v) (client: %v) (addr: %v:%v) (fdaddr %v:%v>%v:%v) (recorded %v out of %v) - %v - %v",
		chunk.Pid, chunk.Tgid, chunk.Fd, flagsStr, ip, port,
		srcIp, srcPort, dstIp, dstPort,
		chunk.Recorded, chunk.Len, str, hex.EncodeToString(chunk.Data[0:chunk.Recorded]))
}
@@ -132,8 +132,22 @@ func extractCgroup(lines []string) string {
	return ""
}

// cgroup in the /proc/<pid>/cgroup may look something like
//
//  /system.slice/docker-<ID>.scope
//  /system.slice/containerd-<ID>.scope
//  /kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3beae8e0_164d_4689_a087_efd902d8c2ab.slice/docker-<ID>.scope
//  /kubepods/besteffort/pod7709c1d5-447c-428f-bed9-8ddec35c93f4/<ID>
//
// This function extract the <ID> out of the cgroup path, the <ID> should match
// the "Container ID:" field when running kubectl describe pod <POD>
//
func normalizeCgroup(cgrouppath string) string {
	basename := strings.TrimSpace(path.Base(cgrouppath))

	if strings.Contains(basename, "-") {
		basename = basename[strings.Index(basename, "-") + 1:]
	}

	if strings.Contains(basename, ".") {
		return strings.TrimSuffix(basename, filepath.Ext(basename))
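For illustration (not part of the diff), how the trimming above behaves on the sample cgroup paths from the comment; the container IDs are placeholders, and the final fall-through return is assumed since the hunk is cut short:

package main

import (
	"fmt"
	"path"
	"path/filepath"
	"strings"
)

// Same basename trimming as normalizeCgroup above (tail assumed).
func normalize(cgrouppath string) string {
	basename := strings.TrimSpace(path.Base(cgrouppath))

	if strings.Contains(basename, "-") {
		basename = basename[strings.Index(basename, "-")+1:]
	}

	if strings.Contains(basename, ".") {
		return strings.TrimSuffix(basename, filepath.Ext(basename))
	}

	return basename // assumed fall-through
}

func main() {
	fmt.Println(normalize("/system.slice/docker-0123abcd.scope"))       // 0123abcd
	fmt.Println(normalize("/kubepods/besteffort/pod7709c1d5/0123abcd")) // 0123abcd
}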
@@ -1,13 +1,10 @@
package tlstapper

import (
	"bytes"
	"encoding/binary"

	"github.com/cilium/ebpf/perf"
	"github.com/cilium/ebpf/rlimit"
	"github.com/go-errors/errors"
	"github.com/up9inc/mizu/shared/logger"
	"github.com/up9inc/mizu/tap/api"
)

//go:generate go run github.com/cilium/ebpf/cmd/bpf2go tlsTapper bpf/tls_tapper.c -- -O2 -g -D__TARGET_ARCH_x86
@@ -16,10 +13,10 @@ type TlsTapper struct {
	bpfObjects      tlsTapperObjects
	syscallHooks    syscallHooks
	sslHooksStructs []sslHooks
	reader          *perf.Reader
	poller          *tlsPoller
}

func (t *TlsTapper) Init(bufferSize int) error {
func (t *TlsTapper) Init(bufferSize int, procfs string, extension *api.Extension) error {
	logger.Log.Infof("Initializing tls tapper (bufferSize: %v)", bufferSize)

	if err := setupRLimit(); err != nil {
@@ -27,55 +24,23 @@
	}

	t.bpfObjects = tlsTapperObjects{}

	if err := loadTlsTapperObjects(&t.bpfObjects, nil); err != nil {
		return errors.Wrap(err, 0)
	}

	t.syscallHooks = syscallHooks{}

	if err := t.syscallHooks.installSyscallHooks(&t.bpfObjects); err != nil {
		return err
	}

	t.sslHooksStructs = make([]sslHooks, 0)

	return t.initChunksReader(bufferSize)
	t.poller = newTlsPoller(t, extension, procfs)
	return t.poller.init(&t.bpfObjects, bufferSize)
}

func (t *TlsTapper) pollPerf(chunks chan<- *tlsChunk) {
	logger.Log.Infof("Start polling for tls events")

	for {
		record, err := t.reader.Read()

		if err != nil {
			close(chunks)

			if errors.Is(err, perf.ErrClosed) {
				return
			}

			LogError(errors.Errorf("Error reading chunks from tls perf, aborting TLS! %v", err))
			return
		}

		if record.LostSamples != 0 {
			logger.Log.Infof("Buffer is full, dropped %d chunks", record.LostSamples)
			continue
		}

		buffer := bytes.NewReader(record.RawSample)

		var chunk tlsChunk

		if err := binary.Read(buffer, binary.LittleEndian, &chunk); err != nil {
			LogError(errors.Errorf("Error parsing chunk %v", err))
			continue
		}

		chunks <- &chunk
	}
func (t *TlsTapper) Poll(emitter api.Emitter, options *api.TrafficFilteringOptions) {
	t.poller.poll(emitter, options)
}

func (t *TlsTapper) GlobalTap(sslLibrary string) error {
@@ -118,7 +83,7 @@ func (t *TlsTapper) Close() []error {
		errors = append(errors, sslHooks.close()...)
	}

	if err := t.reader.Close(); err != nil {
	if err := t.poller.close(); err != nil {
		errors = append(errors, err)
	}

@@ -135,18 +100,6 @@ func setupRLimit() error {
	return nil
}

func (t *TlsTapper) initChunksReader(bufferSize int) error {
	var err error

	t.reader, err = perf.NewReader(t.bpfObjects.ChunksBuffer, bufferSize)

	if err != nil {
		return errors.Wrap(err, 0)
	}

	return nil
}

func (t *TlsTapper) tapPid(pid uint32, sslLibrary string) error {
	logger.Log.Infof("Tapping TLS (pid: %v) (sslLibrary: %v)", pid, sslLibrary)
@@ -1,5 +1,4 @@
// Code generated by bpf2go; DO NOT EDIT.
//go:build arm64be || armbe || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64
// +build arm64be armbe mips mips64 mips64p32 ppc64 s390 s390x sparc sparc64

package tlstapper

Binary file not shown

@@ -1,5 +1,4 @@
// Code generated by bpf2go; DO NOT EDIT.
//go:build 386 || amd64 || amd64p32 || arm || arm64 || mips64le || mips64p32le || mipsle || ppc64le || riscv64
// +build 386 amd64 amd64p32 arm arm64 mips64le mips64p32le mipsle ppc64le riscv64

package tlstapper

Binary file not shown

16689	ui/package-lock.json	generated
File diff suppressed because it is too large
@@ -5,11 +5,14 @@ import {TrafficPage} from "./components/Pages/TrafficPage/TrafficPage";
import { ServiceMapModal } from './components/ServiceMapModal/ServiceMapModal';
import {useRecoilState} from "recoil";
import serviceMapModalOpenAtom from "./recoil/serviceMapModalOpen";
import OasModal from './components/OasModal/OasModal';
import oasModalOpenAtom from './recoil/oasModalOpen/atom';

const App = () => {

    const [analyzeStatus, setAnalyzeStatus] = useState(null);
    const [serviceMapModalOpen, setServiceMapModalOpen] = useRecoilState(serviceMapModalOpenAtom);
    const [oasModalOpen, setOasModalOpen] = useRecoilState(oasModalOpenAtom)

    return (
        <div className="mizuApp">
@@ -20,6 +23,10 @@ const App = () => {
                onOpen={() => setServiceMapModalOpen(true)}
                onClose={() => setServiceMapModalOpen(false)}
            />}
            {window["isOasEnabled"] && <OasModal
                openModal={oasModalOpen}
                handleCloseModal={() => setOasModalOpen(false)}
            />}
        </div>
    );
}

@@ -119,7 +119,7 @@ export const EntryDetailed = () => {
            bodySize={entryData.bodySize}
            elapsedTime={entryData.data.elapsedTime}
        />}
        {entryData && <EntrySummary entry={entryData.data}/>}
        {entryData && <EntrySummary entry={entryData.base}/>}
        <>
            {entryData && <EntryViewer
                representation={entryData.representation}

@@ -25,9 +25,12 @@ interface TCPInterface {
interface Entry {
    proto: ProtocolInterface,
    method?: string,
    methodQuery?: string,
    summary: string,
    summaryQuery: string,
    id: number,
    status?: number;
    statusQuery?: string;
    timestamp: Date;
    src: TCPInterface,
    dst: TCPInterface,
@@ -152,10 +155,10 @@ export const EntryItem: React.FC<EntryProps> = ({entry, style, headingMode}) =>
            horizontal={false}
        /> : null}
        {isStatusCodeEnabled && <div>
            <StatusCode statusCode={entry.status}/>
            <StatusCode statusCode={entry.status} statusQuery={entry.statusQuery}/>
        </div>}
        <div className={styles.endpointServiceContainer} style={{paddingLeft: endpointServiceContainer}}>
            <Summary method={entry.method} summary={entry.summary}/>
            <Summary method={entry.method} methodQuery={entry.methodQuery} summary={entry.summary} summaryQuery={entry.summaryQuery}/>
            <div className={styles.resolvedName}>
                <Queryable
                    query={`src.name == "${entry.src.name}"`}
29	ui/src/components/OasModal/OasModal.module.sass	Normal file
@@ -0,0 +1,29 @@
@import '../../variables.module.scss'

.boxContainer
  display: flex
  justifyContent: space-between
  padding: 10px

.selectHeader
  display: flex
  align-items: center
  width: 100%
  margin-top: -1%

.openApilogo
  width: 36px

.title
  color:#494677
  font-family: Lato
  font-size: 20px
  font-weight: 600

.selectContainer
  margin-left: 1%

.redoc
  height: 98%
  overflow-y: scroll

@@ -1,6 +0,0 @@
@import '../../variables.module.scss'

.NotSelectedMessage
  margin-left: 41%
  padding-top: 3%
  font-size: large
@@ -1,44 +1,86 @@
import { Box, Fade, FormControl, MenuItem, Modal } from "@material-ui/core";
import { useEffect, useState } from "react";
import { Box, Fade, FormControl, MenuItem, Modal, Backdrop, ListSubheader } from "@material-ui/core";
import { useCallback, useEffect, useState } from "react";
import { RedocStandalone } from "redoc";
import Api from "../../helpers/api";
import { Select } from "../UI/Select";
import closeIcon from "../assets/closeIcon.svg";
import { toast } from 'react-toastify';
import './OasModal.sass'
import style from './OasModal.module.sass';
import openApiLogo from '../assets/openApiLogo.png'
import { redocThemeOptions } from "./redocThemeOptions";

const modalStyle = {
    position: 'absolute',
    top: '6%',
    left: '50%',
    transform: 'translate(-50%, 0%)',
    width: '89vw',
    height: '82vh',
    bgcolor: 'background.paper',
    borderRadius: '5px',
    boxShadow: 24,
    p: 4,
    color: '#000',
};

const api = Api.getInstance();
const noOasServiceSelectedMessage = "Please Select OasService";
const ipAddressWithPortRegex = new RegExp('([0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}):([0-9]{1,5})');

const OasModal = ({ openModal, handleCloseModal }) => {
    const [oasServices, setOasServices] = useState([])
    const [oasServices, setOasServices] = useState([] as string[])
    const [selectedServiceName, setSelectedServiceName] = useState("");
    const [selectedServiceSpec, setSelectedServiceSpec] = useState(null);
    const [resolvedServices, setResolvedServices] = useState([]);
    const [unResolvedServices, setUnResolvedServices] = useState([]);

    const onSelectedOASService = useCallback( async (selectedService) => {
        if (!!selectedService){
            setSelectedServiceName(selectedService);
            if(oasServices.length === 0){
                return
            }
            try {
                const data = await api.getOasByService(selectedService);
                setSelectedServiceSpec(data);
            } catch (e) {
                toast.error("Error occurred while fetching service OAS spec");
                console.error(e);
            }
        }
    },[oasServices.length])

    const resolvedArrayBuilder = useCallback(async(services) => {
        const resServices = [];
        const unResServices = [];
        services.forEach(s => {
            if(ipAddressWithPortRegex.test(s)){
                unResServices.push(s);
            }
            else {
                resServices.push(s);
            }
        });

        resServices.sort();
        unResServices.sort();
        onSelectedOASService(resServices[0]);
        setResolvedServices(resServices);
        setUnResolvedServices(unResServices);
    },[onSelectedOASService])

    useEffect(() => {
        (async () => {
            try {
                const services = await api.getOasServices();
                resolvedArrayBuilder(services);
                setOasServices(services);
            } catch (e) {
                console.error(e);
            }
        })();
    }, [openModal]);
    }, [openModal,resolvedArrayBuilder]);


    const onSelectedOASService = async (selectedService) => {
        setSelectedServiceName(selectedService);
        if(oasServices.length === 0){
            return
        }
        try {
            const data = await api.getOasByService(selectedService);
            setSelectedServiceSpec(data);
        } catch (e) {
            toast.error("Error occurred while fetching service OAS spec");
            console.error(e);
        }
    };

    return (
        <Modal
@@ -47,43 +89,50 @@ const OasModal = ({ openModal, handleCloseModal }) => {
            open={openModal}
            onClose={handleCloseModal}
            closeAfterTransition
            hideBackdrop={true}
            style={{ overflow: "auto", backgroundColor: "#ffffff", color:"black" }}
            BackdropComponent={Backdrop}
            BackdropProps={{
                timeout: 500,
            }}
        >
            <Fade in={openModal}>
                <Box>
                    <div
                        style={{
                            display: "flex",
                            justifyContent: "space-between",
                            padding: "1%",
                        }}
                    >
                        <div style={{ marginLeft: "40%" }}>
                            <FormControl>
                                <Select
                                    labelId="service-select-label"
                                    id="service-select"
                                    label="Show OAS"
                                    placeholder="Show OAS"
                                    value={selectedServiceName}
                                    onChange={onSelectedOASService}
                                >
                                    {oasServices.map((service) => (
                                        <MenuItem key={service} value={service}>
                                            {service}
                                        </MenuItem>
                                    ))}
                                </Select>
                            </FormControl>
                <Box sx={modalStyle}>
                    <div className={style.boxContainer}>
                        <div className={style.selectHeader}>
                            <div><img src={openApiLogo} alt="openApi" className={style.openApilogo}/></div>
                            <div className={style.title}>OpenAPI selected service: </div>
                            <div className={style.selectContainer} >
                                <FormControl>
                                    <Select
                                        labelId="service-select-label"
                                        id="service-select"
                                        placeholder="Show OAS"
                                        value={selectedServiceName}
                                        onChange={onSelectedOASService}
                                    >
                                        <ListSubheader disableSticky={true}>Resolved</ListSubheader>
                                        {resolvedServices.map((service) => (
                                            <MenuItem key={service} value={service}>
                                                {service}
                                            </MenuItem>
                                        ))}
                                        <ListSubheader disableSticky={true}>UnResolved</ListSubheader>
                                        {unResolvedServices.map((service) => (
                                            <MenuItem key={service} value={service}>
                                                {service}
                                            </MenuItem>
                                        ))}
                                    </Select>
                                </FormControl>
                            </div>
                        </div>
                        <div style={{ cursor: "pointer" }}>
                            <img src={closeIcon} alt="Back" onClick={handleCloseModal} />
                            <img src={closeIcon} alt="close" onClick={handleCloseModal} />
                        </div>
                    </div>
                    {selectedServiceSpec && <RedocStandalone spec={selectedServiceSpec} />}
                    <div className="NotSelectedMessage">
                        {!selectedServiceName && noOasServiceSelectedMessage}
                    <div className={style.redoc}>
                        {selectedServiceSpec && <RedocStandalone
                            spec={selectedServiceSpec}
                            options={redocThemeOptions}/>}
                    </div>
                </Box>
            </Fade>
35	ui/src/components/OasModal/redocThemeOptions.ts	Normal file
@@ -0,0 +1,35 @@
export const redocThemeOptions = {
    theme:{
        codeBlock:{
            backgroundColor:"#11171a",
        },
        colors:{
            responses:{
                error:{
                    tabTextColor:"#1b1b29"
                },
                info:{
                    tabTextColor:"#1b1b29",
                },
                success:{
                    tabTextColor:"#0c0b1a"
                },
            },
            text:{
                primary:"#1b1b29",
                secondary:"#4d4d4d"
            }
        },
        rightPanel:{
            backgroundColor:"#253237",
        },
        sidebar:{
            backgroundColor:"#ffffff"
        },
        typography:{
            code:{
                color:"#0c0b1a"
            }
        }
    }
}
Some files were not shown because too many files have changed in this diff.