Mirror of https://github.com/kubeshark/kubeshark.git (synced 2026-02-15 18:39:58 +00:00)

Compare commits: 34 commits
Commits in this comparison (SHA1):

fd5b72a66e
826f2cfb04
e6f140adb2
1af2cd728d
d5f6093084
32ad2f17c3
553433622e
b6917ed0b7
d22e5a9620
e880a9224f
67c3e5aab5
ed791c988e
3567f5c00b
605c67c22f
9b44838fed
9f6abf7a32
609f85e6ae
d2188bfc22
d5942c3c49
f03c2adce4
93ebec09b9
06c7b9c748
db66119182
2ae6153631
2dfb0b2549
a2b774ae1f
862f4252c3
d5f057156e
96ce694945
2d7ff95426
af9daf91cf
ea82d1779b
51ddbf3815
8bbdcb6877
.github/CODEOWNERS (vendored): 4 lines removed
@@ -1,4 +0,0 @@
# This is a comment.
# Each line is a file pattern followed by one or more owners.

/ui/ @frontend
.github/ISSUE_TEMPLATE/bug_report.md (vendored): 38 lines removed
@@ -1,38 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Run mizu <command> '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Logs**
Upload logs:
1. Run the mizu command with `--set dump-logs=true` (e.g `mizu tap --set dump-logs=true`)
2. Try to reproduce the issue
3. CNTRL+C on terminal tab which runs mizu
4. Upload the logs zip file from ~/.mizu/mizu_logs_**.zip

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
- OS: [e.g. iOS]
- Browser [e.g. chrome]

**Additional context**
Add any other context about the problem here.
.github/workflows/acceptance_tests.yml (vendored): 32 lines removed
@@ -1,32 +0,0 @@
name: acceptance tests

on:
  pull_request:
    branches:
      - 'main'
  push:
    branches:
      - 'develop'

concurrency:
  group: mizu-acceptance-tests-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run-acceptance-tests:
    name: Run acceptance tests
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go 1.16
        uses: actions/setup-go@v2
        with:
          go-version: '^1.16'

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Setup acceptance test
        run: source ./acceptanceTests/setup.sh

      - name: Test
        run: make acceptance-test
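The two real steps of this job can also be run outside CI. The sketch below is a hedged local equivalent, assuming a Docker daemon and a local Kubernetes such as the minikube that setup.sh installs and starts:

```shell
# Local approximation of the acceptance-tests job (a sketch, not an official procedure).
# setup.sh installs kubectl/minikube if missing, starts minikube, creates the
# mizu-tests namespaces and httpbin deployments, and builds the CI images.
source ./acceptanceTests/setup.sh

# Runs the acceptanceTests suite via the repository Makefile.
make acceptance-test
```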
.github/workflows/pr_validation.yml (vendored): 46 lines removed
@@ -1,46 +0,0 @@
name: PR validation

on:
  pull_request:
    branches:
      - 'develop'
      - 'main'

concurrency:
  group: mizu-pr-validation-${{ github.ref }}
  cancel-in-progress: true

jobs:
  build-cli:
    name: Build CLI
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go 1.16
        uses: actions/setup-go@v2
        with:
          go-version: '^1.16'

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Build CLI
        run: make cli

  build-agent:
    name: Build Agent
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go 1.16
        uses: actions/setup-go@v2
        with:
          go-version: '^1.16'

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - shell: bash
        run: |
          sudo apt-get install libpcap-dev

      - name: Build Agent
        run: make agent
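To reproduce the two PR-validation jobs on a development machine, the following sketch assumes a Debian/Ubuntu host with Go 1.16+ installed:

```shell
# Build the CLI exactly as the build-cli job does.
make cli

# The agent links against libpcap, so install the development headers first
# (package name as used by the workflow; adjust for other distributions).
sudo apt-get install libpcap-dev
make agent
```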
@@ -1,15 +1,10 @@
name: publish

name: public-cli
on:
push:
branches:
- 'develop'
- 'main'

concurrency:
group: mizu-publish-${{ github.ref }}
cancel-in-progress: true

- develop
- main
- my-temp-release-check
jobs:
docker:
runs-on: ubuntu-latest
@@ -33,48 +28,14 @@ jobs:
with:
releaseType: ${{ steps.condval.outputs.value }}
github_token: ${{ secrets.GITHUB_TOKEN }}
- name: Get version parameters
- name: Get base image name
shell: bash
run: |
echo "##[set-output name=build_timestamp;]$(echo $(date +%s))"
echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
id: version_parameters
- name: Get base image name
shell: bash
run: echo "##[set-output name=image;]$(echo gcr.io/up9-docker-hub/mizu/${GITHUB_REF#refs/heads/})"
id: base_image_step
- name: Docker meta
id: meta
uses: crazy-max/ghaction-docker-meta@v2
with:
images: ${{ steps.base_image_step.outputs.image }}
tags: |
type=sha
type=raw,${{ github.sha }}
type=raw,${{ steps.versioning.outputs.version }}
- name: Login to DockerHub
uses: docker/login-action@v1
with:
registry: gcr.io
username: _json_key
password: ${{ secrets.GCR_JSON_KEY }}
- name: Build and push
uses: docker/build-push-action@v2
with:
context: .
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
SEM_VER=${{ steps.versioning.outputs.version }}
BUILD_TIMESTAMP=${{ steps.version_parameters.outputs.build_timestamp }}
GIT_BRANCH=${{ steps.version_parameters.outputs.branch }}
COMMIT_HASH=${{ github.sha }}
- name: Build and Push CLI
run: make push-cli SEM_VER='${{ steps.versioning.outputs.version }}' BUILD_TIMESTAMP='${{ steps.version_parameters.outputs.build_timestamp }}'
- shell: bash
run: |
echo '${{ steps.versioning.outputs.version }}' >> cli/bin/version.txt
- name: publish
uses: ncipollo/release-action@v1
with:
.github/workflows/publish-docker.yml (vendored, new file): 39 lines added
@@ -0,0 +1,39 @@
name: publish-docker
on:
  push:
    branches:
      - 'develop'
      - 'main'
jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Get base image name
        shell: bash
        run: echo "##[set-output name=image;]$(echo gcr.io/up9-docker-hub/mizu/${GITHUB_REF#refs/heads/})"
        id: base_image_step
      - name: Docker meta
        id: meta
        uses: crazy-max/ghaction-docker-meta@v2
        with:
          images: ${{ steps.base_image_step.outputs.image }}
          tags: |
            type=sha
            type=raw,${{ github.sha }}
            type=raw,latest
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          registry: gcr.io
          username: _json_key
          password: ${{ secrets.GCR_JSON_KEY }}
      - name: Build and push
        uses: docker/build-push-action@v2
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
.github/workflows/tests_validation.yml (vendored): 56 lines removed
@@ -1,56 +0,0 @@
name: tests validation

on:
  pull_request:
    branches:
      - 'develop'
      - 'main'
  push:
    branches:
      - 'develop'
      - 'main'

concurrency:
  group: mizu-tests-validation-${{ github.ref }}
  cancel-in-progress: true

jobs:
  run-tests-cli:
    name: Run CLI tests
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go 1.16
        uses: actions/setup-go@v2
        with:
          go-version: '^1.16'

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - name: Test
        run: make test-cli

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v2

  run-tests-agent:
    name: Run Agent tests
    runs-on: ubuntu-latest
    steps:
      - name: Set up Go 1.16
        uses: actions/setup-go@v2
        with:
          go-version: '^1.16'

      - name: Check out code into the Go module directory
        uses: actions/checkout@v2

      - shell: bash
        run: |
          sudo apt-get install libpcap-dev

      - name: Test
        run: make test-agent

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v2
@@ -1,18 +0,0 @@

# CONTRIBUTE
We welcome code contributions from the community.
Please read and follow the guidelines below.

## Communication
* Before starting work on a major feature, please reach out to us via [GitHub](https://github.com/up9inc/mizu), [Slack](https://join.slack.com/share/zt-u6bbs3pg-X1zhQOXOH0yEoqILgH~csw), [email](mailto:mizu@up9.com), etc. We will make sure no one else is already working on it. A _major feature_ is defined as any change that is > 100 LOC altered (not including tests), or changes any user-facing behavior
* Small patches and bug fixes don't need prior communication.

## Contribution requirements
* Code style - most of the code is written in Go, please follow [these guidelines](https://golang.org/doc/effective_go)
* Go-tools compatible (`go get`, `go test`, etc)
* Unit-test coverage can’t go down ..
* Code must be usefully commented. Not only for developers on the project, but also for external users of these packages
* When reviewing PRs, you are encouraged to use Golang's [code review comments page](https://github.com/golang/go/wiki/CodeReviewComments)
Dockerfile: 28 changed lines
@@ -13,30 +13,19 @@ ENV CGO_ENABLED=1 GOOS=linux GOARCH=amd64

RUN apk add libpcap-dev gcc g++ make

# Move to agent working directory (/agent-build).
WORKDIR /app/agent-build
# Move to api working directory (/api-build).
WORKDIR /app/api-build

COPY agent/go.mod agent/go.sum ./
COPY api/go.mod api/go.sum ./
COPY shared/go.mod shared/go.mod ../shared/
COPY tap/go.mod tap/go.mod ../tap/
RUN go mod download
# cheap trick to make the build faster (As long as go.mod wasn't changes)
RUN go list -f '{{.Path}}@{{.Version}}' -m all | sed 1d | grep -e 'go-cache' -e 'sqlite' | xargs go get

ARG COMMIT_HASH
ARG GIT_BRANCH
ARG BUILD_TIMESTAMP
ARG SEM_VER

# Copy and build agent code
# Copy and build api code
COPY shared ../shared
COPY tap ../tap
COPY agent .
RUN go build -ldflags="-s -w \
-X 'mizuserver/pkg/version.GitCommitHash=${COMMIT_HASH}' \
-X 'mizuserver/pkg/version.Branch=${GIT_BRANCH}' \
-X 'mizuserver/pkg/version.BuildTimestamp=${BUILD_TIMESTAMP}' \
-X 'mizuserver/pkg/version.SemVer=${SEM_VER}'" -o mizuagent .
COPY api .
RUN go build -ldflags="-s -w" -o mizuagent .


FROM alpine:3.13.5
@@ -45,11 +34,10 @@ RUN apk add bash libpcap-dev tcpdump
WORKDIR /app

# Copy binary and config files from /build to root folder of scratch container.
COPY --from=builder ["/app/agent-build/mizuagent", "."]
COPY --from=builder ["/app/api-build/mizuagent", "."]
COPY --from=site-build ["/app/ui-build/build", "site"]

# gin-gonic runs in debug mode without this
ENV GIN_MODE=release
COPY api/start.sh .

# this script runs both apiserver and passivetapper and exits either if one of them exits, preventing a scenario where the container runs without one process
ENTRYPOINT "/app/mizuagent"
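The four ARG values above are the same ones the publish workflow passes as build-args. As a hedged sketch (the image name and tag below are placeholders, not the project's registry path), an equivalent local build looks like:

```shell
# Sketch only: build the agent image from the repository root, injecting the same
# version metadata the CI workflow supplies via --build-arg.
docker build \
  --build-arg SEM_VER=0.0.0 \
  --build-arg BUILD_TIMESTAMP="$(date +%s)" \
  --build-arg GIT_BRANCH="$(git rev-parse --abbrev-ref HEAD)" \
  --build-arg COMMIT_HASH="$(git rev-parse HEAD)" \
  -t mizu-agent:local .
```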
Makefile: 56 changed lines
@@ -8,7 +8,7 @@ SHELL=/bin/bash
# HELP
# This will output the help for each task
# thanks to https://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
.PHONY: help ui agent cli tap docker
.PHONY: help ui api cli tap docker

help: ## This help.
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
@@ -19,37 +19,34 @@ help: ## This help.
TS_SUFFIX="$(shell date '+%s')"
GIT_BRANCH="$(shell git branch | grep \* | cut -d ' ' -f2 | tr '[:upper:]' '[:lower:]' | tr '/' '_')"
BUCKET_PATH=static.up9.io/mizu/$(GIT_BRANCH)
export SEM_VER?=0.0.0

ui: ## Build UI.
ui: ## build UI
@(cd ui; npm i ; npm run build; )
@ls -l ui/build

cli: ## Build CLI.
cli: # build CLI
@echo "building cli"; cd cli && $(MAKE) build

build-cli-ci: ## Build CLI for CI.
@echo "building cli for ci"; cd cli && $(MAKE) build GIT_BRANCH=ci SUFFIX=ci
api: ## build API server
@(echo "building API server .." )
@(cd api; go build -o build/apiserver main.go)
@ls -l api/build

agent: ## Build agent.
@(echo "building mizu agent .." )
@(cd agent; go build -o build/mizuagent main.go)
@ls -l agent/build
#tap: ## build tap binary
# @(cd tap; go build -o build/tap ./src)
# @ls -l tap/build

docker: ## Build and publish agent docker image.
$(MAKE) push-docker
docker: ## build Docker image
@(echo "building docker image" )
./build-push-featurebranch.sh

push: push-docker push-cli ## Build and publish agent docker image & CLI.
push: push-docker push-cli ## build and publish Mizu docker image & CLI

push-docker: ## Build and publish agent docker image.
push-docker:
@echo "publishing Docker image .. "
./build-push-featurebranch.sh

build-docker-ci: ## Build agent docker image for CI.
@echo "building docker image for ci"
./build-agent-ci.sh

push-cli: ## Build and publish CLI.
push-cli:
@echo "publishing CLI .. "
@cd cli; $(MAKE) build-all
@echo "publishing file ${OUTPUT_FILE} .."
@@ -57,25 +54,18 @@ push-cli: ## Build and publish CLI.
gsutil cp -r ./cli/bin/* gs://${BUCKET_PATH}/
gsutil setmeta -r -h "Cache-Control:public, max-age=30" gs://${BUCKET_PATH}/\*

clean: clean-ui clean-agent clean-cli clean-docker ## Clean all build artifacts.

clean-ui: ## Clean UI.
clean: clean-ui clean-api clean-cli clean-docker ## Clean all build artifacts

clean-ui:
@(rm -rf ui/build ; echo "UI cleanup done" )

clean-agent: ## Clean agent.
@(rm -rf agent/build ; echo "agent cleanup done" )
clean-api:
@(rm -rf api/build ; echo "api cleanup done" )

clean-cli: ## Clean CLI.
clean-cli:
@(cd cli; make clean ; echo "CLI cleanup done" )

clean-docker:
clean-docker:
@(echo "DOCKER cleanup - NOT IMPLEMENTED YET " )

test-cli:
@echo "running cli tests"; cd cli && $(MAKE) test

test-agent:
@echo "running agent tests"; cd agent && $(MAKE) test

acceptance-test:
@echo "running acceptance tests"; cd acceptanceTests && $(MAKE) test
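For orientation, the targets visible on both sides of this diff can be exercised locally; this is just a usage sketch of what is already defined above:

```shell
make help             # print the auto-documented target list
make test-cli         # run CLI unit tests (delegates to cli's Makefile `test` target)
make test-agent       # run agent unit tests
make acceptance-test  # run the acceptanceTests suite (expects the setup.sh environment)
make clean            # delegates to the clean-* targets shown above
```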
PERMISSIONS.md: 328 lines removed
@@ -1,328 +0,0 @@

# Kubernetes permissions for MIZU

This document describes in details all permissions required for full and correct operation of Mizu

We broke down this list into few categories:
- Required - what is needed for `mizu` to run properly on your k8s cluster
- Optional - permissions needed for proper name resolving for service & pod IPs
- addition required for policy validation


# Required permissions

Mizu needs following permissions on your Kubernetes cluster to run properly

```yaml
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - list
  - watch
  - create
  - delete
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - create
  - delete
- apiGroups:
  - apps
  resources:
  - daemonsets
  verbs:
  - create
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
  - list
  - watch
  - create
  - delete
- apiGroups:
  - ""
  resources:
  - services/proxy
  verbs:
  - get
```
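Before installing, it can be handy to verify that the current kubectl context actually carries these rules. The snippet below is a convenience sketch using `kubectl auth can-i`, not part of the Mizu documentation:

```shell
# Check the required verbs against the current context/namespace.
for verb in list watch create delete; do
  kubectl auth can-i "$verb" pods
done
kubectl auth can-i create services
kubectl auth can-i create daemonsets.apps
kubectl auth can-i create namespaces
kubectl auth can-i get services/proxy
```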

## Permissions required for service / pod name resolving (opt)

Optionally, for proper resolving of IP addresses to Kubernetes service name, Mizu needs below permissions:

```yaml
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
  - create
  - delete
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
  - create
  - delete
- apiGroups:
  - apps
  resources:
  - daemonsets
  verbs:
  - create
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - namespaces
  verbs:
  - get
  - list
  - watch
  - create
  - delete
- apiGroups:
  - ""
  resources:
  - services/proxy
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  verbs:
  - get
  - create
  - delete
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - clusterroles
  verbs:
  - get
  - create
  - delete
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - clusterrolebindings
  verbs:
  - get
  - create
  - delete
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - roles
  verbs:
  - get
  - create
  - delete
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - rolebindings
  verbs:
  - get
  - create
  - delete
- apiGroups:
  - apps
  - extensions
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  - extensions
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  - apps
  - extensions
  resources:
  - endpoints
  verbs:
  - get
  - list
  - watch
```

## Permissions for Policy rules validation feature (opt)

Optionally, in order to use the policy rules validation feature, Mizu requires the following additional permissions:

```yaml
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - create
  - delete
```

- - -

## Namespace-Restricted mode

Alternatively, in order to restrict Mizu to one namespace only (by setting `agent.namespace` in the config file), Mizu needs the following permissions in that namespace:

```yaml
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
  - create
  - delete
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - create
  - delete
- apiGroups:
  - apps
  resources:
  - daemonsets
  verbs:
  - get
  - create
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - services/proxy
  verbs:
  - get
```

### Name resolving in Namespace-Restricted mode (opt)

To restrict Mizu to one namespace while also resolving IPs, Mizu needs the following permissions in that namespace:

```yaml
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
  - create
  - delete
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
  - create
  - delete
- apiGroups:
  - apps
  resources:
  - daemonsets
  verbs:
  - get
  - create
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - services/proxy
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  verbs:
  - get
  - create
  - delete
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - roles
  verbs:
  - get
  - create
  - delete
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - rolebindings
  verbs:
  - get
  - create
  - delete
- apiGroups:
  - apps
  - extensions
  resources:
  - pods
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - apps
  - extensions
  resources:
  - services
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  - apps
  - extensions
  resources:
  - endpoints
  verbs:
  - get
  - list
  - watch
```
README.md: 178 changed lines
@@ -1,174 +1,24 @@


# The API Traffic Viewer for Kubernetes

A simple-yet-powerful API traffic viewer for Kubernetes enabling you to view all API communication between microservices to help your debug and troubleshoot regressions.

Think TCPDump and Chrome Dev Tools combined.



## Features

- Simple and powerful CLI
- Real-time view of all HTTP requests, REST and gRPC API calls
- No installation or code instrumentation
- Works completely on premises
# 水 mizu
standalone web app traffic viewer for Kubernetes

## Download

Download Mizu for your platform and operating system
Download `mizu` for your platform and operating system

### Latest Stable Release
### Latest stable release

* for MacOS - Intel
```
curl -Lo mizu \
https://github.com/up9inc/mizu/releases/latest/download/mizu_darwin_amd64 \
&& chmod 755 mizu
```

* for Linux - Intel 64bit
```
curl -Lo mizu \
https://github.com/up9inc/mizu/releases/latest/download/mizu_linux_amd64 \
&& chmod 755 mizu
```
* for MacOS - `curl -o mizu https://github.com/up9inc/mizu/releases/download/latest/mizu_darwin_amd64 && chmod 755 mizu`
* for Linux - `curl -o mizu https://github.com/up9inc/mizu/releases/download/latest/mizu_linux_amd64 && chmod 755 mizu`

SHA256 checksums are available on the [Releases](https://github.com/up9inc/mizu/releases) page
### Development (unstable) build
Pick one from the [Releases](https://github.com/up9inc/mizu/releases) page.

### Development (unstable) Build
Pick one from the [Releases](https://github.com/up9inc/mizu/releases) page
## How to run

## Kubeconfig & Permissions
While `mizu`most often works out of the box, you can influence its behavior:

1. [OPTIONAL] Set `KUBECONFIG` environment variable to your Kubernetes configuration. If this is not set, Mizu assumes that configuration is at `${HOME}/.kube/config`
2. `mizu` assumes user running the command has permissions to create resources (such as pods, services, namespaces) on your Kubernetes cluster (no worries - `mizu` resources are cleaned up upon termination)

For detailed list of k8s permissions see [PERMISSIONS](PERMISSIONS.md) document


## How to Run

1. Find pods you'd like to tap to in your Kubernetes cluster
2. Run `mizu tap` or `mizu tap PODNAME`
3. Open browser on `http://localhost:8899/mizu` **or** as instructed in the CLI
4. Watch the API traffic flowing
5. Type ^C to stop
1. Find pod you'd like to tap to in your Kubernetes cluster
2. Run `mizu PODNAME` or `mizu REGEX`
3. Open browser on `http://localhost:8899` as instructed ..
4. Watch the WebAPI traffic flowing ..

## Examples

Run `mizu help` for usage options

To tap all pods in current namespace -
```
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
carts-66c77f5fbb-fq65r 2/2 Running 0 20m
catalogue-5f4cb7cf5-7zrmn 2/2 Running 0 20m
front-end-649fc5fd6-kqbtn 2/2 Running 0 20m
..

$ mizu tap
+carts-66c77f5fbb-fq65r
+catalogue-5f4cb7cf5-7zrmn
+front-end-649fc5fd6-kqbtn
Web interface is now available at http://localhost:8899
^C
```


To tap specific pod -
```bash
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
front-end-649fc5fd6-kqbtn 2/2 Running 0 7m
..

$ mizu tap front-end-649fc5fd6-kqbtn
+front-end-649fc5fd6-kqbtn
Web interface is now available at http://localhost:8899
^C
```

To tap multiple pods using regex -
```bash
$ kubectl get pods
NAME READY STATUS RESTARTS AGE
carts-66c77f5fbb-fq65r 2/2 Running 0 20m
catalogue-5f4cb7cf5-7zrmn 2/2 Running 0 20m
front-end-649fc5fd6-kqbtn 2/2 Running 0 20m
..

$ mizu tap "^ca.*"
+carts-66c77f5fbb-fq65r
+catalogue-5f4cb7cf5-7zrmn
Web interface is now available at http://localhost:8899
^C
```

## Configuration

Mizu can work with config file which should be stored in ${HOME}/.mizu/config.yaml (macOS: ~/.mizu/config.yaml) <br />
In case no config file found, defaults will be used <br />
In case of partial configuration defined, all other fields will be used with defaults <br />
You can always override the defaults or config file with CLI flags

To get the default config params run `mizu config` <br />
To generate a new config file with default values use `mizu config -r`

### Telemetry

By default, mizu reports usage telemetry. It can be disabled by adding a line of `telemetry: false` in the `${HOME}/.mizu/config.yaml` file

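Putting the configuration notes above together, a minimal config file could look like the sketch below; the only keys taken from this page and the acceptance tests are `tap.gui-port` and the top-level `telemetry` flag, so treat it as illustrative and use `mizu config -r` to generate the authoritative defaults:

```shell
# Sketch: write a minimal ~/.mizu/config.yaml (keys are the ones referenced in this diff;
# `mizu config -r` regenerates the full default file).
mkdir -p "${HOME}/.mizu"
cat > "${HOME}/.mizu/config.yaml" <<'EOF'
tap:
  gui-port: 8899
telemetry: false
EOF
```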
## Advanced Usage

### Namespace-Restricted Mode

Some users have permission to only manage resources in one particular namespace assigned to them
By default `mizu tap` creates a new namespace `mizu` for all of its Kubernetes resources. In order to instead install
Mizu in an existing namespace, set the `mizu-resources-namespace` config option

If `mizu-resources-namespace` is set to a value other than the default `mizu`, Mizu will operate in a
Namespace-Restricted mode. It will only tap pods in `mizu-resources-namespace`. This way Mizu only requires permissions
to the namespace set by `mizu-resources-namespace`. The user must set the tapped namespace to the same namespace by
using the `--namespace` flag or by setting `tap.namespaces` in the config file

Setting `mizu-resources-namespace=mizu` resets Mizu to its default behavior

### User agent filtering

User-agent filtering (like health checks) - can be configured using command-line options:

```shell
$ mizu tap "^ca.*" --set ignored-user-agents=kube-probe --set ignored-user-agents=prometheus
+carts-66c77f5fbb-fq65r
+catalogue-5f4cb7cf5-7zrmn
Web interface is now available at http://localhost:8899
^C

```

Any request that contains `User-Agent` header with one of the specified values (`kube-probe` or `prometheus`) will not be captured

### API Rules validation

This feature allows you to define set of simple rules, and test the API against them.
Such validation may test response for specific JSON fields, headers, etc.

Please see [API RULES](docs/POLICY_RULES.md) page for more details and syntax.


## How to Run local UI

- run from mizu/agent `go run main.go --hars-read --hars-dir <folder>`

- copy Har files into the folder from last command

- change `MizuWebsocketURL` and `apiURL` in `api.js` file

- run from mizu/ui - `npm run start`

- open browser on `localhost:3000`
TBD
TESTING.md: 15 lines removed
@@ -1,15 +0,0 @@

# TESTING
Testing guidelines for Mizu project

## Unit-tests
* TBD
* TBD
* TBD


## System tests
* TBD
* TBD
* TBD
@@ -1,2 +0,0 @@
test: ## Run acceptance tests.
@go test ./... -timeout 1h
@@ -1,283 +0,0 @@
|
||||
package acceptanceTests
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"gopkg.in/yaml.v3"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type tapConfig struct {
|
||||
GuiPort uint16 `yaml:"gui-port"`
|
||||
}
|
||||
|
||||
type configStruct struct {
|
||||
Tap tapConfig `yaml:"tap"`
|
||||
}
|
||||
|
||||
func TestConfigRegenerate(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("ignored acceptance test")
|
||||
}
|
||||
|
||||
cliPath, cliPathErr := getCliPath()
|
||||
if cliPathErr != nil {
|
||||
t.Errorf("failed to get cli path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
configPath, configPathErr := getConfigPath()
|
||||
if configPathErr != nil {
|
||||
t.Errorf("failed to get config path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
configCmdArgs := getDefaultConfigCommandArgs()
|
||||
|
||||
configCmdArgs = append(configCmdArgs, "-r")
|
||||
|
||||
configCmd := exec.Command(cliPath, configCmdArgs...)
|
||||
t.Logf("running command: %v", configCmd.String())
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := os.Remove(configPath); err != nil {
|
||||
t.Logf("failed to delete config file, err: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
if err := configCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start config command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := configCmd.Wait(); err != nil {
|
||||
t.Errorf("failed to wait config command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
_, readFileErr := ioutil.ReadFile(configPath)
|
||||
if readFileErr != nil {
|
||||
t.Errorf("failed to read config file, err: %v", readFileErr)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigGuiPort(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("ignored acceptance test")
|
||||
}
|
||||
|
||||
tests := []uint16{8898}
|
||||
|
||||
for _, guiPort := range tests {
|
||||
t.Run(fmt.Sprintf("%d", guiPort), func(t *testing.T) {
|
||||
cliPath, cliPathErr := getCliPath()
|
||||
if cliPathErr != nil {
|
||||
t.Errorf("failed to get cli path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
configPath, configPathErr := getConfigPath()
|
||||
if configPathErr != nil {
|
||||
t.Errorf("failed to get config path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
config := configStruct{}
|
||||
config.Tap.GuiPort = guiPort
|
||||
|
||||
configBytes, marshalErr := yaml.Marshal(config)
|
||||
if marshalErr != nil {
|
||||
t.Errorf("failed to marshal config, err: %v", marshalErr)
|
||||
return
|
||||
}
|
||||
|
||||
if writeErr := ioutil.WriteFile(configPath, configBytes, 0644); writeErr != nil {
|
||||
t.Errorf("failed to write config to file, err: %v", writeErr)
|
||||
return
|
||||
}
|
||||
|
||||
tapCmdArgs := getDefaultTapCommandArgs()
|
||||
|
||||
tapNamespace := getDefaultTapNamespace()
|
||||
tapCmdArgs = append(tapCmdArgs, tapNamespace...)
|
||||
|
||||
tapCmd := exec.Command(cliPath, tapCmdArgs...)
|
||||
t.Logf("running command: %v", tapCmd.String())
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := cleanupCommand(tapCmd); err != nil {
|
||||
t.Logf("failed to cleanup tap command, err: %v", err)
|
||||
}
|
||||
|
||||
if err := os.Remove(configPath); err != nil {
|
||||
t.Logf("failed to delete config file, err: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
if err := tapCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start tap command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
apiServerUrl := getApiServerUrl(guiPort)
|
||||
|
||||
if err := waitTapPodsReady(apiServerUrl); err != nil {
|
||||
t.Errorf("failed to start tap pods on time, err: %v", err)
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigSetGuiPort(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("ignored acceptance test")
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
ConfigFileGuiPort uint16
|
||||
SetGuiPort uint16
|
||||
}{
|
||||
{ConfigFileGuiPort: 8898, SetGuiPort: 8897},
|
||||
}
|
||||
|
||||
for _, guiPortStruct := range tests {
|
||||
t.Run(fmt.Sprintf("%d", guiPortStruct.SetGuiPort), func(t *testing.T) {
|
||||
cliPath, cliPathErr := getCliPath()
|
||||
if cliPathErr != nil {
|
||||
t.Errorf("failed to get cli path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
configPath, configPathErr := getConfigPath()
|
||||
if configPathErr != nil {
|
||||
t.Errorf("failed to get config path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
config := configStruct{}
|
||||
config.Tap.GuiPort = guiPortStruct.ConfigFileGuiPort
|
||||
|
||||
configBytes, marshalErr := yaml.Marshal(config)
|
||||
if marshalErr != nil {
|
||||
t.Errorf("failed to marshal config, err: %v", marshalErr)
|
||||
return
|
||||
}
|
||||
|
||||
if writeErr := ioutil.WriteFile(configPath, configBytes, 0644); writeErr != nil {
|
||||
t.Errorf("failed to write config to file, err: %v", writeErr)
|
||||
return
|
||||
}
|
||||
|
||||
tapCmdArgs := getDefaultTapCommandArgs()
|
||||
|
||||
tapNamespace := getDefaultTapNamespace()
|
||||
tapCmdArgs = append(tapCmdArgs, tapNamespace...)
|
||||
|
||||
tapCmdArgs = append(tapCmdArgs, "--set", fmt.Sprintf("tap.gui-port=%v", guiPortStruct.SetGuiPort))
|
||||
|
||||
tapCmd := exec.Command(cliPath, tapCmdArgs...)
|
||||
t.Logf("running command: %v", tapCmd.String())
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := cleanupCommand(tapCmd); err != nil {
|
||||
t.Logf("failed to cleanup tap command, err: %v", err)
|
||||
}
|
||||
|
||||
if err := os.Remove(configPath); err != nil {
|
||||
t.Logf("failed to delete config file, err: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
if err := tapCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start tap command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
apiServerUrl := getApiServerUrl(guiPortStruct.SetGuiPort)
|
||||
|
||||
if err := waitTapPodsReady(apiServerUrl); err != nil {
|
||||
t.Errorf("failed to start tap pods on time, err: %v", err)
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigFlagGuiPort(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("ignored acceptance test")
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
ConfigFileGuiPort uint16
|
||||
FlagGuiPort uint16
|
||||
}{
|
||||
{ConfigFileGuiPort: 8898, FlagGuiPort: 8896},
|
||||
}
|
||||
|
||||
for _, guiPortStruct := range tests {
|
||||
t.Run(fmt.Sprintf("%d", guiPortStruct.FlagGuiPort), func(t *testing.T) {
|
||||
cliPath, cliPathErr := getCliPath()
|
||||
if cliPathErr != nil {
|
||||
t.Errorf("failed to get cli path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
configPath, configPathErr := getConfigPath()
|
||||
if configPathErr != nil {
|
||||
t.Errorf("failed to get config path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
config := configStruct{}
|
||||
config.Tap.GuiPort = guiPortStruct.ConfigFileGuiPort
|
||||
|
||||
configBytes, marshalErr := yaml.Marshal(config)
|
||||
if marshalErr != nil {
|
||||
t.Errorf("failed to marshal config, err: %v", marshalErr)
|
||||
return
|
||||
}
|
||||
|
||||
if writeErr := ioutil.WriteFile(configPath, configBytes, 0644); writeErr != nil {
|
||||
t.Errorf("failed to write config to file, err: %v", writeErr)
|
||||
return
|
||||
}
|
||||
|
||||
tapCmdArgs := getDefaultTapCommandArgs()
|
||||
|
||||
tapNamespace := getDefaultTapNamespace()
|
||||
tapCmdArgs = append(tapCmdArgs, tapNamespace...)
|
||||
|
||||
tapCmdArgs = append(tapCmdArgs, "-p", fmt.Sprintf("%v", guiPortStruct.FlagGuiPort))
|
||||
|
||||
tapCmd := exec.Command(cliPath, tapCmdArgs...)
|
||||
t.Logf("running command: %v", tapCmd.String())
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := cleanupCommand(tapCmd); err != nil {
|
||||
t.Logf("failed to cleanup tap command, err: %v", err)
|
||||
}
|
||||
|
||||
if err := os.Remove(configPath); err != nil {
|
||||
t.Logf("failed to delete config file, err: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
if err := tapCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start tap command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
apiServerUrl := getApiServerUrl(guiPortStruct.FlagGuiPort)
|
||||
|
||||
if err := waitTapPodsReady(apiServerUrl); err != nil {
|
||||
t.Errorf("failed to start tap pods on time, err: %v", err)
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,5 +0,0 @@
module github.com/up9inc/mizu/tests

go 1.16

require gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
@@ -1,4 +0,0 @@
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -1,55 +0,0 @@
#!/bin/bash

PREFIX=$HOME/local/bin
VERSION=v1.22.0

echo "Attempting to install minikube and assorted tools to $PREFIX"

if ! [ -x "$(command -v kubectl)" ]; then
  echo "Installing kubectl version $VERSION"
  curl -LO "https://storage.googleapis.com/kubernetes-release/release/$VERSION/bin/linux/amd64/kubectl"
  chmod +x kubectl
  mv kubectl "$PREFIX"
else
  echo "kubetcl is already installed"
fi

if ! [ -x "$(command -v minikube)" ]; then
  echo "Installing minikube version $VERSION"
  curl -Lo minikube https://storage.googleapis.com/minikube/releases/$VERSION/minikube-linux-amd64
  chmod +x minikube
  mv minikube "$PREFIX"
else
  echo "minikube is already installed"
fi

echo "Starting minikube..."
minikube start

echo "Creating mizu tests namespaces"
kubectl create namespace mizu-tests
kubectl create namespace mizu-tests2

echo "Creating httpbin deployments"
kubectl create deployment httpbin --image=kennethreitz/httpbin -n mizu-tests
kubectl create deployment httpbin2 --image=kennethreitz/httpbin -n mizu-tests

kubectl create deployment httpbin --image=kennethreitz/httpbin -n mizu-tests2

echo "Creating httpbin services"
kubectl expose deployment httpbin --type=NodePort --port=80 -n mizu-tests
kubectl expose deployment httpbin2 --type=NodePort --port=80 -n mizu-tests

kubectl expose deployment httpbin --type=NodePort --port=80 -n mizu-tests2

echo "Starting proxy"
kubectl proxy --port=8080 &

echo "Setting minikube docker env"
eval $(minikube docker-env)

echo "Build agent image"
make build-docker-ci

echo "Build cli"
make build-cli-ci
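The script leaves minikube, the background proxy, and the two test namespaces running. A hedged cleanup sketch (not part of the repository) to undo what setup.sh created:

```shell
# Sketch: tear down the acceptance-test environment created by setup.sh.
kubectl delete namespace mizu-tests mizu-tests2   # also removes the httpbin deployments/services
pkill -f "kubectl proxy --port=8080" || true      # stop the background proxy started by the script
minikube stop                                     # or `minikube delete` to discard the cluster entirely
```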
@@ -1,765 +0,0 @@
|
||||
package acceptanceTests
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestTapAndFetch(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("ignored acceptance test")
|
||||
}
|
||||
|
||||
tests := []int{50}
|
||||
|
||||
for _, entriesCount := range tests {
|
||||
t.Run(fmt.Sprintf("%d", entriesCount), func(t *testing.T) {
|
||||
cliPath, cliPathErr := getCliPath()
|
||||
if cliPathErr != nil {
|
||||
t.Errorf("failed to get cli path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
tapCmdArgs := getDefaultTapCommandArgs()
|
||||
|
||||
tapNamespace := getDefaultTapNamespace()
|
||||
tapCmdArgs = append(tapCmdArgs, tapNamespace...)
|
||||
|
||||
tapCmd := exec.Command(cliPath, tapCmdArgs...)
|
||||
t.Logf("running command: %v", tapCmd.String())
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := cleanupCommand(tapCmd); err != nil {
|
||||
t.Logf("failed to cleanup tap command, err: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
if err := tapCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start tap command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
apiServerUrl := getApiServerUrl(defaultApiServerPort)
|
||||
|
||||
if err := waitTapPodsReady(apiServerUrl); err != nil {
|
||||
t.Errorf("failed to start tap pods on time, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
proxyUrl := getProxyUrl(defaultNamespaceName, defaultServiceName)
|
||||
for i := 0; i < entriesCount; i++ {
|
||||
if _, requestErr := executeHttpGetRequest(fmt.Sprintf("%v/get", proxyUrl)); requestErr != nil {
|
||||
t.Errorf("failed to send proxy request, err: %v", requestErr)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
entriesCheckFunc := func() error {
|
||||
timestamp := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
|
||||
entriesUrl := fmt.Sprintf("%v/api/entries?limit=%v&operator=lt×tamp=%v", apiServerUrl, entriesCount, timestamp)
|
||||
requestResult, requestErr := executeHttpGetRequest(entriesUrl)
|
||||
if requestErr != nil {
|
||||
return fmt.Errorf("failed to get entries, err: %v", requestErr)
|
||||
}
|
||||
|
||||
entries := requestResult.([]interface{})
|
||||
if len(entries) == 0 {
|
||||
return fmt.Errorf("unexpected entries result - Expected more than 0 entries")
|
||||
}
|
||||
|
||||
entry := entries[0].(map[string]interface{})
|
||||
|
||||
entryUrl := fmt.Sprintf("%v/api/entries/%v", apiServerUrl, entry["id"])
|
||||
requestResult, requestErr = executeHttpGetRequest(entryUrl)
|
||||
if requestErr != nil {
|
||||
return fmt.Errorf("failed to get entry, err: %v", requestErr)
|
||||
}
|
||||
|
||||
if requestResult == nil {
|
||||
return fmt.Errorf("unexpected nil entry result")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
if err := retriesExecute(shortRetriesCount, entriesCheckFunc); err != nil {
|
||||
t.Errorf("%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
fetchCmdArgs := getDefaultFetchCommandArgs()
|
||||
fetchCmd := exec.Command(cliPath, fetchCmdArgs...)
|
||||
t.Logf("running command: %v", fetchCmd.String())
|
||||
|
||||
if err := fetchCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start fetch command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
harCheckFunc := func() error {
|
||||
harBytes, readFileErr := ioutil.ReadFile("./unknown_source.har")
|
||||
if readFileErr != nil {
|
||||
return fmt.Errorf("failed to read har file, err: %v", readFileErr)
|
||||
}
|
||||
|
||||
harEntries, err := getEntriesFromHarBytes(harBytes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get entries from har, err: %v", err)
|
||||
}
|
||||
|
||||
if len(harEntries) == 0 {
|
||||
return fmt.Errorf("unexpected har entries result - Expected more than 0 entries")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
if err := retriesExecute(shortRetriesCount, harCheckFunc); err != nil {
|
||||
t.Errorf("%v", err)
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTapGuiPort(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("ignored acceptance test")
|
||||
}
|
||||
|
||||
tests := []uint16{8898}
|
||||
|
||||
for _, guiPort := range tests {
|
||||
t.Run(fmt.Sprintf("%d", guiPort), func(t *testing.T) {
|
||||
cliPath, cliPathErr := getCliPath()
|
||||
if cliPathErr != nil {
|
||||
t.Errorf("failed to get cli path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
tapCmdArgs := getDefaultTapCommandArgs()
|
||||
|
||||
tapNamespace := getDefaultTapNamespace()
|
||||
tapCmdArgs = append(tapCmdArgs, tapNamespace...)
|
||||
|
||||
tapCmdArgs = append(tapCmdArgs, "-p", fmt.Sprintf("%d", guiPort))
|
||||
|
||||
tapCmd := exec.Command(cliPath, tapCmdArgs...)
|
||||
t.Logf("running command: %v", tapCmd.String())
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := cleanupCommand(tapCmd); err != nil {
|
||||
t.Logf("failed to cleanup tap command, err: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
if err := tapCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start tap command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
apiServerUrl := getApiServerUrl(guiPort)
|
||||
|
||||
if err := waitTapPodsReady(apiServerUrl); err != nil {
|
||||
t.Errorf("failed to start tap pods on time, err: %v", err)
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestTapAllNamespaces(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("ignored acceptance test")
|
||||
}
|
||||
|
||||
expectedPods := []struct{
|
||||
Name string
|
||||
Namespace string
|
||||
}{
|
||||
{Name: "httpbin", Namespace: "mizu-tests"},
|
||||
{Name: "httpbin", Namespace: "mizu-tests2"},
|
||||
}
|
||||
|
||||
cliPath, cliPathErr := getCliPath()
|
||||
if cliPathErr != nil {
|
||||
t.Errorf("failed to get cli path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
tapCmdArgs := getDefaultTapCommandArgs()
|
||||
tapCmdArgs = append(tapCmdArgs, "-A")
|
||||
|
||||
tapCmd := exec.Command(cliPath, tapCmdArgs...)
|
||||
t.Logf("running command: %v", tapCmd.String())
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := cleanupCommand(tapCmd); err != nil {
|
||||
t.Logf("failed to cleanup tap command, err: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
if err := tapCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start tap command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
apiServerUrl := getApiServerUrl(defaultApiServerPort)
|
||||
|
||||
if err := waitTapPodsReady(apiServerUrl); err != nil {
|
||||
t.Errorf("failed to start tap pods on time, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
podsUrl := fmt.Sprintf("%v/api/tapStatus", apiServerUrl)
|
||||
requestResult, requestErr := executeHttpGetRequest(podsUrl)
|
||||
if requestErr != nil {
|
||||
t.Errorf("failed to get tap status, err: %v", requestErr)
|
||||
return
|
||||
}
|
||||
|
||||
pods, err := getPods(requestResult)
|
||||
if err != nil {
|
||||
t.Errorf("failed to get pods, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, expectedPod := range expectedPods {
|
||||
podFound := false
|
||||
|
||||
for _, pod := range pods {
|
||||
podNamespace := pod["namespace"].(string)
|
||||
podName := pod["name"].(string)
|
||||
|
||||
if expectedPod.Namespace == podNamespace && strings.Contains(podName, expectedPod.Name) {
|
||||
podFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !podFound {
|
||||
t.Errorf("unexpected result - expected pod not found, pod namespace: %v, pod name: %v", expectedPod.Namespace, expectedPod.Name)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTapMultipleNamespaces(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("ignored acceptance test")
|
||||
}
|
||||
|
||||
expectedPods := []struct{
|
||||
Name string
|
||||
Namespace string
|
||||
}{
|
||||
{Name: "httpbin", Namespace: "mizu-tests"},
|
||||
{Name: "httpbin2", Namespace: "mizu-tests"},
|
||||
{Name: "httpbin", Namespace: "mizu-tests2"},
|
||||
}
|
||||
|
||||
cliPath, cliPathErr := getCliPath()
|
||||
if cliPathErr != nil {
|
||||
t.Errorf("failed to get cli path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
tapCmdArgs := getDefaultTapCommandArgs()
|
||||
var namespacesCmd []string
|
||||
for _, expectedPod := range expectedPods {
|
||||
namespacesCmd = append(namespacesCmd, "-n", expectedPod.Namespace)
|
||||
}
|
||||
tapCmdArgs = append(tapCmdArgs, namespacesCmd...)
|
||||
|
||||
tapCmd := exec.Command(cliPath, tapCmdArgs...)
|
||||
t.Logf("running command: %v", tapCmd.String())
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := cleanupCommand(tapCmd); err != nil {
|
||||
t.Logf("failed to cleanup tap command, err: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
if err := tapCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start tap command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
apiServerUrl := getApiServerUrl(defaultApiServerPort)
|
||||
|
||||
if err := waitTapPodsReady(apiServerUrl); err != nil {
|
||||
t.Errorf("failed to start tap pods on time, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
podsUrl := fmt.Sprintf("%v/api/tapStatus", apiServerUrl)
|
||||
requestResult, requestErr := executeHttpGetRequest(podsUrl)
|
||||
if requestErr != nil {
|
||||
t.Errorf("failed to get tap status, err: %v", requestErr)
|
||||
return
|
||||
}
|
||||
|
||||
pods, err := getPods(requestResult)
|
||||
if err != nil {
|
||||
t.Errorf("failed to get pods, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(expectedPods) != len(pods) {
|
||||
t.Errorf("unexpected result - expected pods length: %v, actual pods length: %v", len(expectedPods), len(pods))
|
||||
return
|
||||
}
|
||||
|
||||
for _, expectedPod := range expectedPods {
|
||||
podFound := false
|
||||
|
||||
for _, pod := range pods {
|
||||
podNamespace := pod["namespace"].(string)
|
||||
podName := pod["name"].(string)
|
||||
|
||||
if expectedPod.Namespace == podNamespace && strings.Contains(podName, expectedPod.Name) {
|
||||
podFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !podFound {
|
||||
t.Errorf("unexpected result - expected pod not found, pod namespace: %v, pod name: %v", expectedPod.Namespace, expectedPod.Name)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTapRegex(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("ignored acceptance test")
|
||||
}
|
||||
|
||||
regexPodName := "httpbin2"
|
||||
expectedPods := []struct{
|
||||
Name string
|
||||
Namespace string
|
||||
}{
|
||||
{Name: regexPodName, Namespace: "mizu-tests"},
|
||||
}
|
||||
|
||||
cliPath, cliPathErr := getCliPath()
|
||||
if cliPathErr != nil {
|
||||
t.Errorf("failed to get cli path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
tapCmdArgs := getDefaultTapCommandArgsWithRegex(regexPodName)
|
||||
|
||||
tapNamespace := getDefaultTapNamespace()
|
||||
tapCmdArgs = append(tapCmdArgs, tapNamespace...)
|
||||
|
||||
tapCmd := exec.Command(cliPath, tapCmdArgs...)
|
||||
t.Logf("running command: %v", tapCmd.String())
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := cleanupCommand(tapCmd); err != nil {
|
||||
t.Logf("failed to cleanup tap command, err: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
if err := tapCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start tap command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
apiServerUrl := getApiServerUrl(defaultApiServerPort)
|
||||
|
||||
if err := waitTapPodsReady(apiServerUrl); err != nil {
|
||||
t.Errorf("failed to start tap pods on time, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
podsUrl := fmt.Sprintf("%v/api/tapStatus", apiServerUrl)
|
||||
requestResult, requestErr := executeHttpGetRequest(podsUrl)
|
||||
if requestErr != nil {
|
||||
t.Errorf("failed to get tap status, err: %v", requestErr)
|
||||
return
|
||||
}
|
||||
|
||||
pods, err := getPods(requestResult)
|
||||
if err != nil {
|
||||
t.Errorf("failed to get pods, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(expectedPods) != len(pods) {
|
||||
t.Errorf("unexpected result - expected pods length: %v, actual pods length: %v", len(expectedPods), len(pods))
|
||||
return
|
||||
}
|
||||
|
||||
for _, expectedPod := range expectedPods {
|
||||
podFound := false
|
||||
|
||||
for _, pod := range pods {
|
||||
podNamespace := pod["namespace"].(string)
|
||||
podName := pod["name"].(string)
|
||||
|
||||
if expectedPod.Namespace == podNamespace && strings.Contains(podName, expectedPod.Name) {
|
||||
podFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !podFound {
|
||||
t.Errorf("unexpected result - expected pod not found, pod namespace: %v, pod name: %v", expectedPod.Namespace, expectedPod.Name)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTapDryRun(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("ignored acceptance test")
|
||||
}
|
||||
|
||||
cliPath, cliPathErr := getCliPath()
|
||||
if cliPathErr != nil {
|
||||
t.Errorf("failed to get cli path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
tapCmdArgs := getDefaultTapCommandArgs()
|
||||
|
||||
tapNamespace := getDefaultTapNamespace()
|
||||
tapCmdArgs = append(tapCmdArgs, tapNamespace...)
|
||||
|
||||
tapCmdArgs = append(tapCmdArgs, "--dry-run")
|
||||
|
||||
tapCmd := exec.Command(cliPath, tapCmdArgs...)
|
||||
t.Logf("running command: %v", tapCmd.String())
|
||||
|
||||
if err := tapCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start tap command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
resultChannel := make(chan string, 1)
|
||||
|
||||
go func() {
|
||||
if err := tapCmd.Wait(); err != nil {
|
||||
resultChannel <- "fail"
|
||||
return
|
||||
}
|
||||
resultChannel <- "success"
|
||||
}()
|
||||
|
||||
go func() {
|
||||
time.Sleep(shortRetriesCount * time.Second)
|
||||
resultChannel <- "fail"
|
||||
}()
|
||||
|
||||
testResult := <- resultChannel
|
||||
if testResult != "success" {
|
||||
t.Errorf("unexpected result - dry run cmd not done")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTapRedact(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("ignored acceptance test")
|
||||
}
|
||||
|
||||
cliPath, cliPathErr := getCliPath()
|
||||
if cliPathErr != nil {
|
||||
t.Errorf("failed to get cli path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
tapCmdArgs := getDefaultTapCommandArgs()
|
||||
|
||||
tapNamespace := getDefaultTapNamespace()
|
||||
tapCmdArgs = append(tapCmdArgs, tapNamespace...)
|
||||
|
||||
tapCmd := exec.Command(cliPath, tapCmdArgs...)
|
||||
t.Logf("running command: %v", tapCmd.String())
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := cleanupCommand(tapCmd); err != nil {
|
||||
t.Logf("failed to cleanup tap command, err: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
if err := tapCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start tap command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
apiServerUrl := getApiServerUrl(defaultApiServerPort)
|
||||
|
||||
if err := waitTapPodsReady(apiServerUrl); err != nil {
|
||||
t.Errorf("failed to start tap pods on time, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
proxyUrl := getProxyUrl(defaultNamespaceName, defaultServiceName)
|
||||
requestBody := map[string]string{"User": "Mizu"}
|
||||
for i := 0; i < defaultEntriesCount; i++ {
|
||||
if _, requestErr := executeHttpPostRequest(fmt.Sprintf("%v/post", proxyUrl), requestBody); requestErr != nil {
|
||||
t.Errorf("failed to send proxy request, err: %v", requestErr)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
redactCheckFunc := func() error {
|
||||
timestamp := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
|
||||
entriesUrl := fmt.Sprintf("%v/api/entries?limit=%v&operator=lt×tamp=%v", apiServerUrl, defaultEntriesCount, timestamp)
|
||||
requestResult, requestErr := executeHttpGetRequest(entriesUrl)
|
||||
if requestErr != nil {
|
||||
return fmt.Errorf("failed to get entries, err: %v", requestErr)
|
||||
}
|
||||
|
||||
entries := requestResult.([]interface{})
|
||||
if len(entries) == 0 {
|
||||
return fmt.Errorf("unexpected entries result - Expected more than 0 entries")
|
||||
}
|
||||
|
||||
firstEntry := entries[0].(map[string]interface{})
|
||||
|
||||
entryUrl := fmt.Sprintf("%v/api/entries/%v", apiServerUrl, firstEntry["id"])
|
||||
requestResult, requestErr = executeHttpGetRequest(entryUrl)
|
||||
if requestErr != nil {
|
||||
return fmt.Errorf("failed to get entry, err: %v", requestErr)
|
||||
}
|
||||
|
||||
entry := requestResult.(map[string]interface{})["entry"].(map[string]interface{})
|
||||
entryRequest := entry["request"].(map[string]interface{})
|
||||
|
||||
headers := entryRequest["headers"].([]interface{})
|
||||
for _, headerInterface := range headers {
|
||||
header := headerInterface.(map[string]interface{})
|
||||
if header["name"].(string) != "User-Agent" {
|
||||
continue
|
||||
}
|
||||
|
||||
userAgent := header["value"].(string)
|
||||
if userAgent != "[REDACTED]" {
|
||||
return fmt.Errorf("unexpected result - user agent is not redacted")
|
||||
}
|
||||
}
|
||||
|
||||
data := entryRequest["postData"].(map[string]interface{})
|
||||
textDataStr := data["text"].(string)
|
||||
|
||||
var textData map[string]string
|
||||
if parseErr := json.Unmarshal([]byte(textDataStr), &textData); parseErr != nil {
|
||||
return fmt.Errorf("failed to parse text data, err: %v", parseErr)
|
||||
}
|
||||
|
||||
if textData["User"] != "[REDACTED]" {
|
||||
return fmt.Errorf("unexpected result - user in body is not redacted")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
if err := retriesExecute(shortRetriesCount, redactCheckFunc); err != nil {
|
||||
t.Errorf("%v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestTapNoRedact(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("ignored acceptance test")
|
||||
}
|
||||
|
||||
cliPath, cliPathErr := getCliPath()
|
||||
if cliPathErr != nil {
|
||||
t.Errorf("failed to get cli path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
tapCmdArgs := getDefaultTapCommandArgs()
|
||||
|
||||
tapNamespace := getDefaultTapNamespace()
|
||||
tapCmdArgs = append(tapCmdArgs, tapNamespace...)
|
||||
|
||||
tapCmdArgs = append(tapCmdArgs, "--no-redact")
|
||||
|
||||
tapCmd := exec.Command(cliPath, tapCmdArgs...)
|
||||
t.Logf("running command: %v", tapCmd.String())
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := cleanupCommand(tapCmd); err != nil {
|
||||
t.Logf("failed to cleanup tap command, err: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
if err := tapCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start tap command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
apiServerUrl := getApiServerUrl(defaultApiServerPort)
|
||||
|
||||
if err := waitTapPodsReady(apiServerUrl); err != nil {
|
||||
t.Errorf("failed to start tap pods on time, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
proxyUrl := getProxyUrl(defaultNamespaceName, defaultServiceName)
|
||||
requestBody := map[string]string{"User": "Mizu"}
|
||||
for i := 0; i < defaultEntriesCount; i++ {
|
||||
if _, requestErr := executeHttpPostRequest(fmt.Sprintf("%v/post", proxyUrl), requestBody); requestErr != nil {
|
||||
t.Errorf("failed to send proxy request, err: %v", requestErr)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
redactCheckFunc := func() error {
|
||||
timestamp := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
|
||||
entriesUrl := fmt.Sprintf("%v/api/entries?limit=%v&operator=lt×tamp=%v", apiServerUrl, defaultEntriesCount, timestamp)
|
||||
requestResult, requestErr := executeHttpGetRequest(entriesUrl)
|
||||
if requestErr != nil {
|
||||
return fmt.Errorf("failed to get entries, err: %v", requestErr)
|
||||
}
|
||||
|
||||
entries := requestResult.([]interface{})
|
||||
if len(entries) == 0 {
|
||||
return fmt.Errorf("unexpected entries result - Expected more than 0 entries")
|
||||
}
|
||||
|
||||
firstEntry := entries[0].(map[string]interface{})
|
||||
|
||||
entryUrl := fmt.Sprintf("%v/api/entries/%v", apiServerUrl, firstEntry["id"])
|
||||
requestResult, requestErr = executeHttpGetRequest(entryUrl)
|
||||
if requestErr != nil {
|
||||
return fmt.Errorf("failed to get entry, err: %v", requestErr)
|
||||
}
|
||||
|
||||
entry := requestResult.(map[string]interface{})["entry"].(map[string]interface{})
|
||||
entryRequest := entry["request"].(map[string]interface{})
|
||||
|
||||
headers := entryRequest["headers"].([]interface{})
|
||||
for _, headerInterface := range headers {
|
||||
header := headerInterface.(map[string]interface{})
|
||||
if header["name"].(string) != "User-Agent" {
|
||||
continue
|
||||
}
|
||||
|
||||
userAgent := header["value"].(string)
|
||||
if userAgent == "[REDACTED]" {
|
||||
return fmt.Errorf("unexpected result - user agent is redacted")
|
||||
}
|
||||
}
|
||||
|
||||
data := entryRequest["postData"].(map[string]interface{})
|
||||
textDataStr := data["text"].(string)
|
||||
|
||||
var textData map[string]string
|
||||
if parseErr := json.Unmarshal([]byte(textDataStr), &textData); parseErr != nil {
|
||||
return fmt.Errorf("failed to parse text data, err: %v", parseErr)
|
||||
}
|
||||
|
||||
if textData["User"] == "[REDACTED]" {
|
||||
return fmt.Errorf("unexpected result - user in body is redacted")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
if err := retriesExecute(shortRetriesCount, redactCheckFunc); err != nil {
|
||||
t.Errorf("%v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestTapRegexMasking(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("ignored acceptance test")
|
||||
}
|
||||
|
||||
cliPath, cliPathErr := getCliPath()
|
||||
if cliPathErr != nil {
|
||||
t.Errorf("failed to get cli path, err: %v", cliPathErr)
|
||||
return
|
||||
}
|
||||
|
||||
tapCmdArgs := getDefaultTapCommandArgs()
|
||||
|
||||
tapNamespace := getDefaultTapNamespace()
|
||||
tapCmdArgs = append(tapCmdArgs, tapNamespace...)
|
||||
|
||||
tapCmdArgs = append(tapCmdArgs, "-r", "Mizu")
|
||||
|
||||
tapCmd := exec.Command(cliPath, tapCmdArgs...)
|
||||
t.Logf("running command: %v", tapCmd.String())
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := cleanupCommand(tapCmd); err != nil {
|
||||
t.Logf("failed to cleanup tap command, err: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
if err := tapCmd.Start(); err != nil {
|
||||
t.Errorf("failed to start tap command, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
apiServerUrl := getApiServerUrl(defaultApiServerPort)
|
||||
|
||||
if err := waitTapPodsReady(apiServerUrl); err != nil {
|
||||
t.Errorf("failed to start tap pods on time, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
proxyUrl := getProxyUrl(defaultNamespaceName, defaultServiceName)
|
||||
for i := 0; i < defaultEntriesCount; i++ {
|
||||
response, requestErr := http.Post(fmt.Sprintf("%v/post", proxyUrl), "text/plain", bytes.NewBufferString("Mizu"))
|
||||
if _, requestErr = executeHttpRequest(response, requestErr); requestErr != nil {
|
||||
t.Errorf("failed to send proxy request, err: %v", requestErr)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
redactCheckFunc := func() error {
|
||||
timestamp := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
|
||||
entriesUrl := fmt.Sprintf("%v/api/entries?limit=%v&operator=lt×tamp=%v", apiServerUrl, defaultEntriesCount, timestamp)
|
||||
requestResult, requestErr := executeHttpGetRequest(entriesUrl)
|
||||
if requestErr != nil {
|
||||
return fmt.Errorf("failed to get entries, err: %v", requestErr)
|
||||
}
|
||||
|
||||
entries := requestResult.([]interface{})
|
||||
if len(entries) == 0 {
|
||||
return fmt.Errorf("unexpected entries result - Expected more than 0 entries")
|
||||
}
|
||||
|
||||
firstEntry := entries[0].(map[string]interface{})
|
||||
|
||||
entryUrl := fmt.Sprintf("%v/api/entries/%v", apiServerUrl, firstEntry["id"])
|
||||
requestResult, requestErr = executeHttpGetRequest(entryUrl)
|
||||
if requestErr != nil {
|
||||
return fmt.Errorf("failed to get entry, err: %v", requestErr)
|
||||
}
|
||||
|
||||
entry := requestResult.(map[string]interface{})["entry"].(map[string]interface{})
|
||||
entryRequest := entry["request"].(map[string]interface{})
|
||||
|
||||
data := entryRequest["postData"].(map[string]interface{})
|
||||
textData := data["text"].(string)
|
||||
|
||||
if textData != "[REDACTED]" {
|
||||
return fmt.Errorf("unexpected result - body is not redacted")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
if err := retriesExecute(shortRetriesCount, redactCheckFunc); err != nil {
|
||||
t.Errorf("%v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -1,205 +0,0 @@
package acceptanceTests

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"os"
	"os/exec"
	"path"
	"syscall"
	"time"
)

const (
	longRetriesCount = 100
	shortRetriesCount = 10
	defaultApiServerPort = 8899
	defaultNamespaceName = "mizu-tests"
	defaultServiceName = "httpbin"
	defaultEntriesCount = 50
)

func getCliPath() (string, error) {
|
||||
dir, filePathErr := os.Getwd()
|
||||
if filePathErr != nil {
|
||||
return "", filePathErr
|
||||
}
|
||||
|
||||
cliPath := path.Join(dir, "../cli/bin/mizu_ci")
|
||||
return cliPath, nil
|
||||
}
|
||||
|
||||
func getConfigPath() (string, error) {
|
||||
home, homeDirErr := os.UserHomeDir()
|
||||
if homeDirErr != nil {
|
||||
return "", homeDirErr
|
||||
}
|
||||
|
||||
return path.Join(home, ".mizu", "config.yaml"), nil
|
||||
}
|
||||
|
||||
func getProxyUrl(namespace string, service string) string {
|
||||
return fmt.Sprintf("http://localhost:8080/api/v1/namespaces/%v/services/%v/proxy", namespace, service)
|
||||
}
|
||||
|
||||
func getApiServerUrl(port uint16) string {
|
||||
return fmt.Sprintf("http://localhost:%v/mizu", port)
|
||||
}
|
||||
|
||||
func getDefaultCommandArgs() []string {
|
||||
setFlag := "--set"
|
||||
telemetry := "telemetry=false"
|
||||
agentImage := "agent-image=gcr.io/up9-docker-hub/mizu/ci:0.0.0"
|
||||
imagePullPolicy := "image-pull-policy=Never"
|
||||
|
||||
return []string{setFlag, telemetry, setFlag, agentImage, setFlag, imagePullPolicy}
|
||||
}
|
||||
|
||||
func getDefaultTapCommandArgs() []string {
|
||||
tapCommand := "tap"
|
||||
defaultCmdArgs := getDefaultCommandArgs()
|
||||
|
||||
return append([]string{tapCommand}, defaultCmdArgs...)
|
||||
}
|
||||
|
||||
func getDefaultTapCommandArgsWithRegex(regex string) []string {
|
||||
tapCommand := "tap"
|
||||
defaultCmdArgs := getDefaultCommandArgs()
|
||||
|
||||
return append([]string{tapCommand, regex}, defaultCmdArgs...)
|
||||
}
|
||||
|
||||
func getDefaultTapNamespace() []string {
|
||||
return []string{"-n", "mizu-tests"}
|
||||
}
|
||||
|
||||
func getDefaultFetchCommandArgs() []string {
|
||||
fetchCommand := "fetch"
|
||||
defaultCmdArgs := getDefaultCommandArgs()
|
||||
|
||||
return append([]string{fetchCommand}, defaultCmdArgs...)
|
||||
}
|
||||
|
||||
func getDefaultConfigCommandArgs() []string {
|
||||
configCommand := "config"
|
||||
defaultCmdArgs := getDefaultCommandArgs()
|
||||
|
||||
return append([]string{configCommand}, defaultCmdArgs...)
|
||||
}
|
||||
|
||||
func retriesExecute(retriesCount int, executeFunc func() error) error {
|
||||
var lastError error
|
||||
|
||||
for i := 0; i < retriesCount; i++ {
|
||||
if err := executeFunc(); err != nil {
|
||||
lastError = err
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("reached max retries count, retries count: %v, last err: %v", retriesCount, lastError)
|
||||
}
|
||||
|
||||
func waitTapPodsReady(apiServerUrl string) error {
|
||||
resolvingUrl := fmt.Sprintf("%v/status/tappersCount", apiServerUrl)
|
||||
tapPodsReadyFunc := func() error {
|
||||
requestResult, requestErr := executeHttpGetRequest(resolvingUrl)
|
||||
if requestErr != nil {
|
||||
return requestErr
|
||||
}
|
||||
|
||||
tappersCount := requestResult.(float64)
|
||||
if tappersCount == 0 {
|
||||
return fmt.Errorf("no tappers running")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return retriesExecute(longRetriesCount, tapPodsReadyFunc)
|
||||
}
|
||||
|
||||
func jsonBytesToInterface(jsonBytes []byte) (interface{}, error) {
|
||||
var result interface{}
|
||||
if parseErr := json.Unmarshal(jsonBytes, &result); parseErr != nil {
|
||||
return nil, parseErr
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func executeHttpRequest(response *http.Response, requestErr error) (interface{}, error) {
|
||||
if requestErr != nil {
|
||||
return nil, requestErr
|
||||
} else if response.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("invalid status code %v", response.StatusCode)
|
||||
}
|
||||
|
||||
defer func() { response.Body.Close() }()
|
||||
|
||||
data, readErr := ioutil.ReadAll(response.Body)
|
||||
if readErr != nil {
|
||||
return nil, readErr
|
||||
}
|
||||
|
||||
return jsonBytesToInterface(data)
|
||||
}
|
||||
|
||||
func executeHttpGetRequest(url string) (interface{}, error) {
|
||||
response, requestErr := http.Get(url)
|
||||
return executeHttpRequest(response, requestErr)
|
||||
}
|
||||
|
||||
func executeHttpPostRequest(url string, body interface{}) (interface{}, error) {
|
||||
requestBody, jsonErr := json.Marshal(body)
|
||||
if jsonErr != nil {
|
||||
return nil, jsonErr
|
||||
}
|
||||
|
||||
response, requestErr := http.Post(url, "application/json", bytes.NewBuffer(requestBody))
|
||||
return executeHttpRequest(response, requestErr)
|
||||
}
|
||||
|
||||
func cleanupCommand(cmd *exec.Cmd) error {
|
||||
if err := cmd.Process.Signal(syscall.SIGQUIT); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := cmd.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getEntriesFromHarBytes(harBytes []byte) ([]interface{}, error) {
|
||||
harInterface, convertErr := jsonBytesToInterface(harBytes)
|
||||
if convertErr != nil {
|
||||
return nil, convertErr
|
||||
}
|
||||
|
||||
har := harInterface.(map[string]interface{})
|
||||
harLog := har["log"].(map[string]interface{})
|
||||
harEntries := harLog["entries"].([]interface{})
|
||||
|
||||
return harEntries, nil
|
||||
}
|
||||
|
||||
func getPods(tapStatusInterface interface{}) ([]map[string]interface{}, error) {
|
||||
tapStatus := tapStatusInterface.(map[string]interface{})
|
||||
podsInterface := tapStatus["pods"].([]interface{})
|
||||
|
||||
var pods []map[string]interface{}
|
||||
for _, podInterface := range podsInterface {
|
||||
pods = append(pods, podInterface.(map[string]interface{}))
|
||||
}
|
||||
|
||||
return pods, nil
|
||||
}
|
||||
@@ -1,2 +0,0 @@
test: ## Run agent tests.
	@go test ./... -coverpkg=./... -race -coverprofile=coverage.out -covermode=atomic

246 agent/main.go
@@ -1,246 +0,0 @@
package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"github.com/gin-contrib/static"
	"github.com/gin-gonic/gin"
	"github.com/gorilla/websocket"
	"github.com/romana/rlog"
	"github.com/up9inc/mizu/shared"
	"github.com/up9inc/mizu/tap"
	"mizuserver/pkg/api"
	"mizuserver/pkg/models"
	"mizuserver/pkg/routes"
	"mizuserver/pkg/sensitiveDataFiltering"
	"mizuserver/pkg/utils"
	"net/http"
	"os"
	"os/signal"
	"strings"
)

var tapperMode = flag.Bool("tap", false, "Run in tapper mode without API")
|
||||
var apiServerMode = flag.Bool("api-server", false, "Run in API server mode with API")
|
||||
var standaloneMode = flag.Bool("standalone", false, "Run in standalone tapper and API mode")
|
||||
var apiServerAddress = flag.String("api-server-address", "", "Address of mizu API server")
|
||||
var namespace = flag.String("namespace", "", "Resolve IPs if they belong to resources in this namespace (default is all)")
|
||||
var harsReaderMode = flag.Bool("hars-read", false, "Run in hars-read mode")
|
||||
var harsDir = flag.String("hars-dir", "", "Directory to read hars from")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
hostMode := os.Getenv(shared.HostModeEnvVar) == "1"
|
||||
tapOpts := &tap.TapOpts{HostMode: hostMode}
|
||||
|
||||
|
||||
if !*tapperMode && !*apiServerMode && !*standaloneMode && !*harsReaderMode{
|
||||
panic("One of the flags --tap, --api or --standalone or --hars-read must be provided")
|
||||
}
|
||||
|
||||
if *standaloneMode {
|
||||
api.StartResolving(*namespace)
|
||||
|
||||
harOutputChannel, outboundLinkOutputChannel := tap.StartPassiveTapper(tapOpts)
|
||||
filteredHarChannel := make(chan *tap.OutputChannelItem)
|
||||
|
||||
go filterHarItems(harOutputChannel, filteredHarChannel, getTrafficFilteringOptions())
|
||||
go api.StartReadingEntries(filteredHarChannel, nil)
|
||||
go api.StartReadingOutbound(outboundLinkOutputChannel)
|
||||
|
||||
hostApi(nil)
|
||||
} else if *tapperMode {
|
||||
if *apiServerAddress == "" {
|
||||
panic("API server address must be provided with --api-server-address when using --tap")
|
||||
}
|
||||
|
||||
tapTargets := getTapTargets()
|
||||
if tapTargets != nil {
|
||||
tap.SetFilterAuthorities(tapTargets)
|
||||
rlog.Infof("Filtering for the following authorities: %v", tap.GetFilterIPs())
|
||||
}
|
||||
|
||||
harOutputChannel, outboundLinkOutputChannel := tap.StartPassiveTapper(tapOpts)
|
||||
|
||||
socketConnection, err := shared.ConnectToSocketServer(*apiServerAddress, shared.DEFAULT_SOCKET_RETRIES, shared.DEFAULT_SOCKET_RETRY_SLEEP_TIME, false)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Error connecting to socket server at %s %v", *apiServerAddress, err))
|
||||
}
|
||||
|
||||
go pipeTapChannelToSocket(socketConnection, harOutputChannel)
|
||||
go pipeOutboundLinksChannelToSocket(socketConnection, outboundLinkOutputChannel)
|
||||
} else if *apiServerMode {
|
||||
api.StartResolving(*namespace)
|
||||
|
||||
socketHarOutChannel := make(chan *tap.OutputChannelItem, 1000)
|
||||
filteredHarChannel := make(chan *tap.OutputChannelItem)
|
||||
|
||||
go filterHarItems(socketHarOutChannel, filteredHarChannel, getTrafficFilteringOptions())
|
||||
go api.StartReadingEntries(filteredHarChannel, nil)
|
||||
|
||||
hostApi(socketHarOutChannel)
|
||||
} else if *harsReaderMode {
|
||||
socketHarOutChannel := make(chan *tap.OutputChannelItem, 1000)
|
||||
filteredHarChannel := make(chan *tap.OutputChannelItem)
|
||||
|
||||
go filterHarItems(socketHarOutChannel, filteredHarChannel, getTrafficFilteringOptions())
|
||||
go api.StartReadingEntries(filteredHarChannel, harsDir)
|
||||
hostApi(nil)
|
||||
}
|
||||
|
||||
signalChan := make(chan os.Signal, 1)
|
||||
signal.Notify(signalChan, os.Interrupt)
|
||||
<-signalChan
|
||||
|
||||
rlog.Info("Exiting")
|
||||
}
|
||||
|
||||
func hostApi(socketHarOutputChannel chan<- *tap.OutputChannelItem) {
|
||||
app := gin.Default()
|
||||
|
||||
app.GET("/echo", func(c *gin.Context) {
|
||||
c.String(http.StatusOK, "Here is Mizu agent")
|
||||
})
|
||||
|
||||
eventHandlers := api.RoutesEventHandlers{
|
||||
SocketHarOutChannel: socketHarOutputChannel,
|
||||
}
|
||||
|
||||
app.Use(DisableRootStaticCache())
|
||||
app.Use(static.ServeRoot("/", "./site"))
|
||||
app.Use(CORSMiddleware()) // This has to be called after the static middleware; it does not work if it's called before
|
||||
|
||||
api.WebSocketRoutes(app, &eventHandlers)
|
||||
routes.EntriesRoutes(app)
|
||||
routes.MetadataRoutes(app)
|
||||
routes.StatusRoutes(app)
|
||||
routes.NotFoundRoute(app)
|
||||
|
||||
utils.StartServer(app)
|
||||
}
|
||||
|
||||
func DisableRootStaticCache() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
if c.Request.RequestURI == "/" {
|
||||
// Disable cache only for the main static route
|
||||
c.Writer.Header().Set("Cache-Control", "no-store")
|
||||
}
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func CORSMiddleware() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
c.Writer.Header().Set("Access-Control-Allow-Credentials", "true")
|
||||
c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With")
|
||||
c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT")
|
||||
|
||||
if c.Request.Method == "OPTIONS" {
|
||||
c.AbortWithStatus(204)
|
||||
return
|
||||
}
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
func getTapTargets() []string {
|
||||
nodeName := os.Getenv(shared.NodeNameEnvVar)
|
||||
var tappedAddressesPerNodeDict map[string][]string
|
||||
err := json.Unmarshal([]byte(os.Getenv(shared.TappedAddressesPerNodeDictEnvVar)), &tappedAddressesPerNodeDict)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("env var %s's value of %s is invalid! must be map[string][]string %v", shared.TappedAddressesPerNodeDictEnvVar, tappedAddressesPerNodeDict, err))
|
||||
}
|
||||
return tappedAddressesPerNodeDict[nodeName]
|
||||
}
|
||||
|
||||
func getTrafficFilteringOptions() *shared.TrafficFilteringOptions {
|
||||
filteringOptionsJson := os.Getenv(shared.MizuFilteringOptionsEnvVar)
|
||||
if filteringOptionsJson == "" {
|
||||
return nil
|
||||
}
|
||||
var filteringOptions shared.TrafficFilteringOptions
|
||||
err := json.Unmarshal([]byte(filteringOptionsJson), &filteringOptions)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("env var %s's value of %s is invalid! json must match the shared.TrafficFilteringOptions struct %v", shared.MizuFilteringOptionsEnvVar, filteringOptionsJson, err))
|
||||
}
|
||||
|
||||
return &filteringOptions
|
||||
}
|
||||
|
||||
func filterHarItems(inChannel <-chan *tap.OutputChannelItem, outChannel chan *tap.OutputChannelItem, filterOptions *shared.TrafficFilteringOptions) {
|
||||
for message := range inChannel {
|
||||
if message.ConnectionInfo.IsOutgoing && api.CheckIsServiceIP(message.ConnectionInfo.ServerIP) {
|
||||
continue
|
||||
}
|
||||
// TODO: move this to tappers https://up9.atlassian.net/browse/TRA-3441
|
||||
if isHealthCheckByUserAgent(message, filterOptions.HealthChecksUserAgentHeaders) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !filterOptions.DisableRedaction {
|
||||
sensitiveDataFiltering.FilterSensitiveInfoFromHarRequest(message, filterOptions)
|
||||
}
|
||||
|
||||
outChannel <- message
|
||||
}
|
||||
}
|
||||
|
||||
func isHealthCheckByUserAgent(message *tap.OutputChannelItem, userAgentsToIgnore []string) bool {
|
||||
for _, header := range message.HarEntry.Request.Headers {
|
||||
if strings.ToLower(header.Name) == "user-agent" {
|
||||
for _, userAgent := range userAgentsToIgnore {
|
||||
if strings.Contains(strings.ToLower(header.Value), strings.ToLower(userAgent)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func pipeTapChannelToSocket(connection *websocket.Conn, messageDataChannel <-chan *tap.OutputChannelItem) {
|
||||
if connection == nil {
|
||||
panic("Websocket connection is nil")
|
||||
}
|
||||
|
||||
if messageDataChannel == nil {
|
||||
panic("Channel of captured messages is nil")
|
||||
}
|
||||
|
||||
for messageData := range messageDataChannel {
|
||||
marshaledData, err := models.CreateWebsocketTappedEntryMessage(messageData)
|
||||
if err != nil {
|
||||
rlog.Infof("error converting message to json %s, (%v,%+v)\n", err, err, err)
|
||||
continue
|
||||
}
|
||||
|
||||
err = connection.WriteMessage(websocket.TextMessage, marshaledData)
|
||||
if err != nil {
|
||||
rlog.Infof("error sending message through socket server %s, (%v,%+v)\n", err, err, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func pipeOutboundLinksChannelToSocket(connection *websocket.Conn, outboundLinkChannel <-chan *tap.OutboundLink) {
|
||||
for outboundLink := range outboundLinkChannel {
|
||||
if outboundLink.SuggestedProtocol == tap.TLSProtocol {
|
||||
marshaledData, err := models.CreateWebsocketOutboundLinkMessage(outboundLink)
|
||||
if err != nil {
|
||||
rlog.Infof("Error converting outbound link to json %s, (%v,%+v)", err, err, err)
|
||||
continue
|
||||
}
|
||||
|
||||
err = connection.WriteMessage(websocket.TextMessage, marshaledData)
|
||||
if err != nil {
|
||||
rlog.Infof("error sending outbound link message through socket server %s, (%v,%+v)", err, err, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,204 +0,0 @@
package api

import (
	"bufio"
	"context"
	"encoding/json"
	"fmt"
	"mizuserver/pkg/holder"
	"mizuserver/pkg/providers"
	"net/url"
	"os"
	"path"
	"sort"
	"strings"
	"time"

	"github.com/google/martian/har"
	"github.com/romana/rlog"
	"github.com/up9inc/mizu/tap"
	"go.mongodb.org/mongo-driver/bson/primitive"

	"mizuserver/pkg/database"
	"mizuserver/pkg/models"
	"mizuserver/pkg/resolver"
	"mizuserver/pkg/utils"
)

var k8sResolver *resolver.Resolver
|
||||
|
||||
func StartResolving(namespace string) {
|
||||
errOut := make(chan error, 100)
|
||||
res, err := resolver.NewFromInCluster(errOut, namespace)
|
||||
if err != nil {
|
||||
rlog.Infof("error creating k8s resolver %s", err)
|
||||
return
|
||||
}
|
||||
ctx := context.Background()
|
||||
res.Start(ctx)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case err := <-errOut:
|
||||
rlog.Infof("name resolving error %s", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
k8sResolver = res
|
||||
holder.SetResolver(res)
|
||||
}
|
||||
|
||||
func StartReadingEntries(harChannel <-chan *tap.OutputChannelItem, workingDir *string) {
|
||||
if workingDir != nil && *workingDir != "" {
|
||||
startReadingFiles(*workingDir)
|
||||
} else {
|
||||
startReadingChannel(harChannel)
|
||||
}
|
||||
}
|
||||
|
||||
func startReadingFiles(workingDir string) {
|
||||
err := os.MkdirAll(workingDir, os.ModePerm)
|
||||
utils.CheckErr(err)
|
||||
|
||||
for {
|
||||
dir, _ := os.Open(workingDir)
|
||||
dirFiles, _ := dir.Readdir(-1)
|
||||
|
||||
var harFiles []os.FileInfo
|
||||
for _, fileInfo := range dirFiles {
|
||||
if strings.HasSuffix(fileInfo.Name(), ".har") {
|
||||
harFiles = append(harFiles, fileInfo)
|
||||
}
|
||||
}
|
||||
sort.Sort(utils.ByModTime(harFiles))
|
||||
|
||||
if len(harFiles) == 0 {
|
||||
rlog.Infof("Waiting for new files\n")
|
||||
time.Sleep(3 * time.Second)
|
||||
continue
|
||||
}
|
||||
fileInfo := harFiles[0]
|
||||
inputFilePath := path.Join(workingDir, fileInfo.Name())
|
||||
file, err := os.Open(inputFilePath)
|
||||
utils.CheckErr(err)
|
||||
|
||||
var inputHar har.HAR
|
||||
decErr := json.NewDecoder(bufio.NewReader(file)).Decode(&inputHar)
|
||||
utils.CheckErr(decErr)
|
||||
|
||||
for _, entry := range inputHar.Log.Entries {
|
||||
time.Sleep(time.Millisecond * 250)
|
||||
connectionInfo := &tap.ConnectionInfo{
|
||||
ClientIP: fileInfo.Name(),
|
||||
ClientPort: "",
|
||||
ServerIP: "",
|
||||
ServerPort: "",
|
||||
IsOutgoing: false,
|
||||
}
|
||||
saveHarToDb(entry, connectionInfo)
|
||||
}
|
||||
rmErr := os.Remove(inputFilePath)
|
||||
utils.CheckErr(rmErr)
|
||||
}
|
||||
}
|
||||
|
||||
func startReadingChannel(outputItems <-chan *tap.OutputChannelItem) {
|
||||
if outputItems == nil {
|
||||
panic("Channel of captured messages is nil")
|
||||
}
|
||||
|
||||
for item := range outputItems {
|
||||
providers.EntryAdded()
|
||||
saveHarToDb(item.HarEntry, item.ConnectionInfo)
|
||||
}
|
||||
}
|
||||
|
||||
func StartReadingOutbound(outboundLinkChannel <-chan *tap.OutboundLink) {
|
||||
// tcpStreamFactory will block on write to channel. Empty channel to unblock.
|
||||
// TODO: Make write to channel optional.
|
||||
for range outboundLinkChannel {
|
||||
}
|
||||
}
|
||||
|
||||
func saveHarToDb(entry *har.Entry, connectionInfo *tap.ConnectionInfo) {
|
||||
entryBytes, _ := json.Marshal(entry)
|
||||
serviceName, urlPath := getServiceNameFromUrl(entry.Request.URL)
|
||||
entryId := primitive.NewObjectID().Hex()
|
||||
var (
|
||||
resolvedSource string
|
||||
resolvedDestination string
|
||||
)
|
||||
if k8sResolver != nil {
|
||||
unresolvedSource := connectionInfo.ClientIP
|
||||
resolvedSource = k8sResolver.Resolve(unresolvedSource)
|
||||
if resolvedSource == "" {
|
||||
rlog.Debugf("Cannot find resolved name to source: %s\n", unresolvedSource)
|
||||
if os.Getenv("SKIP_NOT_RESOLVED_SOURCE") == "1" {
|
||||
return
|
||||
}
|
||||
}
|
||||
unresolvedDestination := fmt.Sprintf("%s:%s", connectionInfo.ServerIP, connectionInfo.ServerPort)
|
||||
resolvedDestination = k8sResolver.Resolve(unresolvedDestination)
|
||||
if resolvedDestination == "" {
|
||||
rlog.Debugf("Cannot find resolved name to dest: %s\n", unresolvedDestination)
|
||||
if os.Getenv("SKIP_NOT_RESOLVED_DEST") == "1" {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mizuEntry := models.MizuEntry{
|
||||
EntryId: entryId,
|
||||
Entry: string(entryBytes), // simple way to store it and not convert to bytes
|
||||
Service: serviceName,
|
||||
Url: entry.Request.URL,
|
||||
Path: urlPath,
|
||||
Method: entry.Request.Method,
|
||||
Status: entry.Response.Status,
|
||||
RequestSenderIp: connectionInfo.ClientIP,
|
||||
Timestamp: entry.StartedDateTime.UnixNano() / int64(time.Millisecond),
|
||||
ResolvedSource: resolvedSource,
|
||||
ResolvedDestination: resolvedDestination,
|
||||
IsOutgoing: connectionInfo.IsOutgoing,
|
||||
}
|
||||
mizuEntry.EstimatedSizeBytes = getEstimatedEntrySizeBytes(mizuEntry)
|
||||
database.CreateEntry(&mizuEntry)
|
||||
|
||||
baseEntry := models.BaseEntryDetails{}
|
||||
if err := models.GetEntry(&mizuEntry, &baseEntry); err != nil {
|
||||
return
|
||||
}
|
||||
baseEntry.Rules = models.RunValidationRulesState(*entry, serviceName)
|
||||
baseEntry.Latency = entry.Timings.Receive
|
||||
baseEntryBytes, _ := models.CreateBaseEntryWebSocketMessage(&baseEntry)
|
||||
BroadcastToBrowserClients(baseEntryBytes)
|
||||
}
|
||||
|
||||
func getServiceNameFromUrl(inputUrl string) (string, string) {
|
||||
parsed, err := url.Parse(inputUrl)
|
||||
utils.CheckErr(err)
|
||||
return fmt.Sprintf("%s://%s", parsed.Scheme, parsed.Host), parsed.Path
|
||||
}
|
||||
|
||||
func CheckIsServiceIP(address string) bool {
|
||||
return k8sResolver.CheckIsServiceIP(address)
|
||||
}
|
||||
|
||||
// gives a rough estimate of the size this will take up in the db, good enough for maintaining db size limit accurately
|
||||
func getEstimatedEntrySizeBytes(mizuEntry models.MizuEntry) int {
|
||||
sizeBytes := len(mizuEntry.Entry)
|
||||
sizeBytes += len(mizuEntry.EntryId)
|
||||
sizeBytes += len(mizuEntry.Service)
|
||||
sizeBytes += len(mizuEntry.Url)
|
||||
sizeBytes += len(mizuEntry.Method)
|
||||
sizeBytes += len(mizuEntry.RequestSenderIp)
|
||||
sizeBytes += len(mizuEntry.ResolvedDestination)
|
||||
sizeBytes += len(mizuEntry.ResolvedSource)
|
||||
sizeBytes += 8 // Status bytes (sqlite integer is always 8 bytes)
|
||||
sizeBytes += 8 // Timestamp bytes
|
||||
sizeBytes += 8 // SizeBytes bytes
|
||||
sizeBytes += 1 // IsOutgoing bytes
|
||||
|
||||
return sizeBytes
|
||||
}
|
||||
@@ -1,118 +0,0 @@
package api

import (
	"errors"
	"github.com/gin-gonic/gin"
	"github.com/gorilla/websocket"
	"github.com/romana/rlog"
	"github.com/up9inc/mizu/shared/debounce"
	"net/http"
	"sync"
	"time"
)

type EventHandlers interface {
|
||||
WebSocketConnect(socketId int, isTapper bool)
|
||||
WebSocketDisconnect(socketId int, isTapper bool)
|
||||
WebSocketMessage(socketId int, message []byte)
|
||||
}
|
||||
|
||||
type SocketConnection struct {
|
||||
connection *websocket.Conn
|
||||
lock *sync.Mutex
|
||||
eventHandlers EventHandlers
|
||||
isTapper bool
|
||||
}
|
||||
|
||||
var websocketUpgrader = websocket.Upgrader{
|
||||
ReadBufferSize: 1024,
|
||||
WriteBufferSize: 1024,
|
||||
}
|
||||
|
||||
var websocketIdsLock = sync.Mutex{}
|
||||
var connectedWebsockets map[int]*SocketConnection
|
||||
var connectedWebsocketIdCounter = 0
|
||||
|
||||
func init() {
|
||||
websocketUpgrader.CheckOrigin = func(r *http.Request) bool { return true } // like cors for web socket
|
||||
connectedWebsockets = make(map[int]*SocketConnection, 0)
|
||||
}
|
||||
|
||||
func WebSocketRoutes(app *gin.Engine, eventHandlers EventHandlers) {
|
||||
app.GET("/ws", func(c *gin.Context) {
|
||||
websocketHandler(c.Writer, c.Request, eventHandlers, false)
|
||||
})
|
||||
app.GET("/wsTapper", func(c *gin.Context) {
|
||||
websocketHandler(c.Writer, c.Request, eventHandlers, true)
|
||||
})
|
||||
}
|
||||
|
||||
func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers EventHandlers, isTapper bool) {
|
||||
conn, err := websocketUpgrader.Upgrade(w, r, nil)
|
||||
if err != nil {
|
||||
rlog.Errorf("Failed to set websocket upgrade: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
websocketIdsLock.Lock()
|
||||
|
||||
connectedWebsocketIdCounter++
|
||||
socketId := connectedWebsocketIdCounter
|
||||
connectedWebsockets[socketId] = &SocketConnection{connection: conn, lock: &sync.Mutex{}, eventHandlers: eventHandlers, isTapper: isTapper}
|
||||
|
||||
websocketIdsLock.Unlock()
|
||||
|
||||
defer func() {
|
||||
socketCleanup(socketId, connectedWebsockets[socketId])
|
||||
}()
|
||||
|
||||
eventHandlers.WebSocketConnect(socketId, isTapper)
|
||||
|
||||
for {
|
||||
_, msg, err := conn.ReadMessage()
|
||||
if err != nil {
|
||||
rlog.Errorf("Error reading message, socket id: %d, error: %v", socketId, err)
|
||||
break
|
||||
}
|
||||
eventHandlers.WebSocketMessage(socketId, msg)
|
||||
}
|
||||
}
|
||||
|
||||
func socketCleanup(socketId int, socketConnection *SocketConnection) {
|
||||
err := socketConnection.connection.Close()
|
||||
if err != nil {
|
||||
rlog.Errorf("Error closing socket connection for socket id %d: %v\n", socketId, err)
|
||||
}
|
||||
|
||||
websocketIdsLock.Lock()
|
||||
connectedWebsockets[socketId] = nil
|
||||
websocketIdsLock.Unlock()
|
||||
|
||||
socketConnection.eventHandlers.WebSocketDisconnect(socketId, socketConnection.isTapper)
|
||||
}
|
||||
|
||||
var db = debounce.NewDebouncer(time.Second*5, func() {
|
||||
rlog.Error("Successfully sent to socket")
|
||||
})
|
||||
|
||||
func SendToSocket(socketId int, message []byte) error {
|
||||
socketObj := connectedWebsockets[socketId]
|
||||
if socketObj == nil {
|
||||
return errors.New("Socket is disconnected")
|
||||
}
|
||||
|
||||
var sent = false
|
||||
time.AfterFunc(time.Second*5, func() {
|
||||
if !sent {
|
||||
rlog.Error("Socket timed out")
|
||||
socketCleanup(socketId, socketObj)
|
||||
}
|
||||
})
|
||||
|
||||
socketObj.lock.Lock() // gorilla socket panics from concurrent writes to a single socket
|
||||
err := socketObj.connection.WriteMessage(1, message)
|
||||
socketObj.lock.Unlock()
|
||||
|
||||
sent = true
|
||||
return err
|
||||
}
|
||||
@@ -1,132 +0,0 @@
package api

import (
	"encoding/json"
	"fmt"
	"mizuserver/pkg/models"
	"mizuserver/pkg/providers"
	"mizuserver/pkg/up9"
	"sync"

	"github.com/romana/rlog"
	"github.com/up9inc/mizu/shared"
	"github.com/up9inc/mizu/tap"
)

var browserClientSocketUUIDs = make([]int, 0)
|
||||
var socketListLock = sync.Mutex{}
|
||||
|
||||
type RoutesEventHandlers struct {
|
||||
EventHandlers
|
||||
SocketHarOutChannel chan<- *tap.OutputChannelItem
|
||||
}
|
||||
|
||||
func init() {
|
||||
go up9.UpdateAnalyzeStatus(BroadcastToBrowserClients)
|
||||
}
|
||||
|
||||
func (h *RoutesEventHandlers) WebSocketConnect(socketId int, isTapper bool) {
|
||||
if isTapper {
|
||||
rlog.Infof("Websocket event - Tapper connected, socket ID: %d", socketId)
|
||||
providers.TapperAdded()
|
||||
} else {
|
||||
rlog.Infof("Websocket event - Browser socket connected, socket ID: %d", socketId)
|
||||
socketListLock.Lock()
|
||||
browserClientSocketUUIDs = append(browserClientSocketUUIDs, socketId)
|
||||
socketListLock.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (h *RoutesEventHandlers) WebSocketDisconnect(socketId int, isTapper bool) {
|
||||
if isTapper {
|
||||
rlog.Infof("Websocket event - Tapper disconnected, socket ID: %d", socketId)
|
||||
providers.TapperRemoved()
|
||||
} else {
|
||||
rlog.Infof("Websocket event - Browser socket disconnected, socket ID: %d", socketId)
|
||||
socketListLock.Lock()
|
||||
removeSocketUUIDFromBrowserSlice(socketId)
|
||||
socketListLock.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func BroadcastToBrowserClients(message []byte) {
|
||||
for _, socketId := range browserClientSocketUUIDs {
|
||||
go func(socketId int) {
|
||||
err := SendToSocket(socketId, message)
|
||||
if err != nil {
|
||||
rlog.Errorf("error sending message to socket ID %d: %v", socketId, err)
|
||||
}
|
||||
}(socketId)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *RoutesEventHandlers) WebSocketMessage(_ int, message []byte) {
|
||||
var socketMessageBase shared.WebSocketMessageMetadata
|
||||
err := json.Unmarshal(message, &socketMessageBase)
|
||||
if err != nil {
|
||||
rlog.Infof("Could not unmarshal websocket message %v\n", err)
|
||||
} else {
|
||||
switch socketMessageBase.MessageType {
|
||||
case shared.WebSocketMessageTypeTappedEntry:
|
||||
var tappedEntryMessage models.WebSocketTappedEntryMessage
|
||||
err := json.Unmarshal(message, &tappedEntryMessage)
|
||||
if err != nil {
|
||||
rlog.Infof("Could not unmarshal message of message type %s %v\n", socketMessageBase.MessageType, err)
|
||||
} else {
|
||||
h.SocketHarOutChannel <- tappedEntryMessage.Data
|
||||
}
|
||||
case shared.WebSocketMessageTypeUpdateStatus:
|
||||
var statusMessage shared.WebSocketStatusMessage
|
||||
err := json.Unmarshal(message, &statusMessage)
|
||||
if err != nil {
|
||||
rlog.Infof("Could not unmarshal message of message type %s %v\n", socketMessageBase.MessageType, err)
|
||||
} else {
|
||||
providers.TapStatus.Pods = statusMessage.TappingStatus.Pods
|
||||
BroadcastToBrowserClients(message)
|
||||
}
|
||||
case shared.WebsocketMessageTypeOutboundLink:
|
||||
var outboundLinkMessage models.WebsocketOutboundLinkMessage
|
||||
err := json.Unmarshal(message, &outboundLinkMessage)
|
||||
if err != nil {
|
||||
rlog.Infof("Could not unmarshal message of message type %s %v\n", socketMessageBase.MessageType, err)
|
||||
} else {
|
||||
handleTLSLink(outboundLinkMessage)
|
||||
}
|
||||
default:
|
||||
rlog.Infof("Received socket message of type %s for which no handlers are defined", socketMessageBase.MessageType)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func handleTLSLink(outboundLinkMessage models.WebsocketOutboundLinkMessage) {
|
||||
resolvedName := k8sResolver.Resolve(outboundLinkMessage.Data.DstIP)
|
||||
if resolvedName != "" {
|
||||
outboundLinkMessage.Data.DstIP = resolvedName
|
||||
} else if outboundLinkMessage.Data.SuggestedResolvedName != "" {
|
||||
outboundLinkMessage.Data.DstIP = outboundLinkMessage.Data.SuggestedResolvedName
|
||||
}
|
||||
cacheKey := fmt.Sprintf("%s -> %s:%d", outboundLinkMessage.Data.Src, outboundLinkMessage.Data.DstIP, outboundLinkMessage.Data.DstPort)
|
||||
_, isInCache := providers.RecentTLSLinks.Get(cacheKey)
|
||||
if isInCache {
|
||||
return
|
||||
} else {
|
||||
providers.RecentTLSLinks.SetDefault(cacheKey, outboundLinkMessage.Data)
|
||||
}
|
||||
marshaledMessage, err := json.Marshal(outboundLinkMessage)
|
||||
if err != nil {
|
||||
rlog.Errorf("Error marshaling outbound link message for broadcasting: %v", err)
|
||||
} else {
|
||||
rlog.Errorf("Broadcasting outboundlink message %s", string(marshaledMessage))
|
||||
BroadcastToBrowserClients(marshaledMessage)
|
||||
}
|
||||
}
|
||||
|
||||
func removeSocketUUIDFromBrowserSlice(uuidToRemove int) {
|
||||
newUUIDSlice := make([]int, 0, len(browserClientSocketUUIDs))
|
||||
for _, uuid := range browserClientSocketUUIDs {
|
||||
if uuid != uuidToRemove {
|
||||
newUUIDSlice = append(newUUIDSlice, uuid)
|
||||
}
|
||||
}
|
||||
browserClientSocketUUIDs = newUUIDSlice
|
||||
}
|
||||
@@ -1,257 +0,0 @@
package controllers

import (
	"encoding/json"
	"fmt"
	"mizuserver/pkg/database"
	"mizuserver/pkg/models"
	"mizuserver/pkg/providers"
	"mizuserver/pkg/up9"
	"mizuserver/pkg/utils"
	"mizuserver/pkg/validation"
	"net/http"
	"strings"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/google/martian/har"
	"github.com/romana/rlog"
)

func GetEntries(c *gin.Context) {
|
||||
entriesFilter := &models.EntriesFilter{}
|
||||
|
||||
if err := c.BindQuery(entriesFilter); err != nil {
|
||||
c.JSON(http.StatusBadRequest, err)
|
||||
}
|
||||
err := validation.Validate(entriesFilter)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, err)
|
||||
}
|
||||
|
||||
order := database.OperatorToOrderMapping[entriesFilter.Operator]
|
||||
operatorSymbol := database.OperatorToSymbolMapping[entriesFilter.Operator]
|
||||
var entries []models.MizuEntry
|
||||
database.GetEntriesTable().
|
||||
Order(fmt.Sprintf("timestamp %s", order)).
|
||||
Where(fmt.Sprintf("timestamp %s %v", operatorSymbol, entriesFilter.Timestamp)).
|
||||
Omit("entry"). // remove the "big" entry field
|
||||
Limit(entriesFilter.Limit).
|
||||
Find(&entries)
|
||||
|
||||
if len(entries) > 0 && order == database.OrderDesc {
|
||||
// the entries are always ordered from oldest to newest, so we should reverse them
|
||||
utils.ReverseSlice(entries)
|
||||
}
|
||||
|
||||
baseEntries := make([]models.BaseEntryDetails, 0)
|
||||
for _, data := range entries {
|
||||
harEntry := models.BaseEntryDetails{}
|
||||
if err := models.GetEntry(&data, &harEntry); err != nil {
|
||||
continue
|
||||
}
|
||||
baseEntries = append(baseEntries, harEntry)
|
||||
}
|
||||
|
||||
c.JSON(http.StatusOK, baseEntries)
|
||||
}
|
||||
|
||||
func GetHARs(c *gin.Context) {
|
||||
entriesFilter := &models.HarFetchRequestQuery{}
|
||||
order := database.OrderDesc
|
||||
if err := c.BindQuery(entriesFilter); err != nil {
|
||||
c.JSON(http.StatusBadRequest, err)
|
||||
}
|
||||
err := validation.Validate(entriesFilter)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, err)
|
||||
}
|
||||
|
||||
var timestampFrom, timestampTo int64
|
||||
|
||||
if entriesFilter.From < 0 {
|
||||
timestampFrom = 0
|
||||
} else {
|
||||
timestampFrom = entriesFilter.From
|
||||
}
|
||||
if entriesFilter.To <= 0 {
|
||||
timestampTo = time.Now().UnixNano() / int64(time.Millisecond)
|
||||
} else {
|
||||
timestampTo = entriesFilter.To
|
||||
}
|
||||
|
||||
var entries []models.MizuEntry
|
||||
database.GetEntriesTable().
|
||||
Where(fmt.Sprintf("timestamp BETWEEN %v AND %v", timestampFrom, timestampTo)).
|
||||
Order(fmt.Sprintf("timestamp %s", order)).
|
||||
Find(&entries)
|
||||
|
||||
if len(entries) > 0 {
|
||||
// the entries are always ordered from oldest to newest, so we should reverse them
|
||||
utils.ReverseSlice(entries)
|
||||
}
|
||||
|
||||
harsObject := map[string]*models.ExtendedHAR{}
|
||||
|
||||
for _, entryData := range entries {
|
||||
var harEntry har.Entry
|
||||
_ = json.Unmarshal([]byte(entryData.Entry), &harEntry)
|
||||
if entryData.ResolvedDestination != "" {
|
||||
harEntry.Request.URL = utils.SetHostname(harEntry.Request.URL, entryData.ResolvedDestination)
|
||||
}
|
||||
|
||||
var fileName string
|
||||
sourceOfEntry := entryData.ResolvedSource
|
||||
if sourceOfEntry != "" {
|
||||
// naively assumes the proper service source is http
|
||||
sourceOfEntry = fmt.Sprintf("http://%s", sourceOfEntry)
|
||||
// replace "/" in the file name because slashes end up creating corrupted folders
|
||||
fileName = fmt.Sprintf("%s.har", strings.ReplaceAll(sourceOfEntry, "/", "_"))
|
||||
} else {
|
||||
fileName = "unknown_source.har"
|
||||
}
|
||||
if harOfSource, ok := harsObject[fileName]; ok {
|
||||
harOfSource.Log.Entries = append(harOfSource.Log.Entries, &harEntry)
|
||||
} else {
|
||||
var entriesHar []*har.Entry
|
||||
entriesHar = append(entriesHar, &harEntry)
|
||||
harsObject[fileName] = &models.ExtendedHAR{
|
||||
Log: &models.ExtendedLog{
|
||||
Version: "1.2",
|
||||
Creator: &models.ExtendedCreator{
|
||||
Creator: &har.Creator{
|
||||
Name: "mizu",
|
||||
Version: "0.0.2",
|
||||
},
|
||||
},
|
||||
Entries: entriesHar,
|
||||
},
|
||||
}
|
||||
// leave undefined when no source is present, otherwise modeler assumes source is empty string ""
|
||||
if sourceOfEntry != "" {
|
||||
harsObject[fileName].Log.Creator.Source = &sourceOfEntry
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
retObj := map[string][]byte{}
|
||||
for k, v := range harsObject {
|
||||
bytesData, _ := json.Marshal(v)
|
||||
retObj[k] = bytesData
|
||||
}
|
||||
buffer := utils.ZipData(retObj)
|
||||
c.Data(http.StatusOK, "application/octet-stream", buffer.Bytes())
|
||||
}
|
||||
|
||||
func UploadEntries(c *gin.Context) {
|
||||
rlog.Infof("Upload entries - started\n")
|
||||
|
||||
uploadParams := &models.UploadEntriesRequestQuery{}
|
||||
if err := c.BindQuery(uploadParams); err != nil {
|
||||
c.JSON(http.StatusBadRequest, err)
|
||||
return
|
||||
}
|
||||
if err := validation.Validate(uploadParams); err != nil {
|
||||
c.JSON(http.StatusBadRequest, err)
|
||||
return
|
||||
}
|
||||
if up9.GetAnalyzeInfo().IsAnalyzing {
|
||||
c.String(http.StatusBadRequest, "Cannot analyze, mizu is already analyzing")
|
||||
return
|
||||
}
|
||||
|
||||
rlog.Infof("Upload entries - creating token. dest %s\n", uploadParams.Dest)
|
||||
token, err := up9.CreateAnonymousToken(uploadParams.Dest)
|
||||
if err != nil {
|
||||
c.String(http.StatusServiceUnavailable, "Cannot analyze, mizu is already analyzing")
|
||||
return
|
||||
}
|
||||
rlog.Infof("Upload entries - uploading. token: %s model: %s\n", token.Token, token.Model)
|
||||
go up9.UploadEntriesImpl(token.Token, token.Model, uploadParams.Dest, uploadParams.SleepIntervalSec)
|
||||
c.String(http.StatusOK, "OK")
|
||||
}
|
||||
|
||||
func GetFullEntries(c *gin.Context) {
|
||||
entriesFilter := &models.HarFetchRequestQuery{}
|
||||
if err := c.BindQuery(entriesFilter); err != nil {
|
||||
c.JSON(http.StatusBadRequest, err)
|
||||
}
|
||||
err := validation.Validate(entriesFilter)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusBadRequest, err)
|
||||
}
|
||||
|
||||
var timestampFrom, timestampTo int64
|
||||
|
||||
if entriesFilter.From < 0 {
|
||||
timestampFrom = 0
|
||||
} else {
|
||||
timestampFrom = entriesFilter.From
|
||||
}
|
||||
if entriesFilter.To <= 0 {
|
||||
timestampTo = time.Now().UnixNano() / int64(time.Millisecond)
|
||||
} else {
|
||||
timestampTo = entriesFilter.To
|
||||
}
|
||||
|
||||
entriesArray := database.GetEntriesFromDb(timestampFrom, timestampTo)
|
||||
result := make([]models.FullEntryDetails, 0)
|
||||
for _, data := range entriesArray {
|
||||
harEntry := models.FullEntryDetails{}
|
||||
if err := models.GetEntry(&data, &harEntry); err != nil {
|
||||
continue
|
||||
}
|
||||
result = append(result, harEntry)
|
||||
}
|
||||
c.JSON(http.StatusOK, result)
|
||||
}
|
||||
|
||||
func GetEntry(c *gin.Context) {
|
||||
var entryData models.MizuEntry
|
||||
database.GetEntriesTable().
|
||||
Where(map[string]string{"entryId": c.Param("entryId")}).
|
||||
First(&entryData)
|
||||
|
||||
fullEntry := models.FullEntryDetails{}
|
||||
if err := models.GetEntry(&entryData, &fullEntry); err != nil {
|
||||
c.JSON(http.StatusInternalServerError, map[string]interface{}{
|
||||
"error": true,
|
||||
"msg": "Can't get entry details",
|
||||
})
|
||||
}
|
||||
fullEntryWithPolicy := models.FullEntryWithPolicy{}
|
||||
if err := models.GetEntry(&entryData, &fullEntryWithPolicy); err != nil {
|
||||
c.JSON(http.StatusInternalServerError, map[string]interface{}{
|
||||
"error": true,
|
||||
"msg": "Can't get entry details",
|
||||
})
|
||||
}
|
||||
c.JSON(http.StatusOK, fullEntryWithPolicy)
|
||||
}
|
||||
|
||||
func DeleteAllEntries(c *gin.Context) {
|
||||
database.GetEntriesTable().
|
||||
Where("1 = 1").
|
||||
Delete(&models.MizuEntry{})
|
||||
|
||||
c.JSON(http.StatusOK, map[string]string{
|
||||
"msg": "Success",
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func GetGeneralStats(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, providers.GetGeneralStats())
|
||||
}
|
||||
|
||||
func GetTappingStatus(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, providers.TapStatus)
|
||||
}
|
||||
|
||||
func AnalyzeInformation(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, up9.GetAnalyzeInfo())
|
||||
}
|
||||
|
||||
func GetRecentTLSLinks(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, providers.GetAllRecentTLSAddresses())
|
||||
}
|
||||
@@ -1,13 +0,0 @@
package controllers

import (
	"github.com/gin-gonic/gin"
	"github.com/up9inc/mizu/shared"
	"mizuserver/pkg/version"
	"net/http"
)

func GetVersion(c *gin.Context) {
	resp := shared.VersionResponse{SemVer: version.SemVer}
	c.JSON(http.StatusOK, resp)
}
@@ -1,12 +0,0 @@
package controllers

import (
	"github.com/gin-gonic/gin"
	"mizuserver/pkg/holder"
	"net/http"
)

func GetCurrentResolvingInformation(c *gin.Context) {
	c.JSON(http.StatusOK, holder.GetResolver().GetMap())
}

@@ -1,36 +0,0 @@
package controllers

import (
	"encoding/json"
	"github.com/gin-gonic/gin"
	"github.com/romana/rlog"
	"github.com/up9inc/mizu/shared"
	"mizuserver/pkg/api"
	"mizuserver/pkg/providers"
	"mizuserver/pkg/validation"
	"net/http"
)

func PostTappedPods(c *gin.Context) {
	tapStatus := &shared.TapStatus{}
	if err := c.Bind(tapStatus); err != nil {
		c.JSON(http.StatusBadRequest, err)
		return
	}
	if err := validation.Validate(tapStatus); err != nil {
		c.JSON(http.StatusBadRequest, err)
		return
	}
	rlog.Infof("[Status] POST request: %d tapped pods", len(tapStatus.Pods))
	providers.TapStatus.Pods = tapStatus.Pods
	message := shared.CreateWebSocketStatusMessage(*tapStatus)
	if jsonBytes, err := json.Marshal(message); err != nil {
		rlog.Errorf("Could not Marshal message %v\n", err)
	} else {
		api.BroadcastToBrowserClients(jsonBytes)
	}
}

func GetTappersCount(c *gin.Context) {
	c.JSON(http.StatusOK, providers.TappersCount)
}
@@ -1,73 +0,0 @@
package database

import (
	"fmt"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
	"mizuserver/pkg/models"
	"mizuserver/pkg/utils"
	"time"
)

const (
	DBPath = "./entries.db"
	OrderDesc = "desc"
	OrderAsc = "asc"
	LT = "lt"
	GT = "gt"
)

var (
	DB *gorm.DB
	IsDBLocked = false
	OperatorToSymbolMapping = map[string]string{
		LT: "<",
		GT: ">",
	}
	OperatorToOrderMapping = map[string]string{
		LT: OrderDesc,
		GT: OrderAsc,
	}
)

func init() {
|
||||
DB = initDataBase(DBPath)
|
||||
go StartEnforcingDatabaseSize()
|
||||
}
|
||||
|
||||
func GetEntriesTable() *gorm.DB {
|
||||
return DB.Table("mizu_entries")
|
||||
}
|
||||
|
||||
func CreateEntry(entry *models.MizuEntry) {
|
||||
if IsDBLocked {
|
||||
return
|
||||
}
|
||||
GetEntriesTable().Create(entry)
|
||||
}
|
||||
|
||||
func initDataBase(databasePath string) *gorm.DB {
|
||||
temp, _ := gorm.Open(sqlite.Open(databasePath), &gorm.Config{
|
||||
Logger: &utils.TruncatingLogger{LogLevel: logger.Warn, SlowThreshold: 500 * time.Millisecond},
|
||||
})
|
||||
_ = temp.AutoMigrate(&models.MizuEntry{}) // this will ensure table is created
|
||||
return temp
|
||||
}
|
||||
|
||||
|
||||
func GetEntriesFromDb(timestampFrom int64, timestampTo int64) []models.MizuEntry {
|
||||
order := OrderDesc
|
||||
var entries []models.MizuEntry
|
||||
GetEntriesTable().
|
||||
Where(fmt.Sprintf("timestamp BETWEEN %v AND %v", timestampFrom, timestampTo)).
|
||||
Order(fmt.Sprintf("timestamp %s", order)).
|
||||
Find(&entries)
|
||||
|
||||
if len(entries) > 0 {
|
||||
// the entries are always ordered from oldest to newest, so we should reverse them
|
||||
utils.ReverseSlice(entries)
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
@@ -1,121 +0,0 @@
package database

import (
	"github.com/fsnotify/fsnotify"
	"github.com/romana/rlog"
	"github.com/up9inc/mizu/shared"
	"github.com/up9inc/mizu/shared/debounce"
	"github.com/up9inc/mizu/shared/units"
	"log"
	"mizuserver/pkg/models"
	"os"
	"strconv"
	"time"
)

const percentageOfMaxSizeBytesToPrune = 15
const defaultMaxDatabaseSizeBytes int64 = 200 * 1000 * 1000

func StartEnforcingDatabaseSize() {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating filesystem watcher for db size enforcement: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
maxEntriesDBByteSize, err := getMaxEntriesDBByteSize()
|
||||
if err != nil {
|
||||
log.Fatalf("Error parsing max db size: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
checkFileSizeDebouncer := debounce.NewDebouncer(5*time.Second, func() {
|
||||
checkFileSize(maxEntriesDBByteSize)
|
||||
})
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-watcher.Events:
|
||||
if !ok {
|
||||
return // closed channel
|
||||
}
|
||||
if event.Op == fsnotify.Write {
|
||||
checkFileSizeDebouncer.SetOn()
|
||||
}
|
||||
case err, ok := <-watcher.Errors:
|
||||
if !ok {
|
||||
return // closed channel
|
||||
}
|
||||
rlog.Errorf("filesystem watcher encountered error:%v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
err = watcher.Add(DBPath)
|
||||
if err != nil {
|
||||
log.Fatalf("Error adding %s to filesystem watcher for db size enforcement: %v\n", DBPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
func getMaxEntriesDBByteSize() (int64, error) {
|
||||
maxEntriesDBByteSize := defaultMaxDatabaseSizeBytes
|
||||
var err error
|
||||
|
||||
maxEntriesDBSizeByteSEnvVarValue := os.Getenv(shared.MaxEntriesDBSizeBytesEnvVar)
|
||||
if maxEntriesDBSizeByteSEnvVarValue != "" {
|
||||
maxEntriesDBByteSize, err = strconv.ParseInt(maxEntriesDBSizeByteSEnvVarValue, 10, 64)
|
||||
}
|
||||
return maxEntriesDBByteSize, err
|
||||
}
|
||||
|
||||
func checkFileSize(maxSizeBytes int64) {
|
||||
fileStat, err := os.Stat(DBPath)
|
||||
if err != nil {
|
||||
rlog.Errorf("Error checking %s file size: %v", DBPath, err)
|
||||
} else {
|
||||
if fileStat.Size() > maxSizeBytes {
|
||||
pruneOldEntries(fileStat.Size())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func pruneOldEntries(currentFileSize int64) {
|
||||
// sqlite locks the database while DELETE or VACUUM are running, and it handles its own db lock poorly under a heavy insert load; we prevent a significant bottleneck by handling the db lock ourselves here
|
||||
IsDBLocked = true
|
||||
defer func() { IsDBLocked = false }()
|
||||
|
||||
amountOfBytesToTrim := currentFileSize / (100 / percentageOfMaxSizeBytesToPrune)
|
||||
|
||||
rows, err := GetEntriesTable().Limit(10000).Order("id").Rows()
|
||||
if err != nil {
|
||||
rlog.Errorf("Error getting 10000 first db rows: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
entryIdsToRemove := make([]uint, 0)
|
||||
bytesToBeRemoved := int64(0)
|
||||
for rows.Next() {
|
||||
if bytesToBeRemoved >= amountOfBytesToTrim {
|
||||
break
|
||||
}
|
||||
var entry models.MizuEntry
|
||||
err = DB.ScanRows(rows, &entry)
|
||||
if err != nil {
|
||||
rlog.Errorf("Error scanning db row: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
entryIdsToRemove = append(entryIdsToRemove, entry.ID)
|
||||
bytesToBeRemoved += int64(entry.EstimatedSizeBytes)
|
||||
}
|
||||
|
||||
if len(entryIdsToRemove) > 0 {
|
||||
GetEntriesTable().Where(entryIdsToRemove).Delete(models.MizuEntry{})
|
||||
// VACUUM causes sqlite to shrink the db file after rows have been deleted; the file will not shrink without it
|
||||
DB.Exec("VACUUM")
|
||||
rlog.Errorf("Removed %d rows and cleared %s", len(entryIdsToRemove), units.BytesToHumanReadable(bytesToBeRemoved))
|
||||
} else {
|
||||
rlog.Error("Found no rows to remove when pruning")
|
||||
}
|
||||
}
|
||||
@@ -1,14 +0,0 @@
package holder

import "mizuserver/pkg/resolver"

var k8sResolver *resolver.Resolver

func SetResolver(param *resolver.Resolver) {
	k8sResolver = param
}

func GetResolver() *resolver.Resolver {
	return k8sResolver
}

@@ -1,226 +0,0 @@
package models

import (
	"encoding/json"

	"mizuserver/pkg/rules"
	"mizuserver/pkg/utils"
	"time"

	"github.com/google/martian/har"
	"github.com/up9inc/mizu/shared"
	"github.com/up9inc/mizu/tap"
)

type DataUnmarshaler interface {
	UnmarshalData(*MizuEntry) error
}

func GetEntry(r *MizuEntry, v DataUnmarshaler) error {
	return v.UnmarshalData(r)
}

type MizuEntry struct {
	ID                  uint `gorm:"primarykey"`
	CreatedAt           time.Time
	UpdatedAt           time.Time
	Entry               string `json:"entry,omitempty" gorm:"column:entry"`
	EntryId             string `json:"entryId" gorm:"column:entryId"`
	Url                 string `json:"url" gorm:"column:url"`
	Method              string `json:"method" gorm:"column:method"`
	Status              int `json:"status" gorm:"column:status"`
	RequestSenderIp     string `json:"requestSenderIp" gorm:"column:requestSenderIp"`
	Service             string `json:"service" gorm:"column:service"`
	Timestamp           int64 `json:"timestamp" gorm:"column:timestamp"`
	Path                string `json:"path" gorm:"column:path"`
	ResolvedSource      string `json:"resolvedSource,omitempty" gorm:"column:resolvedSource"`
	ResolvedDestination string `json:"resolvedDestination,omitempty" gorm:"column:resolvedDestination"`
	IsOutgoing          bool `json:"isOutgoing,omitempty" gorm:"column:isOutgoing"`
	EstimatedSizeBytes  int `json:"-" gorm:"column:estimatedSizeBytes"`
}

type BaseEntryDetails struct {
	Id              string `json:"id,omitempty"`
	Url             string `json:"url,omitempty"`
	RequestSenderIp string `json:"requestSenderIp,omitempty"`
	Service         string `json:"service,omitempty"`
	Path            string `json:"path,omitempty"`
	StatusCode      int `json:"statusCode,omitempty"`
	Method          string `json:"method,omitempty"`
	Timestamp       int64 `json:"timestamp,omitempty"`
	IsOutgoing      bool `json:"isOutgoing,omitempty"`
	Latency         int64 `json:"latency,omitempty"`
	Rules           ApplicableRules `json:"rules,omitempty"`
}

type ApplicableRules struct {
	Latency       int64 `json:"latency,omitempty"`
	Status        bool `json:"status,omitempty"`
	NumberOfRules int `json:"numberOfRules,omitempty"`
}

func NewApplicableRules(status bool, latency int64, number int) ApplicableRules {
	ar := ApplicableRules{}
	ar.Status = status
	ar.Latency = latency
	ar.NumberOfRules = number
	return ar
}

type FullEntryDetails struct {
	har.Entry
}

type FullEntryDetailsExtra struct {
	har.Entry
}

func (bed *BaseEntryDetails) UnmarshalData(entry *MizuEntry) error {
	entryUrl := entry.Url
	service := entry.Service
	if entry.ResolvedDestination != "" {
		entryUrl = utils.SetHostname(entryUrl, entry.ResolvedDestination)
		service = utils.SetHostname(service, entry.ResolvedDestination)
	}
	bed.Id = entry.EntryId
	bed.Url = entryUrl
	bed.Service = service
	bed.Path = entry.Path
	bed.StatusCode = entry.Status
	bed.Method = entry.Method
	bed.Timestamp = entry.Timestamp
	bed.RequestSenderIp = entry.RequestSenderIp
	bed.IsOutgoing = entry.IsOutgoing
	return nil
}

func (fed *FullEntryDetails) UnmarshalData(entry *MizuEntry) error {
	if err := json.Unmarshal([]byte(entry.Entry), &fed.Entry); err != nil {
		return err
	}

	if entry.ResolvedDestination != "" {
		fed.Entry.Request.URL = utils.SetHostname(fed.Entry.Request.URL, entry.ResolvedDestination)
	}
	return nil
}

func (fedex *FullEntryDetailsExtra) UnmarshalData(entry *MizuEntry) error {
	if err := json.Unmarshal([]byte(entry.Entry), &fedex.Entry); err != nil {
		return err
	}

	if entry.ResolvedSource != "" {
		fedex.Entry.Request.Headers = append(fedex.Request.Headers, har.Header{Name: "x-mizu-source", Value: entry.ResolvedSource})
	}
	if entry.ResolvedDestination != "" {
		fedex.Entry.Request.Headers = append(fedex.Entry.Request.Headers, har.Header{Name: "x-mizu-destination", Value: entry.ResolvedDestination})
		fedex.Entry.Request.URL = utils.SetHostname(fedex.Entry.Request.URL, entry.ResolvedDestination)
	}
	return nil
}

type EntriesFilter struct {
	Limit     int `form:"limit" validate:"required,min=1,max=200"`
	Operator  string `form:"operator" validate:"required,oneof='lt' 'gt'"`
	Timestamp int64 `form:"timestamp" validate:"required,min=1"`
}

type UploadEntriesRequestQuery struct {
	Dest             string `form:"dest"`
	SleepIntervalSec int `form:"interval"`
}

type HarFetchRequestQuery struct {
	From int64 `form:"from"`
	To   int64 `form:"to"`
}

type WebSocketEntryMessage struct {
	*shared.WebSocketMessageMetadata
	Data *BaseEntryDetails `json:"data,omitempty"`
}

type WebSocketTappedEntryMessage struct {
	*shared.WebSocketMessageMetadata
	Data *tap.OutputChannelItem
}

type WebsocketOutboundLinkMessage struct {
	*shared.WebSocketMessageMetadata
	Data *tap.OutboundLink
}

func CreateBaseEntryWebSocketMessage(base *BaseEntryDetails) ([]byte, error) {
	message := &WebSocketEntryMessage{
		WebSocketMessageMetadata: &shared.WebSocketMessageMetadata{
			MessageType: shared.WebSocketMessageTypeEntry,
		},
		Data: base,
	}
	return json.Marshal(message)
}

func CreateWebsocketTappedEntryMessage(base *tap.OutputChannelItem) ([]byte, error) {
	message := &WebSocketTappedEntryMessage{
		WebSocketMessageMetadata: &shared.WebSocketMessageMetadata{
			MessageType: shared.WebSocketMessageTypeTappedEntry,
		},
		Data: base,
	}
	return json.Marshal(message)
}

func CreateWebsocketOutboundLinkMessage(base *tap.OutboundLink) ([]byte, error) {
	message := &WebsocketOutboundLinkMessage{
		WebSocketMessageMetadata: &shared.WebSocketMessageMetadata{
			MessageType: shared.WebsocketMessageTypeOutboundLink,
		},
		Data: base,
	}
	return json.Marshal(message)
}

// ExtendedHAR is the top level object of a HAR log.
type ExtendedHAR struct {
	Log *ExtendedLog `json:"log"`
}

// ExtendedLog is the HAR HTTP request and response log.
type ExtendedLog struct {
	// Version number of the HAR format.
	Version string `json:"version"`
	// Creator holds information about the log creator application.
	Creator *ExtendedCreator `json:"creator"`
	// Entries is a list containing requests and responses.
	Entries []*har.Entry `json:"entries"`
}

type ExtendedCreator struct {
	*har.Creator
	Source *string `json:"_source"`
}

type FullEntryWithPolicy struct {
	RulesMatched []rules.RulesMatched `json:"rulesMatched,omitempty"`
	Entry        har.Entry `json:"entry"`
	Service      string `json:"service"`
}

func (fewp *FullEntryWithPolicy) UnmarshalData(entry *MizuEntry) error {
	if err := json.Unmarshal([]byte(entry.Entry), &fewp.Entry); err != nil {
		return err
	}

	_, resultPolicyToSend := rules.MatchRequestPolicy(fewp.Entry, entry.Service)
	fewp.RulesMatched = resultPolicyToSend
	fewp.Service = entry.Service
	return nil
}

func RunValidationRulesState(harEntry har.Entry, service string) ApplicableRules {
	numberOfRules, resultPolicyToSend := rules.MatchRequestPolicy(harEntry, service)
	statusPolicyToSend, latency, numberOfRules := rules.PassedValidationRules(resultPolicyToSend, numberOfRules)
	ar := NewApplicableRules(statusPolicyToSend, latency, numberOfRules)
	return ar
}

@@ -1,36 +0,0 @@
package providers

import (
	"reflect"
	"time"
)

type GeneralStats struct {
	EntriesCount        int
	FirstEntryTimestamp int
	LastEntryTimestamp  int
}

var generalStats = GeneralStats{}

func ResetGeneralStats() {
	generalStats = GeneralStats{}
}

func GetGeneralStats() GeneralStats {
	return generalStats
}

func EntryAdded() {
	generalStats.EntriesCount++

	currentTimestamp := int(time.Now().Unix())

	if reflect.Value.IsZero(reflect.ValueOf(generalStats.FirstEntryTimestamp)) {
		generalStats.FirstEntryTimestamp = currentTimestamp
	}

	generalStats.LastEntryTimestamp = currentTimestamp
}

@@ -1,35 +0,0 @@
package providers_test

import (
	"fmt"
	"mizuserver/pkg/providers"
	"testing"
)

func TestNoEntryAddedCount(t *testing.T) {
	entriesStats := providers.GetGeneralStats()

	if entriesStats.EntriesCount != 0 {
		t.Errorf("unexpected result - expected: %v, actual: %v", 0, entriesStats.EntriesCount)
	}
}

func TestEntryAddedCount(t *testing.T) {
	tests := []int{1, 5, 10, 100, 500, 1000}

	for _, entriesCount := range tests {
		t.Run(fmt.Sprintf("%d", entriesCount), func(t *testing.T) {
			for i := 0; i < entriesCount; i++ {
				providers.EntryAdded()
			}

			entriesStats := providers.GetGeneralStats()

			if entriesStats.EntriesCount != entriesCount {
				t.Errorf("unexpected result - expected: %v, actual: %v", entriesCount, entriesStats.EntriesCount)
			}

			t.Cleanup(providers.ResetGeneralStats)
		})
	}
}

@@ -1,44 +0,0 @@
package providers

import (
	"github.com/patrickmn/go-cache"
	"github.com/up9inc/mizu/shared"
	"github.com/up9inc/mizu/tap"
	"sync"
	"time"
)

const tlsLinkRetainmentTime = time.Minute * 15

var (
	TappersCount   int
	TapStatus      shared.TapStatus
	RecentTLSLinks = cache.New(tlsLinkRetainmentTime, tlsLinkRetainmentTime)

	tappersCountLock = sync.Mutex{}
)

func GetAllRecentTLSAddresses() []string {
	recentTLSLinks := make([]string, 0)

	for _, outboundLinkItem := range RecentTLSLinks.Items() {
		outboundLink, castOk := outboundLinkItem.Object.(*tap.OutboundLink)
		if castOk {
			recentTLSLinks = append(recentTLSLinks, outboundLink.DstIP)
		}
	}

	return recentTLSLinks
}

func TapperAdded() {
	tappersCountLock.Lock()
	TappersCount++
	tappersCountLock.Unlock()
}

func TapperRemoved() {
	tappersCountLock.Lock()
	TappersCount--
	tappersCountLock.Unlock()
}

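As a usage note for the cache above, the producer side would be expected to record each tapper-reported TLS destination for the retainment window. A hedged one-line sketch (the link variable is hypothetical, not from this commit):

// Hypothetical producer: remember a reported outbound TLS link until it expires.
providers.RecentTLSLinks.Set(link.DstIP, link, cache.DefaultExpiration)
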
@@ -1,23 +0,0 @@
package resolver

import (
	cmap "github.com/orcaman/concurrent-map"
	"k8s.io/client-go/kubernetes"
	_ "k8s.io/client-go/plugin/pkg/client/auth/azure"
	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
	_ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
	restclient "k8s.io/client-go/rest"
)

func NewFromInCluster(errOut chan error, namespace string) (*Resolver, error) {
	config, err := restclient.InClusterConfig()
	if err != nil {
		return nil, err
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return &Resolver{clientConfig: config, clientSet: clientset, nameMap: cmap.New(), serviceMap: cmap.New(), errOut: errOut, namespace: namespace}, nil
}

@@ -1,192 +0,0 @@
package resolver

import (
	"context"
	"errors"
	"fmt"
	"github.com/romana/rlog"
	k8serrors "k8s.io/apimachinery/pkg/api/errors"

	"github.com/orcaman/concurrent-map"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
)

const (
	kubClientNullString = "None"
)

type Resolver struct {
	clientConfig *restclient.Config
	clientSet    *kubernetes.Clientset
	nameMap      cmap.ConcurrentMap
	serviceMap   cmap.ConcurrentMap
	isStarted    bool
	errOut       chan error
	namespace    string
}

func (resolver *Resolver) Start(ctx context.Context) {
	if !resolver.isStarted {
		resolver.isStarted = true

		go resolver.infiniteErrorHandleRetryFunc(ctx, resolver.watchServices)
		go resolver.infiniteErrorHandleRetryFunc(ctx, resolver.watchEndpoints)
		go resolver.infiniteErrorHandleRetryFunc(ctx, resolver.watchPods)
	}
}

func (resolver *Resolver) Resolve(name string) string {
	resolvedName, isFound := resolver.nameMap.Get(name)
	if !isFound {
		return ""
	}
	return resolvedName.(string)
}

func (resolver *Resolver) GetMap() cmap.ConcurrentMap {
	return resolver.nameMap
}

func (resolver *Resolver) CheckIsServiceIP(address string) bool {
	_, isFound := resolver.serviceMap.Get(address)
	return isFound
}

func (resolver *Resolver) watchPods(ctx context.Context) error {
	// empty namespace makes the client watch all namespaces
	watcher, err := resolver.clientSet.CoreV1().Pods(resolver.namespace).Watch(ctx, metav1.ListOptions{Watch: true})
	if err != nil {
		return err
	}
	for {
		select {
		case event := <-watcher.ResultChan():
			if event.Object == nil {
				return errors.New("error in kubectl pod watch")
			}
			if event.Type == watch.Deleted {
				pod := event.Object.(*corev1.Pod)
				resolver.saveResolvedName(pod.Status.PodIP, "", event.Type)
			}
		case <-ctx.Done():
			watcher.Stop()
			return nil
		}
	}
}

func (resolver *Resolver) watchEndpoints(ctx context.Context) error {
	// empty namespace makes the client watch all namespaces
	watcher, err := resolver.clientSet.CoreV1().Endpoints(resolver.namespace).Watch(ctx, metav1.ListOptions{Watch: true})
	if err != nil {
		return err
	}
	for {
		select {
		case event := <-watcher.ResultChan():
			if event.Object == nil {
				return errors.New("error in kubectl endpoint watch")
			}
			endpoint := event.Object.(*corev1.Endpoints)
			serviceHostname := fmt.Sprintf("%s.%s", endpoint.Name, endpoint.Namespace)
			if endpoint.Subsets != nil {
				for _, subset := range endpoint.Subsets {
					var ports []int32
					if subset.Ports != nil {
						for _, portMapping := range subset.Ports {
							if portMapping.Port > 0 {
								ports = append(ports, portMapping.Port)
							}
						}
					}
					if subset.Addresses != nil {
						for _, address := range subset.Addresses {
							resolver.saveResolvedName(address.IP, serviceHostname, event.Type)
							for _, port := range ports {
								ipWithPort := fmt.Sprintf("%s:%d", address.IP, port)
								resolver.saveResolvedName(ipWithPort, serviceHostname, event.Type)
							}
						}
					}

				}
			}
		case <-ctx.Done():
			watcher.Stop()
			return nil
		}
	}
}

func (resolver *Resolver) watchServices(ctx context.Context) error {
	// empty namespace makes the client watch all namespaces
	watcher, err := resolver.clientSet.CoreV1().Services(resolver.namespace).Watch(ctx, metav1.ListOptions{Watch: true})
	if err != nil {
		return err
	}
	for {
		select {
		case event := <-watcher.ResultChan():
			if event.Object == nil {
				return errors.New("error in kubectl service watch")
			}

			service := event.Object.(*corev1.Service)
			serviceHostname := fmt.Sprintf("%s.%s", service.Name, service.Namespace)
			if service.Spec.ClusterIP != "" && service.Spec.ClusterIP != kubClientNullString {
				resolver.saveResolvedName(service.Spec.ClusterIP, serviceHostname, event.Type)
				resolver.saveServiceIP(service.Spec.ClusterIP, serviceHostname, event.Type)
			}
			if service.Status.LoadBalancer.Ingress != nil {
				for _, ingress := range service.Status.LoadBalancer.Ingress {
					resolver.saveResolvedName(ingress.IP, serviceHostname, event.Type)
				}
			}
		case <-ctx.Done():
			watcher.Stop()
			return nil
		}
	}
}

func (resolver *Resolver) saveResolvedName(key string, resolved string, eventType watch.EventType) {
	if eventType == watch.Deleted {
		resolver.nameMap.Remove(key)
		rlog.Infof("setting %s=nil\n", key)
	} else {
		resolver.nameMap.Set(key, resolved)
		rlog.Infof("setting %s=%s\n", key, resolved)
	}
}

func (resolver *Resolver) saveServiceIP(key string, resolved string, eventType watch.EventType) {
	if eventType == watch.Deleted {
		resolver.serviceMap.Remove(key)
	} else {
		resolver.serviceMap.Set(key, resolved)
	}
}

func (resolver *Resolver) infiniteErrorHandleRetryFunc(ctx context.Context, fun func(ctx context.Context) error) {
	for {
		err := fun(ctx)
		if err != nil {
			resolver.errOut <- err

			var statusError *k8serrors.StatusError
			if errors.As(err, &statusError) {
				if statusError.ErrStatus.Reason == metav1.StatusReasonForbidden {
					rlog.Infof("Resolver loop encountered permission error, aborting event listening - %v\n", err)
					return
				}
			}
		}
		if ctx.Err() != nil { // context was cancelled or errored
			return
		}
	}
}

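A brief sketch of how a caller might wire the resolver defined above; the channel size, context handling, and the looked-up address are assumptions for illustration:

// Hypothetical caller: start the watchers and resolve an address seen in captured traffic.
errOut := make(chan error, 1)
k8sResolver, err := resolver.NewFromInCluster(errOut, "") // empty namespace watches all namespaces
if err != nil {
	log.Fatalf("could not create resolver: %v", err)
}
k8sResolver.Start(ctx)
resolvedName := k8sResolver.Resolve("10.0.0.12:8080") // "" if the address is not (yet) known
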
@@ -1,26 +0,0 @@
package routes

import (
	"github.com/gin-gonic/gin"
	"mizuserver/pkg/controllers"
)

// EntriesRoutes defines the group of har entries routes.
func EntriesRoutes(ginApp *gin.Engine) {
	routeGroup := ginApp.Group("/api")

	routeGroup.GET("/entries", controllers.GetEntries)        // get entries (base/thin entries)
	routeGroup.GET("/entries/:entryId", controllers.GetEntry) // get single (full) entry
	routeGroup.GET("/exportEntries", controllers.GetFullEntries)
	routeGroup.GET("/uploadEntries", controllers.UploadEntries)
	routeGroup.GET("/resolving", controllers.GetCurrentResolvingInformation)

	routeGroup.GET("/har", controllers.GetHARs)

	routeGroup.GET("/resetDB", controllers.DeleteAllEntries)     // delete all entries from the db
	routeGroup.GET("/generalStats", controllers.GetGeneralStats) // get general stats about entries in DB

	routeGroup.GET("/tapStatus", controllers.GetTappingStatus) // get tapping status
	routeGroup.GET("/analyzeStatus", controllers.AnalyzeInformation)
	routeGroup.GET("/recentTLSLinks", controllers.GetRecentTLSLinks)
}

@@ -1,13 +0,0 @@
package routes

import (
	"github.com/gin-gonic/gin"
	"mizuserver/pkg/controllers"
)

// MetadataRoutes defines the group of metadata routes.
func MetadataRoutes(app *gin.Engine) {
	routeGroup := app.Group("/metadata")

	routeGroup.GET("/version", controllers.GetVersion)
}

@@ -1,18 +0,0 @@
package routes

import (
	"github.com/gin-gonic/gin"
	"net/http"
)

// NotFoundRoute defines the 404 Error route.
func NotFoundRoute(app *gin.Engine) {
	app.Use(
		func(c *gin.Context) {
			c.JSON(http.StatusNotFound, map[string]interface{}{
				"error": true,
				"msg":   "sorry, endpoint is not found",
			})
		},
	)
}

@@ -1,14 +0,0 @@
package routes

import (
	"github.com/gin-gonic/gin"
	"mizuserver/pkg/controllers"
)

func StatusRoutes(ginApp *gin.Engine) {
	routeGroup := ginApp.Group("/status")

	routeGroup.POST("/tappedPods", controllers.PostTappedPods)

	routeGroup.GET("/tappersCount", controllers.GetTappersCount)
}

@@ -1,110 +0,0 @@
package rules

import (
	"encoding/json"
	"fmt"
	"reflect"
	"regexp"
	"strings"

	"github.com/google/martian/har"
	"github.com/up9inc/mizu/shared"
	jsonpath "github.com/yalp/jsonpath"
)

type RulesMatched struct {
	Matched bool `json:"matched"`
	Rule    shared.RulePolicy `json:"rule"`
}

func appendRulesMatched(rulesMatched []RulesMatched, matched bool, rule shared.RulePolicy) []RulesMatched {
	return append(rulesMatched, RulesMatched{Matched: matched, Rule: rule})
}

func ValidatePath(URLFromRule string, URL string) bool {
	if URLFromRule != "" {
		matchPath, err := regexp.MatchString(URLFromRule, URL)
		if err != nil || !matchPath {
			return false
		}
	}
	return true
}

func ValidateService(serviceFromRule string, service string) bool {
	if serviceFromRule != "" {
		matchService, err := regexp.MatchString(serviceFromRule, service)
		if err != nil || !matchService {
			return false
		}
	}
	return true
}

func MatchRequestPolicy(harEntry har.Entry, service string) (int, []RulesMatched) {
	enforcePolicy, _ := shared.DecodeEnforcePolicy(fmt.Sprintf("%s/%s", shared.RulePolicyPath, shared.RulePolicyFileName))
	var resultPolicyToSend []RulesMatched
	for _, rule := range enforcePolicy.Rules {
		if !ValidatePath(rule.Path, harEntry.Request.URL) || !ValidateService(rule.Service, service) {
			continue
		}
		if rule.Type == "json" {
			var bodyJsonMap interface{}
			if err := json.Unmarshal(harEntry.Response.Content.Text, &bodyJsonMap); err != nil {
				continue
			}
			out, err := jsonpath.Read(bodyJsonMap, rule.Key)
			if err != nil || out == nil {
				continue
			}
			var matchValue bool
			if reflect.TypeOf(out).Kind() == reflect.String {
				matchValue, err = regexp.MatchString(rule.Value, out.(string))
				if err != nil {
					continue
				}
			} else {
				val := fmt.Sprint(out)
				matchValue, err = regexp.MatchString(rule.Value, val)
				if err != nil {
					continue
				}
			}
			resultPolicyToSend = appendRulesMatched(resultPolicyToSend, matchValue, rule)
		} else if rule.Type == "header" {
			for j := range harEntry.Response.Headers {
				matchKey, err := regexp.MatchString(rule.Key, harEntry.Response.Headers[j].Name)
				if err != nil {
					continue
				}
				if matchKey {
					matchValue, err := regexp.MatchString(rule.Value, harEntry.Response.Headers[j].Value)
					if err != nil {
						continue
					}
					resultPolicyToSend = appendRulesMatched(resultPolicyToSend, matchValue, rule)
				}
			}
		} else {
			resultPolicyToSend = appendRulesMatched(resultPolicyToSend, true, rule)
		}
	}
	return len(enforcePolicy.Rules), resultPolicyToSend
}

func PassedValidationRules(rulesMatched []RulesMatched, numberOfRules int) (bool, int64, int) {
	if len(rulesMatched) == 0 {
		return false, 0, 0
	}
	for _, rule := range rulesMatched {
		if !rule.Matched {
			return false, -1, len(rulesMatched)
		}
	}
	for _, rule := range rulesMatched {
		if strings.ToLower(rule.Rule.Type) == "latency" {
			return true, rule.Rule.Latency, len(rulesMatched)
		}
	}
	return true, -1, len(rulesMatched)
}

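To illustrate the return contract of PassedValidationRules above, a short sketch from a hypothetical caller; the rule values are invented for illustration and only use the RulePolicy fields referenced in this file:

// Both rules matched and one of them is a latency rule, so the result is a pass
// that surfaces that rule's latency budget along with the number of matched rules.
matched := []rules.RulesMatched{
	{Matched: true, Rule: shared.RulePolicy{Type: "header"}},
	{Matched: true, Rule: shared.RulePolicy{Type: "latency", Latency: 250}},
}
passed, latency, count := rules.PassedValidationRules(matched, 2)
fmt.Println(passed, latency, count) // true 250 2
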
@@ -1,201 +0,0 @@
package up9

import (
	"bytes"
	"compress/zlib"
	"encoding/json"
	"fmt"
	"github.com/romana/rlog"
	"github.com/up9inc/mizu/shared"
	"io/ioutil"
	"log"
	"mizuserver/pkg/database"
	"mizuserver/pkg/models"
	"net/http"
	"net/url"
	"strings"
	"time"
)

const (
	AnalyzeCheckSleepTime = 5 * time.Second
)

type GuestToken struct {
	Token string `json:"token"`
	Model string `json:"model"`
}

type ModelStatus struct {
	LastMajorGeneration float64 `json:"lastMajorGeneration"`
}

func getGuestToken(url string, target *GuestToken) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	rlog.Infof("Got token from the server, starting to json decode... status code: %v", resp.StatusCode)
	return json.NewDecoder(resp.Body).Decode(target)
}

func CreateAnonymousToken(envPrefix string) (*GuestToken, error) {
	tokenUrl := fmt.Sprintf("https://trcc.%s/anonymous/token", envPrefix)
	if strings.HasPrefix(envPrefix, "http") {
		tokenUrl = fmt.Sprintf("%s/api/token", envPrefix)
	}
	token := &GuestToken{}
	if err := getGuestToken(tokenUrl, token); err != nil {
		rlog.Infof("Failed to get token, %s", err)
		return nil, err
	}
	return token, nil
}

func GetRemoteUrl(analyzeDestination string, analyzeToken string) string {
	return fmt.Sprintf("https://%s/share/%s", analyzeDestination, analyzeToken)
}

func CheckIfModelReady(analyzeDestination string, analyzeModel string, analyzeToken string) bool {
	statusUrl, _ := url.Parse(fmt.Sprintf("https://trcc.%s/models/%s/status", analyzeDestination, analyzeModel))
	req := &http.Request{
		Method: http.MethodGet,
		URL:    statusUrl,
		Header: map[string][]string{
			"Content-Type": {"application/json"},
			"Guest-Auth":   {analyzeToken},
		},
	}
	statusResp, err := http.DefaultClient.Do(req)
	if err != nil {
		return false
	}
	defer statusResp.Body.Close()

	target := &ModelStatus{}
	_ = json.NewDecoder(statusResp.Body).Decode(&target)

	return target.LastMajorGeneration > 0
}

func GetTrafficDumpUrl(analyzeDestination string, analyzeModel string) *url.URL {
	strUrl := fmt.Sprintf("https://traffic.%s/dumpTrafficBulk/%s", analyzeDestination, analyzeModel)
	if strings.HasPrefix(analyzeDestination, "http") {
		strUrl = fmt.Sprintf("%s/api/workspace/dumpTrafficBulk", analyzeDestination)
	}
	postUrl, _ := url.Parse(strUrl)
	return postUrl
}

type AnalyzeInformation struct {
	IsAnalyzing        bool
	SentCount          int
	AnalyzedModel      string
	AnalyzeToken       string
	AnalyzeDestination string
}

func (info *AnalyzeInformation) Reset() {
	info.IsAnalyzing = false
	info.AnalyzedModel = ""
	info.AnalyzeToken = ""
	info.AnalyzeDestination = ""
	info.SentCount = 0
}

var analyzeInformation = &AnalyzeInformation{}

func GetAnalyzeInfo() *shared.AnalyzeStatus {
	return &shared.AnalyzeStatus{
		IsAnalyzing:   analyzeInformation.IsAnalyzing,
		RemoteUrl:     GetRemoteUrl(analyzeInformation.AnalyzeDestination, analyzeInformation.AnalyzeToken),
		IsRemoteReady: CheckIfModelReady(analyzeInformation.AnalyzeDestination, analyzeInformation.AnalyzedModel, analyzeInformation.AnalyzeToken),
		SentCount:     analyzeInformation.SentCount,
	}
}

func UploadEntriesImpl(token string, model string, envPrefix string, sleepIntervalSec int) {
	analyzeInformation.IsAnalyzing = true
	analyzeInformation.AnalyzedModel = model
	analyzeInformation.AnalyzeToken = token
	analyzeInformation.AnalyzeDestination = envPrefix
	analyzeInformation.SentCount = 0

	sleepTime := time.Second * time.Duration(sleepIntervalSec)

	var timestampFrom int64 = 0

	for {
		timestampTo := time.Now().UnixNano() / int64(time.Millisecond)
		rlog.Infof("Getting entries from %v, to %v\n", timestampFrom, timestampTo)
		entriesArray := database.GetEntriesFromDb(timestampFrom, timestampTo)

		if len(entriesArray) > 0 {

			fullEntriesExtra := make([]models.FullEntryDetailsExtra, 0)
			for _, data := range entriesArray {
				harEntry := models.FullEntryDetailsExtra{}
				if err := models.GetEntry(&data, &harEntry); err != nil {
					continue
				}
				fullEntriesExtra = append(fullEntriesExtra, harEntry)
			}
			rlog.Infof("About to upload %v entries\n", len(fullEntriesExtra))

			body, jMarshalErr := json.Marshal(fullEntriesExtra)
			if jMarshalErr != nil {
				analyzeInformation.Reset()
				rlog.Infof("Stopping analyzing")
				log.Fatal(jMarshalErr)
			}

			var in bytes.Buffer
			w := zlib.NewWriter(&in)
			_, _ = w.Write(body)
			_ = w.Close()
			reqBody := ioutil.NopCloser(bytes.NewReader(in.Bytes()))

			req := &http.Request{
				Method: http.MethodPost,
				URL:    GetTrafficDumpUrl(envPrefix, model),
				Header: map[string][]string{
					"Content-Encoding": {"deflate"},
					"Content-Type":     {"application/octet-stream"},
					"Guest-Auth":       {token},
				},
				Body: reqBody,
			}

			if _, postErr := http.DefaultClient.Do(req); postErr != nil {
				analyzeInformation.Reset()
				rlog.Info("Stopping analyzing")
				log.Fatal(postErr)
			}
			analyzeInformation.SentCount += len(entriesArray)
			rlog.Infof("Finish uploading %v entries to %s\n", len(entriesArray), GetTrafficDumpUrl(envPrefix, model))

		} else {
			rlog.Infof("Nothing to upload")
		}

		rlog.Infof("Sleeping for %v...\n", sleepTime)
		time.Sleep(sleepTime)
		timestampFrom = timestampTo
	}
}

func UpdateAnalyzeStatus(callback func(data []byte)) {
	for {
		if !analyzeInformation.IsAnalyzing {
			time.Sleep(AnalyzeCheckSleepTime)
			continue
		}
		analyzeStatus := GetAnalyzeInfo()
		socketMessage := shared.CreateWebSocketMessageTypeAnalyzeStatus(*analyzeStatus)

		jsonMessage, _ := json.Marshal(socketMessage)
		callback(jsonMessage)
		time.Sleep(AnalyzeCheckSleepTime)
	}
}

@@ -1,59 +0,0 @@
package utils

import (
	"context"
	"fmt"
	"github.com/romana/rlog"
	"gorm.io/gorm/logger"
	"gorm.io/gorm/utils"
	"time"
)

// TruncatingLogger implements the gorm logger.Interface interface. Its purpose is to act as gorm's logger while truncating logs to a max of 150 characters to minimise the performance impact
type TruncatingLogger struct {
	LogLevel      logger.LogLevel
	SlowThreshold time.Duration
}

func (truncatingLogger *TruncatingLogger) LogMode(logLevel logger.LogLevel) logger.Interface {
	truncatingLogger.LogLevel = logLevel
	return truncatingLogger
}

func (truncatingLogger *TruncatingLogger) Info(_ context.Context, message string, __ ...interface{}) {
	if truncatingLogger.LogLevel < logger.Info {
		return
	}
	rlog.Errorf("gorm info: %.150s", message)
}

func (truncatingLogger *TruncatingLogger) Warn(_ context.Context, message string, __ ...interface{}) {
	if truncatingLogger.LogLevel < logger.Warn {
		return
	}
	rlog.Errorf("gorm warning: %.150s", message)
}

func (truncatingLogger *TruncatingLogger) Error(_ context.Context, message string, __ ...interface{}) {
	if truncatingLogger.LogLevel < logger.Error {
		return
	}
	rlog.Errorf("gorm error: %.150s", message)
}

func (truncatingLogger *TruncatingLogger) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) {
	if truncatingLogger.LogLevel == logger.Silent {
		return
	}
	elapsed := time.Since(begin)
	if err != nil {
		sql, rows := fc() // copied into every condition as this is a potentially heavy operation best done only when necessary
		truncatingLogger.Error(ctx, fmt.Sprintf("Error in %s: %v - elapsed: %fs affected rows: %d, sql: %s", utils.FileWithLineNum(), err, elapsed.Seconds(), rows, sql))
	} else if truncatingLogger.LogLevel >= logger.Warn && elapsed > truncatingLogger.SlowThreshold {
		sql, rows := fc()
		truncatingLogger.Warn(ctx, fmt.Sprintf("Slow sql query - elapsed: %fs rows: %d, sql: %s", elapsed.Seconds(), rows, sql))
	} else if truncatingLogger.LogLevel >= logger.Info {
		sql, rows := fc()
		truncatingLogger.Info(ctx, fmt.Sprintf("Sql query - elapsed: %fs rows: %d, sql: %s", elapsed.Seconds(), rows, sql))
	}
}

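The logger above is meant to be handed to gorm when the session is opened. A minimal sketch of that wiring, assuming the sqlite driver already listed in go.mod; the database path and thresholds are illustrative, not taken from this commit:

// Hypothetical wiring of TruncatingLogger into a gorm session.
db, err := gorm.Open(sqlite.Open("entries.db"), &gorm.Config{
	Logger: &utils.TruncatingLogger{LogLevel: logger.Warn, SlowThreshold: 500 * time.Millisecond},
})
if err != nil {
	log.Fatalf("failed to open db: %v", err)
}
_ = db
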
@@ -1,8 +0,0 @@
package version

var (
	SemVer         = "0.0.1"
	Branch         = "develop"
	GitCommitHash  = "" // this var is overridden using ldflags in makefile when building
	BuildTimestamp = "" // this var is overridden using ldflags in makefile when building
)

@@ -1,5 +1,5 @@
# mizu agent
Agent for MIZU (API server and tapper)
# mizu API server
API server for MIZU
Basic APIs:
* /fetch - retrieve traffic data
* /stats - retrieve statistics of collected data
@@ -14,7 +14,7 @@ Basic APIs:

### Connecting
1. Start mizu using the cli with the debug image `mizu tap --mizu-image gcr.io/up9-docker-hub/mizu/debug:latest {tapped_pod_name}`
2. Forward the debug port using `kubectl port-forward -n default mizu-api-server 2345:2345`
2. Forward the debug port using `kubectl port-forward -n default mizu-collector 2345:2345`
3. Run the run/debug configuration you've created earlier in Intellij.

<small>Do note that dlv won't start the api until a debugger connects to it.</small>

@@ -3,23 +3,23 @@ module mizuserver
go 1.16

require (
	github.com/antoniodipinto/ikisocket v0.0.0-20210417133349-f1502512d69a
	github.com/beevik/etree v1.1.0
	github.com/djherbis/atime v1.0.0
	github.com/fsnotify/fsnotify v1.4.9
	github.com/gin-contrib/static v0.0.1
	github.com/gin-gonic/gin v1.7.2
	github.com/fasthttp/websocket v1.4.3-beta.1 // indirect
	github.com/go-playground/locales v0.13.0
	github.com/go-playground/universal-translator v0.17.0
	github.com/go-playground/validator/v10 v10.5.0
	github.com/gofiber/fiber/v2 v2.8.0
	github.com/google/gopacket v1.1.19
	github.com/google/martian v2.1.0+incompatible
	github.com/gorilla/websocket v1.4.2
	github.com/leodido/go-urn v1.2.1 // indirect
	github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231
	github.com/patrickmn/go-cache v2.1.0+incompatible
	github.com/romana/rlog v0.0.0-20171115192701-f018bc92e7d7
	github.com/up9inc/mizu/shared v0.0.0
	github.com/up9inc/mizu/tap v0.0.0
	github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0
	go.mongodb.org/mongo-driver v1.5.1
	golang.org/x/net v0.0.0-20210421230115-4e50805a0758
	gorm.io/driver/sqlite v1.1.4
	gorm.io/gorm v1.21.8
	k8s.io/api v0.21.0
@@ -28,5 +28,3 @@ require (
)

replace github.com/up9inc/mizu/shared v0.0.0 => ../shared

replace github.com/up9inc/mizu/tap v0.0.0 => ../tap

@@ -41,12 +41,15 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
|
||||
github.com/andybalholm/brotli v1.0.1 h1:KqhlKozYbRtJvsPrrEeXcO+N2l6NYT5A2QAFmSULpEc=
|
||||
github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
|
||||
github.com/antoniodipinto/ikisocket v0.0.0-20210417133349-f1502512d69a h1:76llBleIE3fkdqaJFDzdirtiYhQPdIQem8H8r2iwA1Q=
|
||||
github.com/antoniodipinto/ikisocket v0.0.0-20210417133349-f1502512d69a/go.mod h1:QvDfsDQDmGxUsvEeWabVZ5pp2FMXpOkwQV0L6SE6cp0=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
|
||||
github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=
|
||||
github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
|
||||
github.com/bradleyfalzon/tlsx v0.0.0-20170624122154-28fd0e59bac4 h1:NJOOlc6ZJjix0A1rAU+nxruZtR8KboG1848yqpIUo4M=
|
||||
github.com/bradleyfalzon/tlsx v0.0.0-20170624122154-28fd0e59bac4/go.mod h1:DQPxZS994Ld1Y8uwnJT+dRL04XPD0cElP/pHH/zEBHM=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
@@ -58,26 +61,18 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/djherbis/atime v1.0.0 h1:ySLvBAM0EvOGaX7TI4dAM5lWj+RdJUCKtGSEHN8SGBg=
|
||||
github.com/djherbis/atime v1.0.0/go.mod h1:5W+KBIuTwVGcqjIfaTwt+KSYX1o6uep8dtevevQP/f8=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fasthttp/websocket v1.4.2/go.mod h1:smsv/h4PBEBaU0XDTY5UwJTpZv69fQ0FfcLJr21mA6Y=
|
||||
github.com/fasthttp/websocket v1.4.3-beta.1 h1:stc4P2aoxYKsdmbe1AJ5mAm73Fxc1NOgrZpPftvZIXQ=
|
||||
github.com/fasthttp/websocket v1.4.3-beta.1/go.mod h1:JGrgLaT02bL9NuJkZbHN8mVV2tkCJZQh7yJ5/XCXO2g=
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-contrib/static v0.0.1 h1:JVxuvHPuUfkoul12N7dtQw7KRn/pSMq7Ue1Va9Swm1U=
|
||||
github.com/gin-contrib/static v0.0.1/go.mod h1:CSxeF+wep05e0kCOsqWdAWbSszmc31zTIbD8TvWl7Hs=
|
||||
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
|
||||
github.com/gin-gonic/gin v1.7.2 h1:Tg03T9yM2xa8j6I3Z3oqLaQRSmKvxPd6g/2HJ6zICFA=
|
||||
github.com/gin-gonic/gin v1.7.2/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
@@ -97,8 +92,6 @@ github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8c
|
||||
github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
|
||||
github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
|
||||
github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
|
||||
github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI=
|
||||
github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
|
||||
github.com/go-playground/validator/v10 v10.5.0 h1:X9rflw/KmpACwT8zdrm1upefpvdy6ur8d1kWyq6sg3E=
|
||||
github.com/go-playground/validator/v10 v10.5.0/go.mod h1:xm76BBt941f7yWdGnI2DVPFFg1UK3YY04qifoXU3lOk=
|
||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
@@ -127,6 +120,12 @@ github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWe
|
||||
github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
|
||||
github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
|
||||
github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
|
||||
github.com/gofiber/fiber/v2 v2.1.3/go.mod h1:MMiSv1HrDkN8Pv7NeVDYK+T/lwXOEKAvPBbLvJPCEfA=
|
||||
github.com/gofiber/fiber/v2 v2.7.1/go.mod h1:f8BRRIMjMdRyt2qmJ/0Sea3j3rwwfufPrh9WNBRiVZ0=
|
||||
github.com/gofiber/fiber/v2 v2.8.0 h1:BdWvZmg/WY/Vjtjm38aXOp1Lks1BhuyS2b7lSWSPAzk=
|
||||
github.com/gofiber/fiber/v2 v2.8.0/go.mod h1:Ah3IJikrKNRepl/HuVawppS25X7FWohwfCSRn7kJG28=
|
||||
github.com/gofiber/websocket/v2 v2.0.3 h1:nqPGHB4LQhxKX5KJUjayOd2xiiENieS/dn6TPfCL8uk=
|
||||
github.com/gofiber/websocket/v2 v2.0.3/go.mod h1:/OTEImCxORKE5unw0dWqJYovid6vZF+wB1W0aaMKs2M=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
@@ -187,6 +186,7 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
||||
@@ -198,7 +198,6 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
@@ -207,7 +206,13 @@ github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaR
|
||||
github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.11.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4=
|
||||
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
@@ -217,14 +222,13 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
|
||||
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
|
||||
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
|
||||
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
|
||||
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-sqlite3 v1.14.5 h1:1IdxlwTNazvbKJQSxoJ5/9ECbEeaTTyeU7sEAZ5KKTQ=
|
||||
github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
@@ -260,8 +264,9 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
|
||||
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/romana/rlog v0.0.0-20171115192701-f018bc92e7d7 h1:jkvpcEatpwuMF5O5LVxTnehj6YZ/aEZN4NWD/Xml4pI=
|
||||
github.com/romana/rlog v0.0.0-20171115192701-f018bc92e7d7/go.mod h1:KTrHyWpO1sevuXPZwyeZc72ddWRFqNSKDFl7uVWKpg0=
|
||||
github.com/savsgio/gotils v0.0.0-20200117113501-90175b0fbe3f/go.mod h1:lHhJedqxCoHN+zMtwGNTXWmF0u9Jt363FYRhV6g0CdY=
|
||||
github.com/savsgio/gotils v0.0.0-20200616100644-13ff1fd2c28c h1:KKqhycXW1WVNkX7r4ekTV2gFkbhdyihlWD8c0/FiWmk=
|
||||
github.com/savsgio/gotils v0.0.0-20200616100644-13ff1fd2c28c/go.mod h1:TWNAOTaVzGOXq8RbEvHnhzA/A2sLZzgn0m6URjnukY8=
|
||||
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
@@ -273,22 +278,28 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
|
||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.9.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
|
||||
github.com/valyala/fasthttp v1.15.1/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA=
|
||||
github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA=
|
||||
github.com/valyala/fasthttp v1.18.0/go.mod h1:jjraHZVbKOXftJfsOYoAjaeygpj5hr8ermTRJNroD7A=
|
||||
github.com/valyala/fasthttp v1.23.0 h1:0ufwSD9BhWa6f8HWdmdq4FHQ23peRo3Ng/Qs8m5NcFs=
|
||||
github.com/valyala/fasthttp v1.23.0/go.mod h1:0mw2RjXGOzxf4NL2jni3gUQ7LfjjUSiG5sskOUUSEpU=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc=
|
||||
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
|
||||
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
|
||||
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY=
|
||||
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -361,8 +372,11 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201016165138-7b1cca2348c0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226101413-39120d07d75e/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758 h1:aEpZnXcAmXkd6AvLb2OPt+EN1Zu/8Ne3pCqPjja5PXY=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -396,20 +410,21 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201210223839-7e3030f88018/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe h1:WdX7u8s3yOigWAhHEaDl8r9G+4XwFQEQFtBMYyN+kXQ=
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -540,9 +555,8 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/sqlite v1.1.4 h1:PDzwYE+sI6De2+mxAneV9Xs11+ZyKV6oxD3wDGkaNvM=
gorm.io/driver/sqlite v1.1.4/go.mod h1:mJCeTFr7+crvS+TRnWc5Z3UvwxUN1BGBLMrf5LA9DYw=
gorm.io/gorm v1.20.7/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
147 api/main.go Normal file
@@ -0,0 +1,147 @@
package main

import (
    "encoding/json"
    "flag"
    "fmt"
    "github.com/gofiber/fiber/v2"
    "github.com/gorilla/websocket"
    "github.com/up9inc/mizu/shared"
    "mizuserver/pkg/api"
    "mizuserver/pkg/middleware"
    "mizuserver/pkg/models"
    "mizuserver/pkg/routes"
    "mizuserver/pkg/sensitiveDataFiltering"
    "mizuserver/pkg/tap"
    "mizuserver/pkg/utils"
    "os"
    "os/signal"
)

var shouldTap = flag.Bool("tap", false, "Run in tapper mode without API")
var aggregator = flag.Bool("aggregator", false, "Run in aggregator mode with API")
var standalone = flag.Bool("standalone", false, "Run in standalone tapper and API mode")
var aggregatorAddress = flag.String("aggregator-address", "", "Address of mizu collector for tapping")

func main() {
    flag.Parse()

    if !*shouldTap && !*aggregator && !*standalone {
        panic("One of the flags --tap, --aggregator or --standalone must be provided")
    }

    if *standalone {
        harOutputChannel := tap.StartPassiveTapper()
        filteredHarChannel := make(chan *tap.OutputChannelItem)
        go filterHarHeaders(harOutputChannel, filteredHarChannel, getTrafficFilteringOptions())
        go api.StartReadingEntries(filteredHarChannel, nil)
        hostApi(nil)
    } else if *shouldTap {
        if *aggregatorAddress == "" {
            panic("Aggregator address must be provided with --aggregator-address when using --tap")
        }

        tapTargets := getTapTargets()
        if tapTargets != nil {
            tap.HostAppAddresses = tapTargets
            fmt.Println("Filtering for the following addresses:", tap.HostAppAddresses)
        }

        harOutputChannel := tap.StartPassiveTapper()
        socketConnection, err := shared.ConnectToSocketServer(*aggregatorAddress, shared.DEFAULT_SOCKET_RETRIES, shared.DEFAULT_SOCKET_RETRY_SLEEP_TIME, false)
        if err != nil {
            panic(fmt.Sprintf("Error connecting to socket server at %s %v", *aggregatorAddress, err))
        }
        go pipeChannelToSocket(socketConnection, harOutputChannel)
    } else if *aggregator {
        socketHarOutChannel := make(chan *tap.OutputChannelItem, 1000)
        filteredHarChannel := make(chan *tap.OutputChannelItem)
        go api.StartReadingEntries(filteredHarChannel, nil)
        go filterHarHeaders(socketHarOutChannel, filteredHarChannel, getTrafficFilteringOptions())
        hostApi(socketHarOutChannel)
    }

    signalChan := make(chan os.Signal, 1)
    signal.Notify(signalChan, os.Interrupt)
    <-signalChan

    fmt.Println("Exiting")
}

func hostApi(socketHarOutputChannel chan<- *tap.OutputChannelItem) {
    app := fiber.New()

    middleware.FiberMiddleware(app) // Register Fiber's middleware for app.
    app.Static("/", "./site")

    // Simple route to know server is running
    app.Get("/echo", func(c *fiber.Ctx) error {
        return c.SendString("Hello, World 👋!")
    })
    eventHandlers := api.RoutesEventHandlers{
        SocketHarOutChannel: socketHarOutputChannel,
    }
    routes.WebSocketRoutes(app, &eventHandlers)
    routes.EntriesRoutes(app)
    routes.NotFoundRoute(app)

    utils.StartServer(app)
}

func getTapTargets() []string {
    nodeName := os.Getenv(shared.NodeNameEnvVar)
    var tappedAddressesPerNodeDict map[string][]string
    err := json.Unmarshal([]byte(os.Getenv(shared.TappedAddressesPerNodeDictEnvVar)), &tappedAddressesPerNodeDict)
    if err != nil {
        panic(fmt.Sprintf("env var %s's value of %s is invalid! must be map[string][]string %v", shared.TappedAddressesPerNodeDictEnvVar, tappedAddressesPerNodeDict, err))
    }
    return tappedAddressesPerNodeDict[nodeName]
}

func getTrafficFilteringOptions() *shared.TrafficFilteringOptions {
    filteringOptionsJson := os.Getenv(shared.MizuFilteringOptionsEnvVar)
    if filteringOptionsJson == "" {
        return nil
    }
    var filteringOptions shared.TrafficFilteringOptions
    err := json.Unmarshal([]byte(filteringOptionsJson), &filteringOptions)
    if err != nil {
        panic(fmt.Sprintf("env var %s's value of %s is invalid! json must match the shared.TrafficFilteringOptions struct %v", shared.MizuFilteringOptionsEnvVar, filteringOptionsJson, err))
    }

    return &filteringOptions
}

func filterHarHeaders(inChannel <-chan *tap.OutputChannelItem, outChannel chan *tap.OutputChannelItem, filterOptions *shared.TrafficFilteringOptions) {
    for message := range inChannel {
        sensitiveDataFiltering.FilterSensitiveInfoFromHarRequest(message, filterOptions)
        outChannel <- message
    }
}

func pipeChannelToSocket(connection *websocket.Conn, messageDataChannel <-chan *tap.OutputChannelItem) {
    if connection == nil {
        panic("Websocket connection is nil")
    }

    if messageDataChannel == nil {
        panic("Channel of captured messages is nil")
    }

    for messageData := range messageDataChannel {
        marshaledData, err := models.CreateWebsocketTappedEntryMessage(messageData)
        if err != nil {
            fmt.Printf("error converting message to json %s, (%v,%+v)\n", err, err, err)
            continue
        }

        err = connection.WriteMessage(websocket.TextMessage, marshaledData)
        if err != nil {
            fmt.Printf("error sending message through socket server %s, (%v,%+v)\n", err, err, err)
            continue
        }
    }
}
131 api/pkg/api/main.go Normal file
@@ -0,0 +1,131 @@
package api

import (
    "bufio"
    "context"
    "encoding/json"
    "fmt"
    "github.com/google/martian/har"
    "go.mongodb.org/mongo-driver/bson/primitive"
    "mizuserver/pkg/database"
    "mizuserver/pkg/models"
    "mizuserver/pkg/resolver"
    "mizuserver/pkg/tap"
    "mizuserver/pkg/utils"
    "net/url"
    "os"
    "path"
    "sort"
    "time"
)

var k8sResolver *resolver.Resolver

func init() {
    errOut := make(chan error, 100)
    res, err := resolver.NewFromInCluster(errOut)
    if err != nil {
        fmt.Printf("error creating k8s resolver %s", err)
        return
    }
    ctx := context.Background()
    res.Start(ctx)
    go func() {
        for {
            select {
            case err := <-errOut:
                fmt.Printf("name resolving error %s", err)
            }
        }
    }()

    k8sResolver = res
}

func StartReadingEntries(harChannel <-chan *tap.OutputChannelItem, workingDir *string) {
    if workingDir != nil && *workingDir != "" {
        startReadingFiles(*workingDir)
    } else {
        startReadingChannel(harChannel)
    }
}

func startReadingFiles(workingDir string) {
    err := os.MkdirAll(workingDir, os.ModePerm)
    utils.CheckErr(err)

    for true {
        dir, _ := os.Open(workingDir)
        dirFiles, _ := dir.Readdir(-1)
        sort.Sort(utils.ByModTime(dirFiles))

        if len(dirFiles) == 0 {
            fmt.Printf("Waiting for new files\n")
            time.Sleep(3 * time.Second)
            continue
        }
        fileInfo := dirFiles[0]
        inputFilePath := path.Join(workingDir, fileInfo.Name())
        file, err := os.Open(inputFilePath)
        utils.CheckErr(err)

        var inputHar har.HAR
        decErr := json.NewDecoder(bufio.NewReader(file)).Decode(&inputHar)
        utils.CheckErr(decErr)

        for _, entry := range inputHar.Log.Entries {
            time.Sleep(time.Millisecond * 250)
            saveHarToDb(entry, fileInfo.Name())
        }
        rmErr := os.Remove(inputFilePath)
        utils.CheckErr(rmErr)
    }
}

func startReadingChannel(outputItems <-chan *tap.OutputChannelItem) {
    if outputItems == nil {
        panic("Channel of captured messages is nil")
    }

    for item := range outputItems {
        saveHarToDb(item.HarEntry, item.RequestSenderIp)
    }
}

func saveHarToDb(entry *har.Entry, sender string) {
    entryBytes, _ := json.Marshal(entry)
    serviceName, urlPath, serviceHostName := getServiceNameFromUrl(entry.Request.URL)
    entryId := primitive.NewObjectID().Hex()
    var (
        resolvedSource      string
        resolvedDestination string
    )
    if k8sResolver != nil {
        resolvedSource = k8sResolver.Resolve(sender)
        resolvedDestination = k8sResolver.Resolve(serviceHostName)
    }
    mizuEntry := models.MizuEntry{
        EntryId:             entryId,
        Entry:               string(entryBytes), // simple way to store it and not convert to bytes
        Service:             serviceName,
        Url:                 entry.Request.URL,
        Path:                urlPath,
        Method:              entry.Request.Method,
        Status:              entry.Response.Status,
        RequestSenderIp:     sender,
        Timestamp:           entry.StartedDateTime.UnixNano() / int64(time.Millisecond),
        ResolvedSource:      resolvedSource,
        ResolvedDestination: resolvedDestination,
    }
    database.GetEntriesTable().Create(&mizuEntry)

    baseEntry := utils.GetResolvedBaseEntry(mizuEntry)
    baseEntryBytes, _ := models.CreateBaseEntryWebSocketMessage(&baseEntry)
    broadcastToBrowserClients(baseEntryBytes)
}

func getServiceNameFromUrl(inputUrl string) (string, string, string) {
    parsed, err := url.Parse(inputUrl)
    utils.CheckErr(err)
    return fmt.Sprintf("%s://%s", parsed.Scheme, parsed.Host), parsed.Path, parsed.Host
}
96 api/pkg/api/socket_server_handlers.go Normal file
@@ -0,0 +1,96 @@
package api

import (
    "encoding/json"
    "fmt"
    "github.com/antoniodipinto/ikisocket"
    "github.com/up9inc/mizu/shared"
    "mizuserver/pkg/controllers"
    "mizuserver/pkg/models"
    "mizuserver/pkg/routes"
    "mizuserver/pkg/tap"
)

var browserClientSocketUUIDs = make([]string, 0)

type RoutesEventHandlers struct {
    routes.EventHandlers
    SocketHarOutChannel chan<- *tap.OutputChannelItem
}

func (h *RoutesEventHandlers) WebSocketConnect(ep *ikisocket.EventPayload) {
    if ep.Kws.GetAttribute("is_tapper") == true {
        fmt.Println(fmt.Sprintf("Websocket Connection event - Tapper connected: %s", ep.SocketUUID))
    } else {
        fmt.Println(fmt.Sprintf("Websocket Connection event - Browser socket connected: %s", ep.SocketUUID))
        browserClientSocketUUIDs = append(browserClientSocketUUIDs, ep.SocketUUID)
    }
}

func (h *RoutesEventHandlers) WebSocketDisconnect(ep *ikisocket.EventPayload) {
    if ep.Kws.GetAttribute("is_tapper") == true {
        fmt.Println(fmt.Sprintf("Disconnection event - Tapper connected: %s", ep.SocketUUID))
    } else {
        fmt.Println(fmt.Sprintf("Disconnection event - Browser socket connected: %s", ep.SocketUUID))
        removeSocketUUIDFromBrowserSlice(ep.SocketUUID)
    }
}

func broadcastToBrowserClients(message []byte) {
    ikisocket.EmitToList(browserClientSocketUUIDs, message)
}

func (h *RoutesEventHandlers) WebSocketClose(ep *ikisocket.EventPayload) {
    if ep.Kws.GetAttribute("is_tapper") == true {
        fmt.Println(fmt.Sprintf("Websocket Close event - Tapper connected: %s", ep.SocketUUID))
    } else {
        fmt.Println(fmt.Sprintf("Websocket Close event - Browser socket connected: %s", ep.SocketUUID))
        removeSocketUUIDFromBrowserSlice(ep.SocketUUID)
    }
}

func (h *RoutesEventHandlers) WebSocketError(ep *ikisocket.EventPayload) {
    fmt.Println(fmt.Sprintf("Socket error - Socket uuid : %s %v", ep.SocketUUID, ep.Error))
}

func (h *RoutesEventHandlers) WebSocketMessage(ep *ikisocket.EventPayload) {
    var socketMessageBase shared.WebSocketMessageMetadata
    err := json.Unmarshal(ep.Data, &socketMessageBase)
    if err != nil {
        fmt.Printf("Could not unmarshal websocket message %v\n", err)
    } else {
        switch socketMessageBase.MessageType {
        case shared.WebSocketMessageTypeTappedEntry:
            var tappedEntryMessage models.WebSocketTappedEntryMessage
            err := json.Unmarshal(ep.Data, &tappedEntryMessage)
            if err != nil {
                fmt.Printf("Could not unmarshal message of message type %s %v\n", socketMessageBase.MessageType, err)
            } else {
                h.SocketHarOutChannel <- tappedEntryMessage.Data
            }
        case shared.WebSocketMessageTypeUpdateStatus:
            var statusMessage shared.WebSocketStatusMessage
            err := json.Unmarshal(ep.Data, &statusMessage)
            if err != nil {
                fmt.Printf("Could not unmarshal message of message type %s %v\n", socketMessageBase.MessageType, err)
            } else {
                controllers.TapStatus = statusMessage.TappingStatus
                broadcastToBrowserClients(ep.Data)
            }
        default:
            fmt.Printf("Received socket message of type %s for which no handlers are defined", socketMessageBase.MessageType)
        }
    }
}

func removeSocketUUIDFromBrowserSlice(uuidToRemove string) {
    newUUIDSlice := make([]string, 0, len(browserClientSocketUUIDs))
    for _, uuid := range browserClientSocketUUIDs {
        if uuid != uuidToRemove {
            newUUIDSlice = append(newUUIDSlice, uuid)
        }
    }
    browserClientSocketUUIDs = newUUIDSlice
}
166 api/pkg/controllers/entries_controller.go Normal file
@@ -0,0 +1,166 @@
package controllers

import (
    "encoding/json"
    "fmt"
    "github.com/gofiber/fiber/v2"
    "github.com/google/martian/har"
    "mizuserver/pkg/database"
    "mizuserver/pkg/models"
    "mizuserver/pkg/utils"
    "mizuserver/pkg/validation"
)

const (
    OrderDesc = "desc"
    OrderAsc  = "asc"
    LT        = "lt"
    GT        = "gt"
)

var (
    operatorToSymbolMapping = map[string]string{
        LT: "<",
        GT: ">",
    }
    operatorToOrderMapping = map[string]string{
        LT: OrderDesc,
        GT: OrderAsc,
    }
)

func GetEntries(c *fiber.Ctx) error {
    entriesFilter := &models.EntriesFilter{}

    if err := c.QueryParser(entriesFilter); err != nil {
        return c.Status(fiber.StatusBadRequest).JSON(err)
    }
    err := validation.Validate(entriesFilter)
    if err != nil {
        return c.Status(fiber.StatusBadRequest).JSON(err)
    }

    order := operatorToOrderMapping[entriesFilter.Operator]
    operatorSymbol := operatorToSymbolMapping[entriesFilter.Operator]
    var entries []models.MizuEntry
    database.GetEntriesTable().
        Order(fmt.Sprintf("timestamp %s", order)).
        Where(fmt.Sprintf("timestamp %s %v", operatorSymbol, entriesFilter.Timestamp)).
        Omit("entry"). // remove the "big" entry field
        Limit(entriesFilter.Limit).
        Find(&entries)

    if len(entries) > 0 && order == OrderDesc {
        // the entries are always returned oldest to newest, so reverse them
        utils.ReverseSlice(entries)
    }

    // Convert to base entries
    baseEntries := make([]models.BaseEntryDetails, 0, entriesFilter.Limit)
    for _, entry := range entries {
        baseEntries = append(baseEntries, utils.GetResolvedBaseEntry(entry))
    }

    return c.Status(fiber.StatusOK).JSON(baseEntries)
}

func GetHARs(c *fiber.Ctx) error {
    entriesFilter := &models.HarFetchRequestBody{}
    order := OrderDesc
    if err := c.QueryParser(entriesFilter); err != nil {
        return c.Status(fiber.StatusBadRequest).JSON(err)
    }
    err := validation.Validate(entriesFilter)
    if err != nil {
        return c.Status(fiber.StatusBadRequest).JSON(err)
    }

    var entries []models.MizuEntry
    database.GetEntriesTable().
        Order(fmt.Sprintf("timestamp %s", order)).
        // Where(fmt.Sprintf("timestamp %s %v", operatorSymbol, entriesFilter.Timestamp)).
        Limit(1000).
        Find(&entries)

    if len(entries) > 0 {
        // the entries are always returned oldest to newest, so reverse them
        utils.ReverseSlice(entries)
    }

    harsObject := map[string]*models.ExtendedHAR{}

    for _, entryData := range entries {
        var harEntry har.Entry
        _ = json.Unmarshal([]byte(entryData.Entry), &harEntry)

        sourceOfEntry := entryData.ResolvedSource
        fileName := fmt.Sprintf("%s.har", sourceOfEntry)
        if harOfSource, ok := harsObject[fileName]; ok {
            harOfSource.Log.Entries = append(harOfSource.Log.Entries, &harEntry)
        } else {
            var entriesHar []*har.Entry
            entriesHar = append(entriesHar, &harEntry)
            harsObject[fileName] = &models.ExtendedHAR{
                Log: &models.ExtendedLog{
                    Version: "1.2",
                    Creator: &models.ExtendedCreator{
                        Creator: &har.Creator{
                            Name:    "mizu",
                            Version: "0.0.2",
                        },
                        Source: sourceOfEntry,
                    },
                    Entries: entriesHar,
                },
            }
        }
    }

    retObj := map[string][]byte{}
    for k, v := range harsObject {
        bytesData, _ := json.Marshal(v)
        retObj[k] = bytesData
    }
    buffer := utils.ZipData(retObj)
    return c.Status(fiber.StatusOK).SendStream(buffer)
}

func GetEntry(c *fiber.Ctx) error {
    var entryData models.EntryData
    database.GetEntriesTable().
        Select("entry", "resolvedDestination").
        Where(map[string]string{"entryId": c.Params("entryId")}).
        First(&entryData)

    var fullEntry har.Entry
    unmarshallErr := json.Unmarshal([]byte(entryData.Entry), &fullEntry)
    utils.CheckErr(unmarshallErr)

    if entryData.ResolvedDestination != "" {
        fullEntry.Request.URL = utils.SetHostname(fullEntry.Request.URL, entryData.ResolvedDestination)
    }

    return c.Status(fiber.StatusOK).JSON(fullEntry)
}

func DeleteAllEntries(c *fiber.Ctx) error {
    database.GetEntriesTable().
        Where("1 = 1").
        Delete(&models.MizuEntry{})

    return c.Status(fiber.StatusOK).JSON(fiber.Map{
        "msg": "Success",
    })
}

func GetGeneralStats(c *fiber.Ctx) error {
    sqlQuery := "SELECT count(*) as count, min(timestamp) as min, max(timestamp) as max from mizu_entries"
    var result struct {
        Count int
        Min   int
        Max   int
    }
    database.GetEntriesTable().Raw(sqlQuery).Scan(&result)
    return c.Status(fiber.StatusOK).JSON(&result)
}
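Editor's note: GetEntries above pages through stored traffic using the limit, operator and timestamp query parameters declared on models.EntriesFilter, and returns a JSON array of BaseEntryDetails. The following is a minimal client sketch that is not part of the diff; the localhost:8899 address is an assumption (the real port is decided by utils.StartServer, which is not shown here), and the timestamp value is an arbitrary example in milliseconds.

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "net/url"
)

func main() {
    // Assumed address; the actual host/port are configured by utils.StartServer.
    base := "http://localhost:8899/api/entries"

    params := url.Values{}
    params.Set("limit", "50")                // validated as 1..200 by EntriesFilter
    params.Set("operator", "lt")             // "lt" pages backwards in time, "gt" forwards
    params.Set("timestamp", "1620000000000") // milliseconds, same unit as MizuEntry.Timestamp

    resp, err := http.Get(base + "?" + params.Encode())
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(body)) // JSON array of models.BaseEntryDetails
}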
12 api/pkg/controllers/status_controller.go Normal file
@@ -0,0 +1,12 @@
package controllers

import (
    "github.com/gofiber/fiber/v2"
    "github.com/up9inc/mizu/shared"
)

var TapStatus shared.TapStatus

func GetTappingStatus(c *fiber.Ctx) error {
    return c.Status(fiber.StatusOK).JSON(TapStatus)
}
25 api/pkg/database/main.go Normal file
@@ -0,0 +1,25 @@
package database

import (
    "gorm.io/driver/sqlite"
    "gorm.io/gorm"
    "mizuserver/pkg/models"
)

const (
    DBPath = "./entries.db"
)

var (
    DB = initDataBase(DBPath)
)

func GetEntriesTable() *gorm.DB {
    return DB.Table("mizu_entries")
}

func initDataBase(databasePath string) *gorm.DB {
    temp, _ := gorm.Open(sqlite.Open(databasePath), &gorm.Config{})
    _ = temp.AutoMigrate(&models.MizuEntry{}) // this will ensure table is created
    return temp
}
18 api/pkg/middleware/fiber_middleware.go Normal file
@@ -0,0 +1,18 @@
package middleware

import (
    "github.com/gofiber/fiber/v2"
    "github.com/gofiber/fiber/v2/middleware/cors"
    "github.com/gofiber/fiber/v2/middleware/logger"
)

// FiberMiddleware provides Fiber's built-in middlewares.
// See: https://docs.gofiber.io/api/middleware
func FiberMiddleware(a *fiber.App) {
    a.Use(
        // Add CORS to each route.
        cors.New(),
        // Add simple logger.
        logger.New(),
    )
}
104 api/pkg/models/models.go Normal file
@@ -0,0 +1,104 @@
package models

import (
    "encoding/json"
    "github.com/google/martian/har"
    "github.com/up9inc/mizu/shared"
    "mizuserver/pkg/tap"
    "time"
)

type MizuEntry struct {
    ID                  uint `gorm:"primarykey"`
    CreatedAt           time.Time
    UpdatedAt           time.Time
    Entry               string `json:"entry,omitempty" gorm:"column:entry"`
    EntryId             string `json:"entryId" gorm:"column:entryId"`
    Url                 string `json:"url" gorm:"column:url"`
    Method              string `json:"method" gorm:"column:method"`
    Status              int    `json:"status" gorm:"column:status"`
    RequestSenderIp     string `json:"requestSenderIp" gorm:"column:requestSenderIp"`
    Service             string `json:"service" gorm:"column:service"`
    Timestamp           int64  `json:"timestamp" gorm:"column:timestamp"`
    Path                string `json:"path" gorm:"column:path"`
    ResolvedSource      string `json:"resolvedSource,omitempty" gorm:"column:resolvedSource"`
    ResolvedDestination string `json:"resolvedDestination,omitempty" gorm:"column:resolvedDestination"`
}

type BaseEntryDetails struct {
    Id              string `json:"id,omitempty"`
    Url             string `json:"url,omitempty"`
    RequestSenderIp string `json:"requestSenderIp,omitempty"`
    Service         string `json:"service,omitempty"`
    Path            string `json:"path,omitempty"`
    StatusCode      int    `json:"statusCode,omitempty"`
    Method          string `json:"method,omitempty"`
    Timestamp       int64  `json:"timestamp,omitempty"`
}

type EntryData struct {
    Entry               string `json:"entry,omitempty"`
    ResolvedDestination string `json:"resolvedDestination,omitempty" gorm:"column:resolvedDestination"`
}

type EntriesFilter struct {
    Limit     int    `query:"limit" validate:"required,min=1,max=200"`
    Operator  string `query:"operator" validate:"required,oneof='lt' 'gt'"`
    Timestamp int64  `query:"timestamp" validate:"required,min=1"`
}

type HarFetchRequestBody struct {
    Limit int `query:"limit"`
}

type WebSocketEntryMessage struct {
    *shared.WebSocketMessageMetadata
    Data *BaseEntryDetails `json:"data,omitempty"`
}

type WebSocketTappedEntryMessage struct {
    *shared.WebSocketMessageMetadata
    Data *tap.OutputChannelItem
}

func CreateBaseEntryWebSocketMessage(base *BaseEntryDetails) ([]byte, error) {
    message := &WebSocketEntryMessage{
        WebSocketMessageMetadata: &shared.WebSocketMessageMetadata{
            MessageType: shared.WebSocketMessageTypeEntry,
        },
        Data: base,
    }
    return json.Marshal(message)
}

func CreateWebsocketTappedEntryMessage(base *tap.OutputChannelItem) ([]byte, error) {
    message := &WebSocketTappedEntryMessage{
        WebSocketMessageMetadata: &shared.WebSocketMessageMetadata{
            MessageType: shared.WebSocketMessageTypeTappedEntry,
        },
        Data: base,
    }
    return json.Marshal(message)
}

// ExtendedHAR is the top level object of a HAR log.
type ExtendedHAR struct {
    Log *ExtendedLog `json:"log"`
}

// ExtendedLog is the HAR HTTP request and response log.
type ExtendedLog struct {
    // Version number of the HAR format.
    Version string `json:"version"`
    // Creator holds information about the log creator application.
    Creator *ExtendedCreator `json:"creator"`
    // Entries is a list containing requests and responses.
    Entries []*har.Entry `json:"entries"`
}

type ExtendedCreator struct {
    *har.Creator
    Source string `json:"_source"`
}
@@ -32,7 +32,7 @@ Now you will be able to import `github.com/up9inc/mizu/resolver` in any `.go` fi
errOut := make(chan error, 100)
k8sResolver, err := resolver.NewFromOutOfCluster("", errOut)
if err != nil {
rlog.Errorf("error creating k8s resolver %s", err)
fmt.Printf("error creating k8s resolver %s", err)
}

ctx, cancel := context.WithCancel(context.Background())
@@ -40,15 +40,15 @@ k8sResolver.Start(ctx)

resolvedName := k8sResolver.Resolve("10.107.251.91") // will always return `nil` in real scenarios as the internal map takes a moment to populate after `Start` is called
if resolvedName != nil {
rlog.Errorf("resolved 10.107.251.91=%s", *resolvedName)
fmt.Printf("resolved 10.107.251.91=%s", *resolvedName)
} else {
rlog.Error("Could not find a resolved name for 10.107.251.91")
fmt.Printf("Could not find a resolved name for 10.107.251.91")
}

for {
select {
case err := <- errOut:
rlog.Errorf("name resolving error %s", err)
fmt.Printf("name resolving error %s", err)
}
}
```
61 api/pkg/resolver/loader.go Normal file
@@ -0,0 +1,61 @@
package resolver

import (
    "k8s.io/client-go/kubernetes"
    _ "k8s.io/client-go/plugin/pkg/client/auth/azure"
    _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
    _ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
    _ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
    restclient "k8s.io/client-go/rest"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/util/homedir"
    "path/filepath"
)

func NewFromInCluster(errOut chan error) (*Resolver, error) {
    config, err := restclient.InClusterConfig()
    if err != nil {
        return nil, err
    }
    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        return nil, err
    }
    return &Resolver{clientConfig: config, clientSet: clientset, nameMap: make(map[string]string), errOut: errOut}, nil
}

func NewFromOutOfCluster(kubeConfigPath string, errOut chan error) (*Resolver, error) {
    if kubeConfigPath == "" {
        home := homedir.HomeDir()
        kubeConfigPath = filepath.Join(home, ".kube", "config")
    }

    configPathList := filepath.SplitList(kubeConfigPath)
    configLoadingRules := &clientcmd.ClientConfigLoadingRules{}
    if len(configPathList) <= 1 {
        configLoadingRules.ExplicitPath = kubeConfigPath
    } else {
        configLoadingRules.Precedence = configPathList
    }
    contextName := ""
    clientConfigLoader := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        configLoadingRules,
        &clientcmd.ConfigOverrides{
            CurrentContext: contextName,
        },
    )
    clientConfig, err := clientConfigLoader.ClientConfig()
    if err != nil {
        return nil, err
    }
    clientset, err := kubernetes.NewForConfig(clientConfig)
    if err != nil {
        return nil, err
    }

    return &Resolver{clientConfig: clientConfig, clientSet: clientset, nameMap: make(map[string]string), errOut: errOut}, nil
}

func NewFromExisting(clientConfig *restclient.Config, clientSet *kubernetes.Clientset, errOut chan error) *Resolver {
    return &Resolver{clientConfig: clientConfig, clientSet: clientSet, nameMap: make(map[string]string), errOut: errOut}
}
169 api/pkg/resolver/resolver.go Normal file
@@ -0,0 +1,169 @@
package resolver

import (
    "context"
    "errors"
    "fmt"
    k8serrors "k8s.io/apimachinery/pkg/api/errors"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
)

const (
    kubClientNullString = "None"
)

type Resolver struct {
    clientConfig *restclient.Config
    clientSet    *kubernetes.Clientset
    nameMap      map[string]string
    isStarted    bool
    errOut       chan error
}

func (resolver *Resolver) Start(ctx context.Context) {
    if !resolver.isStarted {
        resolver.isStarted = true
        go resolver.infiniteErrorHandleRetryFunc(ctx, resolver.watchServices)
        go resolver.infiniteErrorHandleRetryFunc(ctx, resolver.watchEndpoints)
        go resolver.infiniteErrorHandleRetryFunc(ctx, resolver.watchPods)
    }
}

func (resolver *Resolver) Resolve(name string) string {
    resolvedName, isFound := resolver.nameMap[name]
    if !isFound {
        return ""
    }
    return resolvedName
}

func (resolver *Resolver) watchPods(ctx context.Context) error {
    // empty namespace makes the client watch all namespaces
    watcher, err := resolver.clientSet.CoreV1().Pods("").Watch(ctx, metav1.ListOptions{Watch: true})
    if err != nil {
        return err
    }
    for {
        select {
        case event := <-watcher.ResultChan():
            if event.Object == nil {
                return errors.New("error in kubectl pod watch")
            }
            if event.Type == watch.Deleted {
                pod := event.Object.(*corev1.Pod)
                resolver.saveResolvedName(pod.Status.PodIP, "", event.Type)
            }
        case <-ctx.Done():
            watcher.Stop()
            return nil
        }
    }
}

func (resolver *Resolver) watchEndpoints(ctx context.Context) error {
    // empty namespace makes the client watch all namespaces
    watcher, err := resolver.clientSet.CoreV1().Endpoints("").Watch(ctx, metav1.ListOptions{Watch: true})
    if err != nil {
        return err
    }
    for {
        select {
        case event := <-watcher.ResultChan():
            if event.Object == nil {
                return errors.New("error in kubectl endpoint watch")
            }
            endpoint := event.Object.(*corev1.Endpoints)
            serviceHostname := fmt.Sprintf("%s.%s", endpoint.Name, endpoint.Namespace)
            if endpoint.Subsets != nil {
                for _, subset := range endpoint.Subsets {
                    var ports []int32
                    if subset.Ports != nil {
                        for _, portMapping := range subset.Ports {
                            if portMapping.Port > 0 {
                                ports = append(ports, portMapping.Port)
                            }
                        }
                    }
                    if subset.Addresses != nil {
                        for _, address := range subset.Addresses {
                            resolver.saveResolvedName(address.IP, serviceHostname, event.Type)
                            for _, port := range ports {
                                ipWithPort := fmt.Sprintf("%s:%d", address.IP, port)
                                resolver.saveResolvedName(ipWithPort, serviceHostname, event.Type)
                            }
                        }
                    }

                }
            }
        case <-ctx.Done():
            watcher.Stop()
            return nil
        }
    }
}

func (resolver *Resolver) watchServices(ctx context.Context) error {
    // empty namespace makes the client watch all namespaces
    watcher, err := resolver.clientSet.CoreV1().Services("").Watch(ctx, metav1.ListOptions{Watch: true})
    if err != nil {
        return err
    }
    for {
        select {
        case event := <-watcher.ResultChan():
            if event.Object == nil {
                return errors.New("error in kubectl service watch")
            }

            service := event.Object.(*corev1.Service)
            serviceHostname := fmt.Sprintf("%s.%s", service.Name, service.Namespace)
            if service.Spec.ClusterIP != "" && service.Spec.ClusterIP != kubClientNullString {
                resolver.saveResolvedName(service.Spec.ClusterIP, serviceHostname, event.Type)
            }
            if service.Status.LoadBalancer.Ingress != nil {
                for _, ingress := range service.Status.LoadBalancer.Ingress {
                    resolver.saveResolvedName(ingress.IP, serviceHostname, event.Type)
                }
            }
        case <-ctx.Done():
            watcher.Stop()
            return nil
        }
    }
}

func (resolver *Resolver) saveResolvedName(key string, resolved string, eventType watch.EventType) {
    if eventType == watch.Deleted {
        delete(resolver.nameMap, key)
        // fmt.Printf("setting %s=nil\n", key)
    } else {
        resolver.nameMap[key] = resolved
        // fmt.Printf("setting %s=%s\n", key, resolved)
    }
}

func (resolver *Resolver) infiniteErrorHandleRetryFunc(ctx context.Context, fun func(ctx context.Context) error) {
    for {
        err := fun(ctx)
        if err != nil {
            resolver.errOut <- err

            var statusError *k8serrors.StatusError
            if errors.As(err, &statusError) {
                if statusError.ErrStatus.Reason == metav1.StatusReasonForbidden {
                    fmt.Printf("Resolver loop encountered permission error, aborting event listening - %v\n", err)
                    return
                }
            }
        }
        if ctx.Err() != nil { // context was cancelled or errored
            return
        }
    }
}
15 api/pkg/routes/not_found_route.go Normal file
@@ -0,0 +1,15 @@
package routes

import "github.com/gofiber/fiber/v2"

// NotFoundRoute describes the 404 error route.
func NotFoundRoute(fiberApp *fiber.App) {
    fiberApp.Use(
        func(c *fiber.Ctx) error {
            return c.Status(fiber.StatusNotFound).JSON(fiber.Map{
                "error": true,
                "msg":   "sorry, endpoint is not found",
            })
        },
    )
}
20 api/pkg/routes/public_routes.go Normal file
@@ -0,0 +1,20 @@
package routes

import (
    "github.com/gofiber/fiber/v2"
    "mizuserver/pkg/controllers"
)

// EntriesRoutes describes the group of public API routes.
func EntriesRoutes(fiberApp *fiber.App) {
    routeGroup := fiberApp.Group("/api")

    routeGroup.Get("/entries", controllers.GetEntries)        // get entries (base/thin entries)
    routeGroup.Get("/entries/:entryId", controllers.GetEntry) // get single (full) entry

    routeGroup.Get("/har", controllers.GetHARs)
    routeGroup.Get("/resetDB", controllers.DeleteAllEntries)     // delete all entries
    routeGroup.Get("/generalStats", controllers.GetGeneralStats) // get general stats about entries in DB

    routeGroup.Get("/tapStatus", controllers.GetTappingStatus) // get tapping status
}
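Editor's note: the /api/har route above streams back the zip built by utils.ZipData (one .har file per resolved source), so a consumer only needs to save the response body to disk. A minimal sketch follows, not part of the diff; the localhost:8899 address and the output filename are assumptions.

package main

import (
    "io"
    "log"
    "net/http"
    "os"
)

func main() {
    // Assumed address; adjust to wherever the Fiber app is exposed.
    resp, err := http.Get("http://localhost:8899/api/har")
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    out, err := os.Create("mizu_hars.zip")
    if err != nil {
        log.Fatal(err)
    }
    defer out.Close()

    // Copy the streamed zip straight into the file.
    if _, err := io.Copy(out, resp.Body); err != nil {
        log.Fatal(err)
    }
}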
31 api/pkg/routes/socket_routes.go Normal file
@@ -0,0 +1,31 @@
package routes

import (
    "github.com/antoniodipinto/ikisocket"
    "github.com/gofiber/fiber/v2"
)

type EventHandlers interface {
    WebSocketConnect(ep *ikisocket.EventPayload)
    WebSocketDisconnect(ep *ikisocket.EventPayload)
    WebSocketClose(ep *ikisocket.EventPayload)
    WebSocketError(ep *ikisocket.EventPayload)
    WebSocketMessage(ep *ikisocket.EventPayload)
}

func WebSocketRoutes(app *fiber.App, eventHandlers EventHandlers) {
    app.Get("/ws", ikisocket.New(func(kws *ikisocket.Websocket) {
        kws.SetAttribute("is_tapper", false)
    }))

    app.Get("/wsTapper", ikisocket.New(func(kws *ikisocket.Websocket) {
        // Tapper clients are handled differently, they don't need to receive new message broadcasts.
        kws.SetAttribute("is_tapper", true)
    }))

    ikisocket.On(ikisocket.EventMessage, eventHandlers.WebSocketMessage)
    ikisocket.On(ikisocket.EventConnect, eventHandlers.WebSocketConnect)
    ikisocket.On(ikisocket.EventDisconnect, eventHandlers.WebSocketDisconnect)
    ikisocket.On(ikisocket.EventClose, eventHandlers.WebSocketClose) // This event is called when the server disconnects the user actively with .Close() method
    ikisocket.On(ikisocket.EventError, eventHandlers.WebSocketError) // On error event
}
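Editor's note: for completeness, this is roughly what a tapper-side client of /wsTapper looks like. It is a hand-rolled sketch, not the code path used by api/main.go (which goes through shared.ConnectToSocketServer and models.CreateWebsocketTappedEntryMessage); the address and the "tappedEntry" message type string are assumptions made purely for illustration.

package main

import (
    "log"

    "github.com/gorilla/websocket"
)

func main() {
    // Assumed aggregator address; in the real deployment it comes from --aggregator-address.
    conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8899/wsTapper", nil)
    if err != nil {
        log.Fatalf("dial failed: %v", err)
    }
    defer conn.Close()

    // A hand-written envelope standing in for models.CreateWebsocketTappedEntryMessage output;
    // the actual messageType constant lives in the shared package and is assumed here.
    payload := []byte(`{"messageType":"tappedEntry","data":{}}`)
    if err := conn.WriteMessage(websocket.TextMessage, payload); err != nil {
        log.Fatalf("write failed: %v", err)
    }
}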
@@ -5,7 +5,7 @@ import (
"encoding/xml"
"errors"
"fmt"
"github.com/up9inc/mizu/tap"
"mizuserver/pkg/tap"
"net/url"
"strings"

@@ -15,8 +15,8 @@ import (
)

func FilterSensitiveInfoFromHarRequest(harOutputItem *tap.OutputChannelItem, options *shared.TrafficFilteringOptions) {
harOutputItem.HarEntry.Request.Headers = filterHarHeaders(harOutputItem.HarEntry.Request.Headers)
harOutputItem.HarEntry.Response.Headers = filterHarHeaders(harOutputItem.HarEntry.Response.Headers)
filterHarHeaders(harOutputItem.HarEntry.Request.Headers)
filterHarHeaders(harOutputItem.HarEntry.Response.Headers)

harOutputItem.HarEntry.Request.Cookies = make([]har.Cookie, 0, 0)
harOutputItem.HarEntry.Response.Cookies = make([]har.Cookie, 0, 0)
@@ -44,19 +44,12 @@ func FilterSensitiveInfoFromHarRequest(harOutputItem *tap.OutputChannelItem, opt
}
}

func filterHarHeaders(headers []har.Header) []har.Header {
newHeaders := make([]har.Header, 0)
func filterHarHeaders(headers []har.Header) {
for i, header := range headers {
if strings.ToLower(header.Name) == "cookie" {
continue
} else if isFieldNameSensitive(header.Name) {
newHeaders = append(newHeaders, har.Header{Name: header.Name, Value: maskedFieldPlaceholderValue})
if isFieldNameSensitive(header.Name) {
headers[i].Value = maskedFieldPlaceholderValue
} else {
newHeaders = append(newHeaders, header)
}
}
return newHeaders
}

func getContentTypeHeaderValue(headers []har.Header) string {
@@ -158,11 +151,9 @@ func filterJsonBody(bytes []byte) ([]byte, error) {

func filterJsonMap(jsonMap map[string] interface{}) {
for key, value := range jsonMap {
// Do not replace nil values with maskedFieldPlaceholderValue
if value == nil {
continue
return
}

nestedMap, isNested := value.(map[string] interface{})
if isNested {
filterJsonMap(nestedMap)
@@ -1,7 +1,6 @@
package tap

import (
"github.com/romana/rlog"
"sync"
"time"

@@ -21,21 +20,19 @@ type Cleaner struct {
cleanPeriod time.Duration
connectionTimeout time.Duration
stats CleanerStats
statsMutex sync.Mutex
statsMutex sync.Mutex
}

func (cl *Cleaner) clean() {
startCleanTime := time.Now()

cl.assemblerMutex.Lock()
rlog.Debugf("Assembler Stats before cleaning %s", cl.assembler.Dump())
flushed, closed := cl.assembler.FlushCloseOlderThan(startCleanTime.Add(-cl.connectionTimeout))
cl.assemblerMutex.Unlock()

deleted := cl.matcher.deleteOlderThan(startCleanTime.Add(-cl.connectionTimeout))

cl.statsMutex.Lock()
rlog.Debugf("Assembler Stats after cleaning %s", cl.assembler.Dump())
cl.stats.flushed += flushed
cl.stats.closed += closed
cl.stats.deleted += deleted
@@ -58,7 +55,7 @@ func (cl *Cleaner) dumpStats() CleanerStats {

stats := CleanerStats{
flushed: cl.stats.flushed,
closed: cl.stats.closed,
closed : cl.stats.closed,
deleted: cl.stats.deleted,
}

@@ -84,14 +84,14 @@ type GrpcAssembler struct {
framer *http2.Framer
}

func (ga *GrpcAssembler) readMessage() (uint32, interface{}, error) {
func (ga *GrpcAssembler) readMessage() (uint32, interface{}, string, error) {
// Exactly one Framer is used for each half connection.
// (Instead of creating a new Framer for each ReadFrame operation)
// This is needed in order to decompress the headers,
// because the compression context is updated with each requests/response.
frame, err := ga.framer.ReadFrame()
if err != nil {
return 0, nil, err
return 0, nil, "", err
}

streamID := frame.Header().StreamID
@@ -99,7 +99,7 @@ func (ga *GrpcAssembler) readMessage() (uint32, interface{}, error) {
ga.fragmentsByStream.appendFrame(streamID, frame)

if !(ga.isStreamEnd(frame)) {
return 0, nil, nil
return 0, nil, "", nil
}

headers, data := ga.fragmentsByStream.pop(streamID)
@@ -137,10 +137,10 @@ func (ga *GrpcAssembler) readMessage() (uint32, interface{}, error) {
ContentLength: int64(len(dataString)),
}
} else {
return 0, nil, errors.New("Failed to assemble stream: neither a request nor a message")
return 0, nil, "", errors.New("Failed to assemble stream: neither a request nor a message")
}

return streamID, messageHTTP1, nil
return streamID, messageHTTP1, dataString, nil
}

func (ga *GrpcAssembler) isStreamEnd(frame http2.Frame) bool {
@@ -1,12 +1,9 @@
package tap

import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
@@ -18,8 +15,7 @@ import (
)

const readPermission = 0644
const harFilenameSuffix = ".har"
const tempFilenameSuffix = ".har.tmp"
const tempFilenamePrefix = "har_writer"

type PairChanItem struct {
Request *http.Request
@@ -27,13 +23,12 @@ type PairChanItem struct {
Response *http.Response
ResponseTime time.Time
RequestSenderIp string
ConnectionInfo *ConnectionInfo
}

func openNewHarFile(filename string) *HarFile {
file, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, readPermission)
if err != nil {
log.Panicf("Failed to open output file: %s (%v,%+v)", err, err, err)
panic(fmt.Sprintf("Failed to open output file: %s (%v,%+v)", err, err, err))
}

harFile := HarFile{file: file, entryCount: 0}
@@ -43,35 +38,20 @@ func openNewHarFile(filename string) *HarFile {
}

type HarFile struct {
file *os.File
file *os.File
entryCount int
}

func NewEntry(request *http.Request, requestTime time.Time, response *http.Response, responseTime time.Time) (*har.Entry, error) {
harRequest, err := har.NewRequest(request, false)
harRequest, err := har.NewRequest(request, true)
if err != nil {
SilentError("convert-request-to-har", "Failed converting request to HAR %s (%v,%+v)", err, err, err)
SilentError("convert-request-to-har", "Failed converting request to HAR %s (%v,%+v)\n", err, err, err)
return nil, errors.New("Failed converting request to HAR")
}

// For requests with multipart/form-data or application/x-www-form-urlencoded Content-Type,
// martian/har will parse the request body and place the parameters in harRequest.PostData.Params
// instead of harRequest.PostData.Text (as the HAR spec requires it).
// Mizu currently only looks at PostData.Text. Therefore, instead of letting martian/har set the content of
// PostData, always copy the request body to PostData.Text.
if (request.ContentLength > 0) {
reqBody, err := ioutil.ReadAll(request.Body)
if err != nil {
SilentError("read-request-body", "Failed converting request to HAR %s (%v,%+v)", err, err, err)
return nil, errors.New("Failed reading request body")
}
request.Body = ioutil.NopCloser(bytes.NewReader(reqBody))
harRequest.PostData.Text = string(reqBody)
}

harResponse, err := har.NewResponse(response, true)
if err != nil {
SilentError("convert-response-to-har", "Failed converting response to HAR %s (%v,%+v)", err, err, err)
SilentError("convert-response-to-har", "Failed converting response to HAR %s (%v,%+v)\n", err, err, err)
return nil, errors.New("Failed converting response to HAR")
}

@@ -82,7 +62,7 @@ func NewEntry(request *http.Request, requestTime time.Time, response *http.Respo

status, err := strconv.Atoi(response.Header.Get(":status"))
if err != nil {
SilentError("convert-response-status-for-har", "Failed converting status to int %s (%v,%+v)", err, err, err)
SilentError("convert-response-status-for-har", "Failed converting status to int %s (%v,%+v)\n", err, err, err)
return nil, errors.New("Failed converting response status to int for HAR")
}
harResponse.Status = status
@@ -105,13 +85,13 @@ func NewEntry(request *http.Request, requestTime time.Time, response *http.Respo

harEntry := har.Entry{
StartedDateTime: time.Now().UTC(),
Time: totalTime,
Request: harRequest,
Response: harResponse,
Cache: &har.Cache{},
Time: totalTime,
Request: harRequest,
Response: harResponse,
Cache: &har.Cache{},
Timings: &har.Timings{
Send: -1,
Wait: -1,
Send: -1,
Wait: -1,
Receive: totalTime,
},
}
@@ -122,7 +102,7 @@ func NewEntry(request *http.Request, requestTime time.Time, response *http.Respo

func (f *HarFile) WriteEntry(harEntry *har.Entry) {
harEntryJson, err := json.Marshal(harEntry)
if err != nil {
SilentError("har-entry-marshal", "Failed converting har entry object to JSON%s (%v,%+v)", err, err, err)
SilentError("har-entry-marshal", "Failed converting har entry object to JSON%s (%v,%+v)\n", err, err, err)
return
}

@@ -136,7 +116,7 @@ func (f *HarFile) WriteEntry(harEntry *har.Entry) {
harEntryString := append([]byte(separator), harEntryJson...)

if _, err := f.file.Write(harEntryString); err != nil {
log.Panicf("Failed to write to output file: %s (%v,%+v)", err, err, err)
panic(fmt.Sprintf("Failed to write to output file: %s (%v,%+v)", err, err, err))
}

f.entryCount++
@@ -151,64 +131,63 @@ func (f *HarFile) Close() {

err := f.file.Close()
if err != nil {
log.Panicf("Failed to close output file: %s (%v,%+v)", err, err, err)
panic(fmt.Sprintf("Failed to close output file: %s (%v,%+v)", err, err, err))
}
}

func (f *HarFile) writeHeader() {
func (f*HarFile) writeHeader() {
header := []byte(`{"log": {"version": "1.2", "creator": {"name": "Mizu", "version": "0.0.1"}, "entries": [`)
if _, err := f.file.Write(header); err != nil {
log.Panicf("Failed to write header to output file: %s (%v,%+v)", err, err, err)
panic(fmt.Sprintf("Failed to write header to output file: %s (%v,%+v)", err, err, err))
}
}

func (f *HarFile) writeTrailer() {
func (f*HarFile) writeTrailer() {
trailer := []byte("]}}")
if _, err := f.file.Write(trailer); err != nil {
log.Panicf("Failed to write trailer to output file: %s (%v,%+v)", err, err, err)
panic(fmt.Sprintf("Failed to write trailer to output file: %s (%v,%+v)", err, err, err))
}
}

func NewHarWriter(outputDir string, maxEntries int) *HarWriter {
return &HarWriter{
OutputDirPath: outputDir,
MaxEntries: maxEntries,
PairChan: make(chan *PairChanItem),
OutChan: make(chan *OutputChannelItem, 1000),
currentFile: nil,
done: make(chan bool),
MaxEntries: maxEntries,
PairChan: make(chan *PairChanItem),
OutChan: make(chan *OutputChannelItem, 1000),
currentFile: nil,
done: make(chan bool),
}
}

type OutputChannelItem struct {
HarEntry *har.Entry
ConnectionInfo *ConnectionInfo
ValidationRulesChecker string
HarEntry *har.Entry
RequestSenderIp string
}

type HarWriter struct {
OutputDirPath string
MaxEntries int
PairChan chan *PairChanItem
OutChan chan *OutputChannelItem
currentFile *HarFile
done chan bool
MaxEntries int
PairChan chan *PairChanItem
OutChan chan *OutputChannelItem
currentFile *HarFile
done chan bool
}

func (hw *HarWriter) WritePair(request *http.Request, requestTime time.Time, response *http.Response, responseTime time.Time, connectionInfo *ConnectionInfo) {
func (hw *HarWriter) WritePair(request *http.Request, requestTime time.Time, response *http.Response, responseTime time.Time, requestSenderIp string) {
hw.PairChan <- &PairChanItem{
Request: request,
RequestTime: requestTime,
Response: response,
ResponseTime: responseTime,
ConnectionInfo: connectionInfo,
Request: request,
RequestTime: requestTime,
Response: response,
ResponseTime: responseTime,
RequestSenderIp: requestSenderIp,
}
}

func (hw *HarWriter) Start() {
if hw.OutputDirPath != "" {
if err := os.MkdirAll(hw.OutputDirPath, os.ModePerm); err != nil {
log.Panicf("Failed to create output directory: %s (%v,%+v)", err, err, err)
panic(fmt.Sprintf("Failed to create output directory: %s (%v,%+v)", err, err, err))
}
}

@@ -231,8 +210,8 @@ func (hw *HarWriter) Start() {
}
} else {
hw.OutChan <- &OutputChannelItem{
HarEntry: harEntry,
ConnectionInfo: pair.ConnectionInfo,
HarEntry: harEntry,
RequestSenderIp: pair.RequestSenderIp,
}
}
}
@@ -241,17 +220,16 @@ func (hw *HarWriter) Start() {
hw.closeFile()
}
hw.done <- true
}()
} ()
}

func (hw *HarWriter) Stop() {
close(hw.PairChan)
<-hw.done
close(hw.OutChan)
}

func (hw *HarWriter) openNewFile() {
filename := buildFilename(hw.OutputDirPath, time.Now(), tempFilenameSuffix)
filename := filepath.Join(os.TempDir(), fmt.Sprintf("%s_%d", tempFilenamePrefix, time.Now().UnixNano()))
hw.currentFile = openNewHarFile(filename)
}

@@ -260,15 +238,15 @@ func (hw *HarWriter) closeFile() {
tmpFilename := hw.currentFile.file.Name()
hw.currentFile = nil

filename := buildFilename(hw.OutputDirPath, time.Now(), harFilenameSuffix)
filename := buildFilename(hw.OutputDirPath, time.Now())
err := os.Rename(tmpFilename, filename)
if err != nil {
SilentError("Rename-file", "cannot rename file: %s (%v,%+v)", err, err, err)
SilentError("Rename-file", "cannot rename file: %s (%v,%+v)\n", err, err, err)
}
}

func buildFilename(dir string, t time.Time, suffix string) string {
func buildFilename(dir string, t time.Time) string {
// (epoch time in nanoseconds)__(YYYY_Month_DD__hh-mm-ss).har
filename := fmt.Sprintf("%d__%s%s", t.UnixNano(), t.Format("2006_Jan_02__15-04-05"), suffix)
filename := fmt.Sprintf("%d__%s.har", t.UnixNano(), t.Format("2006_Jan_02__15-04-05"))
return filepath.Join(dir, filename)
}
209 api/pkg/tap/http_matcher.go Normal file
@@ -0,0 +1,209 @@
package tap

import (
    "fmt"
    "net/http"
    "strconv"
    "strings"
    "time"

    "github.com/orcaman/concurrent-map"
)

type requestResponsePair struct {
    Request  httpMessage `json:"request"`
    Response httpMessage `json:"response"`
}

type envoyMessageWrapper struct {
    HttpBufferedTrace requestResponsePair `json:"http_buffered_trace"`
}

type headerKeyVal struct {
    Key   string `json:"key"`
    Value string `json:"value"`
}

type messageBody struct {
    Truncated bool   `json:"truncated"`
    AsBytes   string `json:"as_bytes"`
}

type httpMessage struct {
    IsRequest       bool
    Headers         []headerKeyVal `json:"headers"`
    HTTPVersion     string         `json:"httpVersion"`
    Body            messageBody    `json:"body"`
    captureTime     time.Time
    orig            interface{}
    requestSenderIp string
}

// Key is {client_addr}:{client_port}->{dest_addr}:{dest_port}
type requestResponseMatcher struct {
    openMessagesMap cmap.ConcurrentMap
}

func createResponseRequestMatcher() requestResponseMatcher {
    newMatcher := &requestResponseMatcher{openMessagesMap: cmap.New()}
    return *newMatcher
}

func (matcher *requestResponseMatcher) registerRequest(ident string, request *http.Request, captureTime time.Time, body string, isHTTP2 bool) *envoyMessageWrapper {
    split := splitIdent(ident)
    key := genKey(split)

    messageExtraHeaders := []headerKeyVal{
        {Key: "x-up9-source", Value: split[0]},
        {Key: "x-up9-destination", Value: split[1] + ":" + split[3]},
    }

    requestHTTPMessage := requestToMessage(request, captureTime, body, &messageExtraHeaders, isHTTP2, split[0])

    if response, found := matcher.openMessagesMap.Pop(key); found {
        // Type assertion always succeeds because all of the map's values are of httpMessage type
        responseHTTPMessage := response.(*httpMessage)
        if responseHTTPMessage.IsRequest {
            SilentError("Request-Duplicate", "Got duplicate request with same identifier\n")
            return nil
        }
        Debug("Matched open Response for %s\n", key)
        return matcher.preparePair(&requestHTTPMessage, responseHTTPMessage)
    }

    matcher.openMessagesMap.Set(key, &requestHTTPMessage)
    Debug("Registered open Request for %s\n", key)
    return nil
}

func (matcher *requestResponseMatcher) registerResponse(ident string, response *http.Response, captureTime time.Time, body string, isHTTP2 bool) *envoyMessageWrapper {
    split := splitIdent(ident)
    key := genKey(split)

    responseHTTPMessage := responseToMessage(response, captureTime, body, isHTTP2)

    if request, found := matcher.openMessagesMap.Pop(key); found {
        // Type assertion always succeeds because all of the map's values are of httpMessage type
        requestHTTPMessage := request.(*httpMessage)
        if !requestHTTPMessage.IsRequest {
            SilentError("Response-Duplicate", "Got duplicate response with same identifier\n")
            return nil
        }
        Debug("Matched open Request for %s\n", key)
        return matcher.preparePair(requestHTTPMessage, &responseHTTPMessage)
    }

    matcher.openMessagesMap.Set(key, &responseHTTPMessage)
    Debug("Registered open Response for %s\n", key)
    return nil
}

func (matcher *requestResponseMatcher) preparePair(requestHTTPMessage *httpMessage, responseHTTPMessage *httpMessage) *envoyMessageWrapper {
    matcher.addDuration(requestHTTPMessage, responseHTTPMessage)

    return &envoyMessageWrapper{
        HttpBufferedTrace: requestResponsePair{
            Request:  *requestHTTPMessage,
            Response: *responseHTTPMessage,
        },
    }
}

func requestToMessage(request *http.Request, captureTime time.Time, body string, messageExtraHeaders *[]headerKeyVal, isHTTP2 bool, requestSenderIp string) httpMessage {
    messageHeaders := make([]headerKeyVal, 0)

    for key, value := range request.Header {
        messageHeaders = append(messageHeaders, headerKeyVal{Key: key, Value: value[0]})
    }

    if !isHTTP2 {
        messageHeaders = append(messageHeaders, headerKeyVal{Key: ":method", Value: request.Method})
        messageHeaders = append(messageHeaders, headerKeyVal{Key: ":path", Value: request.RequestURI})
        messageHeaders = append(messageHeaders, headerKeyVal{Key: ":authority", Value: request.Host})
        messageHeaders = append(messageHeaders, headerKeyVal{Key: ":scheme", Value: "http"})
    }

    messageHeaders = append(messageHeaders, headerKeyVal{Key: "x-request-start", Value: fmt.Sprintf("%.3f", float64(captureTime.UnixNano()) / float64(1000000000))})

    messageHeaders = append(messageHeaders, *messageExtraHeaders...)

    httpVersion := request.Proto

    requestBody := messageBody{Truncated: false, AsBytes: body}

    return httpMessage{
        IsRequest:       true,
        Headers:         messageHeaders,
        HTTPVersion:     httpVersion,
        Body:            requestBody,
        captureTime:     captureTime,
        orig:            request,
        requestSenderIp: requestSenderIp,
    }
}

func responseToMessage(response *http.Response, captureTime time.Time, body string, isHTTP2 bool) httpMessage {
    messageHeaders := make([]headerKeyVal, 0)

    for key, value := range response.Header {
        messageHeaders = append(messageHeaders, headerKeyVal{Key: key, Value: value[0]})
    }

    if !isHTTP2 {
        messageHeaders = append(messageHeaders, headerKeyVal{Key: ":status", Value: strconv.Itoa(response.StatusCode)})
|
||||
}
|
||||
|
||||
httpVersion := response.Proto
|
||||
|
||||
requestBody := messageBody{Truncated: false, AsBytes: body}
|
||||
|
||||
return httpMessage{
|
||||
IsRequest: false,
|
||||
Headers: messageHeaders,
|
||||
HTTPVersion: httpVersion,
|
||||
Body: requestBody,
|
||||
captureTime: captureTime,
|
||||
orig: response,
|
||||
}
|
||||
}
|
||||
|
||||
func (matcher *requestResponseMatcher) addDuration(requestHTTPMessage *httpMessage, responseHTTPMessage *httpMessage) {
|
||||
durationMs := float64(responseHTTPMessage.captureTime.UnixNano() / 1000000) - float64(requestHTTPMessage.captureTime.UnixNano() / 1000000)
|
||||
if durationMs < 1 {
|
||||
durationMs = 1
|
||||
}
|
||||
|
||||
responseHTTPMessage.Headers = append(responseHTTPMessage.Headers, headerKeyVal{Key: "x-up9-duration-ms", Value: fmt.Sprintf("%.0f", durationMs)})
|
||||
}
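The duration header is computed by truncating both capture times to whole milliseconds before subtracting, with a floor of 1 ms. A quick worked check of that arithmetic, using fixed timestamps for determinism:

package main

import (
	"fmt"
	"time"
)

func durationMs(reqTime, resTime time.Time) float64 {
	ms := float64(resTime.UnixNano()/1000000) - float64(reqTime.UnixNano()/1000000)
	if ms < 1 {
		ms = 1 // sub-millisecond exchanges are reported as 1 ms
	}
	return ms
}

func main() {
	req := time.Unix(0, 1000000000)            // exactly on a millisecond boundary
	res := req.Add(2500 * time.Microsecond)    // 2.5 ms later
	fmt.Printf("x-up9-duration-ms: %.0f\n", durationMs(req, res)) // prints 2: the 0.5 ms remainder is truncated
}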
|
||||
|
||||
func splitIdent(ident string) []string {
|
||||
ident = strings.Replace(ident, "->", " ", -1)
|
||||
return strings.Split(ident, " ")
|
||||
}
|
||||
|
||||
func genKey(split []string) string {
|
||||
key := fmt.Sprintf("%s:%s->%s:%s,%s", split[0], split[2], split[1], split[3], split[4])
|
||||
return key
|
||||
}
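To make the key scheme concrete: readers build idents of the form {src_ip}->{dst_ip} {src_port}->{dst_port} {counter}, and splitIdent plus genKey fold that into the {client_addr}:{client_port}->{dest_addr}:{dest_port},{counter} map key described above. The same string munging in isolation:

package main

import (
	"fmt"
	"strings"
)

func splitIdent(ident string) []string {
	ident = strings.Replace(ident, "->", " ", -1)
	return strings.Split(ident, " ")
}

func genKey(split []string) string {
	return fmt.Sprintf("%s:%s->%s:%s,%s", split[0], split[2], split[1], split[3], split[4])
}

func main() {
	// Ident as built for a request: srcIP->dstIP srcPort->dstPort messageCount
	ident := "10.0.0.5->10.0.0.9 43210->8080 7"
	fmt.Println(genKey(splitIdent(ident))) // 10.0.0.5:43210->10.0.0.9:8080,7
}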
|
||||
|
||||
func (matcher *requestResponseMatcher) deleteOlderThan(t time.Time) int {
|
||||
keysToPop := make([]string, 0)
|
||||
for item := range matcher.openMessagesMap.IterBuffered() {
|
||||
// Map only contains values of type httpMessage
|
||||
message, _ := item.Val.(*httpMessage)
|
||||
|
||||
if message.captureTime.Before(t) {
|
||||
keysToPop = append(keysToPop, item.Key)
|
||||
}
|
||||
}
|
||||
|
||||
numDeleted := len(keysToPop)
|
||||
|
||||
for _, key := range keysToPop {
|
||||
_, _ = matcher.openMessagesMap.Pop(key)
|
||||
}
|
||||
|
||||
return numDeleted
|
||||
}
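The matcher is essentially pop-or-store: whichever half of an exchange arrives first is parked under the shared key, the second half pops it to form a pair, and deleteOlderThan reaps halves whose counterpart never arrived. A simplified sketch of that pattern using a plain map and mutex in place of the concurrent map used here:

package main

import (
	"fmt"
	"sync"
	"time"
)

type half struct {
	isRequest   bool
	captureTime time.Time
	payload     string
}

type matcher struct {
	mu   sync.Mutex
	open map[string]half
}

// offer parks msg under key, or returns the previously parked counterpart.
func (m *matcher) offer(key string, msg half) (pairWith half, matched bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if other, ok := m.open[key]; ok && other.isRequest != msg.isRequest {
		delete(m.open, key)
		return other, true
	}
	m.open[key] = msg
	return half{}, false
}

// reapOlderThan drops parked halves captured before t, returning how many were removed.
func (m *matcher) reapOlderThan(t time.Time) int {
	m.mu.Lock()
	defer m.mu.Unlock()
	n := 0
	for k, v := range m.open {
		if v.captureTime.Before(t) {
			delete(m.open, k)
			n++
		}
	}
	return n
}

func main() {
	m := &matcher{open: map[string]half{}}
	key := "10.0.0.5:43210->10.0.0.9:8080,7"
	m.offer(key, half{isRequest: true, captureTime: time.Now(), payload: "GET /"})
	if req, ok := m.offer(key, half{isRequest: false, captureTime: time.Now(), payload: "200 OK"}); ok {
		fmt.Println("matched request:", req.payload)
	}
	fmt.Println("reaped:", m.reapOlderThan(time.Now()))
}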
|
||||
@@ -3,39 +3,30 @@ package tap
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
b64 "encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/bradleyfalzon/tlsx"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const checkTLSPacketAmount = 100
|
||||
|
||||
type httpReaderDataMsg struct {
|
||||
bytes []byte
|
||||
timestamp time.Time
|
||||
}
|
||||
|
||||
type tcpID struct {
|
||||
srcIP string
|
||||
dstIP string
|
||||
srcIP string
|
||||
dstIP string
|
||||
srcPort string
|
||||
dstPort string
|
||||
}
|
||||
|
||||
type ConnectionInfo struct {
|
||||
ClientIP string
|
||||
ClientPort string
|
||||
ServerIP string
|
||||
ServerPort string
|
||||
IsOutgoing bool
|
||||
}
|
||||
|
||||
func (tid *tcpID) String() string {
|
||||
return fmt.Sprintf("%s->%s %s->%s", tid.srcIP, tid.dstIP, tid.srcPort, tid.dstPort)
|
||||
}
|
||||
@@ -46,45 +37,27 @@ func (tid *tcpID) String() string {
|
||||
* Implements io.Reader interface (Read)
|
||||
*/
|
||||
type httpReader struct {
|
||||
ident string
|
||||
tcpID tcpID
|
||||
isClient bool
|
||||
isHTTP2 bool
|
||||
isOutgoing bool
|
||||
msgQueue chan httpReaderDataMsg // Channel of captured reassembled tcp payload
|
||||
data []byte
|
||||
captureTime time.Time
|
||||
hexdump bool
|
||||
parent *tcpStream
|
||||
grpcAssembler GrpcAssembler
|
||||
messageCount uint
|
||||
harWriter *HarWriter
|
||||
packetsSeen uint
|
||||
outboundLinkWriter *OutboundLinkWriter
|
||||
ident string
|
||||
tcpID tcpID
|
||||
isClient bool
|
||||
isHTTP2 bool
|
||||
msgQueue chan httpReaderDataMsg // Channel of captured reassembled tcp payload
|
||||
data []byte
|
||||
captureTime time.Time
|
||||
hexdump bool
|
||||
parent *tcpStream
|
||||
grpcAssembler GrpcAssembler
|
||||
messageCount uint
|
||||
harWriter *HarWriter
|
||||
}
|
||||
|
||||
func (h *httpReader) Read(p []byte) (int, error) {
|
||||
var msg httpReaderDataMsg
|
||||
|
||||
ok := true
|
||||
for ok && len(h.data) == 0 {
|
||||
msg, ok = <-h.msgQueue
|
||||
h.data = msg.bytes
|
||||
|
||||
h.captureTime = msg.timestamp
|
||||
if len(h.data) > 0 {
|
||||
h.packetsSeen += 1
|
||||
}
|
||||
if h.packetsSeen < checkTLSPacketAmount && len(msg.bytes) > 5 { // packets with less than 5 bytes cause tlsx to panic
|
||||
clientHello := tlsx.ClientHello{}
|
||||
err := clientHello.Unmarshall(msg.bytes)
|
||||
if err == nil {
|
||||
statsTracker.incTlsConnectionsCount()
|
||||
Debug("Detected TLS client hello with SNI %s\n", clientHello.SNI)
|
||||
numericPort, _ := strconv.Atoi(h.tcpID.dstPort)
|
||||
h.outboundLinkWriter.WriteOutboundLink(h.tcpID.srcIP, h.tcpID.dstIP, numericPort, clientHello.SNI, TLSProtocol)
|
||||
}
|
||||
}
|
||||
}
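Only the first few payloads of a stream are checked for a TLS ClientHello, and payloads of 5 bytes or less are skipped because tlsx panics on them; when parsing succeeds the SNI is reported as an outbound TLS link. The check in isolation (the helper name is illustrative):

package tapexample

import (
	"github.com/bradleyfalzon/tlsx"
)

// detectSNI tries to parse payload as a TLS ClientHello and returns the SNI.
// Payloads of 5 bytes or less are skipped, mirroring the guard above.
func detectSNI(payload []byte) (sni string, ok bool) {
	if len(payload) <= 5 {
		return "", false
	}
	clientHello := tlsx.ClientHello{}
	if err := clientHello.Unmarshall(payload); err != nil {
		// Not a ClientHello (or truncated); treat as plain traffic.
		return "", false
	}
	return clientHello.SNI, true
}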
|
||||
if !ok || len(h.data) == 0 {
|
||||
return 0, io.EOF
|
||||
@@ -100,7 +73,7 @@ func (h *httpReader) run(wg *sync.WaitGroup) {
|
||||
b := bufio.NewReader(h)
|
||||
|
||||
if isHTTP2, err := checkIsHTTP2Connection(b, h.isClient); err != nil {
|
||||
SilentError("HTTP/2-Prepare-Connection", "stream %s Failed to check if client is HTTP/2: %s (%v,%+v)", h.ident, err, err, err)
|
||||
SilentError("HTTP/2-Prepare-Connection", "stream %s Failed to check if client is HTTP/2: %s (%v,%+v)\n", h.ident, err, err, err)
|
||||
// Do something?
|
||||
} else {
|
||||
h.isHTTP2 = isHTTP2
|
||||
@@ -109,7 +82,7 @@ func (h *httpReader) run(wg *sync.WaitGroup) {
|
||||
if h.isHTTP2 {
|
||||
err := prepareHTTP2Connection(b, h.isClient)
|
||||
if err != nil {
|
||||
SilentError("HTTP/2-Prepare-Connection-After-Check", "stream %s error: %s (%v,%+v)", h.ident, err, err, err)
|
||||
SilentError("HTTP/2-Prepare-Connection-After-Check", "stream %s error: %s (%v,%+v)\n", h.ident, err, err, err)
|
||||
}
|
||||
h.grpcAssembler = createGrpcAssembler(b)
|
||||
}
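checkIsHTTP2Connection itself is not shown in this diff, so the snippet below is only one plausible technique: cleartext HTTP/2 client connections begin with a fixed connection preface, which can be detected by peeking the buffered reader without consuming bytes. An assumed sketch, not necessarily what the tapper does:

package tapexample

import (
	"bufio"
	"bytes"

	"golang.org/x/net/http2"
)

// peekIsHTTP2Client reports whether the buffered client stream starts with the
// HTTP/2 connection preface ("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n").
func peekIsHTTP2Client(b *bufio.Reader) (bool, error) {
	preface := []byte(http2.ClientPreface)
	head, err := b.Peek(len(preface))
	if err != nil {
		return false, err
	}
	return bytes.Equal(head, preface), nil
}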
|
||||
@@ -120,7 +93,7 @@ func (h *httpReader) run(wg *sync.WaitGroup) {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
SilentError("HTTP/2", "stream %s error: %s (%v,%+v)", h.ident, err, err, err)
|
||||
SilentError("HTTP/2", "stream %s error: %s (%v,%+v)\n", h.ident, err, err, err)
|
||||
continue
|
||||
}
|
||||
} else if h.isClient {
|
||||
@@ -128,7 +101,7 @@ func (h *httpReader) run(wg *sync.WaitGroup) {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
SilentError("HTTP-request", "stream %s Request error: %s (%v,%+v)", h.ident, err, err, err)
|
||||
SilentError("HTTP-request", "stream %s Request error: %s (%v,%+v)\n", h.ident, err, err, err)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
@@ -136,7 +109,7 @@ func (h *httpReader) run(wg *sync.WaitGroup) {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
SilentError("HTTP-response", "stream %s Response error: %s (%v,%+v)", h.ident, err, err, err)
|
||||
SilentError("HTTP-response", "stream %s Response error: %s (%v,%+v)\n", h.ident, err, err, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -144,49 +117,38 @@ func (h *httpReader) run(wg *sync.WaitGroup) {
|
||||
}
|
||||
|
||||
func (h *httpReader) handleHTTP2Stream() error {
|
||||
streamID, messageHTTP1, err := h.grpcAssembler.readMessage()
|
||||
streamID, messageHTTP1, body, err := h.grpcAssembler.readMessage()
|
||||
h.messageCount++
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var reqResPair *requestResponsePair
|
||||
var connectionInfo *ConnectionInfo
|
||||
var reqResPair *envoyMessageWrapper
|
||||
|
||||
switch messageHTTP1 := messageHTTP1.(type) {
|
||||
case http.Request:
|
||||
ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.srcIP, h.tcpID.dstIP, h.tcpID.srcPort, h.tcpID.dstPort, streamID)
|
||||
connectionInfo = &ConnectionInfo{
|
||||
ClientIP: h.tcpID.srcIP,
|
||||
ClientPort: h.tcpID.srcPort,
|
||||
ServerIP: h.tcpID.dstIP,
|
||||
ServerPort: h.tcpID.dstPort,
|
||||
IsOutgoing: h.isOutgoing,
|
||||
}
|
||||
reqResPair = reqResMatcher.registerRequest(ident, &messageHTTP1, h.captureTime)
|
||||
reqResPair = reqResMatcher.registerRequest(ident, &messageHTTP1, h.captureTime, body, true)
|
||||
case http.Response:
|
||||
ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.dstIP, h.tcpID.srcIP, h.tcpID.dstPort, h.tcpID.srcPort, streamID)
|
||||
connectionInfo = &ConnectionInfo{
|
||||
ClientIP: h.tcpID.dstIP,
|
||||
ClientPort: h.tcpID.dstPort,
|
||||
ServerIP: h.tcpID.srcIP,
|
||||
ServerPort: h.tcpID.srcPort,
|
||||
IsOutgoing: h.isOutgoing,
|
||||
}
|
||||
reqResPair = reqResMatcher.registerResponse(ident, &messageHTTP1, h.captureTime)
|
||||
reqResPair = reqResMatcher.registerResponse(ident, &messageHTTP1, h.captureTime, body, true)
|
||||
}
|
||||
|
||||
if reqResPair != nil {
|
||||
statsTracker.incMatchedPairs()
|
||||
|
||||
if h.harWriter != nil {
|
||||
h.harWriter.WritePair(
|
||||
reqResPair.Request.orig.(*http.Request),
|
||||
reqResPair.Request.captureTime,
|
||||
reqResPair.Response.orig.(*http.Response),
|
||||
reqResPair.Response.captureTime,
|
||||
connectionInfo,
|
||||
reqResPair.HttpBufferedTrace.Request.orig.(*http.Request),
|
||||
reqResPair.HttpBufferedTrace.Request.captureTime,
|
||||
reqResPair.HttpBufferedTrace.Response.orig.(*http.Response),
|
||||
reqResPair.HttpBufferedTrace.Response.captureTime,
|
||||
reqResPair.HttpBufferedTrace.Request.requestSenderIp,
|
||||
)
|
||||
} else {
|
||||
jsonStr, err := json.Marshal(reqResPair)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
broadcastReqResPair(jsonStr)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -203,35 +165,37 @@ func (h *httpReader) handleHTTP1ClientStream(b *bufio.Reader) error {
|
||||
req.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
|
||||
s := len(body)
|
||||
if err != nil {
|
||||
SilentError("HTTP-request-body", "stream %s Got body err: %s", h.ident, err)
|
||||
SilentError("HTTP-request-body", "stream %s Got body err: %s\n", h.ident, err)
|
||||
} else if h.hexdump {
|
||||
Debug("Body(%d/0x%x) - %s", len(body), len(body), hex.Dump(body))
|
||||
Info("Body(%d/0x%x)\n%s\n", len(body), len(body), hex.Dump(body))
|
||||
}
|
||||
if err := req.Body.Close(); err != nil {
|
||||
SilentError("HTTP-request-body-close", "stream %s Failed to close request body: %s", h.ident, err)
|
||||
SilentError("HTTP-request-body-close", "stream %s Failed to close request body: %s\n", h.ident, err)
|
||||
}
|
||||
encoding := req.Header["Content-Encoding"]
|
||||
Debug("HTTP/1 Request: %s %s %s (Body:%d) -> %s", h.ident, req.Method, req.URL, s, encoding)
|
||||
bodyStr, err := readBody(body, encoding)
|
||||
if err != nil {
|
||||
SilentError("HTTP-request-body-decode", "stream %s Failed to decode body: %s\n", h.ident, err)
|
||||
}
|
||||
Info("HTTP/%s Request: %s %s (Body:%d)\n", h.ident, req.Method, req.URL, s)
|
||||
|
||||
ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.srcIP, h.tcpID.dstIP, h.tcpID.srcPort, h.tcpID.dstPort, h.messageCount)
|
||||
reqResPair := reqResMatcher.registerRequest(ident, req, h.captureTime)
|
||||
reqResPair := reqResMatcher.registerRequest(ident, req, h.captureTime, bodyStr, false)
|
||||
if reqResPair != nil {
|
||||
statsTracker.incMatchedPairs()
|
||||
|
||||
if h.harWriter != nil {
|
||||
h.harWriter.WritePair(
|
||||
reqResPair.Request.orig.(*http.Request),
|
||||
reqResPair.Request.captureTime,
|
||||
reqResPair.Response.orig.(*http.Response),
|
||||
reqResPair.Response.captureTime,
|
||||
&ConnectionInfo{
|
||||
ClientIP: h.tcpID.srcIP,
|
||||
ClientPort: h.tcpID.srcPort,
|
||||
ServerIP: h.tcpID.dstIP,
|
||||
ServerPort: h.tcpID.dstPort,
|
||||
IsOutgoing: h.isOutgoing,
|
||||
},
|
||||
reqResPair.HttpBufferedTrace.Request.orig.(*http.Request),
|
||||
reqResPair.HttpBufferedTrace.Request.captureTime,
|
||||
reqResPair.HttpBufferedTrace.Response.orig.(*http.Response),
|
||||
reqResPair.HttpBufferedTrace.Response.captureTime,
|
||||
reqResPair.HttpBufferedTrace.Request.requestSenderIp,
|
||||
)
|
||||
} else {
|
||||
jsonStr, err := json.Marshal(reqResPair)
|
||||
if err != nil {
|
||||
SilentError("HTTP-marshal", "stream %s Error convert request response to json: %s\n", h.ident, err)
|
||||
}
|
||||
broadcastReqResPair(jsonStr)
|
||||
}
|
||||
}
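Reading the body consumes req.Body, so the reader immediately swaps in a fresh NopCloser over the same bytes (the "rewind" comment) before the request is handed to the HAR writer, which may read it again. The idiom in isolation:

package tapexample

import (
	"bytes"
	"io"
	"net/http"
)

// readAndRewindBody drains req.Body and replaces it so later consumers
// (e.g. a HAR exporter) can read the same bytes again.
func readAndRewindBody(req *http.Request) ([]byte, error) {
	body, err := io.ReadAll(req.Body)
	if cerr := req.Body.Close(); cerr != nil && err == nil {
		err = cerr
	}
	req.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
	return body, err
}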
|
||||
|
||||
@@ -260,13 +224,13 @@ func (h *httpReader) handleHTTP1ServerStream(b *bufio.Reader) error {
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
|
||||
s := len(body)
|
||||
if err != nil {
|
||||
SilentError("HTTP-response-body", "HTTP/%s: failed to get body(parsed len:%d): %s", h.ident, s, err)
|
||||
SilentError("HTTP-response-body", "HTTP/%s: failed to get body(parsed len:%d): %s\n", h.ident, s, err)
|
||||
}
|
||||
if h.hexdump {
|
||||
Debug("Body(%d/0x%x) - %s", len(body), len(body), hex.Dump(body))
|
||||
Info("Body(%d/0x%x)\n%s\n", len(body), len(body), hex.Dump(body))
|
||||
}
|
||||
if err := res.Body.Close(); err != nil {
|
||||
SilentError("HTTP-response-body-close", "HTTP/%s: failed to close body(parsed len:%d): %s", h.ident, s, err)
|
||||
SilentError("HTTP-response-body-close", "HTTP/%s: failed to close body(parsed len:%d): %s\n", h.ident, s, err)
|
||||
}
|
||||
sym := ","
|
||||
if res.ContentLength > 0 && res.ContentLength != int64(s) {
|
||||
@@ -277,29 +241,54 @@ func (h *httpReader) handleHTTP1ServerStream(b *bufio.Reader) error {
|
||||
contentType = []string{http.DetectContentType(body)}
|
||||
}
|
||||
encoding := res.Header["Content-Encoding"]
|
||||
Debug("HTTP/1 Response: %s %s URL:%s (%d%s%d%s) -> %s", h.ident, res.Status, req, res.ContentLength, sym, s, contentType, encoding)
|
||||
Info("HTTP/%s Response: %s URL:%s (%d%s%d%s) -> %s\n", h.ident, res.Status, req, res.ContentLength, sym, s, contentType, encoding)
|
||||
bodyStr, err := readBody(body, encoding)
|
||||
if err != nil {
|
||||
SilentError("HTTP-response-body-decode", "stream %s Failed to decode body: %s\n", h.ident, err)
|
||||
}
|
||||
|
||||
ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.dstIP, h.tcpID.srcIP, h.tcpID.dstPort, h.tcpID.srcPort, h.messageCount)
|
||||
reqResPair := reqResMatcher.registerResponse(ident, res, h.captureTime)
|
||||
reqResPair := reqResMatcher.registerResponse(ident, res, h.captureTime, bodyStr, false)
|
||||
if reqResPair != nil {
|
||||
statsTracker.incMatchedPairs()
|
||||
|
||||
if h.harWriter != nil {
|
||||
h.harWriter.WritePair(
|
||||
reqResPair.Request.orig.(*http.Request),
|
||||
reqResPair.Request.captureTime,
|
||||
reqResPair.Response.orig.(*http.Response),
|
||||
reqResPair.Response.captureTime,
|
||||
&ConnectionInfo{
|
||||
ClientIP: h.tcpID.dstIP,
|
||||
ClientPort: h.tcpID.dstPort,
|
||||
ServerIP: h.tcpID.srcIP,
|
||||
ServerPort: h.tcpID.srcPort,
|
||||
IsOutgoing: h.isOutgoing,
|
||||
},
|
||||
reqResPair.HttpBufferedTrace.Request.orig.(*http.Request),
|
||||
reqResPair.HttpBufferedTrace.Request.captureTime,
|
||||
reqResPair.HttpBufferedTrace.Response.orig.(*http.Response),
|
||||
reqResPair.HttpBufferedTrace.Response.captureTime,
|
||||
reqResPair.HttpBufferedTrace.Request.requestSenderIp,
|
||||
)
|
||||
} else {
|
||||
jsonStr, err := json.Marshal(reqResPair)
|
||||
if err != nil {
|
||||
SilentError("HTTP-marshal", "stream %s Error convert request response to json: %s\n", h.ident, err)
|
||||
}
|
||||
broadcastReqResPair(jsonStr)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func readBody(bodyBytes []byte, encoding []string) (string, error) {
	var bodyBuffer io.Reader
	bodyBuffer = bytes.NewBuffer(bodyBytes)
	var err error
	if len(encoding) > 0 && (encoding[0] == "gzip" || encoding[0] == "deflate") {
		bodyBuffer, err = gzip.NewReader(bodyBuffer)
		if err != nil {
			SilentError("HTTP-gunzip", "Failed to gzip decode: %s\n", err)
			return "", err
		}
	}
	if _, ok := bodyBuffer.(*gzip.Reader); ok {
		err = bodyBuffer.(*gzip.Reader).Close()
		if err != nil {
			return "", err
		}
	}

	buf := new(bytes.Buffer)
	_, err = buf.ReadFrom(bodyBuffer)
	return b64.StdEncoding.EncodeToString(buf.Bytes()), err
}
@@ -13,6 +13,7 @@ import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/shared"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
@@ -23,8 +24,6 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/romana/rlog"
|
||||
|
||||
"github.com/google/gopacket"
|
||||
"github.com/google/gopacket/examples/util"
|
||||
"github.com/google/gopacket/ip4defrag"
|
||||
@@ -34,18 +33,20 @@ import (
|
||||
)
|
||||
|
||||
const AppPortsEnvVar = "APP_PORTS"
|
||||
const OutPortEnvVar = "WEB_SOCKET_PORT"
|
||||
const maxHTTP2DataLenEnvVar = "HTTP2_DATA_SIZE_LIMIT"
|
||||
const maxHTTP2DataLenDefault = 1 * 1024 * 1024 // 1MB
|
||||
// default is 1MB, more than the max size accepted by collector and traffic-dumper
|
||||
const maxHTTP2DataLenDefault = 1 * 1024 * 1024
|
||||
const cleanPeriod = time.Second * 10
|
||||
|
||||
var remoteOnlyOutboundPorts = []int{80, 443}
|
||||
const outboundThrottleCacheExpiryPeriod = time.Minute * 15
|
||||
var remoteOnlyOutboundPorts = []int { 80, 443 }
|
||||
|
||||
func parseAppPorts(appPortsList string) []int {
|
||||
ports := make([]int, 0)
|
||||
for _, portStr := range strings.Split(appPortsList, ",") {
|
||||
parsedInt, parseError := strconv.Atoi(portStr)
|
||||
if parseError != nil {
|
||||
log.Printf("Provided app port %v is not a valid number!", portStr)
|
||||
fmt.Println("Provided app port ", portStr, " is not a valid number!")
|
||||
} else {
|
||||
ports = append(ports, parsedInt)
|
||||
}
|
||||
@@ -53,15 +54,22 @@ func parseAppPorts(appPortsList string) []int {
|
||||
return ports
|
||||
}
|
||||
|
||||
var maxcount = flag.Int64("c", -1, "Only grab this many packets, then exit")
|
||||
func parseHostAppAddresses(hostAppAddressesString string) []string {
|
||||
if len(hostAppAddressesString) == 0 {
|
||||
return []string{}
|
||||
}
|
||||
return strings.Split(hostAppAddressesString, ",")
|
||||
}
|
||||
|
||||
var maxcount = flag.Int("c", -1, "Only grab this many packets, then exit")
|
||||
var decoder = flag.String("decoder", "", "Name of the decoder to use (default: guess from capture)")
|
||||
var statsevery = flag.Int("stats", 60, "Output statistics every N seconds")
|
||||
var lazy = flag.Bool("lazy", false, "If true, do lazy decoding")
|
||||
var nodefrag = flag.Bool("nodefrag", false, "If true, do not do IPv4 defrag")
|
||||
var checksum = flag.Bool("checksum", false, "Check TCP checksum") // global
|
||||
var nooptcheck = flag.Bool("nooptcheck", true, "Do not check TCP options (useful to ignore MSS on captures with TSO)") // global
|
||||
var ignorefsmerr = flag.Bool("ignorefsmerr", true, "Ignore TCP FSM errors") // global
|
||||
var allowmissinginit = flag.Bool("allowmissinginit", true, "Support streams without SYN/SYN+ACK/ACK sequence") // global
|
||||
var checksum = flag.Bool("checksum", false, "Check TCP checksum") // global
|
||||
var nooptcheck = flag.Bool("nooptcheck", true, "Do not check TCP options (useful to ignore MSS on captures with TSO)") // global
|
||||
var ignorefsmerr = flag.Bool("ignorefsmerr", true, "Ignore TCP FSM errors") // global
|
||||
var allowmissinginit = flag.Bool("allowmissinginit", true, "Support streams without SYN/SYN+ACK/ACK sequence") // global
|
||||
var verbose = flag.Bool("verbose", false, "Be verbose")
|
||||
var debug = flag.Bool("debug", false, "Display debug information")
|
||||
var quiet = flag.Bool("quiet", false, "Be quiet regarding errors")
|
||||
@@ -71,7 +79,7 @@ var nohttp = flag.Bool("nohttp", false, "Disable HTTP parsing")
|
||||
var output = flag.String("output", "", "Path to create file for HTTP 200 OK responses")
|
||||
var writeincomplete = flag.Bool("writeincomplete", false, "Write incomplete response")
|
||||
|
||||
var hexdump = flag.Bool("dump", false, "Dump HTTP request/response as hex") // global
|
||||
var hexdump = flag.Bool("dump", false, "Dump HTTP request/response as hex") // global
|
||||
var hexdumppkt = flag.Bool("dumppkt", false, "Dump packet as hex")
|
||||
|
||||
// capture
|
||||
@@ -82,14 +90,16 @@ var tstype = flag.String("timestamp_type", "", "Type of timestamps to use")
|
||||
var promisc = flag.Bool("promisc", true, "Set promiscuous mode")
|
||||
var anydirection = flag.Bool("anydirection", false, "Capture http requests to other hosts")
|
||||
var staleTimeoutSeconds = flag.Int("staletimout", 120, "Max time in seconds to keep connections which don't transmit data")
|
||||
var hostAppAddressesString = flag.String("targets", "", "Comma separated list of ip:ports to tap")
|
||||
|
||||
var memprofile = flag.String("memprofile", "", "Write memory profile")
|
||||
|
||||
// output
|
||||
var dumpToHar = flag.Bool("hardump", false, "Dump traffic to har files")
|
||||
var HarOutputDir = flag.String("hardir", "", "Directory in which to store output har files")
|
||||
var harEntriesPerFile = flag.Int("harentriesperfile", 200, "Number of max number of har entries to store in each file")
|
||||
|
||||
var reqResMatcher = createResponseRequestMatcher() // global
|
||||
var reqResMatcher = createResponseRequestMatcher() // global
|
||||
var statsTracker = StatsTracker{}
|
||||
|
||||
// global
|
||||
@@ -111,20 +121,24 @@ var stats struct {
|
||||
overlapPackets int
|
||||
}
|
||||
|
||||
type TapOpts struct {
|
||||
HostMode bool
|
||||
type CollectorMessage struct {
|
||||
MessageType string
|
||||
Ports *[]int `json:"ports,omitempty"`
|
||||
Addresses *[]string `json:"addresses,omitempty"`
|
||||
}
|
||||
|
||||
var outputLevel int
|
||||
var errorsMap map[string]uint
|
||||
var errorsMapMutex sync.Mutex
|
||||
var nErrors uint
|
||||
var ownIps []string // global
|
||||
var hostMode bool // global
|
||||
var appPorts []int // global
|
||||
var ownIps []string //global
|
||||
var hostMode bool //global
|
||||
var HostAppAddresses []string //global
|
||||
|
||||
/* minOutputLevel: Error will be printed only if outputLevel is above this value
|
||||
* t: key for errorsMap (counting errors)
|
||||
* s, a: arguments log.Printf
|
||||
* s, a: arguments fmt.Printf
|
||||
* Note: Too bad for perf that a... is evaluated
|
||||
*/
|
||||
func logError(minOutputLevel int, t string, s string, a ...interface{}) {
|
||||
@@ -133,10 +147,9 @@ func logError(minOutputLevel int, t string, s string, a ...interface{}) {
|
||||
nb, _ := errorsMap[t]
|
||||
errorsMap[t] = nb + 1
|
||||
errorsMapMutex.Unlock()
|
||||
|
||||
if outputLevel >= minOutputLevel {
|
||||
formatStr := fmt.Sprintf("%s: %s", t, s)
|
||||
rlog.Errorf(formatStr, a...)
|
||||
fmt.Printf(formatStr, a...)
|
||||
}
|
||||
}
|
||||
func Error(t string, s string, a ...interface{}) {
|
||||
@@ -145,11 +158,15 @@ func Error(t string, s string, a ...interface{}) {
|
||||
func SilentError(t string, s string, a ...interface{}) {
|
||||
logError(2, t, s, a...)
|
||||
}
|
||||
func Debug(s string, a ...interface{}) {
|
||||
rlog.Debugf(s, a...)
|
||||
func Info(s string, a ...interface{}) {
|
||||
if outputLevel >= 1 {
|
||||
fmt.Printf(s, a...)
|
||||
}
|
||||
}
|
||||
func Trace(s string, a ...interface{}) {
|
||||
rlog.Tracef(1, s, a...)
|
||||
func Debug(s string, a ...interface{}) {
|
||||
if outputLevel >= 2 {
|
||||
fmt.Printf(s, a...)
|
||||
}
|
||||
}
|
||||
|
||||
func inArrayInt(arr []int, valueToCheck int) bool {
|
||||
@@ -170,65 +187,33 @@ func inArrayString(arr []string, valueToCheck string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// Context
|
||||
// The assembler context
|
||||
/*
|
||||
* The assembler context
|
||||
*/
|
||||
type Context struct {
|
||||
CaptureInfo gopacket.CaptureInfo
|
||||
}
|
||||
|
||||
func GetStats() AppStats {
|
||||
return statsTracker.appStats
|
||||
}
|
||||
|
||||
func (c *Context) GetCaptureInfo() gopacket.CaptureInfo {
|
||||
return c.CaptureInfo
|
||||
}
|
||||
|
||||
func StartPassiveTapper(opts *TapOpts) (<-chan *OutputChannelItem, <-chan *OutboundLink) {
|
||||
hostMode = opts.HostMode
|
||||
func StartPassiveTapper() <-chan *OutputChannelItem {
|
||||
var harWriter *HarWriter
|
||||
if *dumpToHar {
|
||||
harWriter = NewHarWriter(*HarOutputDir, *harEntriesPerFile)
|
||||
}
|
||||
|
||||
harWriter := NewHarWriter(*HarOutputDir, *harEntriesPerFile)
|
||||
outboundLinkWriter := NewOutboundLinkWriter()
|
||||
go startPassiveTapper(harWriter)
|
||||
|
||||
go startPassiveTapper(harWriter, outboundLinkWriter)
|
||||
if harWriter != nil {
|
||||
return harWriter.OutChan
|
||||
}
|
||||
|
||||
return harWriter.OutChan, outboundLinkWriter.OutChan
|
||||
return nil
|
||||
}
|
||||
|
||||
func startMemoryProfiler() {
|
||||
dirname := "/app/pprof"
|
||||
rlog.Info("Profiling is on, results will be written to %s", dirname)
|
||||
go func() {
|
||||
if _, err := os.Stat(dirname); os.IsNotExist(err) {
|
||||
if err := os.Mkdir(dirname, 0777); err != nil {
|
||||
log.Fatal("could not create directory for profile: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
for true {
|
||||
t := time.Now()
|
||||
|
||||
filename := fmt.Sprintf("%s/%s__mem.prof", dirname, t.Format("15_04_05"))
|
||||
|
||||
rlog.Info("Writing memory profile to %s\n", filename)
|
||||
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatal("could not create memory profile: ", err)
|
||||
}
|
||||
runtime.GC() // get up-to-date statistics
|
||||
if err := pprof.WriteHeapProfile(f); err != nil {
|
||||
log.Fatal("could not write memory profile: ", err)
|
||||
}
|
||||
_ = f.Close()
|
||||
time.Sleep(time.Minute)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWriter) {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Lshortfile)
|
||||
|
||||
func startPassiveTapper(harWriter *HarWriter) {
|
||||
defer util.Run()()
|
||||
if *debug {
|
||||
outputLevel = 2
|
||||
@@ -241,43 +226,68 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr
|
||||
|
||||
if localhostIPs, err := getLocalhostIPs(); err != nil {
|
||||
// TODO: think this over
|
||||
rlog.Info("Failed to get self IP addresses")
|
||||
rlog.Errorf("Getting-Self-Address", "Error getting self ip address: %s (%v,%+v)", err, err, err)
|
||||
fmt.Println("Failed to get self IP addresses")
|
||||
Error("Getting-Self-Address", "Error getting self ip address: %s (%v,%+v)\n", err, err, err)
|
||||
ownIps = make([]string, 0)
|
||||
} else {
|
||||
ownIps = localhostIPs
|
||||
}
|
||||
|
||||
appPortsStr := os.Getenv(AppPortsEnvVar)
|
||||
var appPorts []int
|
||||
if appPortsStr == "" {
|
||||
rlog.Info("Received empty/no APP_PORTS env var! only listening to http on port 80!")
|
||||
fmt.Println("Received empty/no APP_PORTS env var! only listening to http on port 80!")
|
||||
appPorts = make([]int, 0)
|
||||
} else {
|
||||
appPorts = parseAppPorts(appPortsStr)
|
||||
}
|
||||
SetFilterPorts(appPorts)
|
||||
tapOutputPort := os.Getenv(OutPortEnvVar)
|
||||
if tapOutputPort == "" {
|
||||
fmt.Println("Received empty/no WEB_SOCKET_PORT env var! falling back to port 8080")
|
||||
tapOutputPort = "8080"
|
||||
}
|
||||
envVal := os.Getenv(maxHTTP2DataLenEnvVar)
|
||||
if envVal == "" {
|
||||
rlog.Infof("Received empty/no HTTP2_DATA_SIZE_LIMIT env var! falling back to %v", maxHTTP2DataLenDefault)
|
||||
fmt.Println("Received empty/no HTTP2_DATA_SIZE_LIMIT env var! falling back to", maxHTTP2DataLenDefault)
|
||||
maxHTTP2DataLen = maxHTTP2DataLenDefault
|
||||
} else {
|
||||
if convertedInt, err := strconv.Atoi(envVal); err != nil {
|
||||
rlog.Infof("Received invalid HTTP2_DATA_SIZE_LIMIT env var! falling back to %v", maxHTTP2DataLenDefault)
|
||||
fmt.Println("Received invalid HTTP2_DATA_SIZE_LIMIT env var! falling back to", maxHTTP2DataLenDefault)
|
||||
maxHTTP2DataLen = maxHTTP2DataLenDefault
|
||||
} else {
|
||||
rlog.Infof("Received HTTP2_DATA_SIZE_LIMIT env var: %v", maxHTTP2DataLenDefault)
|
||||
fmt.Println("Received HTTP2_DATA_SIZE_LIMIT env var:", maxHTTP2DataLenDefault)
|
||||
maxHTTP2DataLen = convertedInt
|
||||
}
|
||||
}
|
||||
hostMode = os.Getenv(shared.HostModeEnvVar) == "1"
|
||||
|
||||
log.Printf("App Ports: %v", gSettings.filterPorts)
|
||||
fmt.Printf("App Ports: %v\n", appPorts)
|
||||
fmt.Printf("Tap output websocket port: %s\n", tapOutputPort)
|
||||
|
||||
var onCollectorMessage = func(message []byte) {
|
||||
var parsedMessage CollectorMessage
|
||||
err := json.Unmarshal(message, &parsedMessage)
|
||||
if err == nil {
|
||||
|
||||
if parsedMessage.MessageType == "setPorts" {
|
||||
Debug("Got message from collector. Type: %s, Ports: %v\n", parsedMessage.MessageType, parsedMessage.Ports)
|
||||
appPorts = *parsedMessage.Ports
|
||||
} else if parsedMessage.MessageType == "setAddresses" {
|
||||
Debug("Got message from collector. Type: %s, IPs: %v\n", parsedMessage.MessageType, parsedMessage.Addresses)
|
||||
HostAppAddresses = *parsedMessage.Addresses
|
||||
Info("Filtering for the following addresses: %s\n", HostAppAddresses)
|
||||
}
|
||||
} else {
|
||||
Error("Collector-Message-Parsing", "Error parsing message from collector: %s (%v,%+v)\n", err, err, err)
|
||||
}
|
||||
}
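The collector steers the tapper over this websocket with small JSON control messages; given the CollectorMessage struct above, a setPorts message carries a port list and setAddresses an address list. A snippet producing both (struct copied here purely for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

type CollectorMessage struct {
	MessageType string
	Ports       *[]int    `json:"ports,omitempty"`
	Addresses   *[]string `json:"addresses,omitempty"`
}

func main() {
	ports := []int{80, 8080}
	setPorts, _ := json.Marshal(CollectorMessage{MessageType: "setPorts", Ports: &ports})
	fmt.Println(string(setPorts)) // {"MessageType":"setPorts","ports":[80,8080]}

	addrs := []string{"10.0.0.9:8080"}
	setAddrs, _ := json.Marshal(CollectorMessage{MessageType: "setAddresses", Addresses: &addrs})
	fmt.Println(string(setAddrs)) // {"MessageType":"setAddresses","addresses":["10.0.0.9:8080"]}
}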
|
||||
|
||||
go startOutputServer(tapOutputPort, onCollectorMessage)
|
||||
|
||||
var handle *pcap.Handle
|
||||
var err error
|
||||
if *fname != "" {
|
||||
if handle, err = pcap.OpenOffline(*fname); err != nil {
|
||||
log.Fatalf("PCAP OpenOffline error: %v", err)
|
||||
log.Fatal("PCAP OpenOffline error:", err)
|
||||
}
|
||||
} else {
|
||||
// This is a little complicated because we want to allow all possible options
|
||||
@@ -303,21 +313,22 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr
|
||||
}
|
||||
}
|
||||
if handle, err = inactive.Activate(); err != nil {
|
||||
log.Fatalf("PCAP Activate error: %v", err)
|
||||
log.Fatal("PCAP Activate error:", err)
|
||||
}
|
||||
defer handle.Close()
|
||||
}
|
||||
if len(flag.Args()) > 0 {
|
||||
bpffilter := strings.Join(flag.Args(), " ")
|
||||
rlog.Infof("Using BPF filter %q", bpffilter)
|
||||
Info("Using BPF filter %q\n", bpffilter)
|
||||
if err = handle.SetBPFFilter(bpffilter); err != nil {
|
||||
log.Fatalf("BPF filter error: %v", err)
|
||||
log.Fatal("BPF filter error:", err)
|
||||
}
|
||||
}
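Outside of file replay, the capture handle comes from libpcap and any extra command-line arguments are joined into a BPF filter string before packets are read. A minimal live-capture sketch along the same lines (device name and filter are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/google/gopacket"
	"github.com/google/gopacket/pcap"
)

func main() {
	// Placeholder device; the tapper configures interface, snaplen and
	// promiscuous mode through its own flags.
	handle, err := pcap.OpenLive("eth0", 65536, true, pcap.BlockForever)
	if err != nil {
		log.Fatalf("PCAP OpenLive error: %v", err)
	}
	defer handle.Close()

	if err := handle.SetBPFFilter("tcp and port 80"); err != nil {
		log.Fatalf("BPF filter error: %v", err)
	}

	source := gopacket.NewPacketSource(handle, handle.LinkType())
	source.Lazy = true
	source.NoCopy = true
	for packet := range source.Packets() {
		fmt.Println(packet.Metadata().CaptureInfo.Timestamp, len(packet.Data()), "bytes")
	}
}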
|
||||
|
||||
harWriter.Start()
|
||||
defer harWriter.Stop()
|
||||
defer outboundLinkWriter.Stop()
|
||||
if *dumpToHar {
|
||||
harWriter.Start()
|
||||
defer harWriter.Stop()
|
||||
}
|
||||
|
||||
var dec gopacket.Decoder
|
||||
var ok bool
|
||||
@@ -331,24 +342,15 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr
|
||||
source := gopacket.NewPacketSource(handle, dec)
|
||||
source.Lazy = *lazy
|
||||
source.NoCopy = true
|
||||
rlog.Info("Starting to read packets")
|
||||
statsTracker.setStartTime(time.Now())
|
||||
Info("Starting to read packets\n")
|
||||
count := 0
|
||||
bytes := int64(0)
|
||||
start := time.Now()
|
||||
defragger := ip4defrag.NewIPv4Defragmenter()
|
||||
|
||||
streamFactory := &tcpStreamFactory{
|
||||
doHTTP: !*nohttp,
|
||||
harWriter: harWriter,
|
||||
outbountLinkWriter: outboundLinkWriter,
|
||||
}
|
||||
streamFactory := &tcpStreamFactory{doHTTP: !*nohttp, harWriter: harWriter}
|
||||
streamPool := reassembly.NewStreamPool(streamFactory)
|
||||
assembler := reassembly.NewAssembler(streamPool)
|
||||
|
||||
maxBufferedPagesTotal := GetMaxBufferedPagesPerConnection()
|
||||
maxBufferedPagesPerConnection := GetMaxBufferedPagesTotal()
|
||||
rlog.Infof("Assembler options: maxBufferedPagesTotal=%d, maxBufferedPagesPerConnection=%d", maxBufferedPagesTotal, maxBufferedPagesPerConnection)
|
||||
assembler.AssemblerOptions.MaxBufferedPagesTotal = maxBufferedPagesTotal
|
||||
assembler.AssemblerOptions.MaxBufferedPagesPerConnection = maxBufferedPagesPerConnection
|
||||
|
||||
var assemblerMutex sync.Mutex
|
||||
|
||||
signalChan := make(chan os.Signal, 1)
|
||||
@@ -356,10 +358,10 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr
|
||||
|
||||
staleConnectionTimeout := time.Second * time.Duration(*staleTimeoutSeconds)
|
||||
cleaner := Cleaner{
|
||||
assembler: assembler,
|
||||
assemblerMutex: &assemblerMutex,
|
||||
matcher: &reqResMatcher,
|
||||
cleanPeriod: cleanPeriod,
|
||||
assembler: assembler,
|
||||
assemblerMutex: &assemblerMutex,
|
||||
matcher: &reqResMatcher,
|
||||
cleanPeriod: cleanPeriod,
|
||||
connectionTimeout: staleConnectionTimeout,
|
||||
}
|
||||
cleaner.start()
|
||||
@@ -376,8 +378,10 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr
|
||||
errorMapLen := len(errorsMap)
|
||||
errorsSummery := fmt.Sprintf("%v", errorsMap)
|
||||
errorsMapMutex.Unlock()
|
||||
log.Printf("%v (errors: %v, errTypes:%v) - Errors Summary: %s",
|
||||
time.Since(statsTracker.appStats.StartTime),
|
||||
fmt.Printf("Processed %v packets (%v bytes) in %v (errors: %v, errTypes:%v)\nErrors Summary: %s\n",
|
||||
count,
|
||||
bytes,
|
||||
time.Since(start),
|
||||
nErrors,
|
||||
errorMapLen,
|
||||
errorsSummery,
|
||||
@@ -386,8 +390,8 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr
|
||||
// At this moment
|
||||
memStats := runtime.MemStats{}
|
||||
runtime.ReadMemStats(&memStats)
|
||||
log.Printf(
|
||||
"mem: %d, goroutines: %d, unmatched messages: %d",
|
||||
fmt.Printf(
|
||||
"mem: %d, goroutines: %d, unmatched messages: %d\n",
|
||||
memStats.HeapAlloc,
|
||||
runtime.NumGoroutine(),
|
||||
reqResMatcher.openMessagesMap.Count(),
|
||||
@@ -395,29 +399,24 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr
|
||||
|
||||
// Since the last print
|
||||
cleanStats := cleaner.dumpStats()
|
||||
log.Printf(
|
||||
"cleaner - flushed connections: %d, closed connections: %d, deleted messages: %d",
|
||||
appStats := statsTracker.dumpStats()
|
||||
fmt.Printf(
|
||||
"flushed connections %d, closed connections: %d, deleted messages: %d, matched messages: %d\n",
|
||||
cleanStats.flushed,
|
||||
cleanStats.closed,
|
||||
cleanStats.deleted,
|
||||
appStats.matchedMessages,
|
||||
)
|
||||
currentAppStats := statsTracker.dumpStats()
|
||||
appStatsJSON, _ := json.Marshal(currentAppStats)
|
||||
log.Printf("app stats - %v", string(appStatsJSON))
|
||||
}
|
||||
}()
|
||||
|
||||
if GetMemoryProfilingEnabled() {
|
||||
startMemoryProfiler()
|
||||
}
|
||||
|
||||
for packet := range source.Packets() {
|
||||
packetsCount := statsTracker.incPacketsCount()
|
||||
rlog.Debugf("PACKET #%d", packetsCount)
|
||||
count++
|
||||
Debug("PACKET #%d\n", count)
|
||||
data := packet.Data()
|
||||
statsTracker.updateProcessedBytes(int64(len(data)))
|
||||
bytes += int64(len(data))
|
||||
if *hexdumppkt {
|
||||
rlog.Debugf("Packet content (%d/0x%x) - %s", len(data), len(data), hex.Dump(data))
|
||||
Debug("Packet content (%d/0x%x)\n%s\n", len(data), len(data), hex.Dump(data))
|
||||
}
|
||||
|
||||
// defrag the IPv4 packet if required
|
||||
@@ -432,24 +431,23 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr
|
||||
if err != nil {
|
||||
log.Fatalln("Error while de-fragmenting", err)
|
||||
} else if newip4 == nil {
|
||||
rlog.Debugf("Fragment...")
|
||||
Debug("Fragment...\n")
|
||||
continue // packet fragment, we don't have whole packet yet.
|
||||
}
|
||||
if newip4.Length != l {
|
||||
stats.ipdefrag++
|
||||
rlog.Debugf("Decoding re-assembled packet: %s", newip4.NextLayerType())
|
||||
Debug("Decoding re-assembled packet: %s\n", newip4.NextLayerType())
|
||||
pb, ok := packet.(gopacket.PacketBuilder)
|
||||
if !ok {
|
||||
log.Panic("Not a PacketBuilder")
|
||||
panic("Not a PacketBuilder")
|
||||
}
|
||||
nextDecoder := newip4.NextLayerType()
|
||||
_ = nextDecoder.Decode(newip4.Payload, pb)
|
||||
nextDecoder.Decode(newip4.Payload, pb)
|
||||
}
|
||||
}
|
||||
|
||||
tcp := packet.Layer(layers.LayerTypeTCP)
|
||||
if tcp != nil {
|
||||
statsTracker.incTcpPacketsCount()
|
||||
tcp := tcp.(*layers.TCP)
|
||||
if *checksum {
|
||||
err := tcp.SetNetworkLayerForChecksum(packet.NetworkLayer())
|
||||
@@ -461,27 +459,22 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr
|
||||
CaptureInfo: packet.Metadata().CaptureInfo,
|
||||
}
|
||||
stats.totalsz += len(tcp.Payload)
|
||||
rlog.Debugf("%s : %v -> %s : %v", packet.NetworkLayer().NetworkFlow().Src(), tcp.SrcPort, packet.NetworkLayer().NetworkFlow().Dst(), tcp.DstPort)
|
||||
//fmt.Println(packet.NetworkLayer().NetworkFlow().Src(), ":", tcp.SrcPort, " -> ", packet.NetworkLayer().NetworkFlow().Dst(), ":", tcp.DstPort)
|
||||
assemblerMutex.Lock()
|
||||
assembler.AssembleWithContext(packet.NetworkLayer().NetworkFlow(), tcp, &c)
|
||||
assemblerMutex.Unlock()
|
||||
}
|
||||
|
||||
done := *maxcount > 0 && statsTracker.appStats.PacketsCount >= *maxcount
|
||||
done := *maxcount > 0 && count >= *maxcount
|
||||
if done {
|
||||
errorsMapMutex.Lock()
|
||||
errorMapLen := len(errorsMap)
|
||||
errorsMapMutex.Unlock()
|
||||
log.Printf("Processed %v packets (%v bytes) in %v (errors: %v, errTypes:%v)",
|
||||
statsTracker.appStats.PacketsCount,
|
||||
statsTracker.appStats.ProcessedBytes,
|
||||
time.Since(statsTracker.appStats.StartTime),
|
||||
nErrors,
|
||||
errorMapLen)
|
||||
fmt.Fprintf(os.Stderr, "Processed %v packets (%v bytes) in %v (errors: %v, errTypes:%v)\n", count, bytes, time.Since(start), nErrors, errorMapLen)
|
||||
}
|
||||
select {
|
||||
case <-signalChan:
|
||||
log.Printf("Caught SIGINT: aborting")
|
||||
fmt.Fprintf(os.Stderr, "\nCaught SIGINT: aborting\n")
|
||||
done = true
|
||||
default:
|
||||
// NOP: continue
|
||||
@@ -494,7 +487,7 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr
|
||||
assemblerMutex.Lock()
|
||||
closed := assembler.FlushAll()
|
||||
assemblerMutex.Unlock()
|
||||
rlog.Debugf("Final flush: %d closed", closed)
|
||||
Debug("Final flush: %d closed", closed)
|
||||
if outputLevel >= 2 {
|
||||
streamPool.Dump()
|
||||
}
|
||||
@@ -504,35 +497,34 @@ func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWr
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
_ = pprof.WriteHeapProfile(f)
|
||||
_ = f.Close()
|
||||
pprof.WriteHeapProfile(f)
|
||||
f.Close()
|
||||
}
|
||||
|
||||
streamFactory.WaitGoRoutines()
|
||||
assemblerMutex.Lock()
|
||||
rlog.Debugf("%s", assembler.Dump())
|
||||
Debug("%s\n", assembler.Dump())
|
||||
assemblerMutex.Unlock()
|
||||
if !*nodefrag {
|
||||
log.Printf("IPdefrag:\t\t%d", stats.ipdefrag)
|
||||
fmt.Printf("IPdefrag:\t\t%d\n", stats.ipdefrag)
|
||||
}
|
||||
log.Printf("TCP stats:")
|
||||
log.Printf(" missed bytes:\t\t%d", stats.missedBytes)
|
||||
log.Printf(" total packets:\t\t%d", stats.pkt)
|
||||
log.Printf(" rejected FSM:\t\t%d", stats.rejectFsm)
|
||||
log.Printf(" rejected Options:\t%d", stats.rejectOpt)
|
||||
log.Printf(" reassembled bytes:\t%d", stats.sz)
|
||||
log.Printf(" total TCP bytes:\t%d", stats.totalsz)
|
||||
log.Printf(" conn rejected FSM:\t%d", stats.rejectConnFsm)
|
||||
log.Printf(" reassembled chunks:\t%d", stats.reassembled)
|
||||
log.Printf(" out-of-order packets:\t%d", stats.outOfOrderPackets)
|
||||
log.Printf(" out-of-order bytes:\t%d", stats.outOfOrderBytes)
|
||||
log.Printf(" biggest-chunk packets:\t%d", stats.biggestChunkPackets)
|
||||
log.Printf(" biggest-chunk bytes:\t%d", stats.biggestChunkBytes)
|
||||
log.Printf(" overlap packets:\t%d", stats.overlapPackets)
|
||||
log.Printf(" overlap bytes:\t\t%d", stats.overlapBytes)
|
||||
log.Printf("Errors: %d", nErrors)
|
||||
fmt.Printf("TCP stats:\n")
|
||||
fmt.Printf(" missed bytes:\t\t%d\n", stats.missedBytes)
|
||||
fmt.Printf(" total packets:\t\t%d\n", stats.pkt)
|
||||
fmt.Printf(" rejected FSM:\t\t%d\n", stats.rejectFsm)
|
||||
fmt.Printf(" rejected Options:\t%d\n", stats.rejectOpt)
|
||||
fmt.Printf(" reassembled bytes:\t%d\n", stats.sz)
|
||||
fmt.Printf(" total TCP bytes:\t%d\n", stats.totalsz)
|
||||
fmt.Printf(" conn rejected FSM:\t%d\n", stats.rejectConnFsm)
|
||||
fmt.Printf(" reassembled chunks:\t%d\n", stats.reassembled)
|
||||
fmt.Printf(" out-of-order packets:\t%d\n", stats.outOfOrderPackets)
|
||||
fmt.Printf(" out-of-order bytes:\t%d\n", stats.outOfOrderBytes)
|
||||
fmt.Printf(" biggest-chunk packets:\t%d\n", stats.biggestChunkPackets)
|
||||
fmt.Printf(" biggest-chunk bytes:\t%d\n", stats.biggestChunkBytes)
|
||||
fmt.Printf(" overlap packets:\t%d\n", stats.overlapPackets)
|
||||
fmt.Printf(" overlap bytes:\t\t%d\n", stats.overlapBytes)
|
||||
fmt.Printf("Errors: %d\n", nErrors)
|
||||
for e := range errorsMap {
|
||||
log.Printf(" %s:\t\t%d", e, errorsMap[e])
|
||||
fmt.Printf(" %s:\t\t%d\n", e, errorsMap[e])
|
||||
}
|
||||
log.Printf("AppStats: %v", GetStats())
|
||||
}
|
||||
api/pkg/tap/stats_tracker.go (new file, 35 lines)
@@ -0,0 +1,35 @@
package tap

import (
	"sync"
)

type AppStats struct {
	matchedMessages int
}

type StatsTracker struct {
	stats      AppStats
	statsMutex sync.Mutex
}

func (st *StatsTracker) incMatchedMessages() {
	st.statsMutex.Lock()
	st.stats.matchedMessages++
	st.statsMutex.Unlock()
}

func (st *StatsTracker) dumpStats() AppStats {
	st.statsMutex.Lock()

	stats := AppStats{
		matchedMessages: st.stats.matchedMessages,
	}

	st.stats.matchedMessages = 0

	st.statsMutex.Unlock()

	return stats
}
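dumpStats is read-and-reset: each call returns the counters accumulated since the previous call and zeroes them under the same mutex, so periodic reporting never double counts. A tiny usage sketch as it would look inside the same package (illustrative only):

package tap

import "fmt"

// reportOnce demonstrates the read-and-reset semantics of StatsTracker:
// increments made between dumps are returned exactly once.
func reportOnce(st *StatsTracker) {
	st.incMatchedMessages()
	st.incMatchedMessages()

	first := st.dumpStats()
	second := st.dumpStats()
	fmt.Println(first.matchedMessages, second.matchedMessages) // 2 0
}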
api/pkg/tap/tap_output.go (new file, 239 lines)
@@ -0,0 +1,239 @@
|
||||
package tap
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/patrickmn/go-cache"
|
||||
)
|
||||
|
||||
|
||||
const (
|
||||
// Time allowed to write a message to the peer.
|
||||
writeWait = 10 * time.Second
|
||||
|
||||
// Time allowed to read the next pong message from the peer.
|
||||
pongWait = 60 * time.Second
|
||||
|
||||
// Send pings to peer with this period. Must be less than pongWait.
|
||||
pingPeriod = (pongWait * 9) / 10
|
||||
|
||||
// Maximum message size allowed from peer.
|
||||
maxMessageSize = 512
|
||||
)
|
||||
|
||||
var (
|
||||
newline = []byte{'\n'}
|
||||
space = []byte{' '}
|
||||
hub *Hub
|
||||
outboundSocketNotifyExpiringCache = cache.New(outboundThrottleCacheExpiryPeriod, outboundThrottleCacheExpiryPeriod)
|
||||
)
|
||||
|
||||
var upgrader = websocket.Upgrader{
|
||||
ReadBufferSize: 1024,
|
||||
WriteBufferSize: 1024,
|
||||
CheckOrigin: func (_ *http.Request) bool { return true },
|
||||
}
|
||||
|
||||
// Client is a middleman between the websocket connection and the hub.
|
||||
type Client struct {
|
||||
hub *Hub
|
||||
|
||||
// The websocket connection.
|
||||
conn *websocket.Conn
|
||||
|
||||
// Buffered channel of outbound messages.
|
||||
send chan []byte
|
||||
}
|
||||
|
||||
type OutBoundLinkMessage struct {
|
||||
SourceIP string `json:"sourceIP"`
|
||||
IP string `json:"ip"`
|
||||
Port int `json:"port"`
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
|
||||
// readPump pumps messages from the websocket connection to the hub.
|
||||
//
|
||||
// The application runs readPump in a per-connection goroutine. The application
|
||||
// ensures that there is at most one reader on a connection by executing all
|
||||
// reads from this goroutine.
|
||||
func (c *Client) readPump() {
|
||||
defer func() {
|
||||
c.hub.unregister <- c
|
||||
c.conn.Close()
|
||||
}()
|
||||
c.conn.SetReadLimit(maxMessageSize)
|
||||
c.conn.SetReadDeadline(time.Now().Add(pongWait))
|
||||
c.conn.SetPongHandler(func(string) error { c.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })
|
||||
for {
|
||||
_, message, err := c.conn.ReadMessage()
|
||||
if err != nil {
|
||||
if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
|
||||
log.Printf("error: %v", err)
|
||||
}
|
||||
break
|
||||
}
|
||||
message = bytes.TrimSpace(bytes.Replace(message, newline, space, -1))
|
||||
c.hub.onMessageCallback(message)
|
||||
}
|
||||
}
|
||||
|
||||
// writePump pumps messages from the hub to the websocket connection.
|
||||
//
|
||||
// A goroutine running writePump is started for each connection. The
|
||||
// application ensures that there is at most one writer to a connection by
|
||||
// executing all writes from this goroutine.
|
||||
func (c *Client) writePump() {
|
||||
ticker := time.NewTicker(pingPeriod)
|
||||
defer func() {
|
||||
ticker.Stop()
|
||||
c.conn.Close()
|
||||
}()
|
||||
for {
|
||||
select {
|
||||
case message, ok := <-c.send:
|
||||
c.conn.SetWriteDeadline(time.Now().Add(writeWait))
|
||||
if !ok {
|
||||
// The hub closed the channel.
|
||||
c.conn.WriteMessage(websocket.CloseMessage, []byte{})
|
||||
return
|
||||
}
|
||||
|
||||
w, err := c.conn.NextWriter(websocket.TextMessage)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
w.Write(message)
|
||||
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
return
|
||||
}
|
||||
case <-ticker.C:
|
||||
c.conn.SetWriteDeadline(time.Now().Add(writeWait))
|
||||
if err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type Hub struct {
|
||||
// Registered clients.
|
||||
clients map[*Client]bool
|
||||
|
||||
// Inbound messages from the clients.
|
||||
broadcast chan []byte
|
||||
|
||||
// Register requests from the clients.
|
||||
register chan *Client
|
||||
|
||||
// Unregister requests from clients.
|
||||
unregister chan *Client
|
||||
|
||||
// Handle messages from client
|
||||
onMessageCallback func([]byte)
|
||||
|
||||
|
||||
}
|
||||
|
||||
func newHub(onMessageCallback func([]byte)) *Hub {
|
||||
return &Hub{
|
||||
broadcast: make(chan []byte),
|
||||
register: make(chan *Client),
|
||||
unregister: make(chan *Client),
|
||||
clients: make(map[*Client]bool),
|
||||
onMessageCallback: onMessageCallback,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Hub) run() {
|
||||
for {
|
||||
select {
|
||||
case client := <-h.register:
|
||||
h.clients[client] = true
|
||||
case client := <-h.unregister:
|
||||
if _, ok := h.clients[client]; ok {
|
||||
delete(h.clients, client)
|
||||
close(client.send)
|
||||
}
|
||||
case message := <-h.broadcast:
|
||||
// matched messages counter is incremented in this thread instead of in multiple http reader
|
||||
// threads in order to reduce contention.
|
||||
statsTracker.incMatchedMessages()
|
||||
|
||||
for client := range h.clients {
|
||||
select {
|
||||
case client.send <- message:
|
||||
default:
|
||||
close(client.send)
|
||||
delete(h.clients, client)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// serveWs handles websocket requests from the peer.
|
||||
func serveWs(hub *Hub, w http.ResponseWriter, r *http.Request) {
|
||||
conn, err := upgrader.Upgrade(w, r, nil)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
client := &Client{hub: hub, conn: conn, send: make(chan []byte, 256)}
|
||||
client.hub.register <- client
|
||||
|
||||
// Allow collection of memory referenced by the caller by doing all work in
|
||||
// new goroutines.
|
||||
go client.writePump()
|
||||
go client.readPump()
|
||||
}
|
||||
|
||||
func startOutputServer(port string, messageCallback func([]byte)) {
|
||||
hub = newHub(messageCallback)
|
||||
go hub.run()
|
||||
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
|
||||
serveWs(hub, w, r)
|
||||
})
|
||||
err := http.ListenAndServe("0.0.0.0:" + port, nil)
|
||||
if err != nil {
|
||||
log.Fatal("Output server error: ", err)
|
||||
}
|
||||
}
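Everything the hub broadcasts goes out as text frames on this endpoint, so any websocket client can consume the matched-pair JSON. A sketch of such a consumer using the same gorilla/websocket library (the URL assumes the documented port 8080 fallback):

package main

import (
	"fmt"
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Port 8080 is the documented fallback when WEB_SOCKET_PORT is unset.
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8080/", nil)
	if err != nil {
		log.Fatal("dial:", err)
	}
	defer conn.Close()

	for {
		msgType, message, err := conn.ReadMessage()
		if err != nil {
			log.Println("read:", err)
			return
		}
		if msgType == websocket.TextMessage {
			fmt.Println(string(message)) // one JSON object per matched pair or outbound link
		}
	}
}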
|
||||
|
||||
func broadcastReqResPair(reqResJson []byte) {
|
||||
hub.broadcast <- reqResJson
|
||||
}
|
||||
|
||||
func broadcastOutboundLink(srcIP string, dstIP string, dstPort int) {
|
||||
cacheKey := fmt.Sprintf("%s -> %s:%d", srcIP, dstIP, dstPort)
|
||||
_, isInCache := outboundSocketNotifyExpiringCache.Get(cacheKey)
|
||||
if isInCache {
|
||||
return
|
||||
} else {
|
||||
outboundSocketNotifyExpiringCache.SetDefault(cacheKey, true)
|
||||
}
|
||||
|
||||
socketMessage := OutBoundLinkMessage{
|
||||
SourceIP: srcIP,
|
||||
IP: dstIP,
|
||||
Port: dstPort,
|
||||
Type: "outboundSocketDetected",
|
||||
}
|
||||
|
||||
jsonStr, err := json.Marshal(socketMessage)
|
||||
if err != nil {
|
||||
log.Printf("error marshalling outbound socket detection object: %v", err)
|
||||
} else {
|
||||
hub.broadcast <- jsonStr
|
||||
}
|
||||
}
|
||||
@@ -34,7 +34,7 @@ type tcpStream struct {
|
||||
func (t *tcpStream) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir reassembly.TCPFlowDirection, nextSeq reassembly.Sequence, start *bool, ac reassembly.AssemblerContext) bool {
|
||||
// FSM
|
||||
if !t.tcpstate.CheckState(tcp, dir) {
|
||||
SilentError("FSM-rejection", "%s: Packet rejected by FSM (state:%s)", t.ident, t.tcpstate.String())
|
||||
//SilentError("FSM", "%s: Packet rejected by FSM (state:%s)\n", t.ident, t.tcpstate.String())
|
||||
stats.rejectFsm++
|
||||
if !t.fsmerr {
|
||||
t.fsmerr = true
|
||||
@@ -47,7 +47,7 @@ func (t *tcpStream) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir reassem
|
||||
// Options
|
||||
err := t.optchecker.Accept(tcp, ci, dir, nextSeq, start)
|
||||
if err != nil {
|
||||
SilentError("OptionChecker-rejection", "%s: Packet rejected by OptionChecker: %s", t.ident, err)
|
||||
//SilentError("OptionChecker", "%s: Packet rejected by OptionChecker: %s\n", t.ident, err)
|
||||
stats.rejectOpt++
|
||||
if !*nooptcheck {
|
||||
return false
|
||||
@@ -58,10 +58,10 @@ func (t *tcpStream) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir reassem
|
||||
if *checksum {
|
||||
c, err := tcp.ComputeChecksum()
|
||||
if err != nil {
|
||||
SilentError("ChecksumCompute", "%s: Got error computing checksum: %s", t.ident, err)
|
||||
SilentError("ChecksumCompute", "%s: Got error computing checksum: %s\n", t.ident, err)
|
||||
accept = false
|
||||
} else if c != 0x0 {
|
||||
SilentError("Checksum", "%s: Invalid checksum: 0x%x", t.ident, c)
|
||||
SilentError("Checksum", "%s: Invalid checksum: 0x%x\n", t.ident, c)
|
||||
accept = false
|
||||
}
|
||||
}
|
||||
@@ -95,7 +95,7 @@ func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.Ass
|
||||
if sgStats.OverlapBytes != 0 && sgStats.OverlapPackets == 0 {
|
||||
// In the original example this was handled with panic().
|
||||
// I don't know what this error means or how to handle it properly.
|
||||
SilentError("Invalid-Overlap", "bytes:%d, pkts:%d", sgStats.OverlapBytes, sgStats.OverlapPackets)
|
||||
SilentError("Invalid-Overlap", "bytes:%d, pkts:%d\n", sgStats.OverlapBytes, sgStats.OverlapPackets)
|
||||
}
|
||||
stats.overlapBytes += sgStats.OverlapBytes
|
||||
stats.overlapPackets += sgStats.OverlapPackets
|
||||
@@ -106,7 +106,7 @@ func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.Ass
|
||||
} else {
|
||||
ident = fmt.Sprintf("%v %v(%s): ", t.net.Reverse(), t.transport.Reverse(), dir)
|
||||
}
|
||||
Trace("%s: SG reassembled packet with %d bytes (start:%v,end:%v,skip:%d,saved:%d,nb:%d,%d,overlap:%d,%d)", ident, length, start, end, skip, saved, sgStats.Packets, sgStats.Chunks, sgStats.OverlapBytes, sgStats.OverlapPackets)
|
||||
Debug("%s: SG reassembled packet with %d bytes (start:%v,end:%v,skip:%d,saved:%d,nb:%d,%d,overlap:%d,%d)\n", ident, length, start, end, skip, saved, sgStats.Packets, sgStats.Chunks, sgStats.OverlapBytes, sgStats.OverlapPackets)
|
||||
if skip == -1 && *allowmissinginit {
|
||||
// this is allowed
|
||||
} else if skip != 0 {
|
||||
@@ -125,18 +125,18 @@ func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.Ass
|
||||
}
|
||||
dnsSize := binary.BigEndian.Uint16(data[:2])
|
||||
missing := int(dnsSize) - len(data[2:])
|
||||
Trace("dnsSize: %d, missing: %d", dnsSize, missing)
|
||||
Debug("dnsSize: %d, missing: %d\n", dnsSize, missing)
|
||||
if missing > 0 {
|
||||
Debug("Missing some bytes: %d", missing)
|
||||
Info("Missing some bytes: %d\n", missing)
|
||||
sg.KeepFrom(0)
|
||||
return
|
||||
}
|
||||
p := gopacket.NewDecodingLayerParser(layers.LayerTypeDNS, dns)
|
||||
err := p.DecodeLayers(data[2:], &decoded)
|
||||
if err != nil {
|
||||
SilentError("DNS-parser", "Failed to decode DNS: %v", err)
|
||||
SilentError("DNS-parser", "Failed to decode DNS: %v\n", err)
|
||||
} else {
|
||||
Trace("DNS: %s", gopacket.LayerDump(dns))
|
||||
Debug("DNS: %s\n", gopacket.LayerDump(dns))
|
||||
}
|
||||
if len(data) > 2+int(dnsSize) {
|
||||
sg.KeepFrom(2 + int(dnsSize))
|
||||
@@ -144,11 +144,10 @@ func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.Ass
|
||||
} else if t.isHTTP {
|
||||
if length > 0 {
|
||||
if *hexdump {
|
||||
Trace("Feeding http with:%s", hex.Dump(data))
|
||||
Debug("Feeding http with:\n%s", hex.Dump(data))
|
||||
}
|
||||
// This is where we pass the reassembled information onwards
|
||||
// This channel is read by an httpReader object
|
||||
statsTracker.incReassembledTcpPayloadsCount()
|
||||
if dir == reassembly.TCPDirClientToServer && !t.reversed {
|
||||
t.client.msgQueue <- httpReaderDataMsg{data, ac.GetCaptureInfo().Timestamp}
|
||||
} else {
|
||||
@@ -159,7 +158,7 @@ func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.Ass
|
||||
}
|
||||
|
||||
func (t *tcpStream) ReassemblyComplete(ac reassembly.AssemblerContext) bool {
|
||||
Trace("%s: Connection closed", t.ident)
|
||||
Debug("%s: Connection closed\n", t.ident)
|
||||
if t.isHTTP {
|
||||
close(t.client.msgQueue)
|
||||
close(t.server.msgQueue)
|
||||
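The DNS branch above relies on the 2-byte big-endian length prefix that DNS-over-TCP messages carry; the stream keeps buffering (`sg.KeepFrom(0)`) until a full message has arrived. A minimal sketch of that readiness check (the helper name `dnsMessageReady` is hypothetical and not part of this diff):

```go
package tap

import "encoding/binary"

// dnsMessageReady (hypothetical helper) reports whether the reassembled data
// already contains a complete DNS-over-TCP message: the first two bytes hold
// the message size, so decoding must wait until at least that many payload
// bytes have been buffered.
func dnsMessageReady(data []byte) (size int, ready bool) {
	if len(data) < 2 {
		return 0, false
	}
	size = int(binary.BigEndian.Uint16(data[:2]))
	return size, len(data[2:]) >= size
}
```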
api/pkg/tap/tcp_stream_factory.go (new file, 117 lines)
@@ -0,0 +1,117 @@
|
||||
package tap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/google/gopacket"
|
||||
"github.com/google/gopacket/layers" // pulls in all layers decoders
|
||||
"github.com/google/gopacket/reassembly"
|
||||
)
|
||||
|
||||
/*
|
||||
* The TCP factory: returns a new Stream
|
||||
* Implements gopacket.reassembly.StreamFactory interface (New)
|
||||
* Generates a new tcp stream for each new tcp connection. Closes the stream when the connection closes.
|
||||
*/
|
||||
type tcpStreamFactory struct {
|
||||
wg sync.WaitGroup
|
||||
doHTTP bool
|
||||
harWriter *HarWriter
|
||||
}
|
||||
|
||||
func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.TCP, ac reassembly.AssemblerContext) reassembly.Stream {
|
||||
Debug("* NEW: %s %s\n", net, transport)
|
||||
fsmOptions := reassembly.TCPSimpleFSMOptions{
|
||||
SupportMissingEstablishment: *allowmissinginit,
|
||||
}
|
||||
Debug("Current App Ports: %v\n", appPorts)
|
||||
dstIp := net.Dst().String()
|
||||
dstPort := int(tcp.DstPort)
|
||||
|
||||
if factory.shouldNotifyOnOutboundLink(dstIp, dstPort) {
|
||||
broadcastOutboundLink(net.Src().String(), dstIp, dstPort)
|
||||
}
|
||||
isHTTP := factory.shouldTap(dstIp, dstPort)
|
||||
stream := &tcpStream{
|
||||
net: net,
|
||||
transport: transport,
|
||||
isDNS: tcp.SrcPort == 53 || tcp.DstPort == 53,
|
||||
isHTTP: isHTTP && factory.doHTTP,
|
||||
reversed: tcp.SrcPort == 80,
|
||||
tcpstate: reassembly.NewTCPSimpleFSM(fsmOptions),
|
||||
ident: fmt.Sprintf("%s:%s", net, transport),
|
||||
optchecker: reassembly.NewTCPOptionCheck(),
|
||||
}
|
||||
if stream.isHTTP {
|
||||
stream.client = httpReader{
|
||||
msgQueue: make(chan httpReaderDataMsg),
|
||||
ident: fmt.Sprintf("%s %s", net, transport),
|
||||
tcpID: tcpID{
|
||||
srcIP: net.Src().String(),
|
||||
dstIP: net.Dst().String(),
|
||||
srcPort: transport.Src().String(),
|
||||
dstPort: transport.Dst().String(),
|
||||
},
|
||||
hexdump: *hexdump,
|
||||
parent: stream,
|
||||
isClient: true,
|
||||
harWriter: factory.harWriter,
|
||||
}
|
||||
stream.server = httpReader{
|
||||
msgQueue: make(chan httpReaderDataMsg),
|
||||
ident: fmt.Sprintf("%s %s", net.Reverse(), transport.Reverse()),
|
||||
tcpID: tcpID{
|
||||
srcIP: net.Dst().String(),
|
||||
dstIP: net.Src().String(),
|
||||
srcPort: transport.Dst().String(),
|
||||
dstPort: transport.Src().String(),
|
||||
},
|
||||
hexdump: *hexdump,
|
||||
parent: stream,
|
||||
harWriter: factory.harWriter,
|
||||
}
|
||||
factory.wg.Add(2)
|
||||
// Start reading from channels stream.client.bytes and stream.server.bytes
|
||||
go stream.client.run(&factory.wg)
|
||||
go stream.server.run(&factory.wg)
|
||||
}
|
||||
return stream
|
||||
}
|
||||
|
||||
func (factory *tcpStreamFactory) WaitGoRoutines() {
|
||||
factory.wg.Wait()
|
||||
}
|
||||
|
||||
func (factory *tcpStreamFactory) shouldTap(dstIP string, dstPort int) bool {
|
||||
if hostMode {
|
||||
if inArrayString(HostAppAddresses, fmt.Sprintf("%s:%d", dstIP, dstPort)) == true {
|
||||
return true
|
||||
} else if inArrayString(HostAppAddresses, dstIP) == true {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
} else {
|
||||
isTappedPort := dstPort == 80 || (appPorts != nil && (inArrayInt(appPorts, dstPort)))
|
||||
if !isTappedPort {
|
||||
return false
|
||||
}
|
||||
|
||||
if !*anydirection {
|
||||
isDirectedHere := inArrayString(ownIps, dstIP)
|
||||
if !isDirectedHere {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func (factory *tcpStreamFactory) shouldNotifyOnOutboundLink(dstIP string, dstPort int) bool {
|
||||
if inArrayInt(remoteOnlyOutboundPorts, dstPort) {
|
||||
isDirectedHere := inArrayString(ownIps, dstIP)
|
||||
return !isDirectedHere && !isPrivateIP(dstIP)
|
||||
}
|
||||
return true
|
||||
}
|
||||
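For orientation, here is a minimal sketch of how a `reassembly.StreamFactory` like the one above is usually driven by gopacket. The `capturePackets` function, the `assemblerContext` type, and the chosen field values are assumptions for illustration, not Mizu's actual capture loop.

```go
package tap

import (
	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
	"github.com/google/gopacket/pcap"
	"github.com/google/gopacket/reassembly"
)

// assemblerContext satisfies reassembly.AssemblerContext by exposing the
// capture info of the packet currently being assembled.
type assemblerContext struct {
	ci gopacket.CaptureInfo
}

func (c *assemblerContext) GetCaptureInfo() gopacket.CaptureInfo { return c.ci }

// capturePackets (hypothetical driver loop) feeds live TCP packets into an
// assembler backed by the factory above; the assembler calls New() once per
// new TCP connection.
func capturePackets(ifaceName string) error {
	handle, err := pcap.OpenLive(ifaceName, 65536, true, pcap.BlockForever)
	if err != nil {
		return err
	}
	defer handle.Close()

	factory := &tcpStreamFactory{doHTTP: true} // harWriter left nil in this sketch
	pool := reassembly.NewStreamPool(factory)
	assembler := reassembly.NewAssembler(pool)

	source := gopacket.NewPacketSource(handle, handle.LinkType())
	for packet := range source.Packets() {
		tcpLayer := packet.Layer(layers.LayerTypeTCP)
		if tcpLayer == nil {
			continue
		}
		tcp := tcpLayer.(*layers.TCP)
		ctx := &assemblerContext{ci: packet.Metadata().CaptureInfo}
		assembler.AssembleWithContext(packet.NetworkLayer().NetworkFlow(), tcp, ctx)
	}

	assembler.FlushAll()
	factory.WaitGoRoutines()
	return nil
}
```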
@@ -1,42 +1,34 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/romana/rlog"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"log"
|
||||
"net/http"
|
||||
"mizuserver/pkg/models"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"reflect"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
// StartServer starts the server with a graceful shutdown
|
||||
func StartServer(app *gin.Engine) {
|
||||
func StartServer(app *fiber.App) {
|
||||
signals := make(chan os.Signal, 2)
|
||||
signal.Notify(signals,
|
||||
os.Interrupt, // this catch ctrl + c
|
||||
syscall.SIGTSTP, // this catch ctrl + z
|
||||
)
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: ":8080",
|
||||
Handler: app,
|
||||
}
|
||||
|
||||
go func() {
|
||||
_ = <-signals
|
||||
rlog.Infof("Shutting down...")
|
||||
ctx, _ := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
_ = srv.Shutdown(ctx)
|
||||
os.Exit(0)
|
||||
fmt.Println("Shutting down...")
|
||||
_ = app.Shutdown()
|
||||
}()
|
||||
|
||||
// Run server.
|
||||
if err := app.Run(":8899"); err != nil {
|
||||
if err := app.Listen(":8899"); err != nil {
|
||||
log.Printf("Oops... Server is not running! Reason: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -69,3 +61,27 @@ func SetHostname(address, newHostname string) string {
|
||||
return replacedUrl.String()
|
||||
|
||||
}
|
||||
|
||||
func GetResolvedBaseEntry(entry models.MizuEntry) models.BaseEntryDetails {
|
||||
entryUrl := entry.Url
|
||||
service := entry.Service
|
||||
if entry.ResolvedDestination != "" {
|
||||
entryUrl = SetHostname(entryUrl, entry.ResolvedDestination)
|
||||
service = SetHostname(service, entry.ResolvedDestination)
|
||||
}
|
||||
return models.BaseEntryDetails{
|
||||
Id: entry.EntryId,
|
||||
Url: entryUrl,
|
||||
Service: service,
|
||||
Path: entry.Path,
|
||||
StatusCode: entry.Status,
|
||||
Method: entry.Method,
|
||||
Timestamp: entry.Timestamp,
|
||||
RequestSenderIp: entry.RequestSenderIp,
|
||||
}
|
||||
}
|
||||
|
||||
func GetBytesFromStruct(v interface{}) []byte {
|
||||
a, _ := json.Marshal(v)
|
||||
return a
|
||||
}
|
||||
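`GetBytesFromStruct` above swallows the `json.Marshal` error and can silently return `nil`. A hedged alternative (the name `GetBytesFromStructSafe` is hypothetical and not part of this commit) that propagates the failure, reusing the file's existing `encoding/json` and `fmt` imports:

```go
// GetBytesFromStructSafe is a hypothetical variant that surfaces marshaling
// failures instead of returning nil bytes silently.
func GetBytesFromStructSafe(v interface{}) ([]byte, error) {
	data, err := json.Marshal(v)
	if err != nil {
		return nil, fmt.Errorf("failed marshaling %T: %w", v, err)
	}
	return data, nil
}
```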
api/start.sh (new executable file, 2 lines)
@@ -0,0 +1,2 @@
|
||||
#!/bin/bash
|
||||
./mizuagent -i any -hardump -targets ${TAPPED_ADDRESSES}
|
||||
(Several binary image files changed in this diff are not shown; one file diff was suppressed because its lines are too long.)
@@ -1,15 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
GCP_PROJECT=up9-docker-hub
|
||||
REPOSITORY=gcr.io/$GCP_PROJECT
|
||||
SERVER_NAME=mizu
|
||||
GIT_BRANCH=ci
|
||||
|
||||
DOCKER_REPO=$REPOSITORY/$SERVER_NAME/$GIT_BRANCH
|
||||
SEM_VER=${SEM_VER=0.0.0}
|
||||
|
||||
DOCKER_TAGGED_BUILD="$DOCKER_REPO:$SEM_VER"
|
||||
|
||||
echo "building $DOCKER_TAGGED_BUILD"
|
||||
docker build -t ${DOCKER_TAGGED_BUILD} --build-arg SEM_VER=${SEM_VER} --build-arg BUILD_TIMESTAMP=${BUILD_TIMESTAMP} --build-arg GIT_BRANCH=${GIT_BRANCH} --build-arg COMMIT_HASH=${COMMIT_HASH} .
|
||||
@@ -1,15 +1,11 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
SERVER_NAME=mizu
|
||||
GCP_PROJECT=up9-docker-hub
|
||||
REPOSITORY=gcr.io/$GCP_PROJECT
|
||||
SERVER_NAME=mizu
|
||||
GIT_BRANCH=$(git branch | grep \* | cut -d ' ' -f2 | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
DOCKER_REPO=$REPOSITORY/$SERVER_NAME/$GIT_BRANCH
|
||||
SEM_VER=${SEM_VER=0.0.0}
|
||||
|
||||
DOCKER_TAGGED_BUILDS=("$DOCKER_REPO:latest" "$DOCKER_REPO:$SEM_VER")
|
||||
DOCKER_TAGGED_BUILD=$REPOSITORY/$SERVER_NAME/$GIT_BRANCH:latest
|
||||
|
||||
if [ "$GIT_BRANCH" = 'develop' -o "$GIT_BRANCH" = 'master' -o "$GIT_BRANCH" = 'main' ]
|
||||
then
|
||||
@@ -17,12 +13,8 @@ then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "building ${DOCKER_TAGGED_BUILDS[@]}"
|
||||
DOCKER_TAGS_ARGS=$(echo ${DOCKER_TAGGED_BUILDS[@]/#/-t }) # "-t FIRST_TAG -t SECOND_TAG ..."
|
||||
docker build $DOCKER_TAGS_ARGS --build-arg SEM_VER=${SEM_VER} --build-arg BUILD_TIMESTAMP=${BUILD_TIMESTAMP} --build-arg GIT_BRANCH=${GIT_BRANCH} --build-arg COMMIT_HASH=${COMMIT_HASH} .
|
||||
echo "building $DOCKER_TAGGED_BUILD"
|
||||
docker build -t "$DOCKER_TAGGED_BUILD" .
|
||||
|
||||
for DOCKER_TAG in "${DOCKER_TAGGED_BUILDS[@]}"
|
||||
do
|
||||
echo pushing "$DOCKER_TAG"
|
||||
docker push "$DOCKER_TAG"
|
||||
done
|
||||
echo pushing to "$REPOSITORY"
|
||||
docker push "$DOCKER_TAGGED_BUILD"
|
||||
|
||||
cli/.gitignore (vendored, 1 line)
@@ -1 +0,0 @@
|
||||
bin
|
||||
cli/Makefile (14 lines)
@@ -3,7 +3,6 @@ COMMIT_HASH=$(shell git rev-parse HEAD)
|
||||
GIT_BRANCH=$(shell git branch --show-current | tr '[:upper:]' '[:lower:]')
|
||||
GIT_VERSION=$(shell git branch --show-current | tr '[:upper:]' '[:lower:]')
|
||||
BUILD_TIMESTAMP=$(shell date +%s)
|
||||
export SEM_VER?=0.0.0
|
||||
|
||||
.PHONY: help
|
||||
.DEFAULT_GOAL := help
|
||||
@@ -14,7 +13,7 @@ help: ## This help.
|
||||
install:
|
||||
go install mizu.go
|
||||
|
||||
build: ## Build mizu CLI binary (select platform via GOOS / GOARCH env variables).
|
||||
build: ## build mizu CLI binary (select platform via GOOS / GOARCH env variables)
|
||||
go build -ldflags="-X 'github.com/up9inc/mizu/cli/mizu.GitCommitHash=$(COMMIT_HASH)' \
|
||||
-X 'github.com/up9inc/mizu/cli/mizu.Branch=$(GIT_BRANCH)' \
|
||||
-X 'github.com/up9inc/mizu/cli/mizu.BuildTimestamp=$(BUILD_TIMESTAMP)' \
|
||||
@@ -22,23 +21,20 @@ build: ## Build mizu CLI binary (select platform via GOOS / GOARCH env variables
|
||||
-o bin/mizu_$(SUFFIX) mizu.go
|
||||
(cd bin && shasum -a 256 mizu_${SUFFIX} > mizu_${SUFFIX}.sha256)
|
||||
|
||||
build-all: ## Build for all supported platforms.
|
||||
build-all: ## build for all supported platforms
|
||||
@echo "Compiling for every OS and Platform"
|
||||
@mkdir -p bin && sed s/_SEM_VER_/$(SEM_VER)/g README.md.TEMPLATE > bin/README.md
|
||||
@mkdir -p bin && echo "SHA256 checksums available for compiled binaries \n\nRun \`shasum -a 256 -c mizu_OS_ARCH.sha256\` to verify\n\n" > bin/README.md
|
||||
@$(MAKE) build GOOS=darwin GOARCH=amd64
|
||||
@$(MAKE) build GOOS=linux GOARCH=amd64
|
||||
@# $(MAKE) build GOOS=darwin GOARCH=arm64
|
||||
@# $(MAKE) GOOS=windows GOARCH=amd64
|
||||
@# $(MAKE) GOOS=linux GOARCH=386
|
||||
@# $(MAKE) GOOS=windows GOARCH=386
|
||||
@# $(MAKE) GOOS=darwin GOARCH=arm64
|
||||
@# $(MAKE) GOOS=linux GOARCH=arm64
|
||||
@# $(MAKE) GOOS=windows GOARCH=arm64
|
||||
@echo "---------"
|
||||
@find ./bin -ls
|
||||
|
||||
clean: ## Clean all build artifacts.
|
||||
clean: ## clean all build artifacts
|
||||
go clean
|
||||
rm -rf ./bin/*
|
||||
|
||||
test: ## Run cli tests.
|
||||
@go test ./... -coverpkg=./... -race -coverprofile=coverage.out -covermode=atomic
|
||||
|
||||
cli/README.md (new file, 26 lines)
@@ -0,0 +1,26 @@
|
||||
# mizu CLI

## Usage

`./mizu {pod_name_regex}`

### Optional Flags

| flag           | default | purpose                                                      |
|----------------|---------|--------------------------------------------------------------|
| `--no-gui`     | `false` | Don't host the web interface (not applicable at the moment)  |
| `--gui-port`   | `8899`  | Local port that the web interface will be forwarded to       |
| `--namespace`  |         | Use a namespace other than the one found in kubeconfig       |
| `--kubeconfig` |         | Path to a custom kubeconfig file                             |

There are some extra flags defined in the code that show up in `./mizu --help`; these are non-functional stubs for now.

## Installation

Make sure your Go version is at least 1.11.
1. cd to `mizu/cli`
2. Run `go mod download` (may take a moment)
3. Run `go build mizu.go`

Alternatively, you can build and run directly using `go run mizu.go {pod_name_regex}`

## Known issues

* Mid-flight port-forwarding failures are not detected, and no indication is shown when they occur
|
||||
@@ -1,20 +0,0 @@
|
||||
# Mizu release _SEM_VER_
|
||||
|
||||
Download Mizu for your platform
|
||||
|
||||
**Mac** (on Intel chip)
|
||||
```
|
||||
curl -Lo mizu https://github.com/up9inc/mizu/releases/download/_SEM_VER_/mizu_darwin_amd64 && chmod 755 mizu
|
||||
```
|
||||
|
||||
**Linux**
|
||||
```
|
||||
curl -Lo mizu https://github.com/up9inc/mizu/releases/download/_SEM_VER_/mizu_linux_amd64 && chmod 755 mizu
|
||||
```
|
||||
|
||||
|
||||
### Checksums
|
||||
SHA256 checksums available for compiled binaries.
|
||||
Run `shasum -a 256 -c mizu_OS_ARCH.sha256` to verify.
|
||||
|
||||
|
||||
@@ -1,168 +0,0 @@
|
||||
package apiserver
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/cli/logger"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"github.com/up9inc/mizu/shared"
|
||||
"io/ioutil"
|
||||
core "k8s.io/api/core/v1"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
type apiServerProvider struct {
|
||||
url string
|
||||
isReady bool
|
||||
}
|
||||
|
||||
var Provider = apiServerProvider{}
|
||||
|
||||
func (provider *apiServerProvider) InitAndTestConnection(url string, retries int) error {
|
||||
healthUrl := fmt.Sprintf("%s/", url)
|
||||
retriesLeft := retries
|
||||
for retriesLeft > 0 {
|
||||
if response, err := http.Get(healthUrl); err != nil {
|
||||
logger.Log.Debugf("[ERROR] failed connecting to api server %v", err)
|
||||
} else if response.StatusCode != 200 {
|
||||
logger.Log.Debugf("can't connect to api server yet, response status code %v", response.StatusCode)
|
||||
} else {
|
||||
logger.Log.Debugf("connection test to api server passed successfully")
|
||||
break
|
||||
}
|
||||
retriesLeft -= 1
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
if retriesLeft == 0 {
|
||||
provider.isReady = false
|
||||
return fmt.Errorf("couldn't reach the api server after %v retries", retries)
|
||||
}
|
||||
provider.url = url
|
||||
provider.isReady = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (provider *apiServerProvider) ReportTappedPods(pods []core.Pod) error {
|
||||
if !provider.isReady {
|
||||
return fmt.Errorf("trying to reach api server when not initialized yet")
|
||||
}
|
||||
tappedPodsUrl := fmt.Sprintf("%s/status/tappedPods", provider.url)
|
||||
|
||||
podInfos := make([]shared.PodInfo, 0)
|
||||
for _, pod := range pods {
|
||||
podInfos = append(podInfos, shared.PodInfo{Name: pod.Name, Namespace: pod.Namespace})
|
||||
}
|
||||
tapStatus := shared.TapStatus{Pods: podInfos}
|
||||
|
||||
if jsonValue, err := json.Marshal(tapStatus); err != nil {
|
||||
return fmt.Errorf("failed Marshal the tapped pods %w", err)
|
||||
} else {
|
||||
if response, err := http.Post(tappedPodsUrl, "application/json", bytes.NewBuffer(jsonValue)); err != nil {
|
||||
return fmt.Errorf("failed sending to API server the tapped pods %w", err)
|
||||
} else if response.StatusCode != 200 {
|
||||
return fmt.Errorf("failed sending to API server the tapped pods, response status code %v", response.StatusCode)
|
||||
} else {
|
||||
logger.Log.Debugf("Reported to server API about %d taped pods successfully", len(podInfos))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (provider *apiServerProvider) RequestAnalysis(analysisDestination string, sleepIntervalSec int) error {
|
||||
if !provider.isReady {
|
||||
return fmt.Errorf("trying to reach api server when not initialized yet")
|
||||
}
|
||||
urlPath := fmt.Sprintf("%s/api/uploadEntries?dest=%s&interval=%v", provider.url, url.QueryEscape(analysisDestination), sleepIntervalSec)
|
||||
u, parseErr := url.ParseRequestURI(urlPath)
|
||||
if parseErr != nil {
|
||||
logger.Log.Fatal("Failed parsing the URL (consider changing the analysis dest URL), err: %v", parseErr)
|
||||
}
|
||||
|
||||
logger.Log.Debugf("Analysis url %v", u.String())
|
||||
if response, requestErr := http.Get(u.String()); requestErr != nil {
|
||||
return fmt.Errorf("failed to notify agent for analysis, err: %w", requestErr)
|
||||
} else if response.StatusCode != 200 {
|
||||
return fmt.Errorf("failed to notify agent for analysis, status code: %v", response.StatusCode)
|
||||
} else {
|
||||
logger.Log.Infof(uiUtils.Purple, "Traffic is uploading to UP9 for further analysis")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func (provider *apiServerProvider) GetGeneralStats() (map[string]interface{}, error) {
|
||||
if !provider.isReady {
|
||||
return nil, fmt.Errorf("trying to reach api server when not initialized yet")
|
||||
}
|
||||
generalStatsUrl := fmt.Sprintf("%s/api/generalStats", provider.url)
|
||||
|
||||
response, requestErr := http.Get(generalStatsUrl)
|
||||
if requestErr != nil {
|
||||
return nil, fmt.Errorf("failed to get general stats for telemetry, err: %w", requestErr)
|
||||
} else if response.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("failed to get general stats for telemetry, status code: %v", response.StatusCode)
|
||||
}
|
||||
|
||||
defer func() { _ = response.Body.Close() }()
|
||||
|
||||
data, readErr := ioutil.ReadAll(response.Body)
|
||||
if readErr != nil {
|
||||
return nil, fmt.Errorf("failed to read general stats for telemetry, err: %v", readErr)
|
||||
}
|
||||
|
||||
var generalStats map[string]interface{}
|
||||
if parseErr := json.Unmarshal(data, &generalStats); parseErr != nil {
|
||||
return nil, fmt.Errorf("failed to parse general stats for telemetry, err: %v", parseErr)
|
||||
}
|
||||
return generalStats, nil
|
||||
}
|
||||
|
||||
func (provider *apiServerProvider) GetHars(fromTimestamp int, toTimestamp int) (*zip.Reader, error) {
|
||||
if !provider.isReady {
|
||||
return nil, fmt.Errorf("trying to reach api server when not initialized yet")
|
||||
}
|
||||
resp, err := http.Get(fmt.Sprintf("%s/api/har?from=%v&to=%v", provider.url, fromTimestamp, toTimestamp))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed getting har from api server %w", err)
|
||||
}
|
||||
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed reading hars %w", err)
|
||||
}
|
||||
|
||||
zipReader, err := zip.NewReader(bytes.NewReader(body), int64(len(body)))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed craeting zip reader %w", err)
|
||||
}
|
||||
return zipReader, nil
|
||||
}
|
||||
|
||||
func (provider *apiServerProvider) GetVersion() (string, error) {
|
||||
if !provider.isReady {
|
||||
return "", fmt.Errorf("trying to reach api server when not initialized yet")
|
||||
}
|
||||
versionUrl, _ := url.Parse(fmt.Sprintf("%s/metadata/version", provider.url))
|
||||
req := &http.Request{
|
||||
Method: http.MethodGet,
|
||||
URL: versionUrl,
|
||||
}
|
||||
statusResp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer statusResp.Body.Close()
|
||||
|
||||
versionResponse := &shared.VersionResponse{}
|
||||
if err := json.NewDecoder(statusResp.Body).Decode(&versionResponse); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return versionResponse.SemVer, nil
|
||||
}
|
||||
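For context, a minimal sketch of how the provider above is typically consumed from a CLI command: initialize it against the proxied API server URL, then issue a request. The wrapper function `printAgentVersion` is hypothetical; `GetApiServerUrl` is the helper defined in the file shown next, and `fmt` plus the `apiserver` package are assumed to be imported in the surrounding `cmd` package.

```go
// printAgentVersion (hypothetical caller): initialize the provider, then
// query the agent version over the established connection.
func printAgentVersion() error {
	if err := apiserver.Provider.InitAndTestConnection(GetApiServerUrl(), 5); err != nil {
		return fmt.Errorf("API server is not reachable: %w", err)
	}
	semVer, err := apiserver.Provider.GetVersion()
	if err != nil {
		return fmt.Errorf("failed reading agent version: %w", err)
	}
	fmt.Printf("agent version: %s\n", semVer)
	return nil
}
```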
@@ -1,45 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/cli/config"
|
||||
"github.com/up9inc/mizu/cli/config/configStructs"
|
||||
"github.com/up9inc/mizu/cli/errormessage"
|
||||
"github.com/up9inc/mizu/cli/kubernetes"
|
||||
"github.com/up9inc/mizu/cli/logger"
|
||||
"github.com/up9inc/mizu/cli/mizu"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func GetApiServerUrl() string {
|
||||
return fmt.Sprintf("http://%s", kubernetes.GetMizuApiServerProxiedHostAndPath(config.Config.Tap.GuiPort))
|
||||
}
|
||||
|
||||
func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
|
||||
err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.GuiPort, config.Config.MizuResourcesNamespace, mizu.ApiServerPodName)
|
||||
if err != nil {
|
||||
logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error occured while running k8s proxy %v\n"+
|
||||
"Try setting different port by using --%s", errormessage.FormatError(err), configStructs.GuiPortTapName))
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
|
||||
func waitForFinish(ctx context.Context, cancel context.CancelFunc) {
|
||||
logger.Log.Debugf("waiting for finish...")
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
|
||||
|
||||
// block until ctx cancel is called or termination signal is received
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
logger.Log.Debugf("ctx done")
|
||||
break
|
||||
case <-sigChan:
|
||||
logger.Log.Debugf("Got termination signal, canceling execution...")
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
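A short sketch of how the two helpers above are meant to be combined (the wrapper `runUntilInterrupted` is hypothetical): the proxy runs in the background and cancels the shared context on failure, while `waitForFinish` blocks until cancellation or a termination signal arrives.

```go
// runUntilInterrupted (hypothetical) wires the helpers together: the proxy is
// started with a cancel function it can trigger on error, and waitForFinish
// blocks until the context is cancelled or the process is signalled.
func runUntilInterrupted(kubernetesProvider *kubernetes.Provider) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go startProxyReportErrorIfAny(kubernetesProvider, cancel)
	waitForFinish(ctx, cancel)
}
```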
@@ -1,48 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/up9inc/mizu/cli/config"
|
||||
"github.com/up9inc/mizu/cli/config/configStructs"
|
||||
"github.com/up9inc/mizu/cli/logger"
|
||||
"github.com/up9inc/mizu/cli/telemetry"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"io/ioutil"
|
||||
)
|
||||
|
||||
var configCmd = &cobra.Command{
|
||||
Use: "config",
|
||||
Short: "Generate config with default values",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
go telemetry.ReportRun("config", config.Config.Config)
|
||||
|
||||
template, err := config.GetConfigWithDefaults()
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Failed generating config with defaults %v", err)
|
||||
return nil
|
||||
}
|
||||
if config.Config.Config.Regenerate {
|
||||
data := []byte(template)
|
||||
if err := ioutil.WriteFile(config.Config.ConfigFilePath, data, 0644); err != nil {
|
||||
logger.Log.Errorf("Failed writing config %v", err)
|
||||
return nil
|
||||
}
|
||||
logger.Log.Infof(fmt.Sprintf("Template File written to %s", fmt.Sprintf(uiUtils.Purple, config.Config.ConfigFilePath)))
|
||||
} else {
|
||||
logger.Log.Debugf("Writing template config.\n%v", template)
|
||||
fmt.Printf("%v", template)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(configCmd)
|
||||
|
||||
defaultConfig := config.ConfigStruct{}
|
||||
defaults.Set(&defaultConfig)
|
||||
|
||||
configCmd.Flags().BoolP(configStructs.RegenerateConfigName, "r", defaultConfig.Config.Regenerate, fmt.Sprintf("Regenerate the config file with default values to path %s or to chosen path using --%s", defaultConfig.ConfigFilePath, config.ConfigFilePathCommandName))
|
||||
}
|
||||
@@ -1,34 +1,21 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/up9inc/mizu/cli/apiserver"
|
||||
"github.com/up9inc/mizu/cli/config"
|
||||
"github.com/up9inc/mizu/cli/config/configStructs"
|
||||
"github.com/up9inc/mizu/cli/logger"
|
||||
"github.com/up9inc/mizu/cli/mizu/version"
|
||||
"github.com/up9inc/mizu/cli/telemetry"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
)
|
||||
|
||||
type MizuFetchOptions struct {
|
||||
Limit uint16
|
||||
Directory string
|
||||
}
|
||||
|
||||
var mizuFetchOptions = MizuFetchOptions{}
|
||||
|
||||
var fetchCmd = &cobra.Command{
|
||||
Use: "fetch",
|
||||
Short: "Download recorded traffic to files",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
go telemetry.ReportRun("fetch", config.Config.Fetch)
|
||||
|
||||
if err := apiserver.Provider.InitAndTestConnection(GetApiServerUrl(), 1); err != nil {
|
||||
logger.Log.Errorf(uiUtils.Error, "Couldn't connect to API server, make sure one running")
|
||||
return nil
|
||||
}
|
||||
|
||||
if isCompatible, err := version.CheckVersionCompatibility(); err != nil {
|
||||
return err
|
||||
} else if !isCompatible {
|
||||
return nil
|
||||
}
|
||||
RunMizuFetch()
|
||||
RunMizuFetch(&mizuFetchOptions)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
@@ -36,11 +23,6 @@ var fetchCmd = &cobra.Command{
|
||||
func init() {
|
||||
rootCmd.AddCommand(fetchCmd)
|
||||
|
||||
defaultFetchConfig := configStructs.FetchConfig{}
|
||||
defaults.Set(&defaultFetchConfig)
|
||||
|
||||
fetchCmd.Flags().StringP(configStructs.DirectoryFetchName, "d", defaultFetchConfig.Directory, "Provide a custom directory for fetched entries")
|
||||
fetchCmd.Flags().Int(configStructs.FromTimestampFetchName, defaultFetchConfig.FromTimestamp, "Custom start timestamp for fetched entries")
|
||||
fetchCmd.Flags().Int(configStructs.ToTimestampFetchName, defaultFetchConfig.ToTimestamp, "Custom end timestamp fetched entries")
|
||||
fetchCmd.Flags().Uint16P(configStructs.GuiPortFetchName, "p", defaultFetchConfig.GuiPort, "Provide a custom port for the web interface webserver")
|
||||
fetchCmd.Flags().Uint16VarP(&mizuFetchOptions.Limit, "limit", "l", 1000, "Provide a custom limit for entries to fetch")
|
||||
fetchCmd.Flags().StringVarP(&mizuFetchOptions.Directory, "directory", "d", ".", "Provide a custom directory for fetched entries")
|
||||
}
|
||||
|
||||
@@ -1,25 +1,94 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/up9inc/mizu/cli/apiserver"
|
||||
"github.com/up9inc/mizu/cli/config"
|
||||
"github.com/up9inc/mizu/cli/logger"
|
||||
"github.com/up9inc/mizu/cli/mizu/fsUtils"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func RunMizuFetch() {
|
||||
if err := apiserver.Provider.InitAndTestConnection(GetApiServerUrl(), 5); err != nil {
|
||||
logger.Log.Errorf(uiUtils.Error, "Couldn't connect to API server, check logs")
|
||||
}
|
||||
|
||||
zipReader, err := apiserver.Provider.GetHars(config.Config.Fetch.FromTimestamp, config.Config.Fetch.ToTimestamp)
|
||||
func RunMizuFetch(fetch *MizuFetchOptions) {
|
||||
resp, err := http.Get(fmt.Sprintf("http://localhost:8899/api/har?limit=%v", fetch.Limit))
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Failed fetch data from API server %v", err)
|
||||
return
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if err := fsUtils.Unzip(zipReader, config.Config.Fetch.Directory); err != nil {
|
||||
logger.Log.Debugf("[ERROR] failed unzip %v", err)
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
zipReader, err := zip.NewReader(bytes.NewReader(body), int64(len(body)))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
_ = Unzip(zipReader, fetch.Directory)
|
||||
|
||||
}
|
||||
|
||||
func Unzip(reader *zip.Reader, dest string) error {
|
||||
dest, _ = filepath.Abs(dest)
|
||||
_ = os.MkdirAll(dest, os.ModePerm)
|
||||
|
||||
// Closure to address file descriptors issue with all the deferred .Close() methods
|
||||
extractAndWriteFile := func(f *zip.File) error {
|
||||
rc, err := f.Open()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := rc.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
path := filepath.Join(dest, f.Name)
|
||||
|
||||
// Check for ZipSlip (Directory traversal)
|
||||
if !strings.HasPrefix(path, filepath.Clean(dest) + string(os.PathSeparator)) {
|
||||
return fmt.Errorf("illegal file path: %s", path)
|
||||
}
|
||||
|
||||
if f.FileInfo().IsDir() {
|
||||
_ = os.MkdirAll(path, f.Mode())
|
||||
} else {
|
||||
_ = os.MkdirAll(filepath.Dir(path), f.Mode())
|
||||
fmt.Print("writing HAR file [ ", path, " ] .. ")
|
||||
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := f.Close(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
fmt.Println(" done")
|
||||
}()
|
||||
|
||||
_, err = io.Copy(f, rc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, f := range reader.File {
|
||||
err := extractAndWriteFile(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
|
||||
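A small usage sketch for the `Unzip` helper above, assuming a HAR archive already saved to disk; the function name `extractHarArchive` and the paths are hypothetical, and `archive/zip` plus `fmt` are already imported in this file.

```go
// extractHarArchive (hypothetical) opens a zip archive from disk and extracts
// it with the Unzip function defined above.
func extractHarArchive(archivePath, destDir string) error {
	reader, err := zip.OpenReader(archivePath)
	if err != nil {
		return fmt.Errorf("failed opening %s: %w", archivePath, err)
	}
	defer reader.Close()

	return Unzip(&reader.Reader, destDir)
}
```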
@@ -1,50 +0,0 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/up9inc/mizu/cli/config"
|
||||
"github.com/up9inc/mizu/cli/config/configStructs"
|
||||
"github.com/up9inc/mizu/cli/errormessage"
|
||||
"github.com/up9inc/mizu/cli/kubernetes"
|
||||
"github.com/up9inc/mizu/cli/logger"
|
||||
"github.com/up9inc/mizu/cli/mizu/fsUtils"
|
||||
"github.com/up9inc/mizu/cli/telemetry"
|
||||
)
|
||||
|
||||
var logsCmd = &cobra.Command{
|
||||
Use: "logs",
|
||||
Short: "Create a zip file with logs for Github issue or troubleshoot",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
go telemetry.ReportRun("logs", config.Config.Logs)
|
||||
|
||||
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath())
|
||||
if err != nil {
|
||||
logger.Log.Error(err)
|
||||
return nil
|
||||
}
|
||||
ctx, _ := context.WithCancel(context.Background())
|
||||
|
||||
if validationErr := config.Config.Logs.Validate(); validationErr != nil {
|
||||
return errormessage.FormatError(validationErr)
|
||||
}
|
||||
|
||||
logger.Log.Debugf("Using file path %s", config.Config.Logs.FilePath())
|
||||
|
||||
if dumpLogsErr := fsUtils.DumpLogs(kubernetesProvider, ctx, config.Config.Logs.FilePath()); dumpLogsErr != nil {
|
||||
logger.Log.Errorf("Failed dump logs %v", dumpLogsErr)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(logsCmd)
|
||||
|
||||
defaultLogsConfig := configStructs.LogsConfig{}
|
||||
defaults.Set(&defaultLogsConfig)
|
||||
|
||||
logsCmd.Flags().StringP(configStructs.FileLogsName, "f", defaultLogsConfig.FileStr, "Path for zip file (default current <pwd>\\mizu_logs.zip)")
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff.