Mirror of https://github.com/kubeshark/kubeshark.git (synced 2026-02-15 18:39:58 +00:00)

Compare commits: 32 commits, 32.0-dev8...33.0-dev13
| Author | SHA1 | Date |
|---|---|---|
|  | 5c012641a5 |  |
|  | 74bd4b180f |  |
|  | 8ea2dabb34 |  |
|  | 366d34b8d0 |  |
|  | 5fc3e38c1a |  |
|  | 09a0fca2c2 |  |
|  | 0437586908 |  |
|  | f8181ccb07 |  |
|  | 414e5cfe5a |  |
|  | 2fac0009ea |  |
|  | 36d59ede07 |  |
|  | ac53508ad7 |  |
|  | e24c18254c |  |
|  | b2830f133f |  |
|  | eef0ee8023 |  |
|  | 4c0aeb8146 |  |
|  | 0dc0459dff |  |
|  | 81f06003d5 |  |
|  | c7d068748a |  |
|  | 8102c49138 |  |
|  | 65fb2a4fe4 |  |
|  | 57f8a8dca9 |  |
|  | aead6cbc19 |  |
|  | 2f89690da6 |  |
|  | 3e47abf208 |  |
|  | 3cbccccb8b |  |
|  | 08ae2bf6d7 |  |
|  | 684c51686f |  |
|  | 1de50b0572 |  |
|  | 0881dad17f |  |
|  | cade960b9b |  |
|  | d3e6a69d82 |  |
23  .github/workflows/static_code_analysis.yml  vendored

@@ -10,7 +10,7 @@ permissions:
  contents: read

jobs:
  golangci:
  go-lint:
    name: Go lint
    runs-on: ubuntu-latest
    steps:
@@ -141,7 +141,26 @@ jobs:
        with:
          version: latest
          working-directory: tap/extensions/redis

      - name: Check logger modified files
        id: logger_modified_files
        run: devops/check_modified_files.sh logger/

      - name: Go lint - logger
        uses: golangci/golangci-lint-action@v2
        if: steps.logger_modified_files.outputs.matched == 'true'
        with:
          version: latest
          working-directory: logger

  es-lint:
    name: ES lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 2

      - uses: actions/setup-node@v2
        with:
          node-version: 16
5  .gitignore  vendored

@@ -52,4 +52,7 @@ tap/extensions/*/expect
# UI folders to ignore
**/node_modules/**
**/dist/**
*.editorconfig

# Ignore *.log files
*.log
14  Dockerfile

@@ -44,11 +44,18 @@ ENV CGO_ENABLED=1 GOOS=linux
ENV GOARCH=arm64 CGO_CFLAGS="-I/work/libpcap"


### Builder image for AArch64 to x86-64 cross-compilation
FROM up9inc/linux-x86_64-musl-go-libpcap AS builder-from-arm64v8-to-amd64
ENV CGO_ENABLED=1 GOOS=linux
ENV GOARCH=amd64 CGO_CFLAGS="-I/libpcap"


### Final builder image where the build happens
# Possible build strategies:
# BUILDARCH=amd64 TARGETARCH=amd64
# BUILDARCH=arm64v8 TARGETARCH=arm64v8
# BUILDARCH=amd64 TARGETARCH=arm64v8
# BUILDARCH=arm64v8 TARGETARCH=amd64
ARG BUILDARCH=amd64
ARG TARGETARCH=amd64
FROM builder-from-${BUILDARCH}-to-${TARGETARCH} AS builder

@@ -66,9 +73,6 @@ COPY tap/extensions/http/go.mod ../tap/extensions/http/
COPY tap/extensions/kafka/go.mod ../tap/extensions/kafka/
COPY tap/extensions/redis/go.mod ../tap/extensions/redis/
RUN go mod download
# cheap trick to make the build faster (as long as go.mod did not change)
RUN go get github.com/patrickmn/go-cache
RUN go list -f '{{.Path}}@{{.Version}}' -m all | sed 1d | grep -e 'go-cache' | xargs go get

# Copy and build agent code
COPY shared ../shared

@@ -90,8 +94,8 @@ RUN go build -ldflags="-extldflags=-static -s -w \
    -X 'github.com/up9inc/mizu/agent/pkg/version.Ver=${VER}'" -o mizuagent .

# Download Basenine executable, verify the sha1sum
ADD https://github.com/up9inc/basenine/releases/download/v0.7.3/basenine_linux_${GOARCH} ./basenine_linux_${GOARCH}
ADD https://github.com/up9inc/basenine/releases/download/v0.7.3/basenine_linux_${GOARCH}.sha256 ./basenine_linux_${GOARCH}.sha256
ADD https://github.com/up9inc/basenine/releases/download/v0.8.2/basenine_linux_${GOARCH} ./basenine_linux_${GOARCH}
ADD https://github.com/up9inc/basenine/releases/download/v0.8.2/basenine_linux_${GOARCH}.sha256 ./basenine_linux_${GOARCH}.sha256

RUN shasum -a 256 -c basenine_linux_"${GOARCH}".sha256 && \
    chmod +x ./basenine_linux_"${GOARCH}" && \
@@ -57,13 +57,6 @@ export function rightOnHoverCheck(path, expectedText) {
    cy.get(`#rightSideContainer [data-cy='QueryableTooltip']`).invoke('text').should('match', new RegExp(expectedText));
}

export function checkThatAllEntriesShown() {
    cy.get('#entries-length').then(number => {
        if (number.text() === '1')
            cy.get('[title="Fetch old records"]').click();
    });
}

export function checkFilterByMethod(funcDict) {
    const {protocol, method, methodQuery, summary, summaryQuery} = funcDict;
    const summaryDict = getSummaryDict(summary, summaryQuery);

@@ -76,12 +69,7 @@ export function checkFilterByMethod(funcDict) {
    cy.get('[type="submit"]').click();
    cy.get('.w-tc-editor').should('have.attr', 'style').and('include', Cypress.env('greenFilterColor'));

    cy.get('#entries-length').then(number => {
        // if the entries list isn't expanded it expands here
        if (number.text() === '0' || number.text() === '1') // todo change when TRA-4262 is fixed
            cy.get('[title="Fetch old records"]').click();

    cy.get('#entries-length').should('not.have.text', '0').and('not.have.text', '1').then(() => {
    cy.get('#entries-length').should('not.have.text', '0').then(() => {
        cy.get(`#list [id]`).then(elements => {
            const listElmWithIdAttr = Object.values(elements);
            let doneCheckOnFirst = false;

@@ -108,7 +96,6 @@ export function checkFilterByMethod(funcDict) {
            });
        });
    });
    });
}

export function getEntryId(id) {
@@ -1,5 +1,4 @@
import {
    checkThatAllEntriesShown,
    isValueExistsInElement,
    resizeToHugeMizu,
} from "../testHelpers/TrafficHelper";

@@ -12,13 +11,14 @@ checkEntries();

function checkEntries() {
    it('checking all entries', function () {
        checkThatAllEntriesShown();
        resizeToHugeMizu();
        cy.get('#entries-length').should('not.have.text', '0').then(() => {
            resizeToHugeMizu();

        cy.get('#list [id^=entry]').each(entryElement => {
            entryElement.click();
            cy.get('#tbody-Headers').should('be.visible');
            isValueExistsInElement(false, 'Ignored-User-Agent', '#tbody-Headers');
            cy.get('#list [id^=entry]').each(entryElement => {
                entryElement.click();
                cy.get('#tbody-Headers').should('be.visible');
                isValueExistsInElement(false, 'Ignored-User-Agent', '#tbody-Headers');
            });
        });
    });
}
@@ -65,70 +65,70 @@ it('right side sanity test', function () {
    checkIllegalFilter('invalid filter');

    checkFilter({
        name: 'http',
        filter: 'http',
        leftSidePath: '> :nth-child(1) > :nth-child(1)',
        leftSideExpectedText: 'HTTP',
        rightSidePath: '[title=HTTP]',
        rightSideExpectedText: 'Hypertext Transfer Protocol -- HTTP/1.1',
        applyByEnter: true
        applyByCtrlEnter: true
    });

    checkFilter({
        name: 'response.status == 200',
        filter: 'response.status == 200',
        leftSidePath: '[title="Status Code"]',
        leftSideExpectedText: '200',
        rightSidePath: '> :nth-child(2) [title="Status Code"]',
        rightSideExpectedText: '200',
        applyByEnter: false
        applyByCtrlEnter: false
    });

    if (Cypress.env('shouldCheckSrcAndDest')) {
        serviceMapCheck();

        checkFilter({
            name: 'src.name == ""',
            filter: 'src.name == ""',
            leftSidePath: '[title="Source Name"]',
            leftSideExpectedText: '[Unresolved]',
            rightSidePath: '> :nth-child(2) [title="Source Name"]',
            rightSideExpectedText: '[Unresolved]',
            applyByEnter: false
            applyByCtrlEnter: false
        });

        checkFilter({
            name: `dst.name == "httpbin.mizu-tests"`,
            filter: `dst.name == "httpbin.mizu-tests"`,
            leftSidePath: '> :nth-child(3) > :nth-child(2) > :nth-child(3) > :nth-child(2)',
            leftSideExpectedText: 'httpbin.mizu-tests',
            rightSidePath: '> :nth-child(2) > :nth-child(2) > :nth-child(2) > :nth-child(3) > :nth-child(2)',
            rightSideExpectedText: 'httpbin.mizu-tests',
            applyByEnter: false
            applyByCtrlEnter: false
        });
    }

    checkFilter({
        name: 'request.method == "GET"',
        filter: 'request.method == "GET"',
        leftSidePath: '> :nth-child(3) > :nth-child(1) > :nth-child(1) > :nth-child(2)',
        leftSideExpectedText: 'GET',
        rightSidePath: '> :nth-child(2) > :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(2)',
        rightSideExpectedText: 'GET',
        applyByEnter: true
        applyByCtrlEnter: true
    });

    checkFilter({
        name: 'request.path == "/get"',
        filter: 'request.path == "/get"',
        leftSidePath: '> :nth-child(3) > :nth-child(1) > :nth-child(2) > :nth-child(2)',
        leftSideExpectedText: '/get',
        rightSidePath: '> :nth-child(2) > :nth-child(2) > :nth-child(1) > :nth-child(2) > :nth-child(2)',
        rightSideExpectedText: '/get',
        applyByEnter: false
        applyByCtrlEnter: false
    });

    checkFilter({
        name: 'src.ip == "127.0.0.1"',
        filter: 'src.ip == "127.0.0.1"',
        leftSidePath: '[title="Source IP"]',
        leftSideExpectedText: '127.0.0.1',
        rightSidePath: '> :nth-child(2) [title="Source IP"]',
        rightSideExpectedText: '127.0.0.1',
        applyByEnter: false
        applyByCtrlEnter: false
    });

    checkFilterNoResults('request.method == "POST"');

@@ -182,17 +182,19 @@ function checkIllegalFilter(illegalFilterName) {

function checkFilter(filterDetails) {
    const {
        name,
        filter,
        leftSidePath,
        rightSidePath,
        rightSideExpectedText,
        leftSideExpectedText,
        applyByEnter
        applyByCtrlEnter
    } = filterDetails;

    const entriesForDeeperCheck = 5;

    it(`checking the filter: ${name}`, function () {
    it(`checking the filter: ${filter}`, function () {
        waitForFetch50AndPause();

        cy.get('#total-entries').should('not.have.text', '0').then(number => {
            const totalEntries = number.text();

@@ -200,30 +202,28 @@ function checkFilter(filterDetails) {
                const element = elem[0];
                const entryId = getEntryId(element.id);
                // checks the hover on the last entry (the only one in DOM at the beginning)
                leftOnHoverCheck(entryId, leftSidePath, name);
                leftOnHoverCheck(entryId, leftSidePath, filter);

                cy.get('.w-tc-editor-text').clear();
                // applying the filter with alt+enter or with the button
                cy.get('.w-tc-editor-text').type(`${name}${applyByEnter ? '{alt+enter}' : ''}`);
                cy.get('.w-tc-editor-text').type(`${filter}${applyByCtrlEnter ? '{ctrl+enter}' : ''}`);
                cy.get('.w-tc-editor').should('have.attr', 'style').and('include', Cypress.env('greenFilterColor'));
                if (!applyByEnter)
                if (!applyByCtrlEnter)
                    cy.get('[type="submit"]').click();

                waitForFetch50AndPause();

                // only one entry in DOM after filtering, checking all checks on it
                leftTextCheck(entryId, leftSidePath, leftSideExpectedText);
                leftOnHoverCheck(entryId, leftSidePath, name);
                leftOnHoverCheck(entryId, leftSidePath, filter);

                rightTextCheck(rightSidePath, rightSideExpectedText);
                rightOnHoverCheck(rightSidePath, name);
                rightOnHoverCheck(rightSidePath, filter);
                checkRightSideResponseBody();
            });

            cy.get('[title="Fetch old records"]').click();
            resizeToHugeMizu();

            // waiting for the entries number to load
            cy.get('#entries-length', {timeout: refreshWaitTimeout}).should('have.text', totalEntries);

            // checking only 'leftTextCheck' on all entries because the rest of the checks require more time
            cy.get(`#list [id^=entry]`).each(elem => {
                const element = elem[0];

@@ -232,7 +232,7 @@ function checkFilter(filterDetails) {
            });

            // making the other 3 checks on the first X entries (longer time for each check)
            deeperCheck(leftSidePath, rightSidePath, name, leftSideExpectedText, rightSideExpectedText, entriesForDeeperCheck);
            deeperCheck(leftSidePath, rightSidePath, filter, rightSideExpectedText, entriesForDeeperCheck);

            // reloading then waiting for the entries number to load
            resizeToNormalMizu();

@@ -242,7 +242,14 @@ function checkFilter(filterDetails) {
        });
}

function deeperCheck(leftSidePath, rightSidePath, filterName, leftSideExpectedText, rightSideExpectedText, entriesNumToCheck) {
function waitForFetch50AndPause() {
    // wait half a second and pause the stream to preserve the DOM
    cy.wait(500);
    cy.get('#pause-icon').click();
    cy.get('#pause-icon').should('not.be.visible');
}

function deeperCheck(leftSidePath, rightSidePath, filterName, rightSideExpectedText, entriesNumToCheck) {
    cy.get(`#list [id^=entry]`).each((element, index) => {
        if (index < entriesNumToCheck) {
            const entryId = getEntryId(element[0].id);
@@ -1,7 +1,10 @@
#!/bin/bash
set -e

PREFIX=$HOME/local/bin
VERSION=v1.22.0
TUNNEL_LOG="tunnel.log"
PROXY_LOG="proxy.log"

echo "Attempting to install minikube and assorted tools to $PREFIX"

@@ -11,7 +14,7 @@ if ! [ -x "$(command -v kubectl)" ]; then
    chmod +x kubectl
    mv kubectl "$PREFIX"
else
    echo "kubetcl is already installed"
    echo "kubectl is already installed"
fi

if ! [ -x "$(command -v minikube)" ]; then

@@ -27,35 +30,39 @@ echo "Starting minikube..."
minikube start

echo "Creating mizu tests namespaces"
kubectl create namespace mizu-tests
kubectl create namespace mizu-tests2
kubectl create namespace mizu-tests --dry-run=client -o yaml | kubectl apply -f -
kubectl create namespace mizu-tests2 --dry-run=client -o yaml | kubectl apply -f -

echo "Creating httpbin deployments"
kubectl create deployment httpbin --image=kennethreitz/httpbin -n mizu-tests
kubectl create deployment httpbin2 --image=kennethreitz/httpbin -n mizu-tests
kubectl create deployment httpbin --image=kennethreitz/httpbin -n mizu-tests --dry-run=client -o yaml | kubectl apply -f -
kubectl create deployment httpbin2 --image=kennethreitz/httpbin -n mizu-tests --dry-run=client -o yaml | kubectl apply -f -

kubectl create deployment httpbin --image=kennethreitz/httpbin -n mizu-tests2
kubectl create deployment httpbin --image=kennethreitz/httpbin -n mizu-tests2 --dry-run=client -o yaml | kubectl apply -f -

echo "Creating redis deployment"
kubectl create deployment redis --image=redis -n mizu-tests
kubectl create deployment redis --image=redis -n mizu-tests --dry-run=client -o yaml | kubectl apply -f -

echo "Creating rabbitmq deployment"
kubectl create deployment rabbitmq --image=rabbitmq -n mizu-tests
kubectl create deployment rabbitmq --image=rabbitmq -n mizu-tests --dry-run=client -o yaml | kubectl apply -f -

echo "Creating httpbin services"
kubectl expose deployment httpbin --type=NodePort --port=80 -n mizu-tests
kubectl expose deployment httpbin2 --type=NodePort --port=80 -n mizu-tests
kubectl expose deployment httpbin --type=NodePort --port=80 -n mizu-tests --dry-run=client -o yaml | kubectl apply -f -
kubectl expose deployment httpbin2 --type=NodePort --port=80 -n mizu-tests --dry-run=client -o yaml | kubectl apply -f -

kubectl expose deployment httpbin --type=NodePort --port=80 -n mizu-tests2
kubectl expose deployment httpbin --type=NodePort --port=80 -n mizu-tests2 --dry-run=client -o yaml | kubectl apply -f -

echo "Creating redis service"
kubectl expose deployment redis --type=LoadBalancer --port=6379 -n mizu-tests
kubectl expose deployment redis --type=LoadBalancer --port=6379 -n mizu-tests --dry-run=client -o yaml | kubectl apply -f -

echo "Creating rabbitmq service"
kubectl expose deployment rabbitmq --type=LoadBalancer --port=5672 -n mizu-tests
kubectl expose deployment rabbitmq --type=LoadBalancer --port=5672 -n mizu-tests --dry-run=client -o yaml | kubectl apply -f -

# TODO: need to understand how to fail if address already in use
echo "Starting proxy"
kubectl proxy --port=8080 &
rm -f ${PROXY_LOG}
kubectl proxy --port=8080 > ${PROXY_LOG} &
PID1=$!
echo "kubectl proxy process id is ${PID1} and log of proxy in ${PROXY_LOG}"

if [[ -z "${CI}" ]]; then
    echo "Setting env var of mizu ci image"

@@ -71,5 +78,9 @@ minikube image load "${MIZU_CI_IMAGE}"
echo "Build cli"
cd cli && make build GIT_BRANCH=ci SUFFIX=ci

# TODO: need to understand how to fail if password is asked (sudo)
echo "Starting tunnel"
minikube tunnel &
rm -f ${TUNNEL_LOG}
minikube tunnel > ${TUNNEL_LOG} &
PID2=$!
echo "Minikube tunnel process id is ${PID2} and log of tunnel in ${TUNNEL_LOG}"
@@ -6,7 +6,6 @@ require (
	github.com/antelman107/net-wait-go v0.0.0-20210623112055-cf684aebda7b
	github.com/chanced/openapi v0.0.8
	github.com/djherbis/atime v1.1.0
	github.com/elastic/go-elasticsearch/v7 v7.17.0
	github.com/getkin/kin-openapi v0.89.0
	github.com/gin-contrib/static v0.0.1
	github.com/gin-gonic/gin v1.7.7

@@ -20,7 +19,7 @@ require (
	github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
	github.com/orcaman/concurrent-map v1.0.0
	github.com/stretchr/testify v1.7.0
	github.com/up9inc/basenine/client/go v0.0.0-20220419100955-e2ca51087607
	github.com/up9inc/basenine/client/go v0.0.0-20220509204026-c37adfc587f4
	github.com/up9inc/mizu/logger v0.0.0
	github.com/up9inc/mizu/shared v0.0.0
	github.com/up9inc/mizu/tap v0.0.0

@@ -76,6 +75,7 @@ require (
	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
	github.com/googleapis/gnostic v0.5.5 // indirect
	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
	github.com/hashicorp/golang-lru v0.5.4 // indirect
	github.com/imdario/mergo v0.3.12 // indirect
	github.com/inconshreveable/mousetrap v1.0.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect

@@ -85,6 +85,7 @@ require (
	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/mattn/go-isatty v0.0.14 // indirect
	github.com/mertyildiran/gqlparser/v2 v2.4.6 // indirect
	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
	github.com/moby/spdystream v0.2.0 // indirect
	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect

@@ -110,7 +111,7 @@ require (
	github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect
	github.com/xlab/treeprint v1.1.0 // indirect
	go.starlark.net v0.0.0-20220203230714-bb14e151c28f // indirect
	golang.org/x/crypto v0.0.0-20220208050332-20e1d8d225ab // indirect
	golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b // indirect
	golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
	golang.org/x/sys v0.0.0-20220207234003-57398862261d // indirect
13  agent/go.sum

@@ -83,6 +83,7 @@ github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tN
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=

@@ -165,8 +166,6 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/elastic/go-elasticsearch/v7 v7.17.0 h1:0fcSh4qeC/i1+7QU1KXpmq2iUAdMk4l0/vmbtW1+KJM=
github.com/elastic/go-elasticsearch/v7 v7.17.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=

@@ -404,6 +403,7 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=

@@ -493,6 +493,8 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k
github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mertyildiran/gqlparser/v2 v2.4.6 h1:enAq4F5PYgW/rYExPNzYt7IYrrZnzrfqdywMA1QdFtM=
github.com/mertyildiran/gqlparser/v2 v2.4.6/go.mod h1:XZId58F+XqRSmoLrdsOLgqA918oNvBzuOORruJWBjDo=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=

@@ -679,8 +681,8 @@ github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ=
github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw=
github.com/up9inc/basenine/client/go v0.0.0-20220419100955-e2ca51087607 h1:UqxUSkOYOmsLZWQtMSk02ttnhdRwBRLOLt2aDiS9tEk=
github.com/up9inc/basenine/client/go v0.0.0-20220419100955-e2ca51087607/go.mod h1:SvJGPoa/6erhUQV7kvHBwM/0x5LyO6XaG2lUaCaKiUI=
github.com/up9inc/basenine/client/go v0.0.0-20220509204026-c37adfc587f4 h1:nNOrU1HVH0fnaG7GNhxCc8kNPVL035Iix7ihUF6lZT8=
github.com/up9inc/basenine/client/go v0.0.0-20220509204026-c37adfc587f4/go.mod h1:SvJGPoa/6erhUQV7kvHBwM/0x5LyO6XaG2lUaCaKiUI=
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg=
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/wI2L/jsondiff v0.1.1 h1:r2TkoEet7E4JMO5+s1RCY2R0LrNPNHY6hbDeow2hRHw=

@@ -751,8 +753,9 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220208050332-20e1d8d225ab h1:lnZ4LoV0UMdibeCUfIB2a4uFwRu491WX/VB2reB8xNc=
golang.org/x/crypto v0.0.0-20220208050332-20e1d8d225ab/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b h1:Qwe1rC8PSniVfAFPFJeyUkB+zcysC3RgJBAGk7eqBEU=
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -17,7 +17,6 @@ import (
	"github.com/gin-contrib/static"
	"github.com/gin-gonic/gin"
	"github.com/up9inc/mizu/agent/pkg/dependency"
	"github.com/up9inc/mizu/agent/pkg/elastic"
	"github.com/up9inc/mizu/agent/pkg/entries"
	"github.com/up9inc/mizu/agent/pkg/middlewares"
	"github.com/up9inc/mizu/agent/pkg/models"

@@ -205,7 +204,6 @@ func enableExpFeatureIfNeeded() {
		serviceMapGenerator := dependency.GetInstance(dependency.ServiceMapGeneratorDependency).(servicemap.ServiceMap)
		serviceMapGenerator.Enable()
	}
	elastic.GetInstance().Configure(config.Config.Elastic)
}

func getSyncEntriesConfig() *shared.SyncEntriesConfig {

@@ -373,6 +371,7 @@ func handleIncomingMessageAsTapper(socketConnection *websocket.Conn) {
func initializeDependencies() {
	dependency.RegisterGenerator(dependency.ServiceMapGeneratorDependency, func() interface{} { return servicemap.GetDefaultServiceMapInstance() })
	dependency.RegisterGenerator(dependency.OasGeneratorDependency, func() interface{} { return oas.GetDefaultOasGeneratorInstance() })
	dependency.RegisterGenerator(dependency.EntriesInserter, func() interface{} { return api.GetBasenineEntryInserterInstance() })
	dependency.RegisterGenerator(dependency.EntriesProvider, func() interface{} { return &entries.BasenineEntriesProvider{} })
	dependency.RegisterGenerator(dependency.EntriesSocketStreamer, func() interface{} { return &api.BasenineEntryStreamer{} })
	dependency.RegisterGenerator(dependency.EntryStreamerSocketConnector, func() interface{} { return &api.DefaultEntryStreamerSocketConnector{} })
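The initializeDependencies hunk above registers factory functions under string keys and later resolves them with dependency.GetInstance(...) plus a type assertion. The block below is a minimal sketch of that generator-based container pattern; the package name and the two function signatures mirror the calls visible in the diff, while the internal map-plus-mutex implementation is an assumption, not the actual mizu code.

```go
package dependency

import "sync"

// DependencyContainerType keys the registered generators, matching the
// constants used in the diff (e.g. EntriesInserter, EntriesProvider).
type DependencyContainerType string

var (
	mu         sync.Mutex
	generators = map[DependencyContainerType]func() interface{}{}
	instances  = map[DependencyContainerType]interface{}{}
)

// RegisterGenerator stores a factory for a dependency key.
func RegisterGenerator(key DependencyContainerType, generator func() interface{}) {
	mu.Lock()
	defer mu.Unlock()
	generators[key] = generator
}

// GetInstance lazily builds and caches the dependency for a key. Callers
// type-assert the result, as the diff does:
//
//	inserter := dependency.GetInstance(dependency.EntriesInserter).(EntryInserter)
func GetInstance(key DependencyContainerType) interface{} {
	mu.Lock()
	defer mu.Unlock()
	if instance, ok := instances[key]; ok {
		return instance
	}
	instance := generators[key]()
	instances[key] = instance
	return instance
}
```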
@@ -5,20 +5,19 @@ import (

	basenine "github.com/up9inc/basenine/client/go"
	"github.com/up9inc/mizu/agent/pkg/models"
	"github.com/up9inc/mizu/logger"
	tapApi "github.com/up9inc/mizu/tap/api"
)

type EntryStreamerSocketConnector interface {
	SendEntry(socketId int, entry *tapApi.Entry, params *WebSocketParams)
	SendMetadata(socketId int, metadata *basenine.Metadata)
	SendToastError(socketId int, err error)
	SendEntry(socketId int, entry *tapApi.Entry, params *WebSocketParams) error
	SendMetadata(socketId int, metadata *basenine.Metadata) error
	SendToastError(socketId int, err error) error
	CleanupSocket(socketId int)
}

type DefaultEntryStreamerSocketConnector struct{}

func (e *DefaultEntryStreamerSocketConnector) SendEntry(socketId int, entry *tapApi.Entry, params *WebSocketParams) {
func (e *DefaultEntryStreamerSocketConnector) SendEntry(socketId int, entry *tapApi.Entry, params *WebSocketParams) error {
	var message []byte
	if params.EnableFullEntries {
		message, _ = models.CreateFullEntryWebSocketMessage(entry)

@@ -29,26 +28,32 @@ func (e *DefaultEntryStreamerSocketConnector) SendEntry(socketId int, entry *tap
	}

	if err := SendToSocket(socketId, message); err != nil {
		logger.Log.Error(err)
		return err
	}

	return nil
}

func (e *DefaultEntryStreamerSocketConnector) SendMetadata(socketId int, metadata *basenine.Metadata) {
func (e *DefaultEntryStreamerSocketConnector) SendMetadata(socketId int, metadata *basenine.Metadata) error {
	metadataBytes, _ := models.CreateWebsocketQueryMetadataMessage(metadata)
	if err := SendToSocket(socketId, metadataBytes); err != nil {
		logger.Log.Error(err)
		return err
	}

	return nil
}

func (e *DefaultEntryStreamerSocketConnector) SendToastError(socketId int, err error) {
func (e *DefaultEntryStreamerSocketConnector) SendToastError(socketId int, err error) error {
	toastBytes, _ := models.CreateWebsocketToastMessage(&models.ToastMessage{
		Type:      "error",
		AutoClose: 5000,
		Text:      fmt.Sprintf("Syntax error: %s", err.Error()),
	})
	if err := SendToSocket(socketId, toastBytes); err != nil {
		logger.Log.Error(err)
		return err
	}

	return nil
}

func (e *DefaultEntryStreamerSocketConnector) CleanupSocket(socketId int) {
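Since SendEntry, SendMetadata and SendToastError now return error, callers can abort a stream on the first failed send instead of only logging. Below is a hedged sketch of an in-memory implementation of the new interface, the kind of test double this change makes possible; the fakeConnector type and its fields are hypothetical and not part of the diff.

```go
package api

import (
	"fmt"

	basenine "github.com/up9inc/basenine/client/go"
	tapApi "github.com/up9inc/mizu/tap/api"
)

// fakeConnector records what would have been written to the WebSocket so
// streaming code can be exercised without a real socket (hypothetical helper).
type fakeConnector struct {
	entries  []*tapApi.Entry
	metadata []*basenine.Metadata
	failSend bool
}

func (f *fakeConnector) SendEntry(socketId int, entry *tapApi.Entry, params *WebSocketParams) error {
	if f.failSend {
		// Simulate a dropped client; the streamer is expected to stop here.
		return fmt.Errorf("socket %d is gone", socketId)
	}
	f.entries = append(f.entries, entry)
	return nil
}

func (f *fakeConnector) SendMetadata(socketId int, metadata *basenine.Metadata) error {
	f.metadata = append(f.metadata, metadata)
	return nil
}

func (f *fakeConnector) SendToastError(socketId int, err error) error {
	return nil
}

func (f *fakeConnector) CleanupSocket(socketId int) {}
```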
@@ -14,7 +14,6 @@ import (
	"github.com/up9inc/mizu/agent/pkg/models"

	"github.com/up9inc/mizu/agent/pkg/dependency"
	"github.com/up9inc/mizu/agent/pkg/elastic"
	"github.com/up9inc/mizu/agent/pkg/har"
	"github.com/up9inc/mizu/agent/pkg/holder"
	"github.com/up9inc/mizu/agent/pkg/providers"

@@ -25,10 +24,7 @@ import (
	"github.com/up9inc/mizu/agent/pkg/utils"

	"github.com/up9inc/mizu/logger"
	"github.com/up9inc/mizu/shared"
	tapApi "github.com/up9inc/mizu/tap/api"

	basenine "github.com/up9inc/basenine/client/go"
)

var k8sResolver *resolver.Resolver

@@ -103,20 +99,6 @@ func startReadingChannel(outputItems <-chan *tapApi.OutputChannelItem, extension
		panic("Channel of captured messages is nil")
	}

BasenineReconnect:
	connection, err := basenine.NewConnection(shared.BasenineHost, shared.BaseninePort)
	if err != nil {
		logger.Log.Errorf("Can't establish a new connection to Basenine server: %v", err)
		time.Sleep(shared.BasenineReconnectInterval * time.Second)
		goto BasenineReconnect
	}
	if err = connection.InsertMode(); err != nil {
		logger.Log.Errorf("Insert mode call failed: %v", err)
		connection.Close()
		time.Sleep(shared.BasenineReconnectInterval * time.Second)
		goto BasenineReconnect
	}

	disableOASValidation := false
	ctx := context.Background()
	doc, contractContent, router, err := loadOAS(ctx)

@@ -163,17 +145,13 @@ BasenineReconnect:

		providers.EntryAdded(len(data))

		if err = connection.SendText(string(data)); err != nil {
			logger.Log.Errorf("An error occured while inserting a new record to database: %v", err)
			connection.Close()
			time.Sleep(shared.BasenineReconnectInterval * time.Second)
			goto BasenineReconnect
		entryInserter := dependency.GetInstance(dependency.EntriesInserter).(EntryInserter)
		if err := entryInserter.Insert(mizuEntry); err != nil {
			logger.Log.Errorf("Error inserting entry, err: %v", err)
		}

		serviceMapGenerator := dependency.GetInstance(dependency.ServiceMapGeneratorDependency).(servicemap.ServiceMapSink)
		serviceMapGenerator.NewTCPEntry(mizuEntry.Source, mizuEntry.Destination, &item.Protocol)

		elastic.GetInstance().PushEntry(mizuEntry)
	}
}
71  agent/pkg/api/socket_data_inserter.go  Normal file

@@ -0,0 +1,71 @@
package api

import (
	"encoding/json"
	"fmt"
	basenine "github.com/up9inc/basenine/client/go"
	"github.com/up9inc/mizu/logger"
	"github.com/up9inc/mizu/shared"
	"github.com/up9inc/mizu/tap/api"
	"sync"
	"time"
)

type EntryInserter interface {
	Insert(entry *api.Entry) error
}

type BasenineEntryInserter struct {
	connection *basenine.Connection
}

var instance *BasenineEntryInserter
var once sync.Once

func GetBasenineEntryInserterInstance() *BasenineEntryInserter {
	once.Do(func() {
		instance = &BasenineEntryInserter{}
	})

	return instance
}

func (e *BasenineEntryInserter) Insert(entry *api.Entry) error {
	if e.connection == nil {
		e.connection = initializeConnection()
	}

	data, err := json.Marshal(entry)
	if err != nil {
		return fmt.Errorf("error marshling entry, err: %v", err)
	}

	if err := e.connection.SendText(string(data)); err != nil {
		e.connection.Close()
		e.connection = nil

		return fmt.Errorf("error sending text to database, err: %v", err)
	}

	return nil
}

func initializeConnection() *basenine.Connection {
	for {
		connection, err := basenine.NewConnection(shared.BasenineHost, shared.BaseninePort)
		if err != nil {
			logger.Log.Errorf("Can't establish a new connection to Basenine server: %v", err)
			time.Sleep(shared.BasenineReconnectInterval * time.Second)
			continue
		}

		if err = connection.InsertMode(); err != nil {
			logger.Log.Errorf("Insert mode call failed: %v", err)
			connection.Close()
			time.Sleep(shared.BasenineReconnectInterval * time.Second)
			continue
		}

		return connection
	}
}
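Because producers now resolve EntryInserter through the dependency container instead of dialing Basenine directly, a different backend can be swapped in (for tests, for instance) by registering another generator before the first lookup. The stub below is a hypothetical illustration of that seam; only the EntryInserter interface and the RegisterGenerator call come from the diff, everything else is invented for the example.

```go
package api_test

import (
	"github.com/up9inc/mizu/agent/pkg/dependency"
	tapApi "github.com/up9inc/mizu/tap/api"
)

// noopInserter satisfies the EntryInserter interface structurally: it counts
// entries instead of writing them to Basenine (hypothetical test helper).
type noopInserter struct {
	count int
}

func (n *noopInserter) Insert(entry *tapApi.Entry) error {
	n.count++
	return nil
}

// useStubInserter must run before the first GetInstance lookup so the stub,
// not the Basenine-backed singleton, is what startReadingChannel resolves.
func useStubInserter() *noopInserter {
	stub := &noopInserter{}
	dependency.RegisterGenerator(dependency.EntriesInserter, func() interface{} { return stub })
	return stub
}
```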
@@ -3,6 +3,7 @@ package api
import (
	"context"
	"encoding/json"
	"time"

	basenine "github.com/up9inc/basenine/client/go"
	"github.com/up9inc/mizu/agent/pkg/dependency"

@@ -33,9 +34,18 @@ func (e *BasenineEntryStreamer) Get(ctx context.Context, socketId int, params *W
	meta := make(chan []byte)

	query := params.Query
	err = basenine.Validate(shared.BasenineHost, shared.BaseninePort, query)
	if err = basenine.Validate(shared.BasenineHost, shared.BaseninePort, query); err != nil {
		if err := entryStreamerSocketConnector.SendToastError(socketId, err); err != nil {
			return err
		}

		entryStreamerSocketConnector.CleanupSocket(socketId)
		return err
	}

	leftOff, err := e.fetch(socketId, params, entryStreamerSocketConnector)
	if err != nil {
		entryStreamerSocketConnector.SendToastError(socketId, err)
		logger.Log.Errorf("Fetch error: %v", err)
	}

	handleDataChannel := func(c *basenine.Connection, data chan []byte) {

@@ -47,13 +57,15 @@ func (e *BasenineEntryStreamer) Get(ctx context.Context, socketId int, params *W
		}

		var entry *tapApi.Entry
		err = json.Unmarshal(bytes, &entry)
		if err != nil {
			logger.Log.Debugf("Error unmarshalling entry: %v", err.Error())
		if err = json.Unmarshal(bytes, &entry); err != nil {
			logger.Log.Debugf("Error unmarshalling entry: %v", err)
			continue
		}

		entryStreamerSocketConnector.SendEntry(socketId, entry, params)
		if err := entryStreamerSocketConnector.SendEntry(socketId, entry, params); err != nil {
			logger.Log.Errorf("Error sending entry to socket, err: %v", err)
			return
		}
	}
}

@@ -66,20 +78,22 @@ func (e *BasenineEntryStreamer) Get(ctx context.Context, socketId int, params *W
		}

		var metadata *basenine.Metadata
		err = json.Unmarshal(bytes, &metadata)
		if err != nil {
			logger.Log.Debugf("Error unmarshalling metadata: %v", err.Error())
		if err = json.Unmarshal(bytes, &metadata); err != nil {
			logger.Log.Debugf("Error unmarshalling metadata: %v", err)
			continue
		}

		entryStreamerSocketConnector.SendMetadata(socketId, metadata)
		if err := entryStreamerSocketConnector.SendMetadata(socketId, metadata); err != nil {
			logger.Log.Errorf("Error sending metadata to socket, err: %v", err)
			return
		}
	}
}

	go handleDataChannel(connection, data)
	go handleMetaChannel(connection, meta)

	if err = connection.Query(query, data, meta); err != nil {
	if err = connection.Query(leftOff, query, data, meta); err != nil {
		logger.Log.Errorf("Query mode call failed: %v", err)
		entryStreamerSocketConnector.CleanupSocket(socketId)
		return err

@@ -94,3 +108,64 @@ func (e *BasenineEntryStreamer) Get(ctx context.Context, socketId int, params *W

	return nil
}

// Reverses a []byte slice.
func (e *BasenineEntryStreamer) fetch(socketId int, params *WebSocketParams, connector EntryStreamerSocketConnector) (leftOff string, err error) {
	if params.Fetch <= 0 {
		leftOff = params.LeftOff
		return
	}

	var data [][]byte
	var firstMeta []byte
	var lastMeta []byte
	data, firstMeta, lastMeta, err = basenine.Fetch(
		shared.BasenineHost,
		shared.BaseninePort,
		params.LeftOff,
		-1,
		params.Query,
		params.Fetch,
		time.Duration(params.TimeoutMs)*time.Millisecond,
	)
	if err != nil {
		return
	}

	var firstMetadata *basenine.Metadata
	if err = json.Unmarshal(firstMeta, &firstMetadata); err != nil {
		return
	}

	leftOff = firstMetadata.LeftOff

	var lastMetadata *basenine.Metadata
	if err = json.Unmarshal(lastMeta, &lastMetadata); err != nil {
		return
	}

	if err = connector.SendMetadata(socketId, lastMetadata); err != nil {
		return
	}

	data = e.reverseBytesSlice(data)
	for _, row := range data {
		var entry *tapApi.Entry
		if err = json.Unmarshal(row, &entry); err != nil {
			break
		}

		if err = connector.SendEntry(socketId, entry, params); err != nil {
			return
		}
	}
	return
}

// Reverses a []byte slice.
func (e *BasenineEntryStreamer) reverseBytesSlice(arr [][]byte) (newArr [][]byte) {
	for i := len(arr) - 1; i >= 0; i-- {
		newArr = append(newArr, arr[i])
	}
	return newArr
}
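The fetch helper receives Basenine rows newest-first, which is why it reverses them before replaying entries to the client in chronological order. A small standalone sketch of that reversal (the method receiver from the diff is dropped here for brevity):

```go
package main

import "fmt"

// reverseBytesSlice mirrors the helper added in the diff: it returns the rows
// of a [][]byte in reverse order so the oldest fetched entry is sent first.
func reverseBytesSlice(arr [][]byte) (newArr [][]byte) {
	for i := len(arr) - 1; i >= 0; i-- {
		newArr = append(newArr, arr[i])
	}
	return newArr
}

func main() {
	rows := [][]byte{[]byte(`{"id":3}`), []byte(`{"id":2}`), []byte(`{"id":1}`)}
	for _, row := range reverseBytesSlice(rows) {
		fmt.Println(string(row)) // prints id 1, then 2, then 3
	}
}
```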
@@ -34,8 +34,11 @@ type SocketConnection struct {
}

type WebSocketParams struct {
	LeftOff           string `json:"leftOff"`
	Query             string `json:"query"`
	EnableFullEntries bool   `json:"enableFullEntries"`
	Fetch             int    `json:"fetch"`
	TimeoutMs         int    `json:"timeoutMs"`
}

var (
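A WebSocket client now sends the paging fields alongside the query when it opens the entry stream. A minimal sketch of building that JSON payload follows; the struct and its tags are copied from the diff above, while the concrete values (the "latest" leftOff, the fetch size, the timeout) are illustrative assumptions.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// WebSocketParams mirrors the struct from the diff (JSON tags included) so the
// example stays self-contained.
type WebSocketParams struct {
	LeftOff           string `json:"leftOff"`
	Query             string `json:"query"`
	EnableFullEntries bool   `json:"enableFullEntries"`
	Fetch             int    `json:"fetch"`
	TimeoutMs         int    `json:"timeoutMs"`
}

func main() {
	params := WebSocketParams{
		LeftOff:   "latest",                  // assumed; the diff elsewhere uses "latest" as a leftOff
		Query:     `response.status == 200`,  // same filter syntax the Cypress tests exercise
		Fetch:     50,                        // ask for an initial backfill of 50 entries
		TimeoutMs: 3000,
	}
	payload, _ := json.Marshal(params)
	fmt.Println(string(payload))
}
```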
@@ -5,6 +5,7 @@ type DependencyContainerType string
const (
	ServiceMapGeneratorDependency = "ServiceMapGeneratorDependency"
	OasGeneratorDependency        = "OasGeneratorDependency"
	EntriesInserter               = "EntriesInserter"
	EntriesProvider               = "EntriesProvider"
	EntriesSocketStreamer         = "EntriesSocketStreamer"
	EntryStreamerSocketConnector  = "EntryStreamerSocketConnector"
@@ -1,116 +0,0 @@
package elastic

import (
	"bytes"
	"crypto/tls"
	"encoding/json"
	"net/http"
	"sync"
	"time"

	"github.com/elastic/go-elasticsearch/v7"
	"github.com/up9inc/mizu/logger"
	"github.com/up9inc/mizu/shared"
	"github.com/up9inc/mizu/tap/api"
)

type client struct {
	es            *elasticsearch.Client
	index         string
	insertedCount int
}

var instance *client
var once sync.Once

func GetInstance() *client {
	once.Do(func() {
		instance = newClient()
	})
	return instance
}

func (client *client) Configure(config shared.ElasticConfig) {
	if config.Url == "" || config.User == "" || config.Password == "" {
		if client.es != nil {
			client.es = nil
		}
		logger.Log.Infof("No elastic configuration was supplied, elastic exporter disabled")
		return
	}
	transport := http.DefaultTransport
	tlsClientConfig := &tls.Config{InsecureSkipVerify: true}
	transport.(*http.Transport).TLSClientConfig = tlsClientConfig
	cfg := elasticsearch.Config{
		Addresses: []string{config.Url},
		Username:  config.User,
		Password:  config.Password,
		Transport: transport,
	}

	es, err := elasticsearch.NewClient(cfg)
	if err != nil {
		logger.Log.Errorf("Failed to initialize elastic client %v", err)
	}

	// Have the client instance return a response
	res, err := es.Info()
	if err != nil {
		logger.Log.Errorf("Elastic client.Info() ERROR: %v", err)
	} else {
		client.es = es
		client.index = "mizu_traffic_http_" + time.Now().Format("2006_01_02_15_04")
		client.insertedCount = 0
		logger.Log.Infof("Elastic client configured, index: %s, cluster info: %v", client.index, res)
	}
	defer res.Body.Close()
}

func newClient() *client {
	return &client{
		es:    nil,
		index: "",
	}
}

type httpEntry struct {
	Source      *api.TCP               `json:"src"`
	Destination *api.TCP               `json:"dst"`
	Outgoing    bool                   `json:"outgoing"`
	CreatedAt   time.Time              `json:"createdAt"`
	Request     map[string]interface{} `json:"request"`
	Response    map[string]interface{} `json:"response"`
	ElapsedTime int64                  `json:"elapsedTime"`
}

func (client *client) PushEntry(entry *api.Entry) {
	if client.es == nil {
		return
	}

	if entry.Protocol.Name != "http" {
		return
	}

	entryToPush := httpEntry{
		Source:      entry.Source,
		Destination: entry.Destination,
		Outgoing:    entry.Outgoing,
		CreatedAt:   entry.StartTime,
		Request:     entry.Request,
		Response:    entry.Response,
		ElapsedTime: entry.ElapsedTime,
	}

	entryJson, err := json.Marshal(entryToPush)
	if err != nil {
		logger.Log.Errorf("json.Marshal ERROR: %v", err)
		return
	}
	var buffer bytes.Buffer
	buffer.WriteString(string(entryJson))
	res, _ := client.es.Index(client.index, &buffer)
	if res.StatusCode == 201 {
		client.insertedCount += 1
	}
}
@@ -22,7 +22,7 @@ type EntriesProvider interface {
type BasenineEntriesProvider struct{}

func (e *BasenineEntriesProvider) GetEntries(entriesRequest *models.EntriesRequest) ([]*tapApi.EntryWrapper, *basenine.Metadata, error) {
	data, meta, err := basenine.Fetch(shared.BasenineHost, shared.BaseninePort,
	data, _, lastMeta, err := basenine.Fetch(shared.BasenineHost, shared.BaseninePort,
		entriesRequest.LeftOff, entriesRequest.Direction, entriesRequest.Query,
		entriesRequest.Limit, time.Duration(entriesRequest.TimeoutMs)*time.Millisecond)
	if err != nil {

@@ -49,7 +49,7 @@ func (e *BasenineEntriesProvider) GetEntries(entriesRequest *models.EntriesReque
	}

	var metadata *basenine.Metadata
	err = json.Unmarshal(meta, &metadata)
	err = json.Unmarshal(lastMeta, &metadata)
	if err != nil {
		logger.Log.Debugf("Error recieving metadata: %v", err.Error())
	}
@@ -124,8 +124,8 @@ func NewRequest(request map[string]interface{}) (harRequest *Request, err error)

	postData, _ := request["postData"].(map[string]interface{})
	mimeType := postData["mimeType"]
	if mimeType == nil || len(mimeType.(string)) == 0 {
		mimeType = "text/html"
	if mimeType == nil {
		mimeType = ""
	}
	text := postData["text"]
	postDataText := ""

@@ -177,8 +177,8 @@ func NewResponse(response map[string]interface{}) (harResponse *Response, err er

	content, _ := response["content"].(map[string]interface{})
	mimeType := content["mimeType"]
	if mimeType == nil || len(mimeType.(string)) == 0 {
		mimeType = "text/html"
	if mimeType == nil {
		mimeType = ""
	}
	encoding := content["encoding"]
	text := content["text"]
@@ -104,7 +104,7 @@ func (g *defaultOasGenerator) runGenerator() {
		g.dbMutex.Lock()
		defer g.dbMutex.Unlock()
		logger.Log.Infof("Querying DB for OAS generator with query '%s'", g.entriesQuery)
		if err := g.dbConn.Query(g.entriesQuery, dataChan, metaChan); err != nil {
		if err := g.dbConn.Query("latest", g.entriesQuery, dataChan, metaChan); err != nil {
			logger.Log.Errorf("Query mode call failed: %v", err)
		}
@@ -219,8 +219,6 @@ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -327,7 +327,7 @@ BasenineReconnect:
	go handleMetaChannel(&wg, connection, meta)
	wg.Add(2)

	if err = connection.Query(query, data, meta); err != nil {
	if err = connection.Query("latest", query, data, meta); err != nil {
		logger.Log.Errorf("Query mode call failed: %v", err)
		connection.Close()
		time.Sleep(shared.BasenineReconnectInterval * time.Second)
@@ -164,7 +164,6 @@ func getTapMizuAgentConfig() *shared.MizuAgentConfig {
		ServiceMap: config.Config.ServiceMap,
		OAS:        config.Config.OAS,
		Telemetry:  config.Config.Telemetry,
		Elastic:    config.Config.Elastic,
	}

	return &mizuAgentConfig
@@ -41,7 +41,6 @@ type ConfigStruct struct {
	LogLevelStr string               `yaml:"log-level,omitempty" default:"INFO" readonly:""`
	ServiceMap  bool                 `yaml:"service-map" default:"true"`
	OAS         bool                 `yaml:"oas" default:"true"`
	Elastic     shared.ElasticConfig `yaml:"elastic"`
}

func (config *ConfigStruct) validate() error {
31  devops/linux-x86_64-musl-go-libpcap/Dockerfile  Normal file

@@ -0,0 +1,31 @@
FROM messense/rust-musl-cross:x86_64-musl AS builder-from-arm64v8-to-amd64

ENV CROSS_TRIPLE x86_64-unknown-linux-musl
ENV CROSS_ROOT /usr/local/musl

ENV AS=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-as \
    AR=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-ar \
    CC=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-gcc \
    CPP=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-cpp \
    CXX=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-g++ \
    LD=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-ld \
    FC=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-gfortran

# Install Go
WORKDIR /
RUN curl https://go.dev/dl/go1.17.6.linux-arm64.tar.gz -Lo ./go.linux-arm64.tar.gz \
    && curl https://go.dev/dl/go1.17.6.linux-arm64.tar.gz.asc -Lo ./go.linux-arm64.tar.gz.asc \
    && curl https://dl.google.com/dl/linux/linux_signing_key.pub -Lo linux_signing_key.pub \
    && gpg --import linux_signing_key.pub && gpg --verify ./go.linux-arm64.tar.gz.asc ./go.linux-arm64.tar.gz \
    && rm -rf /usr/local/go && tar -C /usr/local -xzf go.linux-arm64.tar.gz
ENV PATH "$PATH:/usr/local/go/bin"

# Compile libpcap from source
RUN curl https://www.tcpdump.org/release/libpcap-1.10.1.tar.gz -Lo ./libpcap.tar.gz \
    && curl https://www.tcpdump.org/release/libpcap-1.10.1.tar.gz.sig -Lo ./libpcap.tar.gz.sig \
    && curl https://www.tcpdump.org/release/signing-key.asc -Lo ./signing-key.asc \
    && gpg --import signing-key.asc && gpg --verify libpcap.tar.gz.sig libpcap.tar.gz \
    && tar -xzf libpcap.tar.gz && mv ./libpcap-* ./libpcap
WORKDIR /libpcap
RUN ./configure --host=x86_64 && make \
    && cp /libpcap/libpcap.a /usr/local/musl/lib/gcc/x86_64-unknown-linux-musl/*/
4  devops/linux-x86_64-musl-go-libpcap/build-push.sh  Executable file

@@ -0,0 +1,4 @@
#!/bin/bash
set -e

docker build . -t up9inc/linux-x86_64-musl-go-libpcap && docker push up9inc/linux-x86_64-musl-go-libpcap
@@ -10,7 +10,6 @@ const (
	ValidationRulesFileName = "validation-rules.yaml"
	ContractFileName        = "contract-oas.yaml"
	ConfigFileName          = "mizu-config.json"
	GoGCEnvVar              = "GOGC"
	DefaultApiServerPort    = 8899
	LogLevelEnvVar          = "LOG_LEVEL"
	MizuAgentImageRepo      = "docker.io/up9inc/mizu"
@@ -768,7 +768,6 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
	agentContainer.WithEnv(
		applyconfcore.EnvVar().WithName(shared.LogLevelEnvVar).WithValue(logLevel.String()),
		applyconfcore.EnvVar().WithName(shared.HostModeEnvVar).WithValue("1"),
		applyconfcore.EnvVar().WithName(shared.GoGCEnvVar).WithValue("12800"),
		applyconfcore.EnvVar().WithName(shared.MizuFilteringOptionsEnvVar).WithValue(string(mizuApiFilteringOptionsJsonStr)),
	)
	agentContainer.WithEnv(
@@ -45,13 +45,6 @@ type MizuAgentConfig struct {
	ServiceMap bool `json:"serviceMap"`
	OAS        bool `json:"oas"`
	Telemetry  bool `json:"telemetry"`
	Elastic    ElasticConfig `json:"elastic"`
}

type ElasticConfig struct {
	User     string `yaml:"user,omitempty" default:"" readonly:""`
	Password string `yaml:"password,omitempty" default:"" readonly:""`
	Url      string `yaml:"url,omitempty" default:"" readonly:""`
}

type WebSocketMessageMetadata struct {
@@ -104,11 +104,7 @@ type OutputChannelItem struct {
	Namespace string
}

type SuperTimer struct {
	CaptureTime time.Time
}

type SuperIdentifier struct {
type ProtoIdentifier struct {
	Protocol       *Protocol
	IsClosedOthers bool
}

@@ -130,7 +126,7 @@ func (p *ReadProgress) Current() (n int) {
type Dissector interface {
	Register(*Extension)
	Ping()
	Dissect(b *bufio.Reader, progress *ReadProgress, capture Capture, isClient bool, tcpID *TcpID, counterPair *CounterPair, superTimer *SuperTimer, superIdentifier *SuperIdentifier, emitter Emitter, options *TrafficFilteringOptions, reqResMatcher RequestResponseMatcher) error
	Dissect(b *bufio.Reader, reader TcpReader, options *TrafficFilteringOptions) error
	Analyze(item *OutputChannelItem, resolvedSource string, resolvedDestination string, namespace string) *Entry
	Summarize(entry *Entry) *BaseEntry
	Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, err error)

@@ -406,3 +402,39 @@ func (r *HTTPResponseWrapper) MarshalJSON() ([]byte, error) {
		Response: r.Response,
	})
}

type TcpReaderDataMsg interface {
	GetBytes() []byte
	GetTimestamp() time.Time
}

type TcpReader interface {
	Read(p []byte) (int, error)
	GetReqResMatcher() RequestResponseMatcher
	GetIsClient() bool
	GetReadProgress() *ReadProgress
	GetParent() TcpStream
	GetTcpID() *TcpID
	GetCounterPair() *CounterPair
	GetCaptureTime() time.Time
	GetEmitter() Emitter
	GetIsClosed() bool
	GetExtension() *Extension
}

type TcpStream interface {
	SetProtocol(protocol *Protocol)
	GetOrigin() Capture
	GetProtoIdentifier() *ProtoIdentifier
	GetReqResMatchers() []RequestResponseMatcher
	GetIsTapTarget() bool
	GetIsClosed() bool
}

type TcpStreamMap interface {
	Range(f func(key, value interface{}) bool)
	Store(key, value interface{})
	Delete(key interface{})
	NextId() int64
	CloseTimedoutTcpStreamChannels()
}
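With Dissect reduced to (reader, options), protocol extensions pull the connection context — TCP ID, capture time, emitter, parent stream — from the TcpReader instead of receiving them as separate arguments. Below is a hedged sketch of only the Dissect method for an invented dissector; the echoDissector type and its draining behaviour are illustrative, only the interface calls come from the diff, and the remaining Dissector methods are omitted.

```go
package echo

import (
	"bufio"
	"io"

	"github.com/up9inc/mizu/tap/api"
)

// echoDissector is a hypothetical minimal dissector: it tags the parent TCP
// stream with its protocol (as the AMQP dissector below does via
// reader.GetParent().SetProtocol(&protocol)) and then drains the stream.
type echoDissector struct {
	protocol api.Protocol
}

func (d *echoDissector) Dissect(b *bufio.Reader, reader api.TcpReader, options *api.TrafficFilteringOptions) error {
	// Connection identity now comes from the reader instead of explicit args.
	_ = reader.GetTcpID()
	_ = reader.GetCaptureTime()

	reader.GetParent().SetProtocol(&d.protocol)

	// Consume the stream; a real dissector would parse frames and emit entries
	// through reader.GetEmitter().
	_, err := io.Copy(io.Discard, b)
	return err
}
```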
@@ -22,7 +22,7 @@ type Cleaner struct {
	connectionTimeout time.Duration
	stats             CleanerStats
	statsMutex        sync.Mutex
	streamsMap        *tcpStreamMap
	streamsMap        api.TcpStreamMap
}

func (cl *Cleaner) clean() {

@@ -33,13 +33,15 @@ func (cl *Cleaner) clean() {
	flushed, closed := cl.assembler.FlushCloseOlderThan(startCleanTime.Add(-cl.connectionTimeout))
	cl.assemblerMutex.Unlock()

	cl.streamsMap.streams.Range(func(k, v interface{}) bool {
		reqResMatcher := v.(*tcpStreamWrapper).reqResMatcher
		if reqResMatcher == nil {
			return true
	cl.streamsMap.Range(func(k, v interface{}) bool {
		reqResMatchers := v.(api.TcpStream).GetReqResMatchers()
		for _, reqResMatcher := range reqResMatchers {
			if reqResMatcher == nil {
				continue
			}
			deleted := deleteOlderThan(reqResMatcher.GetMap(), startCleanTime.Add(-cl.connectionTimeout))
			cl.stats.deleted += deleted
		}
		deleted := deleteOlderThan(reqResMatcher.GetMap(), startCleanTime.Add(-cl.connectionTimeout))
		cl.stats.deleted += deleted
		return true
	})
@@ -39,17 +39,17 @@ func (d dissecting) Ping() {

const amqpRequest string = "amqp_request"

func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
func (d dissecting) Dissect(b *bufio.Reader, reader api.TcpReader, options *api.TrafficFilteringOptions) error {
	r := AmqpReader{b}

	var remaining int
	var header *HeaderFrame

	connectionInfo := &api.ConnectionInfo{
		ClientIP:   tcpID.SrcIP,
		ClientPort: tcpID.SrcPort,
		ServerIP:   tcpID.DstIP,
		ServerPort: tcpID.DstPort,
		ClientIP:   reader.GetTcpID().SrcIP,
		ClientPort: reader.GetTcpID().SrcPort,
		ServerIP:   reader.GetTcpID().DstIP,
		ServerPort: reader.GetTcpID().DstPort,
		IsOutgoing: true,
	}

@@ -75,7 +75,7 @@ func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture
	var lastMethodFrameMessage Message

	for {
		if superIdentifier.Protocol != nil && superIdentifier.Protocol != &protocol {
		if reader.GetParent().GetProtoIdentifier().Protocol != nil && reader.GetParent().GetProtoIdentifier().Protocol != &protocol {
			return errors.New("Identified by another protocol")
		}

@@ -112,12 +112,12 @@ func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture
			switch lastMethodFrameMessage.(type) {
			case *BasicPublish:
				eventBasicPublish.Body = f.Body
				superIdentifier.Protocol = &protocol
				emitAMQP(*eventBasicPublish, amqpRequest, basicMethodMap[40], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
				reader.GetParent().SetProtocol(&protocol)
				emitAMQP(*eventBasicPublish, amqpRequest, basicMethodMap[40], connectionInfo, reader.GetCaptureTime(), reader.GetReadProgress().Current(), reader.GetEmitter(), reader.GetParent().GetOrigin())
			case *BasicDeliver:
				eventBasicDeliver.Body = f.Body
				superIdentifier.Protocol = &protocol
				emitAMQP(*eventBasicDeliver, amqpRequest, basicMethodMap[60], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
				reader.GetParent().SetProtocol(&protocol)
				emitAMQP(*eventBasicDeliver, amqpRequest, basicMethodMap[60], connectionInfo, reader.GetCaptureTime(), reader.GetReadProgress().Current(), reader.GetEmitter(), reader.GetParent().GetOrigin())
			}

		case *MethodFrame:

@@ -137,8 +137,8 @@ func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture
					NoWait:     m.NoWait,
					Arguments:  m.Arguments,
				}
				superIdentifier.Protocol = &protocol
				emitAMQP(*eventQueueBind, amqpRequest, queueMethodMap[20], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
				reader.GetParent().SetProtocol(&protocol)
				emitAMQP(*eventQueueBind, amqpRequest, queueMethodMap[20], connectionInfo, reader.GetCaptureTime(), reader.GetReadProgress().Current(), reader.GetEmitter(), reader.GetParent().GetOrigin())

			case *BasicConsume:
				eventBasicConsume := &BasicConsume{

@@ -150,8 +150,8 @@ func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture
					NoWait:    m.NoWait,
					Arguments: m.Arguments,
				}
				superIdentifier.Protocol = &protocol
				emitAMQP(*eventBasicConsume, amqpRequest, basicMethodMap[20], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
				reader.GetParent().SetProtocol(&protocol)
				emitAMQP(*eventBasicConsume, amqpRequest, basicMethodMap[20], connectionInfo, reader.GetCaptureTime(), reader.GetReadProgress().Current(), reader.GetEmitter(), reader.GetParent().GetOrigin())

			case *BasicDeliver:
				eventBasicDeliver.ConsumerTag = m.ConsumerTag

@@ -170,8 +170,8 @@ func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture
					NoWait:    m.NoWait,
					Arguments: m.Arguments,
				}
				superIdentifier.Protocol = &protocol
				emitAMQP(*eventQueueDeclare, amqpRequest, queueMethodMap[10], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
				reader.GetParent().SetProtocol(&protocol)
				emitAMQP(*eventQueueDeclare, amqpRequest, queueMethodMap[10], connectionInfo, reader.GetCaptureTime(), reader.GetReadProgress().Current(), reader.GetEmitter(), reader.GetParent().GetOrigin())
|
||||
|
||||
case *ExchangeDeclare:
|
||||
eventExchangeDeclare := &ExchangeDeclare{
|
||||
@@ -184,8 +184,8 @@ func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture
|
||||
NoWait: m.NoWait,
|
||||
Arguments: m.Arguments,
|
||||
}
|
||||
superIdentifier.Protocol = &protocol
|
||||
emitAMQP(*eventExchangeDeclare, amqpRequest, exchangeMethodMap[10], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
|
||||
reader.GetParent().SetProtocol(&protocol)
|
||||
emitAMQP(*eventExchangeDeclare, amqpRequest, exchangeMethodMap[10], connectionInfo, reader.GetCaptureTime(), reader.GetReadProgress().Current(), reader.GetEmitter(), reader.GetParent().GetOrigin())
|
||||
|
||||
case *ConnectionStart:
|
||||
eventConnectionStart := &ConnectionStart{
|
||||
@@ -195,8 +195,8 @@ func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture
|
||||
Mechanisms: m.Mechanisms,
|
||||
Locales: m.Locales,
|
||||
}
|
||||
superIdentifier.Protocol = &protocol
|
||||
emitAMQP(*eventConnectionStart, amqpRequest, connectionMethodMap[10], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
|
||||
reader.GetParent().SetProtocol(&protocol)
|
||||
emitAMQP(*eventConnectionStart, amqpRequest, connectionMethodMap[10], connectionInfo, reader.GetCaptureTime(), reader.GetReadProgress().Current(), reader.GetEmitter(), reader.GetParent().GetOrigin())
|
||||
|
||||
case *ConnectionClose:
|
||||
eventConnectionClose := &ConnectionClose{
|
||||
@@ -205,8 +205,8 @@ func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture
|
||||
ClassId: m.ClassId,
|
||||
MethodId: m.MethodId,
|
||||
}
|
||||
superIdentifier.Protocol = &protocol
|
||||
emitAMQP(*eventConnectionClose, amqpRequest, connectionMethodMap[50], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
|
||||
reader.GetParent().SetProtocol(&protocol)
|
||||
emitAMQP(*eventConnectionClose, amqpRequest, connectionMethodMap[50], connectionInfo, reader.GetCaptureTime(), reader.GetReadProgress().Current(), reader.GetEmitter(), reader.GetParent().GetOrigin())
|
||||
}
|
||||
|
||||
default:
|
||||
|
||||
@@ -106,7 +106,6 @@ func TestDissect(t *testing.T) {
|
||||
Request: 0,
|
||||
Response: 0,
|
||||
}
|
||||
superIdentifier := &api.SuperIdentifier{}
|
||||
|
||||
// Request
|
||||
pathClient := _path
|
||||
@@ -122,7 +121,21 @@ func TestDissect(t *testing.T) {
|
||||
DstPort: "2",
|
||||
}
|
||||
reqResMatcher := dissector.NewResponseRequestMatcher()
|
||||
err = dissector.Dissect(bufferClient, &api.ReadProgress{}, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
stream := NewTcpStream(api.Pcap)
|
||||
reader := NewTcpReader(
|
||||
&api.ReadProgress{},
|
||||
"",
|
||||
tcpIDClient,
|
||||
time.Time{},
|
||||
stream,
|
||||
true,
|
||||
false,
|
||||
nil,
|
||||
emitter,
|
||||
counterPair,
|
||||
reqResMatcher,
|
||||
)
|
||||
err = dissector.Dissect(bufferClient, reader, options)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
panic(err)
|
||||
}
|
||||
@@ -140,7 +153,20 @@ func TestDissect(t *testing.T) {
|
||||
SrcPort: "2",
|
||||
DstPort: "1",
|
||||
}
|
||||
err = dissector.Dissect(bufferServer, &api.ReadProgress{}, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
reader = NewTcpReader(
|
||||
&api.ReadProgress{},
|
||||
"",
|
||||
tcpIDServer,
|
||||
time.Time{},
|
||||
stream,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
emitter,
|
||||
counterPair,
|
||||
reqResMatcher,
|
||||
)
|
||||
err = dissector.Dissect(bufferServer, reader, options)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
tap/extensions/amqp/tcp_reader_mock_test.go (new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
package amqp
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
type tcpReader struct {
|
||||
ident string
|
||||
tcpID *api.TcpID
|
||||
isClosed bool
|
||||
isClient bool
|
||||
isOutgoing bool
|
||||
progress *api.ReadProgress
|
||||
captureTime time.Time
|
||||
parent api.TcpStream
|
||||
extension *api.Extension
|
||||
emitter api.Emitter
|
||||
counterPair *api.CounterPair
|
||||
reqResMatcher api.RequestResponseMatcher
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func NewTcpReader(progress *api.ReadProgress, ident string, tcpId *api.TcpID, captureTime time.Time, parent api.TcpStream, isClient bool, isOutgoing bool, extension *api.Extension, emitter api.Emitter, counterPair *api.CounterPair, reqResMatcher api.RequestResponseMatcher) api.TcpReader {
|
||||
return &tcpReader{
|
||||
progress: progress,
|
||||
ident: ident,
|
||||
tcpID: tcpId,
|
||||
captureTime: captureTime,
|
||||
parent: parent,
|
||||
isClient: isClient,
|
||||
isOutgoing: isOutgoing,
|
||||
extension: extension,
|
||||
emitter: emitter,
|
||||
counterPair: counterPair,
|
||||
reqResMatcher: reqResMatcher,
|
||||
}
|
||||
}
|
||||
|
||||
func (reader *tcpReader) Read(p []byte) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetReqResMatcher() api.RequestResponseMatcher {
|
||||
return reader.reqResMatcher
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetIsClient() bool {
|
||||
return reader.isClient
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetReadProgress() *api.ReadProgress {
|
||||
return reader.progress
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetParent() api.TcpStream {
|
||||
return reader.parent
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetTcpID() *api.TcpID {
|
||||
return reader.tcpID
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetCounterPair() *api.CounterPair {
|
||||
return reader.counterPair
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetCaptureTime() time.Time {
|
||||
return reader.captureTime
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetEmitter() api.Emitter {
|
||||
return reader.emitter
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetIsClosed() bool {
|
||||
return reader.isClosed
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetExtension() *api.Extension {
|
||||
return reader.extension
|
||||
}
|
||||
tap/extensions/amqp/tcp_stream_mock_test.go (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
package amqp
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
type tcpStream struct {
|
||||
isClosed bool
|
||||
protoIdentifier *api.ProtoIdentifier
|
||||
isTapTarget bool
|
||||
origin api.Capture
|
||||
reqResMatchers []api.RequestResponseMatcher
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func NewTcpStream(capture api.Capture) api.TcpStream {
|
||||
return &tcpStream{
|
||||
origin: capture,
|
||||
protoIdentifier: &api.ProtoIdentifier{},
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tcpStream) SetProtocol(protocol *api.Protocol) {}
|
||||
|
||||
func (t *tcpStream) GetOrigin() api.Capture {
|
||||
return t.origin
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetProtoIdentifier() *api.ProtoIdentifier {
|
||||
return t.protoIdentifier
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetReqResMatchers() []api.RequestResponseMatcher {
|
||||
return t.reqResMatchers
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetIsTapTarget() bool {
|
||||
return t.isTapTarget
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetIsClosed() bool {
|
||||
return t.isClosed
|
||||
}
|
||||
@@ -13,4 +13,4 @@ test-pull-bin:
|
||||
|
||||
test-pull-expect:
|
||||
@mkdir -p expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect8/http/\* expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect9/http/\* expect
|
||||
|
||||
@@ -4,6 +4,7 @@ go 1.17
|
||||
|
||||
require (
|
||||
github.com/beevik/etree v1.1.0
|
||||
github.com/mertyildiran/gqlparser/v2 v2.4.6
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/up9inc/mizu/tap/api v0.0.0
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
|
||||
github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=
|
||||
github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -5,6 +6,13 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/mertyildiran/gqlparser/v2 v2.4.6 h1:enAq4F5PYgW/rYExPNzYt7IYrrZnzrfqdywMA1QdFtM=
|
||||
github.com/mertyildiran/gqlparser/v2 v2.4.6/go.mod h1:XZId58F+XqRSmoLrdsOLgqA918oNvBzuOORruJWBjDo=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
@@ -18,8 +26,10 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
tap/extensions/http/graphql.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package http

import (
    "encoding/json"

    "github.com/mertyildiran/gqlparser/v2/ast"
    "github.com/mertyildiran/gqlparser/v2/parser"
)

func isGraphQL(request map[string]interface{}) bool {
    if postData, ok := request["postData"].(map[string]interface{}); ok {
        if postData["mimeType"] == "application/json" {
            if text, ok := postData["text"].(string); ok {
                var data map[string]interface{}
                if err := json.Unmarshal([]byte(text), &data); err != nil {
                    return false
                }

                if query, ok := data["query"].(string); ok {

                    _, err := parser.ParseQuery(&ast.Source{Name: "ff", Input: query})
                    if err == nil {
                        return true
                    }
                }
            }
        }
    }

    return false
}
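A quick usage illustration (not part of the diff): isGraphQL expects the HAR-style request map the HTTP dissector already builds, with a JSON postData body whose "query" field parses as GraphQL. The literal values below are made up.

// Hypothetical request map; values are illustrative only.
request := map[string]interface{}{
    "postData": map[string]interface{}{
        "mimeType": "application/json",
        "text":     `{"query": "{ hero { name } }"}`,
    },
}
if isGraphQL(request) {
    // Analyze (see the hunk further below) then swaps the entry's protocol to
    // graphQL1Protocol or graphQL2Protocol depending on the HTTP version.
}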
@@ -8,6 +8,7 @@ import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
@@ -47,7 +48,7 @@ func replaceForwardedFor(item *api.OutputChannelItem) {
|
||||
item.ConnectionInfo.ClientPort = ""
|
||||
}
|
||||
|
||||
func handleHTTP2Stream(http2Assembler *Http2Assembler, progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) error {
|
||||
func handleHTTP2Stream(http2Assembler *Http2Assembler, progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, captureTime time.Time, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) error {
|
||||
streamID, messageHTTP1, isGrpc, err := http2Assembler.readMessage()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -66,7 +67,7 @@ func handleHTTP2Stream(http2Assembler *Http2Assembler, progress *api.ReadProgres
|
||||
streamID,
|
||||
"HTTP2",
|
||||
)
|
||||
item = reqResMatcher.registerRequest(ident, &messageHTTP1, superTimer.CaptureTime, progress.Current(), messageHTTP1.ProtoMinor)
|
||||
item = reqResMatcher.registerRequest(ident, &messageHTTP1, captureTime, progress.Current(), messageHTTP1.ProtoMinor)
|
||||
if item != nil {
|
||||
item.ConnectionInfo = &api.ConnectionInfo{
|
||||
ClientIP: tcpID.SrcIP,
|
||||
@@ -86,7 +87,7 @@ func handleHTTP2Stream(http2Assembler *Http2Assembler, progress *api.ReadProgres
|
||||
streamID,
|
||||
"HTTP2",
|
||||
)
|
||||
item = reqResMatcher.registerResponse(ident, &messageHTTP1, superTimer.CaptureTime, progress.Current(), messageHTTP1.ProtoMinor)
|
||||
item = reqResMatcher.registerResponse(ident, &messageHTTP1, captureTime, progress.Current(), messageHTTP1.ProtoMinor)
|
||||
if item != nil {
|
||||
item.ConnectionInfo = &api.ConnectionInfo{
|
||||
ClientIP: tcpID.DstIP,
|
||||
@@ -111,7 +112,7 @@ func handleHTTP2Stream(http2Assembler *Http2Assembler, progress *api.ReadProgres
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleHTTP1ClientStream(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, req *http.Request, err error) {
|
||||
func handleHTTP1ClientStream(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, captureTime time.Time, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, req *http.Request, err error) {
|
||||
req, err = http.ReadRequest(b)
|
||||
if err != nil {
|
||||
return
|
||||
@@ -139,7 +140,7 @@ func handleHTTP1ClientStream(b *bufio.Reader, progress *api.ReadProgress, captur
|
||||
requestCounter,
|
||||
"HTTP1",
|
||||
)
|
||||
item := reqResMatcher.registerRequest(ident, req, superTimer.CaptureTime, progress.Current(), req.ProtoMinor)
|
||||
item := reqResMatcher.registerRequest(ident, req, captureTime, progress.Current(), req.ProtoMinor)
|
||||
if item != nil {
|
||||
item.ConnectionInfo = &api.ConnectionInfo{
|
||||
ClientIP: tcpID.SrcIP,
|
||||
@@ -154,7 +155,7 @@ func handleHTTP1ClientStream(b *bufio.Reader, progress *api.ReadProgress, captur
|
||||
return
|
||||
}
|
||||
|
||||
func handleHTTP1ServerStream(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, err error) {
|
||||
func handleHTTP1ServerStream(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, captureTime time.Time, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, err error) {
|
||||
var res *http.Response
|
||||
res, err = http.ReadResponse(b, nil)
|
||||
if err != nil {
|
||||
@@ -183,7 +184,7 @@ func handleHTTP1ServerStream(b *bufio.Reader, progress *api.ReadProgress, captur
|
||||
responseCounter,
|
||||
"HTTP1",
|
||||
)
|
||||
item := reqResMatcher.registerResponse(ident, res, superTimer.CaptureTime, progress.Current(), res.ProtoMinor)
|
||||
item := reqResMatcher.registerResponse(ident, res, captureTime, progress.Current(), res.ProtoMinor)
|
||||
if item != nil {
|
||||
item.ConnectionInfo = &api.ConnectionInfo{
|
||||
ClientIP: tcpID.DstIP,
|
||||
|
||||
@@ -66,7 +66,35 @@ var grpcProtocol api.Protocol = api.Protocol{
|
||||
BackgroundColor: "#244c5a",
|
||||
ForegroundColor: "#ffffff",
|
||||
FontSize: 11,
|
||||
ReferenceLink: "https://grpc.github.io/grpc/core/md_doc_statuscodes.html",
|
||||
ReferenceLink: "https://grpc.github.io/grpc/core/md_doc__p_r_o_t_o_c_o_l-_h_t_t_p2.html",
|
||||
Ports: []string{"80", "443", "8080", "50051"},
|
||||
Priority: 0,
|
||||
}
|
||||
|
||||
var graphQL1Protocol api.Protocol = api.Protocol{
|
||||
Name: "http",
|
||||
LongName: "Hypertext Transfer Protocol -- HTTP/1.1 [ GraphQL over HTTP/1.1 ]",
|
||||
Abbreviation: "GQL",
|
||||
Macro: "gql",
|
||||
Version: "1.1",
|
||||
BackgroundColor: "#e10098",
|
||||
ForegroundColor: "#ffffff",
|
||||
FontSize: 12,
|
||||
ReferenceLink: "https://graphql.org/learn/serving-over-http/",
|
||||
Ports: []string{"80", "443", "8080"},
|
||||
Priority: 0,
|
||||
}
|
||||
|
||||
var graphQL2Protocol api.Protocol = api.Protocol{
|
||||
Name: "http",
|
||||
LongName: "Hypertext Transfer Protocol Version 2 (HTTP/2) [ GraphQL over HTTP/2 ]",
|
||||
Abbreviation: "GQL",
|
||||
Macro: "gql",
|
||||
Version: "2.0",
|
||||
BackgroundColor: "#e10098",
|
||||
ForegroundColor: "#ffffff",
|
||||
FontSize: 12,
|
||||
ReferenceLink: "https://graphql.org/learn/serving-over-http/",
|
||||
Ports: []string{"80", "443", "8080", "50051"},
|
||||
Priority: 0,
|
||||
}
|
||||
@@ -86,15 +114,15 @@ func (d dissecting) Ping() {
|
||||
log.Printf("pong %s", http11protocol.Name)
|
||||
}
|
||||
|
||||
func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
|
||||
reqResMatcher := _reqResMatcher.(*requestResponseMatcher)
|
||||
func (d dissecting) Dissect(b *bufio.Reader, reader api.TcpReader, options *api.TrafficFilteringOptions) error {
|
||||
reqResMatcher := reader.GetReqResMatcher().(*requestResponseMatcher)
|
||||
|
||||
var err error
|
||||
isHTTP2, _ := checkIsHTTP2Connection(b, isClient)
|
||||
isHTTP2, _ := checkIsHTTP2Connection(b, reader.GetIsClient())
|
||||
|
||||
var http2Assembler *Http2Assembler
|
||||
if isHTTP2 {
|
||||
err = prepareHTTP2Connection(b, isClient)
|
||||
err = prepareHTTP2Connection(b, reader.GetIsClient())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -105,74 +133,74 @@ func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture
|
||||
for {
|
||||
if switchingProtocolsHTTP2 {
|
||||
switchingProtocolsHTTP2 = false
|
||||
isHTTP2, err = checkIsHTTP2Connection(b, isClient)
|
||||
isHTTP2, err = checkIsHTTP2Connection(b, reader.GetIsClient())
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
err = prepareHTTP2Connection(b, isClient)
|
||||
err = prepareHTTP2Connection(b, reader.GetIsClient())
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
http2Assembler = createHTTP2Assembler(b)
|
||||
}
|
||||
|
||||
if superIdentifier.Protocol != nil && superIdentifier.Protocol != &http11protocol {
|
||||
if reader.GetParent().GetProtoIdentifier().Protocol != nil && reader.GetParent().GetProtoIdentifier().Protocol != &http11protocol {
|
||||
return errors.New("Identified by another protocol")
|
||||
}
|
||||
|
||||
if isHTTP2 {
|
||||
err = handleHTTP2Stream(http2Assembler, progress, capture, tcpID, superTimer, emitter, options, reqResMatcher)
|
||||
err = handleHTTP2Stream(http2Assembler, reader.GetReadProgress(), reader.GetParent().GetOrigin(), reader.GetTcpID(), reader.GetCaptureTime(), reader.GetEmitter(), options, reqResMatcher)
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
continue
|
||||
}
|
||||
superIdentifier.Protocol = &http11protocol
|
||||
} else if isClient {
|
||||
reader.GetParent().SetProtocol(&http11protocol)
|
||||
} else if reader.GetIsClient() {
|
||||
var req *http.Request
|
||||
switchingProtocolsHTTP2, req, err = handleHTTP1ClientStream(b, progress, capture, tcpID, counterPair, superTimer, emitter, options, reqResMatcher)
|
||||
switchingProtocolsHTTP2, req, err = handleHTTP1ClientStream(b, reader.GetReadProgress(), reader.GetParent().GetOrigin(), reader.GetTcpID(), reader.GetCounterPair(), reader.GetCaptureTime(), reader.GetEmitter(), options, reqResMatcher)
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
continue
|
||||
}
|
||||
superIdentifier.Protocol = &http11protocol
|
||||
reader.GetParent().SetProtocol(&http11protocol)
|
||||
|
||||
// In case of an HTTP2 upgrade, duplicate the HTTP1 request into HTTP2 with stream ID 1
|
||||
if switchingProtocolsHTTP2 {
|
||||
ident := fmt.Sprintf(
|
||||
"%s_%s_%s_%s_1_%s",
|
||||
tcpID.SrcIP,
|
||||
tcpID.DstIP,
|
||||
tcpID.SrcPort,
|
||||
tcpID.DstPort,
|
||||
reader.GetTcpID().SrcIP,
|
||||
reader.GetTcpID().DstIP,
|
||||
reader.GetTcpID().SrcPort,
|
||||
reader.GetTcpID().DstPort,
|
||||
"HTTP2",
|
||||
)
|
||||
item := reqResMatcher.registerRequest(ident, req, superTimer.CaptureTime, progress.Current(), req.ProtoMinor)
|
||||
item := reqResMatcher.registerRequest(ident, req, reader.GetCaptureTime(), reader.GetReadProgress().Current(), req.ProtoMinor)
|
||||
if item != nil {
|
||||
item.ConnectionInfo = &api.ConnectionInfo{
|
||||
ClientIP: tcpID.SrcIP,
|
||||
ClientPort: tcpID.SrcPort,
|
||||
ServerIP: tcpID.DstIP,
|
||||
ServerPort: tcpID.DstPort,
|
||||
ClientIP: reader.GetTcpID().SrcIP,
|
||||
ClientPort: reader.GetTcpID().SrcPort,
|
||||
ServerIP: reader.GetTcpID().DstIP,
|
||||
ServerPort: reader.GetTcpID().DstPort,
|
||||
IsOutgoing: true,
|
||||
}
|
||||
item.Capture = capture
|
||||
filterAndEmit(item, emitter, options)
|
||||
item.Capture = reader.GetParent().GetOrigin()
|
||||
filterAndEmit(item, reader.GetEmitter(), options)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
switchingProtocolsHTTP2, err = handleHTTP1ServerStream(b, progress, capture, tcpID, counterPair, superTimer, emitter, options, reqResMatcher)
|
||||
switchingProtocolsHTTP2, err = handleHTTP1ServerStream(b, reader.GetReadProgress(), reader.GetParent().GetOrigin(), reader.GetTcpID(), reader.GetCounterPair(), reader.GetCaptureTime(), reader.GetEmitter(), options, reqResMatcher)
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
continue
|
||||
}
|
||||
superIdentifier.Protocol = &http11protocol
|
||||
reader.GetParent().SetProtocol(&http11protocol)
|
||||
}
|
||||
}
|
||||
|
||||
if superIdentifier.Protocol == nil {
|
||||
if reader.GetParent().GetProtoIdentifier().Protocol == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -208,6 +236,14 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
|
||||
}
|
||||
}
|
||||
|
||||
if isGraphQL(reqDetails) {
|
||||
if item.Protocol.Version == "2.0" {
|
||||
item.Protocol = graphQL2Protocol
|
||||
} else {
|
||||
item.Protocol = graphQL1Protocol
|
||||
}
|
||||
}
|
||||
|
||||
if resDetails["bodySize"].(float64) < 0 {
|
||||
resDetails["bodySize"] = 0
|
||||
}
|
||||
@@ -365,8 +401,8 @@ func representRequest(request map[string]interface{}) (repRequest []interface{})
|
||||
|
||||
postData, _ := request["postData"].(map[string]interface{})
|
||||
mimeType := postData["mimeType"]
|
||||
if mimeType == nil || len(mimeType.(string)) == 0 {
|
||||
mimeType = "text/html"
|
||||
if mimeType == nil {
|
||||
mimeType = ""
|
||||
}
|
||||
text := postData["text"]
|
||||
if text != nil {
|
||||
@@ -447,8 +483,8 @@ func representResponse(response map[string]interface{}) (repResponse []interface
|
||||
|
||||
content, _ := response["content"].(map[string]interface{})
|
||||
mimeType := content["mimeType"]
|
||||
if mimeType == nil || len(mimeType.(string)) == 0 {
|
||||
mimeType = "text/html"
|
||||
if mimeType == nil {
|
||||
mimeType = ""
|
||||
}
|
||||
encoding := content["encoding"]
|
||||
text := content["text"]
|
||||
@@ -481,6 +517,7 @@ func (d dissecting) Macros() map[string]string {
|
||||
`http`: fmt.Sprintf(`proto.name == "%s" and proto.version.startsWith("%c")`, http11protocol.Name, http11protocol.Version[0]),
|
||||
`http2`: fmt.Sprintf(`proto.name == "%s" and proto.version == "%s"`, http11protocol.Name, http2Protocol.Version),
|
||||
`grpc`: fmt.Sprintf(`proto.name == "%s" and proto.version == "%s" and proto.macro == "%s"`, http11protocol.Name, grpcProtocol.Version, grpcProtocol.Macro),
|
||||
`gql`: fmt.Sprintf(`proto.name == "%s" and proto.macro == "%s"`, graphQL1Protocol.Name, graphQL1Protocol.Macro),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -47,6 +47,7 @@ func TestMacros(t *testing.T) {
|
||||
"http": `proto.name == "http" and proto.version.startsWith("1")`,
|
||||
"http2": `proto.name == "http" and proto.version == "2.0"`,
|
||||
"grpc": `proto.name == "http" and proto.version == "2.0" and proto.macro == "grpc"`,
|
||||
"gql": `proto.name == "http" and proto.macro == "gql"`,
|
||||
}
|
||||
dissector := NewDissector()
|
||||
macros := dissector.Macros()
|
||||
@@ -108,7 +109,6 @@ func TestDissect(t *testing.T) {
|
||||
Request: 0,
|
||||
Response: 0,
|
||||
}
|
||||
superIdentifier := &api.SuperIdentifier{}
|
||||
|
||||
// Request
|
||||
pathClient := _path
|
||||
@@ -124,7 +124,21 @@ func TestDissect(t *testing.T) {
|
||||
DstPort: "2",
|
||||
}
|
||||
reqResMatcher := dissector.NewResponseRequestMatcher()
|
||||
err = dissector.Dissect(bufferClient, &api.ReadProgress{}, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
stream := NewTcpStream(api.Pcap)
|
||||
reader := NewTcpReader(
|
||||
&api.ReadProgress{},
|
||||
"",
|
||||
tcpIDClient,
|
||||
time.Time{},
|
||||
stream,
|
||||
true,
|
||||
false,
|
||||
nil,
|
||||
emitter,
|
||||
counterPair,
|
||||
reqResMatcher,
|
||||
)
|
||||
err = dissector.Dissect(bufferClient, reader, options)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
panic(err)
|
||||
}
|
||||
@@ -142,7 +156,20 @@ func TestDissect(t *testing.T) {
|
||||
SrcPort: "2",
|
||||
DstPort: "1",
|
||||
}
|
||||
err = dissector.Dissect(bufferServer, &api.ReadProgress{}, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
reader = NewTcpReader(
|
||||
&api.ReadProgress{},
|
||||
"",
|
||||
tcpIDServer,
|
||||
time.Time{},
|
||||
stream,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
emitter,
|
||||
counterPair,
|
||||
reqResMatcher,
|
||||
)
|
||||
err = dissector.Dissect(bufferServer, reader, options)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
tap/extensions/http/tcp_reader_mock_test.go (new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
type tcpReader struct {
|
||||
ident string
|
||||
tcpID *api.TcpID
|
||||
isClosed bool
|
||||
isClient bool
|
||||
isOutgoing bool
|
||||
progress *api.ReadProgress
|
||||
captureTime time.Time
|
||||
parent api.TcpStream
|
||||
extension *api.Extension
|
||||
emitter api.Emitter
|
||||
counterPair *api.CounterPair
|
||||
reqResMatcher api.RequestResponseMatcher
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func NewTcpReader(progress *api.ReadProgress, ident string, tcpId *api.TcpID, captureTime time.Time, parent api.TcpStream, isClient bool, isOutgoing bool, extension *api.Extension, emitter api.Emitter, counterPair *api.CounterPair, reqResMatcher api.RequestResponseMatcher) api.TcpReader {
|
||||
return &tcpReader{
|
||||
progress: progress,
|
||||
ident: ident,
|
||||
tcpID: tcpId,
|
||||
captureTime: captureTime,
|
||||
parent: parent,
|
||||
isClient: isClient,
|
||||
isOutgoing: isOutgoing,
|
||||
extension: extension,
|
||||
emitter: emitter,
|
||||
counterPair: counterPair,
|
||||
reqResMatcher: reqResMatcher,
|
||||
}
|
||||
}
|
||||
|
||||
func (reader *tcpReader) Read(p []byte) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetReqResMatcher() api.RequestResponseMatcher {
|
||||
return reader.reqResMatcher
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetIsClient() bool {
|
||||
return reader.isClient
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetReadProgress() *api.ReadProgress {
|
||||
return reader.progress
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetParent() api.TcpStream {
|
||||
return reader.parent
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetTcpID() *api.TcpID {
|
||||
return reader.tcpID
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetCounterPair() *api.CounterPair {
|
||||
return reader.counterPair
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetCaptureTime() time.Time {
|
||||
return reader.captureTime
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetEmitter() api.Emitter {
|
||||
return reader.emitter
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetIsClosed() bool {
|
||||
return reader.isClosed
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetExtension() *api.Extension {
|
||||
return reader.extension
|
||||
}
|
||||
tap/extensions/http/tcp_stream_mock_test.go (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
type tcpStream struct {
|
||||
isClosed bool
|
||||
protoIdentifier *api.ProtoIdentifier
|
||||
isTapTarget bool
|
||||
origin api.Capture
|
||||
reqResMatchers []api.RequestResponseMatcher
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func NewTcpStream(capture api.Capture) api.TcpStream {
|
||||
return &tcpStream{
|
||||
origin: capture,
|
||||
protoIdentifier: &api.ProtoIdentifier{},
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tcpStream) SetProtocol(protocol *api.Protocol) {}
|
||||
|
||||
func (t *tcpStream) GetOrigin() api.Capture {
|
||||
return t.origin
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetProtoIdentifier() *api.ProtoIdentifier {
|
||||
return t.protoIdentifier
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetReqResMatchers() []api.RequestResponseMatcher {
|
||||
return t.reqResMatchers
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetIsTapTarget() bool {
|
||||
return t.isTapTarget
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetIsClosed() bool {
|
||||
return t.isClosed
|
||||
}
|
||||
@@ -13,4 +13,4 @@ test-pull-bin:
|
||||
|
||||
test-pull-expect:
|
||||
@mkdir -p expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect8/kafka/\* expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect9/kafka/\* expect
|
||||
|
||||
@@ -3,13 +3,14 @@ package kafka
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"golang.org/x/text/cases"
|
||||
"golang.org/x/text/language"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/cases"
|
||||
"golang.org/x/text/language"
|
||||
|
||||
"github.com/fatih/camelcase"
|
||||
"github.com/ohler55/ojg/jp"
|
||||
"github.com/ohler55/ojg/oj"
|
||||
@@ -36,9 +37,14 @@ type KafkaWrapper struct {
|
||||
|
||||
func representRequestHeader(data map[string]interface{}, rep []interface{}) []interface{} {
|
||||
requestHeader, _ := json.Marshal([]api.TableData{
|
||||
{
|
||||
Name: "ApiKeyName",
|
||||
Value: data["apiKeyName"].(string),
|
||||
Selector: `request.apiKeyName`,
|
||||
},
|
||||
{
|
||||
Name: "ApiKey",
|
||||
Value: apiNames[int(data["apiKey"].(float64))],
|
||||
Value: int(data["apiKey"].(float64)),
|
||||
Selector: `request.apiKey`,
|
||||
},
|
||||
{
|
||||
|
||||
@@ -35,25 +35,25 @@ func (d dissecting) Ping() {
|
||||
log.Printf("pong %s", _protocol.Name)
|
||||
}
|
||||
|
||||
func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
|
||||
reqResMatcher := _reqResMatcher.(*requestResponseMatcher)
|
||||
func (d dissecting) Dissect(b *bufio.Reader, reader api.TcpReader, options *api.TrafficFilteringOptions) error {
|
||||
reqResMatcher := reader.GetReqResMatcher().(*requestResponseMatcher)
|
||||
for {
|
||||
if superIdentifier.Protocol != nil && superIdentifier.Protocol != &_protocol {
|
||||
if reader.GetParent().GetProtoIdentifier().Protocol != nil && reader.GetParent().GetProtoIdentifier().Protocol != &_protocol {
|
||||
return errors.New("Identified by another protocol")
|
||||
}
|
||||
|
||||
if isClient {
|
||||
_, _, err := ReadRequest(b, tcpID, counterPair, superTimer, reqResMatcher)
|
||||
if reader.GetIsClient() {
|
||||
_, _, err := ReadRequest(b, reader.GetTcpID(), reader.GetCounterPair(), reader.GetCaptureTime(), reqResMatcher)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
superIdentifier.Protocol = &_protocol
|
||||
reader.GetParent().SetProtocol(&_protocol)
|
||||
} else {
|
||||
err := ReadResponse(b, capture, tcpID, counterPair, superTimer, emitter, reqResMatcher)
|
||||
err := ReadResponse(b, reader.GetParent().GetOrigin(), reader.GetTcpID(), reader.GetCounterPair(), reader.GetCaptureTime(), reader.GetEmitter(), reqResMatcher)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
superIdentifier.Protocol = &_protocol
|
||||
reader.GetParent().SetProtocol(&_protocol)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -96,8 +96,8 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
|
||||
statusQuery := ""
|
||||
|
||||
apiKey := ApiKey(entry.Request["apiKey"].(float64))
|
||||
method := apiNames[apiKey]
|
||||
methodQuery := fmt.Sprintf("request.apiKey == %d", int(entry.Request["apiKey"].(float64)))
|
||||
method := entry.Request["apiKeyName"].(string)
|
||||
methodQuery := fmt.Sprintf(`request.apiKeyName == "%s"`, method)
|
||||
|
||||
summary := ""
|
||||
summaryQuery := ""
|
||||
|
||||
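To make the effect concrete (example values assumed; API key 3 is the standard Kafka Metadata request, not taken from this diff): Summarize now derives the method label and filter from the apiKeyName field, which a later hunk adds to the Request struct alongside the raw numeric apiKey.

// Illustrative only -- values are assumed, not from this change set.
// The serialized request now carries both fields:
//   {"apiKeyName": "Metadata", "apiKey": 3, ...}
// and Summarize builds the method filter from the name:
methodQuery := fmt.Sprintf(`request.apiKeyName == "%s"`, "Metadata")
// -> request.apiKeyName == "Metadata"   (previously: request.apiKey == 3)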
@@ -106,7 +106,6 @@ func TestDissect(t *testing.T) {
|
||||
Request: 0,
|
||||
Response: 0,
|
||||
}
|
||||
superIdentifier := &api.SuperIdentifier{}
|
||||
|
||||
// Request
|
||||
pathClient := _path
|
||||
@@ -123,7 +122,21 @@ func TestDissect(t *testing.T) {
|
||||
}
|
||||
reqResMatcher := dissector.NewResponseRequestMatcher()
|
||||
reqResMatcher.SetMaxTry(10)
|
||||
err = dissector.Dissect(bufferClient, &api.ReadProgress{}, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
stream := NewTcpStream(api.Pcap)
|
||||
reader := NewTcpReader(
|
||||
&api.ReadProgress{},
|
||||
"",
|
||||
tcpIDClient,
|
||||
time.Time{},
|
||||
stream,
|
||||
true,
|
||||
false,
|
||||
nil,
|
||||
emitter,
|
||||
counterPair,
|
||||
reqResMatcher,
|
||||
)
|
||||
err = dissector.Dissect(bufferClient, reader, options)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
log.Println(err)
|
||||
}
|
||||
@@ -141,7 +154,20 @@ func TestDissect(t *testing.T) {
|
||||
SrcPort: "2",
|
||||
DstPort: "1",
|
||||
}
|
||||
err = dissector.Dissect(bufferServer, &api.ReadProgress{}, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
reader = NewTcpReader(
|
||||
&api.ReadProgress{},
|
||||
"",
|
||||
tcpIDServer,
|
||||
time.Time{},
|
||||
stream,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
emitter,
|
||||
counterPair,
|
||||
reqResMatcher,
|
||||
)
|
||||
err = dissector.Dissect(bufferServer, reader, options)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
|
||||
type Request struct {
|
||||
Size int32 `json:"size"`
|
||||
ApiKeyName string `json:"apiKeyName"`
|
||||
ApiKey ApiKey `json:"apiKey"`
|
||||
ApiVersion int16 `json:"apiVersion"`
|
||||
CorrelationID int32 `json:"correlationID"`
|
||||
@@ -19,7 +20,7 @@ type Request struct {
|
||||
CaptureTime time.Time `json:"captureTime"`
|
||||
}
|
||||
|
||||
func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, reqResMatcher *requestResponseMatcher) (apiKey ApiKey, apiVersion int16, err error) {
|
||||
func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, captureTime time.Time, reqResMatcher *requestResponseMatcher) (apiKey ApiKey, apiVersion int16, err error) {
|
||||
d := &decoder{reader: r, remain: 4}
|
||||
size := d.readInt32()
|
||||
|
||||
@@ -202,11 +203,12 @@ func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, su
|
||||
|
||||
request := &Request{
|
||||
Size: size,
|
||||
ApiKeyName: apiNames[apiKey],
|
||||
ApiKey: apiKey,
|
||||
ApiVersion: apiVersion,
|
||||
CorrelationID: correlationID,
|
||||
ClientID: clientID,
|
||||
CaptureTime: superTimer.CaptureTime,
|
||||
CaptureTime: captureTime,
|
||||
Payload: payload,
|
||||
}
|
||||
|
||||
|
||||
@@ -16,7 +16,7 @@ type Response struct {
|
||||
CaptureTime time.Time `json:"captureTime"`
|
||||
}
|
||||
|
||||
func ReadResponse(r io.Reader, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, reqResMatcher *requestResponseMatcher) (err error) {
|
||||
func ReadResponse(r io.Reader, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, captureTime time.Time, emitter api.Emitter, reqResMatcher *requestResponseMatcher) (err error) {
|
||||
d := &decoder{reader: r, remain: 4}
|
||||
size := d.readInt32()
|
||||
|
||||
@@ -43,7 +43,7 @@ func ReadResponse(r io.Reader, capture api.Capture, tcpID *api.TcpID, counterPai
|
||||
Size: size,
|
||||
CorrelationID: correlationID,
|
||||
Payload: payload,
|
||||
CaptureTime: superTimer.CaptureTime,
|
||||
CaptureTime: captureTime,
|
||||
}
|
||||
|
||||
key := fmt.Sprintf(
|
||||
|
||||
tap/extensions/kafka/tcp_reader_mock_test.go (new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
type tcpReader struct {
|
||||
ident string
|
||||
tcpID *api.TcpID
|
||||
isClosed bool
|
||||
isClient bool
|
||||
isOutgoing bool
|
||||
progress *api.ReadProgress
|
||||
captureTime time.Time
|
||||
parent api.TcpStream
|
||||
extension *api.Extension
|
||||
emitter api.Emitter
|
||||
counterPair *api.CounterPair
|
||||
reqResMatcher api.RequestResponseMatcher
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func NewTcpReader(progress *api.ReadProgress, ident string, tcpId *api.TcpID, captureTime time.Time, parent api.TcpStream, isClient bool, isOutgoing bool, extension *api.Extension, emitter api.Emitter, counterPair *api.CounterPair, reqResMatcher api.RequestResponseMatcher) api.TcpReader {
|
||||
return &tcpReader{
|
||||
progress: progress,
|
||||
ident: ident,
|
||||
tcpID: tcpId,
|
||||
captureTime: captureTime,
|
||||
parent: parent,
|
||||
isClient: isClient,
|
||||
isOutgoing: isOutgoing,
|
||||
extension: extension,
|
||||
emitter: emitter,
|
||||
counterPair: counterPair,
|
||||
reqResMatcher: reqResMatcher,
|
||||
}
|
||||
}
|
||||
|
||||
func (reader *tcpReader) Read(p []byte) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetReqResMatcher() api.RequestResponseMatcher {
|
||||
return reader.reqResMatcher
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetIsClient() bool {
|
||||
return reader.isClient
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetReadProgress() *api.ReadProgress {
|
||||
return reader.progress
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetParent() api.TcpStream {
|
||||
return reader.parent
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetTcpID() *api.TcpID {
|
||||
return reader.tcpID
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetCounterPair() *api.CounterPair {
|
||||
return reader.counterPair
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetCaptureTime() time.Time {
|
||||
return reader.captureTime
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetEmitter() api.Emitter {
|
||||
return reader.emitter
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetIsClosed() bool {
|
||||
return reader.isClosed
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetExtension() *api.Extension {
|
||||
return reader.extension
|
||||
}
|
||||
tap/extensions/kafka/tcp_stream_mock_test.go (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
type tcpStream struct {
|
||||
isClosed bool
|
||||
protoIdentifier *api.ProtoIdentifier
|
||||
isTapTarget bool
|
||||
origin api.Capture
|
||||
reqResMatchers []api.RequestResponseMatcher
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func NewTcpStream(capture api.Capture) api.TcpStream {
|
||||
return &tcpStream{
|
||||
origin: capture,
|
||||
protoIdentifier: &api.ProtoIdentifier{},
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tcpStream) SetProtocol(protocol *api.Protocol) {}
|
||||
|
||||
func (t *tcpStream) GetOrigin() api.Capture {
|
||||
return t.origin
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetProtoIdentifier() *api.ProtoIdentifier {
|
||||
return t.protoIdentifier
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetReqResMatchers() []api.RequestResponseMatcher {
|
||||
return t.reqResMatchers
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetIsTapTarget() bool {
|
||||
return t.isTapTarget
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetIsClosed() bool {
|
||||
return t.isClosed
|
||||
}
|
||||
@@ -2,11 +2,12 @@ package redis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
func handleClientStream(progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, request *RedisPacket, reqResMatcher *requestResponseMatcher) error {
|
||||
func handleClientStream(progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, captureTime time.Time, emitter api.Emitter, request *RedisPacket, reqResMatcher *requestResponseMatcher) error {
|
||||
counterPair.Lock()
|
||||
counterPair.Request++
|
||||
requestCounter := counterPair.Request
|
||||
@@ -21,7 +22,7 @@ func handleClientStream(progress *api.ReadProgress, capture api.Capture, tcpID *
|
||||
requestCounter,
|
||||
)
|
||||
|
||||
item := reqResMatcher.registerRequest(ident, request, superTimer.CaptureTime, progress.Current())
|
||||
item := reqResMatcher.registerRequest(ident, request, captureTime, progress.Current())
|
||||
if item != nil {
|
||||
item.Capture = capture
|
||||
item.ConnectionInfo = &api.ConnectionInfo{
|
||||
@@ -36,7 +37,7 @@ func handleClientStream(progress *api.ReadProgress, capture api.Capture, tcpID *
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleServerStream(progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, response *RedisPacket, reqResMatcher *requestResponseMatcher) error {
|
||||
func handleServerStream(progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, captureTime time.Time, emitter api.Emitter, response *RedisPacket, reqResMatcher *requestResponseMatcher) error {
|
||||
counterPair.Lock()
|
||||
counterPair.Response++
|
||||
responseCounter := counterPair.Response
|
||||
@@ -51,7 +52,7 @@ func handleServerStream(progress *api.ReadProgress, capture api.Capture, tcpID *
|
||||
responseCounter,
|
||||
)
|
||||
|
||||
item := reqResMatcher.registerResponse(ident, response, superTimer.CaptureTime, progress.Current())
|
||||
item := reqResMatcher.registerResponse(ident, response, captureTime, progress.Current())
|
||||
if item != nil {
|
||||
item.Capture = capture
|
||||
item.ConnectionInfo = &api.ConnectionInfo{
|
||||
|
||||
@@ -34,8 +34,8 @@ func (d dissecting) Ping() {
|
||||
log.Printf("pong %s", protocol.Name)
|
||||
}
|
||||
|
||||
func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
|
||||
reqResMatcher := _reqResMatcher.(*requestResponseMatcher)
|
||||
func (d dissecting) Dissect(b *bufio.Reader, reader api.TcpReader, options *api.TrafficFilteringOptions) error {
|
||||
reqResMatcher := reader.GetReqResMatcher().(*requestResponseMatcher)
|
||||
is := &RedisInputStream{
|
||||
Reader: b,
|
||||
Buf: make([]byte, 8192),
|
||||
@@ -47,10 +47,10 @@ func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture
|
||||
return err
|
||||
}
|
||||
|
||||
if isClient {
|
||||
err = handleClientStream(progress, capture, tcpID, counterPair, superTimer, emitter, redisPacket, reqResMatcher)
|
||||
if reader.GetIsClient() {
|
||||
err = handleClientStream(reader.GetReadProgress(), reader.GetParent().GetOrigin(), reader.GetTcpID(), reader.GetCounterPair(), reader.GetCaptureTime(), reader.GetEmitter(), redisPacket, reqResMatcher)
|
||||
} else {
|
||||
err = handleServerStream(progress, capture, tcpID, counterPair, superTimer, emitter, redisPacket, reqResMatcher)
|
||||
err = handleServerStream(reader.GetReadProgress(), reader.GetParent().GetOrigin(), reader.GetTcpID(), reader.GetCounterPair(), reader.GetCaptureTime(), reader.GetEmitter(), redisPacket, reqResMatcher)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
|
||||
@@ -107,7 +107,6 @@ func TestDissect(t *testing.T) {
|
||||
Request: 0,
|
||||
Response: 0,
|
||||
}
|
||||
superIdentifier := &api.SuperIdentifier{}
|
||||
|
||||
// Request
|
||||
pathClient := _path
|
||||
@@ -123,7 +122,21 @@ func TestDissect(t *testing.T) {
|
||||
DstPort: "2",
|
||||
}
|
||||
reqResMatcher := dissector.NewResponseRequestMatcher()
|
||||
err = dissector.Dissect(bufferClient, &api.ReadProgress{}, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
stream := NewTcpStream(api.Pcap)
|
||||
reader := NewTcpReader(
|
||||
&api.ReadProgress{},
|
||||
"",
|
||||
tcpIDClient,
|
||||
time.Time{},
|
||||
stream,
|
||||
true,
|
||||
false,
|
||||
nil,
|
||||
emitter,
|
||||
counterPair,
|
||||
reqResMatcher,
|
||||
)
|
||||
err = dissector.Dissect(bufferClient, reader, options)
|
||||
if err != nil && reflect.TypeOf(err) != reflect.TypeOf(&ConnectError{}) && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
log.Println(err)
|
||||
}
|
||||
@@ -141,7 +154,20 @@ func TestDissect(t *testing.T) {
|
||||
SrcPort: "2",
|
||||
DstPort: "1",
|
||||
}
|
||||
err = dissector.Dissect(bufferServer, &api.ReadProgress{}, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
reader = NewTcpReader(
|
||||
&api.ReadProgress{},
|
||||
"",
|
||||
tcpIDServer,
|
||||
time.Time{},
|
||||
stream,
|
||||
false,
|
||||
false,
|
||||
nil,
|
||||
emitter,
|
||||
counterPair,
|
||||
reqResMatcher,
|
||||
)
|
||||
err = dissector.Dissect(bufferServer, reader, options)
|
||||
if err != nil && reflect.TypeOf(err) != reflect.TypeOf(&ConnectError{}) && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
tap/extensions/redis/tcp_reader_mock_test.go (new file, 84 lines)
@@ -0,0 +1,84 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
type tcpReader struct {
|
||||
ident string
|
||||
tcpID *api.TcpID
|
||||
isClosed bool
|
||||
isClient bool
|
||||
isOutgoing bool
|
||||
progress *api.ReadProgress
|
||||
captureTime time.Time
|
||||
parent api.TcpStream
|
||||
extension *api.Extension
|
||||
emitter api.Emitter
|
||||
counterPair *api.CounterPair
|
||||
reqResMatcher api.RequestResponseMatcher
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func NewTcpReader(progress *api.ReadProgress, ident string, tcpId *api.TcpID, captureTime time.Time, parent api.TcpStream, isClient bool, isOutgoing bool, extension *api.Extension, emitter api.Emitter, counterPair *api.CounterPair, reqResMatcher api.RequestResponseMatcher) api.TcpReader {
|
||||
return &tcpReader{
|
||||
progress: progress,
|
||||
ident: ident,
|
||||
tcpID: tcpId,
|
||||
captureTime: captureTime,
|
||||
parent: parent,
|
||||
isClient: isClient,
|
||||
isOutgoing: isOutgoing,
|
||||
extension: extension,
|
||||
emitter: emitter,
|
||||
counterPair: counterPair,
|
||||
reqResMatcher: reqResMatcher,
|
||||
}
|
||||
}
|
||||
|
||||
func (reader *tcpReader) Read(p []byte) (int, error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetReqResMatcher() api.RequestResponseMatcher {
|
||||
return reader.reqResMatcher
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetIsClient() bool {
|
||||
return reader.isClient
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetReadProgress() *api.ReadProgress {
|
||||
return reader.progress
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetParent() api.TcpStream {
|
||||
return reader.parent
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetTcpID() *api.TcpID {
|
||||
return reader.tcpID
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetCounterPair() *api.CounterPair {
|
||||
return reader.counterPair
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetCaptureTime() time.Time {
|
||||
return reader.captureTime
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetEmitter() api.Emitter {
|
||||
return reader.emitter
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetIsClosed() bool {
|
||||
return reader.isClosed
|
||||
}
|
||||
|
||||
func (reader *tcpReader) GetExtension() *api.Extension {
|
||||
return reader.extension
|
||||
}
|
||||
tap/extensions/redis/tcp_stream_mock_test.go (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
type tcpStream struct {
|
||||
isClosed bool
|
||||
protoIdentifier *api.ProtoIdentifier
|
||||
isTapTarget bool
|
||||
origin api.Capture
|
||||
reqResMatchers []api.RequestResponseMatcher
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func NewTcpStream(capture api.Capture) api.TcpStream {
|
||||
return &tcpStream{
|
||||
origin: capture,
|
||||
protoIdentifier: &api.ProtoIdentifier{},
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tcpStream) SetProtocol(protocol *api.Protocol) {}
|
||||
|
||||
func (t *tcpStream) GetOrigin() api.Capture {
|
||||
return t.origin
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetProtoIdentifier() *api.ProtoIdentifier {
|
||||
return t.protoIdentifier
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetReqResMatchers() []api.RequestResponseMatcher {
|
||||
return t.reqResMatchers
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetIsTapTarget() bool {
|
||||
return t.isTapTarget
|
||||
}
|
||||
|
||||
func (t *tcpStream) GetIsClosed() bool {
|
||||
return t.isClosed
|
||||
}
|
||||
@@ -7,7 +7,6 @@ require (
    github.com/go-errors/errors v1.4.2
    github.com/google/gopacket v1.1.19
    github.com/up9inc/mizu/logger v0.0.0
    github.com/up9inc/mizu/shared v0.0.0
    github.com/up9inc/mizu/tap/api v0.0.0
    github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74
    k8s.io/api v0.23.3
@@ -16,10 +15,10 @@ require (
require (
    github.com/go-logr/logr v1.2.2 // indirect
    github.com/gogo/protobuf v1.3.2 // indirect
    github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
    github.com/google/go-cmp v0.5.7 // indirect
    github.com/google/gofuzz v1.2.0 // indirect
    github.com/google/martian v2.1.0+incompatible // indirect
    github.com/hashicorp/golang-lru v0.5.4 // indirect
    github.com/json-iterator/go v1.1.12 // indirect
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -29,16 +28,14 @@ require (
    golang.org/x/text v0.3.7 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/yaml.v2 v2.4.0 // indirect
    gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
    k8s.io/apimachinery v0.23.3 // indirect
    k8s.io/klog/v2 v2.40.1 // indirect
    k8s.io/utils v0.0.0-20220127004650-9b3446523e65 // indirect
    sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
    sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
    sigs.k8s.io/yaml v1.3.0 // indirect
)

replace github.com/up9inc/mizu/logger v0.0.0 => ../logger

replace github.com/up9inc/mizu/tap/api v0.0.0 => ./api

replace github.com/up9inc/mizu/shared v0.0.0 => ../shared
845 tap/go.sum
File diff suppressed because it is too large.
@@ -69,10 +69,12 @@ func StartPassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem,
    extensions = extensionsRef
    filteringOptions = options

    streamsMap := NewTcpStreamMap()

    if *tls {
        for _, e := range extensions {
            if e.Protocol.Name == "http" {
                tlsTapperInstance = startTlsTapper(e, outputItems, options)
                tlsTapperInstance = startTlsTapper(e, outputItems, options, streamsMap)
                break
            }
        }
@@ -82,7 +84,7 @@ func StartPassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem,
        diagnose.StartMemoryProfiler(os.Getenv(MemoryProfilingDumpPath), os.Getenv(MemoryProfilingTimeIntervalSeconds))
    }

    streamsMap, assembler := initializePassiveTapper(opts, outputItems)
    assembler := initializePassiveTapper(opts, outputItems, streamsMap)
    go startPassiveTapper(streamsMap, assembler)
}

@@ -181,9 +183,7 @@ func initializePacketSources() error {
    return err
}

func initializePassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem) (*tcpStreamMap, *tcpAssembler) {
    streamsMap := NewTcpStreamMap()

func initializePassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem, streamsMap api.TcpStreamMap) *tcpAssembler {
    diagnose.InitializeErrorsMap(*debug, *verbose, *quiet)
    diagnose.InitializeTapperInternalStats()

@@ -195,11 +195,11 @@ func initializePassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelI

    assembler := NewTcpAssembler(outputItems, streamsMap, opts)

    return streamsMap, assembler
    return assembler
}

func startPassiveTapper(streamsMap *tcpStreamMap, assembler *tcpAssembler) {
    go streamsMap.closeTimedoutTcpStreamChannels()
func startPassiveTapper(streamsMap api.TcpStreamMap, assembler *tcpAssembler) {
    go streamsMap.CloseTimedoutTcpStreamChannels()

    diagnose.AppStats.SetStartTime(time.Now())

@@ -232,7 +232,8 @@ func startPassiveTapper(streamsMap *tcpStreamMap, assembler *tcpAssembler) {
    logger.Log.Infof("AppStats: %v", diagnose.AppStats)
}

func startTlsTapper(extension *api.Extension, outputItems chan *api.OutputChannelItem, options *api.TrafficFilteringOptions) *tlstapper.TlsTapper {
func startTlsTapper(extension *api.Extension, outputItems chan *api.OutputChannelItem,
    options *api.TrafficFilteringOptions, streamsMap api.TcpStreamMap) *tlstapper.TlsTapper {
    tls := tlstapper.TlsTapper{}
    chunksBufferSize := os.Getpagesize() * 100
    logBufferSize := os.Getpagesize()
@@ -262,7 +263,7 @@ func startTlsTapper(extension *api.Extension, outputItems chan *api.OutputChanne
    }

    go tls.PollForLogging()
    go tls.Poll(emitter, options)
    go tls.Poll(emitter, options, streamsMap)

    return &tls
}
@@ -4,19 +4,24 @@ import (
    "os"
    "strconv"
    "time"

    "github.com/up9inc/mizu/logger"
)

const (
    MemoryProfilingEnabledEnvVarName = "MEMORY_PROFILING_ENABLED"
    MemoryProfilingDumpPath = "MEMORY_PROFILING_DUMP_PATH"
    MemoryProfilingTimeIntervalSeconds = "MEMORY_PROFILING_TIME_INTERVAL"
    MaxBufferedPagesTotalEnvVarName = "MAX_BUFFERED_PAGES_TOTAL"
    MaxBufferedPagesPerConnectionEnvVarName = "MAX_BUFFERED_PAGES_PER_CONNECTION"
    TcpStreamChannelTimeoutMsEnvVarName = "TCP_STREAM_CHANNEL_TIMEOUT_MS"
    CloseTimedoutTcpChannelsIntervalMsEnvVar = "CLOSE_TIMEDOUT_TCP_STREAM_CHANNELS_INTERVAL_MS"
    MaxBufferedPagesTotalDefaultValue = 5000
    MaxBufferedPagesPerConnectionDefaultValue = 5000
    TcpStreamChannelTimeoutMsDefaultValue = 10000
    MemoryProfilingEnabledEnvVarName = "MEMORY_PROFILING_ENABLED"
    MemoryProfilingDumpPath = "MEMORY_PROFILING_DUMP_PATH"
    MemoryProfilingTimeIntervalSeconds = "MEMORY_PROFILING_TIME_INTERVAL"
    MaxBufferedPagesTotalEnvVarName = "MAX_BUFFERED_PAGES_TOTAL"
    MaxBufferedPagesPerConnectionEnvVarName = "MAX_BUFFERED_PAGES_PER_CONNECTION"
    MaxBufferedPagesTotalDefaultValue = 5000
    MaxBufferedPagesPerConnectionDefaultValue = 5000
    TcpStreamChannelTimeoutMsEnvVarName = "TCP_STREAM_CHANNEL_TIMEOUT_MS"
    TcpStreamChannelTimeoutMsDefaultValue = 10000
    CloseTimedoutTcpChannelsIntervalMsEnvVarName = "CLOSE_TIMEDOUT_TCP_STREAM_CHANNELS_INTERVAL_MS"
    CloseTimedoutTcpChannelsIntervalMsDefaultValue = 1000
    CloseTimedoutTcpChannelsIntervalMsMinValue = 10
    CloseTimedoutTcpChannelsIntervalMsMaxValue = 10000
)

func GetMaxBufferedPagesTotal() int {
@@ -35,6 +40,10 @@ func GetMaxBufferedPagesPerConnection() int {
    return valueFromEnv
}

func GetMemoryProfilingEnabled() bool {
    return os.Getenv(MemoryProfilingEnabledEnvVarName) == "1"
}

func GetTcpChannelTimeoutMs() time.Duration {
    valueFromEnv, err := strconv.Atoi(os.Getenv(TcpStreamChannelTimeoutMsEnvVarName))
    if err != nil {
@@ -43,6 +52,25 @@ func GetTcpChannelTimeoutMs() time.Duration {
    return time.Duration(valueFromEnv) * time.Millisecond
}

func GetMemoryProfilingEnabled() bool {
    return os.Getenv(MemoryProfilingEnabledEnvVarName) == "1"
func GetCloseTimedoutTcpChannelsInterval() time.Duration {
    defaultDuration := CloseTimedoutTcpChannelsIntervalMsDefaultValue * time.Millisecond
    rangeMin := CloseTimedoutTcpChannelsIntervalMsMinValue
    rangeMax := CloseTimedoutTcpChannelsIntervalMsMaxValue
    closeTimedoutTcpChannelsIntervalMsStr := os.Getenv(CloseTimedoutTcpChannelsIntervalMsEnvVarName)
    if closeTimedoutTcpChannelsIntervalMsStr == "" {
        return defaultDuration
    } else {
        closeTimedoutTcpChannelsIntervalMs, err := strconv.Atoi(closeTimedoutTcpChannelsIntervalMsStr)
        if err != nil {
            logger.Log.Warningf("Error parsing environment variable %s: %v\n", CloseTimedoutTcpChannelsIntervalMsEnvVarName, err)
            return defaultDuration
        } else {
            if closeTimedoutTcpChannelsIntervalMs < rangeMin || closeTimedoutTcpChannelsIntervalMs > rangeMax {
                logger.Log.Warningf("The value of environment variable %s is not in acceptable range: %d - %d\n", CloseTimedoutTcpChannelsIntervalMsEnvVarName, rangeMin, rangeMax)
                return defaultDuration
            } else {
                return time.Duration(closeTimedoutTcpChannelsIntervalMs) * time.Millisecond
            }
        }
    }
}
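For reference, a minimal standalone sketch of the interval-resolution behaviour introduced in the hunk above. The environment variable name, the 1000 ms default and the 10-10000 ms range come from the diff; the helper name and the sample values below are illustrative only and are not part of the change.

package main

import (
    "fmt"
    "os"
    "strconv"
    "time"
)

// closeInterval mirrors the clamping logic of GetCloseTimedoutTcpChannelsInterval:
// an empty, unparsable or out-of-range value falls back to the 1000 ms default.
func closeInterval() time.Duration {
    const def = 1000 * time.Millisecond
    s := os.Getenv("CLOSE_TIMEDOUT_TCP_STREAM_CHANNELS_INTERVAL_MS")
    ms, err := strconv.Atoi(s)
    if s == "" || err != nil || ms < 10 || ms > 10000 {
        return def
    }
    return time.Duration(ms) * time.Millisecond
}

func main() {
    os.Setenv("CLOSE_TIMEDOUT_TCP_STREAM_CHANNELS_INTERVAL_MS", "250")
    fmt.Println(closeInterval()) // 250ms

    os.Setenv("CLOSE_TIMEDOUT_TCP_STREAM_CHANNELS_INTERVAL_MS", "60000")
    fmt.Println(closeInterval()) // 1s (out of range, default wins)
}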
@@ -36,7 +36,7 @@ func (c *context) GetCaptureInfo() gopacket.CaptureInfo {
    return c.CaptureInfo
}

func NewTcpAssembler(outputItems chan *api.OutputChannelItem, streamsMap *tcpStreamMap, opts *TapOpts) *tcpAssembler {
func NewTcpAssembler(outputItems chan *api.OutputChannelItem, streamsMap api.TcpStreamMap, opts *TapOpts) *tcpAssembler {
    var emitter api.Emitter = &api.Emitting{
        AppStats:      &diagnose.AppStats,
        OutputChannel: outputItems,
@@ -82,12 +82,6 @@ func (a *tcpAssembler) processPackets(dumpPacket bool, packets <-chan source.Tcp
        if tcp != nil {
            diagnose.AppStats.IncTcpPacketsCount()
            tcp := tcp.(*layers.TCP)
            if *checksum {
                err := tcp.SetNetworkLayerForChecksum(packet.NetworkLayer())
                if err != nil {
                    logger.Log.Fatalf("Failed to set network layer for checksum: %s", err)
                }
            }

            c := context{
                CaptureInfo: packet.Metadata().CaptureInfo,
@@ -11,22 +11,9 @@ import (
    "github.com/up9inc/mizu/tap/api"
)

type tcpReaderDataMsg struct {
    bytes     []byte
    timestamp time.Time
}

type ConnectionInfo struct {
    ClientIP   string
    ClientPort string
    ServerIP   string
    ServerPort string
    IsOutgoing bool
}

/* tcpReader gets reads from a channel of bytes of tcp payload, and parses it into requests and responses.
/* TcpReader gets reads from a channel of bytes of tcp payload, and parses it into requests and responses.
 * The payload is written to the channel by a tcpStream object that is dedicated to one tcp connection.
 * An tcpReader object is unidirectional: it parses either a client stream or a server stream.
 * An TcpReader object is unidirectional: it parses either a client stream or a server stream.
 * Implements io.Reader interface (Read)
 */
type tcpReader struct {
@@ -35,10 +22,10 @@ type tcpReader struct {
    isClosed    bool
    isClient    bool
    isOutgoing  bool
    msgQueue    chan tcpReaderDataMsg // Channel of captured reassembled tcp payload
    msgQueue    chan api.TcpReaderDataMsg // Channel of captured reassembled tcp payload
    data        []byte
    progress    *api.ReadProgress
    superTimer  *api.SuperTimer
    captureTime time.Time
    parent      *tcpStream
    packetsSeen uint
    extension   *api.Extension
@@ -48,47 +35,114 @@ type tcpReader struct {
    sync.Mutex
}

func (h *tcpReader) Read(p []byte) (int, error) {
    var msg tcpReaderDataMsg

    ok := true
    for ok && len(h.data) == 0 {
        msg, ok = <-h.msgQueue
        h.data = msg.bytes

        h.superTimer.CaptureTime = msg.timestamp
        if len(h.data) > 0 {
            h.packetsSeen += 1
        }
func NewTcpReader(msgQueue chan api.TcpReaderDataMsg, progress *api.ReadProgress, ident string, tcpId *api.TcpID, captureTime time.Time, parent *tcpStream, isClient bool, isOutgoing bool, extension *api.Extension, emitter api.Emitter, counterPair *api.CounterPair, reqResMatcher api.RequestResponseMatcher) *tcpReader {
    return &tcpReader{
        msgQueue:      msgQueue,
        progress:      progress,
        ident:         ident,
        tcpID:         tcpId,
        captureTime:   captureTime,
        parent:        parent,
        isClient:      isClient,
        isOutgoing:    isOutgoing,
        extension:     extension,
        emitter:       emitter,
        counterPair:   counterPair,
        reqResMatcher: reqResMatcher,
    }
    if !ok || len(h.data) == 0 {
        return 0, io.EOF
    }

    l := copy(p, h.data)
    h.data = h.data[l:]
    h.progress.Feed(l)

    return l, nil
}

func (h *tcpReader) Close() {
    h.Lock()
    if !h.isClosed {
        h.isClosed = true
        close(h.msgQueue)
    }
    h.Unlock()
}

func (h *tcpReader) run(wg *sync.WaitGroup) {
func (reader *tcpReader) run(options *api.TrafficFilteringOptions, wg *sync.WaitGroup) {
    defer wg.Done()
    b := bufio.NewReader(h)
    err := h.extension.Dissector.Dissect(b, h.progress, h.parent.origin, h.isClient, h.tcpID, h.counterPair, h.superTimer, h.parent.superIdentifier, h.emitter, filteringOptions, h.reqResMatcher)
    b := bufio.NewReader(reader)
    err := reader.extension.Dissector.Dissect(b, reader, options)
    if err != nil {
        _, err = io.Copy(ioutil.Discard, b)
        _, err = io.Copy(ioutil.Discard, reader)
        if err != nil {
            logger.Log.Errorf("%v", err)
        }
    }
}

func (reader *tcpReader) close() {
    reader.Lock()
    if !reader.isClosed {
        reader.isClosed = true
        close(reader.msgQueue)
    }
    reader.Unlock()
}

func (reader *tcpReader) sendMsgIfNotClosed(msg api.TcpReaderDataMsg) {
    reader.Lock()
    if !reader.isClosed {
        reader.msgQueue <- msg
    }
    reader.Unlock()
}

func (reader *tcpReader) Read(p []byte) (int, error) {
    var msg api.TcpReaderDataMsg

    ok := true
    for ok && len(reader.data) == 0 {
        msg, ok = <-reader.msgQueue
        if msg != nil {
            reader.data = msg.GetBytes()
            reader.captureTime = msg.GetTimestamp()
        }

        if len(reader.data) > 0 {
            reader.packetsSeen += 1
        }
    }
    if !ok || len(reader.data) == 0 {
        return 0, io.EOF
    }

    l := copy(p, reader.data)
    reader.data = reader.data[l:]
    reader.progress.Feed(l)

    return l, nil
}

func (reader *tcpReader) GetReqResMatcher() api.RequestResponseMatcher {
    return reader.reqResMatcher
}

func (reader *tcpReader) GetIsClient() bool {
    return reader.isClient
}

func (reader *tcpReader) GetReadProgress() *api.ReadProgress {
    return reader.progress
}

func (reader *tcpReader) GetParent() api.TcpStream {
    return reader.parent
}

func (reader *tcpReader) GetTcpID() *api.TcpID {
    return reader.tcpID
}

func (reader *tcpReader) GetCounterPair() *api.CounterPair {
    return reader.counterPair
}

func (reader *tcpReader) GetCaptureTime() time.Time {
    return reader.captureTime
}

func (reader *tcpReader) GetEmitter() api.Emitter {
    return reader.emitter
}

func (reader *tcpReader) GetIsClosed() bool {
    return reader.isClosed
}

func (reader *tcpReader) GetExtension() *api.Extension {
    return reader.extension
}
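The refactored Read above keeps the same underlying pattern as before: a channel of reassembled payloads is drained into an io.Reader so a dissector can consume it through bufio. A toy, self-contained sketch of that pattern (the type name and the sample payload are invented for illustration and are not taken from the code above):

package main

import (
    "bufio"
    "fmt"
    "io"
)

// chanReader is a toy version of the channel-backed io.Reader used by tcpReader:
// payload chunks arrive on a channel and Read drains them into the caller's buffer.
type chanReader struct {
    msgs chan []byte
    data []byte
}

func (r *chanReader) Read(p []byte) (int, error) {
    for len(r.data) == 0 {
        msg, ok := <-r.msgs
        if !ok {
            return 0, io.EOF // producer closed the channel
        }
        r.data = msg
    }
    n := copy(p, r.data)
    r.data = r.data[n:]
    return n, nil
}

func main() {
    r := &chanReader{msgs: make(chan []byte, 2)}
    r.msgs <- []byte("GET / HTTP/1.1\r\n")
    r.msgs <- []byte("Host: example.com\r\n")
    close(r.msgs)

    // A dissector-style consumer reads line by line through bufio,
    // just as tcpReader.run wraps itself in bufio.NewReader.
    br := bufio.NewReader(r)
    for {
        line, err := br.ReadString('\n')
        if line != "" {
            fmt.Printf("%q\n", line)
        }
        if err != nil {
            break
        }
    }
}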
24 tap/tcp_reader_data_msg.go (new file)
@@ -0,0 +1,24 @@
package tap

import (
    "time"

    "github.com/up9inc/mizu/tap/api"
)

type tcpReaderDataMsg struct {
    bytes     []byte
    timestamp time.Time
}

func NewTcpReaderDataMsg(data []byte, timestamp time.Time) api.TcpReaderDataMsg {
    return &tcpReaderDataMsg{data, timestamp}
}

func (dataMsg *tcpReaderDataMsg) GetBytes() []byte {
    return dataMsg.bytes
}

func (dataMsg *tcpReaderDataMsg) GetTimestamp() time.Time {
    return dataMsg.timestamp
}
164 tap/tcp_reassembly_stream.go (new file)
@@ -0,0 +1,164 @@
package tap

import (
    "encoding/binary"

    "github.com/google/gopacket"
    "github.com/google/gopacket/layers" // pulls in all layers decoders
    "github.com/google/gopacket/reassembly"
    "github.com/up9inc/mizu/tap/api"
    "github.com/up9inc/mizu/tap/diagnose"
)

type tcpReassemblyStream struct {
    ident      string
    tcpState   *reassembly.TCPSimpleFSM
    fsmerr     bool
    optchecker reassembly.TCPOptionCheck
    isDNS      bool
    tcpStream  api.TcpStream
}

func NewTcpReassemblyStream(ident string, tcp *layers.TCP, fsmOptions reassembly.TCPSimpleFSMOptions, stream api.TcpStream) reassembly.Stream {
    return &tcpReassemblyStream{
        ident:      ident,
        tcpState:   reassembly.NewTCPSimpleFSM(fsmOptions),
        optchecker: reassembly.NewTCPOptionCheck(),
        isDNS:      tcp.SrcPort == 53 || tcp.DstPort == 53,
        tcpStream:  stream,
    }
}

func (t *tcpReassemblyStream) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir reassembly.TCPFlowDirection, nextSeq reassembly.Sequence, start *bool, ac reassembly.AssemblerContext) bool {
    // FSM
    if !t.tcpState.CheckState(tcp, dir) {
        diagnose.TapErrors.SilentError("FSM-rejection", "%s: Packet rejected by FSM (state:%s)", t.ident, t.tcpState.String())
        diagnose.InternalStats.RejectFsm++
        if !t.fsmerr {
            t.fsmerr = true
            diagnose.InternalStats.RejectConnFsm++
        }
        if !*ignorefsmerr {
            return false
        }
    }
    // Options
    err := t.optchecker.Accept(tcp, ci, dir, nextSeq, start)
    if err != nil {
        diagnose.TapErrors.SilentError("OptionChecker-rejection", "%s: Packet rejected by OptionChecker: %s", t.ident, err)
        diagnose.InternalStats.RejectOpt++
        if !*nooptcheck {
            return false
        }
    }
    // Checksum
    accept := true
    if *checksum {
        c, err := tcp.ComputeChecksum()
        if err != nil {
            diagnose.TapErrors.SilentError("ChecksumCompute", "%s: Got error computing checksum: %s", t.ident, err)
            accept = false
        } else if c != 0x0 {
            diagnose.TapErrors.SilentError("Checksum", "%s: Invalid checksum: 0x%x", t.ident, c)
            accept = false
        }
    }
    if !accept {
        diagnose.InternalStats.RejectOpt++
    }

    *start = true

    return accept
}

func (t *tcpReassemblyStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.AssemblerContext) {
    dir, _, _, skip := sg.Info()
    length, saved := sg.Lengths()
    // update stats
    sgStats := sg.Stats()
    if skip > 0 {
        diagnose.InternalStats.MissedBytes += skip
    }
    diagnose.InternalStats.Sz += length - saved
    diagnose.InternalStats.Pkt += sgStats.Packets
    if sgStats.Chunks > 1 {
        diagnose.InternalStats.Reassembled++
    }
    diagnose.InternalStats.OutOfOrderPackets += sgStats.QueuedPackets
    diagnose.InternalStats.OutOfOrderBytes += sgStats.QueuedBytes
    if length > diagnose.InternalStats.BiggestChunkBytes {
        diagnose.InternalStats.BiggestChunkBytes = length
    }
    if sgStats.Packets > diagnose.InternalStats.BiggestChunkPackets {
        diagnose.InternalStats.BiggestChunkPackets = sgStats.Packets
    }
    if sgStats.OverlapBytes != 0 && sgStats.OverlapPackets == 0 {
        // In the original example this was handled with panic().
        // I don't know what this error means or how to handle it properly.
        diagnose.TapErrors.SilentError("Invalid-Overlap", "bytes:%d, pkts:%d", sgStats.OverlapBytes, sgStats.OverlapPackets)
    }
    diagnose.InternalStats.OverlapBytes += sgStats.OverlapBytes
    diagnose.InternalStats.OverlapPackets += sgStats.OverlapPackets

    if skip != -1 && skip != 0 {
        // Missing bytes in stream: do not even try to parse it
        return
    }
    data := sg.Fetch(length)
    if t.isDNS {
        dns := &layers.DNS{}
        var decoded []gopacket.LayerType
        if len(data) < 2 {
            if len(data) > 0 {
                sg.KeepFrom(0)
            }
            return
        }
        dnsSize := binary.BigEndian.Uint16(data[:2])
        missing := int(dnsSize) - len(data[2:])
        diagnose.TapErrors.Debug("dnsSize: %d, missing: %d", dnsSize, missing)
        if missing > 0 {
            diagnose.TapErrors.Debug("Missing some bytes: %d", missing)
            sg.KeepFrom(0)
            return
        }
        p := gopacket.NewDecodingLayerParser(layers.LayerTypeDNS, dns)
        err := p.DecodeLayers(data[2:], &decoded)
        if err != nil {
            diagnose.TapErrors.SilentError("DNS-parser", "Failed to decode DNS: %v", err)
        } else {
            diagnose.TapErrors.Debug("DNS: %s", gopacket.LayerDump(dns))
        }
        if len(data) > 2+int(dnsSize) {
            sg.KeepFrom(2 + int(dnsSize))
        }
    } else if t.tcpStream.GetIsTapTarget() {
        if length > 0 {
            // This is where we pass the reassembled information onwards
            // This channel is read by an tcpReader object
            diagnose.AppStats.IncReassembledTcpPayloadsCount()
            timestamp := ac.GetCaptureInfo().Timestamp
            stream := t.tcpStream.(*tcpStream)
            if dir == reassembly.TCPDirClientToServer {
                for i := range stream.getClients() {
                    reader := stream.getClient(i)
                    reader.sendMsgIfNotClosed(NewTcpReaderDataMsg(data, timestamp))
                }
            } else {
                for i := range stream.getServers() {
                    reader := stream.getServer(i)
                    reader.sendMsgIfNotClosed(NewTcpReaderDataMsg(data, timestamp))
                }
            }
        }
    }
}

func (t *tcpReassemblyStream) ReassemblyComplete(ac reassembly.AssemblerContext) bool {
    if t.tcpStream.GetIsTapTarget() && !t.tcpStream.GetIsClosed() {
        t.tcpStream.(*tcpStream).close()
    }
    // do not remove the connection to allow last ACK
    return false
}
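The DNS branch of ReassembledSG above relies on the DNS-over-TCP framing rule: each message is preceded by a 2-byte big-endian length, so the code can tell whether the reassembled buffer already holds a whole message. A small, self-contained sketch of that completeness check in isolation (the helper name and the sample bytes are made up for illustration):

package main

import (
    "encoding/binary"
    "fmt"
)

// wholeDNSMessage reports whether buf contains the complete TCP DNS message
// announced by its 2-byte big-endian length prefix.
func wholeDNSMessage(buf []byte) bool {
    if len(buf) < 2 {
        return false
    }
    dnsSize := binary.BigEndian.Uint16(buf[:2])
    missing := int(dnsSize) - len(buf[2:])
    return missing <= 0
}

func main() {
    fmt.Println(wholeDNSMessage([]byte{0x00, 0x04, 0xde, 0xad}))             // false: only 2 of 4 payload bytes arrived
    fmt.Println(wholeDNSMessage([]byte{0x00, 0x04, 0xde, 0xad, 0xbe, 0xef})) // true: payload complete
}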
@@ -1,202 +1,140 @@
package tap

import (
    "encoding/binary"
    "sync"
    "time"

    "github.com/google/gopacket"
    "github.com/google/gopacket/layers" // pulls in all layers decoders
    "github.com/google/gopacket/reassembly"
    "github.com/up9inc/mizu/tap/api"
    "github.com/up9inc/mizu/tap/diagnose"
)

/* It's a connection (bidirectional)
 * Implements gopacket.reassembly.Stream interface (Accept, ReassembledSG, ReassemblyComplete)
 * ReassembledSG gets called when new reassembled data is ready (i.e. bytes in order, no duplicates, complete)
 * In our implementation, we pass information from ReassembledSG to the tcpReader through a shared channel.
 * In our implementation, we pass information from ReassembledSG to the TcpReader through a shared channel.
 */
type tcpStream struct {
    id              int64
    isClosed        bool
    superIdentifier *api.SuperIdentifier
    tcpstate        *reassembly.TCPSimpleFSM
    fsmerr          bool
    optchecker      reassembly.TCPOptionCheck
    net, transport  gopacket.Flow
    isDNS           bool
    protoIdentifier *api.ProtoIdentifier
    isTapTarget     bool
    clients         []tcpReader
    servers         []tcpReader
    ident           string
    clients         []*tcpReader
    servers         []*tcpReader
    origin          api.Capture
    reqResMatchers  []api.RequestResponseMatcher
    createdAt       time.Time
    streamsMap      api.TcpStreamMap
    sync.Mutex
    streamsMap      *tcpStreamMap
}

func (t *tcpStream) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir reassembly.TCPFlowDirection, nextSeq reassembly.Sequence, start *bool, ac reassembly.AssemblerContext) bool {
    // FSM
    if !t.tcpstate.CheckState(tcp, dir) {
        diagnose.TapErrors.SilentError("FSM-rejection", "%s: Packet rejected by FSM (state:%s)", t.ident, t.tcpstate.String())
        diagnose.InternalStats.RejectFsm++
        if !t.fsmerr {
            t.fsmerr = true
            diagnose.InternalStats.RejectConnFsm++
        }
        if !*ignorefsmerr {
            return false
        }
    }
    // Options
    err := t.optchecker.Accept(tcp, ci, dir, nextSeq, start)
    if err != nil {
        diagnose.TapErrors.SilentError("OptionChecker-rejection", "%s: Packet rejected by OptionChecker: %s", t.ident, err)
        diagnose.InternalStats.RejectOpt++
        if !*nooptcheck {
            return false
        }
    }
    // Checksum
    accept := true
    if *checksum {
        c, err := tcp.ComputeChecksum()
        if err != nil {
            diagnose.TapErrors.SilentError("ChecksumCompute", "%s: Got error computing checksum: %s", t.ident, err)
            accept = false
        } else if c != 0x0 {
            diagnose.TapErrors.SilentError("Checksum", "%s: Invalid checksum: 0x%x", t.ident, c)
            accept = false
        }
    }
    if !accept {
        diagnose.InternalStats.RejectOpt++
    }

    *start = true

    return accept
}

func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.AssemblerContext) {
    dir, _, _, skip := sg.Info()
    length, saved := sg.Lengths()
    // update stats
    sgStats := sg.Stats()
    if skip > 0 {
        diagnose.InternalStats.MissedBytes += skip
    }
    diagnose.InternalStats.Sz += length - saved
    diagnose.InternalStats.Pkt += sgStats.Packets
    if sgStats.Chunks > 1 {
        diagnose.InternalStats.Reassembled++
    }
    diagnose.InternalStats.OutOfOrderPackets += sgStats.QueuedPackets
    diagnose.InternalStats.OutOfOrderBytes += sgStats.QueuedBytes
    if length > diagnose.InternalStats.BiggestChunkBytes {
        diagnose.InternalStats.BiggestChunkBytes = length
    }
    if sgStats.Packets > diagnose.InternalStats.BiggestChunkPackets {
        diagnose.InternalStats.BiggestChunkPackets = sgStats.Packets
    }
    if sgStats.OverlapBytes != 0 && sgStats.OverlapPackets == 0 {
        // In the original example this was handled with panic().
        // I don't know what this error means or how to handle it properly.
        diagnose.TapErrors.SilentError("Invalid-Overlap", "bytes:%d, pkts:%d", sgStats.OverlapBytes, sgStats.OverlapPackets)
    }
    diagnose.InternalStats.OverlapBytes += sgStats.OverlapBytes
    diagnose.InternalStats.OverlapPackets += sgStats.OverlapPackets

    if skip == -1 && *allowmissinginit {
        // this is allowed
    } else if skip != 0 {
        // Missing bytes in stream: do not even try to parse it
        return
    }
    data := sg.Fetch(length)
    if t.isDNS {
        dns := &layers.DNS{}
        var decoded []gopacket.LayerType
        if len(data) < 2 {
            if len(data) > 0 {
                sg.KeepFrom(0)
            }
            return
        }
        dnsSize := binary.BigEndian.Uint16(data[:2])
        missing := int(dnsSize) - len(data[2:])
        diagnose.TapErrors.Debug("dnsSize: %d, missing: %d", dnsSize, missing)
        if missing > 0 {
            diagnose.TapErrors.Debug("Missing some bytes: %d", missing)
            sg.KeepFrom(0)
            return
        }
        p := gopacket.NewDecodingLayerParser(layers.LayerTypeDNS, dns)
        err := p.DecodeLayers(data[2:], &decoded)
        if err != nil {
            diagnose.TapErrors.SilentError("DNS-parser", "Failed to decode DNS: %v", err)
        } else {
            diagnose.TapErrors.Debug("DNS: %s", gopacket.LayerDump(dns))
        }
        if len(data) > 2+int(dnsSize) {
            sg.KeepFrom(2 + int(dnsSize))
        }
    } else if t.isTapTarget {
        if length > 0 {
            // This is where we pass the reassembled information onwards
            // This channel is read by an tcpReader object
            diagnose.AppStats.IncReassembledTcpPayloadsCount()
            timestamp := ac.GetCaptureInfo().Timestamp
            if dir == reassembly.TCPDirClientToServer {
                for i := range t.clients {
                    reader := &t.clients[i]
                    reader.Lock()
                    if !reader.isClosed {
                        reader.msgQueue <- tcpReaderDataMsg{data, timestamp}
                    }
                    reader.Unlock()
                }
            } else {
                for i := range t.servers {
                    reader := &t.servers[i]
                    reader.Lock()
                    if !reader.isClosed {
                        reader.msgQueue <- tcpReaderDataMsg{data, timestamp}
                    }
                    reader.Unlock()
                }
            }
        }
func NewTcpStream(isTapTarget bool, streamsMap api.TcpStreamMap, capture api.Capture) *tcpStream {
    return &tcpStream{
        isTapTarget:     isTapTarget,
        protoIdentifier: &api.ProtoIdentifier{},
        streamsMap:      streamsMap,
        origin:          capture,
    }
}

func (t *tcpStream) ReassemblyComplete(ac reassembly.AssemblerContext) bool {
    if t.isTapTarget && !t.isClosed {
        t.Close()
    }
    // do not remove the connection to allow last ACK
    return false
func (t *tcpStream) getId() int64 {
    return t.id
}

func (t *tcpStream) Close() {
    shouldReturn := false
func (t *tcpStream) setId(id int64) {
    t.id = id
}

func (t *tcpStream) close() {
    t.Lock()
    defer t.Unlock()

    if t.isClosed {
        shouldReturn = true
    } else {
        t.isClosed = true
    }
    t.Unlock()
    if shouldReturn {
        return
    }

    t.isClosed = true

    t.streamsMap.Delete(t.id)

    for i := range t.clients {
        reader := &t.clients[i]
        reader.Close()
        reader := t.clients[i]
        reader.close()
    }
    for i := range t.servers {
        reader := &t.servers[i]
        reader.Close()
        reader := t.servers[i]
        reader.close()
    }
}

func (t *tcpStream) addClient(reader *tcpReader) {
    t.clients = append(t.clients, reader)
}

func (t *tcpStream) addServer(reader *tcpReader) {
    t.servers = append(t.servers, reader)
}

func (t *tcpStream) getClients() []*tcpReader {
    return t.clients
}

func (t *tcpStream) getServers() []*tcpReader {
    return t.servers
}

func (t *tcpStream) getClient(index int) *tcpReader {
    return t.clients[index]
}

func (t *tcpStream) getServer(index int) *tcpReader {
    return t.servers[index]
}

func (t *tcpStream) addReqResMatcher(reqResMatcher api.RequestResponseMatcher) {
    t.reqResMatchers = append(t.reqResMatchers, reqResMatcher)
}

func (t *tcpStream) SetProtocol(protocol *api.Protocol) {
    t.Lock()
    defer t.Unlock()

    if t.protoIdentifier.IsClosedOthers {
        return
    }

    t.protoIdentifier.Protocol = protocol

    for i := range t.clients {
        reader := t.clients[i]
        if reader.GetExtension().Protocol != t.protoIdentifier.Protocol {
            reader.close()
        }
    }
    for i := range t.servers {
        reader := t.servers[i]
        if reader.GetExtension().Protocol != t.protoIdentifier.Protocol {
            reader.close()
        }
    }

    t.protoIdentifier.IsClosedOthers = true
}

func (t *tcpStream) GetOrigin() api.Capture {
    return t.origin
}

func (t *tcpStream) GetProtoIdentifier() *api.ProtoIdentifier {
    return t.protoIdentifier
}

func (t *tcpStream) GetReqResMatchers() []api.RequestResponseMatcher {
    return t.reqResMatchers
}

func (t *tcpStream) GetIsTapTarget() bool {
    return t.isTapTarget
}

func (t *tcpStream) GetIsClosed() bool {
    return t.isClosed
}
@@ -21,19 +21,13 @@ import (
 */
type tcpStreamFactory struct {
    wg         sync.WaitGroup
    Emitter    api.Emitter
    streamsMap *tcpStreamMap
    emitter    api.Emitter
    streamsMap api.TcpStreamMap
    ownIps     []string
    opts       *TapOpts
}

type tcpStreamWrapper struct {
    stream        *tcpStream
    reqResMatcher api.RequestResponseMatcher
    createdAt     time.Time
}

func NewTcpStreamFactory(emitter api.Emitter, streamsMap *tcpStreamMap, opts *TapOpts) *tcpStreamFactory {
func NewTcpStreamFactory(emitter api.Emitter, streamsMap api.TcpStreamMap, opts *TapOpts) *tcpStreamFactory {
    var ownIps []string

    if localhostIPs, err := getLocalhostIPs(); err != nil {
@@ -46,14 +40,14 @@ func NewTcpStreamFactory(emitter api.Emitter, streamsMap *tcpStreamMap, opts *Ta
    }

    return &tcpStreamFactory{
        Emitter:    emitter,
        emitter:    emitter,
        streamsMap: streamsMap,
        ownIps:     ownIps,
        opts:       opts,
    }
}

func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.TCP, ac reassembly.AssemblerContext) reassembly.Stream {
func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcpLayer *layers.TCP, ac reassembly.AssemblerContext) reassembly.Stream {
    fsmOptions := reassembly.TCPSimpleFSMOptions{
        SupportMissingEstablishment: *allowmissinginit,
    }
@@ -64,78 +58,68 @@ func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.T

    props := factory.getStreamProps(srcIp, srcPort, dstIp, dstPort)
    isTapTarget := props.isTapTarget
    stream := &tcpStream{
        net:             net,
        transport:       transport,
        isDNS:           tcp.SrcPort == 53 || tcp.DstPort == 53,
        isTapTarget:     isTapTarget,
        tcpstate:        reassembly.NewTCPSimpleFSM(fsmOptions),
        ident:           fmt.Sprintf("%s:%s", net, transport),
        optchecker:      reassembly.NewTCPOptionCheck(),
        superIdentifier: &api.SuperIdentifier{},
        streamsMap:      factory.streamsMap,
        origin:          getPacketOrigin(ac),
    }
    if stream.isTapTarget {
        stream.id = factory.streamsMap.nextId()
    stream := NewTcpStream(isTapTarget, factory.streamsMap, getPacketOrigin(ac))
    reassemblyStream := NewTcpReassemblyStream(fmt.Sprintf("%s:%s", net, transport), tcpLayer, fsmOptions, stream)
    if stream.GetIsTapTarget() {
        stream.setId(factory.streamsMap.NextId())
        for i, extension := range extensions {
            reqResMatcher := extension.Dissector.NewResponseRequestMatcher()
            stream.addReqResMatcher(reqResMatcher)
            counterPair := &api.CounterPair{
                Request:  0,
                Response: 0,
            }
            stream.clients = append(stream.clients, tcpReader{
                msgQueue:   make(chan tcpReaderDataMsg),
                progress:   &api.ReadProgress{},
                superTimer: &api.SuperTimer{},
                ident:      fmt.Sprintf("%s %s", net, transport),
                tcpID: &api.TcpID{
                    SrcIP:   srcIp,
                    DstIP:   dstIp,
                    SrcPort: srcPort,
                    DstPort: dstPort,
                },
                parent:        stream,
                isClient:      true,
                isOutgoing:    props.isOutgoing,
                extension:     extension,
                emitter:       factory.Emitter,
                counterPair:   counterPair,
                reqResMatcher: reqResMatcher,
            })
            stream.servers = append(stream.servers, tcpReader{
                msgQueue:   make(chan tcpReaderDataMsg),
                progress:   &api.ReadProgress{},
                superTimer: &api.SuperTimer{},
                ident:      fmt.Sprintf("%s %s", net, transport),
                tcpID: &api.TcpID{
                    SrcIP:   net.Dst().String(),
                    DstIP:   net.Src().String(),
                    SrcPort: transport.Dst().String(),
                    DstPort: transport.Src().String(),
                },
                parent:        stream,
                isClient:      false,
                isOutgoing:    props.isOutgoing,
                extension:     extension,
                emitter:       factory.Emitter,
                counterPair:   counterPair,
                reqResMatcher: reqResMatcher,
            })
            stream.addClient(
                NewTcpReader(
                    make(chan api.TcpReaderDataMsg),
                    &api.ReadProgress{},
                    fmt.Sprintf("%s %s", net, transport),
                    &api.TcpID{
                        SrcIP:   srcIp,
                        DstIP:   dstIp,
                        SrcPort: srcPort,
                        DstPort: dstPort,
                    },
                    time.Time{},
                    stream,
                    true,
                    props.isOutgoing,
                    extension,
                    factory.emitter,
                    counterPair,
                    reqResMatcher,
                ),
            )
            stream.addServer(
                NewTcpReader(
                    make(chan api.TcpReaderDataMsg),
                    &api.ReadProgress{},
                    fmt.Sprintf("%s %s", net, transport),
                    &api.TcpID{
                        SrcIP:   net.Dst().String(),
                        DstIP:   net.Src().String(),
                        SrcPort: transport.Dst().String(),
                        DstPort: transport.Src().String(),
                    },
                    time.Time{},
                    stream,
                    false,
                    props.isOutgoing,
                    extension,
                    factory.emitter,
                    counterPair,
                    reqResMatcher,
                ),
            )

            factory.streamsMap.Store(stream.id, &tcpStreamWrapper{
                stream:        stream,
                reqResMatcher: reqResMatcher,
                createdAt:     time.Now(),
            })
            factory.streamsMap.Store(stream.getId(), stream)

            factory.wg.Add(2)
            // Start reading from channel stream.reader.bytes
            go stream.clients[i].run(&factory.wg)
            go stream.servers[i].run(&factory.wg)
            go stream.getClient(i).run(filteringOptions, &factory.wg)
            go stream.getServer(i).run(filteringOptions, &factory.wg)
        }
    }
    return stream
    return reassemblyStream
}

func (factory *tcpStreamFactory) WaitGoRoutines() {
@@ -1,14 +1,13 @@
package tap

import (
    "os"
    "runtime"
    _debug "runtime/debug"
    "strconv"
    "sync"
    "time"

    "github.com/up9inc/mizu/logger"
    "github.com/up9inc/mizu/tap/api"
    "github.com/up9inc/mizu/tap/diagnose"
)

@@ -17,12 +16,16 @@ type tcpStreamMap struct {
    streamId int64
}

func NewTcpStreamMap() *tcpStreamMap {
func NewTcpStreamMap() api.TcpStreamMap {
    return &tcpStreamMap{
        streams: &sync.Map{},
    }
}

func (streamMap *tcpStreamMap) Range(f func(key, value interface{}) bool) {
    streamMap.streams.Range(f)
}

func (streamMap *tcpStreamMap) Store(key, value interface{}) {
    streamMap.streams.Store(key, value)
}
@@ -31,66 +34,35 @@ func (streamMap *tcpStreamMap) Delete(key interface{}) {
    streamMap.streams.Delete(key)
}

func (streamMap *tcpStreamMap) nextId() int64 {
func (streamMap *tcpStreamMap) NextId() int64 {
    streamMap.streamId++
    return streamMap.streamId
}

func (streamMap *tcpStreamMap) getCloseTimedoutTcpChannelsInterval() time.Duration {
    defaultDuration := 1000 * time.Millisecond
    rangeMin := 10
    rangeMax := 10000
    closeTimedoutTcpChannelsIntervalMsStr := os.Getenv(CloseTimedoutTcpChannelsIntervalMsEnvVar)
    if closeTimedoutTcpChannelsIntervalMsStr == "" {
        return defaultDuration
    } else {
        closeTimedoutTcpChannelsIntervalMs, err := strconv.Atoi(closeTimedoutTcpChannelsIntervalMsStr)
        if err != nil {
            logger.Log.Warningf("Error parsing environment variable %s: %v\n", CloseTimedoutTcpChannelsIntervalMsEnvVar, err)
            return defaultDuration
        } else {
            if closeTimedoutTcpChannelsIntervalMs < rangeMin || closeTimedoutTcpChannelsIntervalMs > rangeMax {
                logger.Log.Warningf("The value of environment variable %s is not in acceptable range: %d - %d\n", CloseTimedoutTcpChannelsIntervalMsEnvVar, rangeMin, rangeMax)
                return defaultDuration
            } else {
                return time.Duration(closeTimedoutTcpChannelsIntervalMs) * time.Millisecond
            }
        }
    }
}

func (streamMap *tcpStreamMap) closeTimedoutTcpStreamChannels() {
    tcpStreamChannelTimeout := GetTcpChannelTimeoutMs()
    closeTimedoutTcpChannelsIntervalMs := streamMap.getCloseTimedoutTcpChannelsInterval()
func (streamMap *tcpStreamMap) CloseTimedoutTcpStreamChannels() {
    tcpStreamChannelTimeoutMs := GetTcpChannelTimeoutMs()
    closeTimedoutTcpChannelsIntervalMs := GetCloseTimedoutTcpChannelsInterval()
    logger.Log.Infof("Using %d ms as the close timedout TCP stream channels interval", closeTimedoutTcpChannelsIntervalMs/time.Millisecond)

    ticker := time.NewTicker(closeTimedoutTcpChannelsIntervalMs)
    for {
        time.Sleep(closeTimedoutTcpChannelsIntervalMs)
        <-ticker.C

        _debug.FreeOSMemory()
        streamMap.streams.Range(func(key interface{}, value interface{}) bool {
            streamWrapper := value.(*tcpStreamWrapper)
            stream := streamWrapper.stream
            if stream.superIdentifier.Protocol == nil {
                if !stream.isClosed && time.Now().After(streamWrapper.createdAt.Add(tcpStreamChannelTimeout)) {
                    stream.Close()
            // `*tlsStream` is not yet applicable to this routine.
            // So, we cast into `(*tcpStream)` and ignore `*tlsStream`
            stream, ok := value.(*tcpStream)
            if !ok {
                return true
            }

            if stream.protoIdentifier.Protocol == nil {
                if !stream.isClosed && time.Now().After(stream.createdAt.Add(tcpStreamChannelTimeoutMs)) {
                    stream.close()
                    diagnose.AppStats.IncDroppedTcpStreams()
                    logger.Log.Debugf("Dropped an unidentified TCP stream because of timeout. Total dropped: %d Total Goroutines: %d Timeout (ms): %d",
                        diagnose.AppStats.DroppedTcpStreams, runtime.NumGoroutine(), tcpStreamChannelTimeout/time.Millisecond)
                }
            } else {
                if !stream.superIdentifier.IsClosedOthers {
                    for i := range stream.clients {
                        reader := &stream.clients[i]
                        if reader.extension.Protocol != stream.superIdentifier.Protocol {
                            reader.Close()
                        }
                    }
                    for i := range stream.servers {
                        reader := &stream.servers[i]
                        if reader.extension.Protocol != stream.superIdentifier.Protocol {
                            reader.Close()
                        }
                    }
                    stream.superIdentifier.IsClosedOthers = true
                        diagnose.AppStats.DroppedTcpStreams, runtime.NumGoroutine(), tcpStreamChannelTimeoutMs/time.Millisecond)
                }
            }
            return true
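One detail of the CloseTimedoutTcpStreamChannels change above is the switch from a time.Sleep at the top of the loop to a time.Ticker. A tiny sketch of that pattern in isolation (interval and iteration count are arbitrary): the ticker fires on a fixed schedule even when each sweep over the map takes some time, whereas a sleep-based loop drifts by the sweep duration.

package main

import (
    "fmt"
    "time"
)

func main() {
    ticker := time.NewTicker(50 * time.Millisecond)
    defer ticker.Stop()
    for i := 0; i < 3; i++ {
        <-ticker.C // fires every 50ms regardless of how long the body below takes
        fmt.Println("sweep", i)
        time.Sleep(20 * time.Millisecond) // simulated work; does not push the next tick later
    }
}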
@@ -68,7 +68,7 @@ struct fd_info {
BPF_HASH(pids_map, __u32, __u32);
BPF_LRU_HASH(ssl_write_context, __u64, struct ssl_info);
BPF_LRU_HASH(ssl_read_context, __u64, struct ssl_info);
BPF_HASH(file_descriptor_to_ipv4, __u64, struct fd_info);
BPF_LRU_HASH(file_descriptor_to_ipv4, __u64, struct fd_info);
BPF_PERF_OUTPUT(chunks_buffer);
BPF_PERF_OUTPUT(log_buffer);
@@ -48,14 +48,14 @@ static __always_inline int get_count_bytes(struct pt_regs *ctx, struct ssl_info*
    return countBytes;
}

static __always_inline void add_address_to_chunk(struct pt_regs *ctx, struct tlsChunk* chunk, __u64 id, __u32 fd) {
static __always_inline int add_address_to_chunk(struct pt_regs *ctx, struct tlsChunk* chunk, __u64 id, __u32 fd) {
    __u32 pid = id >> 32;
    __u64 key = (__u64) pid << 32 | fd;

    struct fd_info *fdinfo = bpf_map_lookup_elem(&file_descriptor_to_ipv4, &key);

    if (fdinfo == NULL) {
        return;
        return 0;
    }

    int err = bpf_probe_read(chunk->address, sizeof(chunk->address), fdinfo->ipv4_addr);
@@ -63,7 +63,10 @@ static __always_inline void add_address_to_chunk(struct pt_regs *ctx, struct tls

    if (err != 0) {
        log_error(ctx, LOG_ERROR_READING_FD_ADDRESS, id, err, 0l);
        return 0;
    }

    return 1;
}

static __always_inline void send_chunk_part(struct pt_regs *ctx, __u8* buffer, __u64 id,
@@ -143,7 +146,12 @@ static __always_inline void output_ssl_chunk(struct pt_regs *ctx, struct ssl_inf
    chunk->len = countBytes;
    chunk->fd = info->fd;

    add_address_to_chunk(ctx, chunk, id, chunk->fd);
    if (!add_address_to_chunk(ctx, chunk, id, chunk->fd)) {
        // Without an address, we drop the chunk because there is not much to do with it in Go
        //
        return;
    }

    send_chunk(ctx, info->buffer, id, chunk);
}
@@ -6,6 +6,7 @@ import (
    "net"

    "github.com/go-errors/errors"
    "github.com/up9inc/mizu/tap/api"
)

const FLAGS_IS_CLIENT_BIT uint32 = (1 << 0)
@@ -16,14 +17,14 @@ const FLAGS_IS_READ_BIT uint32 = (1 << 1)
// Be careful when editing, alignment and padding should be exactly the same in go/c.
//
type tlsChunk struct {
    Pid      uint32     // process id
    Tgid     uint32     // thread id inside the process
    Len      uint32     // the size of the native buffer used to read/write the tls data (may be bigger than tlsChunk.Data[])
    Start    uint32     // the start offset withing the native buffer
    Recorded uint32     // number of bytes copied from the native buffer to tlsChunk.Data[]
    Fd       uint32     // the file descriptor used to read/write the tls data (probably socket file descriptor)
    Flags    uint32     // bitwise flags
    Address  [16]byte   // ipv4 address and port
    Pid      uint32     // process id
    Tgid     uint32     // thread id inside the process
    Len      uint32     // the size of the native buffer used to read/write the tls data (may be bigger than tlsChunk.Data[])
    Start    uint32     // the start offset withing the native buffer
    Recorded uint32     // number of bytes copied from the native buffer to tlsChunk.Data[]
    Fd       uint32     // the file descriptor used to read/write the tls data (probably socket file descriptor)
    Flags    uint32     // bitwise flags
    Address  [16]byte   // ipv4 address and port
    Data     [4096]byte // actual tls data
}

@@ -73,3 +74,27 @@ func (c *tlsChunk) getRecordedData() []byte {
func (c *tlsChunk) isRequest() bool {
    return (c.isClient() && c.isWrite()) || (c.isServer() && c.isRead())
}

func (c *tlsChunk) getAddressPair() (addressPair, error) {
    ip, port, err := c.getAddress()

    if err != nil {
        return addressPair{}, err
    }

    if c.isRequest() {
        return addressPair{
            srcIp:   api.UnknownIp,
            srcPort: api.UnknownPort,
            dstIp:   ip,
            dstPort: port,
        }, nil
    } else {
        return addressPair{
            srcIp:   ip,
            srcPort: port,
            dstIp:   api.UnknownIp,
            dstPort: api.UnknownPort,
        }, nil
    }
}
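A standalone sketch of the direction logic behind tlsChunk.isRequest above, using the two flag bits defined in this file. The sketch treats a write as the absence of the read bit; that is an assumption of this illustration rather than something shown in the hunk.

package main

import "fmt"

const (
    flagsIsClientBit uint32 = 1 << 0 // mirrors FLAGS_IS_CLIENT_BIT
    flagsIsReadBit   uint32 = 1 << 1 // mirrors FLAGS_IS_READ_BIT
)

// isRequest: data written by the client side or read by the server side is request traffic.
func isRequest(flags uint32) bool {
    isClient := flags&flagsIsClientBit != 0
    isRead := flags&flagsIsReadBit != 0
    return (isClient && !isRead) || (!isClient && isRead)
}

func main() {
    fmt.Println(isRequest(flagsIsClientBit))                  // true: client write
    fmt.Println(isRequest(flagsIsReadBit))                    // true: server read
    fmt.Println(isRequest(flagsIsClientBit | flagsIsReadBit)) // false: client read is response traffic
}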
@@ -20,10 +20,17 @@ const (
    INODE_FILED_INDEX = 9
)

type addressPair struct {
    srcIp   net.IP
    srcPort uint16
    dstIp   net.IP
    dstPort uint16
}

// This file helps to extract Ip and Port out of a Socket file descriptor.
//
//
// The equivalent bash commands are:
//
//
// > ls -l /proc/<pid>/fd/<fd>
// Output something like "socket:[1234]" for sockets - 1234 is the inode of the socket
// > cat /proc/<pid>/net/tcp | grep <inode>
@@ -31,18 +38,18 @@ const (
// The 1st and 2nd fields are the source and dest ip and ports in a Hex format
// 0100007F:50 is 127.0.0.1:80

func getAddressBySockfd(procfs string, pid uint32, fd uint32, src bool) (net.IP, uint16, error) {
func getAddressBySockfd(procfs string, pid uint32, fd uint32) (addressPair, error) {
    inode, err := getSocketInode(procfs, pid, fd)

    if err != nil {
        return nil, 0, err
        return addressPair{}, err
    }

    tcppath := fmt.Sprintf("%s/%d/net/tcp", procfs, pid)
    tcp, err := ioutil.ReadFile(tcppath)

    if err != nil {
        return nil, 0, errors.Wrap(err, 0)
        return addressPair{}, errors.Wrap(err, 0)
    }

    for _, line := range strings.Split(string(tcp), "\n") {
@@ -53,15 +60,28 @@ func getAddressBySockfd(procfs string, pid uint32, fd uint32, src bool) (net.IP,
        }

        if inode == parts[INODE_FILED_INDEX] {
            if src {
                return parseHexAddress(parts[SRC_ADDRESS_FILED_INDEX])
            } else {
                return parseHexAddress(parts[DST_ADDRESS_FILED_INDEX])
            srcIp, srcPort, srcErr := parseHexAddress(parts[SRC_ADDRESS_FILED_INDEX])

            if srcErr != nil {
                return addressPair{}, srcErr
            }

            dstIp, dstPort, dstErr := parseHexAddress(parts[DST_ADDRESS_FILED_INDEX])

            if dstErr != nil {
                return addressPair{}, dstErr
            }

            return addressPair{
                srcIp:   srcIp,
                srcPort: srcPort,
                dstIp:   dstIp,
                dstPort: dstPort,
            }, nil
        }
    }

    return nil, 0, errors.Errorf("address not found [pid: %d] [sockfd: %d] [inode: %s]", pid, fd, inode)
    return addressPair{}, errors.Errorf("address not found [pid: %d] [sockfd: %d] [inode: %s]", pid, fd, inode)
}

func getSocketInode(procfs string, pid uint32, fd uint32) (string, error) {
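The comment block above explains that /proc/<pid>/net/tcp stores addresses as HEXIP:HEXPORT, e.g. 0100007F:50 for 127.0.0.1:80. Below is a hypothetical, self-contained parser for that format, for illustration only; the repository's own parseHexAddress is not shown in this diff and may differ.

package main

import (
    "encoding/hex"
    "fmt"
    "net"
    "strconv"
    "strings"
)

// parseProcTcpAddr parses one "HEXIP:HEXPORT" field of /proc/<pid>/net/tcp.
// Illustrative only: IPv4 addresses there are stored in little-endian byte
// order, while the port is plain big-endian hex.
func parseProcTcpAddr(s string) (net.IP, uint16, error) {
    parts := strings.Split(s, ":")
    if len(parts) != 2 {
        return nil, 0, fmt.Errorf("unexpected address format: %q", s)
    }
    raw, err := hex.DecodeString(parts[0])
    if err != nil || len(raw) != 4 {
        return nil, 0, fmt.Errorf("bad ip field: %q", parts[0])
    }
    ip := net.IPv4(raw[3], raw[2], raw[1], raw[0]) // reverse the little-endian bytes
    port, err := strconv.ParseUint(parts[1], 16, 16)
    if err != nil {
        return nil, 0, err
    }
    return ip, uint16(port), nil
}

func main() {
    ip, port, _ := parseProcTcpAddr("0100007F:50")
    fmt.Println(ip, port) // 127.0.0.1 80
}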
@@ -4,7 +4,6 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -16,10 +15,14 @@ import (
|
||||
|
||||
"github.com/cilium/ebpf/perf"
|
||||
"github.com/go-errors/errors"
|
||||
"github.com/hashicorp/golang-lru/simplelru"
|
||||
"github.com/up9inc/mizu/logger"
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
const fdCachedItemAvgSize = 40
|
||||
const fdCacheMaxItems = 500000 / fdCachedItemAvgSize
|
||||
|
||||
type tlsPoller struct {
|
||||
tls *TlsTapper
|
||||
readers map[string]*tlsReader
|
||||
@@ -29,10 +32,12 @@ type tlsPoller struct {
|
||||
extension *api.Extension
|
||||
procfs string
|
||||
pidToNamespace sync.Map
|
||||
fdCache *simplelru.LRU // Actual typs is map[string]addressPair
|
||||
evictedCounter int
|
||||
}
|
||||
|
||||
func newTlsPoller(tls *TlsTapper, extension *api.Extension, procfs string) *tlsPoller {
|
||||
return &tlsPoller{
|
||||
func newTlsPoller(tls *TlsTapper, extension *api.Extension, procfs string) (*tlsPoller, error) {
|
||||
poller := &tlsPoller{
|
||||
tls: tls,
|
||||
readers: make(map[string]*tlsReader),
|
||||
closedReaders: make(chan string, 100),
|
||||
@@ -41,6 +46,15 @@ func newTlsPoller(tls *TlsTapper, extension *api.Extension, procfs string) *tlsP
|
||||
chunksReader: nil,
|
||||
procfs: procfs,
|
||||
}
|
||||
|
||||
fdCache, err := simplelru.NewLRU(fdCacheMaxItems, poller.fdCacheEvictCallback)
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
poller.fdCache = fdCache
|
||||
return poller, nil
|
||||
}
|
||||
|
||||
func (p *tlsPoller) init(bpfObjects *tlsTapperObjects, bufferSize int) error {
|
||||
@@ -59,7 +73,7 @@ func (p *tlsPoller) close() error {
|
||||
return p.chunksReader.Close()
|
||||
}
|
||||
|
||||
func (p *tlsPoller) poll(emitter api.Emitter, options *api.TrafficFilteringOptions) {
|
||||
func (p *tlsPoller) poll(emitter api.Emitter, options *api.TrafficFilteringOptions, streamsMap api.TcpStreamMap) {
|
||||
chunks := make(chan *tlsChunk)
|
||||
|
||||
go p.pollChunksPerfBuffer(chunks)
|
||||
@@ -71,7 +85,7 @@ func (p *tlsPoller) poll(emitter api.Emitter, options *api.TrafficFilteringOptio
|
||||
return
|
||||
}
|
||||
|
||||
if err := p.handleTlsChunk(chunk, p.extension, emitter, options); err != nil {
|
||||
if err := p.handleTlsChunk(chunk, p.extension, emitter, options, streamsMap); err != nil {
|
||||
LogError(err)
|
||||
}
|
||||
case key := <-p.closedReaders:
|
||||
@@ -115,67 +129,83 @@ func (p *tlsPoller) pollChunksPerfBuffer(chunks chan<- *tlsChunk) {
|
||||
}
|
||||
}
|
||||
|
||||
func (p *tlsPoller) handleTlsChunk(chunk *tlsChunk, extension *api.Extension,
|
||||
emitter api.Emitter, options *api.TrafficFilteringOptions) error {
|
||||
ip, port, err := chunk.getAddress()
|
||||
func (p *tlsPoller) handleTlsChunk(chunk *tlsChunk, extension *api.Extension, emitter api.Emitter,
|
||||
options *api.TrafficFilteringOptions, streamsMap api.TcpStreamMap) error {
|
||||
address, err := p.getSockfdAddressPair(chunk)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
address, err = chunk.getAddressPair()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
key := buildTlsKey(chunk, ip, port)
|
||||
key := buildTlsKey(address)
|
||||
reader, exists := p.readers[key]
|
||||
|
||||
if !exists {
|
||||
reader = p.startNewTlsReader(chunk, ip, port, key, extension, emitter, options)
|
||||
reader = p.startNewTlsReader(chunk, &address, key, emitter, extension, options, streamsMap)
|
||||
p.readers[key] = reader
|
||||
}
|
||||
|
||||
reader.timer.CaptureTime = time.Now()
|
||||
reader.chunks <- chunk
|
||||
reader.newChunk(chunk)
|
||||
|
||||
if os.Getenv("MIZU_VERBOSE_TLS_TAPPER") == "true" {
|
||||
p.logTls(chunk, ip, port)
|
||||
p.logTls(chunk, key, reader)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *tlsPoller) startNewTlsReader(chunk *tlsChunk, ip net.IP, port uint16, key string, extension *api.Extension,
|
||||
emitter api.Emitter, options *api.TrafficFilteringOptions) *tlsReader {
|
||||
func (p *tlsPoller) startNewTlsReader(chunk *tlsChunk, address *addressPair, key string,
|
||||
emitter api.Emitter, extension *api.Extension, options *api.TrafficFilteringOptions,
|
||||
streamsMap api.TcpStreamMap) *tlsReader {
|
||||
|
||||
reader := &tlsReader{
|
||||
key: key,
|
||||
chunks: make(chan *tlsChunk, 1),
|
||||
doneHandler: func(r *tlsReader) {
|
||||
p.closeReader(key, r)
|
||||
},
|
||||
progress: &api.ReadProgress{},
|
||||
timer: api.SuperTimer{
|
||||
CaptureTime: time.Now(),
|
||||
},
|
||||
tcpid := p.buildTcpId(chunk, address)
|
||||
|
||||
doneHandler := func(r *tlsReader) {
|
||||
p.closeReader(key, r)
|
||||
}
|
||||
|
||||
tcpid := p.buildTcpId(chunk, ip, port)
|
||||
|
||||
tlsEmitter := &tlsEmitter{
|
||||
delegate: emitter,
|
||||
namespace: p.getNamespace(chunk.Pid),
|
||||
}
|
||||
|
||||
go dissect(extension, reader, chunk.isRequest(), &tcpid, tlsEmitter, options, p.reqResMatcher)
|
||||
reader := &tlsReader{
|
||||
key: key,
|
||||
chunks: make(chan *tlsChunk, 1),
|
||||
doneHandler: doneHandler,
|
||||
progress: &api.ReadProgress{},
|
||||
tcpID: &tcpid,
|
||||
isClient: chunk.isRequest(),
|
||||
captureTime: time.Now(),
|
||||
extension: extension,
|
||||
emitter: tlsEmitter,
|
||||
counterPair: &api.CounterPair{},
|
||||
reqResMatcher: p.reqResMatcher,
|
||||
}
|
||||
|
||||
stream := &tlsStream{
|
||||
reader: reader,
|
||||
protoIdentifier: &api.ProtoIdentifier{},
|
||||
}
|
||||
streamsMap.Store(streamsMap.NextId(), stream)
|
||||
|
||||
reader.parent = stream
|
||||
|
||||
go dissect(extension, reader, options)
|
||||
return reader
|
||||
}
|
||||
|
||||
func dissect(extension *api.Extension, reader *tlsReader, isRequest bool, tcpid *api.TcpID,
|
||||
tlsEmitter *tlsEmitter, options *api.TrafficFilteringOptions, reqResMatcher api.RequestResponseMatcher) {
|
||||
func dissect(extension *api.Extension, reader *tlsReader, options *api.TrafficFilteringOptions) {
|
||||
b := bufio.NewReader(reader)
|
||||
|
||||
err := extension.Dissector.Dissect(b, reader.progress, api.Ebpf, isRequest, tcpid, &api.CounterPair{},
|
||||
&reader.timer, &api.SuperIdentifier{}, tlsEmitter, options, reqResMatcher)
|
||||
err := extension.Dissector.Dissect(b, reader, options)
|
||||
|
||||
if err != nil {
|
||||
logger.Log.Warningf("Error dissecting TLS %v - %v", tcpid, err)
|
||||
logger.Log.Warningf("Error dissecting TLS %v - %v", reader.GetTcpID(), err)
|
||||
}
|
||||
}

@@ -184,36 +214,52 @@ func (p *tlsPoller) closeReader(key string, r *tlsReader) {
p.closedReaders <- key
}

func buildTlsKey(chunk *tlsChunk, ip net.IP, port uint16) string {
return fmt.Sprintf("%v:%v-%v:%v", chunk.isClient(), chunk.isRead(), ip, port)
}
func (p *tlsPoller) getSockfdAddressPair(chunk *tlsChunk) (addressPair, error) {
address, err := getAddressBySockfd(p.procfs, chunk.Pid, chunk.Fd)
fdCacheKey := fmt.Sprintf("%d:%d", chunk.Pid, chunk.Fd)

func (p *tlsPoller) buildTcpId(chunk *tlsChunk, ip net.IP, port uint16) api.TcpID {
myIp, myPort, err := getAddressBySockfd(p.procfs, chunk.Pid, chunk.Fd, chunk.isClient())

if err != nil {
// May happen if the socket already closed, very likely to happen for localhost
//
myIp = api.UnknownIp
myPort = api.UnknownPort
if err == nil {
if !chunk.isRequest() {
switchedAddress := addressPair{
srcIp: address.dstIp,
srcPort: address.dstPort,
dstIp: address.srcIp,
dstPort: address.srcPort,
}
p.fdCache.Add(fdCacheKey, switchedAddress)
return switchedAddress, nil
} else {
p.fdCache.Add(fdCacheKey, address)
return address, nil
}
}

if chunk.isRequest() {
return api.TcpID{
SrcIP: myIp.String(),
DstIP: ip.String(),
SrcPort: strconv.FormatUint(uint64(myPort), 10),
DstPort: strconv.FormatUint(uint64(port), 10),
Ident: "",
}
} else {
return api.TcpID{
SrcIP: ip.String(),
DstIP: myIp.String(),
SrcPort: strconv.FormatUint(uint64(port), 10),
DstPort: strconv.FormatUint(uint64(myPort), 10),
Ident: "",
}
fromCacheIfc, ok := p.fdCache.Get(fdCacheKey)

if !ok {
return addressPair{}, err
}

fromCache, ok := fromCacheIfc.(addressPair)

if !ok {
return address, errors.Errorf("Unable to cast %T to addressPair", fromCacheIfc)
}

return fromCache, nil
}

func buildTlsKey(address addressPair) string {
return fmt.Sprintf("%s:%d>%s:%d", address.srcIp, address.srcPort, address.dstIp, address.dstPort)
}

func (p *tlsPoller) buildTcpId(chunk *tlsChunk, address *addressPair) api.TcpID {
return api.TcpID{
SrcIP: address.srcIp.String(),
DstIP: address.dstIp.String(),
SrcPort: strconv.FormatUint(uint64(address.srcPort), 10),
DstPort: strconv.FormatUint(uint64(address.dstPort), 10),
Ident: "",
}
}
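getAddressBySockfd itself is not part of this diff. For context, resolving a (pid, fd) pair to TCP endpoints on Linux usually means reading the socket inode behind /proc/<pid>/fd/<fd> and then looking that inode up in /proc/<pid>/net/tcp. The sketch below only illustrates that technique; it is not the project's implementation, and it ignores IPv6 (/proc/<pid>/net/tcp6) and most error handling.

    package sketch

    import (
        "bufio"
        "encoding/hex"
        "fmt"
        "net"
        "os"
        "strconv"
        "strings"
    )

    // addressOfSockfd resolves the socket inode behind /proc/<pid>/fd/<fd>,
    // then finds that inode in /proc/<pid>/net/tcp and decodes its endpoints.
    func addressOfSockfd(pid uint32, fd uint32) (local, remote string, err error) {
        link, err := os.Readlink(fmt.Sprintf("/proc/%d/fd/%d", pid, fd))
        if err != nil {
            return "", "", err // the fd may already be closed
        }
        if !strings.HasPrefix(link, "socket:[") {
            return "", "", fmt.Errorf("fd %d is not a socket: %s", fd, link)
        }
        inode := strings.TrimSuffix(strings.TrimPrefix(link, "socket:["), "]")

        f, err := os.Open(fmt.Sprintf("/proc/%d/net/tcp", pid))
        if err != nil {
            return "", "", err
        }
        defer f.Close()

        scanner := bufio.NewScanner(f)
        scanner.Scan() // skip the header line
        for scanner.Scan() {
            fields := strings.Fields(scanner.Text())
            if len(fields) < 10 || fields[9] != inode {
                continue
            }
            return decodeHexEndpoint(fields[1]), decodeHexEndpoint(fields[2]), nil
        }
        return "", "", fmt.Errorf("inode %s not found in /proc/%d/net/tcp", inode, pid)
    }

    // decodeHexEndpoint turns the procfs form "0100007F:1F90" into "127.0.0.1:8080".
    func decodeHexEndpoint(s string) string {
        parts := strings.Split(s, ":")
        raw, _ := hex.DecodeString(parts[0])
        // /proc/net/tcp stores the IPv4 address as a little-endian uint32.
        ip := net.IPv4(raw[3], raw[2], raw[1], raw[0])
        port, _ := strconv.ParseUint(parts[1], 16, 16)
        return fmt.Sprintf("%s:%d", ip, port)
    }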

@@ -244,7 +290,7 @@ func (p *tlsPoller) clearPids() {
})
}

func (p *tlsPoller) logTls(chunk *tlsChunk, ip net.IP, port uint16) {
func (p *tlsPoller) logTls(chunk *tlsChunk, key string, reader *tlsReader) {
var flagsStr string

if chunk.isClient() {
@@ -259,13 +305,18 @@ func (p *tlsPoller) logTls(chunk *tlsChunk, ip net.IP, port uint16) {
flagsStr += "W"
}

srcIp, srcPort, _ := getAddressBySockfd(p.procfs, chunk.Pid, chunk.Fd, true)
dstIp, dstPort, _ := getAddressBySockfd(p.procfs, chunk.Pid, chunk.Fd, false)

str := strings.ReplaceAll(strings.ReplaceAll(string(chunk.Data[0:chunk.Recorded]), "\n", " "), "\r", "")

logger.Log.Infof("PID: %v (tid: %v) (fd: %v) (client: %v) (addr: %v:%v) (fdaddr %v:%v>%v:%v) (recorded %v out of %v starting at %v) - %v - %v",
chunk.Pid, chunk.Tgid, chunk.Fd, flagsStr, ip, port,
srcIp, srcPort, dstIp, dstPort,
chunk.Recorded, chunk.Len, chunk.Start, str, hex.EncodeToString(chunk.Data[0:chunk.Recorded]))
logger.Log.Infof("[%-44s] %s #%-4d (fd: %d) (recorded %d/%d:%d) - %s - %s",
key, flagsStr, reader.seenChunks, chunk.Fd,
chunk.Recorded, chunk.Len, chunk.Start,
str, hex.EncodeToString(chunk.Data[0:chunk.Recorded]))
}

func (p *tlsPoller) fdCacheEvictCallback(key interface{}, value interface{}) {
p.evictedCounter = p.evictedCounter + 1

if p.evictedCounter%1000000 == 0 {
logger.Log.Infof("Tls fdCache evicted %d items", p.evictedCounter)
}
}
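How fdCache is constructed is not shown in this diff. An eviction callback with the func(key interface{}, value interface{}) shape is what LRU caches such as hashicorp/golang-lru expect; treating that as an assumption rather than a fact about this repository, the wiring would look roughly like this:

    package sketch

    import (
        lru "github.com/hashicorp/golang-lru"
    )

    // newFdCache is illustrative only: it shows how an LRU cache with an
    // eviction callback (like fdCacheEvictCallback above) is typically created.
    // The real poller's cache size and library choice are assumptions here.
    func newFdCache(maxEntries int, onEvict func(key interface{}, value interface{})) (*lru.Cache, error) {
        // NewWithEvict returns an error for invalid sizes, which might be why
        // newTlsPoller now returns an error as well (see the Init hunk below).
        return lru.NewWithEvict(maxEntries, onEvict)
    }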

@@ -8,12 +8,26 @@ import (
)

type tlsReader struct {
key string
chunks chan *tlsChunk
data []byte
doneHandler func(r *tlsReader)
progress *api.ReadProgress
timer api.SuperTimer
key string
chunks chan *tlsChunk
seenChunks int
data []byte
doneHandler func(r *tlsReader)
progress *api.ReadProgress
tcpID *api.TcpID
isClient bool
captureTime time.Time
extension *api.Extension
emitter api.Emitter
counterPair *api.CounterPair
parent *tlsStream
reqResMatcher api.RequestResponseMatcher
}

func (r *tlsReader) newChunk(chunk *tlsChunk) {
r.captureTime = time.Now()
r.seenChunks = r.seenChunks + 1
r.chunks <- chunk
}

func (r *tlsReader) Read(p []byte) (int, error) {
@@ -44,3 +58,43 @@ func (r *tlsReader) Read(p []byte) (int, error) {

return l, nil
}

func (r *tlsReader) GetReqResMatcher() api.RequestResponseMatcher {
return r.reqResMatcher
}

func (r *tlsReader) GetIsClient() bool {
return r.isClient
}

func (r *tlsReader) GetReadProgress() *api.ReadProgress {
return r.progress
}

func (r *tlsReader) GetParent() api.TcpStream {
return r.parent
}

func (r *tlsReader) GetTcpID() *api.TcpID {
return r.tcpID
}

func (r *tlsReader) GetCounterPair() *api.CounterPair {
return r.counterPair
}

func (r *tlsReader) GetCaptureTime() time.Time {
return r.captureTime
}

func (r *tlsReader) GetEmitter() api.Emitter {
return r.emitter
}

func (r *tlsReader) GetIsClosed() bool {
return false
}

func (r *tlsReader) GetExtension() *api.Extension {
return r.extension
}
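Taken together, these getters are what lets the protocol dissectors work against the reader object itself, matching the slimmed-down Dissect(b, reader, options) call earlier in this diff, instead of receiving the TcpID, read progress, emitter and so on as separate arguments. A rough sketch of the kind of contract that implies follows; the names below are illustrative placeholders and simplified types, not the actual definitions in tap/api.

    package sketch

    import "time"

    // A sketch of the reader-centric contract implied by the getters above
    // (subset shown). The real interface lives in the tap/api package.
    type TcpReader interface {
        Read(p []byte) (int, error) // stream bytes for the dissector
        GetTcpID() *TcpID           // connection identity for emitted entries
        GetIsClient() bool          // request vs. response direction
        GetCaptureTime() time.Time  // timestamp of the last chunk
        GetEmitter() Emitter        // where dissected entries are sent
    }

    // Placeholder types so the sketch is self-contained.
    type TcpID struct{ SrcIP, DstIP, SrcPort, DstPort, Ident string }
    type Emitter interface{ Emit(item interface{}) }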

32
tap/tlstapper/tls_stream.go
Normal file
@@ -0,0 +1,32 @@
package tlstapper

import "github.com/up9inc/mizu/tap/api"

type tlsStream struct {
reader *tlsReader
protoIdentifier *api.ProtoIdentifier
}

func (t *tlsStream) GetOrigin() api.Capture {
return api.Ebpf
}

func (t *tlsStream) GetProtoIdentifier() *api.ProtoIdentifier {
return t.protoIdentifier
}

func (t *tlsStream) SetProtocol(protocol *api.Protocol) {
t.protoIdentifier.Protocol = protocol
}

func (t *tlsStream) GetReqResMatchers() []api.RequestResponseMatcher {
return []api.RequestResponseMatcher{t.reader.reqResMatcher}
}

func (t *tlsStream) GetIsTapTarget() bool {
return true
}

func (t *tlsStream) GetIsClosed() bool {
return false
}
@@ -46,12 +46,18 @@ func (t *TlsTapper) Init(chunksBufferSize int, logBufferSize int, procfs string,
return err
}

t.poller = newTlsPoller(t, extension, procfs)
var err error
t.poller, err = newTlsPoller(t, extension, procfs)

if err != nil {
return err
}

return t.poller.init(&t.bpfObjects, chunksBufferSize)
}

func (t *TlsTapper) Poll(emitter api.Emitter, options *api.TrafficFilteringOptions) {
t.poller.poll(emitter, options)
func (t *TlsTapper) Poll(emitter api.Emitter, options *api.TrafficFilteringOptions, streamsMap api.TcpStreamMap) {
t.poller.poll(emitter, options, streamsMap)
}

func (t *TlsTapper) PollForLogging() {
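Poll now needs the shared api.TcpStreamMap so TLS streams land in the same stream map as plain TCP traffic (the poller stores them via streamsMap.Store(streamsMap.NextId(), stream) in the first hunk of this diff). A hypothetical call site, purely for illustration, since the actual caller is not part of this diff:

    package tlstapper

    import "github.com/up9inc/mizu/tap/api"

    // Hypothetical helper, for illustration only: the real call site lives in
    // the tap package. The important part is that the same api.TcpStreamMap
    // instance is shared with the plain-TCP assembler, so TLS streams show up
    // alongside regular ones.
    func startTlsPolling(t *TlsTapper, emitter api.Emitter,
        options *api.TrafficFilteringOptions, streamsMap api.TcpStreamMap) {
        go t.Poll(emitter, options, streamsMap)
    }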

Binary file not shown.
Binary file not shown.
@@ -1,5 +0,0 @@
This example was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).

It is linked to the liraz-test package in the parent directory for development purposes.

You can run `npm install` and then `npm start` to test your package.
@@ -1,13 +0,0 @@
module.exports = {
webpack: {
configure: (webpackConfig) => {
const instanceOfMiniCssExtractPlugin = webpackConfig.plugins.find(
(plugin) => plugin.options && plugin.options.ignoreOrder != null,
);
if(instanceOfMiniCssExtractPlugin)
instanceOfMiniCssExtractPlugin.options.ignoreOrder = true;

return webpackConfig;
}
}
}
43491
ui-common/example/package-lock.json
generated
File diff suppressed because it is too large
@@ -1,48 +0,0 @@
{
"name": "@up9/mizu-common-example",
"homepage": ".",
"version": "0.0.0",
"private": true,
"scripts": {
"start": "craco start",
"comment-start": "node ../node_modules/react-scripts/bin/react-scripts.js start",
"build": "node ../node_modules/react-scripts/bin/react-scripts.js build",
"test": "node ../node_modules/react-scripts/bin/react-scripts.js test",
"eject": "node ../node_modules/react-scripts/bin/react-scripts.js eject"
},
"dependencies": {
"@testing-library/jest-dom": "file:../node_modules/@testing-library/jest-dom",
"@testing-library/react": "file:../node_modules/@testing-library/react",
"@testing-library/user-event": "file:../node_modules/@testing-library/user-event",
"@types/jest": "file:../node_modules/@types/jest",
"@types/node": "file:../node_modules/@types/node",
"@types/react": "file:../node_modules/@types/react",
"@types/react-dom": "file:../node_modules/@types/react-dom",
"@up9/mizu-common": "file:..",
"react": "file:../node_modules/react",
"react-dom": "file:../node_modules/react-dom"
},
"devDependencies": {
"@babel/plugin-syntax-object-rest-spread": "^7.8.3",
"@craco/craco": "^6.4.3",
"eslint": "^7.11.0",
"node-sass": "^6.0.0",
"recoil": "^0.5.2",
"react-scripts": "^4.0.3"
},
"eslintConfig": {
"extends": "react-app"
},
"browserslist": {
"production": [
">0.2%",
"not dead",
"not op_mini all"
],
"development": [
"last 1 chrome version",
"last 1 firefox version",
"last 1 safari version"
]
}
}
Binary file not shown.
Before: Size 3.8 KiB
@@ -1,48 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<link rel="shortcut icon" href="%PUBLIC_URL%/favicon.ico" />
<meta
name="viewport"
content="width=device-width, initial-scale=1, shrink-to-fit=no"
/>
<meta name="theme-color" content="#000000" />

<!--
manifest.json provides metadata used when your web app is added to the
homescreen on Android. See https://developers.google.com/web/fundamentals/engage-and-retain/web-app-manifest/
-->
<link rel="manifest" href="%PUBLIC_URL%/manifest.json" />

<!--
Notice the use of %PUBLIC_URL% in the tags above.
It will be replaced with the URL of the `public` folder during the build.
Only files inside the `public` folder can be referenced from the HTML.

Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will
work correctly both with client-side routing and a non-root public URL.
Learn how to configure a non-root public URL by running `npm run build`.
-->
<title>@up9/mizu-common</title>
</head>

<body>
<noscript>
You need to enable JavaScript to run this app.
</noscript>

<div id="root"></div>

<!--
This HTML file is a template.
If you open it directly in the browser, you will see an empty page.

You can add webfonts, meta tags, or analytics to this file.
The build step will place the bundled scripts into the <body> tag.

To begin the development, run `npm start` or `yarn start`.
To create a production bundle, use `npm run build` or `yarn build`.
-->
</body>
</html>
@@ -1,15 +0,0 @@
{
"short_name": "@up9/mizu-common",
"name": "@up9/mizu-common",
"icons": [
{
"src": "favicon.ico",
"sizes": "64x64 32x32 24x24 16x16",
"type": "image/x-icon"
}
],
"start_url": ".",
"display": "standalone",
"theme_color": "#000000",
"background_color": "#ffffff"
}
@@ -1,9 +0,0 @@
import React from 'react'
import ReactDOM from 'react-dom'
import App from './App'

it('renders without crashing', () => {
const div = document.createElement('div')
ReactDOM.render(<App />, div)
ReactDOM.unmountComponentAtNode(div)
})
@@ -1,24 +0,0 @@
import TrafficViewer,{useWS, DEFAULT_QUERY, OasModal} from '@up9/mizu-common';
import "@up9/mizu-common/dist/index.css"
import {useEffect} from 'react';
import Api, {getWebsocketUrl} from "./api";

const api = Api.getInstance()

const App = () => {
const {message,error,isOpen, openSocket, closeSocket, sendQueryWhenWsOpen} = useWS(getWebsocketUrl())
const trafficViewerApi = {...api, webSocket:{open : openSocket, close: closeSocket, sendQueryWhenWsOpen: sendQueryWhenWsOpen}}
sendQueryWhenWsOpen(DEFAULT_QUERY);

useEffect(() => {
return () =>{
closeSocket()
}
},[])

return <>

</>
}

export default App
@@ -1,119 +0,0 @@
import * as axios from "axios";

export const MizuWebsocketURL = process.env.REACT_APP_OVERRIDE_WS_URL ? process.env.REACT_APP_OVERRIDE_WS_URL :
window.location.protocol === 'https:' ? `wss://${window.location.host}/ws` : `ws://${window.location.host}/ws`;

export const FormValidationErrorType = "formError";

const CancelToken = axios.CancelToken;

const apiURL = process.env.REACT_APP_OVERRIDE_API_URL ? process.env.REACT_APP_OVERRIDE_API_URL : `${window.location.origin}/`;

let token = null
let client = null
let source = null

export default class Api {
static instance;

static getInstance() {
if (!Api.instance) {
Api.instance = new Api();
}
return Api.instance;
}

constructor() {
token = localStorage.getItem("token");

client = this.getAxiosClient();
source = null;
}

tapStatus = async () => {
const response = await client.get("/status/tap");
return response.data;
}


analyzeStatus = async () => {
const response = await client.get("/status/analyze");
return response.data;
}

getEntry = async (id, query) => {
const response = await client.get(`/entries/${id}?query=${query}`);
return response.data;
}

fetchEntries = async (leftOff, direction, query, limit, timeoutMs) => {
const response = await client.get(`/entries/?leftOff=${leftOff}&direction=${direction}&query=${query}&limit=${limit}&timeoutMs=${timeoutMs}`).catch(function (thrown) {
console.error(thrown.message);
return {};
});
return response.data;
}

getOasServices = async () => {
const response = await client.get("/oas");
return response.data;
}

getOasByService = async (selectedService) => {
const response = await client.get(`/oas/${selectedService}`);
return response.data;
}

validateQuery = async (query) => {
if (source) {
source.cancel();
}
source = CancelToken.source();

const form = new FormData();
form.append('query', query)
const response = await client.post(`/query/validate`, form, {
cancelToken: source.token
}).catch(function (thrown) {
if (!axios.isCancel(thrown)) {
console.error('Validate error', thrown.message);
}
});

if (!response) {
return null;
}

return response.data;
}

persistToken = (tk) => {
token = tk;
client = this.getAxiosClient();
localStorage.setItem('token', token);
}

getAxiosClient = () => {
const headers = {
Accept: "application/json"
}

if (token) {
headers['x-session-token'] = `${token}`; // we use `x-session-token` instead of `Authorization` because the latter is reserved by kubectl proxy, making mizu view not work
}
return axios.create({
baseURL: apiURL,
timeout: 31000,
headers
});
}
}

export function getWebsocketUrl(){
let websocketUrl = MizuWebsocketURL;
if (token) {
websocketUrl += `/${token}`;
}

return websocketUrl;
}
@@ -1,14 +0,0 @@
body {
margin: 0;
padding: 0;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
sans-serif;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}

code {
font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
monospace;
}
@@ -1,7 +0,0 @@
import './index.css'

import ReactDOM from 'react-dom'
import App from './App'


ReactDOM.render(<App />, document.getElementById('root'))
1
ui-common/example/src/react-app-env.d.ts
vendored
@@ -1 +0,0 @@
/// <reference types="react-scripts" />
@@ -1,5 +0,0 @@
// jest-dom adds custom jest matchers for asserting on DOM nodes.
// allows you to do things like:
// expect(element).toHaveTextContent(/react/i)
// learn more: https://github.com/testing-library/jest-dom
import '@testing-library/jest-dom/extend-expect';
@@ -1,41 +0,0 @@
{
"compilerOptions": {
"outDir": "dist",
"module": "esnext",
"lib": [
"dom",
"esnext"
],
"moduleResolution": "node",
"jsx": "react-jsx",
"sourceMap": true,
"declaration": true,
"esModuleInterop": true,
"noImplicitReturns": true,
"noImplicitThis": true,
"noImplicitAny": true,
"strictNullChecks": false,
"suppressImplicitAnyIndexErrors": true,
"noUnusedLocals": false,
"noUnusedParameters": false,
"allowSyntheticDefaultImports": true,
"target": "es5",
"allowJs": true,
"skipLibCheck": true,

"strict": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"isolatedModules": true,
"noEmit": true,
"noFallthroughCasesInSwitch": true
},
"include": [
"src"
],
"exclude": [
"node_modules",
"build",
"exmaple"
]
}
22
ui-common/src/components.scss
Normal file
@@ -0,0 +1,22 @@
.subSectionHeader{
position: relative;
font-style: normal;
font-weight: bold;
font-size: 12px;
line-height: 15px;
color: $font-color;
&::after{
content: "";
border: 1px solid #E9EBF8;
transform: rotate(180deg);
position: absolute;
left: 0px;
right: -100%;
top: 100%;
bottom: 0%;
width: 100%;
width: -moz-available;
width: -webkit-fill-available;
width: strech;
}
}
@@ -1,6 +1,13 @@
@import '../../variables.module.scss'

.boxContainer
.closeIcon
cursor: pointer
user-select: none
margin-top: -12px
margin-right: -12px
float: right

.boxContainer
display: flex
justifyContent: space-between
padding: 10px
@@ -17,7 +24,7 @@
width: 43px

.title
color:#494677
color: #494677
font-family: Source Sans Pro,Lucida Grande,Tahoma,sans-serif
font-size: 28px
font-weight: 600
@@ -37,4 +44,3 @@

.root
width: 100%


@@ -1,5 +1,5 @@
import { Box, Fade, FormControl, MenuItem, Modal, Backdrop } from "@material-ui/core";
import { useEffect, useState } from "react";
import { Box, Fade, FormControl, Modal, Backdrop } from "@material-ui/core";
import { useCallback, useEffect, useState } from "react";
import { RedocStandalone } from "redoc";
import closeIcon from "assets/closeIcon.svg";
import { toast } from 'react-toastify';
@@ -30,10 +30,10 @@ const OasModal = ({ openModal, handleCloseModal, getOasServices, getOasByService
const [oasServices, setOasServices] = useState([] as string[])
const [selectedServiceName, setSelectedServiceName] = useState("");
const [selectedServiceSpec, setSelectedServiceSpec] = useState(null);

const classes = {root: style.root}

const onSelectedOASService = async (selectedService) => {
const classes = { root: style.root }

const onSelectedOASService = useCallback (async (selectedService) => {
if (oasServices.length === 0) {
setSelectedServiceSpec(null);
setSelectedServiceName("");
@@ -49,7 +49,8 @@ const OasModal = ({ openModal, handleCloseModal, getOasServices, getOasByService
toast.error("Error occurred while fetching service OAS spec", { containerId: TOAST_CONTAINER_ID });
console.error(e);
}
};
// eslint-disable-next-line
},[oasServices]);

useEffect(() => {
(async () => {
@@ -60,11 +61,13 @@ const OasModal = ({ openModal, handleCloseModal, getOasServices, getOasByService
console.error(e);
}
})();
// eslint-disable-next-line
}, [openModal]);


useEffect(() => {
onSelectedOASService(null);
}, [oasServices])
}, [oasServices, onSelectedOASService])

return (
<Modal
@@ -80,18 +83,17 @@ const OasModal = ({ openModal, handleCloseModal, getOasServices, getOasByService
>
<Fade in={openModal}>
<Box sx={modalStyle}>
<img src={closeIcon} alt="close" onClick={handleCloseModal} className={style.closeIcon} />
<div className={style.boxContainer}>
<div className={style.selectHeader}>
<div><img src={openApiLogo} alt="openAPI" className={style.openApilogo} /></div>
<div className={style.title}>Service Catalog</div>
</div>
<div style={{ cursor: "pointer" }}>
<img src={closeIcon} alt="close" onClick={handleCloseModal} />
</div>
</div>

<div className={style.selectContainer} >
<FormControl classes={classes}>
<SearchableDropdown
<SearchableDropdown
options={oasServices}
selectedValues={selectedServiceName}
onChange={onSelectedOASService}

@@ -1,4 +1,4 @@
<svg width="22" height="22" viewBox="0 0 22 22" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M20.5 11C20.5 16.2467 16.2467 20.5 11 20.5C5.75329 20.5 1.5 16.2467 1.5 11C1.5 5.75329 5.75329 1.5 11 1.5C16.2467 1.5 20.5 5.75329 20.5 11Z" stroke="#2B3560"/>
<path d="M14.4762 9.05338L13.1448 7.7219L11.1528 9.71382L9.16091 7.7219L7.82943 9.05338L9.82135 11.0453L7.83226 13.0344L9.16374 14.3659L11.1528 12.3768L13.1419 14.3659L14.4734 13.0344L12.4843 11.0453L14.4762 9.05338Z" fill="#627EF7"/>
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M18.591 9.99997C18.591 14.7446 14.7447 18.5909 10.0001 18.5909C5.25546 18.5909 1.40918 14.7446 1.40918 9.99997C1.40918 5.25534 5.25546 1.40906 10.0001 1.40906C14.7447 1.40906 18.591 5.25534 18.591 9.99997Z" fill="#E9EBF8" stroke="#BCCEFD"/>
<path d="M13.1604 8.23038L11.95 7.01994L10.1392 8.83078L8.32832 7.01994L7.11789 8.23038L8.92872 10.0412L7.12046 11.8495L8.33089 13.0599L10.1392 11.2517L11.9474 13.0599L13.1579 11.8495L11.3496 10.0412L13.1604 8.23038Z" fill="#205CF5"/>
</svg>

Before: Size 507 B, After: Size 588 B
@@ -1,58 +1,98 @@
@import "../../variables.module"
@import "../../components"

.closeIcon
position: absolute
right: 20px
top: 20px

.modalContainer
display: flex
height: calc(100% - 110px)
background: #F0F5FF
padding: 0 15px
padding-bottom: 25px

.headerContainer
width: 100%
height: 100%
background: white
display: flex
align-items: center
margin-bottom: 15px

.headerSection
display: flex
align-content: center
align-items: center
margin-left: 35px
margin-bottom: 25px
margin-top: 25px

& .title
font-size: 28px
color: $blue-gray
font-weight: 600
margin-right: 35px

& .actions

.graphSection
flex: 85%

.filterSection
flex: 15%
height: 100%
display: none

&.show
display: inline-block

.filters table
margin-top: 0px

tr
border-style: none

td
color: #8f9bb2
color: $light-gray
font-size: 11px
font-weight: 600
padding-top: 2px
padding-bottom: 2px
padding-top: 5px
padding-bottom: 5px
th
font-size: 12px

.colorBlock
display: inline-block
height: 15px
width: 50px
height: 12px
width: 22px

.filterWrapper
height: 100%
display: flex
flex-direction: column
margin-right: 10px
width: 100%
border-radius: 4px

.servicesFilterSearch
width: calc(100% - 10px)
max-width: 300px
width: -moz-available
width: -webkit-fill-available
width: fill-available
max-width: 200px
box-shadow: 0px 1px 5px #979797
margin-left: 10px
margin-bottom: 5px
margin-top: 10px
margin-right: 10px

.servicesFilter
margin-top: 15px
.card
background: white
padding: 10px
border-radius: 4px
user-select: none
box-shadow: 0px 1px 5px #979797

.servicesFilterWrapper
margin-top: 20px
margin-bottom: 3px
height: 100%
overflow: hidden

& .servicesFilterList
overflow-y: auto
height: calc(100% - 30px - 5px)

.separtorLine
margin-top: 10px
border: 1px solid #E9EBF8
.servicesFilterList
height: calc(100% - 30px - 52px)

@@ -8,6 +8,8 @@ import debounce from 'lodash/debounce';
import ServiceMapOptions from './ServiceMapOptions'
import { useCommonStyles } from "../../helpers/commonStyle";
import refreshIcon from "assets/refresh.svg";
import filterIcon from "assets/filter-icon.svg";
import filterIconClicked from "assets/filter-icon-clicked.svg";
import closeIcon from "assets/close.svg"
import styles from './ServiceMapModal.module.sass'
import SelectList from "../UI/SelectList";
@@ -23,14 +25,14 @@ const modalStyle = {
transform: 'translate(-50%, 0%)',
width: '89vw',
height: '82vh',
bgcolor: 'background.paper',
bgcolor: '#F0F5FF',
borderRadius: '5px',
boxShadow: 24,
p: 4,
color: '#000',
padding: "25px 15px"
padding: "1px 1px",
paddingBottom: "15px"
};

interface LegentLabelProps {
color: string,
name: string
@@ -45,13 +47,11 @@ const LegentLabel: React.FC<LegentLabelProps> = ({ color, name }) => {
</React.Fragment>
}

const protocols = [
{ key: "HTTP", value: "HTTP", component: <LegentLabel color="#205cf5" name="HTTP" /> },
{ key: "HTTP/2", value: "HTTP/2", component: <LegentLabel color='#244c5a' name="HTTP/2" /> },
{ key: "gRPC", value: "gRPC", component: <LegentLabel color='#244c5a' name="gRPC" /> },
{ key: "AMQP", value: "AMQP", component: <LegentLabel color='#ff6600' name="AMQP" /> },
{ key: "KAFKA", value: "KAFKA", component: <LegentLabel color='#000000' name="KAFKA" /> },
{ key: "REDIS", value: "REDIS", component: <LegentLabel color='#a41e11' name="REDIS" /> },]
type ProtocolType = {
key: string;
value: string;
component: JSX.Element;
};


interface ServiceMapModalProps {
@@ -65,11 +65,11 @@ export const ServiceMapModal: React.FC<ServiceMapModalProps> = ({ isOpen, onClos
const commonClasses = useCommonStyles();
const [isLoading, setIsLoading] = useState<boolean>(true);
const [graphData, setGraphData] = useState<GraphData>({ nodes: [], edges: [] });
const [checkedProtocols, setCheckedProtocols] = useState(protocols.map(x => x.key))
const [checkedProtocols, setCheckedProtocols] = useState([])
const [checkedServices, setCheckedServices] = useState([])
const [serviceMapApiData, setServiceMapApiData] = useState<ServiceMapGraph>({ edges: [], nodes: [] })
const [servicesSearchVal, setServicesSearchVal] = useState("")
const [graphOptions, setGraphOptions] = useState(ServiceMapOptions);
const [isFilterClicked, setIsFilterClicked] = useState(true)

const getServiceMapData = useCallback(async () => {
try {
@@ -112,15 +112,23 @@ export const ServiceMapModal: React.FC<ServiceMapModalProps> = ({ isOpen, onClos
},
}
}
const mapToKeyValForFilter = (arr) => arr.map(mapNodesDatatoGraph)
const mapToKeyValForFilter = useCallback((arr) => arr.map(mapNodesDatatoGraph)
.map((edge) => { return { key: edge.label, value: edge.label } })
.sort((a, b) => { return a.key.localeCompare(b.key) });
.sort((a, b) => { return a.key.localeCompare(b.key) }), [])

const getProtocolsForFilter = useMemo(() => {
return serviceMapApiData.edges.reduce<ProtocolType[]>((returnArr, currentValue, currentIndex, array) => {
if (!returnArr.find(prot => prot.key === currentValue.protocol.abbr))
returnArr.push({ key: currentValue.protocol.abbr, value: currentValue.protocol.abbr, component: <LegentLabel color={currentValue.protocol.backgroundColor} name={currentValue.protocol.abbr} /> })
return returnArr
}, new Array<ProtocolType>())
}, [serviceMapApiData])

const getServicesForFilter = useMemo(() => {
const resolved = mapToKeyValForFilter(serviceMapApiData.nodes?.filter(x => x.resolved))
const unResolved = mapToKeyValForFilter(serviceMapApiData.nodes?.filter(x => !x.resolved))
return [...resolved, ...unResolved]
}, [serviceMapApiData])
}, [mapToKeyValForFilter, serviceMapApiData.nodes])

useEffect(() => {
const newGraphData: GraphData = {
@@ -141,10 +149,18 @@ export const ServiceMapModal: React.FC<ServiceMapModalProps> = ({ isOpen, onClos
}

useEffect(() => {
if (checkedServices.length == 0)
if (checkedServices.length === 0)
setCheckedServices(getServicesForFilter.map(x => x.key).filter(serviceName => !Utils.isIpAddress(serviceName)))
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [getServicesForFilter])

useEffect(() => {
if (checkedProtocols.length === 0) {
setCheckedProtocols(getProtocolsForFilter.map(x => x.key))
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [getProtocolsForFilter])

useEffect(() => {
getServiceMapData()
}, [getServiceMapData])
@@ -172,20 +188,45 @@ export const ServiceMapModal: React.FC<ServiceMapModalProps> = ({ isOpen, onClos
BackdropProps={{ timeout: 500 }}>
<Fade in={isOpen}>
<Box sx={modalStyle}>
<div className={styles.closeIcon}>
<img src={closeIcon} alt="close" onClick={() => onClose()} style={{ cursor: "pointer", userSelect: "none" }}></img>
</div>
<div className={styles.headerContainer}>
<div className={styles.headerSection}>
<span className={styles.title}>Services</span>
<Button size="medium"
variant="contained"
startIcon={<img src={isFilterClicked ? filterIconClicked : filterIcon} className="custom" alt="refresh" style={{ height: "26px", width: "26px" }}></img>}
className={commonClasses.outlinedButton + " " + commonClasses.imagedButton + ` ${isFilterClicked ? commonClasses.clickedButton : ""}`}
onClick={() => setIsFilterClicked(prevState => !prevState)}
style={{ textTransform: 'unset' }}>
Filter
</Button >
<Button style={{ marginLeft: "2%", textTransform: 'unset' }}
startIcon={<img src={refreshIcon} className="custom" alt="refresh"></img>}
size="medium"
variant="contained"
className={commonClasses.outlinedButton + " " + commonClasses.imagedButton}
onClick={refreshServiceMap}
>
Refresh
</Button>
</div>
</div>

<div className={styles.modalContainer}>
<div className={styles.filterSection}>
<div className={styles.filterSection + ` ${isFilterClicked ? styles.show : ""}`}>
<Resizeable minWidth={170} maxWidth={320}>
<div className={styles.filterWrapper}>
<div className={styles.protocolsFilterList}>
<SelectList items={protocols} checkBoxWidth="5%" tableName={"Protocols"} multiSelect={true}
checkedValues={checkedProtocols} setCheckedValues={onProtocolsChange} tableClassName={styles.filters} />
<div className={styles.card}>
<SelectList items={getProtocolsForFilter} checkBoxWidth="5%" tableName={"PROTOCOLS"} multiSelect={true}
checkedValues={checkedProtocols} setCheckedValues={onProtocolsChange} tableClassName={styles.filters}
inputSearchClass={styles.servicesFilterSearch} isFilterable={false}/>
</div>
<div className={styles.separtorLine}></div>
<div className={styles.servicesFilter}>
<input className={commonClasses.textField + ` ${styles.servicesFilterSearch}`} placeholder="search service" value={servicesSearchVal} onChange={(event) => setServicesSearchVal(event.target.value)} />
<div className={styles.servicesFilterWrapper + ` ${styles.card}`}>
<div className={styles.servicesFilterList}>
<SelectList items={getServicesForFilter} tableName={"Services"} tableClassName={styles.filters} multiSelect={true} searchValue={servicesSearchVal}
checkBoxWidth="5%" checkedValues={checkedServices} setCheckedValues={onServiceChanges} />
<SelectList items={getServicesForFilter} tableName={"SERVICES"} tableClassName={styles.filters} multiSelect={true}
checkBoxWidth="5%" checkedValues={checkedServices} setCheckedValues={onServiceChanges} inputSearchClass={styles.servicesFilterSearch}/>
</div>
</div>
</div>
@@ -193,16 +234,6 @@ export const ServiceMapModal: React.FC<ServiceMapModalProps> = ({ isOpen, onClos
</div>
<div className={styles.graphSection}>
<div style={{ display: "flex", justifyContent: "space-between" }}>
<Button style={{ marginLeft: "3%" }}
startIcon={<img src={refreshIcon} className="custom" alt="refresh" style={{ marginRight: "8%" }}></img>}
size="medium"
variant="contained"
className={commonClasses.outlinedButton + " " + commonClasses.imagedButton}
onClick={refreshServiceMap}
>
Refresh
</Button>
<img src={closeIcon} alt="close" onClick={() => onClose()} style={{ cursor: "pointer", userSelect: "none" }}></img>
</div>
{isLoading && <div className={spinnerStyle.spinnerContainer}>
<img alt="spinner" src={spinnerImg} style={{ height: 50 }} />

@@ -0,0 +1,3 @@
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M6 8L10.6667 12.6667V18H13.3333V12.6667L18 8V6H6V8Z" stroke="white"/>
</svg>

After: Size 182 B
Some files were not shown because too many files have changed in this diff.