Mirror of https://github.com/kubeshark/kubeshark.git, synced 2026-02-15 02:19:54 +00:00
Compare commits: 33.0-dev33...34.0-dev13 (22 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 7a823e89f1 |  |
|  | be98d85cb8 |  |
|  | e743ab7f7a |  |
|  | 78081a4a16 |  |
|  | 0ef6a7d2c4 |  |
|  | ba361df7e7 |  |
|  | 1f70eab0a4 |  |
|  | ea4991905b |  |
|  | 2ad4838cf5 |  |
|  | e41488ef3e |  |
|  | 533fb71bf4 |  |
|  | 6f8aad83e6 |  |
|  | 6e6bcec77e |  |
|  | 71db792a4e |  |
|  | f7f61c1217 |  |
|  | 696501fa11 |  |
|  | 415b5e08fd |  |
|  | 7810f6defb |  |
|  | 2aeac6c9e6 |  |
|  | dc241218bf |  |
|  | 02b3672e09 |  |
|  | 45b368b33e |  |

.github/workflows/static_code_analysis.yml (vendored): 11 changed lines
@@ -26,6 +26,7 @@ jobs:
 run: |
 sudo apt update
 sudo apt install -y libpcap-dev
+./devops/install-capstone.sh

 - name: Check Agent modified files
 id: agent_modified_files
@@ -37,7 +38,7 @@ jobs:
 with:
 version: latest
 working-directory: agent
-args: --timeout=3m
+args: --timeout=10m

 - name: Check shared modified files
 id: shared_modified_files
@@ -49,7 +50,7 @@ jobs:
 with:
 version: latest
 working-directory: shared
-args: --timeout=3m
+args: --timeout=10m

 - name: Check tap modified files
 id: tap_modified_files
@@ -61,7 +62,7 @@ jobs:
 with:
 version: latest
 working-directory: tap
-args: --timeout=3m
+args: --timeout=10m

 - name: Check cli modified files
 id: cli_modified_files
@@ -73,7 +74,7 @@ jobs:
 with:
 version: latest
 working-directory: cli
-args: --timeout=3m
+args: --timeout=10m

 - name: Check acceptanceTests modified files
 id: acceptanceTests_modified_files
@@ -85,7 +86,7 @@ jobs:
 with:
 version: latest
 working-directory: acceptanceTests
-args: --timeout=3m
+args: --timeout=10m

 - name: Check tap/api modified files
 id: tap_api_modified_files

.github/workflows/test.yml (vendored): 5 changed lines
@@ -35,6 +35,11 @@ jobs:
 run: |
 sudo apt-get install libpcap-dev

+- name: Install Capstone
+shell: bash
+run: |
+./devops/install-capstone.sh
+
 - name: Check CLI modified files
 id: cli_modified_files
 run: devops/check_modified_files.sh cli/

Dockerfile: 16 changed lines
@@ -25,7 +25,9 @@ RUN npm run build
 ### Base builder image for native builds architecture
 FROM golang:1.17-alpine AS builder-native-base
 ENV CGO_ENABLED=1 GOOS=linux
-RUN apk add --no-cache libpcap-dev g++ perl-utils
+RUN apk add --no-cache libpcap-dev g++ perl-utils curl build-base binutils-gold bash
+COPY devops/install-capstone.sh .
+RUN ./install-capstone.sh


 ### Intermediate builder image for x86-64 to x86-64 native builds
@@ -39,15 +41,15 @@ ENV GOARCH=arm64


 ### Builder image for x86-64 to AArch64 cross-compilation
-FROM up9inc/linux-arm64-musl-go-libpcap AS builder-from-amd64-to-arm64v8
+FROM up9inc/linux-arm64-musl-go-libpcap-capstone AS builder-from-amd64-to-arm64v8
 ENV CGO_ENABLED=1 GOOS=linux
-ENV GOARCH=arm64 CGO_CFLAGS="-I/work/libpcap"
+ENV GOARCH=arm64 CGO_CFLAGS="-I/work/libpcap -I/work/capstone/include"


 ### Builder image for AArch64 to x86-64 cross-compilation
-FROM up9inc/linux-x86_64-musl-go-libpcap AS builder-from-arm64v8-to-amd64
+FROM up9inc/linux-x86_64-musl-go-libpcap-capstone AS builder-from-arm64v8-to-amd64
 ENV CGO_ENABLED=1 GOOS=linux
-ENV GOARCH=amd64 CGO_CFLAGS="-I/libpcap"
+ENV GOARCH=amd64 CGO_CFLAGS="-I/libpcap -I/capstone/include"


 ### Final builder image where the build happens
@@ -95,8 +97,8 @@ RUN go build -ldflags="-extldflags=-static -s -w \
 -X 'github.com/up9inc/mizu/agent/pkg/version.Ver=${VER}'" -o mizuagent .

 # Download Basenine executable, verify the sha1sum
-ADD https://github.com/up9inc/basenine/releases/download/v0.8.2/basenine_linux_${GOARCH} ./basenine_linux_${GOARCH}
-ADD https://github.com/up9inc/basenine/releases/download/v0.8.2/basenine_linux_${GOARCH}.sha256 ./basenine_linux_${GOARCH}.sha256
+ADD https://github.com/up9inc/basenine/releases/download/v0.8.3/basenine_linux_${GOARCH} ./basenine_linux_${GOARCH}
+ADD https://github.com/up9inc/basenine/releases/download/v0.8.3/basenine_linux_${GOARCH}.sha256 ./basenine_linux_${GOARCH}.sha256

 RUN shasum -a 256 -c basenine_linux_"${GOARCH}".sha256 && \
 chmod +x ./basenine_linux_"${GOARCH}" && \
@@ -1,2 +1,3 @@
 test: ## Run acceptance tests.
+@npm install cypress@10.0.1 -y
 @go test ./... -timeout 1h -v

acceptanceTests/cypress.config.js (new file): 31 lines
@@ -0,0 +1,31 @@
+const { defineConfig } = require('cypress')
+
+module.exports = defineConfig({
+watchForFileChanges: false,
+viewportWidth: 1920,
+viewportHeight: 1080,
+video: false,
+screenshotOnRunFailure: false,
+defaultCommandTimeout: 6000,
+env: {
+testUrl: 'http://localhost:8899/',
+redactHeaderContent: 'User-Header[REDACTED]',
+redactBodyContent: '{ "User": "[REDACTED]" }',
+regexMaskingBodyContent: '[REDACTED]',
+greenFilterColor: 'rgb(210, 250, 210)',
+redFilterColor: 'rgb(250, 214, 220)',
+bodyJsonClass: '.hljs',
+mizuWidth: 1920,
+normalMizuHeight: 1080,
+hugeMizuHeight: 3500,
+},
+e2e: {
+// We've imported your old cypress plugins here.
+// You may want to clean this up later by importing these.
+// setupNodeEvents(on, config) {
+//   return require('./cypress/plugins/index.js')(on, config)
+// },
+specPattern: 'cypress/e2e/tests/*.js',
+supportFile: false
+},
+})
@@ -1,34 +0,0 @@
-{
-"watchForFileChanges":false,
-"viewportWidth": 1920,
-"viewportHeight": 1080,
-"video": false,
-"screenshotOnRunFailure": false,
-"defaultCommandTimeout": 6000,
-"testFiles": [
-"tests/GuiPort.js",
-"tests/MultipleNamespaces.js",
-"tests/Redact.js",
-"tests/NoRedact.js",
-"tests/Regex.js",
-"tests/RegexMasking.js",
-"tests/IgnoredUserAgents.js",
-"tests/UiTest.js",
-"tests/Redis.js",
-"tests/Rabbit.js",
-"tests/serviceMapFunction.js"
-],
-
-"env": {
-"testUrl": "http://localhost:8899/",
-"redactHeaderContent": "User-Header[REDACTED]",
-"redactBodyContent": "{ \"User\": \"[REDACTED]\" }",
-"regexMaskingBodyContent": "[REDACTED]",
-"greenFilterColor": "rgb(210, 250, 210)",
-"redFilterColor": "rgb(250, 214, 220)",
-"bodyJsonClass": ".hljs",
-"mizuWidth": 1920,
-"normalMizuHeight": 1080,
-"hugeMizuHeight": 3500
-}
-}
@@ -4,8 +4,6 @@ export const valueTabs = {
 none: null
 }

-const maxEntriesInDom = 13;
-
 export function isValueExistsInElement(shouldInclude, content, domPathToContainer){
 it(`should ${shouldInclude ? '' : 'not'} include '${content}'`, function () {
 cy.get(domPathToContainer).then(htmlText => {
@@ -269,7 +269,7 @@ function checkRightSideResponseBody() {
 const responseBody = JSON.parse(decodedBody);

-const expectdJsonBody = {
+const expectedJsonBody = {
 args: RegExp({}),
 url: RegExp('http://.*/get'),
 headers: {
@@ -279,27 +279,24 @@ function checkRightSideResponseBody() {
 }
 };

-expect(responseBody.args).to.match(expectdJsonBody.args);
-expect(responseBody.url).to.match(expectdJsonBody.url);
-expect(responseBody.headers['User-Agent']).to.match(expectdJsonBody.headers['User-Agent']);
-expect(responseBody.headers['Accept-Encoding']).to.match(expectdJsonBody.headers['Accept-Encoding']);
-expect(responseBody.headers['X-Forwarded-Uri']).to.match(expectdJsonBody.headers['X-Forwarded-Uri']);
+const expectedStringInJsonBody = RegExp('/api/v1/namespaces/.*/services/.*/proxy/get');
+
+expect(responseBody.args).to.match(expectedJsonBody.args);
+expect(responseBody.url).to.match(expectedJsonBody.url);
+expect(responseBody.headers['User-Agent']).to.match(expectedJsonBody.headers['User-Agent']);
+expect(responseBody.headers['Accept-Encoding']).to.match(expectedJsonBody.headers['Accept-Encoding']);
+expect(responseBody.headers['X-Forwarded-Uri']).to.match(expectedJsonBody.headers['X-Forwarded-Uri']);

 cy.get(`${Cypress.env('bodyJsonClass')}`).should('have.text', encodedBody);
 cy.get(`[data-cy="lineNumbersCheckBoxInput"]`).should('be.disabled');

 clickCheckbox('Decode Base64');
 cy.get(`[data-cy="lineNumbersCheckBoxInput"]`).should('not.be.disabled');

 cy.get(`${Cypress.env('bodyJsonClass')} > `).its('length').should('be.gt', 1).then(linesNum => {
 cy.get(`${Cypress.env('bodyJsonClass')} > >`).its('length').should('be.gt', linesNum).then(jsonItemsNum => {
 // checkPrettyAndLineNums(decodedBody);

 //clickCheckbox('Line numbers');
 //checkPrettyOrNothing(jsonItemsNum, decodedBody);

 // clickCheckbox('Pretty');
 // checkPrettyOrNothing(jsonItemsNum, decodedBody);
 //
 // clickCheckbox('Line numbers');
 // checkOnlyLineNumberes(jsonItemsNum, decodedBody);
 checkOnlyLineNumberes(jsonItemsNum, expectedStringInJsonBody);
 });
 });
 });
@@ -309,37 +306,9 @@ function clickCheckbox(type) {
 cy.contains(`${type}`).prev().children().click();
 }

-function checkPrettyAndLineNums(decodedBody) {
-decodedBody = decodedBody.replaceAll(' ', '');
-cy.get(`${Cypress.env('bodyJsonClass')} >`).then(elements => {
-const lines = Object.values(elements);
-lines.forEach((line, index) => {
-if (line.getAttribute) {
-const cleanLine = getCleanLine(line);
-const currentLineFromDecodedText = decodedBody.substring(0, cleanLine.length);
-
-expect(cleanLine).to.equal(currentLineFromDecodedText, `expected the text in line number ${index + 1} to match the text that generated by the base64 decoding`)
-
-decodedBody = decodedBody.substring(cleanLine.length);
-}
-});
-});
-}
-
-function getCleanLine(lineElement) {
-return (lineElement.innerText.substring(0, lineElement.innerText.length - 1)).replaceAll(' ', '');
-}
-
-function checkPrettyOrNothing(jsonItems, decodedBody) {
-cy.get(`${Cypress.env('bodyJsonClass')} > `).should('have.length', jsonItems).then(text => {
-const json = text.text();
-expect(json).to.equal(decodedBody);
-});
-}
-
 function checkOnlyLineNumberes(jsonItems, decodedText) {
-cy.get(`${Cypress.env('bodyJsonClass')} >`).should('have.length', 1).and('have.text', decodedText);
-cy.get(`${Cypress.env('bodyJsonClass')} > >`).should('have.length', jsonItems)
+cy.get(`${Cypress.env('bodyJsonClass')} > >`).should('have.length', jsonItems);
+cy.get(`${Cypress.env('bodyJsonClass')} >`).contains(decodedText);
 }

 function serviceMapCheck() {
@@ -105,10 +105,13 @@ func TestRedis(t *testing.T) {
 }
 }

-RunCypressTests(t, "npx cypress run --spec \"cypress/integration/tests/Redis.js\"")
+RunCypressTests(t, "npx cypress run --spec \"cypress/e2e/tests/Redis.js\"")
 }

 func TestAmqp(t *testing.T) {
+
+t.Skip("ignoredd for now because those tests are not stable")
+
 if testing.Short() {
 t.Skip("ignored acceptance test")
 }
@@ -236,5 +239,5 @@ func TestAmqp(t *testing.T) {
 ch.Close()
 }

-RunCypressTests(t, "npx cypress run --spec \"cypress/integration/tests/Rabbit.js\"")
+RunCypressTests(t, "npx cypress run --spec \"cypress/e2e/tests/Rabbit.js\"")
 }
@@ -27,7 +27,7 @@ else
 fi

 echo "Starting minikube..."
-minikube start
+minikube start --cpus 2 --memory 6946

 echo "Creating mizu tests namespaces"
 kubectl create namespace mizu-tests --dry-run=client -o yaml | kubectl apply -f -
@@ -78,7 +78,7 @@ func basicTapTest(t *testing.T, shouldCheckSrcAndDest bool, extraArgs... string)
 expectedPodsStr += fmt.Sprintf("Name:%vNamespace:%v", expectedPods[i].Name, expectedPods[i].Namespace)
 }

-RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/integration/tests/UiTest.js\" --env entriesCount=%d,arrayDict=%v,shouldCheckSrcAndDest=%v",
+RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/e2e/tests/UiTest.js\" --env entriesCount=%d,arrayDict=%v,shouldCheckSrcAndDest=%v",
 entriesCount, expectedPodsStr, shouldCheckSrcAndDest))
 })
 }
@@ -135,7 +135,7 @@ func TestTapGuiPort(t *testing.T) {
 }
 }

-RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/integration/tests/GuiPort.js\" --env name=%v,namespace=%v,port=%d",
+RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/e2e/tests/GuiPort.js\" --env name=%v,namespace=%v,port=%d",
 "httpbin", "mizu-tests", guiPort))
 })
 }
@@ -182,7 +182,7 @@ func TestTapAllNamespaces(t *testing.T) {
 return
 }

-RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/integration/tests/MultipleNamespaces.js\" --env name1=%v,name2=%v,name3=%v,namespace1=%v,namespace2=%v,namespace3=%v",
+RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/e2e/tests/MultipleNamespaces.js\" --env name1=%v,name2=%v,name3=%v,namespace1=%v,namespace2=%v,namespace3=%v",
 expectedPods[0].Name, expectedPods[1].Name, expectedPods[2].Name, expectedPods[0].Namespace, expectedPods[1].Namespace, expectedPods[2].Namespace))
 }

@@ -231,7 +231,7 @@ func TestTapMultipleNamespaces(t *testing.T) {
 return
 }

-RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/integration/tests/MultipleNamespaces.js\" --env name1=%v,name2=%v,name3=%v,namespace1=%v,namespace2=%v,namespace3=%v",
+RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/e2e/tests/MultipleNamespaces.js\" --env name1=%v,name2=%v,name3=%v,namespace1=%v,namespace2=%v,namespace3=%v",
 expectedPods[0].Name, expectedPods[1].Name, expectedPods[2].Name, expectedPods[0].Namespace, expectedPods[1].Namespace, expectedPods[2].Namespace))
 }

@@ -277,7 +277,7 @@ func TestTapRegex(t *testing.T) {
 return
 }

-RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/integration/tests/Regex.js\" --env name=%v,namespace=%v",
+RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/e2e/tests/Regex.js\" --env name=%v,namespace=%v",
 expectedPods[0].Name, expectedPods[0].Namespace))
 }

@@ -376,7 +376,7 @@ func TestTapRedact(t *testing.T) {
 }
 }

-RunCypressTests(t, "npx cypress run --spec \"cypress/integration/tests/Redact.js\"")
+RunCypressTests(t, "npx cypress run --spec \"cypress/e2e/tests/Redact.js\"")
 }

 func TestTapNoRedact(t *testing.T) {
@@ -426,7 +426,7 @@ func TestTapNoRedact(t *testing.T) {
 }
 }

-RunCypressTests(t, "npx cypress run --spec \"cypress/integration/tests/NoRedact.js\"")
+RunCypressTests(t, "npx cypress run --spec \"cypress/e2e/tests/NoRedact.js\"")
 }

 func TestTapRegexMasking(t *testing.T) {
@@ -479,7 +479,7 @@ func TestTapRegexMasking(t *testing.T) {
 }
 }

-RunCypressTests(t, "npx cypress run --spec \"cypress/integration/tests/RegexMasking.js\"")
+RunCypressTests(t, "npx cypress run --spec \"cypress/e2e/tests/RegexMasking.js\"")

 }

@@ -541,7 +541,7 @@ func TestTapIgnoredUserAgents(t *testing.T) {
 }
 }

-RunCypressTests(t, "npx cypress run --spec \"cypress/integration/tests/IgnoredUserAgents.js\"")
+RunCypressTests(t, "npx cypress run --spec \"cypress/e2e/tests/IgnoredUserAgents.js\"")
 }

 func TestTapDumpLogs(t *testing.T) {
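Every RunCypressTests call above repeats the spec directory, which is why the Cypress 9 to 10 move from cypress/integration to cypress/e2e touches so many call sites. A hypothetical helper, not present in this diff, sketches how that path could live in one place:

```go
package acceptance

import "fmt"

// cypressRunCmd is an illustrative helper only (the repo builds these strings
// inline): centralizing the spec directory would confine a layout change like
// integration -> e2e to a single line.
func cypressRunCmd(specName string) string {
	return fmt.Sprintf("npx cypress run --spec %q",
		fmt.Sprintf("cypress/e2e/tests/%s.js", specName))
}
```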
@@ -20,7 +20,7 @@ require (
 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
 github.com/orcaman/concurrent-map v1.0.0
 github.com/stretchr/testify v1.7.0
-github.com/up9inc/basenine/client/go v0.0.0-20220509204026-c37adfc587f4
+github.com/up9inc/basenine/client/go v0.0.0-20220612112747-3b28eeac9c51
 github.com/up9inc/mizu/logger v0.0.0
 github.com/up9inc/mizu/shared v0.0.0
 github.com/up9inc/mizu/tap v0.0.0
@@ -47,12 +47,13 @@ require (
 github.com/Azure/go-autorest/logger v0.2.1 // indirect
 github.com/Azure/go-autorest/tracing v0.6.0 // indirect
 github.com/MakeNowJust/heredoc v1.0.0 // indirect
+github.com/Masterminds/semver v1.5.0 // indirect
 github.com/PuerkitoBio/purell v1.1.1 // indirect
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 github.com/beevik/etree v1.1.0 // indirect
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
 github.com/chanced/dynamic v0.0.0-20211210164248-f8fadb1d735b // indirect
-github.com/cilium/ebpf v0.8.0 // indirect
+github.com/cilium/ebpf v0.8.1 // indirect
 github.com/davecgh/go-spew v1.1.1 // indirect
 github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
@@ -84,6 +85,7 @@ require (
 github.com/josharian/intern v1.0.0 // indirect
 github.com/json-iterator/go v1.1.12 // indirect
 github.com/klauspost/compress v1.14.2 // indirect
+github.com/knightsc/gapstone v0.0.0-20211014144438-5e0e64002a6e // indirect
 github.com/leodido/go-urn v1.2.1 // indirect
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
 github.com/mailru/easyjson v0.7.7 // indirect

agent/go.sum: 12 changed lines
@@ -77,6 +77,8 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3
 github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
 github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
 github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
@@ -126,8 +128,8 @@ github.com/chanced/openapi v0.0.8/go.mod h1:SxE2VMLPw+T7Vq8nwbVVhDF2PigvRF4n5Xyq
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/cilium/ebpf v0.8.0 h1:2V6KSg3FRADVU2BMIRemZ0hV+9OM+aAHhZDjQyjJTAs=
-github.com/cilium/ebpf v0.8.0/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
+github.com/cilium/ebpf v0.8.1 h1:bLSSEbBLqGPXxls55pGr5qWZaTqcmfDJHhou7t254ao=
+github.com/cilium/ebpf v0.8.1/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -455,6 +457,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
 github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/knightsc/gapstone v0.0.0-20211014144438-5e0e64002a6e h1:6J5obSn9umEThiYzWzndcPOZR0Qj/sVCZpH6V1G7yNE=
+github.com/knightsc/gapstone v0.0.0-20211014144438-5e0e64002a6e/go.mod h1:1K5hEzsMBLTPdRJKEHqBFJ8Zt2VRqDhomcQ11KH0WW4=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
@@ -694,8 +698,8 @@ github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn
 github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
 github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ=
 github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw=
-github.com/up9inc/basenine/client/go v0.0.0-20220509204026-c37adfc587f4 h1:nNOrU1HVH0fnaG7GNhxCc8kNPVL035Iix7ihUF6lZT8=
-github.com/up9inc/basenine/client/go v0.0.0-20220509204026-c37adfc587f4/go.mod h1:SvJGPoa/6erhUQV7kvHBwM/0x5LyO6XaG2lUaCaKiUI=
+github.com/up9inc/basenine/client/go v0.0.0-20220612112747-3b28eeac9c51 h1:6op+PUYmTlxze3V3f30lWKix3sWqv1M9rvRhyaxbsdQ=
+github.com/up9inc/basenine/client/go v0.0.0-20220612112747-3b28eeac9c51/go.mod h1:SvJGPoa/6erhUQV7kvHBwM/0x5LyO6XaG2lUaCaKiUI=
 github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg=
 github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
 github.com/wI2L/jsondiff v0.1.1 h1:r2TkoEet7E4JMO5+s1RCY2R0LrNPNHY6hbDeow2hRHw=
@@ -24,7 +24,6 @@ import (
 "github.com/up9inc/mizu/agent/pkg/oas"
 "github.com/up9inc/mizu/agent/pkg/routes"
 "github.com/up9inc/mizu/agent/pkg/servicemap"
-"github.com/up9inc/mizu/agent/pkg/up9"
 "github.com/up9inc/mizu/agent/pkg/utils"

 "github.com/up9inc/mizu/agent/pkg/api"
@@ -72,13 +71,13 @@ func main() {
 } else if *tapperMode {
 runInTapperMode()
 } else if *apiServerMode {
-app := runInApiServerMode(*namespace)
+ginApp := runInApiServerMode(*namespace)

 if *profiler {
-pprof.Register(app)
+pprof.Register(ginApp)
 }

-utils.StartServer(app)
+utils.StartServer(ginApp)

 } else if *harsReaderMode {
 runInHarReaderMode()
@@ -92,9 +91,9 @@ func main() {
 }

 func hostApi(socketHarOutputChannel chan<- *tapApi.OutputChannelItem) *gin.Engine {
-app := gin.Default()
+ginApp := gin.Default()

-app.GET("/echo", func(c *gin.Context) {
+ginApp.GET("/echo", func(c *gin.Context) {
 c.JSON(http.StatusOK, "Here is Mizu agent")
 })

@@ -102,7 +101,7 @@ func hostApi(socketHarOutputChannel chan<- *tapApi.OutputChannelItem) *gin.Engin
 SocketOutChannel: socketHarOutputChannel,
 }

-app.Use(disableRootStaticCache())
+ginApp.Use(disableRootStaticCache())

 staticFolder := "./site"
 indexStaticFile := staticFolder + "/index.html"
@@ -110,30 +109,30 @@ func hostApi(socketHarOutputChannel chan<- *tapApi.OutputChannelItem) *gin.Engin
 logger.Log.Errorf("Error setting ui flags, err: %v", err)
 }

-app.Use(static.ServeRoot("/", staticFolder))
-app.NoRoute(func(c *gin.Context) {
+ginApp.Use(static.ServeRoot("/", staticFolder))
+ginApp.NoRoute(func(c *gin.Context) {
 c.File(indexStaticFile)
 })

-app.Use(middlewares.CORSMiddleware()) // This has to be called after the static middleware, does not work if its called before
+ginApp.Use(middlewares.CORSMiddleware()) // This has to be called after the static middleware, does not work if it's called before

-api.WebSocketRoutes(app, &eventHandlers)
+api.WebSocketRoutes(ginApp, &eventHandlers)

-if config.Config.OAS {
-routes.OASRoutes(app)
+if config.Config.OAS.Enable {
+routes.OASRoutes(ginApp)
 }

 if config.Config.ServiceMap {
-routes.ServiceMapRoutes(app)
+routes.ServiceMapRoutes(ginApp)
 }

-routes.QueryRoutes(app)
-routes.EntriesRoutes(app)
-routes.MetadataRoutes(app)
-routes.StatusRoutes(app)
-routes.DbRoutes(app)
+routes.QueryRoutes(ginApp)
+routes.EntriesRoutes(ginApp)
+routes.MetadataRoutes(ginApp)
+routes.StatusRoutes(ginApp)
+routes.DbRoutes(ginApp)

-return app
+return ginApp
 }

 func runInApiServerMode(namespace string) *gin.Engine {
@@ -145,13 +144,6 @@ func runInApiServerMode(namespace string) *gin.Engine {

 enableExpFeatureIfNeeded()

-syncEntriesConfig := getSyncEntriesConfig()
-if syncEntriesConfig != nil {
-if err := up9.SyncEntries(syncEntriesConfig); err != nil {
-logger.Log.Error("Error syncing entries, err: %v", err)
-}
-}
-
 return hostApi(app.GetEntryInputChannel())
 }

@@ -208,7 +200,7 @@ func runInHarReaderMode() {
 }

 func enableExpFeatureIfNeeded() {
-if config.Config.OAS {
+if config.Config.OAS.Enable {
 oasGenerator := dependency.GetInstance(dependency.OasGeneratorDependency).(oas.OasGenerator)
 oasGenerator.Start()
 }
@@ -218,21 +210,6 @@ func enableExpFeatureIfNeeded() {
 }
 }

-func getSyncEntriesConfig() *shared.SyncEntriesConfig {
-syncEntriesConfigJson := os.Getenv(shared.SyncEntriesConfigEnvVar)
-if syncEntriesConfigJson == "" {
-return nil
-}
-
-var syncEntriesConfig = &shared.SyncEntriesConfig{}
-err := json.Unmarshal([]byte(syncEntriesConfigJson), syncEntriesConfig)
-if err != nil {
-panic(fmt.Sprintf("env var %s's value of %s is invalid! json must match the shared.SyncEntriesConfig struct, err: %v", shared.SyncEntriesConfigEnvVar, syncEntriesConfigJson, err))
-}
-
-return syncEntriesConfig
-}
-
 func disableRootStaticCache() gin.HandlerFunc {
 return func(c *gin.Context) {
 if c.Request.RequestURI == "/" {
@@ -250,7 +227,7 @@ func setUIFlags(uiIndexPath string) error {
 return err
 }

-replacedContent := strings.Replace(string(read), "__IS_OAS_ENABLED__", strconv.FormatBool(config.Config.OAS), 1)
+replacedContent := strings.Replace(string(read), "__IS_OAS_ENABLED__", strconv.FormatBool(config.Config.OAS.Enable), 1)
 replacedContent = strings.Replace(replacedContent, "__IS_SERVICE_MAP_ENABLED__", strconv.FormatBool(config.Config.ServiceMap), 1)

 err = ioutil.WriteFile(uiIndexPath, []byte(replacedContent), 0)
@@ -386,7 +363,7 @@ func handleIncomingMessageAsTapper(socketConnection *websocket.Conn) {

 func initializeDependencies() {
 dependency.RegisterGenerator(dependency.ServiceMapGeneratorDependency, func() interface{} { return servicemap.GetDefaultServiceMapInstance() })
-dependency.RegisterGenerator(dependency.OasGeneratorDependency, func() interface{} { return oas.GetDefaultOasGeneratorInstance() })
+dependency.RegisterGenerator(dependency.OasGeneratorDependency, func() interface{} { return oas.GetDefaultOasGeneratorInstance(config.Config.OAS.MaxExampleLen) })
 dependency.RegisterGenerator(dependency.EntriesInserter, func() interface{} { return api.GetBasenineEntryInserterInstance() })
 dependency.RegisterGenerator(dependency.EntriesProvider, func() interface{} { return &entries.BasenineEntriesProvider{} })
 dependency.RegisterGenerator(dependency.EntriesSocketStreamer, func() interface{} { return &api.BasenineEntryStreamer{} })
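The hunks above replace the old boolean config.Config.OAS flag with nested fields, config.Config.OAS.Enable and config.Config.OAS.MaxExampleLen. The config struct itself is not shown in this compare; a minimal sketch of the shape it implies:

```go
package config

// OASConfig is a sketch of the settings block implied above, not the actual
// definition in the agent config package; only these two fields are visible
// in the diff.
type OASConfig struct {
	Enable        bool // replaces the former boolean OAS flag
	MaxExampleLen int  // -1 keeps examples unlimited, 0 and above caps their length
}
```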
@@ -10,7 +10,6 @@ import (
 "github.com/up9inc/mizu/agent/pkg/models"
 "github.com/up9inc/mizu/agent/pkg/providers/tappedPods"
 "github.com/up9inc/mizu/agent/pkg/providers/tappers"
-"github.com/up9inc/mizu/agent/pkg/up9"

 tapApi "github.com/up9inc/mizu/tap/api"

@@ -31,10 +30,6 @@ type RoutesEventHandlers struct {
 SocketOutChannel chan<- *tapApi.OutputChannelItem
 }

-func init() {
-go up9.UpdateAnalyzeStatus(BroadcastToBrowserClients)
-}
-
 func (h *RoutesEventHandlers) WebSocketConnect(_ *gin.Context, socketId int, isTapper bool) {
 if isTapper {
 logger.Log.Infof("Websocket event - Tapper connected, socket ID: %d", socketId)
@@ -34,12 +34,12 @@ func TestGetOASSpec(t *testing.T) {

 func getRecorderAndContext() (*httptest.ResponseRecorder, *gin.Context) {
 dependency.RegisterGenerator(dependency.OasGeneratorDependency, func() interface{} {
-return oas.GetDefaultOasGeneratorInstance()
+return oas.GetDefaultOasGeneratorInstance(-1)
 })

 recorder := httptest.NewRecorder()
 c, _ := gin.CreateTestContext(recorder)
-oas.GetDefaultOasGeneratorInstance().Start()
-oas.GetDefaultOasGeneratorInstance().GetServiceSpecs().Store("some", oas.NewGen("some"))
+oas.GetDefaultOasGeneratorInstance(-1).Start()
+oas.GetDefaultOasGeneratorInstance(-1).GetServiceSpecs().Store("some", oas.NewGen("some"))
 return recorder, c
 }
@@ -11,7 +11,6 @@ import (
 "github.com/up9inc/mizu/agent/pkg/providers"
 "github.com/up9inc/mizu/agent/pkg/providers/tappedPods"
 "github.com/up9inc/mizu/agent/pkg/providers/tappers"
-"github.com/up9inc/mizu/agent/pkg/up9"
 "github.com/up9inc/mizu/agent/pkg/validation"
 "github.com/up9inc/mizu/logger"
 "github.com/up9inc/mizu/shared"
@@ -71,25 +70,11 @@ func GetConnectedTappersCount(c *gin.Context) {
 c.JSON(http.StatusOK, tappers.GetConnectedCount())
 }

-func GetAuthStatus(c *gin.Context) {
-authStatus, err := providers.GetAuthStatus()
-if err != nil {
-c.JSON(http.StatusInternalServerError, err)
-return
-}
-
-c.JSON(http.StatusOK, authStatus)
-}
-
 func GetTappingStatus(c *gin.Context) {
 tappedPodsStatus := tappedPods.GetTappedPodsStatus()
 c.JSON(http.StatusOK, tappedPodsStatus)
 }

-func AnalyzeInformation(c *gin.Context) {
-c.JSON(http.StatusOK, up9.GetAnalyzeInfo())
-}
-
 func GetGeneralStats(c *gin.Context) {
 c.JSON(http.StatusOK, providers.GetGeneralStats())
 }
@@ -43,11 +43,6 @@ type WebSocketTappedEntryMessage struct {
 Data *tapApi.OutputChannelItem
 }

-type AuthStatus struct {
-Email string `json:"email"`
-Model string `json:"model"`
-}
-
 type ToastMessage struct {
 Type string `json:"type"`
 AutoClose uint `json:"autoClose"`
@@ -9,7 +9,7 @@ var ignoredCtypes = []string{"application/javascript", "application/x-javascript"

 var ignoredHeaders = []string{
 "a-im", "accept",
-"authorization", "cache-control", "connection", "content-encoding", "content-length", "content-type", "cookie",
+"authorization", "cache-control", "connection", "content-encoding", "content-length", "content-range", "content-type", "cookie",
 "date", "dnt", "expect", "forwarded", "from", "front-end-https", "host", "http2-settings",
 "max-forwards", "origin", "pragma", "proxy-authorization", "proxy-connection", "range", "referer",
 "save-data", "te", "trailer", "transfer-encoding", "upgrade", "upgrade-insecure-requests", "x-download-options",
@@ -28,13 +28,14 @@ type OasGenerator interface {
 }

 type defaultOasGenerator struct {
-started bool
-serviceSpecs *sync.Map
+started bool
+serviceSpecs *sync.Map
+maxExampleLen int
 }

-func GetDefaultOasGeneratorInstance() *defaultOasGenerator {
+func GetDefaultOasGeneratorInstance(maxExampleLen int) *defaultOasGenerator {
 syncOnce.Do(func() {
-instance = NewDefaultOasGenerator()
+instance = NewDefaultOasGenerator(maxExampleLen)
 logger.Log.Debug("OAS Generator Initialized")
 })
 return instance
@@ -117,6 +118,7 @@ func (g *defaultOasGenerator) getGen(dest string, urlStr string) *SpecGen {
 var gen *SpecGen
 if !found {
 gen = NewGen(u.Scheme + "://" + dest)
+gen.MaxExampleLen = g.maxExampleLen
 g.serviceSpecs.Store(dest, gen)
 } else {
 gen = val.(*SpecGen)
@@ -132,9 +134,10 @@ func (g *defaultOasGenerator) GetServiceSpecs() *sync.Map {
 return g.serviceSpecs
 }

-func NewDefaultOasGenerator() *defaultOasGenerator {
+func NewDefaultOasGenerator(maxExampleLen int) *defaultOasGenerator {
 return &defaultOasGenerator{
-started: false,
-serviceSpecs: &sync.Map{},
+started: false,
+serviceSpecs: &sync.Map{},
+maxExampleLen: maxExampleLen,
 }
 }
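GetDefaultOasGeneratorInstance now takes maxExampleLen, but the body still goes through syncOnce.Do, so the argument only has an effect on the first call; later callers, such as the tests below that pass -1, get whichever instance was built first. A minimal, self-contained sketch of that pattern, assuming a plain sync.Once singleton:

```go
package main

import (
	"fmt"
	"sync"
)

type generator struct{ maxExampleLen int }

var (
	once     sync.Once
	instance *generator
)

// getInstance mirrors the pattern above: the first caller's argument wins,
// arguments on later calls are silently ignored.
func getInstance(maxExampleLen int) *generator {
	once.Do(func() {
		instance = &generator{maxExampleLen: maxExampleLen}
	})
	return instance
}

func main() {
	a := getInstance(100)
	b := getInstance(-1) // too late: the singleton already exists
	fmt.Println(a.maxExampleLen, b.maxExampleLen) // prints: 100 100
}
```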
@@ -8,7 +8,7 @@ import (
 )

 func TestOASGen(t *testing.T) {
-gen := GetDefaultOasGeneratorInstance()
+gen := GetDefaultOasGeneratorInstance(-1)

 e := new(har.Entry)
 err := json.Unmarshal([]byte(`{"startedDateTime": "20000101","request": {"url": "https://host/path", "method": "GET"}, "response": {"status": 200}}`), e)
@@ -42,6 +42,8 @@ type reqResp struct { // hello, generics in Go
 }

 type SpecGen struct {
+MaxExampleLen int // -1 unlimited, 0 and above sets limit
+
 oas *openapi.OpenAPI
 tree *Node
 lock sync.Mutex
@@ -59,7 +61,11 @@ func NewGen(server string) *SpecGen {
 spec.Servers = make([]*openapi.Server, 0)
 spec.Servers = append(spec.Servers, &openapi.Server{URL: server})

-gen := SpecGen{oas: spec, tree: new(Node)}
+gen := SpecGen{
+oas: spec,
+tree: new(Node),
+MaxExampleLen: -1,
+}
 return &gen
 }
@@ -228,7 +234,7 @@ func (g *SpecGen) handlePathObj(entryWithSource *EntryWithSource) (string, error
 split = strings.Split(urlParsed.Path, "/")
 }
 node := g.tree.getOrSet(split, new(openapi.PathObj), entryWithSource.Id)
-opObj, err := handleOpObj(entryWithSource, node.pathObj)
+opObj, err := handleOpObj(entryWithSource, node.pathObj, g.MaxExampleLen)

 if opObj != nil {
 return opObj.OperationID, err
@@ -237,7 +243,7 @@ func (g *SpecGen) handlePathObj(entryWithSource *EntryWithSource) (string, error
 return "", err
 }

-func handleOpObj(entryWithSource *EntryWithSource, pathObj *openapi.PathObj) (*openapi.Operation, error) {
+func handleOpObj(entryWithSource *EntryWithSource, pathObj *openapi.PathObj, limit int) (*openapi.Operation, error) {
 entry := entryWithSource.Entry
 isSuccess := 100 <= entry.Response.Status && entry.Response.Status < 400
 opObj, wasMissing, err := getOpObj(pathObj, entry.Request.Method, isSuccess)
@@ -250,12 +256,12 @@ func handleOpObj(entryWithSource *EntryWithSource, pathObj *openapi.PathObj) (*o
 return nil, nil
 }

-err = handleRequest(&entry.Request, opObj, isSuccess, entryWithSource.Id)
+err = handleRequest(&entry.Request, opObj, isSuccess, entryWithSource.Id, limit)
 if err != nil {
 return nil, err
 }

-err = handleResponse(&entry.Response, opObj, isSuccess, entryWithSource.Id)
+err = handleResponse(&entry.Response, opObj, isSuccess, entryWithSource.Id, limit)
 if err != nil {
 return nil, err
 }
@@ -342,7 +348,7 @@ func handleCounters(opObj *openapi.Operation, success bool, entryWithSource *Ent
 return nil
 }

-func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool, sampleId string) error {
+func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool, sampleId string, limit int) error {
 // TODO: we don't handle the situation when header/qstr param can be defined on pathObj level. Also the path param defined on opObj
 urlParsed, err := url.Parse(req.URL)
 if err != nil {
@@ -390,7 +396,7 @@ func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool, s
 } else {

 reqCtype, _ := getReqCtype(req)
-reqMedia, err := fillContent(reqResp{Req: req}, reqBody.Content, reqCtype, sampleId)
+reqMedia, err := fillContent(reqResp{Req: req}, reqBody.Content, reqCtype, sampleId, limit)
 if err != nil {
 return err
 }
@@ -402,7 +408,7 @@ func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool, s
 return nil
 }

-func handleResponse(resp *har.Response, opObj *openapi.Operation, isSuccess bool, sampleId string) error {
+func handleResponse(resp *har.Response, opObj *openapi.Operation, isSuccess bool, sampleId string, limit int) error {
 // TODO: we don't support "default" response
 respObj, err := getResponseObj(resp, opObj, isSuccess)
 if err != nil {
@@ -415,7 +421,7 @@ func handleResponse(resp *har.Response, opObj *openapi.Operation, isSuccess bool

 respCtype := getRespCtype(resp)
 respContent := respObj.Content
-respMedia, err := fillContent(reqResp{Resp: resp}, respContent, respCtype, sampleId)
+respMedia, err := fillContent(reqResp{Resp: resp}, respContent, respCtype, sampleId, limit)
 if err != nil {
 return err
 }
@@ -467,7 +473,7 @@ func handleRespHeaders(reqHeaders []har.Header, respObj *openapi.ResponseObj, sa
 }
 }

-func fillContent(reqResp reqResp, respContent openapi.Content, ctype string, sampleId string) (*openapi.MediaType, error) {
+func fillContent(reqResp reqResp, respContent openapi.Content, ctype string, sampleId string, limit int) (*openapi.MediaType, error) {
 content, found := respContent[ctype]
 if !found {
 respContent[ctype] = &openapi.MediaType{}
@@ -510,7 +516,7 @@ func fillContent(reqResp reqResp, respContent openapi.Content, ctype string, sam
 handleFormDataMultipart(text, content, params)
 }

-if content.Example == nil && len(exampleMsg) > len(content.Example) {
+if len(exampleMsg) > len(content.Example) && (limit < 0 || len(exampleMsg) <= limit) {
 content.Example = exampleMsg
 }
 }
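The last hunk changes when a payload is kept as the OpenAPI example: a candidate replaces the stored example only if it is longer and, when a non-negative limit is configured, only if it fits under that limit. A small sketch of that predicate, pulled out as a named helper (the real code inlines the condition in fillContent):

```go
package oasutil

// shouldStoreExample restates the condition from the hunk above: prefer the
// longer example, but never store one that exceeds a non-negative limit.
// A negative limit means "no cap".
func shouldStoreExample(candidate, current string, limit int) bool {
	if len(candidate) <= len(current) {
		return false // keep the example we already have
	}
	return limit < 0 || len(candidate) <= limit
}
```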
@@ -48,7 +48,7 @@ func TestEntries(t *testing.T) {
 t.FailNow()
 }

-gen := NewDefaultOasGenerator()
+gen := NewDefaultOasGenerator(-1)
 gen.serviceSpecs = new(sync.Map)
 loadStartingOAS("test_artifacts/catalogue.json", "catalogue", gen.serviceSpecs)
 loadStartingOAS("test_artifacts/trcc.json", "trcc-api-service", gen.serviceSpecs)
@@ -122,7 +122,7 @@ func TestEntries(t *testing.T) {
 }

 func TestFileSingle(t *testing.T) {
-gen := NewDefaultOasGenerator()
+gen := NewDefaultOasGenerator(-1)
 gen.serviceSpecs = new(sync.Map)
 // loadStartingOAS()
 file := "test_artifacts/params.har"
@@ -212,7 +212,7 @@ func loadStartingOAS(file string, label string, specs *sync.Map) {
 }

 func TestEntriesNegative(t *testing.T) {
-gen := NewDefaultOasGenerator()
+gen := NewDefaultOasGenerator(-1)
 gen.serviceSpecs = new(sync.Map)
 files := []string{"invalid"}
 _, err := feedEntries(files, false, gen)
@@ -223,7 +223,7 @@ func TestEntriesNegative(t *testing.T) {
 }

 func TestEntriesPositive(t *testing.T) {
-gen := NewDefaultOasGenerator()
+gen := NewDefaultOasGenerator(-1)
 gen.serviceSpecs = new(sync.Map)
 files := []string{"test_artifacts/params.har"}
 _, err := feedEntries(files, false, gen)
@@ -333,7 +333,7 @@
 }
 }
 },
-"example": "agent-id=ade\u0026callback-url=\u0026token=sometoken",
+"example": "agent-id=ade\u0026callback-url=\u0026token=sometoken-second-val\u0026optional=another",
 "x-sample-entry": "000000000000000000000008"
 }
 },
@@ -1,47 +0,0 @@
-package providers
-
-import (
-"encoding/json"
-"fmt"
-"os"
-
-"github.com/up9inc/mizu/agent/pkg/models"
-"github.com/up9inc/mizu/shared"
-)
-
-var (
-authStatus *models.AuthStatus
-)
-
-func GetAuthStatus() (*models.AuthStatus, error) {
-if authStatus == nil {
-syncEntriesConfigJson := os.Getenv(shared.SyncEntriesConfigEnvVar)
-if syncEntriesConfigJson == "" {
-authStatus = &models.AuthStatus{}
-return authStatus, nil
-}
-
-syncEntriesConfig := &shared.SyncEntriesConfig{}
-err := json.Unmarshal([]byte(syncEntriesConfigJson), syncEntriesConfig)
-if err != nil {
-return nil, fmt.Errorf("failed to marshal sync entries config, err: %v", err)
-}
-
-if syncEntriesConfig.Token == "" {
-authStatus = &models.AuthStatus{}
-return authStatus, nil
-}
-
-tokenEmail, err := shared.GetTokenEmail(syncEntriesConfig.Token)
-if err != nil {
-return nil, fmt.Errorf("failed to get token email, err: %v", err)
-}
-
-authStatus = &models.AuthStatus{
-Email: tokenEmail,
-Model: syncEntriesConfig.Workspace,
-}
-}
-
-return authStatus, nil
-}
@@ -15,10 +15,6 @@ func StatusRoutes(ginApp *gin.Engine) {
 routeGroup.GET("/connectedTappersCount", controllers.GetConnectedTappersCount)
 routeGroup.GET("/tap", controllers.GetTappingStatus)

-routeGroup.GET("/auth", controllers.GetAuthStatus)
-
-routeGroup.GET("/analyze", controllers.AnalyzeInformation)
-
 routeGroup.GET("/general", controllers.GetGeneralStats) // get general stats about entries in DB

 routeGroup.GET("/resolving", controllers.GetCurrentResolvingInformation)
@@ -1,353 +0,0 @@
|
||||
package up9
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/zlib"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/up9inc/mizu/agent/pkg/har"
|
||||
"github.com/up9inc/mizu/agent/pkg/utils"
|
||||
|
||||
basenine "github.com/up9inc/basenine/client/go"
|
||||
"github.com/up9inc/mizu/logger"
|
||||
"github.com/up9inc/mizu/shared"
|
||||
tapApi "github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
const (
|
||||
AnalyzeCheckSleepTime = 5 * time.Second
|
||||
SentCountLogInterval = 100
|
||||
)
|
||||
|
||||
type GuestToken struct {
|
||||
Token string `json:"token"`
|
||||
Model string `json:"model"`
|
||||
}
|
||||
|
||||
type ModelStatus struct {
|
||||
LastMajorGeneration float64 `json:"lastMajorGeneration"`
|
||||
}
|
||||
|
||||
func GetRemoteUrl(analyzeDestination string, analyzeModel string, analyzeToken string, guestMode bool) string {
|
||||
if guestMode {
|
||||
return fmt.Sprintf("https://%s/share/%s", analyzeDestination, analyzeToken)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("https://%s/app/workspaces/%s", analyzeDestination, analyzeModel)
|
||||
}
|
||||
|
||||
func CheckIfModelReady(analyzeDestination string, analyzeModel string, analyzeToken string, guestMode bool) bool {
|
||||
statusUrl, _ := url.Parse(fmt.Sprintf("https://trcc.%s/models/%s/status", analyzeDestination, analyzeModel))
|
||||
|
||||
authHeader := getAuthHeader(guestMode)
|
||||
req := &http.Request{
|
||||
Method: http.MethodGet,
|
||||
URL: statusUrl,
|
||||
Header: map[string][]string{
|
||||
"Content-Type": {"application/json"},
|
||||
authHeader: {analyzeToken},
|
||||
},
|
||||
}
|
||||
statusResp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer statusResp.Body.Close()
|
||||
|
||||
target := &ModelStatus{}
|
||||
_ = json.NewDecoder(statusResp.Body).Decode(&target)
|
||||
|
||||
return target.LastMajorGeneration > 0
|
||||
}
|
||||
|
||||
func getAuthHeader(guestMode bool) string {
|
||||
if guestMode {
|
||||
return "Guest-Auth"
|
||||
}
|
||||
|
||||
return "Authorization"
|
||||
}
|
||||
|
||||
func GetTrafficDumpUrl(analyzeDestination string, analyzeModel string) *url.URL {
|
||||
strUrl := fmt.Sprintf("https://traffic.%s/dumpTrafficBulk/%s", analyzeDestination, analyzeModel)
|
||||
postUrl, _ := url.Parse(strUrl)
|
||||
return postUrl
|
||||
}
|
||||
|
||||
type AnalyzeInformation struct {
|
||||
IsAnalyzing bool
|
||||
GuestMode bool
|
||||
SentCount int
|
||||
AnalyzedModel string
|
||||
AnalyzeToken string
|
||||
AnalyzeDestination string
|
||||
}
|
||||
|
||||
func (info *AnalyzeInformation) Reset() {
|
||||
info.IsAnalyzing = false
|
||||
info.GuestMode = true
|
||||
info.AnalyzedModel = ""
|
||||
info.AnalyzeToken = ""
|
||||
info.AnalyzeDestination = ""
|
||||
info.SentCount = 0
|
||||
}
|
||||
|
||||
var analyzeInformation = &AnalyzeInformation{}
|
||||
|
||||
func GetAnalyzeInfo() *shared.AnalyzeStatus {
|
||||
return &shared.AnalyzeStatus{
|
||||
IsAnalyzing: analyzeInformation.IsAnalyzing,
|
||||
RemoteUrl: GetRemoteUrl(analyzeInformation.AnalyzeDestination, analyzeInformation.AnalyzedModel, analyzeInformation.AnalyzeToken, analyzeInformation.GuestMode),
|
||||
IsRemoteReady: CheckIfModelReady(analyzeInformation.AnalyzeDestination, analyzeInformation.AnalyzedModel, analyzeInformation.AnalyzeToken, analyzeInformation.GuestMode),
|
||||
SentCount: analyzeInformation.SentCount,
|
||||
}
|
||||
}
|
||||
|
||||
func SyncEntries(syncEntriesConfig *shared.SyncEntriesConfig) error {
|
||||
logger.Log.Infof("Sync entries - started")
|
||||
|
||||
var (
|
||||
token, model string
|
||||
guestMode bool
|
||||
)
|
||||
if syncEntriesConfig.Token == "" {
|
||||
logger.Log.Infof("Sync entries - creating anonymous token. env %s", syncEntriesConfig.Env)
|
||||
guestToken, err := createAnonymousToken(syncEntriesConfig.Env)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed creating anonymous token, err: %v", err)
|
||||
}
|
||||
|
||||
token = guestToken.Token
|
||||
model = guestToken.Model
|
||||
guestMode = true
|
||||
} else {
|
||||
token = fmt.Sprintf("bearer %s", syncEntriesConfig.Token)
|
||||
model = syncEntriesConfig.Workspace
|
||||
guestMode = false
|
||||
|
||||
logger.Log.Infof("Sync entries - upserting model. env %s, model %s", syncEntriesConfig.Env, model)
|
||||
if err := upsertModel(token, model, syncEntriesConfig.Env); err != nil {
|
||||
return fmt.Errorf("failed upserting model, err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
modelRegex, _ := regexp.Compile("[A-Za-z0-9][-A-Za-z0-9_.]*[A-Za-z0-9]+$")
|
||||
if len(model) > 63 || !modelRegex.MatchString(model) {
|
||||
return fmt.Errorf("invalid model name, model name: %s", model)
|
||||
}
|
||||
|
||||
logger.Log.Infof("Sync entries - syncing. token: %s, model: %s, guest mode: %v", token, model, guestMode)
|
||||
go syncEntriesImpl(token, model, syncEntriesConfig.Env, syncEntriesConfig.UploadIntervalSec, guestMode)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func upsertModel(token string, model string, envPrefix string) error {
|
||||
upsertModelUrl, _ := url.Parse(fmt.Sprintf("https://trcc.%s/models/%s", envPrefix, model))
|
||||
|
||||
authHeader := getAuthHeader(false)
|
||||
req := &http.Request{
|
||||
Method: http.MethodPost,
|
||||
URL: upsertModelUrl,
|
||||
Header: map[string][]string{
|
||||
authHeader: {token},
|
||||
},
|
||||
}
|
||||
|
||||
response, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed request to upsert model, err: %v", err)
|
||||
}
|
||||
|
||||
// In case the model is not created (not 201) and doesn't exists (not 409)
|
||||
if response.StatusCode != 201 && response.StatusCode != 409 {
|
||||
return fmt.Errorf("failed request to upsert model, status code: %v", response.StatusCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createAnonymousToken(envPrefix string) (*GuestToken, error) {
|
||||
tokenUrl := fmt.Sprintf("https://trcc.%s/anonymous/token", envPrefix)
|
||||
if strings.HasPrefix(envPrefix, "http") {
|
||||
tokenUrl = fmt.Sprintf("%s/api/token", envPrefix)
|
||||
}
|
||||
token := &GuestToken{}
|
||||
if err := getGuestToken(tokenUrl, token); err != nil {
|
||||
logger.Log.Infof("Failed to get token, %s", err)
|
||||
return nil, err
|
||||
}
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func getGuestToken(url string, target *GuestToken) error {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
logger.Log.Infof("Got token from the server, starting to json decode... status code: %v", resp.StatusCode)
|
||||
return json.NewDecoder(resp.Body).Decode(target)
|
||||
}
|
||||
|
||||
func syncEntriesImpl(token string, model string, envPrefix string, uploadIntervalSec int, guestMode bool) {
|
||||
analyzeInformation.IsAnalyzing = true
|
||||
analyzeInformation.GuestMode = guestMode
|
||||
analyzeInformation.AnalyzedModel = model
|
||||
analyzeInformation.AnalyzeToken = token
|
||||
analyzeInformation.AnalyzeDestination = envPrefix
|
||||
analyzeInformation.SentCount = 0
|
||||
|
||||
// "http or grpc" filter indicates that we're only interested in HTTP and gRPC entries
|
||||
query := "http or grpc"
|
||||
|
||||
logger.Log.Infof("Getting entries from the database")
|
||||
|
||||
BasenineReconnect:
|
||||
var connection *basenine.Connection
|
||||
var err error
|
||||
connection, err = basenine.NewConnection(shared.BasenineHost, shared.BaseninePort)
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Can't establish a new connection to Basenine server: %v", err)
|
||||
connection.Close()
|
||||
time.Sleep(shared.BasenineReconnectInterval * time.Second)
|
||||
goto BasenineReconnect
|
||||
}
|
||||
|
||||
data := make(chan []byte)
|
||||
meta := make(chan []byte)
|
||||
|
||||
defer func() {
|
||||
data <- []byte(basenine.CloseChannel)
|
||||
meta <- []byte(basenine.CloseChannel)
|
||||
connection.Close()
|
||||
}()
|
||||
|
||||
lastTimeSynced := time.Time{}
|
||||
|
||||
batch := make([]har.Entry, 0)
|
||||
|
||||
handleDataChannel := func(wg *sync.WaitGroup, connection *basenine.Connection, data chan []byte) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
dataBytes := <-data
|
||||
|
||||
if string(dataBytes) == basenine.CloseChannel {
|
||||
return
|
||||
}
|
||||
|
||||
var dataMap map[string]interface{}
|
||||
err = json.Unmarshal(dataBytes, &dataMap)
|
||||
|
||||
var entry tapApi.Entry
|
||||
if err := json.Unmarshal([]byte(dataBytes), &entry); err != nil {
|
||||
continue
|
||||
}
|
||||
harEntry, err := har.NewEntry(entry.Request, entry.Response, entry.StartTime, entry.ElapsedTime)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if entry.Source.Name != "" {
|
||||
harEntry.Request.Headers = append(harEntry.Request.Headers, har.Header{Name: "x-mizu-source", Value: entry.Source.Name})
|
||||
}
|
||||
if entry.Destination.Name != "" {
|
||||
harEntry.Request.Headers = append(harEntry.Request.Headers, har.Header{Name: "x-mizu-destination", Value: entry.Destination.Name})
|
||||
harEntry.Request.URL = utils.SetHostname(harEntry.Request.URL, entry.Destination.Name)
|
||||
}
|
||||
|
||||
batch = append(batch, *harEntry)
|
||||
|
||||
now := time.Now()
|
||||
if lastTimeSynced.Add(time.Duration(uploadIntervalSec) * time.Second).After(now) {
|
||||
continue
|
||||
}
|
||||
lastTimeSynced = now
|
||||
|
||||
body, jMarshalErr := json.Marshal(batch)
|
||||
batchSize := len(batch)
|
||||
if jMarshalErr != nil {
|
||||
analyzeInformation.Reset()
|
||||
logger.Log.Infof("Stopping sync entries")
|
||||
logger.Log.Fatal(jMarshalErr)
|
||||
}
|
||||
batch = make([]har.Entry, 0)
|
||||
|
||||
var in bytes.Buffer
|
||||
w := zlib.NewWriter(&in)
|
||||
_, _ = w.Write(body)
|
||||
_ = w.Close()
|
||||
reqBody := ioutil.NopCloser(bytes.NewReader(in.Bytes()))
|
||||
|
||||
authHeader := getAuthHeader(guestMode)
|
||||
req := &http.Request{
|
||||
Method: http.MethodPost,
|
||||
URL: GetTrafficDumpUrl(envPrefix, model),
|
||||
Header: map[string][]string{
|
||||
"Content-Encoding": {"deflate"},
|
||||
"Content-Type": {"application/octet-stream"},
|
||||
authHeader: {token},
|
||||
},
|
||||
Body: reqBody,
|
||||
}
|
||||
|
||||
if _, postErr := http.DefaultClient.Do(req); postErr != nil {
|
||||
analyzeInformation.Reset()
|
||||
logger.Log.Info("Stopping sync entries")
|
||||
logger.Log.Fatal(postErr)
|
||||
}
|
||||
analyzeInformation.SentCount += batchSize
|
||||
|
||||
if analyzeInformation.SentCount%SentCountLogInterval == 0 {
|
||||
logger.Log.Infof("Uploaded %v entries until now", analyzeInformation.SentCount)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
handleMetaChannel := func(wg *sync.WaitGroup, connection *basenine.Connection, meta chan []byte) {
|
||||
defer wg.Done()
|
||||
for {
|
||||
metaBytes := <-meta
|
||||
|
||||
if string(metaBytes) == basenine.CloseChannel {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
go handleDataChannel(&wg, connection, data)
|
||||
go handleMetaChannel(&wg, connection, meta)
|
||||
wg.Add(2)
|
||||
|
||||
if err = connection.Query("latest", query, data, meta); err != nil {
|
||||
logger.Log.Errorf("Query mode call failed: %v", err)
|
||||
connection.Close()
|
||||
time.Sleep(shared.BasenineReconnectInterval * time.Second)
|
||||
goto BasenineReconnect
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func UpdateAnalyzeStatus(callback func(data []byte)) {
|
||||
for {
|
||||
if !analyzeInformation.IsAnalyzing {
|
||||
time.Sleep(AnalyzeCheckSleepTime)
|
||||
continue
|
||||
}
|
||||
analyzeStatus := GetAnalyzeInfo()
|
||||
socketMessage := shared.CreateWebSocketMessageTypeAnalyzeStatus(*analyzeStatus)
|
||||
|
||||
jsonMessage, _ := json.Marshal(socketMessage)
|
||||
callback(jsonMessage)
|
||||
time.Sleep(AnalyzeCheckSleepTime)
|
||||
}
|
||||
}
|
||||
@@ -1,147 +0,0 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/up9inc/mizu/cli/config"
|
||||
"github.com/up9inc/mizu/cli/config/configStructs"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"github.com/up9inc/mizu/logger"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const loginTimeoutInMin = 2
|
||||
|
||||
// Ports are configured in keycloak "cli" client as valid redirect URIs. A change here must be reflected there as well.
|
||||
var listenPorts = []int{3141, 4001, 5002, 6003, 7004, 8005, 9006, 10007}
|
||||
|
||||
func Login() error {
|
||||
token, loginErr := loginInteractively()
|
||||
if loginErr != nil {
|
||||
return fmt.Errorf("failed login interactively, err: %v", loginErr)
|
||||
}
|
||||
|
||||
authConfig := configStructs.AuthConfig{
|
||||
EnvName: config.Config.Auth.EnvName,
|
||||
Token: token.AccessToken,
|
||||
}
|
||||
|
||||
if err := config.UpdateConfig(func(configStruct *config.ConfigStruct) { configStruct.Auth = authConfig }); err != nil {
|
||||
return fmt.Errorf("failed updating config with auth, err: %v", err)
|
||||
}
|
||||
|
||||
config.Config.Auth = authConfig
|
||||
|
||||
logger.Log.Infof("Login successfully, token stored in config path: %s", fmt.Sprintf(uiUtils.Purple, config.Config.ConfigFilePath))
|
||||
return nil
|
||||
}
|
||||
|
||||
func loginInteractively() (*oauth2.Token, error) {
|
||||
tokenChannel := make(chan *oauth2.Token)
|
||||
errorChannel := make(chan error)
|
||||
|
||||
server := http.Server{}
|
||||
go startLoginServer(tokenChannel, errorChannel, &server)
|
||||
|
||||
defer func() {
|
||||
if err := server.Shutdown(context.Background()); err != nil {
|
||||
logger.Log.Debugf("Error shutting down server, err: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(loginTimeoutInMin * time.Minute):
|
||||
return nil, errors.New("auth timed out")
|
||||
case err := <-errorChannel:
|
||||
return nil, err
|
||||
case token := <-tokenChannel:
|
||||
return token, nil
|
||||
}
|
||||
}
|
||||
|
||||
func startLoginServer(tokenChannel chan *oauth2.Token, errorChannel chan error, server *http.Server) {
|
||||
for _, port := range listenPorts {
|
||||
var authConfig = &oauth2.Config{
|
||||
ClientID: "cli",
|
||||
RedirectURL: fmt.Sprintf("http://localhost:%v/callback", port),
|
||||
Endpoint: oauth2.Endpoint{
|
||||
AuthURL: fmt.Sprintf("https://auth.%s/auth/realms/testr/protocol/openid-connect/auth", config.Config.Auth.EnvName),
|
||||
TokenURL: fmt.Sprintf("https://auth.%s/auth/realms/testr/protocol/openid-connect/token", config.Config.Auth.EnvName),
|
||||
},
|
||||
}
|
||||
|
||||
state := uuid.New()
|
||||
|
||||
mux := http.NewServeMux()
|
||||
server.Handler = mux
|
||||
mux.Handle("/callback", loginCallbackHandler(tokenChannel, errorChannel, authConfig, state))
|
||||
|
||||
listener, listenErr := net.Listen("tcp", fmt.Sprintf("%s:%d", "127.0.0.1", port))
|
||||
if listenErr != nil {
|
||||
logger.Log.Debugf("failed to start listening on port %v, err: %v", port, listenErr)
|
||||
continue
|
||||
}
|
||||
|
||||
authorizationUrl := authConfig.AuthCodeURL(state.String())
|
||||
uiUtils.OpenBrowser(authorizationUrl)
|
||||
|
||||
serveErr := server.Serve(listener)
|
||||
if serveErr == http.ErrServerClosed {
|
||||
logger.Log.Debugf("received server shutdown, server on port %v is closed", port)
|
||||
return
|
||||
} else if serveErr != nil {
|
||||
logger.Log.Debugf("failed to start serving on port %v, err: %v", port, serveErr)
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Log.Debugf("didn't receive server closed on port %v", port)
|
||||
return
|
||||
}
|
||||
|
||||
errorChannel <- fmt.Errorf("failed to start serving on all listen ports, ports: %v", listenPorts)
|
||||
}
|
||||
|
||||
func loginCallbackHandler(tokenChannel chan *oauth2.Token, errorChannel chan error, authConfig *oauth2.Config, state uuid.UUID) http.Handler {
|
||||
return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) {
|
||||
if err := request.ParseForm(); err != nil {
|
||||
errorMsg := fmt.Sprintf("failed to parse form, err: %v", err)
|
||||
http.Error(writer, errorMsg, http.StatusBadRequest)
|
||||
errorChannel <- fmt.Errorf(errorMsg)
|
||||
return
|
||||
}
|
||||
|
||||
requestState := request.Form.Get("state")
|
||||
if requestState != state.String() {
|
||||
errorMsg := fmt.Sprintf("state invalid, requestState: %v, authState:%v", requestState, state.String())
|
||||
http.Error(writer, errorMsg, http.StatusBadRequest)
|
||||
errorChannel <- fmt.Errorf(errorMsg)
|
||||
return
|
||||
}
|
||||
|
||||
code := request.Form.Get("code")
|
||||
if code == "" {
|
||||
errorMsg := "code not found"
|
||||
http.Error(writer, errorMsg, http.StatusBadRequest)
|
||||
errorChannel <- fmt.Errorf(errorMsg)
|
||||
return
|
||||
}
|
||||
|
||||
token, err := authConfig.Exchange(context.Background(), code)
|
||||
if err != nil {
|
||||
errorMsg := fmt.Sprintf("failed to create token, err: %v", err)
|
||||
http.Error(writer, errorMsg, http.StatusInternalServerError)
|
||||
errorChannel <- fmt.Errorf(errorMsg)
|
||||
return
|
||||
}
|
||||
|
||||
tokenChannel <- token
|
||||
|
||||
http.Redirect(writer, request, fmt.Sprintf("https://%s/CliLogin", config.Config.Auth.EnvName), http.StatusFound)
|
||||
})
|
||||
}
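
For context, the removed auth package implements the standard OAuth2 authorization-code flow with a loopback redirect. Below is a condensed sketch of that flow using golang.org/x/oauth2; the client ID, port, state value, and endpoint URLs are illustrative placeholders rather than the real UP9 configuration.

package main

import (
	"context"
	"fmt"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	// Illustrative values only; the real config is built per environment above.
	conf := &oauth2.Config{
		ClientID:    "cli",
		RedirectURL: "http://localhost:3141/callback",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://auth.example.invalid/auth",
			TokenURL: "https://auth.example.invalid/token",
		},
	}

	state := "random-state-value" // should be a cryptographically random value
	fmt.Println("Open this URL in a browser:", conf.AuthCodeURL(state))

	tokens := make(chan *oauth2.Token)
	http.HandleFunc("/callback", func(w http.ResponseWriter, r *http.Request) {
		if r.FormValue("state") != state {
			http.Error(w, "state mismatch", http.StatusBadRequest)
			return
		}
		// Exchange the authorization code for an access token.
		token, err := conf.Exchange(context.Background(), r.FormValue("code"))
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		fmt.Fprintln(w, "Login complete, you can close this tab.")
		tokens <- token
	})

	go func() { _ = http.ListenAndServe("127.0.0.1:3141", nil) }()
	fmt.Println("Access token:", (<-tokens).AccessToken)
}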
|
||||
@@ -1,8 +1,11 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/up9inc/mizu/cli/config/configStructs"
|
||||
"github.com/up9inc/mizu/cli/telemetry"
|
||||
"github.com/up9inc/mizu/logger"
|
||||
)
|
||||
|
||||
var installCmd = &cobra.Command{
|
||||
@@ -17,4 +20,11 @@ var installCmd = &cobra.Command{
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(installCmd)
|
||||
|
||||
defaultInstallConfig := configStructs.InstallConfig{}
|
||||
if err := defaults.Set(&defaultInstallConfig); err != nil {
|
||||
logger.Log.Debug(err)
|
||||
}
|
||||
|
||||
installCmd.Flags().BoolP(configStructs.OutInstallName, "o", defaultInstallConfig.Out, "print (to stdout) Kubernetes manifest used to install Mizu Pro edition")
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/up9inc/mizu/cli/bucket"
|
||||
"github.com/up9inc/mizu/cli/config"
|
||||
@@ -9,12 +10,23 @@ import (
|
||||
)
|
||||
|
||||
func runMizuInstall() {
|
||||
bucketProvider := bucket.NewProvider(config.Config.Install.TemplateUrl, bucket.DefaultTimeout)
|
||||
installTemplate, err := bucketProvider.GetInstallTemplate(config.Config.Install.TemplateName)
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Failed getting install template, err: %v", err)
|
||||
if config.Config.Install.Out {
|
||||
bucketProvider := bucket.NewProvider(config.Config.Install.TemplateUrl, bucket.DefaultTimeout)
|
||||
installTemplate, err := bucketProvider.GetInstallTemplate(config.Config.Install.TemplateName)
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Failed getting install template, err: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Print(installTemplate)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Print(installTemplate)
|
||||
var sb strings.Builder
|
||||
sb.WriteString("Hello! This command can be used to install Mizu Pro edition on your Kubernetes cluster.")
|
||||
sb.WriteString("\nPlease run:")
|
||||
sb.WriteString("\n\tmizu install -o | kubectl apply -n mizu -f -")
|
||||
sb.WriteString("\n\nor use helm chart as described in https://getmizu.io/docs/installing-mizu/centralized-installation\n")
|
||||
|
||||
fmt.Print(sb.String())
|
||||
}
|
||||
|
||||
@@ -2,24 +2,15 @@ package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/up9inc/mizu/cli/up9"
|
||||
|
||||
"github.com/creasty/defaults"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/up9inc/mizu/cli/auth"
|
||||
"github.com/up9inc/mizu/cli/config"
|
||||
"github.com/up9inc/mizu/cli/config/configStructs"
|
||||
"github.com/up9inc/mizu/cli/errormessage"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"github.com/up9inc/mizu/logger"
|
||||
"github.com/up9inc/mizu/shared"
|
||||
)
|
||||
|
||||
const uploadTrafficMessageToConfirm = `NOTE: running mizu with --%s flag will upload recorded traffic for further analysis and enriched presentation options.`
|
||||
|
||||
var tapCmd = &cobra.Command{
|
||||
Use: "tap [POD REGEX]",
|
||||
Short: "Record incoming traffic of a Kubernetes pod",
|
||||
@@ -40,67 +31,12 @@ Supported protocols are HTTP and gRPC.`,
|
||||
return errormessage.FormatError(err)
|
||||
}
|
||||
|
||||
if config.Config.Tap.Workspace != "" {
|
||||
askConfirmation(configStructs.WorkspaceTapName)
|
||||
|
||||
if config.Config.Auth.Token == "" {
|
||||
logger.Log.Infof("This action requires authentication, please log in to continue")
|
||||
if err := auth.Login(); err != nil {
|
||||
logger.Log.Errorf("failed to log in, err: %v", err)
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
tokenExpired, err := shared.IsTokenExpired(config.Config.Auth.Token)
|
||||
if err != nil {
|
||||
logger.Log.Errorf("failed to check if token is expired, err: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
if tokenExpired {
|
||||
logger.Log.Infof("Token expired, please log in again to continue")
|
||||
if err := auth.Login(); err != nil {
|
||||
logger.Log.Errorf("failed to log in, err: %v", err)
|
||||
return nil
|
||||
}
|
||||
} else if isValidToken := up9.IsTokenValid(config.Config.Auth.Token, config.Config.Auth.EnvName); !isValidToken {
|
||||
logger.Log.Errorf("Token is not valid, please log in again to continue")
|
||||
if err := auth.Login(); err != nil {
|
||||
logger.Log.Errorf("failed to log in, err: %v", err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if config.Config.Tap.Analysis {
|
||||
askConfirmation(configStructs.AnalysisTapName)
|
||||
|
||||
config.Config.Auth.Token = ""
|
||||
}
|
||||
|
||||
logger.Log.Infof("Mizu will store up to %s of traffic, old traffic will be cleared once the limit is reached.", config.Config.Tap.HumanMaxEntriesDBSize)
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func askConfirmation(flagName string) {
|
||||
logger.Log.Infof(fmt.Sprintf(uploadTrafficMessageToConfirm, flagName))
|
||||
|
||||
if !config.Config.Tap.AskUploadConfirmation {
|
||||
return
|
||||
}
|
||||
|
||||
if !uiUtils.AskForConfirmation("Would you like to proceed [Y/n]: ") {
|
||||
logger.Log.Infof("You can always run mizu without %s, aborting", flagName)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if err := config.UpdateConfig(func(configStruct *config.ConfigStruct) { configStruct.Tap.AskUploadConfirmation = false }); err != nil {
|
||||
logger.Log.Debugf("failed updating config with upload confirmation, err: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(tapCmd)
|
||||
|
||||
@@ -111,14 +47,12 @@ func init() {
|
||||
|
||||
tapCmd.Flags().Uint16P(configStructs.GuiPortTapName, "p", defaultTapConfig.GuiPort, "Provide a custom port for the web interface webserver")
|
||||
tapCmd.Flags().StringSliceP(configStructs.NamespacesTapName, "n", defaultTapConfig.Namespaces, "Namespaces selector")
|
||||
tapCmd.Flags().Bool(configStructs.AnalysisTapName, defaultTapConfig.Analysis, "Uploads traffic to UP9 for further analysis (Beta)")
|
||||
tapCmd.Flags().BoolP(configStructs.AllNamespacesTapName, "A", defaultTapConfig.AllNamespaces, "Tap all namespaces")
|
||||
tapCmd.Flags().StringSliceP(configStructs.PlainTextFilterRegexesTapName, "r", defaultTapConfig.PlainTextFilterRegexes, "List of regex expressions that are used to filter matching values from text/plain http bodies")
|
||||
tapCmd.Flags().Bool(configStructs.EnableRedactionTapName, defaultTapConfig.EnableRedaction, "Enables redaction of potentially sensitive request/response headers and body values")
|
||||
tapCmd.Flags().String(configStructs.HumanMaxEntriesDBSizeTapName, defaultTapConfig.HumanMaxEntriesDBSize, "Override the default max entries db size")
|
||||
tapCmd.Flags().String(configStructs.InsertionFilterName, defaultTapConfig.InsertionFilter, "Set the insertion filter. Accepts string or a file path.")
|
||||
tapCmd.Flags().Bool(configStructs.DryRunTapName, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
|
||||
tapCmd.Flags().StringP(configStructs.WorkspaceTapName, "w", defaultTapConfig.Workspace, "Uploads traffic to your UP9 workspace for further analysis (requires auth)")
|
||||
tapCmd.Flags().String(configStructs.EnforcePolicyFile, defaultTapConfig.EnforcePolicyFile, "Yaml file path with policy rules")
|
||||
tapCmd.Flags().String(configStructs.ContractFile, defaultTapConfig.ContractFile, "OAS/Swagger file to validate to monitor the contracts")
|
||||
tapCmd.Flags().Bool(configStructs.ServiceMeshName, defaultTapConfig.ServiceMesh, "Record decrypted traffic if the cluster is configured with a service mesh and with mtls")
|
||||
|
||||
@@ -124,7 +124,7 @@ func RunMizuTap() {
|
||||
}
|
||||
|
||||
logger.Log.Infof("Waiting for Mizu Agent to start...")
|
||||
if state.mizuServiceAccountExists, err = resources.CreateTapMizuResources(ctx, kubernetesProvider, serializedValidationRules, serializedContract, serializedMizuConfig, config.Config.IsNsRestrictedMode(), config.Config.MizuResourcesNamespace, config.Config.AgentImage, getSyncEntriesConfig(), config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.ApiServerResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
|
||||
if state.mizuServiceAccountExists, err = resources.CreateTapMizuResources(ctx, kubernetesProvider, serializedValidationRules, serializedContract, serializedMizuConfig, config.Config.IsNsRestrictedMode(), config.Config.MizuResourcesNamespace, config.Config.AgentImage, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.ApiServerResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
|
||||
var statusError *k8serrors.StatusError
|
||||
if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
|
||||
logger.Log.Info("Mizu is already running in this namespace, change the `mizu-resources-namespace` configuration or run `mizu clean` to remove the currently running Mizu instance")
|
||||
@@ -295,19 +295,6 @@ func getMizuApiFilteringOptions() (*api.TrafficFilteringOptions, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getSyncEntriesConfig() *shared.SyncEntriesConfig {
|
||||
if !config.Config.Tap.Analysis && config.Config.Tap.Workspace == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &shared.SyncEntriesConfig{
|
||||
Token: config.Config.Auth.Token,
|
||||
Env: config.Config.Auth.EnvName,
|
||||
Workspace: config.Config.Tap.Workspace,
|
||||
UploadIntervalSec: config.Config.Tap.UploadIntervalSec,
|
||||
}
|
||||
}
|
||||
|
||||
func watchApiServerPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
|
||||
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.ApiServerPodName))
|
||||
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
|
||||
|
||||
@@ -85,27 +85,6 @@ func WriteConfig(config *ConfigStruct) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type updateConfigStruct func(*ConfigStruct)
|
||||
|
||||
func UpdateConfig(updateConfigStruct updateConfigStruct) error {
|
||||
configFile, err := GetConfigWithDefaults()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed getting config with defaults, err: %v", err)
|
||||
}
|
||||
|
||||
if err := loadConfigFile(Config.ConfigFilePath, configFile); err != nil && !os.IsNotExist(err) {
|
||||
return fmt.Errorf("failed getting config file, err: %v", err)
|
||||
}
|
||||
|
||||
updateConfigStruct(configFile)
|
||||
|
||||
if err := WriteConfig(configFile); err != nil {
|
||||
return fmt.Errorf("failed writing config, err: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadConfigFile(configFilePath string, config *ConfigStruct) error {
|
||||
reader, openErr := os.Open(configFilePath)
|
||||
if openErr != nil {
|
||||
|
||||
@@ -27,7 +27,6 @@ type ConfigStruct struct {
|
||||
Version configStructs.VersionConfig `yaml:"version"`
|
||||
View configStructs.ViewConfig `yaml:"view"`
|
||||
Logs configStructs.LogsConfig `yaml:"logs"`
|
||||
Auth configStructs.AuthConfig `yaml:"auth"`
|
||||
Config configStructs.ConfigConfig `yaml:"config,omitempty"`
|
||||
AgentImage string `yaml:"agent-image,omitempty" readonly:""`
|
||||
ImagePullPolicyStr string `yaml:"image-pull-policy" default:"Always"`
|
||||
@@ -40,7 +39,7 @@ type ConfigStruct struct {
|
||||
HeadlessMode bool `yaml:"headless" default:"false"`
|
||||
LogLevelStr string `yaml:"log-level,omitempty" default:"INFO" readonly:""`
|
||||
ServiceMap bool `yaml:"service-map" default:"true"`
|
||||
OAS bool `yaml:"oas" default:"true"`
|
||||
OAS shared.OASConfig `yaml:"oas"`
|
||||
}
|
||||
|
||||
func (config *ConfigStruct) validate() error {
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
package configStructs
|
||||
|
||||
type AuthConfig struct {
|
||||
EnvName string `yaml:"env-name" default:"up9.app"`
|
||||
Token string `yaml:"token"`
|
||||
}
|
||||
@@ -1,6 +1,11 @@
|
||||
package configStructs
|
||||
|
||||
const (
|
||||
OutInstallName = "out"
|
||||
)
|
||||
|
||||
type InstallConfig struct {
|
||||
TemplateUrl string `yaml:"template-url" default:"https://storage.googleapis.com/static.up9.io/mizu/helm-template"`
|
||||
TemplateName string `yaml:"template-name" default:"helm-template.yaml"`
|
||||
Out bool `yaml:"out"`
|
||||
}
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package configStructs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"io/ioutil"
|
||||
@@ -18,14 +17,12 @@ import (
|
||||
const (
|
||||
GuiPortTapName = "gui-port"
|
||||
NamespacesTapName = "namespaces"
|
||||
AnalysisTapName = "analysis"
|
||||
AllNamespacesTapName = "all-namespaces"
|
||||
PlainTextFilterRegexesTapName = "regex-masking"
|
||||
EnableRedactionTapName = "redact"
|
||||
HumanMaxEntriesDBSizeTapName = "max-entries-db-size"
|
||||
InsertionFilterName = "insertion-filter"
|
||||
DryRunTapName = "dry-run"
|
||||
WorkspaceTapName = "workspace"
|
||||
EnforcePolicyFile = "traffic-validation-file"
|
||||
ContractFile = "contract"
|
||||
ServiceMeshName = "service-mesh"
|
||||
@@ -34,12 +31,10 @@ const (
|
||||
)
|
||||
|
||||
type TapConfig struct {
|
||||
UploadIntervalSec int `yaml:"upload-interval" default:"10"`
|
||||
PodRegexStr string `yaml:"regex" default:".*"`
|
||||
GuiPort uint16 `yaml:"gui-port" default:"8899"`
|
||||
ProxyHost string `yaml:"proxy-host" default:"127.0.0.1"`
|
||||
Namespaces []string `yaml:"namespaces"`
|
||||
Analysis bool `yaml:"analysis" default:"false"`
|
||||
AllNamespaces bool `yaml:"all-namespaces" default:"false"`
|
||||
PlainTextFilterRegexes []string `yaml:"regex-masking"`
|
||||
IgnoredUserAgents []string `yaml:"ignored-user-agents"`
|
||||
@@ -47,10 +42,8 @@ type TapConfig struct {
|
||||
HumanMaxEntriesDBSize string `yaml:"max-entries-db-size" default:"200MB"`
|
||||
InsertionFilter string `yaml:"insertion-filter" default:""`
|
||||
DryRun bool `yaml:"dry-run" default:"false"`
|
||||
Workspace string `yaml:"workspace"`
|
||||
EnforcePolicyFile string `yaml:"traffic-validation-file"`
|
||||
ContractFile string `yaml:"contract"`
|
||||
AskUploadConfirmation bool `yaml:"ask-upload-confirmation" default:"true"`
|
||||
ApiServerResources shared.Resources `yaml:"api-server-resources"`
|
||||
TapperResources shared.Resources `yaml:"tapper-resources"`
|
||||
ServiceMesh bool `yaml:"service-mesh" default:"false"`
|
||||
@@ -94,16 +87,5 @@ func (config *TapConfig) Validate() error {
|
||||
return fmt.Errorf("Could not parse --%s value %s", HumanMaxEntriesDBSizeTapName, config.HumanMaxEntriesDBSize)
|
||||
}
|
||||
|
||||
if config.Workspace != "" {
|
||||
workspaceRegex, _ := regexp.Compile("[A-Za-z0-9][-A-Za-z0-9_.]*[A-Za-z0-9]+$")
|
||||
if len(config.Workspace) > 63 || !workspaceRegex.MatchString(config.Workspace) {
|
||||
return errors.New("invalid workspace name")
|
||||
}
|
||||
}
|
||||
|
||||
if config.Analysis && config.Workspace != "" {
|
||||
return fmt.Errorf("Can't run with both --%s and --%s flags", AnalysisTapName, WorkspaceTapName)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -12,7 +12,6 @@ var (
|
||||
BuildTimestamp = "" // this var is overridden using ldflags in makefile when building
|
||||
RBACVersion = "v1"
|
||||
Platform = ""
|
||||
InstallModePersistentVolumeSizeBufferBytes = int64(500 * 1000 * 1000) //500mb
|
||||
)
|
||||
|
||||
const DEVENVVAR = "MIZU_DISABLE_TELEMTRY"
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
core "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func CreateTapMizuResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedValidationRules string, serializedContract string, serializedMizuConfig string, isNsRestrictedMode bool, mizuResourcesNamespace string, agentImage string, syncEntriesConfig *shared.SyncEntriesConfig, maxEntriesDBSizeBytes int64, apiServerResources shared.Resources, imagePullPolicy core.PullPolicy, logLevel logging.Level, profiler bool) (bool, error) {
|
||||
func CreateTapMizuResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedValidationRules string, serializedContract string, serializedMizuConfig string, isNsRestrictedMode bool, mizuResourcesNamespace string, agentImage string, maxEntriesDBSizeBytes int64, apiServerResources shared.Resources, imagePullPolicy core.PullPolicy, logLevel logging.Level, profiler bool) (bool, error) {
|
||||
if !isNsRestrictedMode {
|
||||
if err := createMizuNamespace(ctx, kubernetesProvider, mizuResourcesNamespace); err != nil {
|
||||
return false, err
|
||||
@@ -45,7 +45,6 @@ func CreateTapMizuResources(ctx context.Context, kubernetesProvider *kubernetes.
|
||||
KetoImage: "",
|
||||
ServiceAccountName: serviceAccountName,
|
||||
IsNamespaceRestricted: isNsRestrictedMode,
|
||||
SyncEntriesConfig: syncEntriesConfig,
|
||||
MaxEntriesDBSizeBytes: maxEntriesDBSizeBytes,
|
||||
Resources: apiServerResources,
|
||||
ImagePullPolicy: imagePullPolicy,
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
package uiUtils
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/up9inc/mizu/logger"
|
||||
)
|
||||
|
||||
func AskForConfirmation(s string) bool {
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
|
||||
fmt.Printf(Magenta, s)
|
||||
|
||||
response, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
logger.Log.Fatalf("Error while reading confirmation string, err: %v", err)
|
||||
}
|
||||
response = strings.ToLower(strings.TrimSpace(response))
|
||||
if response == "" || response == "y" || response == "yes" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
package up9
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
func IsTokenValid(tokenString string, envName string) bool {
|
||||
whoAmIUrl, _ := url.Parse(fmt.Sprintf("https://trcc.%s/admin/whoami", envName))
|
||||
|
||||
req := &http.Request{
|
||||
Method: http.MethodGet,
|
||||
URL: whoAmIUrl,
|
||||
Header: map[string][]string{
|
||||
"Authorization": {fmt.Sprintf("bearer %s", tokenString)},
|
||||
},
|
||||
}
|
||||
|
||||
response, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer response.Body.Close()
|
||||
|
||||
return response.StatusCode == http.StatusOK
|
||||
}
|
||||
12
devops/install-capstone.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
SUDO=''
|
||||
if (( $EUID != 0 )); then
|
||||
SUDO='sudo'
|
||||
fi
|
||||
|
||||
curl https://github.com/capstone-engine/capstone/archive/4.0.2.tar.gz -Lo ./capstone.tar.gz \
|
||||
&& tar -xzf capstone.tar.gz && mv ./capstone-* ./capstone \
|
||||
&& cd capstone \
|
||||
&& CAPSTONE_ARCHS="aarch64 x86" ./make.sh \
|
||||
&& $SUDO ./make.sh install
|
||||
@@ -17,4 +17,11 @@ RUN curl https://www.tcpdump.org/release/libpcap-1.10.1.tar.gz -Lo ./libpcap.tar
|
||||
WORKDIR /work/libpcap
|
||||
RUN ./configure --host=arm && make \
|
||||
&& cp /work/libpcap/libpcap.a /usr/xcc/aarch64-linux-musl-cross/lib/gcc/aarch64-linux-musl/*/
|
||||
WORKDIR /work
|
||||
|
||||
# Build and install Capstone from source
|
||||
RUN curl https://github.com/capstone-engine/capstone/archive/4.0.2.tar.gz -Lo ./capstone.tar.gz \
|
||||
&& tar -xzf capstone.tar.gz && mv ./capstone-* ./capstone
|
||||
WORKDIR /work/capstone
|
||||
RUN CAPSTONE_ARCHS="aarch64" CAPSTONE_STATIC=yes ./make.sh \
|
||||
&& cp /work/capstone/libcapstone.a /usr/xcc/aarch64-linux-musl-cross/lib/gcc/aarch64-linux-musl/*/
|
||||
4
devops/linux-arm64-musl-go-libpcap-capstone/build-push.sh
Executable file
@@ -0,0 +1,4 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
docker build . -t up9inc/linux-arm64-musl-go-libpcap-capstone && docker push up9inc/linux-arm64-musl-go-libpcap-capstone
|
||||
@@ -1,4 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
docker build . -t up9inc/linux-arm64-musl-go-libpcap && docker push up9inc/linux-arm64-musl-go-libpcap
|
||||
@@ -29,3 +29,11 @@ RUN curl https://www.tcpdump.org/release/libpcap-1.10.1.tar.gz -Lo ./libpcap.tar
|
||||
WORKDIR /libpcap
|
||||
RUN ./configure --host=x86_64 && make \
|
||||
&& cp /libpcap/libpcap.a /usr/local/musl/lib/gcc/x86_64-unknown-linux-musl/*/
|
||||
WORKDIR /
|
||||
|
||||
# Build and install Capstone from source
|
||||
RUN curl https://github.com/capstone-engine/capstone/archive/4.0.2.tar.gz -Lo ./capstone.tar.gz \
|
||||
&& tar -xzf capstone.tar.gz && mv ./capstone-* ./capstone
|
||||
WORKDIR /capstone
|
||||
RUN ./make.sh \
|
||||
&& cp /capstone/libcapstone.a /usr/local/musl/lib/gcc/x86_64-unknown-linux-musl/*/
|
||||
4
devops/linux-x86_64-musl-go-libpcap-capstone/build-push.sh
Executable file
@@ -0,0 +1,4 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
docker build . -t up9inc/linux-x86_64-musl-go-libpcap-capstone && docker push up9inc/linux-x86_64-musl-go-libpcap-capstone
|
||||
@@ -1,4 +0,0 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
docker build . -t up9inc/linux-x86_64-musl-go-libpcap && docker push up9inc/linux-x86_64-musl-go-libpcap
|
||||
@@ -2,7 +2,6 @@ package shared
|
||||
|
||||
const (
|
||||
MizuFilteringOptionsEnvVar = "SENSITIVE_DATA_FILTERING_OPTIONS"
|
||||
SyncEntriesConfigEnvVar = "SYNC_ENTRIES_CONFIG"
|
||||
HostModeEnvVar = "HOST_MODE"
|
||||
NodeNameEnvVar = "NODE_NAME"
|
||||
ConfigDirPath = "/app/config/"
|
||||
|
||||
@@ -32,7 +32,6 @@ import (
|
||||
"k8s.io/client-go/kubernetes"
|
||||
_ "k8s.io/client-go/plugin/pkg/client/auth"
|
||||
"k8s.io/client-go/rest"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
watchtools "k8s.io/client-go/tools/watch"
|
||||
@@ -41,7 +40,7 @@ import (
|
||||
type Provider struct {
|
||||
clientSet *kubernetes.Clientset
|
||||
kubernetesConfig clientcmd.ClientConfig
|
||||
clientConfig restclient.Config
|
||||
clientConfig rest.Config
|
||||
managedBy string
|
||||
createdBy string
|
||||
}
|
||||
@@ -88,6 +87,7 @@ func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) {
|
||||
}, nil
|
||||
}
|
||||
|
||||
//NewProviderInCluster Used in another repo that calls this function
|
||||
func NewProviderInCluster() (*Provider, error) {
|
||||
restClientConfig, err := rest.InClusterConfig()
|
||||
if err != nil {
|
||||
@@ -176,7 +176,6 @@ type ApiServerOptions struct {
|
||||
KetoImage string
|
||||
ServiceAccountName string
|
||||
IsNamespaceRestricted bool
|
||||
SyncEntriesConfig *shared.SyncEntriesConfig
|
||||
MaxEntriesDBSizeBytes int64
|
||||
Resources shared.Resources
|
||||
ImagePullPolicy core.PullPolicy
|
||||
@@ -185,14 +184,6 @@ type ApiServerOptions struct {
|
||||
}
|
||||
|
||||
func (provider *Provider) GetMizuApiServerPodObject(opts *ApiServerOptions, mountVolumeClaim bool, volumeClaimName string, createAuthContainer bool) (*core.Pod, error) {
|
||||
var marshaledSyncEntriesConfig []byte
|
||||
if opts.SyncEntriesConfig != nil {
|
||||
var err error
|
||||
if marshaledSyncEntriesConfig, err = json.Marshal(opts.SyncEntriesConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
configMapVolume := &core.ConfigMapVolumeSource{}
|
||||
configMapVolume.Name = ConfigMapName
|
||||
|
||||
@@ -264,10 +255,6 @@ func (provider *Provider) GetMizuApiServerPodObject(opts *ApiServerOptions, moun
|
||||
VolumeMounts: volumeMounts,
|
||||
Command: command,
|
||||
Env: []core.EnvVar{
|
||||
{
|
||||
Name: shared.SyncEntriesConfigEnvVar,
|
||||
Value: string(marshaledSyncEntriesConfig),
|
||||
},
|
||||
{
|
||||
Name: shared.LogLevelEnvVar,
|
||||
Value: opts.LogLevel.String(),
|
||||
@@ -1113,7 +1100,7 @@ func (provider *Provider) GetKubernetesVersion() (*semver.SemVersion, error) {
|
||||
return &serverVersionSemVer, nil
|
||||
}
|
||||
|
||||
func getClientSet(config *restclient.Config) (*kubernetes.Clientset, error) {
|
||||
func getClientSet(config *rest.Config) (*kubernetes.Clientset, error) {
|
||||
clientSet, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
@@ -19,7 +19,6 @@ const (
|
||||
WebSocketMessageTypeTappedEntry WebSocketMessageType = "tappedEntry"
|
||||
WebSocketMessageTypeUpdateStatus WebSocketMessageType = "status"
|
||||
WebSocketMessageTypeUpdateTappedPods WebSocketMessageType = "tappedPods"
|
||||
WebSocketMessageTypeAnalyzeStatus WebSocketMessageType = "analyzeStatus"
|
||||
WebSocketMessageTypeToast WebSocketMessageType = "toast"
|
||||
WebSocketMessageTypeQueryMetadata WebSocketMessageType = "queryMetadata"
|
||||
WebSocketMessageTypeStartTime WebSocketMessageType = "startTime"
|
||||
@@ -33,6 +32,11 @@ type Resources struct {
|
||||
MemoryRequests string `yaml:"memory-requests" default:"50Mi"`
|
||||
}
|
||||
|
||||
type OASConfig struct {
|
||||
Enable bool `yaml:"enabled" default:"true"`
|
||||
MaxExampleLen int `yaml:"max-example-len" default:"10240"`
|
||||
}
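
The oas flag changes from a plain bool to this structured block. A small sketch of how the defaults resolve and what the resulting YAML looks like, assuming the struct above; creasty/defaults is already used elsewhere in this diff, while the use of gopkg.in/yaml.v2 here is only for illustration.

package main

import (
	"fmt"

	"github.com/creasty/defaults"
	"gopkg.in/yaml.v2"
)

// Mirror of the OASConfig struct introduced above.
type OASConfig struct {
	Enable        bool `yaml:"enabled" default:"true"`
	MaxExampleLen int  `yaml:"max-example-len" default:"10240"`
}

func main() {
	var oas OASConfig
	if err := defaults.Set(&oas); err != nil { // fills enabled=true, max-example-len=10240
		panic(err)
	}

	out, _ := yaml.Marshal(oas)
	fmt.Print(string(out))
	// enabled: true
	// max-example-len: 10240
}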
|
||||
|
||||
type MizuAgentConfig struct {
|
||||
MaxDBSizeBytes int64 `json:"maxDBSizeBytes"`
|
||||
InsertionFilter string `json:"insertionFilter"`
|
||||
@@ -43,7 +47,7 @@ type MizuAgentConfig struct {
|
||||
MizuResourcesNamespace string `json:"mizuResourceNamespace"`
|
||||
AgentDatabasePath string `json:"agentDatabasePath"`
|
||||
ServiceMap bool `json:"serviceMap"`
|
||||
OAS bool `json:"oas"`
|
||||
OAS OASConfig `json:"oas"`
|
||||
Telemetry bool `json:"telemetry"`
|
||||
}
|
||||
|
||||
@@ -51,17 +55,6 @@ type WebSocketMessageMetadata struct {
|
||||
MessageType WebSocketMessageType `json:"messageType,omitempty"`
|
||||
}
|
||||
|
||||
type WebSocketAnalyzeStatusMessage struct {
|
||||
*WebSocketMessageMetadata
|
||||
AnalyzeStatus AnalyzeStatus `json:"analyzeStatus"`
|
||||
}
|
||||
|
||||
type AnalyzeStatus struct {
|
||||
IsAnalyzing bool `json:"isAnalyzing"`
|
||||
RemoteUrl string `json:"remoteUrl"`
|
||||
IsRemoteReady bool `json:"isRemoteReady"`
|
||||
SentCount int `json:"sentCount"`
|
||||
}
|
||||
|
||||
type WebSocketStatusMessage struct {
|
||||
*WebSocketMessageMetadata
|
||||
@@ -116,13 +109,6 @@ type TLSLinkInfo struct {
|
||||
ResolvedSourceName string `json:"resolvedSourceName"`
|
||||
}
|
||||
|
||||
type SyncEntriesConfig struct {
|
||||
Token string `json:"token"`
|
||||
Env string `json:"env"`
|
||||
Workspace string `json:"workspace"`
|
||||
UploadIntervalSec int `json:"interval"`
|
||||
}
|
||||
|
||||
func CreateWebSocketStatusMessage(tappedPodsStatus []TappedPodStatus) WebSocketStatusMessage {
|
||||
return WebSocketStatusMessage{
|
||||
WebSocketMessageMetadata: &WebSocketMessageMetadata{
|
||||
@@ -141,15 +127,6 @@ func CreateWebSocketTappedPodsMessage(nodeToTappedPodMap NodeToPodsMap) WebSocke
|
||||
}
|
||||
}
|
||||
|
||||
func CreateWebSocketMessageTypeAnalyzeStatus(analyzeStatus AnalyzeStatus) WebSocketAnalyzeStatusMessage {
|
||||
return WebSocketAnalyzeStatusMessage{
|
||||
WebSocketMessageMetadata: &WebSocketMessageMetadata{
|
||||
MessageType: WebSocketMessageTypeAnalyzeStatus,
|
||||
},
|
||||
AnalyzeStatus: analyzeStatus,
|
||||
}
|
||||
}
|
||||
|
||||
type HealthResponse struct {
|
||||
TappedPods []*PodInfo `json:"tappedPods"`
|
||||
ConnectedTappersCount int `json:"connectedTappersCount"`
|
||||
|
||||
@@ -1,41 +0,0 @@
|
||||
package shared
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/golang-jwt/jwt/v4"
|
||||
"time"
|
||||
)
|
||||
|
||||
func IsTokenExpired(tokenString string) (bool, error) {
|
||||
claims, err := getTokenClaims(tokenString)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
|
||||
expiry := time.Unix(int64(claims["exp"].(float64)), 0)
|
||||
|
||||
return time.Now().After(expiry), nil
|
||||
}
|
||||
|
||||
func GetTokenEmail(tokenString string) (string, error) {
|
||||
claims, err := getTokenClaims(tokenString)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return claims["email"].(string), nil
|
||||
}
|
||||
|
||||
func getTokenClaims(tokenString string) (jwt.MapClaims, error) {
|
||||
token, _, err := new(jwt.Parser).ParseUnverified(tokenString, jwt.MapClaims{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse token, err: %v", err)
|
||||
}
|
||||
|
||||
claims, ok := token.Claims.(jwt.MapClaims)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("can't convert token's claims to standard claims")
|
||||
}
|
||||
|
||||
return claims, nil
|
||||
}
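
The removed token.go helpers read claims without verifying the signature; expiry is judged purely from the exp claim. A self-contained sketch of the same idea using github.com/golang-jwt/jwt/v4; the demo token and secret are made up for illustration.

package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v4"
)

// isExpired mirrors IsTokenExpired above: parse without verification and
// compare the exp claim against the current time.
func isExpired(tokenString string) (bool, error) {
	token, _, err := new(jwt.Parser).ParseUnverified(tokenString, jwt.MapClaims{})
	if err != nil {
		return true, err
	}
	claims := token.Claims.(jwt.MapClaims)
	expiry := time.Unix(int64(claims["exp"].(float64)), 0)
	return time.Now().After(expiry), nil
}

func main() {
	// Build a throwaway token that expires in one hour, just for the demo.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"email": "user@example.com",
		"exp":   time.Now().Add(time.Hour).Unix(),
	})
	signed, _ := token.SignedString([]byte("demo-secret"))

	expired, err := isExpired(signed)
	fmt.Println(expired, err) // false <nil>
}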
|
||||
@@ -3,10 +3,12 @@ module github.com/up9inc/mizu/tap
|
||||
go 1.17
|
||||
|
||||
require (
|
||||
github.com/cilium/ebpf v0.8.0
|
||||
github.com/Masterminds/semver v1.5.0
|
||||
github.com/cilium/ebpf v0.8.1
|
||||
github.com/go-errors/errors v1.4.2
|
||||
github.com/google/gopacket v1.1.19
|
||||
github.com/hashicorp/golang-lru v0.5.4
|
||||
github.com/knightsc/gapstone v0.0.0-20211014144438-5e0e64002a6e
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible
|
||||
github.com/struCoder/pidusage v0.2.1
|
||||
github.com/up9inc/mizu/logger v0.0.0
|
||||
|
||||
11
tap/go.sum
@@ -1,12 +1,14 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
|
||||
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cilium/ebpf v0.8.0 h1:2V6KSg3FRADVU2BMIRemZ0hV+9OM+aAHhZDjQyjJTAs=
|
||||
github.com/cilium/ebpf v0.8.0/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
|
||||
github.com/cilium/ebpf v0.8.1 h1:bLSSEbBLqGPXxls55pGr5qWZaTqcmfDJHhou7t254ao=
|
||||
github.com/cilium/ebpf v0.8.1/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -81,6 +83,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/knightsc/gapstone v0.0.0-20211014144438-5e0e64002a6e h1:6J5obSn9umEThiYzWzndcPOZR0Qj/sVCZpH6V1G7yNE=
|
||||
github.com/knightsc/gapstone v0.0.0-20211014144438-5e0e64002a6e/go.mod h1:1K5hEzsMBLTPdRJKEHqBFJ8Zt2VRqDhomcQ11KH0WW4=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||
@@ -290,5 +294,6 @@ sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
|
||||
@@ -14,9 +14,9 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"strconv"
|
||||
|
||||
"github.com/shirou/gopsutil/cpu"
|
||||
"github.com/struCoder/pidusage"
|
||||
@@ -59,8 +59,8 @@ var tls = flag.Bool("tls", false, "Enable TLS tapper")
|
||||
var memprofile = flag.String("memprofile", "", "Write memory profile")
|
||||
|
||||
type TapOpts struct {
|
||||
HostMode bool
|
||||
IgnoredPorts []uint16
|
||||
HostMode bool
|
||||
IgnoredPorts []uint16
|
||||
}
|
||||
|
||||
var extensions []*api.Extension // global
|
||||
@@ -100,7 +100,7 @@ func UpdateTapTargets(newTapTargets []v1.Pod) {
|
||||
|
||||
packetSourceManager.UpdatePods(tapTargets, !*nodefrag, mainPacketInputChan)
|
||||
|
||||
if tlsTapperInstance != nil {
|
||||
if tlsTapperInstance != nil && os.Getenv("MIZU_GLOBAL_GOLANG_PID") == "" {
|
||||
if err := tlstapper.UpdateTapTargets(tlsTapperInstance, &tapTargets, *procfs); err != nil {
|
||||
tlstapper.LogError(err)
|
||||
success = false
|
||||
@@ -278,7 +278,16 @@ func startTlsTapper(extension *api.Extension, outputItems chan *api.OutputChanne
|
||||
// A quick way to instrument libssl.so without PID filtering - used for debugging and troubleshooting
|
||||
//
|
||||
if os.Getenv("MIZU_GLOBAL_SSL_LIBRARY") != "" {
|
||||
if err := tls.GlobalTap(os.Getenv("MIZU_GLOBAL_SSL_LIBRARY")); err != nil {
|
||||
if err := tls.GlobalSsllibTap(os.Getenv("MIZU_GLOBAL_SSL_LIBRARY")); err != nil {
|
||||
tlstapper.LogError(err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// A quick way to instrument Go `crypto/tls` without PID filtering - used for debugging and troubleshooting
|
||||
//
|
||||
if os.Getenv("MIZU_GLOBAL_GOLANG_PID") != "" {
|
||||
if err := tls.GlobalGoTap(*procfs, os.Getenv("MIZU_GLOBAL_GOLANG_PID")); err != nil {
|
||||
tlstapper.LogError(err)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
FROM alpine:3.14
|
||||
FROM golang:1.17-alpine
|
||||
|
||||
RUN apk --no-cache update && apk --no-cache add clang llvm libbpf-dev go linux-headers
|
||||
RUN apk --no-cache update && apk --no-cache add clang llvm libbpf-dev linux-headers
|
||||
|
||||
WORKDIR /mizu
|
||||
|
||||
145
tap/tlstapper/bpf/common.c
Normal file
@@ -0,0 +1,145 @@
|
||||
/*
|
||||
Note: This file is licensed differently from the rest of the project
|
||||
SPDX-License-Identifier: GPL-2.0
|
||||
Copyright (C) UP9 Inc.
|
||||
*/
|
||||
|
||||
#include "include/headers.h"
|
||||
#include "include/util.h"
|
||||
#include "include/maps.h"
|
||||
#include "include/log.h"
|
||||
#include "include/logger_messages.h"
|
||||
#include "include/common.h"
|
||||
|
||||
|
||||
static __always_inline int add_address_to_chunk(struct pt_regs *ctx, struct tls_chunk* chunk, __u64 id, __u32 fd) {
|
||||
__u32 pid = id >> 32;
|
||||
__u64 key = (__u64) pid << 32 | fd;
|
||||
|
||||
struct fd_info *fdinfo = bpf_map_lookup_elem(&file_descriptor_to_ipv4, &key);
|
||||
|
||||
if (fdinfo == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int err = bpf_probe_read(chunk->address, sizeof(chunk->address), fdinfo->ipv4_addr);
|
||||
chunk->flags |= (fdinfo->flags & FLAGS_IS_CLIENT_BIT);
|
||||
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_FD_ADDRESS, id, err, 0l);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static __always_inline void send_chunk_part(struct pt_regs *ctx, __u8* buffer, __u64 id,
|
||||
struct tls_chunk* chunk, int start, int end) {
|
||||
size_t recorded = MIN(end - start, sizeof(chunk->data));
|
||||
|
||||
if (recorded <= 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
chunk->recorded = recorded;
|
||||
chunk->start = start;
|
||||
|
||||
// This ugly trick is to keep the eBPF verifier happy
|
||||
//
|
||||
long err = 0;
|
||||
if (chunk->recorded == sizeof(chunk->data)) {
|
||||
err = bpf_probe_read(chunk->data, sizeof(chunk->data), buffer + start);
|
||||
} else {
|
||||
recorded &= (sizeof(chunk->data) - 1); // Buffer must be N^2
|
||||
err = bpf_probe_read(chunk->data, recorded, buffer + start);
|
||||
}
|
||||
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_FROM_SSL_BUFFER, id, err, 0l);
|
||||
return;
|
||||
}
|
||||
|
||||
bpf_perf_event_output(ctx, &chunks_buffer, BPF_F_CURRENT_CPU, chunk, sizeof(struct tls_chunk));
|
||||
}
|
||||
|
||||
static __always_inline void send_chunk(struct pt_regs *ctx, __u8* buffer, __u64 id, struct tls_chunk* chunk) {
|
||||
// ebpf loops must be bounded at compile time, we can't use (i < chunk->len / CHUNK_SIZE)
|
||||
//
|
||||
// https://lwn.net/Articles/794934/
|
||||
//
|
||||
// However, we want to run on kernels older than 5.3, hence we use "#pragma unroll" anyway
|
||||
//
|
||||
#pragma unroll
|
||||
for (int i = 0; i < MAX_CHUNKS_PER_OPERATION; i++) {
|
||||
if (chunk->len <= (CHUNK_SIZE * i)) {
|
||||
break;
|
||||
}
|
||||
|
||||
send_chunk_part(ctx, buffer, id, chunk, CHUNK_SIZE * i, chunk->len);
|
||||
}
|
||||
}
|
||||
|
||||
static __always_inline void output_ssl_chunk(struct pt_regs *ctx, struct ssl_info* info, int count_bytes, __u64 id, __u32 flags) {
|
||||
if (count_bytes <= 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (count_bytes > (CHUNK_SIZE * MAX_CHUNKS_PER_OPERATION)) {
|
||||
log_error(ctx, LOG_ERROR_BUFFER_TOO_BIG, id, count_bytes, 0l);
|
||||
return;
|
||||
}
|
||||
|
||||
struct tls_chunk* chunk;
|
||||
int zero = 0;
|
||||
|
||||
// If another thread running on the same CPU gets to this point at the same time as us (context switch)
|
||||
// the data will be corrupted - protection may be added in the future
|
||||
//
|
||||
chunk = bpf_map_lookup_elem(&heap, &zero);
|
||||
|
||||
if (!chunk) {
|
||||
log_error(ctx, LOG_ERROR_ALLOCATING_CHUNK, id, 0l, 0l);
|
||||
return;
|
||||
}
|
||||
|
||||
chunk->flags = flags;
|
||||
chunk->pid = id >> 32;
|
||||
chunk->tgid = id;
|
||||
chunk->len = count_bytes;
|
||||
chunk->fd = info->fd;
|
||||
|
||||
if (!add_address_to_chunk(ctx, chunk, id, chunk->fd)) {
|
||||
// Without an address, we drop the chunk because there is not much to do with it in Go
|
||||
//
|
||||
return;
|
||||
}
|
||||
|
||||
send_chunk(ctx, info->buffer, id, chunk);
|
||||
}
|
||||
|
||||
static __always_inline struct ssl_info new_ssl_info() {
|
||||
struct ssl_info info = { .fd = invalid_fd, .created_at_nano = bpf_ktime_get_ns() };
|
||||
return info;
|
||||
}
|
||||
|
||||
static __always_inline struct ssl_info lookup_ssl_info(struct pt_regs *ctx, struct bpf_map_def* map_fd, __u64 pid_tgid) {
|
||||
struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &pid_tgid);
|
||||
struct ssl_info info = new_ssl_info();
|
||||
|
||||
if (infoPtr != NULL) {
|
||||
long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);
|
||||
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_SSL_CONTEXT, pid_tgid, err, ORIGIN_SSL_UPROBE_CODE);
|
||||
}
|
||||
|
||||
if ((bpf_ktime_get_ns() - info.created_at_nano) > SSL_INFO_MAX_TTL_NANO) {
|
||||
// If the ssl info is too old, we don't want to use its info because it may be incorrect.
|
||||
//
|
||||
info.fd = invalid_fd;
|
||||
info.created_at_nano = bpf_ktime_get_ns();
|
||||
}
|
||||
}
|
||||
|
||||
return info;
|
||||
}
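
output_ssl_chunk above publishes each tls_chunk through the chunks_buffer perf event array. On the Go side this kind of output is typically consumed with the perf reader from cilium/ebpf, which is already a dependency in tap/go.mod. A hedged sketch follows; the object file name "tlstapper.o" and the assumption that the loaded collection exposes a map named chunks_buffer are illustrative, not taken from this diff.

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/cilium/ebpf"
	"github.com/cilium/ebpf/perf"
)

func main() {
	// Assumption: the compiled BPF object exposes the chunks_buffer perf map.
	spec, err := ebpf.LoadCollectionSpec("tlstapper.o")
	if err != nil {
		panic(err)
	}
	coll, err := ebpf.NewCollection(spec)
	if err != nil {
		panic(err)
	}
	defer coll.Close()

	reader, err := perf.NewReader(coll.Maps["chunks_buffer"], os.Getpagesize())
	if err != nil {
		panic(err)
	}
	defer reader.Close()

	for {
		record, err := reader.Read()
		if err != nil {
			if errors.Is(err, perf.ErrClosed) {
				return
			}
			continue
		}
		// record.RawSample holds one struct tls_chunk, laid out exactly as in C.
		fmt.Printf("got %d bytes from the chunks_buffer perf buffer\n", len(record.RawSample))
	}
}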
|
||||
@@ -28,7 +28,7 @@ void sys_enter_read(struct sys_enter_read_ctx *ctx) {
|
||||
return;
|
||||
}
|
||||
|
||||
struct ssl_info *infoPtr = bpf_map_lookup_elem(&ssl_read_context, &id);
|
||||
struct ssl_info *infoPtr = bpf_map_lookup_elem(&openssl_read_context, &id);
|
||||
|
||||
if (infoPtr == NULL) {
|
||||
return;
|
||||
@@ -44,7 +44,7 @@ void sys_enter_read(struct sys_enter_read_ctx *ctx) {
|
||||
|
||||
info.fd = ctx->fd;
|
||||
|
||||
err = bpf_map_update_elem(&ssl_read_context, &id, &info, BPF_ANY);
|
||||
err = bpf_map_update_elem(&openssl_read_context, &id, &info, BPF_ANY);
|
||||
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_PUTTING_FILE_DESCRIPTOR, id, err, ORIGIN_SYS_ENTER_READ_CODE);
|
||||
@@ -68,7 +68,7 @@ void sys_enter_write(struct sys_enter_write_ctx *ctx) {
|
||||
return;
|
||||
}
|
||||
|
||||
struct ssl_info *infoPtr = bpf_map_lookup_elem(&ssl_write_context, &id);
|
||||
struct ssl_info *infoPtr = bpf_map_lookup_elem(&openssl_write_context, &id);
|
||||
|
||||
if (infoPtr == NULL) {
|
||||
return;
|
||||
@@ -84,7 +84,7 @@ void sys_enter_write(struct sys_enter_write_ctx *ctx) {
|
||||
|
||||
info.fd = ctx->fd;
|
||||
|
||||
err = bpf_map_update_elem(&ssl_write_context, &id, &info, BPF_ANY);
|
||||
err = bpf_map_update_elem(&openssl_write_context, &id, &info, BPF_ANY);
|
||||
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_PUTTING_FILE_DESCRIPTOR, id, err, ORIGIN_SYS_ENTER_WRITE_CODE);
|
||||
|
||||
154
tap/tlstapper/bpf/go_uprobes.c
Normal file
@@ -0,0 +1,154 @@
|
||||
/*
|
||||
Note: This file is licensed differently from the rest of the project
|
||||
SPDX-License-Identifier: GPL-2.0
|
||||
Copyright (C) UP9 Inc.
|
||||
|
||||
|
||||
---
|
||||
|
||||
README
|
||||
|
||||
Go does not follow any platform ABI like the x86-64 System V ABI.
Before 1.17, Go followed a stack-based Plan 9 (Bell Labs) calling convention (ABI0).
Since 1.17, Go uses an internal register-based calling convention (ABIInternal).
For now, the probes in this file support only ABIInternal (Go 1.17+).

`uretprobe` in the Linux kernel uses a trampoline pattern to jump to the original return
address of the probed function. A goroutine's stack is 2KB, while a C thread's stack is 2MB on Linux.
If the stack grows beyond 2KB, the Go runtime relocates it. That causes the
return address to become incorrect in the case of a `uretprobe`, and the probed Go program crashes.
Therefore `uretprobe` CAN'T BE USED for a Go program.

Probes with the `_ex_uprobe` suffix, which would normally be `uretprobe`(s), are actually `uprobe`(s)
because of the non-standard ABI of Go. Therefore we probe every `ret` mnemonic under the symbol,
finding them automatically by reading the ELF binary and disassembling the symbols.
The disassembly-related code is located in the `go_offsets.go` file and uses Capstone Engine.
Solution based on: https://github.com/iovisor/bcc/issues/1320#issuecomment-407927542
*Example*: We probe an arbitrary point in a function body (offset +559):
https://github.com/golang/go/blob/go1.17.6/src/crypto/tls/conn.go#L1299

We get the file descriptor from the common $rax register, which holds the address
of `go.itab.*net.TCPConn,net.Conn`, through a series of dereferences
done with `bpf_probe_read` calls in the `go_crypto_tls_get_fd_from_tcp_conn` function.
|
||||
|
||||
---
|
||||
|
||||
SOURCES:
|
||||
|
||||
Tracing Go Functions with eBPF (before 1.17): https://www.grant.pizza/blog/tracing-go-functions-with-ebpf-part-2/
|
||||
Challenges of BPF Tracing Go: https://blog.0x74696d.com/posts/challenges-of-bpf-tracing-go/
|
||||
x86 calling conventions: https://en.wikipedia.org/wiki/X86_calling_conventions
|
||||
Plan 9 from Bell Labs: https://en.wikipedia.org/wiki/Plan_9_from_Bell_Labs
|
||||
The issue for calling convention change in Go: https://github.com/golang/go/issues/40724
|
||||
Proposal of Register-based Go calling convention: https://go.googlesource.com/proposal/+/master/design/40724-register-calling.md
|
||||
Go internal ABI (1.17) specification: https://go.googlesource.com/go/+/refs/heads/dev.regabi/src/cmd/compile/internal-abi.md
|
||||
Go internal ABI (current) specification: https://go.googlesource.com/go/+/refs/heads/master/src/cmd/compile/abi-internal.md
|
||||
A Quick Guide to Go's Assembler: https://go.googlesource.com/go/+/refs/heads/dev.regabi/doc/asm.html
|
||||
Dissecting Go Binaries: https://www.grant.pizza/blog/dissecting-go-binaries/
|
||||
Capstone Engine: https://www.capstone-engine.org/
|
||||
*/
|
||||
|
||||
#include "include/headers.h"
|
||||
#include "include/util.h"
|
||||
#include "include/maps.h"
|
||||
#include "include/log.h"
|
||||
#include "include/logger_messages.h"
|
||||
#include "include/pids.h"
|
||||
#include "include/common.h"
|
||||
#include "include/go_abi_internal.h"
|
||||
#include "include/go_types.h"
|
||||
|
||||
static __always_inline __u32 go_crypto_tls_get_fd_from_tcp_conn(struct pt_regs *ctx) {
|
||||
struct go_interface conn;
|
||||
long err = bpf_probe_read(&conn, sizeof(conn), (void*)GO_ABI_INTERNAL_PT_REGS_R1(ctx));
|
||||
if (err != 0) {
|
||||
return invalid_fd;
|
||||
}
|
||||
|
||||
void* net_fd_ptr;
|
||||
err = bpf_probe_read(&net_fd_ptr, sizeof(net_fd_ptr), conn.ptr);
|
||||
if (err != 0) {
|
||||
return invalid_fd;
|
||||
}
|
||||
|
||||
__u32 fd;
|
||||
err = bpf_probe_read(&fd, sizeof(fd), net_fd_ptr + 0x10);
|
||||
if (err != 0) {
|
||||
return invalid_fd;
|
||||
}
|
||||
|
||||
return fd;
|
||||
}
|
||||
|
||||
static __always_inline void go_crypto_tls_uprobe(struct pt_regs *ctx, struct bpf_map_def* go_context) {
|
||||
__u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
__u64 pid = pid_tgid >> 32;
|
||||
if (!should_tap(pid)) {
|
||||
return;
|
||||
}
|
||||
|
||||
struct ssl_info info = new_ssl_info();
|
||||
|
||||
info.buffer_len = GO_ABI_INTERNAL_PT_REGS_R2(ctx);
|
||||
info.buffer = (void*)GO_ABI_INTERNAL_PT_REGS_R4(ctx);
|
||||
info.fd = go_crypto_tls_get_fd_from_tcp_conn(ctx);
|
||||
|
||||
// GO_ABI_INTERNAL_PT_REGS_GP is Goroutine address
|
||||
__u64 pid_fp = pid << 32 | GO_ABI_INTERNAL_PT_REGS_GP(ctx);
|
||||
long err = bpf_map_update_elem(go_context, &pid_fp, &info, BPF_ANY);
|
||||
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_PUTTING_SSL_CONTEXT, pid_tgid, err, 0l);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static __always_inline void go_crypto_tls_ex_uprobe(struct pt_regs *ctx, struct bpf_map_def* go_context, __u32 flags) {
|
||||
__u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
__u64 pid = pid_tgid >> 32;
|
||||
if (!should_tap(pid)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// GO_ABI_INTERNAL_PT_REGS_GP is Goroutine address
|
||||
__u64 pid_fp = pid << 32 | GO_ABI_INTERNAL_PT_REGS_GP(ctx);
|
||||
struct ssl_info *info_ptr = bpf_map_lookup_elem(go_context, &pid_fp);
|
||||
|
||||
if (info_ptr == NULL) {
|
||||
return;
|
||||
}
|
||||
bpf_map_delete_elem(go_context, &pid_fp);
|
||||
|
||||
struct ssl_info info;
|
||||
long err = bpf_probe_read(&info, sizeof(struct ssl_info), info_ptr);
|
||||
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_SSL_CONTEXT, pid_tgid, err, ORIGIN_SSL_URETPROBE_CODE);
|
||||
return;
|
||||
}
|
||||
|
||||
output_ssl_chunk(ctx, &info, info.buffer_len, pid_tgid, flags);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
SEC("uprobe/go_crypto_tls_write")
|
||||
void BPF_KPROBE(go_crypto_tls_write) {
|
||||
go_crypto_tls_uprobe(ctx, &go_write_context);
|
||||
}
|
||||
|
||||
SEC("uprobe/go_crypto_tls_write_ex")
|
||||
void BPF_KPROBE(go_crypto_tls_write_ex) {
|
||||
go_crypto_tls_ex_uprobe(ctx, &go_write_context, 0);
|
||||
}
|
||||
|
||||
SEC("uprobe/go_crypto_tls_read")
|
||||
void BPF_KPROBE(go_crypto_tls_read) {
|
||||
go_crypto_tls_uprobe(ctx, &go_read_context);
|
||||
}
|
||||
|
||||
SEC("uprobe/go_crypto_tls_read_ex")
|
||||
void BPF_KPROBE(go_crypto_tls_read_ex) {
|
||||
go_crypto_tls_ex_uprobe(ctx, &go_read_context, FLAGS_IS_READ_BIT);
|
||||
}
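
As the README comment at the top of this file notes, return probes are emulated by attaching plain uprobes to every ret instruction of the target symbol; the offsets are found by disassembling the binary with Capstone, through the gapstone binding added to tap/go.mod. Below is a hedged, simplified sketch of that discovery step; the function name, the toy byte sequence, and the fixed base offset are illustrative, while the real logic lives in go_offsets.go and reads the symbol bytes from the ELF file.

package main

import (
	"fmt"

	"github.com/knightsc/gapstone"
)

// findRetOffsets disassembles a symbol's machine code and returns the offsets
// of all ret instructions, which is where the "ex" uprobes get attached.
func findRetOffsets(code []byte, symbolOffset uint64) ([]uint64, error) {
	engine, err := gapstone.New(gapstone.CS_ARCH_X86, gapstone.CS_MODE_64)
	if err != nil {
		return nil, err
	}
	defer engine.Close()

	instructions, err := engine.Disasm(code, symbolOffset, 0 /* 0 = disassemble everything */)
	if err != nil {
		return nil, err
	}

	var offsets []uint64
	for _, ins := range instructions {
		if ins.Mnemonic == "ret" {
			offsets = append(offsets, uint64(ins.Address))
		}
	}
	return offsets, nil
}

func main() {
	// mov eax, 1; ret; nop; ret  -- a toy byte sequence instead of a real symbol.
	code := []byte{0xb8, 0x01, 0x00, 0x00, 0x00, 0xc3, 0x90, 0xc3}
	offsets, err := findRetOffsets(code, 0x1000)
	fmt.Println(offsets, err) // [4101 4103] <nil>
}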
|
||||
19
tap/tlstapper/bpf/include/common.h
Normal file
@@ -0,0 +1,19 @@
|
||||
/*
|
||||
Note: This file is licensed differently from the rest of the project
|
||||
SPDX-License-Identifier: GPL-2.0
|
||||
Copyright (C) UP9 Inc.
|
||||
*/
|
||||
|
||||
#ifndef __COMMON__
|
||||
#define __COMMON__
|
||||
|
||||
const __s32 invalid_fd = -1;
|
||||
|
||||
static int add_address_to_chunk(struct pt_regs *ctx, struct tls_chunk* chunk, __u64 id, __u32 fd);
|
||||
static void send_chunk_part(struct pt_regs *ctx, __u8* buffer, __u64 id, struct tls_chunk* chunk, int start, int end);
|
||||
static void send_chunk(struct pt_regs *ctx, __u8* buffer, __u64 id, struct tls_chunk* chunk);
|
||||
static void output_ssl_chunk(struct pt_regs *ctx, struct ssl_info* info, int count_bytes, __u64 id, __u32 flags);
|
||||
static struct ssl_info new_ssl_info();
|
||||
static struct ssl_info lookup_ssl_info(struct pt_regs *ctx, struct bpf_map_def* map_fd, __u64 pid_tgid);
|
||||
|
||||
#endif /* __COMMON__ */
|
||||
141
tap/tlstapper/bpf/include/go_abi_internal.h
Normal file
@@ -0,0 +1,141 @@
|
||||
/*
|
||||
Note: This file is licensed differently from the rest of the project
|
||||
SPDX-License-Identifier: GPL-2.0
|
||||
Copyright (C) UP9 Inc.
|
||||
*/
|
||||
|
||||
#ifndef __GO_ABI_INTERNAL__
|
||||
#define __GO_ABI_INTERNAL__
|
||||
|
||||
/*
|
||||
Go internal ABI specification
|
||||
https://go.googlesource.com/go/+/refs/heads/master/src/cmd/compile/abi-internal.md
|
||||
*/
|
||||
|
||||
/* Scan the ARCH passed in from ARCH env variable */
|
||||
#if defined(__TARGET_ARCH_x86)
|
||||
#define bpf_target_x86
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_s390)
|
||||
#define bpf_target_s390
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_arm)
|
||||
#define bpf_target_arm
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_arm64)
|
||||
#define bpf_target_arm64
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_mips)
|
||||
#define bpf_target_mips
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_powerpc)
|
||||
#define bpf_target_powerpc
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_sparc)
|
||||
#define bpf_target_sparc
|
||||
#define bpf_target_defined
|
||||
#else
|
||||
#undef bpf_target_defined
|
||||
#endif
|
||||
|
||||
/* Fall back to what the compiler says */
|
||||
#ifndef bpf_target_defined
|
||||
#if defined(__x86_64__)
|
||||
#define bpf_target_x86
|
||||
#elif defined(__s390__)
|
||||
#define bpf_target_s390
|
||||
#elif defined(__arm__)
|
||||
#define bpf_target_arm
|
||||
#elif defined(__aarch64__)
|
||||
#define bpf_target_arm64
|
||||
#elif defined(__mips__)
|
||||
#define bpf_target_mips
|
||||
#elif defined(__powerpc__)
|
||||
#define bpf_target_powerpc
|
||||
#elif defined(__sparc__)
|
||||
#define bpf_target_sparc
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(bpf_target_x86)
|
||||
|
||||
#ifdef __i386__
|
||||
|
||||
/*
|
||||
https://go.googlesource.com/go/+/refs/heads/dev.regabi/src/cmd/compile/internal-abi.md#amd64-architecture
|
||||
https://github.com/golang/go/blob/go1.17.6/src/cmd/compile/internal/ssa/gen/AMD64Ops.go#L100
|
||||
*/
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R1(x) ((x)->eax)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_P2(x) ((x)->ecx)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_P3(x) ((x)->edx)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_P4(x) 0
|
||||
#define GO_ABI_INTERNAL_PT_REGS_P5(x) 0
|
||||
#define GO_ABI_INTERNAL_PT_REGS_P6(x) 0
|
||||
#define GO_ABI_INTERNAL_PT_REGS_SP(x) ((x)->esp)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_FP(x) ((x)->ebp)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_GP(x) ((x)->e14)
|
||||
|
||||
#else
|
||||
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R1(x) ((x)->rax)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R2(x) ((x)->rcx)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R3(x) ((x)->rdx)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R4(x) ((x)->rbx)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R5(x) ((x)->rbp)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R6(x) ((x)->rsi)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_SP(x) ((x)->rsp)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_FP(x) ((x)->rbp)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_GP(x) ((x)->r14)
|
||||
|
||||
#endif
|
||||
|
||||
#elif defined(bpf_target_arm)
|
||||
|
||||
/*
|
||||
https://go.googlesource.com/go/+/refs/heads/master/src/cmd/compile/abi-internal.md#arm64-architecture
|
||||
https://github.com/golang/go/blob/go1.17.6/src/cmd/compile/internal/ssa/gen/ARM64Ops.go#L129-L131
|
||||
*/
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R1(x) ((x)->uregs[0])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R2(x) ((x)->uregs[1])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R3(x) ((x)->uregs[2])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R4(x) ((x)->uregs[3])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R5(x) ((x)->uregs[4])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R6(x) ((x)->uregs[5])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_SP(x) ((x)->uregs[14])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_FP(x) ((x)->uregs[29])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_GP(x) ((x)->uregs[28])
|
||||
|
||||
#elif defined(bpf_target_arm64)
|
||||
|
||||
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
|
||||
struct pt_regs;
|
||||
#define PT_REGS_ARM64 const volatile struct user_pt_regs
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R6(x) (((PT_REGS_ARM64 *)(x))->regs[5])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->regs[30])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_GP(x) (((PT_REGS_ARM64 *)(x))->regs[28])
|
||||
|
||||
#elif defined(bpf_target_powerpc)
|
||||
|
||||
/*
|
||||
https://go.googlesource.com/go/+/refs/heads/master/src/cmd/compile/abi-internal.md#ppc64-architecture
|
||||
https://github.com/golang/go/blob/go1.17.6/src/cmd/compile/internal/ssa/gen/PPC64Ops.go#L125-L127
|
||||
*/
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R1(x) ((x)->gpr[3])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R2(x) ((x)->gpr[4])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R3(x) ((x)->gpr[5])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R4(x) ((x)->gpr[6])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R5(x) ((x)->gpr[7])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R6(x) ((x)->gpr[8])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_SP(x) ((x)->sp)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_FP(x) ((x)->gpr[12])
|
||||
#define GO_ABI_INTERNAL_PT_REGS_GP(x) ((x)->gpr[30])
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __GO_ABI_INTERNAL__ */
|
||||
15
tap/tlstapper/bpf/include/go_types.h
Normal file
@@ -0,0 +1,15 @@
/*
Note: This file is licenced differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/

#ifndef __GO_TYPES__
#define __GO_TYPES__

struct go_interface {
    __s64 type;
    void* ptr;
};

#endif /* __GO_TYPES__ */
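The go_interface struct above mirrors how the Go runtime lays out an interface value: one word identifying the dynamic type followed by one pointer to the data. The sketch below is not part of this change and relies on runtime-internal layout rather than any public API; the ifaceWords name is invented for illustration.

package main

import (
	"fmt"
	"net"
	"unsafe"
)

// ifaceWords is a hypothetical mirror of the runtime-internal interface layout
// that struct go_interface assumes: a type word followed by a data pointer.
type ifaceWords struct {
	typ unsafe.Pointer
	ptr unsafe.Pointer
}

func main() {
	var conn net.Conn = &net.TCPConn{}

	// Reinterpret the interface header to look at its two machine words.
	words := (*ifaceWords)(unsafe.Pointer(&conn))

	fmt.Println("interface value size:", unsafe.Sizeof(conn)) // 16 bytes on 64-bit
	fmt.Printf("type word: %p, data pointer: %p\n", words.typ, words.ptr)
}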
@@ -16,11 +16,15 @@ Copyright (C) UP9 Inc.
|
||||
// One minute in nanoseconds. Chosen by gut feeling.
|
||||
#define SSL_INFO_MAX_TTL_NANO (1000000000l * 60l)
|
||||
|
||||
#define MAX_ENTRIES_HASH (1 << 12) // 4096
|
||||
#define MAX_ENTRIES_PERF_OUTPUT (1 << 10) // 1024
|
||||
#define MAX_ENTRIES_LRU_HASH (1 << 14) // 16384
|
||||
|
||||
// The same struct can be found in chunk.go
|
||||
//
|
||||
// Be careful when editing, alignment and padding should be exactly the same in go/c.
|
||||
//
|
||||
struct tlsChunk {
|
||||
struct tls_chunk {
|
||||
__u32 pid;
|
||||
__u32 tgid;
|
||||
__u32 len;
|
||||
@@ -34,6 +38,7 @@ struct tlsChunk {
|
||||
|
||||
struct ssl_info {
|
||||
void* buffer;
|
||||
__u32 buffer_len;
|
||||
__u32 fd;
|
||||
__u64 created_at_nano;
|
||||
|
||||
@@ -48,6 +53,16 @@ struct fd_info {
|
||||
__u8 flags;
|
||||
};
|
||||
|
||||
// Heap-like area for eBPF programs - stack size limited to 512 bytes, we must use maps for bigger (chunk) objects.
|
||||
//
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
|
||||
__uint(max_entries, 1);
|
||||
__type(key, int);
|
||||
__type(value, struct tls_chunk);
|
||||
} heap SEC(".maps");
|
||||
|
||||
|
||||
#define BPF_MAP(_name, _type, _key_type, _value_type, _max_entries) \
|
||||
struct bpf_map_def SEC("maps") _name = { \
|
||||
.type = _type, \
|
||||
@@ -57,19 +72,26 @@ struct fd_info {
|
||||
};
|
||||
|
||||
#define BPF_HASH(_name, _key_type, _value_type) \
|
||||
BPF_MAP(_name, BPF_MAP_TYPE_HASH, _key_type, _value_type, 4096)
|
||||
BPF_MAP(_name, BPF_MAP_TYPE_HASH, _key_type, _value_type, MAX_ENTRIES_HASH)
|
||||
|
||||
#define BPF_PERF_OUTPUT(_name) \
|
||||
BPF_MAP(_name, BPF_MAP_TYPE_PERF_EVENT_ARRAY, int, __u32, 1024)
|
||||
|
||||
#define BPF_LRU_HASH(_name, _key_type, _value_type) \
|
||||
BPF_MAP(_name, BPF_MAP_TYPE_LRU_HASH, _key_type, _value_type, 16384)
|
||||
BPF_MAP(_name, BPF_MAP_TYPE_PERF_EVENT_ARRAY, int, __u32, MAX_ENTRIES_PERF_OUTPUT)
|
||||
|
||||
#define BPF_LRU_HASH(_name, _key_type, _value_type) \
|
||||
BPF_MAP(_name, BPF_MAP_TYPE_LRU_HASH, _key_type, _value_type, MAX_ENTRIES_LRU_HASH)
|
||||
|
||||
// Generic
|
||||
BPF_HASH(pids_map, __u32, __u32);
|
||||
BPF_LRU_HASH(ssl_write_context, __u64, struct ssl_info);
|
||||
BPF_LRU_HASH(ssl_read_context, __u64, struct ssl_info);
|
||||
BPF_LRU_HASH(file_descriptor_to_ipv4, __u64, struct fd_info);
|
||||
BPF_PERF_OUTPUT(chunks_buffer);
|
||||
BPF_PERF_OUTPUT(log_buffer);
|
||||
|
||||
// OpenSSL specific
|
||||
BPF_LRU_HASH(openssl_write_context, __u64, struct ssl_info);
|
||||
BPF_LRU_HASH(openssl_read_context, __u64, struct ssl_info);
|
||||
|
||||
// Go specific
|
||||
BPF_LRU_HASH(go_write_context, __u64, struct ssl_info);
|
||||
BPF_LRU_HASH(go_read_context, __u64, struct ssl_info);
|
||||
|
||||
#endif /* __MAPS__ */
|
||||
|
||||
@@ -10,149 +10,35 @@ Copyright (C) UP9 Inc.
|
||||
#include "include/log.h"
|
||||
#include "include/logger_messages.h"
|
||||
#include "include/pids.h"
|
||||
#include "include/common.h"
|
||||
|
||||
// Heap-like area for eBPF programs - stack size limited to 512 bytes, we must use maps for bigger (chunk) objects.
|
||||
//
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
|
||||
__uint(max_entries, 1);
|
||||
__type(key, int);
|
||||
__type(value, struct tlsChunk);
|
||||
} heap SEC(".maps");
|
||||
|
||||
static __always_inline int get_count_bytes(struct pt_regs *ctx, struct ssl_info* info, __u64 id) {
|
||||
int returnValue = PT_REGS_RC(ctx);
|
||||
|
||||
if (info->count_ptr == NULL) {
|
||||
// ssl_read and ssl_write return the number of bytes written/read
|
||||
//
|
||||
return returnValue;
|
||||
}
|
||||
|
||||
// ssl_read_ex and ssl_write_ex return 1 for success
|
||||
//
|
||||
if (returnValue != 1) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ssl_read_ex and ssl_write_ex write the number of bytes to an arg named *count
|
||||
//
|
||||
size_t countBytes;
|
||||
long err = bpf_probe_read(&countBytes, sizeof(size_t), (void*) info->count_ptr);
|
||||
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_BYTES_COUNT, id, err, 0l);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return countBytes;
|
||||
}
|
||||
int returnValue = PT_REGS_RC(ctx);
|
||||
|
||||
static __always_inline int add_address_to_chunk(struct pt_regs *ctx, struct tlsChunk* chunk, __u64 id, __u32 fd) {
|
||||
__u32 pid = id >> 32;
|
||||
__u64 key = (__u64) pid << 32 | fd;
|
||||
|
||||
struct fd_info *fdinfo = bpf_map_lookup_elem(&file_descriptor_to_ipv4, &key);
|
||||
|
||||
if (fdinfo == NULL) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
int err = bpf_probe_read(chunk->address, sizeof(chunk->address), fdinfo->ipv4_addr);
|
||||
chunk->flags |= (fdinfo->flags & FLAGS_IS_CLIENT_BIT);
|
||||
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_FD_ADDRESS, id, err, 0l);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 1;
|
||||
}
|
||||
if (info->count_ptr == NULL) {
|
||||
// ssl_read and ssl_write return the number of bytes written/read
|
||||
//
|
||||
return returnValue;
|
||||
}
|
||||
|
||||
static __always_inline void send_chunk_part(struct pt_regs *ctx, __u8* buffer, __u64 id,
|
||||
struct tlsChunk* chunk, int start, int end) {
|
||||
size_t recorded = MIN(end - start, sizeof(chunk->data));
|
||||
|
||||
if (recorded <= 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
chunk->recorded = recorded;
|
||||
chunk->start = start;
|
||||
|
||||
// This ugly trick is for the eBPF verifier's happiness
|
||||
//
|
||||
long err = 0;
|
||||
if (chunk->recorded == sizeof(chunk->data)) {
|
||||
err = bpf_probe_read(chunk->data, sizeof(chunk->data), buffer + start);
|
||||
} else {
|
||||
recorded &= (sizeof(chunk->data) - 1); // Buffer size must be a power of 2
|
||||
err = bpf_probe_read(chunk->data, recorded, buffer + start);
|
||||
}
|
||||
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_FROM_SSL_BUFFER, id, err, 0l);
|
||||
return;
|
||||
}
|
||||
|
||||
bpf_perf_event_output(ctx, &chunks_buffer, BPF_F_CURRENT_CPU, chunk, sizeof(struct tlsChunk));
|
||||
}
|
||||
// ssl_read_ex and ssl_write_ex return 1 for success
|
||||
//
|
||||
if (returnValue != 1) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __always_inline void send_chunk(struct pt_regs *ctx, __u8* buffer, __u64 id, struct tlsChunk* chunk) {
|
||||
// ebpf loops must be bounded at compile time, we can't use (i < chunk->len / CHUNK_SIZE)
|
||||
//
|
||||
// https://lwn.net/Articles/794934/
|
||||
//
|
||||
// However, we want to run on kernels older than 5.3, hence we use "#pragma unroll" anyway
|
||||
//
|
||||
#pragma unroll
|
||||
for (int i = 0; i < MAX_CHUNKS_PER_OPERATION; i++) {
|
||||
if (chunk->len <= (CHUNK_SIZE * i)) {
|
||||
break;
|
||||
}
|
||||
|
||||
send_chunk_part(ctx, buffer, id, chunk, CHUNK_SIZE * i, chunk->len);
|
||||
}
|
||||
}
|
||||
// ssl_read_ex and ssl_write_ex write the number of bytes to an arg named *count
|
||||
//
|
||||
size_t countBytes;
|
||||
long err = bpf_probe_read(&countBytes, sizeof(size_t), (void*) info->count_ptr);
|
||||
|
||||
static __always_inline void output_ssl_chunk(struct pt_regs *ctx, struct ssl_info* info, __u64 id, __u32 flags) {
|
||||
int countBytes = get_count_bytes(ctx, info, id);
|
||||
|
||||
if (countBytes <= 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (countBytes > (CHUNK_SIZE * MAX_CHUNKS_PER_OPERATION)) {
|
||||
log_error(ctx, LOG_ERROR_BUFFER_TOO_BIG, id, countBytes, 0l);
|
||||
return;
|
||||
}
|
||||
|
||||
struct tlsChunk* chunk;
|
||||
int zero = 0;
|
||||
|
||||
// If another thread running on the same CPU gets to this point at the same time as us (context switch),
// the data will be corrupted - protection may be added in the future
|
||||
//
|
||||
chunk = bpf_map_lookup_elem(&heap, &zero);
|
||||
|
||||
if (!chunk) {
|
||||
log_error(ctx, LOG_ERROR_ALLOCATING_CHUNK, id, 0l, 0l);
|
||||
return;
|
||||
}
|
||||
|
||||
chunk->flags = flags;
|
||||
chunk->pid = id >> 32;
|
||||
chunk->tgid = id;
|
||||
chunk->len = countBytes;
|
||||
chunk->fd = info->fd;
|
||||
|
||||
if (!add_address_to_chunk(ctx, chunk, id, chunk->fd)) {
|
||||
// Without an address, we drop the chunk because there is not much to do with it in Go
|
||||
//
|
||||
return;
|
||||
}
|
||||
|
||||
send_chunk(ctx, info->buffer, id, chunk);
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_BYTES_COUNT, id, err, 0l);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return countBytes;
|
||||
}
|
||||
|
||||
static __always_inline void ssl_uprobe(struct pt_regs *ctx, void* ssl, void* buffer, int num, struct bpf_map_def* map_fd, size_t *count_ptr) {
|
||||
@@ -163,25 +49,7 @@ static __always_inline void ssl_uprobe(struct pt_regs *ctx, void* ssl, void* buf
|
||||
}
|
||||
|
||||
struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &id);
|
||||
struct ssl_info info = {};
|
||||
|
||||
if (infoPtr == NULL) {
|
||||
info.fd = -1;
|
||||
info.created_at_nano = bpf_ktime_get_ns();
|
||||
} else {
|
||||
long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);
|
||||
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_SSL_CONTEXT, id, err, ORIGIN_SSL_UPROBE_CODE);
|
||||
}
|
||||
|
||||
if ((bpf_ktime_get_ns() - info.created_at_nano) > SSL_INFO_MAX_TTL_NANO) {
|
||||
// If the ssl info is too old, we don't want to use its info because it may be incorrect.
|
||||
//
|
||||
info.fd = -1;
|
||||
info.created_at_nano = bpf_ktime_get_ns();
|
||||
}
|
||||
}
|
||||
struct ssl_info info = lookup_ssl_info(ctx, &openssl_write_context, id);
|
||||
|
||||
info.count_ptr = count_ptr;
|
||||
info.buffer = buffer;
|
||||
@@ -227,50 +95,52 @@ static __always_inline void ssl_uretprobe(struct pt_regs *ctx, struct bpf_map_de
|
||||
return;
|
||||
}
|
||||
|
||||
if (info.fd == -1) {
|
||||
if (info.fd == invalid_fd) {
|
||||
log_error(ctx, LOG_ERROR_MISSING_FILE_DESCRIPTOR, id, 0l, 0l);
|
||||
return;
|
||||
}
|
||||
|
||||
output_ssl_chunk(ctx, &info, id, flags);
|
||||
|
||||
int count_bytes = get_count_bytes(ctx, &info, id);
|
||||
|
||||
output_ssl_chunk(ctx, &info, count_bytes, id, flags);
|
||||
}
|
||||
|
||||
SEC("uprobe/ssl_write")
|
||||
void BPF_KPROBE(ssl_write, void* ssl, void* buffer, int num) {
|
||||
ssl_uprobe(ctx, ssl, buffer, num, &ssl_write_context, 0);
|
||||
ssl_uprobe(ctx, ssl, buffer, num, &openssl_write_context, 0);
|
||||
}
|
||||
|
||||
SEC("uretprobe/ssl_write")
|
||||
void BPF_KPROBE(ssl_ret_write) {
|
||||
ssl_uretprobe(ctx, &ssl_write_context, 0);
|
||||
ssl_uretprobe(ctx, &openssl_write_context, 0);
|
||||
}
|
||||
|
||||
SEC("uprobe/ssl_read")
|
||||
void BPF_KPROBE(ssl_read, void* ssl, void* buffer, int num) {
|
||||
ssl_uprobe(ctx, ssl, buffer, num, &ssl_read_context, 0);
|
||||
ssl_uprobe(ctx, ssl, buffer, num, &openssl_read_context, 0);
|
||||
}
|
||||
|
||||
SEC("uretprobe/ssl_read")
|
||||
void BPF_KPROBE(ssl_ret_read) {
|
||||
ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT);
|
||||
ssl_uretprobe(ctx, &openssl_read_context, FLAGS_IS_READ_BIT);
|
||||
}
|
||||
|
||||
SEC("uprobe/ssl_write_ex")
|
||||
void BPF_KPROBE(ssl_write_ex, void* ssl, void* buffer, size_t num, size_t *written) {
|
||||
ssl_uprobe(ctx, ssl, buffer, num, &ssl_write_context, written);
|
||||
ssl_uprobe(ctx, ssl, buffer, num, &openssl_write_context, written);
|
||||
}
|
||||
|
||||
SEC("uretprobe/ssl_write_ex")
|
||||
void BPF_KPROBE(ssl_ret_write_ex) {
|
||||
ssl_uretprobe(ctx, &ssl_write_context, 0);
|
||||
ssl_uretprobe(ctx, &openssl_write_context, 0);
|
||||
}
|
||||
|
||||
SEC("uprobe/ssl_read_ex")
|
||||
void BPF_KPROBE(ssl_read_ex, void* ssl, void* buffer, size_t num, size_t *readbytes) {
|
||||
ssl_uprobe(ctx, ssl, buffer, num, &ssl_read_context, readbytes);
|
||||
ssl_uprobe(ctx, ssl, buffer, num, &openssl_read_context, readbytes);
|
||||
}
|
||||
|
||||
SEC("uretprobe/ssl_read_ex")
|
||||
void BPF_KPROBE(ssl_ret_read_ex) {
|
||||
ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT);
|
||||
ssl_uretprobe(ctx, &openssl_read_context, FLAGS_IS_READ_BIT);
|
||||
}
|
||||
|
||||
@@ -13,7 +13,9 @@ Copyright (C) UP9 Inc.
|
||||
|
||||
// To avoid multiple .o files
|
||||
//
|
||||
#include "common.c"
|
||||
#include "openssl_uprobes.c"
|
||||
#include "go_uprobes.c"
|
||||
#include "fd_tracepoints.c"
|
||||
#include "fd_to_address_tracepoints.c"
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@ package tlstapper
|
||||
|
||||
// Must be synced with logger_messages.h
|
||||
//
|
||||
var bpfLogMessages = []string {
|
||||
var bpfLogMessages = []string{
|
||||
/*0000*/ "[%d] Unable to read bytes count from _ex methods [err: %d]",
|
||||
/*0001*/ "[%d] Unable to read ipv4 address [err: %d]",
|
||||
/*0002*/ "[%d] Unable to read ssl buffer [err: %d]",
|
||||
@@ -20,6 +20,4 @@ var bpfLogMessages = []string {
|
||||
/*0014*/ "[%d] Unable to put connect info [err: %d]",
|
||||
/*0015*/ "[%d] Unable to get connect info",
|
||||
/*0016*/ "[%d] Unable to read connect info [err: %d]",
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -12,23 +12,7 @@ import (
|
||||
const FLAGS_IS_CLIENT_BIT uint32 = (1 << 0)
|
||||
const FLAGS_IS_READ_BIT uint32 = (1 << 1)
|
||||
|
||||
// The same struct can be found in maps.h
|
||||
//
|
||||
// Be careful when editing, alignment and padding should be exactly the same in go/c.
|
||||
//
|
||||
type tlsChunk struct {
|
||||
Pid uint32 // process id
|
||||
Tgid uint32 // thread id inside the process
|
||||
Len uint32 // the size of the native buffer used to read/write the tls data (may be bigger than tlsChunk.Data[])
|
||||
Start uint32 // the start offset within the native buffer
|
||||
Recorded uint32 // number of bytes copied from the native buffer to tlsChunk.Data[]
|
||||
Fd uint32 // the file descriptor used to read/write the tls data (probably socket file descriptor)
|
||||
Flags uint32 // bitwise flags
|
||||
Address [16]byte // ipv4 address and port
|
||||
Data [4096]byte // actual tls data
|
||||
}
|
||||
|
||||
func (c *tlsChunk) getAddress() (net.IP, uint16, error) {
|
||||
func (c *tlsTapperTlsChunk) getAddress() (net.IP, uint16, error) {
|
||||
address := bytes.NewReader(c.Address[:])
|
||||
var family uint16
|
||||
var port uint16
|
||||
@@ -51,31 +35,31 @@ func (c *tlsChunk) getAddress() (net.IP, uint16, error) {
|
||||
return ip, port, nil
|
||||
}
|
||||
|
||||
func (c *tlsChunk) isClient() bool {
|
||||
func (c *tlsTapperTlsChunk) isClient() bool {
|
||||
return c.Flags&FLAGS_IS_CLIENT_BIT != 0
|
||||
}
|
||||
|
||||
func (c *tlsChunk) isServer() bool {
|
||||
func (c *tlsTapperTlsChunk) isServer() bool {
|
||||
return !c.isClient()
|
||||
}
|
||||
|
||||
func (c *tlsChunk) isRead() bool {
|
||||
func (c *tlsTapperTlsChunk) isRead() bool {
|
||||
return c.Flags&FLAGS_IS_READ_BIT != 0
|
||||
}
|
||||
|
||||
func (c *tlsChunk) isWrite() bool {
|
||||
func (c *tlsTapperTlsChunk) isWrite() bool {
|
||||
return !c.isRead()
|
||||
}
|
||||
|
||||
func (c *tlsChunk) getRecordedData() []byte {
|
||||
func (c *tlsTapperTlsChunk) getRecordedData() []byte {
|
||||
return c.Data[:c.Recorded]
|
||||
}
|
||||
|
||||
func (c *tlsChunk) isRequest() bool {
|
||||
func (c *tlsTapperTlsChunk) isRequest() bool {
|
||||
return (c.isClient() && c.isWrite()) || (c.isServer() && c.isRead())
|
||||
}
|
||||
|
||||
func (c *tlsChunk) getAddressPair() (addressPair, error) {
|
||||
func (c *tlsTapperTlsChunk) getAddressPair() (addressPair, error) {
|
||||
ip, port, err := c.getAddress()
|
||||
|
||||
if err != nil {
|
||||
|
||||
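Both maps.h and the Go side warn that struct tls_chunk and the generated tlsTapperTlsChunk must keep identical alignment and padding, since pollChunksPerfBuffer decodes the perf record with binary.Read. A hypothetical guard test (not part of this change) could pin the expected flat size: seven __u32 fields (28 bytes) plus the 16-byte address and the 4096-byte data buffer.

package tlstapper

import (
	"encoding/binary"
	"testing"
)

// TestTlsChunkLayout is an illustrative guard (not in the diff): if a field is
// added or reordered on either the C or the Go side, the flat size changes and
// this fails before binary.Read starts misreading perf records.
func TestTlsChunkLayout(t *testing.T) {
	const expected = 7*4 + 16 + 4096 // seven __u32 fields + address + data

	if got := binary.Size(tlsTapperTlsChunk{}); got != expected {
		t.Fatalf("tlsTapperTlsChunk flat size is %d bytes, expected %d", got, expected)
	}
}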
105
tap/tlstapper/go_hooks.go
Normal file
@@ -0,0 +1,105 @@
|
||||
package tlstapper
|
||||
|
||||
import (
|
||||
"github.com/cilium/ebpf/link"
|
||||
"github.com/go-errors/errors"
|
||||
)
|
||||
|
||||
type goHooks struct {
|
||||
goWriteProbe link.Link
|
||||
goWriteExProbes []link.Link
|
||||
goReadProbe link.Link
|
||||
goReadExProbes []link.Link
|
||||
}
|
||||
|
||||
func (s *goHooks) installUprobes(bpfObjects *tlsTapperObjects, filePath string) error {
|
||||
ex, err := link.OpenExecutable(filePath)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
offsets, err := findGoOffsets(filePath)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
return s.installHooks(bpfObjects, ex, offsets)
|
||||
}
|
||||
|
||||
func (s *goHooks) installHooks(bpfObjects *tlsTapperObjects, ex *link.Executable, offsets goOffsets) error {
|
||||
var err error
|
||||
|
||||
// Symbol points to
|
||||
// [`crypto/tls.(*Conn).Write`](https://github.com/golang/go/blob/go1.17.6/src/crypto/tls/conn.go#L1099)
|
||||
s.goWriteProbe, err = ex.Uprobe(goWriteSymbol, bpfObjects.GoCryptoTlsWrite, &link.UprobeOptions{
|
||||
Offset: offsets.GoWriteOffset.enter,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
for _, offset := range offsets.GoWriteOffset.exits {
|
||||
probe, err := ex.Uprobe(goWriteSymbol, bpfObjects.GoCryptoTlsWriteEx, &link.UprobeOptions{
|
||||
Offset: offset,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
s.goWriteExProbes = append(s.goWriteExProbes, probe)
|
||||
}
|
||||
|
||||
// Symbol points to
|
||||
// [`crypto/tls.(*Conn).Read`](https://github.com/golang/go/blob/go1.17.6/src/crypto/tls/conn.go#L1263)
|
||||
s.goReadProbe, err = ex.Uprobe(goReadSymbol, bpfObjects.GoCryptoTlsRead, &link.UprobeOptions{
|
||||
Offset: offsets.GoReadOffset.enter,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
for _, offset := range offsets.GoReadOffset.exits {
|
||||
probe, err := ex.Uprobe(goReadSymbol, bpfObjects.GoCryptoTlsReadEx, &link.UprobeOptions{
|
||||
Offset: offset,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
s.goReadExProbes = append(s.goReadExProbes, probe)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *goHooks) close() []error {
|
||||
errors := make([]error, 0)
|
||||
|
||||
if err := s.goWriteProbe.Close(); err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
|
||||
for _, probe := range s.goWriteExProbes {
|
||||
if err := probe.Close(); err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.goReadProbe.Close(); err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
|
||||
for _, probe := range s.goReadExProbes {
|
||||
if err := probe.Close(); err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
|
||||
return errors
|
||||
}
|
||||
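For orientation, a minimal sketch of how the new goHooks type is meant to be driven (the helper names here are invented; the real wiring lives in tapGoPid further down): install the crypto/tls uprobes for one executable, and later close every probe, collecting the errors.

package tlstapper

// tapGoExecutable is a hypothetical wrapper around the goHooks API shown above.
func tapGoExecutable(bpfObjects *tlsTapperObjects, exePath string) (*goHooks, error) {
	hooks := &goHooks{}

	// Resolves the enter/exit offsets of crypto/tls.(*Conn).Write and Read
	// inside exePath and attaches one uprobe per offset.
	if err := hooks.installUprobes(bpfObjects, exePath); err != nil {
		return nil, err
	}

	return hooks, nil
}

// untapGoExecutable detaches all probes; close() reports errors individually.
func untapGoExecutable(hooks *goHooks) {
	for _, err := range hooks.close() {
		LogError(err)
	}
}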
213
tap/tlstapper/go_offsets.go
Normal file
@@ -0,0 +1,213 @@
|
||||
package tlstapper
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"debug/elf"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/Masterminds/semver"
|
||||
"github.com/cilium/ebpf/link"
|
||||
"github.com/knightsc/gapstone"
|
||||
)
|
||||
|
||||
type goOffsets struct {
|
||||
GoWriteOffset *goExtendedOffset
|
||||
GoReadOffset *goExtendedOffset
|
||||
}
|
||||
|
||||
type goExtendedOffset struct {
|
||||
enter uint64
|
||||
exits []uint64
|
||||
}
|
||||
|
||||
const (
|
||||
minimumSupportedGoVersion = "1.17.0"
|
||||
goVersionSymbol = "runtime.buildVersion.str"
|
||||
goWriteSymbol = "crypto/tls.(*Conn).Write"
|
||||
goReadSymbol = "crypto/tls.(*Conn).Read"
|
||||
)
|
||||
|
||||
func findGoOffsets(filePath string) (goOffsets, error) {
|
||||
offsets, err := getOffsets(filePath)
|
||||
if err != nil {
|
||||
return goOffsets{}, err
|
||||
}
|
||||
|
||||
goVersionOffset, err := getOffset(offsets, goVersionSymbol)
|
||||
if err != nil {
|
||||
return goOffsets{}, err
|
||||
}
|
||||
|
||||
passed, goVersion, err := checkGoVersion(filePath, goVersionOffset)
|
||||
if err != nil {
|
||||
return goOffsets{}, fmt.Errorf("Checking Go version: %s", err)
|
||||
}
|
||||
|
||||
if !passed {
|
||||
return goOffsets{}, fmt.Errorf("Unsupported Go version: %s", goVersion)
|
||||
}
|
||||
|
||||
writeOffset, err := getOffset(offsets, goWriteSymbol)
|
||||
if err != nil {
|
||||
return goOffsets{}, fmt.Errorf("reading offset [%s]: %s", goWriteSymbol, err)
|
||||
}
|
||||
|
||||
readOffset, err := getOffset(offsets, goReadSymbol)
|
||||
if err != nil {
|
||||
return goOffsets{}, fmt.Errorf("reading offset [%s]: %s", goReadSymbol, err)
|
||||
}
|
||||
|
||||
return goOffsets{
|
||||
GoWriteOffset: writeOffset,
|
||||
GoReadOffset: readOffset,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func getOffsets(filePath string) (offsets map[string]*goExtendedOffset, err error) {
|
||||
var engine gapstone.Engine
|
||||
switch runtime.GOARCH {
|
||||
case "amd64":
|
||||
engine, err = gapstone.New(
|
||||
gapstone.CS_ARCH_X86,
|
||||
gapstone.CS_MODE_64,
|
||||
)
|
||||
case "arm64":
|
||||
engine, err = gapstone.New(
|
||||
gapstone.CS_ARCH_ARM64,
|
||||
gapstone.CS_MODE_ARM,
|
||||
)
|
||||
default:
|
||||
err = fmt.Errorf("Unsupported architecture: %v", runtime.GOARCH)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
offsets = make(map[string]*goExtendedOffset)
|
||||
var fd *os.File
|
||||
fd, err = os.Open(filePath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
var se *elf.File
|
||||
se, err = elf.NewFile(fd)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
textSection := se.Section(".text")
|
||||
if textSection == nil {
|
||||
err = fmt.Errorf("No text section")
|
||||
return
|
||||
}
|
||||
|
||||
// extract the raw bytes from the .text section
|
||||
var textSectionData []byte
|
||||
textSectionData, err = textSection.Data()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
syms, err := se.Symbols()
|
||||
for _, sym := range syms {
|
||||
offset := sym.Value
|
||||
|
||||
var lastProg *elf.Prog
|
||||
for _, prog := range se.Progs {
|
||||
if prog.Vaddr <= sym.Value && sym.Value < (prog.Vaddr+prog.Memsz) {
|
||||
offset = sym.Value - prog.Vaddr + prog.Off
|
||||
lastProg = prog
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
extendedOffset := &goExtendedOffset{enter: offset}
|
||||
|
||||
// source: https://gist.github.com/grantseltzer/3efa8ecc5de1fb566e8091533050d608
|
||||
// skip over any symbols that aren't functions/methods
|
||||
if sym.Info != byte(2) && sym.Info != byte(18) {
|
||||
offsets[sym.Name] = extendedOffset
|
||||
continue
|
||||
}
|
||||
|
||||
// skip over empty symbols
|
||||
if sym.Size == 0 {
|
||||
offsets[sym.Name] = extendedOffset
|
||||
continue
|
||||
}
|
||||
|
||||
// calculate starting and ending index of the symbol within the text section
|
||||
symStartingIndex := sym.Value - textSection.Addr
|
||||
symEndingIndex := symStartingIndex + sym.Size
|
||||
|
||||
// collect the bytes of the symbol
|
||||
symBytes := textSectionData[symStartingIndex:symEndingIndex]
|
||||
|
||||
// disassemble the symbol
|
||||
var instructions []gapstone.Instruction
|
||||
instructions, err = engine.Disasm(symBytes, sym.Value, 0)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// iterate over each instruction and if the mnemonic is `ret` then that's an exit offset
|
||||
for _, ins := range instructions {
|
||||
if ins.Mnemonic == "ret" {
|
||||
extendedOffset.exits = append(extendedOffset.exits, uint64(ins.Address)-lastProg.Vaddr+lastProg.Off)
|
||||
}
|
||||
}
|
||||
|
||||
offsets[sym.Name] = extendedOffset
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func getOffset(offsets map[string]*goExtendedOffset, symbol string) (*goExtendedOffset, error) {
|
||||
if offset, ok := offsets[symbol]; ok {
|
||||
return offset, nil
|
||||
}
|
||||
return nil, fmt.Errorf("symbol %s: %w", symbol, link.ErrNoSymbol)
|
||||
}
|
||||
|
||||
func checkGoVersion(filePath string, offset *goExtendedOffset) (bool, string, error) {
|
||||
fd, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
reader := bufio.NewReader(fd)
|
||||
|
||||
_, err = reader.Discard(int(offset.enter))
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
|
||||
line, err := reader.ReadString(0)
|
||||
if err != nil {
|
||||
return false, "", err
|
||||
}
|
||||
|
||||
if len(line) < 3 {
|
||||
return false, "", fmt.Errorf("ELF data segment read error (corrupted result)")
|
||||
}
|
||||
|
||||
goVersionStr := line[2 : len(line)-1]
|
||||
|
||||
goVersion, err := semver.NewVersion(goVersionStr)
|
||||
if err != nil {
|
||||
return false, goVersionStr, err
|
||||
}
|
||||
|
||||
goVersionConstraint, err := semver.NewConstraint(fmt.Sprintf(">= %s", minimumSupportedGoVersion))
|
||||
if err != nil {
|
||||
return false, goVersionStr, err
|
||||
}
|
||||
|
||||
return goVersionConstraint.Check(goVersion), goVersionStr, nil
|
||||
}
|
||||
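go_offsets.go records every RET instruction of the traced symbol as an exit offset because classic uretprobes are the usual thing to avoid on Go binaries: the runtime can move goroutine stacks, which breaks the uretprobe return trampoline. A small sketch of consuming the result (not part of this change; printGoOffsets is an invented name, everything else comes from the file above):

package tlstapper

import "fmt"

// printGoOffsets is an illustrative helper showing how findGoOffsets is consumed:
// one enter offset per symbol, plus one exit offset per RET found by Capstone.
func printGoOffsets(binaryPath string) error {
	offsets, err := findGoOffsets(binaryPath)
	if err != nil {
		return err
	}

	fmt.Printf("%s: enter=0x%x, exits=%d\n", goWriteSymbol, offsets.GoWriteOffset.enter, len(offsets.GoWriteOffset.exits))
	fmt.Printf("%s: enter=0x%x, exits=%d\n", goReadSymbol, offsets.GoReadOffset.enter, len(offsets.GoReadOffset.exits))

	return nil
}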
@@ -46,7 +46,7 @@ func findLibraryByPid(procfs string, pid uint32, libraryName string) (string, er
|
||||
|
||||
filepath := parts[5]
|
||||
|
||||
if !strings.Contains(filepath, libraryName) {
|
||||
if libraryName != "" && !strings.Contains(filepath, libraryName) {
|
||||
continue
|
||||
}
|
||||
|
||||
|
||||
@@ -20,8 +20,10 @@ import (
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
const fdCachedItemAvgSize = 40
|
||||
const fdCacheMaxItems = 500000 / fdCachedItemAvgSize
|
||||
const (
|
||||
fdCachedItemAvgSize = 40
|
||||
fdCacheMaxItems = 500000 / fdCachedItemAvgSize
|
||||
)
|
||||
|
||||
type tlsPoller struct {
|
||||
tls *TlsTapper
|
||||
@@ -74,7 +76,8 @@ func (p *tlsPoller) close() error {
|
||||
}
|
||||
|
||||
func (p *tlsPoller) poll(emitter api.Emitter, options *api.TrafficFilteringOptions, streamsMap api.TcpStreamMap) {
|
||||
chunks := make(chan *tlsChunk)
|
||||
// tlsTapperTlsChunk is generated by bpf2go.
|
||||
chunks := make(chan *tlsTapperTlsChunk)
|
||||
|
||||
go p.pollChunksPerfBuffer(chunks)
|
||||
|
||||
@@ -94,7 +97,7 @@ func (p *tlsPoller) poll(emitter api.Emitter, options *api.TrafficFilteringOptio
|
||||
}
|
||||
}
|
||||
|
||||
func (p *tlsPoller) pollChunksPerfBuffer(chunks chan<- *tlsChunk) {
|
||||
func (p *tlsPoller) pollChunksPerfBuffer(chunks chan<- *tlsTapperTlsChunk) {
|
||||
logger.Log.Infof("Start polling for tls events")
|
||||
|
||||
for {
|
||||
@@ -118,7 +121,7 @@ func (p *tlsPoller) pollChunksPerfBuffer(chunks chan<- *tlsChunk) {
|
||||
|
||||
buffer := bytes.NewReader(record.RawSample)
|
||||
|
||||
var chunk tlsChunk
|
||||
var chunk tlsTapperTlsChunk
|
||||
|
||||
if err := binary.Read(buffer, binary.LittleEndian, &chunk); err != nil {
|
||||
LogError(errors.Errorf("Error parsing chunk %v", err))
|
||||
@@ -129,7 +132,7 @@ func (p *tlsPoller) pollChunksPerfBuffer(chunks chan<- *tlsChunk) {
|
||||
}
|
||||
}
|
||||
|
||||
func (p *tlsPoller) handleTlsChunk(chunk *tlsChunk, extension *api.Extension, emitter api.Emitter,
|
||||
func (p *tlsPoller) handleTlsChunk(chunk *tlsTapperTlsChunk, extension *api.Extension, emitter api.Emitter,
|
||||
options *api.TrafficFilteringOptions, streamsMap api.TcpStreamMap) error {
|
||||
address, err := p.getSockfdAddressPair(chunk)
|
||||
|
||||
@@ -158,11 +161,11 @@ func (p *tlsPoller) handleTlsChunk(chunk *tlsChunk, extension *api.Extension, em
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *tlsPoller) startNewTlsReader(chunk *tlsChunk, address *addressPair, key string,
|
||||
func (p *tlsPoller) startNewTlsReader(chunk *tlsTapperTlsChunk, address *addressPair, key string,
|
||||
emitter api.Emitter, extension *api.Extension, options *api.TrafficFilteringOptions,
|
||||
streamsMap api.TcpStreamMap) *tlsReader {
|
||||
|
||||
tcpid := p.buildTcpId(chunk, address)
|
||||
tcpid := p.buildTcpId(address)
|
||||
|
||||
doneHandler := func(r *tlsReader) {
|
||||
p.closeReader(key, r)
|
||||
@@ -175,7 +178,7 @@ func (p *tlsPoller) startNewTlsReader(chunk *tlsChunk, address *addressPair, key
|
||||
|
||||
reader := &tlsReader{
|
||||
key: key,
|
||||
chunks: make(chan *tlsChunk, 1),
|
||||
chunks: make(chan *tlsTapperTlsChunk, 1),
|
||||
doneHandler: doneHandler,
|
||||
progress: &api.ReadProgress{},
|
||||
tcpID: &tcpid,
|
||||
@@ -198,7 +201,7 @@ func (p *tlsPoller) startNewTlsReader(chunk *tlsChunk, address *addressPair, key
|
||||
return reader
|
||||
}
|
||||
|
||||
func dissect(extension *api.Extension, reader *tlsReader, options *api.TrafficFilteringOptions) {
|
||||
func dissect(extension *api.Extension, reader api.TcpReader, options *api.TrafficFilteringOptions) {
|
||||
b := bufio.NewReader(reader)
|
||||
|
||||
err := extension.Dissector.Dissect(b, reader, options)
|
||||
@@ -213,7 +216,7 @@ func (p *tlsPoller) closeReader(key string, r *tlsReader) {
|
||||
p.closedReaders <- key
|
||||
}
|
||||
|
||||
func (p *tlsPoller) getSockfdAddressPair(chunk *tlsChunk) (addressPair, error) {
|
||||
func (p *tlsPoller) getSockfdAddressPair(chunk *tlsTapperTlsChunk) (addressPair, error) {
|
||||
address, err := getAddressBySockfd(p.procfs, chunk.Pid, chunk.Fd)
|
||||
fdCacheKey := fmt.Sprintf("%d:%d", chunk.Pid, chunk.Fd)
|
||||
|
||||
@@ -252,7 +255,7 @@ func buildTlsKey(address addressPair) string {
|
||||
return fmt.Sprintf("%s:%d>%s:%d", address.srcIp, address.srcPort, address.dstIp, address.dstPort)
|
||||
}
|
||||
|
||||
func (p *tlsPoller) buildTcpId(chunk *tlsChunk, address *addressPair) api.TcpID {
|
||||
func (p *tlsPoller) buildTcpId(address *addressPair) api.TcpID {
|
||||
return api.TcpID{
|
||||
SrcIP: address.srcIp.String(),
|
||||
DstIP: address.dstIp.String(),
|
||||
@@ -289,7 +292,7 @@ func (p *tlsPoller) clearPids() {
|
||||
})
|
||||
}
|
||||
|
||||
func (p *tlsPoller) logTls(chunk *tlsChunk, key string, reader *tlsReader) {
|
||||
func (p *tlsPoller) logTls(chunk *tlsTapperTlsChunk, key string, reader *tlsReader) {
|
||||
var flagsStr string
|
||||
|
||||
if chunk.isClient() {
|
||||
|
||||
@@ -28,7 +28,11 @@ func UpdateTapTargets(tls *TlsTapper, pods *[]v1.Pod, procfs string) error {
|
||||
tls.ClearPids()
|
||||
|
||||
for pid, pod := range containerPids {
|
||||
if err := tls.AddPid(procfs, pid, pod.Namespace); err != nil {
|
||||
if err := tls.AddSsllibPid(procfs, pid, pod.Namespace); err != nil {
|
||||
LogError(err)
|
||||
}
|
||||
|
||||
if err := tls.AddGoPid(procfs, pid, pod.Namespace); err != nil {
|
||||
LogError(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
|
||||
type tlsReader struct {
|
||||
key string
|
||||
chunks chan *tlsChunk
|
||||
chunks chan *tlsTapperTlsChunk
|
||||
seenChunks int
|
||||
data []byte
|
||||
doneHandler func(r *tlsReader)
|
||||
@@ -24,14 +24,14 @@ type tlsReader struct {
|
||||
reqResMatcher api.RequestResponseMatcher
|
||||
}
|
||||
|
||||
func (r *tlsReader) newChunk(chunk *tlsChunk) {
|
||||
func (r *tlsReader) newChunk(chunk *tlsTapperTlsChunk) {
|
||||
r.captureTime = time.Now()
|
||||
r.seenChunks = r.seenChunks + 1
|
||||
r.chunks <- chunk
|
||||
}
|
||||
|
||||
func (r *tlsReader) Read(p []byte) (int, error) {
|
||||
var chunk *tlsChunk
|
||||
var chunk *tlsTapperTlsChunk
|
||||
|
||||
for len(r.data) == 0 {
|
||||
var ok bool
|
||||
@@ -42,7 +42,7 @@ func (r *tlsReader) Read(p []byte) (int, error) {
|
||||
}
|
||||
|
||||
r.data = chunk.getRecordedData()
|
||||
case <-time.After(time.Second * 3):
|
||||
case <-time.After(time.Second * 120):
|
||||
r.doneHandler(r)
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package tlstapper
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/cilium/ebpf/rlimit"
|
||||
@@ -11,12 +12,13 @@ import (
|
||||
|
||||
const GLOABL_TAP_PID = 0
|
||||
|
||||
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go tlsTapper bpf/tls_tapper.c -- -O2 -g -D__TARGET_ARCH_x86
|
||||
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go@0d0727ef53e2f53b1731c73f4c61e0f58693083a -type tls_chunk tlsTapper bpf/tls_tapper.c -- -O2 -g -D__TARGET_ARCH_x86
|
||||
|
||||
type TlsTapper struct {
|
||||
bpfObjects tlsTapperObjects
|
||||
syscallHooks syscallHooks
|
||||
sslHooksStructs []sslHooks
|
||||
goHooksStructs []goHooks
|
||||
poller *tlsPoller
|
||||
bpfLogger *bpfLogger
|
||||
registeredPids sync.Map
|
||||
@@ -64,11 +66,20 @@ func (t *TlsTapper) PollForLogging() {
|
||||
t.bpfLogger.poll()
|
||||
}
|
||||
|
||||
func (t *TlsTapper) GlobalTap(sslLibrary string) error {
|
||||
return t.tapPid(GLOABL_TAP_PID, sslLibrary, api.UNKNOWN_NAMESPACE)
|
||||
func (t *TlsTapper) GlobalSsllibTap(sslLibrary string) error {
|
||||
return t.tapSsllibPid(GLOABL_TAP_PID, sslLibrary, api.UNKNOWN_NAMESPACE)
|
||||
}
|
||||
|
||||
func (t *TlsTapper) AddPid(procfs string, pid uint32, namespace string) error {
|
||||
func (t *TlsTapper) GlobalGoTap(procfs string, pid string) error {
|
||||
_pid, err := strconv.Atoi(pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return t.tapGoPid(procfs, uint32(_pid), api.UNKNOWN_NAMESPACE)
|
||||
}
|
||||
|
||||
func (t *TlsTapper) AddSsllibPid(procfs string, pid uint32, namespace string) error {
|
||||
sslLibrary, err := findSsllib(procfs, pid)
|
||||
|
||||
if err != nil {
|
||||
@@ -76,7 +87,11 @@ func (t *TlsTapper) AddPid(procfs string, pid uint32, namespace string) error {
|
||||
return nil // hide the error on purpose, it's OK for a process to not use libssl.so
|
||||
}
|
||||
|
||||
return t.tapPid(pid, sslLibrary, namespace)
|
||||
return t.tapSsllibPid(pid, sslLibrary, namespace)
|
||||
}
|
||||
|
||||
func (t *TlsTapper) AddGoPid(procfs string, pid uint32, namespace string) error {
|
||||
return t.tapGoPid(procfs, pid, namespace)
|
||||
}
|
||||
|
||||
func (t *TlsTapper) RemovePid(pid uint32) error {
|
||||
@@ -120,6 +135,10 @@ func (t *TlsTapper) Close() []error {
|
||||
errors = append(errors, sslHooks.close()...)
|
||||
}
|
||||
|
||||
for _, goHooks := range t.goHooksStructs {
|
||||
errors = append(errors, goHooks.close()...)
|
||||
}
|
||||
|
||||
if err := t.bpfLogger.close(); err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
@@ -141,7 +160,7 @@ func setupRLimit() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *TlsTapper) tapPid(pid uint32, sslLibrary string, namespace string) error {
|
||||
func (t *TlsTapper) tapSsllibPid(pid uint32, sslLibrary string, namespace string) error {
|
||||
logger.Log.Infof("Tapping TLS (pid: %v) (sslLibrary: %v)", pid, sslLibrary)
|
||||
|
||||
newSsl := sslHooks{}
|
||||
@@ -165,6 +184,35 @@ func (t *TlsTapper) tapPid(pid uint32, sslLibrary string, namespace string) erro
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *TlsTapper) tapGoPid(procfs string, pid uint32, namespace string) error {
|
||||
exePath, err := findLibraryByPid(procfs, pid, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hooks := goHooks{}
|
||||
|
||||
if err := hooks.installUprobes(&t.bpfObjects, exePath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Log.Infof("Tapping TLS (pid: %v) (Go: %v)", pid, exePath)
|
||||
|
||||
t.goHooksStructs = append(t.goHooksStructs, hooks)
|
||||
|
||||
t.poller.addPid(pid, namespace)
|
||||
|
||||
pids := t.bpfObjects.tlsTapperMaps.PidsMap
|
||||
|
||||
if err := pids.Put(pid, uint32(1)); err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
t.registeredPids.Store(pid, true)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func LogError(err error) {
|
||||
var e *errors.Error
|
||||
if errors.As(err, &e) {
|
||||
|
||||
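The updated //go:generate directive pins bpf2go to a specific commit and passes -type tls_chunk, which is why the generated files below now carry a tlsTapperTlsChunk mirror of the C struct. As a rough sketch of what the generated API gives the rest of the package (loadAndRegister is an invented name; loadTlsTapperObjects, tlsTapperObjects and PidsMap come from the generated bindings):

package tlstapper

// loadAndRegister loads the bpf2go-generated programs and maps, then marks one
// pid as tapped, the same way tapGoPid and tapSsllibPid do above.
func loadAndRegister(pid uint32) (*tlsTapperObjects, error) {
	objs := tlsTapperObjects{}

	// nil means default ebpf.CollectionOptions.
	if err := loadTlsTapperObjects(&objs, nil); err != nil {
		return nil, err
	}

	// pids_map is the BPF_HASH declared in maps.h; a value of 1 means "tap this pid".
	if err := objs.tlsTapperMaps.PidsMap.Put(pid, uint32(1)); err != nil {
		return nil, err
	}

	return &objs, nil
}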
@@ -1,4 +1,5 @@
|
||||
// Code generated by bpf2go; DO NOT EDIT.
|
||||
//go:build arm64be || armbe || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64
|
||||
// +build arm64be armbe mips mips64 mips64p32 ppc64 s390 s390x sparc sparc64
|
||||
|
||||
package tlstapper
|
||||
@@ -12,6 +13,18 @@ import (
|
||||
"github.com/cilium/ebpf"
|
||||
)
|
||||
|
||||
type tlsTapperTlsChunk struct {
|
||||
Pid uint32
|
||||
Tgid uint32
|
||||
Len uint32
|
||||
Start uint32
|
||||
Recorded uint32
|
||||
Fd uint32
|
||||
Flags uint32
|
||||
Address [16]uint8
|
||||
Data [4096]uint8
|
||||
}
|
||||
|
||||
// loadTlsTapper returns the embedded CollectionSpec for tlsTapper.
|
||||
func loadTlsTapper() (*ebpf.CollectionSpec, error) {
|
||||
reader := bytes.NewReader(_TlsTapperBytes)
|
||||
@@ -53,20 +66,24 @@ type tlsTapperSpecs struct {
|
||||
//
|
||||
// It can be passed ebpf.CollectionSpec.Assign.
|
||||
type tlsTapperProgramSpecs struct {
|
||||
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
|
||||
GoCryptoTlsRead *ebpf.ProgramSpec `ebpf:"go_crypto_tls_read"`
|
||||
GoCryptoTlsReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_read_ex"`
|
||||
GoCryptoTlsWrite *ebpf.ProgramSpec `ebpf:"go_crypto_tls_write"`
|
||||
GoCryptoTlsWriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_write_ex"`
|
||||
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
|
||||
}
|
||||
|
||||
// tlsTapperMapSpecs contains maps before they are loaded into the kernel.
|
||||
@@ -77,11 +94,13 @@ type tlsTapperMapSpecs struct {
|
||||
ChunksBuffer *ebpf.MapSpec `ebpf:"chunks_buffer"`
|
||||
ConnectSyscallInfo *ebpf.MapSpec `ebpf:"connect_syscall_info"`
|
||||
FileDescriptorToIpv4 *ebpf.MapSpec `ebpf:"file_descriptor_to_ipv4"`
|
||||
GoReadContext *ebpf.MapSpec `ebpf:"go_read_context"`
|
||||
GoWriteContext *ebpf.MapSpec `ebpf:"go_write_context"`
|
||||
Heap *ebpf.MapSpec `ebpf:"heap"`
|
||||
LogBuffer *ebpf.MapSpec `ebpf:"log_buffer"`
|
||||
OpensslReadContext *ebpf.MapSpec `ebpf:"openssl_read_context"`
|
||||
OpensslWriteContext *ebpf.MapSpec `ebpf:"openssl_write_context"`
|
||||
PidsMap *ebpf.MapSpec `ebpf:"pids_map"`
|
||||
SslReadContext *ebpf.MapSpec `ebpf:"ssl_read_context"`
|
||||
SslWriteContext *ebpf.MapSpec `ebpf:"ssl_write_context"`
|
||||
}
|
||||
|
||||
// tlsTapperObjects contains all objects after they have been loaded into the kernel.
|
||||
@@ -107,11 +126,13 @@ type tlsTapperMaps struct {
|
||||
ChunksBuffer *ebpf.Map `ebpf:"chunks_buffer"`
|
||||
ConnectSyscallInfo *ebpf.Map `ebpf:"connect_syscall_info"`
|
||||
FileDescriptorToIpv4 *ebpf.Map `ebpf:"file_descriptor_to_ipv4"`
|
||||
GoReadContext *ebpf.Map `ebpf:"go_read_context"`
|
||||
GoWriteContext *ebpf.Map `ebpf:"go_write_context"`
|
||||
Heap *ebpf.Map `ebpf:"heap"`
|
||||
LogBuffer *ebpf.Map `ebpf:"log_buffer"`
|
||||
OpensslReadContext *ebpf.Map `ebpf:"openssl_read_context"`
|
||||
OpensslWriteContext *ebpf.Map `ebpf:"openssl_write_context"`
|
||||
PidsMap *ebpf.Map `ebpf:"pids_map"`
|
||||
SslReadContext *ebpf.Map `ebpf:"ssl_read_context"`
|
||||
SslWriteContext *ebpf.Map `ebpf:"ssl_write_context"`
|
||||
}
|
||||
|
||||
func (m *tlsTapperMaps) Close() error {
|
||||
@@ -120,11 +141,13 @@ func (m *tlsTapperMaps) Close() error {
|
||||
m.ChunksBuffer,
|
||||
m.ConnectSyscallInfo,
|
||||
m.FileDescriptorToIpv4,
|
||||
m.GoReadContext,
|
||||
m.GoWriteContext,
|
||||
m.Heap,
|
||||
m.LogBuffer,
|
||||
m.OpensslReadContext,
|
||||
m.OpensslWriteContext,
|
||||
m.PidsMap,
|
||||
m.SslReadContext,
|
||||
m.SslWriteContext,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -132,24 +155,32 @@ func (m *tlsTapperMaps) Close() error {
|
||||
//
|
||||
// It can be passed to loadTlsTapperObjects or ebpf.CollectionSpec.LoadAndAssign.
|
||||
type tlsTapperPrograms struct {
|
||||
SslRead *ebpf.Program `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.Program `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
|
||||
GoCryptoTlsRead *ebpf.Program `ebpf:"go_crypto_tls_read"`
|
||||
GoCryptoTlsReadEx *ebpf.Program `ebpf:"go_crypto_tls_read_ex"`
|
||||
GoCryptoTlsWrite *ebpf.Program `ebpf:"go_crypto_tls_write"`
|
||||
GoCryptoTlsWriteEx *ebpf.Program `ebpf:"go_crypto_tls_write_ex"`
|
||||
SslRead *ebpf.Program `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.Program `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
|
||||
}
|
||||
|
||||
func (p *tlsTapperPrograms) Close() error {
|
||||
return _TlsTapperClose(
|
||||
p.GoCryptoTlsRead,
|
||||
p.GoCryptoTlsReadEx,
|
||||
p.GoCryptoTlsWrite,
|
||||
p.GoCryptoTlsWriteEx,
|
||||
p.SslRead,
|
||||
p.SslReadEx,
|
||||
p.SslRetRead,
|
||||
|
||||
Binary file not shown.
@@ -1,4 +1,5 @@
|
||||
// Code generated by bpf2go; DO NOT EDIT.
|
||||
//go:build 386 || amd64 || amd64p32 || arm || arm64 || mips64le || mips64p32le || mipsle || ppc64le || riscv64
|
||||
// +build 386 amd64 amd64p32 arm arm64 mips64le mips64p32le mipsle ppc64le riscv64
|
||||
|
||||
package tlstapper
|
||||
@@ -12,6 +13,18 @@ import (
|
||||
"github.com/cilium/ebpf"
|
||||
)
|
||||
|
||||
type tlsTapperTlsChunk struct {
|
||||
Pid uint32
|
||||
Tgid uint32
|
||||
Len uint32
|
||||
Start uint32
|
||||
Recorded uint32
|
||||
Fd uint32
|
||||
Flags uint32
|
||||
Address [16]uint8
|
||||
Data [4096]uint8
|
||||
}
|
||||
|
||||
// loadTlsTapper returns the embedded CollectionSpec for tlsTapper.
|
||||
func loadTlsTapper() (*ebpf.CollectionSpec, error) {
|
||||
reader := bytes.NewReader(_TlsTapperBytes)
|
||||
@@ -53,20 +66,24 @@ type tlsTapperSpecs struct {
|
||||
//
|
||||
// It can be passed ebpf.CollectionSpec.Assign.
|
||||
type tlsTapperProgramSpecs struct {
|
||||
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
|
||||
GoCryptoTlsRead *ebpf.ProgramSpec `ebpf:"go_crypto_tls_read"`
|
||||
GoCryptoTlsReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_read_ex"`
|
||||
GoCryptoTlsWrite *ebpf.ProgramSpec `ebpf:"go_crypto_tls_write"`
|
||||
GoCryptoTlsWriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_write_ex"`
|
||||
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
|
||||
}
|
||||
|
||||
// tlsTapperMapSpecs contains maps before they are loaded into the kernel.
|
||||
@@ -77,11 +94,13 @@ type tlsTapperMapSpecs struct {
|
||||
ChunksBuffer *ebpf.MapSpec `ebpf:"chunks_buffer"`
|
||||
ConnectSyscallInfo *ebpf.MapSpec `ebpf:"connect_syscall_info"`
|
||||
FileDescriptorToIpv4 *ebpf.MapSpec `ebpf:"file_descriptor_to_ipv4"`
|
||||
GoReadContext *ebpf.MapSpec `ebpf:"go_read_context"`
|
||||
GoWriteContext *ebpf.MapSpec `ebpf:"go_write_context"`
|
||||
Heap *ebpf.MapSpec `ebpf:"heap"`
|
||||
LogBuffer *ebpf.MapSpec `ebpf:"log_buffer"`
|
||||
OpensslReadContext *ebpf.MapSpec `ebpf:"openssl_read_context"`
|
||||
OpensslWriteContext *ebpf.MapSpec `ebpf:"openssl_write_context"`
|
||||
PidsMap *ebpf.MapSpec `ebpf:"pids_map"`
|
||||
SslReadContext *ebpf.MapSpec `ebpf:"ssl_read_context"`
|
||||
SslWriteContext *ebpf.MapSpec `ebpf:"ssl_write_context"`
|
||||
}
|
||||
|
||||
// tlsTapperObjects contains all objects after they have been loaded into the kernel.
|
||||
@@ -107,11 +126,13 @@ type tlsTapperMaps struct {
|
||||
ChunksBuffer *ebpf.Map `ebpf:"chunks_buffer"`
|
||||
ConnectSyscallInfo *ebpf.Map `ebpf:"connect_syscall_info"`
|
||||
FileDescriptorToIpv4 *ebpf.Map `ebpf:"file_descriptor_to_ipv4"`
|
||||
GoReadContext *ebpf.Map `ebpf:"go_read_context"`
|
||||
GoWriteContext *ebpf.Map `ebpf:"go_write_context"`
|
||||
Heap *ebpf.Map `ebpf:"heap"`
|
||||
LogBuffer *ebpf.Map `ebpf:"log_buffer"`
|
||||
OpensslReadContext *ebpf.Map `ebpf:"openssl_read_context"`
|
||||
OpensslWriteContext *ebpf.Map `ebpf:"openssl_write_context"`
|
||||
PidsMap *ebpf.Map `ebpf:"pids_map"`
|
||||
SslReadContext *ebpf.Map `ebpf:"ssl_read_context"`
|
||||
SslWriteContext *ebpf.Map `ebpf:"ssl_write_context"`
|
||||
}
|
||||
|
||||
func (m *tlsTapperMaps) Close() error {
|
||||
@@ -120,11 +141,13 @@ func (m *tlsTapperMaps) Close() error {
|
||||
m.ChunksBuffer,
|
||||
m.ConnectSyscallInfo,
|
||||
m.FileDescriptorToIpv4,
|
||||
m.GoReadContext,
|
||||
m.GoWriteContext,
|
||||
m.Heap,
|
||||
m.LogBuffer,
|
||||
m.OpensslReadContext,
|
||||
m.OpensslWriteContext,
|
||||
m.PidsMap,
|
||||
m.SslReadContext,
|
||||
m.SslWriteContext,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -132,24 +155,32 @@ func (m *tlsTapperMaps) Close() error {
|
||||
//
|
||||
// It can be passed to loadTlsTapperObjects or ebpf.CollectionSpec.LoadAndAssign.
|
||||
type tlsTapperPrograms struct {
|
||||
SslRead *ebpf.Program `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.Program `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
|
||||
GoCryptoTlsRead *ebpf.Program `ebpf:"go_crypto_tls_read"`
|
||||
GoCryptoTlsReadEx *ebpf.Program `ebpf:"go_crypto_tls_read_ex"`
|
||||
GoCryptoTlsWrite *ebpf.Program `ebpf:"go_crypto_tls_write"`
|
||||
GoCryptoTlsWriteEx *ebpf.Program `ebpf:"go_crypto_tls_write_ex"`
|
||||
SslRead *ebpf.Program `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.Program `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
|
||||
}
|
||||
|
||||
func (p *tlsTapperPrograms) Close() error {
|
||||
return _TlsTapperClose(
|
||||
p.GoCryptoTlsRead,
|
||||
p.GoCryptoTlsReadEx,
|
||||
p.GoCryptoTlsWrite,
|
||||
p.GoCryptoTlsWriteEx,
|
||||
p.SslRead,
|
||||
p.SslReadEx,
|
||||
p.SslRetRead,
|
||||
|
||||
Binary file not shown.
14322
ui-common/package-lock.json
generated
File diff suppressed because it is too large
@@ -24,63 +24,63 @@
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@craco/craco": "^6.4.3",
|
||||
"@material-ui/core": "^4.11.3",
|
||||
"@material-ui/icons": "^4.11.2",
|
||||
"@material-ui/lab": "^4.0.0-alpha.60",
|
||||
"@types/jest": "^26.0.22",
|
||||
"@types/node": "^12.20.10",
|
||||
"node-sass": "^6.0.0",
|
||||
"@types/jest": "^26.0.24",
|
||||
"@types/node": "^12.20.54",
|
||||
"sass": "^1.52.3",
|
||||
"react": "^17.0.2",
|
||||
"react-copy-to-clipboard": "^5.0.3",
|
||||
"react-copy-to-clipboard": "^5.1.0",
|
||||
"react-dom": "^17.0.2",
|
||||
"recoil": "^0.5.2"
|
||||
"recoil": "^0.7.2"
|
||||
},
|
||||
"dependencies": {
|
||||
"@craco/craco": "^6.4.3",
|
||||
"@types/lodash": "^4.14.179",
|
||||
"@uiw/react-textarea-code-editor": "^1.4.12",
|
||||
"axios": "^0.25.0",
|
||||
"core-js": "^3.20.2",
|
||||
"highlight.js": "^11.3.1",
|
||||
"@emotion/react": "^11.9.0",
|
||||
"@emotion/styled": "^11.8.1",
|
||||
"@mui/icons-material": "^5.8.2",
|
||||
"@mui/lab": "^5.0.0-alpha.84",
|
||||
"@mui/material": "^5.8.2",
|
||||
"@mui/styles": "^5.8.0",
|
||||
"@types/lodash": "^4.14.182",
|
||||
"@uiw/react-textarea-code-editor": "^1.6.0",
|
||||
"axios": "^0.27.2",
|
||||
"core-js": "^3.22.7",
|
||||
"highlight.js": "^11.5.1",
|
||||
"json-beautify": "^1.1.1",
|
||||
"jsonpath": "^1.1.1",
|
||||
"marked": "^4.0.10",
|
||||
"material-ui-popup-state": "^2.0.0",
|
||||
"mobx": "^6.3.10",
|
||||
"moment": "^2.29.1",
|
||||
"node-fetch": "^3.1.1",
|
||||
"marked": "^4.0.16",
|
||||
"material-ui-popup-state": "^2.0.1",
|
||||
"mobx": "^6.6.0",
|
||||
"moment": "^2.29.3",
|
||||
"node-fetch": "^3.2.4",
|
||||
"numeral": "^2.0.6",
|
||||
"protobuf-decoder": "^0.1.2",
|
||||
"react-graph-vis": "^1.0.7",
|
||||
"react-lowlight": "^3.0.0",
|
||||
"react-router-dom": "^6.2.1",
|
||||
"react-router-dom": "^6.3.0",
|
||||
"react-scrollable-feed-virtualized": "^1.4.9",
|
||||
"react-syntax-highlighter": "^15.4.3",
|
||||
"react-toastify": "^8.0.3",
|
||||
"redoc": "^2.0.0-rc.59",
|
||||
"styled-components": "^5.3.3",
|
||||
"web-vitals": "^1.1.1",
|
||||
"xml-formatter": "^2.6.0"
|
||||
"react-syntax-highlighter": "^15.5.0",
|
||||
"react-toastify": "^8.2.0",
|
||||
"redoc": "^2.0.0-rc.71",
|
||||
"styled-components": "^5.3.5",
|
||||
"web-vitals": "^2.1.4",
|
||||
"xml-formatter": "^2.6.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@rollup/plugin-node-resolve": "^13.1.3",
|
||||
"@rollup/plugin-node-resolve": "^13.3.0",
|
||||
"@svgr/rollup": "^6.2.1",
|
||||
"@testing-library/jest-dom": "^4.2.4",
|
||||
"@testing-library/react": "^9.5.0",
|
||||
"@testing-library/user-event": "^7.2.1",
|
||||
"cross-env": "^7.0.2",
|
||||
"env-cmd": "^10.0.1",
|
||||
"gh-pages": "^2.2.0",
|
||||
"microbundle-crl": "^0.13.10",
|
||||
"cross-env": "^7.0.3",
|
||||
"env-cmd": "^10.1.0",
|
||||
"gh-pages": "^4.0.0",
|
||||
"microbundle-crl": "^0.13.11",
|
||||
"npm-run-all": "^4.1.5",
|
||||
"prettier": "^2.0.4",
|
||||
"rollup-plugin-import-css": "^3.0.2",
|
||||
"rollup-plugin-postcss": "^4.0.2",
|
||||
"rollup-plugin-sass": "^1.2.10",
|
||||
"rollup-plugin-scss": "^3.0.0",
|
||||
"prettier": "^2.6.2",
|
||||
"react": "^17.0.2",
|
||||
"react-dom": "^17.0.2",
|
||||
"typescript": "^4.2.4"
|
||||
"rollup-plugin-import-css": "^3.0.3",
|
||||
"rollup-plugin-postcss": "^4.0.2",
|
||||
"rollup-plugin-sass": "^1.2.12",
|
||||
"rollup-plugin-scss": "^3.0.0",
|
||||
"typescript": "^4.7.2"
|
||||
},
|
||||
"eslintConfig": {
|
||||
"extends": [
|
||||
|
||||
@@ -1,86 +0,0 @@
import {Button} from "@material-ui/core";
import React from "react";
import logo_up9 from "logo_up9.svg";
import {makeStyles} from "@material-ui/core/styles";
import { Tooltip } from "../UI";

const useStyles = makeStyles(() => ({
    tooltip: {
        backgroundColor: "#3868dc",
        color: "white",
        fontSize: 13,
    },
}));

interface AnalyseButtonProps {
    analyzeStatus: any
}

export const AnalyzeButton: React.FC<AnalyseButtonProps> = ({analyzeStatus}) => {

    const classes = useStyles();

    const analysisMessage = analyzeStatus?.isRemoteReady ?
        <span>
            <table>
                <tr>
                    <td>Status</td>
                    <td><b>Available</b></td>
                </tr>
                <tr>
                    <td>Messages</td>
                    <td><b>{analyzeStatus?.sentCount}</b></td>
                </tr>
            </table>
        </span> :
        analyzeStatus?.sentCount > 0 ?
            <span>
                <table>
                    <tr>
                        <td>Status</td>
                        <td><b>Processing</b></td>
                    </tr>
                    <tr>
                        <td>Messages</td>
                        <td><b>{analyzeStatus?.sentCount}</b></td>
                    </tr>
                    <tr>
                        <td colSpan={2}> Please allow a few minutes for the analysis to complete</td>
                    </tr>
                </table>
            </span> :
            <span>
                <table>
                    <tr>
                        <td>Status</td>
                        <td><b>Waiting for traffic</b></td>
                    </tr>
                    <tr>
                        <td>Messages</td>
                        <td><b>{analyzeStatus?.sentCount}</b></td>
                    </tr>
                </table>
            </span>

    return ( <div>
        <Tooltip title={analysisMessage} isSimple classes={classes}>
            <div>
                <Button
                    style={{fontFamily: "system-ui",
                        fontWeight: 600,
                        fontSize: 12,
                        padding: 8}}
                    size={"small"}
                    variant="contained"
                    color="primary"
                    startIcon={<img style={{height: 24, maxHeight: "none", maxWidth: "none"}} src={logo_up9} alt={"up9"}/>}
                    disabled={!analyzeStatus?.isRemoteReady}
                    onClick={() => {
                        window.open(analyzeStatus?.remoteUrl)
                    }}>
                    Analysis
                </Button>
            </div>
        </Tooltip>
    </div>);
}
@@ -1,39 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" id="prefix__logo" width="148" height="88" viewBox="0 0 148 88">
  [deleted SVG asset: UP9 logo artwork — style definitions and vector path data]
</svg>

Before Width: | Height: | Size: 11 KiB |
@@ -1,15 +1,15 @@
import React, {useCallback, useEffect, useMemo, useState} from "react";
import styles from '../style/EntriesList.module.sass';
import styles from './EntriesList.module.sass';
import ScrollableFeedVirtualized from "react-scrollable-feed-virtualized";
import Moment from 'moment';
import {EntryItem} from "./EntryListItem/EntryListItem";
import {EntryItem} from "../EntryListItem/EntryListItem";
import down from "assets/downImg.svg";
import spinner from 'assets/spinner.svg';
import {RecoilState, useRecoilState, useRecoilValue, useSetRecoilState} from "recoil";
import entriesAtom from "../../recoil/entries";
import queryAtom from "../../recoil/query";
import TrafficViewerApiAtom from "../../recoil/TrafficViewerApi";
import TrafficViewerApi from "./TrafficViewerApi";
import TrafficViewerApi from "../TrafficViewer/TrafficViewerApi";
import focusedEntryIdAtom from "../../recoil/focusedEntryId";
import {toast} from "react-toastify";
import {MAX_ENTRIES, TOAST_CONTAINER_ID} from "../../configs/Consts";
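The EntriesList hunk above keeps the list state in Recoil (entriesAtom, queryAtom, focusedEntryIdAtom), now pinned to recoil ^0.7.2 by the package bump. A minimal sketch of what one of those atom modules presumably looks like — the key and default value here are illustrative assumptions, not taken from the repository:

import { atom } from "recoil";

// Hypothetical shape of ../../recoil/query/atom: a single string atom holding the current filter query.
const queryAtom = atom<string>({
  key: "query",
  default: "",
});

export default queryAtom;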
Before Width: | Height: | Size: 301 B After Width: | Height: | Size: 301 B |
Before Width: | Height: | Size: 673 B After Width: | Height: | Size: 673 B |
@@ -1,13 +1,13 @@
import React, { useEffect, useState } from "react";
import EntryViewer from "./EntryDetailed/EntryViewer";
import { EntryItem } from "./EntryListItem/EntryListItem";
import { makeStyles } from "@material-ui/core";
import Protocol from "../UI/Protocol"
import Queryable from "../UI/Queryable";
import EntryViewer from "./EntryViewer/EntryViewer";
import { EntryItem } from "../EntryListItem/EntryListItem";
import makeStyles from '@mui/styles/makeStyles';
import Protocol from "../UI/Protocol/Protocol"
import Queryable from "../UI/Queryable/Queryable";
import { toast } from "react-toastify";
import { RecoilState, useRecoilValue } from "recoil";
import focusedEntryIdAtom from "../../recoil/focusedEntryId";
import TrafficViewerApi from "./TrafficViewerApi";
import TrafficViewerApi from "../TrafficViewer/TrafficViewerApi";
import TrafficViewerApiAtom from "../../recoil/TrafficViewerApi/atom";
import queryAtom from "../../recoil/query/atom";
import useWindowDimensions, { useRequestTextByWidth } from "../../hooks/WindowDimensionsHook";
@@ -1,10 +1,10 @@
import styles from "./EntrySections.module.sass";
import React, { useCallback, useEffect, useMemo, useState } from "react";
import { SyntaxHighlighter } from "../../UI/SyntaxHighlighter/index";
import CollapsibleContainer from "../../UI/CollapsibleContainer";
import FancyTextDisplay from "../../UI/FancyTextDisplay";
import Queryable from "../../UI/Queryable";
import Checkbox from "../../UI/Checkbox";
import { SyntaxHighlighter } from "../../UI/SyntaxHighlighter";
import CollapsibleContainer from "../../UI/CollapsibleContainer/CollapsibleContainer";
import FancyTextDisplay from "../../UI/FancyTextDisplay/FancyTextDisplay";
import Queryable from "../../UI/Queryable/Queryable";
import Checkbox from "../../UI/Checkbox/Checkbox";
import ProtobufDecoder from "protobuf-decoder";
import { default as jsonBeautify } from "json-beautify";
import { default as xmlBeautify } from "xml-formatter";
@@ -192,17 +192,17 @@ export const EntryBodySection: React.FC<EntryBodySectionProps> = ({
>
<div style={{ display: 'flex', alignItems: 'center', alignContent: 'center', margin: "5px 0" }}>
{supportsPrettying && <div style={{ paddingTop: 3 }}>
<Checkbox checked={isPretty} onToggle={() => { setIsPretty(!isPretty) }} />
<Checkbox checked={isPretty} onToggle={() => { setIsPretty(!isPretty) }} data-cy="prettyCheckBoxInput"/>
</div>}
{supportsPrettying && <span style={{ marginLeft: '.2rem' }}>Pretty</span>}

<div style={{ paddingTop: 3, paddingLeft: supportsPrettying ? 20 : 0 }}>
<Checkbox checked={showLineNumbers} onToggle={() => { setShowLineNumbers(!showLineNumbers) }} disabled={!isLineNumbersGreaterThenOne || !decodeBase64} />
<Checkbox checked={showLineNumbers} onToggle={() => { setShowLineNumbers(!showLineNumbers) }} disabled={!isLineNumbersGreaterThenOne || !decodeBase64} data-cy="lineNumbersCheckBoxInput"/>
</div>
<span style={{ marginLeft: '.2rem' }}>Line numbers</span>

{isBase64Encoding && <div style={{ paddingTop: 3, paddingLeft: (isLineNumbersGreaterThenOne || supportsPrettying) ? 20 : 0 }}>
<Checkbox checked={decodeBase64} onToggle={() => { setDecodeBase64(!decodeBase64) }} />
<Checkbox checked={decodeBase64} onToggle={() => { setDecodeBase64(!decodeBase64) }} data-cy="decodeBase64CheckboxInput"/>
</div>}
{isBase64Encoding && <span style={{ marginLeft: '.2rem' }}>Decode Base64</span>}
{!isDecodeGrpc && <span style={{ fontSize: '12px', color: '#DB2156', marginLeft: '.8rem' }}>More than one message in protobuf payload is not supported</span>}
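The only functional change in this hunk is the data-cy attribute added to each Checkbox, which looks like a hook for the Cypress acceptance tests. Assuming the Checkbox component forwards data-cy to the element it renders, a spec could drive these controls roughly as follows (an illustrative sketch, not a test taken from the repository):

describe("entry body section checkboxes", () => {
  it("toggles pretty-printing and line numbers", () => {
    // Selector values come from the data-cy attributes added in this hunk.
    cy.get('[data-cy="prettyCheckBoxInput"]').click();
    cy.get('[data-cy="lineNumbersCheckBoxInput"]').click();
    cy.get('[data-cy="decodeBase64CheckboxInput"]').should("exist");
  });
});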
Some files were not shown because too many files have changed in this diff.