Compare commits

..

5 Commits

Author     | SHA1       | Message                                                                                    | Date
Igor Gov   | f102031c45 | Hotfix: mizu install command should be namespace aware #patch                             | 2022-06-13 15:57:12 +03:00
gadotroee  | a5ddb162e8 | Develop -> main (release 33.0): Merge pull request #1128 from up9inc/develop #major       | 2022-06-06 10:49:08 +03:00
Igor Gov   | 1fcc22c356 | Develop -> main (release 32.0): Merge pull request #1073 from up9inc/develop #major       | 2022-05-09 15:08:28 +03:00
Igor Gov   | 960d39f27d | Develop -> main (Patch release 31.1): Merge pull request #1041 from up9inc/develop #patch | 2022-04-25 12:02:38 +03:00
gadotroee  | 0709b861d6 | Develop -> main (Release 31.0): Merge pull request #1036 from up9inc/develop #major       | 2022-04-24 15:06:44 +03:00
193 changed files with 9355 additions and 11183 deletions

View File

@@ -26,7 +26,6 @@ jobs:
run: |
sudo apt update
sudo apt install -y libpcap-dev
./devops/install-capstone.sh
- name: Check Agent modified files
id: agent_modified_files
@@ -38,7 +37,7 @@ jobs:
with:
version: latest
working-directory: agent
args: --timeout=10m
args: --timeout=3m
- name: Check shared modified files
id: shared_modified_files
@@ -50,7 +49,7 @@ jobs:
with:
version: latest
working-directory: shared
args: --timeout=10m
args: --timeout=3m
- name: Check tap modified files
id: tap_modified_files
@@ -62,7 +61,7 @@ jobs:
with:
version: latest
working-directory: tap
args: --timeout=10m
args: --timeout=3m
- name: Check cli modified files
id: cli_modified_files
@@ -74,7 +73,7 @@ jobs:
with:
version: latest
working-directory: cli
args: --timeout=10m
args: --timeout=3m
- name: Check acceptanceTests modified files
id: acceptanceTests_modified_files
@@ -86,7 +85,7 @@ jobs:
with:
version: latest
working-directory: acceptanceTests
args: --timeout=10m
args: --timeout=3m
- name: Check tap/api modified files
id: tap_api_modified_files

View File

@@ -35,11 +35,6 @@ jobs:
run: |
sudo apt-get install libpcap-dev
- name: Install Capstone
shell: bash
run: |
./devops/install-capstone.sh
- name: Check CLI modified files
id: cli_modified_files
run: devops/check_modified_files.sh cli/

View File

@@ -25,46 +25,29 @@ RUN npm run build
### Base builder image for native builds architecture
FROM golang:1.17-alpine AS builder-native-base
ENV CGO_ENABLED=1 GOOS=linux
RUN apk add --no-cache \
libpcap-dev \
g++ \
perl-utils \
curl \
build-base \
binutils-gold \
bash \
clang \
llvm \
libbpf-dev \
linux-headers
COPY devops/install-capstone.sh .
RUN ./install-capstone.sh
RUN apk add --no-cache libpcap-dev g++ perl-utils
### Intermediate builder image for x86-64 to x86-64 native builds
FROM builder-native-base AS builder-from-amd64-to-amd64
ENV GOARCH=amd64
ENV BPF_TARGET=amd64 BPF_CFLAGS="-O2 -g -D__TARGET_ARCH_x86"
### Intermediate builder image for AArch64 to AArch64 native builds
FROM builder-native-base AS builder-from-arm64v8-to-arm64v8
ENV GOARCH=arm64
ENV BPF_TARGET=arm64 BPF_CFLAGS="-O2 -g -D__TARGET_ARCH_arm64"
### Builder image for x86-64 to AArch64 cross-compilation
FROM up9inc/linux-arm64-musl-go-libpcap-capstone-bpf AS builder-from-amd64-to-arm64v8
FROM up9inc/linux-arm64-musl-go-libpcap AS builder-from-amd64-to-arm64v8
ENV CGO_ENABLED=1 GOOS=linux
ENV GOARCH=arm64 CGO_CFLAGS="-I/work/libpcap -I/work/capstone/include"
ENV BPF_TARGET=arm64 BPF_CFLAGS="-O2 -g -D__TARGET_ARCH_arm64 -I/usr/xcc/aarch64-linux-musl-cross/aarch64-linux-musl/include/"
ENV GOARCH=arm64 CGO_CFLAGS="-I/work/libpcap"
### Builder image for AArch64 to x86-64 cross-compilation
FROM up9inc/linux-x86_64-musl-go-libpcap-capstone-bpf AS builder-from-arm64v8-to-amd64
FROM up9inc/linux-x86_64-musl-go-libpcap AS builder-from-arm64v8-to-amd64
ENV CGO_ENABLED=1 GOOS=linux
ENV GOARCH=amd64 CGO_CFLAGS="-I/libpcap -I/capstone/include"
ENV BPF_TARGET=amd64 BPF_CFLAGS="-O2 -g -D__TARGET_ARCH_x86 -I/usr/local/musl/x86_64-unknown-linux-musl/include/"
ENV GOARCH=amd64 CGO_CFLAGS="-I/libpcap"
### Final builder image where the build happens
@@ -103,11 +86,6 @@ ARG GIT_BRANCH
ARG BUILD_TIMESTAMP
ARG VER=0.0
WORKDIR /app/tap/tlstapper
RUN rm tlstapper_bpf*
RUN GOARCH=${BUILDARCH} go generate tls_tapper.go
WORKDIR /app/agent-build
RUN go build -ldflags="-extldflags=-static -s -w \
@@ -117,8 +95,8 @@ RUN go build -ldflags="-extldflags=-static -s -w \
-X 'github.com/up9inc/mizu/agent/pkg/version.Ver=${VER}'" -o mizuagent .
# Download Basenine executable, verify the sha1sum
ADD https://github.com/up9inc/basenine/releases/download/v0.8.3/basenine_linux_${GOARCH} ./basenine_linux_${GOARCH}
ADD https://github.com/up9inc/basenine/releases/download/v0.8.3/basenine_linux_${GOARCH}.sha256 ./basenine_linux_${GOARCH}.sha256
ADD https://github.com/up9inc/basenine/releases/download/v0.8.2/basenine_linux_${GOARCH} ./basenine_linux_${GOARCH}
ADD https://github.com/up9inc/basenine/releases/download/v0.8.2/basenine_linux_${GOARCH}.sha256 ./basenine_linux_${GOARCH}.sha256
RUN shasum -a 256 -c basenine_linux_"${GOARCH}".sha256 && \
chmod +x ./basenine_linux_"${GOARCH}" && \

View File

@@ -1,3 +1,2 @@
test: ## Run acceptance tests.
@npm install cypress@10.0.1 -y
@go test ./... -timeout 1h -v

View File

@@ -1,31 +0,0 @@
const { defineConfig } = require('cypress')
module.exports = defineConfig({
watchForFileChanges: false,
viewportWidth: 1920,
viewportHeight: 1080,
video: false,
screenshotOnRunFailure: false,
defaultCommandTimeout: 6000,
env: {
testUrl: 'http://localhost:8899/',
redactHeaderContent: 'User-Header[REDACTED]',
redactBodyContent: '{ "User": "[REDACTED]" }',
regexMaskingBodyContent: '[REDACTED]',
greenFilterColor: 'rgb(210, 250, 210)',
redFilterColor: 'rgb(250, 214, 220)',
bodyJsonClass: '.hljs',
mizuWidth: 1920,
normalMizuHeight: 1080,
hugeMizuHeight: 3500,
},
e2e: {
// We've imported your old cypress plugins here.
// You may want to clean this up later by importing these.
// setupNodeEvents(on, config) {
// return require('./cypress/plugins/index.js')(on, config)
// },
specPattern: 'cypress/e2e/tests/*.js',
supportFile: false
},
})

View File

@@ -0,0 +1,34 @@
{
"watchForFileChanges":false,
"viewportWidth": 1920,
"viewportHeight": 1080,
"video": false,
"screenshotOnRunFailure": false,
"defaultCommandTimeout": 6000,
"testFiles": [
"tests/GuiPort.js",
"tests/MultipleNamespaces.js",
"tests/Redact.js",
"tests/NoRedact.js",
"tests/Regex.js",
"tests/RegexMasking.js",
"tests/IgnoredUserAgents.js",
"tests/UiTest.js",
"tests/Redis.js",
"tests/Rabbit.js",
"tests/serviceMapFunction.js"
],
"env": {
"testUrl": "http://localhost:8899/",
"redactHeaderContent": "User-Header[REDACTED]",
"redactBodyContent": "{ \"User\": \"[REDACTED]\" }",
"regexMaskingBodyContent": "[REDACTED]",
"greenFilterColor": "rgb(210, 250, 210)",
"redFilterColor": "rgb(250, 214, 220)",
"bodyJsonClass": ".hljs",
"mizuWidth": 1920,
"normalMizuHeight": 1080,
"hugeMizuHeight": 3500
}
}

View File

@@ -4,6 +4,8 @@ export const valueTabs = {
none: null
}
const maxEntriesInDom = 13;
export function isValueExistsInElement(shouldInclude, content, domPathToContainer){
it(`should ${shouldInclude ? '' : 'not'} include '${content}'`, function () {
cy.get(domPathToContainer).then(htmlText => {

View File

@@ -269,7 +269,7 @@ function checkRightSideResponseBody() {
const responseBody = JSON.parse(decodedBody);
const expectedJsonBody = {
const expectdJsonBody = {
args: RegExp({}),
url: RegExp('http://.*/get'),
headers: {
@@ -279,24 +279,27 @@ function checkRightSideResponseBody() {
}
};
const expectedStringInJsonBody = RegExp('/api/v1/namespaces/.*/services/.*/proxy/get');
expect(responseBody.args).to.match(expectedJsonBody.args);
expect(responseBody.url).to.match(expectedJsonBody.url);
expect(responseBody.headers['User-Agent']).to.match(expectedJsonBody.headers['User-Agent']);
expect(responseBody.headers['Accept-Encoding']).to.match(expectedJsonBody.headers['Accept-Encoding']);
expect(responseBody.headers['X-Forwarded-Uri']).to.match(expectedJsonBody.headers['X-Forwarded-Uri']);
expect(responseBody.args).to.match(expectdJsonBody.args);
expect(responseBody.url).to.match(expectdJsonBody.url);
expect(responseBody.headers['User-Agent']).to.match(expectdJsonBody.headers['User-Agent']);
expect(responseBody.headers['Accept-Encoding']).to.match(expectdJsonBody.headers['Accept-Encoding']);
expect(responseBody.headers['X-Forwarded-Uri']).to.match(expectdJsonBody.headers['X-Forwarded-Uri']);
cy.get(`${Cypress.env('bodyJsonClass')}`).should('have.text', encodedBody);
cy.get(`[data-cy="lineNumbersCheckBoxInput"]`).should('be.disabled');
clickCheckbox('Decode Base64');
cy.get(`[data-cy="lineNumbersCheckBoxInput"]`).should('not.be.disabled');
cy.get(`${Cypress.env('bodyJsonClass')} > `).its('length').should('be.gt', 1).then(linesNum => {
cy.get(`${Cypress.env('bodyJsonClass')} > >`).its('length').should('be.gt', linesNum).then(jsonItemsNum => {
checkOnlyLineNumberes(jsonItemsNum, expectedStringInJsonBody);
// checkPrettyAndLineNums(decodedBody);
//clickCheckbox('Line numbers');
//checkPrettyOrNothing(jsonItemsNum, decodedBody);
// clickCheckbox('Pretty');
// checkPrettyOrNothing(jsonItemsNum, decodedBody);
//
// clickCheckbox('Line numbers');
// checkOnlyLineNumberes(jsonItemsNum, decodedBody);
});
});
});
@@ -306,9 +309,37 @@ function clickCheckbox(type) {
cy.contains(`${type}`).prev().children().click();
}
function checkPrettyAndLineNums(decodedBody) {
decodedBody = decodedBody.replaceAll(' ', '');
cy.get(`${Cypress.env('bodyJsonClass')} >`).then(elements => {
const lines = Object.values(elements);
lines.forEach((line, index) => {
if (line.getAttribute) {
const cleanLine = getCleanLine(line);
const currentLineFromDecodedText = decodedBody.substring(0, cleanLine.length);
expect(cleanLine).to.equal(currentLineFromDecodedText, `expected the text in line number ${index + 1} to match the text that generated by the base64 decoding`)
decodedBody = decodedBody.substring(cleanLine.length);
}
});
});
}
function getCleanLine(lineElement) {
return (lineElement.innerText.substring(0, lineElement.innerText.length - 1)).replaceAll(' ', '');
}
function checkPrettyOrNothing(jsonItems, decodedBody) {
cy.get(`${Cypress.env('bodyJsonClass')} > `).should('have.length', jsonItems).then(text => {
const json = text.text();
expect(json).to.equal(decodedBody);
});
}
function checkOnlyLineNumberes(jsonItems, decodedText) {
cy.get(`${Cypress.env('bodyJsonClass')} > >`).should('have.length', jsonItems);
cy.get(`${Cypress.env('bodyJsonClass')} >`).contains(decodedText);
cy.get(`${Cypress.env('bodyJsonClass')} >`).should('have.length', 1).and('have.text', decodedText);
cy.get(`${Cypress.env('bodyJsonClass')} > >`).should('have.length', jsonItems)
}
function serviceMapCheck() {

View File

@@ -105,13 +105,10 @@ func TestRedis(t *testing.T) {
}
}
RunCypressTests(t, "npx cypress run --spec \"cypress/e2e/tests/Redis.js\"")
RunCypressTests(t, "npx cypress@9.5.4 run --spec \"cypress/integration/tests/Redis.js\"")
}
func TestAmqp(t *testing.T) {
t.Skip("ignoredd for now because those tests are not stable")
if testing.Short() {
t.Skip("ignored acceptance test")
}
@@ -239,5 +236,5 @@ func TestAmqp(t *testing.T) {
ch.Close()
}
RunCypressTests(t, "npx cypress run --spec \"cypress/e2e/tests/Rabbit.js\"")
RunCypressTests(t, "npx cypress@9.5.4 run --spec \"cypress/integration/tests/Rabbit.js\"")
}
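
Both sides of this hunk drive Cypress from Go tests by shelling out to `npx cypress run`; only the pinned version and the spec path differ (Cypress 10 reads specs from `cypress/e2e`, Cypress 9 from `cypress/integration`). A minimal sketch of such a runner, assuming `RunCypressTests` simply executes the command and fails the test on a non-zero exit; the names below are illustrative, not the repository's:

```go
package acceptance

import (
	"os/exec"
	"testing"
)

// runCypress is a hypothetical stand-in for RunCypressTests: it executes the
// given shell command and fails the test if Cypress exits with a non-zero code.
func runCypress(t *testing.T, command string) {
	t.Helper()
	out, err := exec.Command("bash", "-c", command).CombinedOutput()
	if err != nil {
		t.Errorf("cypress run failed: %v\n%s", err, out)
	}
}
```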

View File

@@ -27,7 +27,7 @@ else
fi
echo "Starting minikube..."
minikube start --cpus 2 --memory 6946
minikube start
echo "Creating mizu tests namespaces"
kubectl create namespace mizu-tests --dry-run=client -o yaml | kubectl apply -f -

View File

@@ -78,7 +78,7 @@ func basicTapTest(t *testing.T, shouldCheckSrcAndDest bool, extraArgs... string)
expectedPodsStr += fmt.Sprintf("Name:%vNamespace:%v", expectedPods[i].Name, expectedPods[i].Namespace)
}
RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/e2e/tests/UiTest.js\" --env entriesCount=%d,arrayDict=%v,shouldCheckSrcAndDest=%v",
RunCypressTests(t, fmt.Sprintf("npx cypress@9.5.4 run --spec \"cypress/integration/tests/UiTest.js\" --env entriesCount=%d,arrayDict=%v,shouldCheckSrcAndDest=%v",
entriesCount, expectedPodsStr, shouldCheckSrcAndDest))
})
}
@@ -135,7 +135,7 @@ func TestTapGuiPort(t *testing.T) {
}
}
RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/e2e/tests/GuiPort.js\" --env name=%v,namespace=%v,port=%d",
RunCypressTests(t, fmt.Sprintf("npx cypress@9.5.4 run --spec \"cypress/integration/tests/GuiPort.js\" --env name=%v,namespace=%v,port=%d",
"httpbin", "mizu-tests", guiPort))
})
}
@@ -182,7 +182,7 @@ func TestTapAllNamespaces(t *testing.T) {
return
}
RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/e2e/tests/MultipleNamespaces.js\" --env name1=%v,name2=%v,name3=%v,namespace1=%v,namespace2=%v,namespace3=%v",
RunCypressTests(t, fmt.Sprintf("npx cypress@9.5.4 run --spec \"cypress/integration/tests/MultipleNamespaces.js\" --env name1=%v,name2=%v,name3=%v,namespace1=%v,namespace2=%v,namespace3=%v",
expectedPods[0].Name, expectedPods[1].Name, expectedPods[2].Name, expectedPods[0].Namespace, expectedPods[1].Namespace, expectedPods[2].Namespace))
}
@@ -231,7 +231,7 @@ func TestTapMultipleNamespaces(t *testing.T) {
return
}
RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/e2e/tests/MultipleNamespaces.js\" --env name1=%v,name2=%v,name3=%v,namespace1=%v,namespace2=%v,namespace3=%v",
RunCypressTests(t, fmt.Sprintf("npx cypress@9.5.4 run --spec \"cypress/integration/tests/MultipleNamespaces.js\" --env name1=%v,name2=%v,name3=%v,namespace1=%v,namespace2=%v,namespace3=%v",
expectedPods[0].Name, expectedPods[1].Name, expectedPods[2].Name, expectedPods[0].Namespace, expectedPods[1].Namespace, expectedPods[2].Namespace))
}
@@ -277,7 +277,7 @@ func TestTapRegex(t *testing.T) {
return
}
RunCypressTests(t, fmt.Sprintf("npx cypress run --spec \"cypress/e2e/tests/Regex.js\" --env name=%v,namespace=%v",
RunCypressTests(t, fmt.Sprintf("npx cypress@9.5.4 run --spec \"cypress/integration/tests/Regex.js\" --env name=%v,namespace=%v",
expectedPods[0].Name, expectedPods[0].Namespace))
}
@@ -376,7 +376,7 @@ func TestTapRedact(t *testing.T) {
}
}
RunCypressTests(t, "npx cypress run --spec \"cypress/e2e/tests/Redact.js\"")
RunCypressTests(t, "npx cypress@9.5.4 run --spec \"cypress/integration/tests/Redact.js\"")
}
func TestTapNoRedact(t *testing.T) {
@@ -426,7 +426,7 @@ func TestTapNoRedact(t *testing.T) {
}
}
RunCypressTests(t, "npx cypress run --spec \"cypress/e2e/tests/NoRedact.js\"")
RunCypressTests(t, "npx cypress@9.5.4 run --spec \"cypress/integration/tests/NoRedact.js\"")
}
func TestTapRegexMasking(t *testing.T) {
@@ -479,7 +479,7 @@ func TestTapRegexMasking(t *testing.T) {
}
}
RunCypressTests(t, "npx cypress run --spec \"cypress/e2e/tests/RegexMasking.js\"")
RunCypressTests(t, "npx cypress@9.5.4 run --spec \"cypress/integration/tests/RegexMasking.js\"")
}
@@ -541,7 +541,7 @@ func TestTapIgnoredUserAgents(t *testing.T) {
}
}
RunCypressTests(t, "npx cypress run --spec \"cypress/e2e/tests/IgnoredUserAgents.js\"")
RunCypressTests(t, "npx cypress@9.5.4 run --spec \"cypress/integration/tests/IgnoredUserAgents.js\"")
}
func TestTapDumpLogs(t *testing.T) {

View File

@@ -20,7 +20,7 @@ require (
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
github.com/orcaman/concurrent-map v1.0.0
github.com/stretchr/testify v1.7.0
github.com/up9inc/basenine/client/go v0.0.0-20220612112747-3b28eeac9c51
github.com/up9inc/basenine/client/go v0.0.0-20220509204026-c37adfc587f4
github.com/up9inc/mizu/logger v0.0.0
github.com/up9inc/mizu/shared v0.0.0
github.com/up9inc/mizu/tap v0.0.0
@@ -47,13 +47,12 @@ require (
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beevik/etree v1.1.0 // indirect
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
github.com/chanced/dynamic v0.0.0-20211210164248-f8fadb1d735b // indirect
github.com/cilium/ebpf v0.8.1 // indirect
github.com/cilium/ebpf v0.8.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
@@ -85,7 +84,6 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.14.2 // indirect
github.com/knightsc/gapstone v0.0.0-20211014144438-5e0e64002a6e // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect

View File

@@ -77,8 +77,6 @@ github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
@@ -128,8 +126,8 @@ github.com/chanced/openapi v0.0.8/go.mod h1:SxE2VMLPw+T7Vq8nwbVVhDF2PigvRF4n5Xyq
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.8.1 h1:bLSSEbBLqGPXxls55pGr5qWZaTqcmfDJHhou7t254ao=
github.com/cilium/ebpf v0.8.1/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
github.com/cilium/ebpf v0.8.0 h1:2V6KSg3FRADVU2BMIRemZ0hV+9OM+aAHhZDjQyjJTAs=
github.com/cilium/ebpf v0.8.0/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -457,8 +455,6 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/knightsc/gapstone v0.0.0-20211014144438-5e0e64002a6e h1:6J5obSn9umEThiYzWzndcPOZR0Qj/sVCZpH6V1G7yNE=
github.com/knightsc/gapstone v0.0.0-20211014144438-5e0e64002a6e/go.mod h1:1K5hEzsMBLTPdRJKEHqBFJ8Zt2VRqDhomcQ11KH0WW4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
@@ -698,8 +694,8 @@ github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ=
github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw=
github.com/up9inc/basenine/client/go v0.0.0-20220612112747-3b28eeac9c51 h1:6op+PUYmTlxze3V3f30lWKix3sWqv1M9rvRhyaxbsdQ=
github.com/up9inc/basenine/client/go v0.0.0-20220612112747-3b28eeac9c51/go.mod h1:SvJGPoa/6erhUQV7kvHBwM/0x5LyO6XaG2lUaCaKiUI=
github.com/up9inc/basenine/client/go v0.0.0-20220509204026-c37adfc587f4 h1:nNOrU1HVH0fnaG7GNhxCc8kNPVL035Iix7ihUF6lZT8=
github.com/up9inc/basenine/client/go v0.0.0-20220509204026-c37adfc587f4/go.mod h1:SvJGPoa/6erhUQV7kvHBwM/0x5LyO6XaG2lUaCaKiUI=
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg=
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/wI2L/jsondiff v0.1.1 h1:r2TkoEet7E4JMO5+s1RCY2R0LrNPNHY6hbDeow2hRHw=

View File

@@ -118,7 +118,7 @@ func hostApi(socketHarOutputChannel chan<- *tapApi.OutputChannelItem) *gin.Engin
api.WebSocketRoutes(ginApp, &eventHandlers)
if config.Config.OAS.Enable {
if config.Config.OAS {
routes.OASRoutes(ginApp)
}
@@ -200,7 +200,7 @@ func runInHarReaderMode() {
}
func enableExpFeatureIfNeeded() {
if config.Config.OAS.Enable {
if config.Config.OAS {
oasGenerator := dependency.GetInstance(dependency.OasGeneratorDependency).(oas.OasGenerator)
oasGenerator.Start()
}
@@ -227,7 +227,7 @@ func setUIFlags(uiIndexPath string) error {
return err
}
replacedContent := strings.Replace(string(read), "__IS_OAS_ENABLED__", strconv.FormatBool(config.Config.OAS.Enable), 1)
replacedContent := strings.Replace(string(read), "__IS_OAS_ENABLED__", strconv.FormatBool(config.Config.OAS), 1)
replacedContent = strings.Replace(replacedContent, "__IS_SERVICE_MAP_ENABLED__", strconv.FormatBool(config.Config.ServiceMap), 1)
err = ioutil.WriteFile(uiIndexPath, []byte(replacedContent), 0)
@@ -363,7 +363,7 @@ func handleIncomingMessageAsTapper(socketConnection *websocket.Conn) {
func initializeDependencies() {
dependency.RegisterGenerator(dependency.ServiceMapGeneratorDependency, func() interface{} { return servicemap.GetDefaultServiceMapInstance() })
dependency.RegisterGenerator(dependency.OasGeneratorDependency, func() interface{} { return oas.GetDefaultOasGeneratorInstance(config.Config.OAS.MaxExampleLen) })
dependency.RegisterGenerator(dependency.OasGeneratorDependency, func() interface{} { return oas.GetDefaultOasGeneratorInstance() })
dependency.RegisterGenerator(dependency.EntriesInserter, func() interface{} { return api.GetBasenineEntryInserterInstance() })
dependency.RegisterGenerator(dependency.EntriesProvider, func() interface{} { return &entries.BasenineEntriesProvider{} })
dependency.RegisterGenerator(dependency.EntriesSocketStreamer, func() interface{} { return &api.BasenineEntryStreamer{} })

View File

@@ -11,15 +11,16 @@ import (
"strings"
"time"
"github.com/up9inc/mizu/agent/pkg/dependency"
"github.com/up9inc/mizu/agent/pkg/models"
"github.com/up9inc/mizu/agent/pkg/oas"
"github.com/up9inc/mizu/agent/pkg/servicemap"
"github.com/up9inc/mizu/agent/pkg/dependency"
"github.com/up9inc/mizu/agent/pkg/har"
"github.com/up9inc/mizu/agent/pkg/holder"
"github.com/up9inc/mizu/agent/pkg/providers"
"github.com/up9inc/mizu/agent/pkg/oas"
"github.com/up9inc/mizu/agent/pkg/servicemap"
"github.com/up9inc/mizu/agent/pkg/resolver"
"github.com/up9inc/mizu/agent/pkg/utils"
@@ -143,14 +144,13 @@ func startReadingChannel(outputItems <-chan *tapApi.OutputChannelItem, extension
continue
}
providers.EntryAdded(len(data))
entryInserter := dependency.GetInstance(dependency.EntriesInserter).(EntryInserter)
if err := entryInserter.Insert(mizuEntry); err != nil {
logger.Log.Errorf("Error inserting entry, err: %v", err)
}
summary := extension.Dissector.Summarize(mizuEntry)
providers.EntryAdded(len(data), summary)
serviceMapGenerator := dependency.GetInstance(dependency.ServiceMapGeneratorDependency).(servicemap.ServiceMapSink)
serviceMapGenerator.NewTCPEntry(mizuEntry.Source, mizuEntry.Destination, &item.Protocol)

View File

@@ -34,12 +34,12 @@ func TestGetOASSpec(t *testing.T) {
func getRecorderAndContext() (*httptest.ResponseRecorder, *gin.Context) {
dependency.RegisterGenerator(dependency.OasGeneratorDependency, func() interface{} {
return oas.GetDefaultOasGeneratorInstance(-1)
return oas.GetDefaultOasGeneratorInstance()
})
recorder := httptest.NewRecorder()
c, _ := gin.CreateTestContext(recorder)
oas.GetDefaultOasGeneratorInstance(-1).Start()
oas.GetDefaultOasGeneratorInstance(-1).GetServiceSpecs().Store("some", oas.NewGen("some"))
oas.GetDefaultOasGeneratorInstance().Start()
oas.GetDefaultOasGeneratorInstance().GetServiceSpecs().Store("some", oas.NewGen("some"))
return recorder, c
}

View File

@@ -79,15 +79,6 @@ func GetGeneralStats(c *gin.Context) {
c.JSON(http.StatusOK, providers.GetGeneralStats())
}
func GetAccumulativeStats(c *gin.Context) {
c.JSON(http.StatusOK, providers.GetAccumulativeStats())
}
func GetAccumulativeStatsTiming(c *gin.Context) {
// for now hardcoded 10 bars of 5 minutes interval
c.JSON(http.StatusOK, providers.GetAccumulativeStatsTiming(300, 10))
}
func GetCurrentResolvingInformation(c *gin.Context) {
c.JSON(http.StatusOK, holder.GetResolver().GetMap())
}

View File

@@ -9,7 +9,7 @@ var ignoredCtypes = []string{"application/javascript", "application/x-javascript
var ignoredHeaders = []string{
"a-im", "accept",
"authorization", "cache-control", "connection", "content-encoding", "content-length", "content-range", "content-type", "cookie",
"authorization", "cache-control", "connection", "content-encoding", "content-length", "content-type", "cookie",
"date", "dnt", "expect", "forwarded", "from", "front-end-https", "host", "http2-settings",
"max-forwards", "origin", "pragma", "proxy-authorization", "proxy-connection", "range", "referer",
"save-data", "te", "trailer", "transfer-encoding", "upgrade", "upgrade-insecure-requests", "x-download-options",

View File

@@ -28,14 +28,13 @@ type OasGenerator interface {
}
type defaultOasGenerator struct {
started bool
serviceSpecs *sync.Map
maxExampleLen int
started bool
serviceSpecs *sync.Map
}
func GetDefaultOasGeneratorInstance(maxExampleLen int) *defaultOasGenerator {
func GetDefaultOasGeneratorInstance() *defaultOasGenerator {
syncOnce.Do(func() {
instance = NewDefaultOasGenerator(maxExampleLen)
instance = NewDefaultOasGenerator()
logger.Log.Debug("OAS Generator Initialized")
})
return instance
@@ -118,7 +117,6 @@ func (g *defaultOasGenerator) getGen(dest string, urlStr string) *SpecGen {
var gen *SpecGen
if !found {
gen = NewGen(u.Scheme + "://" + dest)
gen.MaxExampleLen = g.maxExampleLen
g.serviceSpecs.Store(dest, gen)
} else {
gen = val.(*SpecGen)
@@ -134,10 +132,9 @@ func (g *defaultOasGenerator) GetServiceSpecs() *sync.Map {
return g.serviceSpecs
}
func NewDefaultOasGenerator(maxExampleLen int) *defaultOasGenerator {
func NewDefaultOasGenerator() *defaultOasGenerator {
return &defaultOasGenerator{
started: false,
serviceSpecs: &sync.Map{},
maxExampleLen: maxExampleLen,
started: false,
serviceSpecs: &sync.Map{},
}
}
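
The pair of constructors above differs only in whether a `maxExampleLen` limit is threaded into the generator; in both variants the instance is created once behind `syncOnce.Do`. A minimal sketch of that singleton pattern, using hypothetical names (`generator`, `GetInstance`) rather than the repository's types:

```go
package oas

import "sync"

// generator is a hypothetical stand-in for defaultOasGenerator.
type generator struct {
	started       bool
	serviceSpecs  *sync.Map
	maxExampleLen int // -1 means unlimited; 0 and above caps stored example size
}

var (
	once     sync.Once
	instance *generator
)

// GetInstance lazily builds the singleton exactly once; the limit passed on
// the first call wins, and later calls ignore their argument.
func GetInstance(maxExampleLen int) *generator {
	once.Do(func() {
		instance = &generator{
			serviceSpecs:  &sync.Map{},
			maxExampleLen: maxExampleLen,
		}
	})
	return instance
}
```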

View File

@@ -8,7 +8,7 @@ import (
)
func TestOASGen(t *testing.T) {
gen := GetDefaultOasGeneratorInstance(-1)
gen := GetDefaultOasGeneratorInstance()
e := new(har.Entry)
err := json.Unmarshal([]byte(`{"startedDateTime": "20000101","request": {"url": "https://host/path", "method": "GET"}, "response": {"status": 200}}`), e)

View File

@@ -42,8 +42,6 @@ type reqResp struct { // hello, generics in Go
}
type SpecGen struct {
MaxExampleLen int // -1 unlimited, 0 and above sets limit
oas *openapi.OpenAPI
tree *Node
lock sync.Mutex
@@ -61,11 +59,7 @@ func NewGen(server string) *SpecGen {
spec.Servers = make([]*openapi.Server, 0)
spec.Servers = append(spec.Servers, &openapi.Server{URL: server})
gen := SpecGen{
oas: spec,
tree: new(Node),
MaxExampleLen: -1,
}
gen := SpecGen{oas: spec, tree: new(Node)}
return &gen
}
@@ -234,7 +228,7 @@ func (g *SpecGen) handlePathObj(entryWithSource *EntryWithSource) (string, error
split = strings.Split(urlParsed.Path, "/")
}
node := g.tree.getOrSet(split, new(openapi.PathObj), entryWithSource.Id)
opObj, err := handleOpObj(entryWithSource, node.pathObj, g.MaxExampleLen)
opObj, err := handleOpObj(entryWithSource, node.pathObj)
if opObj != nil {
return opObj.OperationID, err
@@ -243,7 +237,7 @@ func (g *SpecGen) handlePathObj(entryWithSource *EntryWithSource) (string, error
return "", err
}
func handleOpObj(entryWithSource *EntryWithSource, pathObj *openapi.PathObj, limit int) (*openapi.Operation, error) {
func handleOpObj(entryWithSource *EntryWithSource, pathObj *openapi.PathObj) (*openapi.Operation, error) {
entry := entryWithSource.Entry
isSuccess := 100 <= entry.Response.Status && entry.Response.Status < 400
opObj, wasMissing, err := getOpObj(pathObj, entry.Request.Method, isSuccess)
@@ -256,12 +250,12 @@ func handleOpObj(entryWithSource *EntryWithSource, pathObj *openapi.PathObj, lim
return nil, nil
}
err = handleRequest(&entry.Request, opObj, isSuccess, entryWithSource.Id, limit)
err = handleRequest(&entry.Request, opObj, isSuccess, entryWithSource.Id)
if err != nil {
return nil, err
}
err = handleResponse(&entry.Response, opObj, isSuccess, entryWithSource.Id, limit)
err = handleResponse(&entry.Response, opObj, isSuccess, entryWithSource.Id)
if err != nil {
return nil, err
}
@@ -348,7 +342,7 @@ func handleCounters(opObj *openapi.Operation, success bool, entryWithSource *Ent
return nil
}
func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool, sampleId string, limit int) error {
func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool, sampleId string) error {
// TODO: we don't handle the situation when header/qstr param can be defined on pathObj level. Also the path param defined on opObj
urlParsed, err := url.Parse(req.URL)
if err != nil {
@@ -396,7 +390,7 @@ func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool, s
} else {
reqCtype, _ := getReqCtype(req)
reqMedia, err := fillContent(reqResp{Req: req}, reqBody.Content, reqCtype, sampleId, limit)
reqMedia, err := fillContent(reqResp{Req: req}, reqBody.Content, reqCtype, sampleId)
if err != nil {
return err
}
@@ -408,7 +402,7 @@ func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool, s
return nil
}
func handleResponse(resp *har.Response, opObj *openapi.Operation, isSuccess bool, sampleId string, limit int) error {
func handleResponse(resp *har.Response, opObj *openapi.Operation, isSuccess bool, sampleId string) error {
// TODO: we don't support "default" response
respObj, err := getResponseObj(resp, opObj, isSuccess)
if err != nil {
@@ -421,7 +415,7 @@ func handleResponse(resp *har.Response, opObj *openapi.Operation, isSuccess bool
respCtype := getRespCtype(resp)
respContent := respObj.Content
respMedia, err := fillContent(reqResp{Resp: resp}, respContent, respCtype, sampleId, limit)
respMedia, err := fillContent(reqResp{Resp: resp}, respContent, respCtype, sampleId)
if err != nil {
return err
}
@@ -473,7 +467,7 @@ func handleRespHeaders(reqHeaders []har.Header, respObj *openapi.ResponseObj, sa
}
}
func fillContent(reqResp reqResp, respContent openapi.Content, ctype string, sampleId string, limit int) (*openapi.MediaType, error) {
func fillContent(reqResp reqResp, respContent openapi.Content, ctype string, sampleId string) (*openapi.MediaType, error) {
content, found := respContent[ctype]
if !found {
respContent[ctype] = &openapi.MediaType{}
@@ -516,7 +510,7 @@ func fillContent(reqResp reqResp, respContent openapi.Content, ctype string, sam
handleFormDataMultipart(text, content, params)
}
if len(exampleMsg) > len(content.Example) && (limit < 0 || len(exampleMsg) <= limit) {
if content.Example == nil && len(exampleMsg) > len(content.Example) {
content.Example = exampleMsg
}
}
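
The two guards above encode different example-retention policies: the `limit`-aware variant keeps the longest example that still fits under `MaxExampleLen` (negative means unlimited), while the other only fills an example that is still empty. A small sketch of the limit-aware predicate; `keepExample` is an illustrative helper, not a function from the repository:

```go
package oas

// keepExample reports whether candidate should replace current as the stored
// example body: it must be longer than what is already stored and, when a
// non-negative limit is configured, must not exceed it (-1 disables the cap).
func keepExample(current, candidate string, limit int) bool {
	if len(candidate) <= len(current) {
		return false
	}
	return limit < 0 || len(candidate) <= limit
}
```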

View File

@@ -48,7 +48,7 @@ func TestEntries(t *testing.T) {
t.FailNow()
}
gen := NewDefaultOasGenerator(-1)
gen := NewDefaultOasGenerator()
gen.serviceSpecs = new(sync.Map)
loadStartingOAS("test_artifacts/catalogue.json", "catalogue", gen.serviceSpecs)
loadStartingOAS("test_artifacts/trcc.json", "trcc-api-service", gen.serviceSpecs)
@@ -122,7 +122,7 @@ func TestEntries(t *testing.T) {
}
func TestFileSingle(t *testing.T) {
gen := NewDefaultOasGenerator(-1)
gen := NewDefaultOasGenerator()
gen.serviceSpecs = new(sync.Map)
// loadStartingOAS()
file := "test_artifacts/params.har"
@@ -212,7 +212,7 @@ func loadStartingOAS(file string, label string, specs *sync.Map) {
}
func TestEntriesNegative(t *testing.T) {
gen := NewDefaultOasGenerator(-1)
gen := NewDefaultOasGenerator()
gen.serviceSpecs = new(sync.Map)
files := []string{"invalid"}
_, err := feedEntries(files, false, gen)
@@ -223,7 +223,7 @@ func TestEntriesNegative(t *testing.T) {
}
func TestEntriesPositive(t *testing.T) {
gen := NewDefaultOasGenerator(-1)
gen := NewDefaultOasGenerator()
gen.serviceSpecs = new(sync.Map)
files := []string{"test_artifacts/params.har"}
_, err := feedEntries(files, false, gen)

View File

@@ -333,7 +333,7 @@
}
}
},
"example": "agent-id=ade\u0026callback-url=\u0026token=sometoken-second-val\u0026optional=another",
"example": "agent-id=ade\u0026callback-url=\u0026token=sometoken",
"x-sample-entry": "000000000000000000000008"
}
},

View File

@@ -2,12 +2,7 @@ package providers
import (
"reflect"
"sync"
"time"
"github.com/jinzhu/copier"
"github.com/up9inc/mizu/logger"
"github.com/up9inc/mizu/tap/api"
)
type GeneralStats struct {
@@ -17,49 +12,7 @@ type GeneralStats struct {
LastEntryTimestamp int
}
type BucketStats []*TimeFrameStatsValue
type TimeFrameStatsValue struct {
BucketTime time.Time `json:"timestamp"`
ProtocolStats map[string]ProtocolStats `json:"protocols"`
}
type ProtocolStats struct {
MethodsStats map[string]*SizeAndEntriesCount `json:"methods"`
Color string `json:"color"`
}
type SizeAndEntriesCount struct {
EntriesCount int `json:"entriesCount"`
VolumeInBytes int `json:"volumeInBytes"`
}
type AccumulativeStatsCounter struct {
Name string `json:"name"`
EntriesCount int `json:"entriesCount"`
VolumeSizeBytes int `json:"volumeSizeBytes"`
}
type AccumulativeStatsProtocol struct {
AccumulativeStatsCounter
Color string `json:"color"`
Methods []*AccumulativeStatsCounter `json:"methods"`
}
type AccumulativeStatsProtocolTime struct {
ProtocolsData []*AccumulativeStatsProtocol `json:"protocols"`
Time int64 `json:"timestamp"`
}
var (
generalStats = GeneralStats{}
bucketsStats = BucketStats{}
bucketStatsLocker = sync.Mutex{}
)
const (
InternalBucketThreshold = time.Minute * 1
)
var generalStats = GeneralStats{}
func ResetGeneralStats() {
generalStats = GeneralStats{}
@@ -69,156 +22,7 @@ func GetGeneralStats() GeneralStats {
return generalStats
}
func getBucketStatsCopy() BucketStats {
bucketStatsCopy := BucketStats{}
bucketStatsLocker.Lock()
if err := copier.Copy(&bucketStatsCopy, bucketsStats); err != nil {
logger.Log.Errorf("Error while copying src stats into temporary copied object")
return nil
}
bucketStatsLocker.Unlock()
return bucketStatsCopy
}
func GetAccumulativeStats() []*AccumulativeStatsProtocol {
bucketStatsCopy := getBucketStatsCopy()
if bucketStatsCopy == nil {
return make([]*AccumulativeStatsProtocol, 0)
}
protocolToColor := make(map[string]string, 0)
methodsPerProtocolAggregated := make(map[string]map[string]*AccumulativeStatsCounter, 0)
for _, countersOfTimeFrame := range bucketStatsCopy {
for protocolName, value := range countersOfTimeFrame.ProtocolStats {
if _, ok := protocolToColor[protocolName]; !ok {
protocolToColor[protocolName] = value.Color
}
for method, countersValue := range value.MethodsStats {
if _, found := methodsPerProtocolAggregated[protocolName]; !found {
methodsPerProtocolAggregated[protocolName] = map[string]*AccumulativeStatsCounter{}
}
if _, found := methodsPerProtocolAggregated[protocolName][method]; !found {
methodsPerProtocolAggregated[protocolName][method] = &AccumulativeStatsCounter{
Name: method,
EntriesCount: 0,
VolumeSizeBytes: 0,
}
}
methodsPerProtocolAggregated[protocolName][method].EntriesCount += countersValue.EntriesCount
methodsPerProtocolAggregated[protocolName][method].VolumeSizeBytes += countersValue.VolumeInBytes
}
}
}
return ConvertToPieData(methodsPerProtocolAggregated, protocolToColor)
}
func ConvertToPieData(methodsPerProtocolAggregated map[string]map[string]*AccumulativeStatsCounter, protocolToColor map[string]string) []*AccumulativeStatsProtocol {
protocolsData := make([]*AccumulativeStatsProtocol, 0)
for protocolName, value := range methodsPerProtocolAggregated {
entriesCount := 0
volumeSizeBytes := 0
methods := make([]*AccumulativeStatsCounter, 0)
for _, methodAccData := range value {
entriesCount += methodAccData.EntriesCount
volumeSizeBytes += methodAccData.VolumeSizeBytes
methods = append(methods, methodAccData)
}
protocolsData = append(protocolsData, &AccumulativeStatsProtocol{
AccumulativeStatsCounter: AccumulativeStatsCounter{
Name: protocolName,
EntriesCount: entriesCount,
VolumeSizeBytes: volumeSizeBytes,
},
Color: protocolToColor[protocolName],
Methods: methods,
})
}
return protocolsData
}
func GetAccumulativeStatsTiming(intervalSeconds int, numberOfBars int) []*AccumulativeStatsProtocolTime {
bucketStatsCopy := getBucketStatsCopy()
if len(bucketStatsCopy) == 0 {
return make([]*AccumulativeStatsProtocolTime, 0)
}
protocolToColor := make(map[string]string, 0)
methodsPerProtocolPerTimeAggregated := make(map[time.Time]map[string]map[string]*AccumulativeStatsCounter, 0)
// TODO: Extract to function and add tests for those values
lastBucketTime := time.Now().UTC().Add(-1 * InternalBucketThreshold / 2).Round(InternalBucketThreshold)
firstBucketTime := lastBucketTime.Add(-1 * time.Second * time.Duration(intervalSeconds*(numberOfBars-1)))
bucketStatsIndex := len(bucketStatsCopy) - 1
for bucketStatsIndex >= 0 {
currentBucketTime := bucketStatsCopy[bucketStatsIndex].BucketTime
if currentBucketTime.After(firstBucketTime) || currentBucketTime.Equal(firstBucketTime) {
resultBucketRoundedKey := currentBucketTime.Add(-1 * time.Second * time.Duration(intervalSeconds) / 2).Round(time.Second * time.Duration(intervalSeconds))
for protocolName, data := range bucketStatsCopy[bucketStatsIndex].ProtocolStats {
if _, ok := protocolToColor[protocolName]; !ok {
protocolToColor[protocolName] = data.Color
}
for methodName, dataOfMethod := range data.MethodsStats {
if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey]; !ok {
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey] = map[string]map[string]*AccumulativeStatsCounter{}
}
if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName]; !ok {
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName] = map[string]*AccumulativeStatsCounter{}
}
if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName]; !ok {
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName] = &AccumulativeStatsCounter{
Name: methodName,
EntriesCount: 0,
VolumeSizeBytes: 0,
}
}
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName].EntriesCount += dataOfMethod.EntriesCount
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName].VolumeSizeBytes += dataOfMethod.VolumeInBytes
}
}
}
bucketStatsIndex--
}
return ConvertToTimelineData(methodsPerProtocolPerTimeAggregated, protocolToColor)
}
func ConvertToTimelineData(methodsPerProtocolPerTimeAggregated map[time.Time]map[string]map[string]*AccumulativeStatsCounter, protocolToColor map[string]string) []*AccumulativeStatsProtocolTime {
finalResult := make([]*AccumulativeStatsProtocolTime, 0)
for timeKey, item := range methodsPerProtocolPerTimeAggregated {
protocolsData := make([]*AccumulativeStatsProtocol, 0)
for protocolName := range item {
entriesCount := 0
volumeSizeBytes := 0
methods := make([]*AccumulativeStatsCounter, 0)
for _, methodAccData := range methodsPerProtocolPerTimeAggregated[timeKey][protocolName] {
entriesCount += methodAccData.EntriesCount
volumeSizeBytes += methodAccData.VolumeSizeBytes
methods = append(methods, methodAccData)
}
protocolsData = append(protocolsData, &AccumulativeStatsProtocol{
AccumulativeStatsCounter: AccumulativeStatsCounter{
Name: protocolName,
EntriesCount: entriesCount,
VolumeSizeBytes: volumeSizeBytes,
},
Color: protocolToColor[protocolName],
Methods: methods,
})
}
finalResult = append(finalResult, &AccumulativeStatsProtocolTime{
Time: timeKey.UnixMilli(),
ProtocolsData: protocolsData,
})
}
return finalResult
}
func EntryAdded(size int, summery *api.BaseEntry) {
func EntryAdded(size int) {
generalStats.EntriesCount++
generalStats.EntriesVolumeInGB += float64(size) / (1 << 30)
@@ -228,47 +32,5 @@ func EntryAdded(size int, summery *api.BaseEntry) {
generalStats.FirstEntryTimestamp = currentTimestamp
}
addToBucketStats(size, summery)
generalStats.LastEntryTimestamp = currentTimestamp
}
//GetBucketOfTimeStamp Round the entry to the nearest threshold (one minute) floored (e.g: 15:31:45 -> 15:31:00)
func GetBucketOfTimeStamp(timestamp int64) time.Time {
entryTimeStampAsTime := time.UnixMilli(timestamp)
return entryTimeStampAsTime.Add(-1 * InternalBucketThreshold / 2).Round(InternalBucketThreshold)
}
func addToBucketStats(size int, summery *api.BaseEntry) {
entryTimeBucketRounded := GetBucketOfTimeStamp(summery.Timestamp)
if len(bucketsStats) == 0 {
bucketsStats = append(bucketsStats, &TimeFrameStatsValue{
BucketTime: entryTimeBucketRounded,
ProtocolStats: map[string]ProtocolStats{},
})
}
bucketOfEntry := bucketsStats[len(bucketsStats)-1]
if bucketOfEntry.BucketTime != entryTimeBucketRounded {
bucketOfEntry = &TimeFrameStatsValue{
BucketTime: entryTimeBucketRounded,
ProtocolStats: map[string]ProtocolStats{},
}
bucketsStats = append(bucketsStats, bucketOfEntry)
}
if _, found := bucketOfEntry.ProtocolStats[summery.Protocol.Abbreviation]; !found {
bucketOfEntry.ProtocolStats[summery.Protocol.Abbreviation] = ProtocolStats{
MethodsStats: map[string]*SizeAndEntriesCount{},
Color: summery.Protocol.BackgroundColor,
}
}
if _, found := bucketOfEntry.ProtocolStats[summery.Protocol.Abbreviation].MethodsStats[summery.Method]; !found {
bucketOfEntry.ProtocolStats[summery.Protocol.Abbreviation].MethodsStats[summery.Method] = &SizeAndEntriesCount{
VolumeInBytes: 0,
EntriesCount: 0,
}
}
bucketOfEntry.ProtocolStats[summery.Protocol.Abbreviation].MethodsStats[summery.Method].EntriesCount += 1
bucketOfEntry.ProtocolStats[summery.Protocol.Abbreviation].MethodsStats[summery.Method].VolumeInBytes += size
}
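
One side of this hunk keeps per-protocol, per-method counters in one-minute buckets keyed by a floored timestamp (`Add(-InternalBucketThreshold/2).Round(InternalBucketThreshold)`), e.g. 15:31:45 -> 15:31:00 as the comment above notes. A standalone sketch of that flooring, assuming millisecond Unix timestamps (`time.UnixMilli`):

```go
package providers

import "time"

// bucketThreshold mirrors the one-minute bucket size used above.
const bucketThreshold = time.Minute

// bucketOf floors a millisecond Unix timestamp to the start of its minute,
// e.g. 15:31:45 -> 15:31:00. Subtracting half the threshold before Round
// turns Go's round-halfway-up behavior into a floor.
func bucketOf(timestampMillis int64) time.Time {
	t := time.UnixMilli(timestampMillis)
	return t.Add(-bucketThreshold / 2).Round(bucketThreshold)
}
```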

View File

@@ -3,10 +3,8 @@ package providers_test
import (
"fmt"
"testing"
"time"
"github.com/up9inc/mizu/agent/pkg/providers"
"github.com/up9inc/mizu/tap/api"
)
func TestNoEntryAddedCount(t *testing.T) {
@@ -24,13 +22,10 @@ func TestNoEntryAddedCount(t *testing.T) {
func TestEntryAddedCount(t *testing.T) {
tests := []int{1, 5, 10, 100, 500, 1000}
entryBucketKey := time.Date(2021, 1, 1, 10, 0, 0, 0, time.UTC)
valueLessThanBucketThreshold := time.Second * 130
mockSummery := &api.BaseEntry{Protocol: api.Protocol{Name: "mock"}, Method: "mock-method", Timestamp: entryBucketKey.Add(valueLessThanBucketThreshold).UnixNano()}
for _, entriesCount := range tests {
t.Run(fmt.Sprintf("%d", entriesCount), func(t *testing.T) {
for i := 0; i < entriesCount; i++ {
providers.EntryAdded(0, mockSummery)
providers.EntryAdded(0)
}
entriesStats := providers.GetGeneralStats()
@@ -43,14 +38,7 @@ func TestEntryAddedCount(t *testing.T) {
t.Errorf("unexpected result - expected: %v, actual: %v", 0, entriesStats.EntriesVolumeInGB)
}
t.Cleanup(func() {
providers.ResetGeneralStats()
generalStats := providers.GetGeneralStats()
if generalStats.EntriesCount != 0 {
t.Errorf("unexpected result - expected: %v, actual: %v", 0, generalStats.EntriesCount)
}
})
t.Cleanup(providers.ResetGeneralStats)
})
}
}
@@ -61,14 +49,12 @@ func TestEntryAddedVolume(t *testing.T) {
var expectedEntriesCount int
var expectedVolumeInGB float64
mockSummery := &api.BaseEntry{Protocol: api.Protocol{Name: "mock"}, Method: "mock-method", Timestamp: time.Date(2021, 1, 1, 10, 0, 0, 0, time.UTC).UnixNano()}
for _, data := range tests {
t.Run(fmt.Sprintf("%d", len(data)), func(t *testing.T) {
expectedEntriesCount++
expectedVolumeInGB += float64(len(data)) / (1 << 30)
providers.EntryAdded(len(data), mockSummery)
providers.EntryAdded(len(data))
entriesStats := providers.GetGeneralStats()
@@ -83,22 +69,3 @@ func TestEntryAddedVolume(t *testing.T) {
}
}
func TestGetBucketOfTimeStamp(t *testing.T) {
tests := map[int64]time.Time{
time.Date(2022, time.Month(1), 1, 10, 34, 45, 0, time.Local).UnixMilli(): time.Date(2022, time.Month(1), 1, 10, 34, 00, 0, time.Local),
time.Date(2022, time.Month(1), 1, 10, 34, 00, 0, time.Local).UnixMilli(): time.Date(2022, time.Month(1), 1, 10, 34, 00, 0, time.Local),
time.Date(2022, time.Month(1), 1, 10, 59, 01, 0, time.Local).UnixMilli(): time.Date(2022, time.Month(1), 1, 10, 59, 00, 0, time.Local),
}
for key, value := range tests {
t.Run(fmt.Sprintf("%v", key), func(t *testing.T) {
actual := providers.GetBucketOfTimeStamp(key)
if actual != value {
t.Errorf("unexpected result - expected: %v, actual: %v", value, actual)
}
})
}
}

View File

@@ -16,8 +16,6 @@ func StatusRoutes(ginApp *gin.Engine) {
routeGroup.GET("/tap", controllers.GetTappingStatus)
routeGroup.GET("/general", controllers.GetGeneralStats) // get general stats about entries in DB
routeGroup.GET("/accumulative", controllers.GetAccumulativeStats)
routeGroup.GET("/accumulativeTiming", controllers.GetAccumulativeStatsTiming)
routeGroup.GET("/resolving", controllers.GetCurrentResolvingInformation)
}

View File

@@ -39,7 +39,7 @@ type ConfigStruct struct {
HeadlessMode bool `yaml:"headless" default:"false"`
LogLevelStr string `yaml:"log-level,omitempty" default:"INFO" readonly:""`
ServiceMap bool `yaml:"service-map" default:"true"`
OAS shared.OASConfig `yaml:"oas"`
OAS bool `yaml:"oas" default:"true"`
}
func (config *ConfigStruct) validate() error {

View File

@@ -1,12 +0,0 @@
#!/bin/bash
SUDO=''
if (( $EUID != 0 )); then
SUDO='sudo'
fi
curl https://github.com/capstone-engine/capstone/archive/4.0.2.tar.gz -Lo ./capstone.tar.gz \
&& tar -xzf capstone.tar.gz && mv ./capstone-* ./capstone \
&& cd capstone \
&& CAPSTONE_ARCHS="aarch64 x86" ./make.sh \
&& $SUDO ./make.sh install

View File

@@ -1,4 +0,0 @@
#!/bin/bash
set -e
docker build . -t up9inc/linux-arm64-musl-go-libpcap-capstone-bpf && docker push up9inc/linux-arm64-musl-go-libpcap-capstone-bpf

View File

@@ -17,14 +17,4 @@ RUN curl https://www.tcpdump.org/release/libpcap-1.10.1.tar.gz -Lo ./libpcap.tar
WORKDIR /work/libpcap
RUN ./configure --host=arm && make \
&& cp /work/libpcap/libpcap.a /usr/xcc/aarch64-linux-musl-cross/lib/gcc/aarch64-linux-musl/*/
WORKDIR /work
# Build and install Capstone from source
RUN curl https://github.com/capstone-engine/capstone/archive/4.0.2.tar.gz -Lo ./capstone.tar.gz \
&& tar -xzf capstone.tar.gz && mv ./capstone-* ./capstone
WORKDIR /work/capstone
RUN CAPSTONE_ARCHS="aarch64" CAPSTONE_STATIC=yes ./make.sh \
&& cp /work/capstone/libcapstone.a /usr/xcc/aarch64-linux-musl-cross/lib/gcc/aarch64-linux-musl/*/
# Install eBPF related dependencies
RUN apt-get -y install clang llvm libbpf-dev

View File

@@ -0,0 +1,4 @@
#!/bin/bash
set -e
docker build . -t up9inc/linux-arm64-musl-go-libpcap && docker push up9inc/linux-arm64-musl-go-libpcap

View File

@@ -1,4 +0,0 @@
#!/bin/bash
set -e
docker build . -t up9inc/linux-x86_64-musl-go-libpcap-capstone-bpf && docker push up9inc/linux-x86_64-musl-go-libpcap-capstone-bpf

View File

@@ -1,18 +1,5 @@
FROM messense/rust-musl-cross:x86_64-musl AS builder-from-arm64v8-to-amd64
WORKDIR /
# Install eBPF related dependencies
RUN apt-get update
RUN apt-get -y install clang llvm libelf-dev pkg-config
# Build and install libbpf from source
RUN curl https://github.com/libbpf/libbpf/archive/refs/tags/v0.8.0.tar.gz -Lo ./libbpf.tar.gz \
&& tar -xzf libbpf.tar.gz && mv ./libbpf-* ./libbpf
WORKDIR /libbpf/src
RUN make && make install
WORKDIR /
ENV CROSS_TRIPLE x86_64-unknown-linux-musl
ENV CROSS_ROOT /usr/local/musl
@@ -25,6 +12,7 @@ ENV AS=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-as \
FC=${CROSS_ROOT}/bin/${CROSS_TRIPLE}-gfortran
# Install Go
WORKDIR /
RUN curl https://go.dev/dl/go1.17.6.linux-arm64.tar.gz -Lo ./go.linux-arm64.tar.gz \
&& curl https://go.dev/dl/go1.17.6.linux-arm64.tar.gz.asc -Lo ./go.linux-arm64.tar.gz.asc \
&& curl https://dl.google.com/dl/linux/linux_signing_key.pub -Lo linux_signing_key.pub \
@@ -41,11 +29,3 @@ RUN curl https://www.tcpdump.org/release/libpcap-1.10.1.tar.gz -Lo ./libpcap.tar
WORKDIR /libpcap
RUN ./configure --host=x86_64 && make \
&& cp /libpcap/libpcap.a /usr/local/musl/lib/gcc/x86_64-unknown-linux-musl/*/
WORKDIR /
# Build and install Capstone from source
RUN curl https://github.com/capstone-engine/capstone/archive/4.0.2.tar.gz -Lo ./capstone.tar.gz \
&& tar -xzf capstone.tar.gz && mv ./capstone-* ./capstone
WORKDIR /capstone
RUN CAPSTONE_ARCHS="x86" CAPSTONE_STATIC=yes ./make.sh \
&& cp /capstone/libcapstone.a /usr/local/musl/lib/gcc/x86_64-unknown-linux-musl/*/

View File

@@ -0,0 +1,4 @@
#!/bin/bash
set -e
docker build . -t up9inc/linux-x86_64-musl-go-libpcap && docker push up9inc/linux-x86_64-musl-go-libpcap

View File

@@ -57,7 +57,7 @@ def extract_samples(f: typing.IO) -> typing.Tuple[pd.Series, pd.Series, pd.Serie
append_sample('"matchedPairs"', line, matched_samples)
append_sample('"liveTcpStreams"', line, live_samples)
append_sample('"processedBytes"', line, processed_samples)
append_sample('heap-alloc', line, heap_samples)
append_sample('mem', line, heap_samples)
append_sample('goroutines', line, goroutines_samples)
cpu_samples = pd.Series(cpu_samples)

View File

@@ -32,11 +32,6 @@ type Resources struct {
MemoryRequests string `yaml:"memory-requests" default:"50Mi"`
}
type OASConfig struct {
Enable bool `yaml:"enabled" default:"true"`
MaxExampleLen int `yaml:"max-example-len" default:"10240"`
}
type MizuAgentConfig struct {
MaxDBSizeBytes int64 `json:"maxDBSizeBytes"`
InsertionFilter string `json:"insertionFilter"`
@@ -47,7 +42,7 @@ type MizuAgentConfig struct {
MizuResourcesNamespace string `json:"mizuResourceNamespace"`
AgentDatabasePath string `json:"agentDatabasePath"`
ServiceMap bool `json:"serviceMap"`
OAS OASConfig `json:"oas"`
OAS bool `json:"oas"`
Telemetry bool `json:"telemetry"`
}

View File

@@ -16,8 +16,6 @@ type AppStats struct {
MatchedPairs uint64 `json:"matchedPairs"`
DroppedTcpStreams uint64 `json:"droppedTcpStreams"`
LiveTcpStreams uint64 `json:"liveTcpStreams"`
IgnoredLastAckCount uint64 `json:"ignoredLastAckCount"`
ThrottledPackets uint64 `json:"throttledPackets"`
}
func (as *AppStats) IncMatchedPairs() {
@@ -41,14 +39,6 @@ func (as *AppStats) IncIgnoredPacketsCount() {
atomic.AddUint64(&as.IgnoredPacketsCount, 1)
}
func (as *AppStats) IncIgnoredLastAckCount() {
atomic.AddUint64(&as.IgnoredLastAckCount, 1)
}
func (as *AppStats) IncThrottledPackets() {
atomic.AddUint64(&as.ThrottledPackets, 1)
}
func (as *AppStats) IncReassembledTcpPayloadsCount() {
atomic.AddUint64(&as.ReassembledTcpPayloadsCount, 1)
}
@@ -84,8 +74,6 @@ func (as *AppStats) DumpStats() *AppStats {
currentAppStats.TlsConnectionsCount = resetUint64(&as.TlsConnectionsCount)
currentAppStats.MatchedPairs = resetUint64(&as.MatchedPairs)
currentAppStats.DroppedTcpStreams = resetUint64(&as.DroppedTcpStreams)
currentAppStats.IgnoredLastAckCount = resetUint64(&as.IgnoredLastAckCount)
currentAppStats.ThrottledPackets = resetUint64(&as.ThrottledPackets)
currentAppStats.LiveTcpStreams = as.LiveTcpStreams
return currentAppStats

View File

@@ -10,11 +10,14 @@ import (
)
type CleanerStats struct {
flushed int
closed int
deleted int
}
type Cleaner struct {
assembler *reassembly.Assembler
assemblerMutex *sync.Mutex
cleanPeriod time.Duration
connectionTimeout time.Duration
stats CleanerStats
@@ -25,6 +28,11 @@ type Cleaner struct {
func (cl *Cleaner) clean() {
startCleanTime := time.Now()
cl.assemblerMutex.Lock()
logger.Log.Debugf("Assembler Stats before cleaning %s", cl.assembler.Dump())
flushed, closed := cl.assembler.FlushCloseOlderThan(startCleanTime.Add(-cl.connectionTimeout))
cl.assemblerMutex.Unlock()
cl.streamsMap.Range(func(k, v interface{}) bool {
reqResMatchers := v.(api.TcpStream).GetReqResMatchers()
for _, reqResMatcher := range reqResMatchers {
@@ -39,6 +47,8 @@ func (cl *Cleaner) clean() {
cl.statsMutex.Lock()
logger.Log.Debugf("Assembler Stats after cleaning %s", cl.assembler.Dump())
cl.stats.flushed += flushed
cl.stats.closed += closed
cl.statsMutex.Unlock()
}
@@ -57,12 +67,17 @@ func (cl *Cleaner) dumpStats() CleanerStats {
cl.statsMutex.Lock()
stats := CleanerStats{
flushed: cl.stats.flushed,
closed: cl.stats.closed,
deleted: cl.stats.deleted,
}
cl.stats.flushed = 0
cl.stats.closed = 0
cl.stats.deleted = 0
cl.statsMutex.Unlock()
return stats
}

View File

@@ -3,12 +3,10 @@ module github.com/up9inc/mizu/tap
go 1.17
require (
github.com/Masterminds/semver v1.5.0
github.com/cilium/ebpf v0.8.1
github.com/cilium/ebpf v0.8.0
github.com/go-errors/errors v1.4.2
github.com/google/gopacket v1.1.19
github.com/hashicorp/golang-lru v0.5.4
github.com/knightsc/gapstone v0.0.0-20211014144438-5e0e64002a6e
github.com/shirou/gopsutil v3.21.11+incompatible
github.com/struCoder/pidusage v0.2.1
github.com/up9inc/mizu/logger v0.0.0

View File

@@ -1,14 +1,12 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cilium/ebpf v0.8.1 h1:bLSSEbBLqGPXxls55pGr5qWZaTqcmfDJHhou7t254ao=
github.com/cilium/ebpf v0.8.1/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
github.com/cilium/ebpf v0.8.0 h1:2V6KSg3FRADVU2BMIRemZ0hV+9OM+aAHhZDjQyjJTAs=
github.com/cilium/ebpf v0.8.0/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -83,8 +81,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/knightsc/gapstone v0.0.0-20211014144438-5e0e64002a6e h1:6J5obSn9umEThiYzWzndcPOZR0Qj/sVCZpH6V1G7yNE=
github.com/knightsc/gapstone v0.0.0-20211014144438-5e0e64002a6e/go.mod h1:1K5hEzsMBLTPdRJKEHqBFJ8Zt2VRqDhomcQ11KH0WW4=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
@@ -294,6 +290,5 @@ sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

View File

@@ -14,9 +14,9 @@ import (
"fmt"
"os"
"runtime"
"strconv"
"strings"
"time"
"strconv"
"github.com/shirou/gopsutil/cpu"
"github.com/struCoder/pidusage"
@@ -45,7 +45,6 @@ var quiet = flag.Bool("quiet", false, "Be quiet regarding errors")
var hexdumppkt = flag.Bool("dumppkt", false, "Dump packet as hex")
var procfs = flag.String("procfs", "/proc", "The procfs directory, used when mapping host volumes into a container")
var ignoredPorts = flag.String("ignore-ports", "", "A comma separated list of ports to ignore")
var maxLiveStreams = flag.Int("max-live-streams", 500, "Maximum live streams to handle concurrently")
// capture
var iface = flag.String("i", "en0", "Interface to read packets from")
@@ -60,10 +59,8 @@ var tls = flag.Bool("tls", false, "Enable TLS tapper")
var memprofile = flag.String("memprofile", "", "Write memory profile")
type TapOpts struct {
HostMode bool
IgnoredPorts []uint16
maxLiveStreams int
staleConnectionTimeout time.Duration
HostMode bool
IgnoredPorts []uint16
}
var extensions []*api.Extension // global
@@ -92,13 +89,7 @@ func StartPassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem,
diagnose.StartMemoryProfiler(os.Getenv(MemoryProfilingDumpPath), os.Getenv(MemoryProfilingTimeIntervalSeconds))
}
assembler, err := initializePassiveTapper(opts, outputItems, streamsMap)
if err != nil {
logger.Log.Errorf("Error initializing tapper %w", err)
return
}
assembler := initializePassiveTapper(opts, outputItems, streamsMap)
go startPassiveTapper(streamsMap, assembler)
}
@@ -109,7 +100,7 @@ func UpdateTapTargets(newTapTargets []v1.Pod) {
packetSourceManager.UpdatePods(tapTargets, !*nodefrag, mainPacketInputChan)
if tlsTapperInstance != nil && os.Getenv("MIZU_GLOBAL_GOLANG_PID") == "" {
if tlsTapperInstance != nil {
if err := tlstapper.UpdateTapTargets(tlsTapperInstance, &tapTargets, *procfs); err != nil {
tlstapper.LogError(err)
success = false
@@ -133,7 +124,7 @@ func printNewTapTargets(success bool) {
}
}
func printPeriodicStats(cleaner *Cleaner, assembler *tcpAssembler) {
func printPeriodicStats(cleaner *Cleaner) {
statsPeriod := time.Second * time.Duration(*statsevery)
ticker := time.NewTicker(statsPeriod)
@@ -171,10 +162,8 @@ func printPeriodicStats(cleaner *Cleaner, assembler *tcpAssembler) {
}
}
logger.Log.Infof(
"heap-alloc: %d, heap-idle: %d, heap-objects: %d, goroutines: %d, cpu: %f, cores: %d/%d, rss: %f",
"mem: %d, goroutines: %d, cpu: %f, cores: %d/%d, rss: %f",
memStats.HeapAlloc,
memStats.HeapIdle,
memStats.HeapObjects,
runtime.NumGoroutine(),
sysInfo.CPU,
logicalCoreCount,
@@ -183,19 +172,15 @@ func printPeriodicStats(cleaner *Cleaner, assembler *tcpAssembler) {
// Since the last print
cleanStats := cleaner.dumpStats()
assemblerStats := assembler.DumpStats()
logger.Log.Infof(
"cleaner - flushed connections: %d, closed connections: %d, deleted messages: %d",
assemblerStats.flushedConnections,
assemblerStats.closedConnections,
cleanStats.flushed,
cleanStats.closed,
cleanStats.deleted,
)
currentAppStats := diagnose.AppStats.DumpStats()
appStatsJSON, _ := json.Marshal(currentAppStats)
logger.Log.Infof("app stats - %v", string(appStatsJSON))
// At the moment
logger.Log.Infof("assembler-stats: %s, packet-source-stats: %s", assembler.Dump(), packetSourceManager.Stats())
}
}
@@ -223,7 +208,7 @@ func initializePacketSources() error {
return err
}
func initializePassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem, streamsMap api.TcpStreamMap) (*tcpAssembler, error) {
func initializePassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem, streamsMap api.TcpStreamMap) *tcpAssembler {
diagnose.InitializeErrorsMap(*debug, *verbose, *quiet)
diagnose.InitializeTapperInternalStats()
@@ -234,10 +219,10 @@ func initializePassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelI
}
opts.IgnoredPorts = append(opts.IgnoredPorts, buildIgnoredPortsList(*ignoredPorts)...)
opts.maxLiveStreams = *maxLiveStreams
opts.staleConnectionTimeout = time.Duration(*staleTimeoutSeconds) * time.Second
return NewTcpAssembler(outputItems, streamsMap, opts)
assembler := NewTcpAssembler(outputItems, streamsMap, opts)
return assembler
}
func startPassiveTapper(streamsMap api.TcpStreamMap, assembler *tcpAssembler) {
@@ -248,13 +233,14 @@ func startPassiveTapper(streamsMap api.TcpStreamMap, assembler *tcpAssembler) {
staleConnectionTimeout := time.Second * time.Duration(*staleTimeoutSeconds)
cleaner := Cleaner{
assembler: assembler.Assembler,
assemblerMutex: &assembler.assemblerMutex,
cleanPeriod: cleanPeriod,
connectionTimeout: staleConnectionTimeout,
streamsMap: streamsMap,
}
cleaner.start()
go printPeriodicStats(&cleaner, assembler)
go printPeriodicStats(&cleaner)
assembler.processPackets(*hexdumppkt, mainPacketInputChan)
@@ -292,16 +278,7 @@ func startTlsTapper(extension *api.Extension, outputItems chan *api.OutputChanne
// A quick way to instrument libssl.so without PID filtering - used for debugging and troubleshooting
//
if os.Getenv("MIZU_GLOBAL_SSL_LIBRARY") != "" {
if err := tls.GlobalSsllibTap(os.Getenv("MIZU_GLOBAL_SSL_LIBRARY")); err != nil {
tlstapper.LogError(err)
return nil
}
}
// A quick way to instrument Go `crypto/tls` without PID filtering - used for debugging and troubleshooting
//
if os.Getenv("MIZU_GLOBAL_GOLANG_PID") != "" {
if err := tls.GlobalGoTap(*procfs, os.Getenv("MIZU_GLOBAL_GOLANG_PID")); err != nil {
if err := tls.GlobalTap(os.Getenv("MIZU_GLOBAL_SSL_LIBRARY")); err != nil {
tlstapper.LogError(err)
return nil
}

View File

@@ -160,19 +160,3 @@ func (m *PacketSourceManager) Close() {
src.close()
}
}
func (m *PacketSourceManager) Stats() string {
result := ""
for _, source := range m.sources {
stats, err := source.Stats()
if err != nil {
result = result + fmt.Sprintf("[%s: err:%s]", source.String(), err)
} else {
result = result + fmt.Sprintf("[%s: rec: %d dropped: %d]", source.String(), stats.PacketsReceived, stats.PacketsDropped)
}
}
return result
}

View File

@@ -116,10 +116,6 @@ func (source *tcpPacketSource) close() {
}
}
func (source *tcpPacketSource) Stats() (stat *pcap.Stats, err error) {
return source.handle.Stats()
}
func (source *tcpPacketSource) readPackets(ipdefrag bool, packets chan<- TcpPacketInfo) {
if dbgctl.MizuTapperDisablePcap {
return

View File

@@ -2,15 +2,14 @@ package tap
import (
"encoding/hex"
"fmt"
"os"
"os/signal"
"sync"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/google/gopacket/reassembly"
"github.com/hashicorp/golang-lru/simplelru"
"github.com/up9inc/mizu/logger"
"github.com/up9inc/mizu/tap/api"
"github.com/up9inc/mizu/tap/dbgctl"
@@ -18,33 +17,14 @@ import (
"github.com/up9inc/mizu/tap/source"
)
const (
lastClosedConnectionsMaxItems = 1000
packetsSeenLogThreshold = 1000
lastAckThreshold = time.Duration(3) * time.Second
)
type connectionId string
func NewConnectionId(c string) connectionId {
return connectionId(c)
}
type AssemblerStats struct {
flushedConnections int
closedConnections int
}
const PACKETS_SEEN_LOG_THRESHOLD = 1000
type tcpAssembler struct {
*reassembly.Assembler
streamPool *reassembly.StreamPool
streamFactory *tcpStreamFactory
ignoredPorts []uint16
lastClosedConnections *simplelru.LRU // Actual type is map[string]int64 which is "connId -> lastSeen"
liveConnections map[connectionId]bool
maxLiveStreams int
staleConnectionTimeout time.Duration
stats AssemblerStats
streamPool *reassembly.StreamPool
streamFactory *tcpStreamFactory
assemblerMutex sync.Mutex
ignoredPorts []uint16
}
// Context
@@ -58,166 +38,108 @@ func (c *context) GetCaptureInfo() gopacket.CaptureInfo {
return c.CaptureInfo
}
func NewTcpAssembler(outputItems chan *api.OutputChannelItem, streamsMap api.TcpStreamMap, opts *TapOpts) (*tcpAssembler, error) {
func NewTcpAssembler(outputItems chan *api.OutputChannelItem, streamsMap api.TcpStreamMap, opts *TapOpts) *tcpAssembler {
var emitter api.Emitter = &api.Emitting{
AppStats: &diagnose.AppStats,
OutputChannel: outputItems,
}
lastClosedConnections, err := simplelru.NewLRU(lastClosedConnectionsMaxItems, func(key interface{}, value interface{}) {})
if err != nil {
return nil, err
}
a := &tcpAssembler{
ignoredPorts: opts.IgnoredPorts,
lastClosedConnections: lastClosedConnections,
liveConnections: make(map[connectionId]bool),
maxLiveStreams: opts.maxLiveStreams,
staleConnectionTimeout: opts.staleConnectionTimeout,
stats: AssemblerStats{},
}
a.streamFactory = NewTcpStreamFactory(emitter, streamsMap, opts, a)
a.streamPool = reassembly.NewStreamPool(a.streamFactory)
a.Assembler = reassembly.NewAssembler(a.streamPool)
streamFactory := NewTcpStreamFactory(emitter, streamsMap, opts)
streamPool := reassembly.NewStreamPool(streamFactory)
assembler := reassembly.NewAssembler(streamPool)
maxBufferedPagesTotal := GetMaxBufferedPagesTotal()
maxBufferedPagesPerConnection := GetMaxBufferedPagesPerConnection()
logger.Log.Infof("Assembler options: maxBufferedPagesTotal=%d, maxBufferedPagesPerConnection=%d, opts=%v",
maxBufferedPagesTotal, maxBufferedPagesPerConnection, opts)
a.Assembler.AssemblerOptions.MaxBufferedPagesTotal = maxBufferedPagesTotal
a.Assembler.AssemblerOptions.MaxBufferedPagesPerConnection = maxBufferedPagesPerConnection
assembler.AssemblerOptions.MaxBufferedPagesTotal = maxBufferedPagesTotal
assembler.AssemblerOptions.MaxBufferedPagesPerConnection = maxBufferedPagesPerConnection
return a, nil
return &tcpAssembler{
Assembler: assembler,
streamPool: streamPool,
streamFactory: streamFactory,
ignoredPorts: opts.IgnoredPorts,
}
}
func (a *tcpAssembler) processPackets(dumpPacket bool, packets <-chan source.TcpPacketInfo) {
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, os.Interrupt)
ticker := time.NewTicker(a.staleConnectionTimeout)
out:
for {
for packetInfo := range packets {
packetsCount := diagnose.AppStats.IncPacketsCount()
if packetsCount%PACKETS_SEEN_LOG_THRESHOLD == 0 {
logger.Log.Debugf("Packets seen: #%d", packetsCount)
}
packet := packetInfo.Packet
data := packet.Data()
diagnose.AppStats.UpdateProcessedBytes(uint64(len(data)))
if dumpPacket {
logger.Log.Debugf("Packet content (%d/0x%x) - %s", len(data), len(data), hex.Dump(data))
}
tcp := packet.Layer(layers.LayerTypeTCP)
if tcp != nil {
diagnose.AppStats.IncTcpPacketsCount()
tcp := tcp.(*layers.TCP)
if a.shouldIgnorePort(uint16(tcp.DstPort)) {
diagnose.AppStats.IncIgnoredPacketsCount()
} else {
c := context{
CaptureInfo: packet.Metadata().CaptureInfo,
Origin: packetInfo.Source.Origin,
}
diagnose.InternalStats.Totalsz += len(tcp.Payload)
if !dbgctl.MizuTapperDisableTcpReassembly {
a.assemblerMutex.Lock()
a.AssembleWithContext(packet.NetworkLayer().NetworkFlow(), tcp, &c)
a.assemblerMutex.Unlock()
}
}
}
done := *maxcount > 0 && int64(diagnose.AppStats.PacketsCount) >= *maxcount
if done {
errorMapLen, _ := diagnose.TapErrors.GetErrorsSummary()
logger.Log.Infof("Processed %v packets (%v bytes) in %v (errors: %v, errTypes:%v)",
diagnose.AppStats.PacketsCount,
diagnose.AppStats.ProcessedBytes,
time.Since(diagnose.AppStats.StartTime),
diagnose.TapErrors.ErrorsCount,
errorMapLen)
}
select {
case packetInfo, ok := <-packets:
if !ok {
break out
}
if a.processPacket(packetInfo, dumpPacket) {
break out
}
case <-signalChan:
logger.Log.Infof("Caught SIGINT: aborting")
break out
case <-ticker.C:
a.periodicClean()
done = true
default:
// NOP: continue
}
if done {
break
}
}
a.assemblerMutex.Lock()
closed := a.FlushAll()
a.assemblerMutex.Unlock()
logger.Log.Debugf("Final flush: %d closed", closed)
}
func (a *tcpAssembler) processPacket(packetInfo source.TcpPacketInfo, dumpPacket bool) bool {
packetsCount := diagnose.AppStats.IncPacketsCount()
if packetsCount%packetsSeenLogThreshold == 0 {
logger.Log.Debugf("Packets seen: #%d", packetsCount)
}
packet := packetInfo.Packet
data := packet.Data()
diagnose.AppStats.UpdateProcessedBytes(uint64(len(data)))
if dumpPacket {
logger.Log.Debugf("Packet content (%d/0x%x) - %s", len(data), len(data), hex.Dump(data))
}
tcp := packet.Layer(layers.LayerTypeTCP)
if tcp != nil {
a.processTcpPacket(packetInfo.Source.Origin, packet, tcp.(*layers.TCP))
}
done := *maxcount > 0 && int64(diagnose.AppStats.PacketsCount) >= *maxcount
if done {
errorMapLen, _ := diagnose.TapErrors.GetErrorsSummary()
logger.Log.Infof("Processed %v packets (%v bytes) in %v (errors: %v, errTypes:%v)",
diagnose.AppStats.PacketsCount,
diagnose.AppStats.ProcessedBytes,
time.Since(diagnose.AppStats.StartTime),
diagnose.TapErrors.ErrorsCount,
errorMapLen)
}
return done
}
func (a *tcpAssembler) processTcpPacket(origin api.Capture, packet gopacket.Packet, tcp *layers.TCP) {
diagnose.AppStats.IncTcpPacketsCount()
if a.shouldIgnorePort(uint16(tcp.DstPort)) || a.shouldIgnorePort(uint16(tcp.SrcPort)) {
diagnose.AppStats.IncIgnoredPacketsCount()
return
}
id := getConnectionId(packet.NetworkLayer().NetworkFlow().Src().String(),
packet.TransportLayer().TransportFlow().Src().String(),
packet.NetworkLayer().NetworkFlow().Dst().String(),
packet.TransportLayer().TransportFlow().Dst().String())
if a.isRecentlyClosed(id) {
diagnose.AppStats.IncIgnoredLastAckCount()
return
}
if a.shouldThrottle(id) {
diagnose.AppStats.IncThrottledPackets()
return
}
c := context{
CaptureInfo: packet.Metadata().CaptureInfo,
Origin: origin,
}
diagnose.InternalStats.Totalsz += len(tcp.Payload)
if !dbgctl.MizuTapperDisableTcpReassembly {
a.AssembleWithContext(packet.NetworkLayer().NetworkFlow(), tcp, &c)
}
}
func (a *tcpAssembler) tcpStreamCreated(stream *tcpStream) {
a.liveConnections[stream.connectionId] = true
}
func (a *tcpAssembler) tcpStreamClosed(stream *tcpStream) {
a.lastClosedConnections.Add(stream.connectionId, time.Now().UnixMilli())
delete(a.liveConnections, stream.connectionId)
}
func (a *tcpAssembler) isRecentlyClosed(c connectionId) bool {
if closedTimeMillis, ok := a.lastClosedConnections.Get(c); ok {
timeSinceClosed := time.Since(time.UnixMilli(closedTimeMillis.(int64)))
if timeSinceClosed < lastAckThreshold {
return true
}
}
return false
}
func (a *tcpAssembler) shouldThrottle(c connectionId) bool {
if _, ok := a.liveConnections[c]; ok {
return false
}
return len(a.liveConnections) > a.maxLiveStreams
}
func (a *tcpAssembler) dumpStreamPool() {
a.streamPool.Dump()
}
func (a *tcpAssembler) waitAndDump() {
a.streamFactory.WaitGoRoutines()
a.assemblerMutex.Lock()
logger.Log.Debugf("%s", a.Dump())
a.assemblerMutex.Unlock()
}
func (a *tcpAssembler) shouldIgnorePort(port uint16) bool {
@@ -229,26 +151,3 @@ func (a *tcpAssembler) shouldIgnorePort(port uint16) bool {
return false
}
func (a *tcpAssembler) periodicClean() {
flushed, closed := a.FlushCloseOlderThan(time.Now().Add(-a.staleConnectionTimeout))
stats := a.stats
stats.closedConnections += closed
stats.flushedConnections += flushed
}
func (a *tcpAssembler) DumpStats() AssemblerStats {
result := a.stats
a.stats = AssemblerStats{}
return result
}
func getConnectionId(saddr string, sport string, daddr string, dport string) connectionId {
s := fmt.Sprintf("%s:%s", saddr, sport)
d := fmt.Sprintf("%s:%s", daddr, dport)
if s > d {
return NewConnectionId(fmt.Sprintf("%s#%s", s, d))
} else {
return NewConnectionId(fmt.Sprintf("%s#%s", d, s))
}
}
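A quick illustration of the canonicalization above (invented addresses, not taken from the change): getConnectionId orders the two endpoints lexicographically, so both directions of a connection map to the same key.
// a and b describe the same TCP connection seen from opposite directions.
a := getConnectionId("10.0.0.1", "443", "10.0.0.2", "51000")
b := getConnectionId("10.0.0.2", "51000", "10.0.0.1", "443")
// Both yield "10.0.0.2:51000#10.0.0.1:443", so liveConnections and the
// lastClosedConnections LRU need only one entry per connection.
_, _ = a, b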

View File

@@ -151,6 +151,6 @@ func (t *tcpReassemblyStream) ReassemblyComplete(ac reassembly.AssemblerContext)
if t.tcpStream.GetIsTapTarget() && !t.tcpStream.GetIsClosed() {
t.tcpStream.close()
}
return true
// do not remove the connection to allow last ACK
return false
}

View File

@@ -8,11 +8,6 @@ import (
"github.com/up9inc/mizu/tap/dbgctl"
)
type tcpStreamCallbacks interface {
tcpStreamCreated(stream *tcpStream)
tcpStreamClosed(stream *tcpStream)
}
/* It's a connection (bidirectional)
* Implements gopacket.reassembly.Stream interface (Accept, ReassembledSG, ReassemblyComplete)
* ReassembledSG gets called when new reassembled data is ready (i.e. bytes in order, no duplicates, complete)
@@ -30,25 +25,16 @@ type tcpStream struct {
reqResMatchers []api.RequestResponseMatcher
createdAt time.Time
streamsMap api.TcpStreamMap
connectionId connectionId
callbacks tcpStreamCallbacks
sync.Mutex
}
func NewTcpStream(isTapTarget bool, streamsMap api.TcpStreamMap, capture api.Capture,
connectionId connectionId, callbacks tcpStreamCallbacks) *tcpStream {
t := &tcpStream{
isTapTarget: isTapTarget,
streamsMap: streamsMap,
origin: capture,
createdAt: time.Now(),
connectionId: connectionId,
callbacks: callbacks,
func NewTcpStream(isTapTarget bool, streamsMap api.TcpStreamMap, capture api.Capture) *tcpStream {
return &tcpStream{
isTapTarget: isTapTarget,
streamsMap: streamsMap,
origin: capture,
createdAt: time.Now(),
}
t.callbacks.tcpStreamCreated(t)
return t
}
func (t *tcpStream) getId() int64 {
@@ -70,9 +56,9 @@ func (t *tcpStream) close() {
t.isClosed = true
t.streamsMap.Delete(t.id)
t.client.close()
t.server.close()
t.callbacks.tcpStreamClosed(t)
}
func (t *tcpStream) addCounterPair(counterPair *api.CounterPair) {

View File

@@ -19,15 +19,14 @@ import (
* Generates a new tcp stream for each new tcp connection. Closes the stream when the connection closes.
*/
type tcpStreamFactory struct {
wg sync.WaitGroup
emitter api.Emitter
streamsMap api.TcpStreamMap
ownIps []string
opts *TapOpts
streamsCallbacks tcpStreamCallbacks
wg sync.WaitGroup
emitter api.Emitter
streamsMap api.TcpStreamMap
ownIps []string
opts *TapOpts
}
func NewTcpStreamFactory(emitter api.Emitter, streamsMap api.TcpStreamMap, opts *TapOpts, streamsCallbacks tcpStreamCallbacks) *tcpStreamFactory {
func NewTcpStreamFactory(emitter api.Emitter, streamsMap api.TcpStreamMap, opts *TapOpts) *tcpStreamFactory {
var ownIps []string
if localhostIPs, err := getLocalhostIPs(); err != nil {
@@ -40,11 +39,10 @@ func NewTcpStreamFactory(emitter api.Emitter, streamsMap api.TcpStreamMap, opts
}
return &tcpStreamFactory{
emitter: emitter,
streamsMap: streamsMap,
ownIps: ownIps,
opts: opts,
streamsCallbacks: streamsCallbacks,
emitter: emitter,
streamsMap: streamsMap,
ownIps: ownIps,
opts: opts,
}
}
@@ -59,8 +57,7 @@ func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcpLayer *lay
props := factory.getStreamProps(srcIp, srcPort, dstIp, dstPort)
isTapTarget := props.isTapTarget
connectionId := getConnectionId(srcIp, srcPort, dstIp, dstPort)
stream := NewTcpStream(isTapTarget, factory.streamsMap, getPacketOrigin(ac), connectionId, factory.streamsCallbacks)
stream := NewTcpStream(isTapTarget, factory.streamsMap, getPacketOrigin(ac))
reassemblyStream := NewTcpReassemblyStream(fmt.Sprintf("%s:%s", net, transport), tcpLayer, fsmOptions, stream)
if stream.GetIsTapTarget() {
stream.setId(factory.streamsMap.NextId())

View File

@@ -1,5 +1,5 @@
FROM golang:1.17-alpine
FROM alpine:3.14
RUN apk --no-cache update && apk --no-cache add clang llvm libbpf-dev linux-headers
RUN apk --no-cache update && apk --no-cache add clang llvm libbpf-dev go linux-headers
WORKDIR /mizu

View File

@@ -6,22 +6,17 @@ MIZU_HOME=$(realpath ../../../)
docker build -t mizu-ebpf-builder . || exit 1
BPF_TARGET=amd64
BPF_CFLAGS="-O2 -g -D__TARGET_ARCH_x86"
ARCH=$(uname -m)
if [[ $ARCH == "aarch64" ]]; then
BPF_TARGET=arm64
BPF_CFLAGS="-O2 -g -D__TARGET_ARCH_arm64"
fi
docker run --rm \
--name mizu-ebpf-builder \
-v $MIZU_HOME:/mizu \
-v $(go env GOPATH):/root/go \
-it mizu-ebpf-builder \
sh -c "
BPF_TARGET=\"$BPF_TARGET\" BPF_CFLAGS=\"$BPF_CFLAGS\" go generate tap/tlstapper/tls_tapper.go
chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpf*
go generate tap/tlstapper/tls_tapper.go
chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfeb.go
chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfeb.o
chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfel.go
chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfel.o
" || exit 1
popd

View File

@@ -1,141 +0,0 @@
/*
Note: This file is licenced differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/
#include "include/headers.h"
#include "include/util.h"
#include "include/maps.h"
#include "include/log.h"
#include "include/logger_messages.h"
#include "include/common.h"
static __always_inline int add_address_to_chunk(struct pt_regs *ctx, struct tls_chunk* chunk, __u64 id, __u32 fd) {
__u32 pid = id >> 32;
__u64 key = (__u64) pid << 32 | fd;
struct fd_info *fdinfo = bpf_map_lookup_elem(&file_descriptor_to_ipv4, &key);
if (fdinfo == NULL) {
return 0;
}
int err = bpf_probe_read(chunk->address, sizeof(chunk->address), fdinfo->ipv4_addr);
chunk->flags |= (fdinfo->flags & FLAGS_IS_CLIENT_BIT);
if (err != 0) {
log_error(ctx, LOG_ERROR_READING_FD_ADDRESS, id, err, 0l);
return 0;
}
return 1;
}
static __always_inline void send_chunk_part(struct pt_regs *ctx, __u8* buffer, __u64 id,
struct tls_chunk* chunk, int start, int end) {
size_t recorded = MIN(end - start, sizeof(chunk->data));
if (recorded <= 0) {
return;
}
chunk->recorded = recorded;
chunk->start = start;
// This ugly trick is to keep the eBPF verifier happy
//
long err = 0;
if (chunk->recorded == sizeof(chunk->data)) {
err = bpf_probe_read(chunk->data, sizeof(chunk->data), buffer + start);
} else {
recorded &= (sizeof(chunk->data) - 1); // Buffer size must be a power of 2
err = bpf_probe_read(chunk->data, recorded, buffer + start);
}
if (err != 0) {
log_error(ctx, LOG_ERROR_READING_FROM_SSL_BUFFER, id, err, 0l);
return;
}
bpf_perf_event_output(ctx, &chunks_buffer, BPF_F_CURRENT_CPU, chunk, sizeof(struct tls_chunk));
}
static __always_inline void send_chunk(struct pt_regs *ctx, __u8* buffer, __u64 id, struct tls_chunk* chunk) {
// eBPF loops must be bounded at compile time; we can't use (i < chunk->len / CHUNK_SIZE)
//
// https://lwn.net/Articles/794934/
//
// However, we want to run on kernels older than 5.3, hence we use "#pragma unroll" anyway
//
#pragma unroll
for (int i = 0; i < MAX_CHUNKS_PER_OPERATION; i++) {
if (chunk->len <= (CHUNK_SIZE * i)) {
break;
}
send_chunk_part(ctx, buffer, id, chunk, CHUNK_SIZE * i, chunk->len);
}
}
static __always_inline void output_ssl_chunk(struct pt_regs *ctx, struct ssl_info* info, int count_bytes, __u64 id, __u32 flags) {
if (count_bytes > (CHUNK_SIZE * MAX_CHUNKS_PER_OPERATION)) {
log_error(ctx, LOG_ERROR_BUFFER_TOO_BIG, id, count_bytes, 0l);
return;
}
struct tls_chunk* chunk;
int zero = 0;
// If another thread running on the same CPU gets to this point at the same time as us (context switch),
// the data will be corrupted - protection may be added in the future
//
chunk = bpf_map_lookup_elem(&heap, &zero);
if (!chunk) {
log_error(ctx, LOG_ERROR_ALLOCATING_CHUNK, id, 0l, 0l);
return;
}
chunk->flags = flags;
chunk->pid = id >> 32;
chunk->tgid = id;
chunk->len = count_bytes;
chunk->fd = info->fd;
if (!add_address_to_chunk(ctx, chunk, id, chunk->fd)) {
// Without an address, we drop the chunk because there is not much to do with it in Go
//
return;
}
send_chunk(ctx, info->buffer, id, chunk);
}
static __always_inline struct ssl_info new_ssl_info() {
struct ssl_info info = { .fd = invalid_fd, .created_at_nano = bpf_ktime_get_ns() };
return info;
}
static __always_inline struct ssl_info lookup_ssl_info(struct pt_regs *ctx, struct bpf_map_def* map_fd, __u64 pid_tgid) {
struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &pid_tgid);
struct ssl_info info = new_ssl_info();
if (infoPtr != NULL) {
long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);
if (err != 0) {
log_error(ctx, LOG_ERROR_READING_SSL_CONTEXT, pid_tgid, err, ORIGIN_SSL_UPROBE_CODE);
}
if ((bpf_ktime_get_ns() - info.created_at_nano) > SSL_INFO_MAX_TTL_NANO) {
// If the ssl info is too old, we don't want to use its info because it may be incorrect.
//
info.fd = invalid_fd;
info.created_at_nano = bpf_ktime_get_ns();
}
}
return info;
}

View File

@@ -28,7 +28,7 @@ void sys_enter_read(struct sys_enter_read_ctx *ctx) {
return;
}
struct ssl_info *infoPtr = bpf_map_lookup_elem(&openssl_read_context, &id);
struct ssl_info *infoPtr = bpf_map_lookup_elem(&ssl_read_context, &id);
if (infoPtr == NULL) {
return;
@@ -44,7 +44,7 @@ void sys_enter_read(struct sys_enter_read_ctx *ctx) {
info.fd = ctx->fd;
err = bpf_map_update_elem(&openssl_read_context, &id, &info, BPF_ANY);
err = bpf_map_update_elem(&ssl_read_context, &id, &info, BPF_ANY);
if (err != 0) {
log_error(ctx, LOG_ERROR_PUTTING_FILE_DESCRIPTOR, id, err, ORIGIN_SYS_ENTER_READ_CODE);
@@ -68,7 +68,7 @@ void sys_enter_write(struct sys_enter_write_ctx *ctx) {
return;
}
struct ssl_info *infoPtr = bpf_map_lookup_elem(&openssl_write_context, &id);
struct ssl_info *infoPtr = bpf_map_lookup_elem(&ssl_write_context, &id);
if (infoPtr == NULL) {
return;
@@ -84,7 +84,7 @@ void sys_enter_write(struct sys_enter_write_ctx *ctx) {
info.fd = ctx->fd;
err = bpf_map_update_elem(&openssl_write_context, &id, &info, BPF_ANY);
err = bpf_map_update_elem(&ssl_write_context, &id, &info, BPF_ANY);
if (err != 0) {
log_error(ctx, LOG_ERROR_PUTTING_FILE_DESCRIPTOR, id, err, ORIGIN_SYS_ENTER_WRITE_CODE);

View File

@@ -1,163 +0,0 @@
/*
Note: This file is licenced differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
---
README
Go does not follow any platform ABI, such as the x86-64 System V ABI.
Before 1.17, Go used the stack-based Plan 9 (Bell Labs) calling convention (ABI0).
Starting with 1.17, Go switched to an internal register-based calling convention (ABIInternal).
For now, the probes in this file support only ABIInternal (Go 1.17+).
`uretprobe` in the Linux kernel uses a trampoline pattern to jump to the original return
address of the probed function. A goroutine's stack is 2 KB while a C thread's stack is 2 MB on Linux.
If the stack size exceeds 2 KB, the Go runtime relocates the stack. That causes the
return address to become incorrect in the case of a `uretprobe`, and the probed Go program crashes.
Therefore `uretprobe` CAN'T BE USED for a Go program.
The `_ex_uprobe` suffixed probes, which are supposed to be `uretprobe`(s), are actually `uprobe`(s)
because of the non-standard ABI of Go. Therefore we probe all `ret` mnemonics under the symbol
by automatically finding them through reading the ELF binary and disassembling the symbols.
The disassembly-related code is located in `go_offsets.go` and uses the Capstone Engine.
Solution based on: https://github.com/iovisor/bcc/issues/1320#issuecomment-407927542
*Example* We probe an arbitrary point in a function body (offset +559):
https://github.com/golang/go/blob/go1.17.6/src/crypto/tls/conn.go#L1299
We get the file descriptor using the common $rax register, which holds the address
of `go.itab.*net.TCPConn,net.Conn`, through a series of dereferences
made with `bpf_probe_read` calls in the `go_crypto_tls_get_fd_from_tcp_conn` function.
---
SOURCES:
Tracing Go Functions with eBPF (before 1.17): https://www.grant.pizza/blog/tracing-go-functions-with-ebpf-part-2/
Challenges of BPF Tracing Go: https://blog.0x74696d.com/posts/challenges-of-bpf-tracing-go/
x86 calling conventions: https://en.wikipedia.org/wiki/X86_calling_conventions
Plan 9 from Bell Labs: https://en.wikipedia.org/wiki/Plan_9_from_Bell_Labs
The issue for calling convention change in Go: https://github.com/golang/go/issues/40724
Proposal of Register-based Go calling convention: https://go.googlesource.com/proposal/+/master/design/40724-register-calling.md
Go internal ABI (1.17) specification: https://go.googlesource.com/go/+/refs/heads/dev.regabi/src/cmd/compile/internal-abi.md
Go internal ABI (current) specification: https://go.googlesource.com/go/+/refs/heads/master/src/cmd/compile/abi-internal.md
A Quick Guide to Go's Assembler: https://go.googlesource.com/go/+/refs/heads/dev.regabi/doc/asm.html
Dissecting Go Binaries: https://www.grant.pizza/blog/dissecting-go-binaries/
Capstone Engine: https://www.capstone-engine.org/
*/
#include "include/headers.h"
#include "include/util.h"
#include "include/maps.h"
#include "include/log.h"
#include "include/logger_messages.h"
#include "include/pids.h"
#include "include/common.h"
#include "include/go_abi_internal.h"
#include "include/go_types.h"
static __always_inline __u32 go_crypto_tls_get_fd_from_tcp_conn(struct pt_regs *ctx) {
struct go_interface conn;
long err = bpf_probe_read(&conn, sizeof(conn), (void*)GO_ABI_INTERNAL_PT_REGS_R1(ctx));
if (err != 0) {
return invalid_fd;
}
void* net_fd_ptr;
err = bpf_probe_read(&net_fd_ptr, sizeof(net_fd_ptr), conn.ptr);
if (err != 0) {
return invalid_fd;
}
__u32 fd;
err = bpf_probe_read(&fd, sizeof(fd), net_fd_ptr + 0x10);
if (err != 0) {
return invalid_fd;
}
return fd;
}
static __always_inline void go_crypto_tls_uprobe(struct pt_regs *ctx, struct bpf_map_def* go_context) {
__u64 pid_tgid = bpf_get_current_pid_tgid();
__u64 pid = pid_tgid >> 32;
if (!should_tap(pid)) {
return;
}
struct ssl_info info = new_ssl_info();
info.buffer_len = GO_ABI_INTERNAL_PT_REGS_R2(ctx);
info.buffer = (void*)GO_ABI_INTERNAL_PT_REGS_R4(ctx);
info.fd = go_crypto_tls_get_fd_from_tcp_conn(ctx);
// GO_ABI_INTERNAL_PT_REGS_GP is Goroutine address
__u64 pid_fp = pid << 32 | GO_ABI_INTERNAL_PT_REGS_GP(ctx);
long err = bpf_map_update_elem(go_context, &pid_fp, &info, BPF_ANY);
if (err != 0) {
log_error(ctx, LOG_ERROR_PUTTING_SSL_CONTEXT, pid_tgid, err, 0l);
}
return;
}
static __always_inline void go_crypto_tls_ex_uprobe(struct pt_regs *ctx, struct bpf_map_def* go_context, __u32 flags) {
__u64 pid_tgid = bpf_get_current_pid_tgid();
__u64 pid = pid_tgid >> 32;
if (!should_tap(pid)) {
return;
}
// GO_ABI_INTERNAL_PT_REGS_GP is Goroutine address
__u64 pid_fp = pid << 32 | GO_ABI_INTERNAL_PT_REGS_GP(ctx);
struct ssl_info *info_ptr = bpf_map_lookup_elem(go_context, &pid_fp);
if (info_ptr == NULL) {
return;
}
bpf_map_delete_elem(go_context, &pid_fp);
struct ssl_info info;
long err = bpf_probe_read(&info, sizeof(struct ssl_info), info_ptr);
if (err != 0) {
log_error(ctx, LOG_ERROR_READING_SSL_CONTEXT, pid_tgid, err, ORIGIN_SSL_URETPROBE_CODE);
return;
}
// In case of read, the length is determined on return
if (flags == FLAGS_IS_READ_BIT) {
info.buffer_len = GO_ABI_INTERNAL_PT_REGS_R1(ctx); // n in return n, nil
// This check ignores 0-length reads (such reads result in an error)
if (info.buffer_len <= 0) {
return;
}
}
output_ssl_chunk(ctx, &info, info.buffer_len, pid_tgid, flags);
return;
}
SEC("uprobe/go_crypto_tls_write")
void BPF_KPROBE(go_crypto_tls_write) {
go_crypto_tls_uprobe(ctx, &go_write_context);
}
SEC("uprobe/go_crypto_tls_write_ex")
void BPF_KPROBE(go_crypto_tls_write_ex) {
go_crypto_tls_ex_uprobe(ctx, &go_write_context, 0);
}
SEC("uprobe/go_crypto_tls_read")
void BPF_KPROBE(go_crypto_tls_read) {
go_crypto_tls_uprobe(ctx, &go_read_context);
}
SEC("uprobe/go_crypto_tls_read_ex")
void BPF_KPROBE(go_crypto_tls_read_ex) {
go_crypto_tls_ex_uprobe(ctx, &go_read_context, FLAGS_IS_READ_BIT);
}

View File

@@ -1,19 +0,0 @@
/*
Note: This file is licenced differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/
#ifndef __COMMON__
#define __COMMON__
const __s32 invalid_fd = -1;
static int add_address_to_chunk(struct pt_regs *ctx, struct tls_chunk* chunk, __u64 id, __u32 fd);
static void send_chunk_part(struct pt_regs *ctx, __u8* buffer, __u64 id, struct tls_chunk* chunk, int start, int end);
static void send_chunk(struct pt_regs *ctx, __u8* buffer, __u64 id, struct tls_chunk* chunk);
static void output_ssl_chunk(struct pt_regs *ctx, struct ssl_info* info, int count_bytes, __u64 id, __u32 flags);
static struct ssl_info new_ssl_info();
static struct ssl_info lookup_ssl_info(struct pt_regs *ctx, struct bpf_map_def* map_fd, __u64 pid_tgid);
#endif /* __COMMON__ */

View File

@@ -1,141 +0,0 @@
/*
Note: This file is licenced differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/
#ifndef __GO_ABI_INTERNAL__
#define __GO_ABI_INTERNAL__
/*
Go internal ABI specification
https://go.googlesource.com/go/+/refs/heads/master/src/cmd/compile/abi-internal.md
*/
/* Scan the ARCH passed in from ARCH env variable */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
#define bpf_target_defined
#elif defined(__TARGET_ARCH_s390)
#define bpf_target_s390
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm)
#define bpf_target_arm
#define bpf_target_defined
#elif defined(__TARGET_ARCH_arm64)
#define bpf_target_arm64
#define bpf_target_defined
#elif defined(__TARGET_ARCH_mips)
#define bpf_target_mips
#define bpf_target_defined
#elif defined(__TARGET_ARCH_powerpc)
#define bpf_target_powerpc
#define bpf_target_defined
#elif defined(__TARGET_ARCH_sparc)
#define bpf_target_sparc
#define bpf_target_defined
#else
#undef bpf_target_defined
#endif
/* Fall back to what the compiler says */
#ifndef bpf_target_defined
#if defined(__x86_64__)
#define bpf_target_x86
#elif defined(__s390__)
#define bpf_target_s390
#elif defined(__arm__)
#define bpf_target_arm
#elif defined(__aarch64__)
#define bpf_target_arm64
#elif defined(__mips__)
#define bpf_target_mips
#elif defined(__powerpc__)
#define bpf_target_powerpc
#elif defined(__sparc__)
#define bpf_target_sparc
#endif
#endif
#if defined(bpf_target_x86)
#ifdef __i386__
/*
https://go.googlesource.com/go/+/refs/heads/dev.regabi/src/cmd/compile/internal-abi.md#amd64-architecture
https://github.com/golang/go/blob/go1.17.6/src/cmd/compile/internal/ssa/gen/AMD64Ops.go#L100
*/
#define GO_ABI_INTERNAL_PT_REGS_R1(x) ((x)->eax)
#define GO_ABI_INTERNAL_PT_REGS_P2(x) ((x)->ecx)
#define GO_ABI_INTERNAL_PT_REGS_P3(x) ((x)->edx)
#define GO_ABI_INTERNAL_PT_REGS_P4(x) 0
#define GO_ABI_INTERNAL_PT_REGS_P5(x) 0
#define GO_ABI_INTERNAL_PT_REGS_P6(x) 0
#define GO_ABI_INTERNAL_PT_REGS_SP(x) ((x)->esp)
#define GO_ABI_INTERNAL_PT_REGS_FP(x) ((x)->ebp)
#define GO_ABI_INTERNAL_PT_REGS_GP(x) ((x)->e14)
#else
#define GO_ABI_INTERNAL_PT_REGS_R1(x) ((x)->rax)
#define GO_ABI_INTERNAL_PT_REGS_R2(x) ((x)->rcx)
#define GO_ABI_INTERNAL_PT_REGS_R3(x) ((x)->rdx)
#define GO_ABI_INTERNAL_PT_REGS_R4(x) ((x)->rbx)
#define GO_ABI_INTERNAL_PT_REGS_R5(x) ((x)->rbp)
#define GO_ABI_INTERNAL_PT_REGS_R6(x) ((x)->rsi)
#define GO_ABI_INTERNAL_PT_REGS_SP(x) ((x)->rsp)
#define GO_ABI_INTERNAL_PT_REGS_FP(x) ((x)->rbp)
#define GO_ABI_INTERNAL_PT_REGS_GP(x) ((x)->r14)
#endif
#elif defined(bpf_target_arm)
/*
https://go.googlesource.com/go/+/refs/heads/master/src/cmd/compile/abi-internal.md#arm64-architecture
https://github.com/golang/go/blob/go1.17.6/src/cmd/compile/internal/ssa/gen/ARM64Ops.go#L129-L131
*/
#define GO_ABI_INTERNAL_PT_REGS_R1(x) ((x)->uregs[0])
#define GO_ABI_INTERNAL_PT_REGS_R2(x) ((x)->uregs[1])
#define GO_ABI_INTERNAL_PT_REGS_R3(x) ((x)->uregs[2])
#define GO_ABI_INTERNAL_PT_REGS_R4(x) ((x)->uregs[3])
#define GO_ABI_INTERNAL_PT_REGS_R5(x) ((x)->uregs[4])
#define GO_ABI_INTERNAL_PT_REGS_R6(x) ((x)->uregs[5])
#define GO_ABI_INTERNAL_PT_REGS_SP(x) ((x)->uregs[14])
#define GO_ABI_INTERNAL_PT_REGS_FP(x) ((x)->uregs[29])
#define GO_ABI_INTERNAL_PT_REGS_GP(x) ((x)->uregs[28])
#elif defined(bpf_target_arm64)
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
struct pt_regs;
#define PT_REGS_ARM64 const volatile struct user_pt_regs
#define GO_ABI_INTERNAL_PT_REGS_R1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
#define GO_ABI_INTERNAL_PT_REGS_R2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
#define GO_ABI_INTERNAL_PT_REGS_R3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
#define GO_ABI_INTERNAL_PT_REGS_R4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
#define GO_ABI_INTERNAL_PT_REGS_R5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
#define GO_ABI_INTERNAL_PT_REGS_R6(x) (((PT_REGS_ARM64 *)(x))->regs[5])
#define GO_ABI_INTERNAL_PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->regs[30])
#define GO_ABI_INTERNAL_PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
#define GO_ABI_INTERNAL_PT_REGS_GP(x) (((PT_REGS_ARM64 *)(x))->regs[28])
#elif defined(bpf_target_powerpc)
/*
https://go.googlesource.com/go/+/refs/heads/master/src/cmd/compile/abi-internal.md#ppc64-architecture
https://github.com/golang/go/blob/go1.17.6/src/cmd/compile/internal/ssa/gen/PPC64Ops.go#L125-L127
*/
#define GO_ABI_INTERNAL_PT_REGS_R1(x) ((x)->gpr[3])
#define GO_ABI_INTERNAL_PT_REGS_R2(x) ((x)->gpr[4])
#define GO_ABI_INTERNAL_PT_REGS_R3(x) ((x)->gpr[5])
#define GO_ABI_INTERNAL_PT_REGS_R4(x) ((x)->gpr[6])
#define GO_ABI_INTERNAL_PT_REGS_R5(x) ((x)->gpr[7])
#define GO_ABI_INTERNAL_PT_REGS_R6(x) ((x)->gpr[8])
#define GO_ABI_INTERNAL_PT_REGS_SP(x) ((x)->sp)
#define GO_ABI_INTERNAL_PT_REGS_FP(x) ((x)->gpr[12])
#define GO_ABI_INTERNAL_PT_REGS_GP(x) ((x)->gpr[30])
#endif
#endif /* __GO_ABI_INTERNAL__ */

View File

@@ -1,15 +0,0 @@
/*
Note: This file is licenced differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/
#ifndef __GO_TYPES__
#define __GO_TYPES__
struct go_interface {
__s64 type;
void* ptr;
};
#endif /* __GO_TYPES__ */

View File

@@ -16,15 +16,11 @@ Copyright (C) UP9 Inc.
// One minute in nanoseconds. Chosen by gut feeling.
#define SSL_INFO_MAX_TTL_NANO (1000000000l * 60l)
#define MAX_ENTRIES_HASH (1 << 12) // 4096
#define MAX_ENTRIES_PERF_OUTPUT (1 << 10) // 1024
#define MAX_ENTRIES_LRU_HASH (1 << 14) // 16384
// The same struct can be found in chunk.go
//
// Be careful when editing, alignment and padding should be exactly the same in go/c.
//
struct tls_chunk {
struct tlsChunk {
__u32 pid;
__u32 tgid;
__u32 len;
@@ -38,7 +34,6 @@ struct tls_chunk {
struct ssl_info {
void* buffer;
__u32 buffer_len;
__u32 fd;
__u64 created_at_nano;
@@ -53,16 +48,6 @@ struct fd_info {
__u8 flags;
};
// Heap-like area for eBPF programs - the stack is limited to 512 bytes, so we must use maps for bigger (chunk) objects.
//
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct tls_chunk);
} heap SEC(".maps");
#define BPF_MAP(_name, _type, _key_type, _value_type, _max_entries) \
struct bpf_map_def SEC("maps") _name = { \
.type = _type, \
@@ -72,26 +57,19 @@ struct {
};
#define BPF_HASH(_name, _key_type, _value_type) \
BPF_MAP(_name, BPF_MAP_TYPE_HASH, _key_type, _value_type, MAX_ENTRIES_HASH)
BPF_MAP(_name, BPF_MAP_TYPE_HASH, _key_type, _value_type, 4096)
#define BPF_PERF_OUTPUT(_name) \
BPF_MAP(_name, BPF_MAP_TYPE_PERF_EVENT_ARRAY, int, __u32, MAX_ENTRIES_PERF_OUTPUT)
BPF_MAP(_name, BPF_MAP_TYPE_PERF_EVENT_ARRAY, int, __u32, 1024)
#define BPF_LRU_HASH(_name, _key_type, _value_type) \
BPF_MAP(_name, BPF_MAP_TYPE_LRU_HASH, _key_type, _value_type, MAX_ENTRIES_LRU_HASH)
BPF_MAP(_name, BPF_MAP_TYPE_LRU_HASH, _key_type, _value_type, 16384)
// Generic
BPF_HASH(pids_map, __u32, __u32);
BPF_LRU_HASH(ssl_write_context, __u64, struct ssl_info);
BPF_LRU_HASH(ssl_read_context, __u64, struct ssl_info);
BPF_LRU_HASH(file_descriptor_to_ipv4, __u64, struct fd_info);
BPF_PERF_OUTPUT(chunks_buffer);
BPF_PERF_OUTPUT(log_buffer);
// OpenSSL specific
BPF_LRU_HASH(openssl_write_context, __u64, struct ssl_info);
BPF_LRU_HASH(openssl_read_context, __u64, struct ssl_info);
// Go specific
BPF_LRU_HASH(go_write_context, __u64, struct ssl_info);
BPF_LRU_HASH(go_read_context, __u64, struct ssl_info);
#endif /* __MAPS__ */

View File

@@ -10,35 +10,149 @@ Copyright (C) UP9 Inc.
#include "include/log.h"
#include "include/logger_messages.h"
#include "include/pids.h"
#include "include/common.h"
// Heap-like area for eBPF programs - the stack is limited to 512 bytes, so we must use maps for bigger (chunk) objects.
//
struct {
__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct tlsChunk);
} heap SEC(".maps");
static __always_inline int get_count_bytes(struct pt_regs *ctx, struct ssl_info* info, __u64 id) {
int returnValue = PT_REGS_RC(ctx);
int returnValue = PT_REGS_RC(ctx);
if (info->count_ptr == NULL) {
// ssl_read and ssl_write return the number of bytes written/read
//
return returnValue;
}
// ssl_read_ex and ssl_write_ex return 1 for success
//
if (returnValue != 1) {
return 0;
}
// ssl_read_ex and ssl_write_ex write the number of bytes to an arg named *count
//
size_t countBytes;
long err = bpf_probe_read(&countBytes, sizeof(size_t), (void*) info->count_ptr);
if (err != 0) {
log_error(ctx, LOG_ERROR_READING_BYTES_COUNT, id, err, 0l);
return 0;
}
return countBytes;
}
if (info->count_ptr == NULL) {
// ssl_read and ssl_write return the number of bytes written/read
//
return returnValue;
}
static __always_inline int add_address_to_chunk(struct pt_regs *ctx, struct tlsChunk* chunk, __u64 id, __u32 fd) {
__u32 pid = id >> 32;
__u64 key = (__u64) pid << 32 | fd;
struct fd_info *fdinfo = bpf_map_lookup_elem(&file_descriptor_to_ipv4, &key);
if (fdinfo == NULL) {
return 0;
}
int err = bpf_probe_read(chunk->address, sizeof(chunk->address), fdinfo->ipv4_addr);
chunk->flags |= (fdinfo->flags & FLAGS_IS_CLIENT_BIT);
if (err != 0) {
log_error(ctx, LOG_ERROR_READING_FD_ADDRESS, id, err, 0l);
return 0;
}
return 1;
}
// ssl_read_ex and ssl_write_ex return 1 for success
//
if (returnValue != 1) {
return 0;
}
static __always_inline void send_chunk_part(struct pt_regs *ctx, __u8* buffer, __u64 id,
struct tlsChunk* chunk, int start, int end) {
size_t recorded = MIN(end - start, sizeof(chunk->data));
if (recorded <= 0) {
return;
}
chunk->recorded = recorded;
chunk->start = start;
// This ugly trick is to keep the eBPF verifier happy
//
long err = 0;
if (chunk->recorded == sizeof(chunk->data)) {
err = bpf_probe_read(chunk->data, sizeof(chunk->data), buffer + start);
} else {
recorded &= (sizeof(chunk->data) - 1); // Buffer size must be a power of 2
err = bpf_probe_read(chunk->data, recorded, buffer + start);
}
if (err != 0) {
log_error(ctx, LOG_ERROR_READING_FROM_SSL_BUFFER, id, err, 0l);
return;
}
bpf_perf_event_output(ctx, &chunks_buffer, BPF_F_CURRENT_CPU, chunk, sizeof(struct tlsChunk));
}
// ssl_read_ex and ssl_write_ex write the number of bytes to an arg named *count
//
size_t countBytes;
long err = bpf_probe_read(&countBytes, sizeof(size_t), (void*) info->count_ptr);
static __always_inline void send_chunk(struct pt_regs *ctx, __u8* buffer, __u64 id, struct tlsChunk* chunk) {
// eBPF loops must be bounded at compile time; we can't use (i < chunk->len / CHUNK_SIZE)
//
// https://lwn.net/Articles/794934/
//
// However, we want to run on kernels older than 5.3, hence we use "#pragma unroll" anyway
//
#pragma unroll
for (int i = 0; i < MAX_CHUNKS_PER_OPERATION; i++) {
if (chunk->len <= (CHUNK_SIZE * i)) {
break;
}
send_chunk_part(ctx, buffer, id, chunk, CHUNK_SIZE * i, chunk->len);
}
}
if (err != 0) {
log_error(ctx, LOG_ERROR_READING_BYTES_COUNT, id, err, 0l);
return 0;
}
return countBytes;
static __always_inline void output_ssl_chunk(struct pt_regs *ctx, struct ssl_info* info, __u64 id, __u32 flags) {
int countBytes = get_count_bytes(ctx, info, id);
if (countBytes <= 0) {
return;
}
if (countBytes > (CHUNK_SIZE * MAX_CHUNKS_PER_OPERATION)) {
log_error(ctx, LOG_ERROR_BUFFER_TOO_BIG, id, countBytes, 0l);
return;
}
struct tlsChunk* chunk;
int zero = 0;
// If another thread running on the same CPU gets to this point at the same time as us (context switch),
// the data will be corrupted - protection may be added in the future
//
chunk = bpf_map_lookup_elem(&heap, &zero);
if (!chunk) {
log_error(ctx, LOG_ERROR_ALLOCATING_CHUNK, id, 0l, 0l);
return;
}
chunk->flags = flags;
chunk->pid = id >> 32;
chunk->tgid = id;
chunk->len = countBytes;
chunk->fd = info->fd;
if (!add_address_to_chunk(ctx, chunk, id, chunk->fd)) {
// Without an address, we drop the chunk because there is not much to do with it in Go
//
return;
}
send_chunk(ctx, info->buffer, id, chunk);
}
static __always_inline void ssl_uprobe(struct pt_regs *ctx, void* ssl, void* buffer, int num, struct bpf_map_def* map_fd, size_t *count_ptr) {
@@ -49,7 +163,25 @@ static __always_inline void ssl_uprobe(struct pt_regs *ctx, void* ssl, void* buf
}
struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &id);
struct ssl_info info = lookup_ssl_info(ctx, &openssl_write_context, id);
struct ssl_info info = {};
if (infoPtr == NULL) {
info.fd = -1;
info.created_at_nano = bpf_ktime_get_ns();
} else {
long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);
if (err != 0) {
log_error(ctx, LOG_ERROR_READING_SSL_CONTEXT, id, err, ORIGIN_SSL_UPROBE_CODE);
}
if ((bpf_ktime_get_ns() - info.created_at_nano) > SSL_INFO_MAX_TTL_NANO) {
// If the ssl info is too old, we don't want to use its info because it may be incorrect.
//
info.fd = -1;
info.created_at_nano = bpf_ktime_get_ns();
}
}
info.count_ptr = count_ptr;
info.buffer = buffer;
@@ -95,55 +227,50 @@ static __always_inline void ssl_uretprobe(struct pt_regs *ctx, struct bpf_map_de
return;
}
if (info.fd == invalid_fd) {
if (info.fd == -1) {
log_error(ctx, LOG_ERROR_MISSING_FILE_DESCRIPTOR, id, 0l, 0l);
return;
}
int count_bytes = get_count_bytes(ctx, &info, id);
if (count_bytes <= 0) {
return;
}
output_ssl_chunk(ctx, &info, count_bytes, id, flags);
output_ssl_chunk(ctx, &info, id, flags);
}
SEC("uprobe/ssl_write")
void BPF_KPROBE(ssl_write, void* ssl, void* buffer, int num) {
ssl_uprobe(ctx, ssl, buffer, num, &openssl_write_context, 0);
ssl_uprobe(ctx, ssl, buffer, num, &ssl_write_context, 0);
}
SEC("uretprobe/ssl_write")
void BPF_KPROBE(ssl_ret_write) {
ssl_uretprobe(ctx, &openssl_write_context, 0);
ssl_uretprobe(ctx, &ssl_write_context, 0);
}
SEC("uprobe/ssl_read")
void BPF_KPROBE(ssl_read, void* ssl, void* buffer, int num) {
ssl_uprobe(ctx, ssl, buffer, num, &openssl_read_context, 0);
ssl_uprobe(ctx, ssl, buffer, num, &ssl_read_context, 0);
}
SEC("uretprobe/ssl_read")
void BPF_KPROBE(ssl_ret_read) {
ssl_uretprobe(ctx, &openssl_read_context, FLAGS_IS_READ_BIT);
ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT);
}
SEC("uprobe/ssl_write_ex")
void BPF_KPROBE(ssl_write_ex, void* ssl, void* buffer, size_t num, size_t *written) {
ssl_uprobe(ctx, ssl, buffer, num, &openssl_write_context, written);
ssl_uprobe(ctx, ssl, buffer, num, &ssl_write_context, written);
}
SEC("uretprobe/ssl_write_ex")
void BPF_KPROBE(ssl_ret_write_ex) {
ssl_uretprobe(ctx, &openssl_write_context, 0);
ssl_uretprobe(ctx, &ssl_write_context, 0);
}
SEC("uprobe/ssl_read_ex")
void BPF_KPROBE(ssl_read_ex, void* ssl, void* buffer, size_t num, size_t *readbytes) {
ssl_uprobe(ctx, ssl, buffer, num, &openssl_read_context, readbytes);
ssl_uprobe(ctx, ssl, buffer, num, &ssl_read_context, readbytes);
}
SEC("uretprobe/ssl_read_ex")
void BPF_KPROBE(ssl_ret_read_ex) {
ssl_uretprobe(ctx, &openssl_read_context, FLAGS_IS_READ_BIT);
ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT);
}

View File

@@ -13,9 +13,7 @@ Copyright (C) UP9 Inc.
// To avoid multiple .o files
//
#include "common.c"
#include "openssl_uprobes.c"
#include "go_uprobes.c"
#include "fd_tracepoints.c"
#include "fd_to_address_tracepoints.c"

View File

@@ -2,7 +2,7 @@ package tlstapper
// Must be synced with logger_messages.h
//
var bpfLogMessages = []string{
var bpfLogMessages = []string {
/*0000*/ "[%d] Unable to read bytes count from _ex methods [err: %d]",
/*0001*/ "[%d] Unable to read ipv4 address [err: %d]",
/*0002*/ "[%d] Unable to read ssl buffer [err: %d]",
@@ -20,4 +20,6 @@ var bpfLogMessages = []string{
/*0014*/ "[%d] Unable to put connect info [err: %d]",
/*0015*/ "[%d] Unable to get connect info",
/*0016*/ "[%d] Unable to read connect info [err: %d]",
}

View File

@@ -12,7 +12,23 @@ import (
const FLAGS_IS_CLIENT_BIT uint32 = (1 << 0)
const FLAGS_IS_READ_BIT uint32 = (1 << 1)
func (c *tlsTapperTlsChunk) getAddress() (net.IP, uint16, error) {
// The same struct can be found in maps.h
//
// Be careful when editing, alignment and padding should be exactly the same in go/c.
//
type tlsChunk struct {
Pid uint32 // process id
Tgid uint32 // thread id inside the process
Len uint32 // the size of the native buffer used to read/write the tls data (may be bigger than tlsChunk.Data[])
Start uint32 // the start offset within the native buffer
Recorded uint32 // number of bytes copied from the native buffer to tlsChunk.Data[]
Fd uint32 // the file descriptor used to read/write the tls data (probably socket file descriptor)
Flags uint32 // bitwise flags
Address [16]byte // ipv4 address and port
Data [4096]byte // actual tls data
}
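Since the Go struct above must stay byte-for-byte compatible with struct tlsChunk in maps.h, a startup guard along the following lines could catch accidental drift. This is only a sketch, not part of the change: the expected size is derived from the fields above, and it assumes the encoding/binary and fmt imports are available in this file.
// expectedChunkSize mirrors sizeof(struct tlsChunk) in maps.h:
// seven __u32 fields (28 bytes) + a 16-byte address + 4096 data bytes = 4140.
const expectedChunkSize = 7*4 + 16 + 4096

func checkChunkLayout() error {
	if size := binary.Size(tlsChunk{}); size != expectedChunkSize {
		return fmt.Errorf("tlsChunk is %d bytes, expected %d: Go/C layouts drifted", size, expectedChunkSize)
	}
	return nil
}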
func (c *tlsChunk) getAddress() (net.IP, uint16, error) {
address := bytes.NewReader(c.Address[:])
var family uint16
var port uint16
@@ -35,31 +51,31 @@ func (c *tlsTapperTlsChunk) getAddress() (net.IP, uint16, error) {
return ip, port, nil
}
func (c *tlsTapperTlsChunk) isClient() bool {
func (c *tlsChunk) isClient() bool {
return c.Flags&FLAGS_IS_CLIENT_BIT != 0
}
func (c *tlsTapperTlsChunk) isServer() bool {
func (c *tlsChunk) isServer() bool {
return !c.isClient()
}
func (c *tlsTapperTlsChunk) isRead() bool {
func (c *tlsChunk) isRead() bool {
return c.Flags&FLAGS_IS_READ_BIT != 0
}
func (c *tlsTapperTlsChunk) isWrite() bool {
func (c *tlsChunk) isWrite() bool {
return !c.isRead()
}
func (c *tlsTapperTlsChunk) getRecordedData() []byte {
func (c *tlsChunk) getRecordedData() []byte {
return c.Data[:c.Recorded]
}
func (c *tlsTapperTlsChunk) isRequest() bool {
func (c *tlsChunk) isRequest() bool {
return (c.isClient() && c.isWrite()) || (c.isServer() && c.isRead())
}
func (c *tlsTapperTlsChunk) getAddressPair() (addressPair, error) {
func (c *tlsChunk) getAddressPair() (addressPair, error) {
ip, port, err := c.getAddress()
if err != nil {

View File

@@ -1,105 +0,0 @@
package tlstapper
import (
"github.com/cilium/ebpf/link"
"github.com/go-errors/errors"
)
type goHooks struct {
goWriteProbe link.Link
goWriteExProbes []link.Link
goReadProbe link.Link
goReadExProbes []link.Link
}
func (s *goHooks) installUprobes(bpfObjects *tlsTapperObjects, filePath string) error {
ex, err := link.OpenExecutable(filePath)
if err != nil {
return errors.Wrap(err, 0)
}
offsets, err := findGoOffsets(filePath)
if err != nil {
return errors.Wrap(err, 0)
}
return s.installHooks(bpfObjects, ex, offsets)
}
func (s *goHooks) installHooks(bpfObjects *tlsTapperObjects, ex *link.Executable, offsets goOffsets) error {
var err error
// Symbol points to
// [`crypto/tls.(*Conn).Write`](https://github.com/golang/go/blob/go1.17.6/src/crypto/tls/conn.go#L1099)
s.goWriteProbe, err = ex.Uprobe(goWriteSymbol, bpfObjects.GoCryptoTlsWrite, &link.UprobeOptions{
Offset: offsets.GoWriteOffset.enter,
})
if err != nil {
return errors.Wrap(err, 0)
}
for _, offset := range offsets.GoWriteOffset.exits {
probe, err := ex.Uprobe(goWriteSymbol, bpfObjects.GoCryptoTlsWriteEx, &link.UprobeOptions{
Offset: offset,
})
if err != nil {
return errors.Wrap(err, 0)
}
s.goWriteExProbes = append(s.goWriteExProbes, probe)
}
// Symbol points to
// [`crypto/tls.(*Conn).Read`](https://github.com/golang/go/blob/go1.17.6/src/crypto/tls/conn.go#L1263)
s.goReadProbe, err = ex.Uprobe(goReadSymbol, bpfObjects.GoCryptoTlsRead, &link.UprobeOptions{
Offset: offsets.GoReadOffset.enter,
})
if err != nil {
return errors.Wrap(err, 0)
}
for _, offset := range offsets.GoReadOffset.exits {
probe, err := ex.Uprobe(goReadSymbol, bpfObjects.GoCryptoTlsReadEx, &link.UprobeOptions{
Offset: offset,
})
if err != nil {
return errors.Wrap(err, 0)
}
s.goReadExProbes = append(s.goReadExProbes, probe)
}
return nil
}
func (s *goHooks) close() []error {
errors := make([]error, 0)
if err := s.goWriteProbe.Close(); err != nil {
errors = append(errors, err)
}
for _, probe := range s.goWriteExProbes {
if err := probe.Close(); err != nil {
errors = append(errors, err)
}
}
if err := s.goReadProbe.Close(); err != nil {
errors = append(errors, err)
}
for _, probe := range s.goReadExProbes {
if err := probe.Close(); err != nil {
errors = append(errors, err)
}
}
return errors
}

View File

@@ -1,228 +0,0 @@
package tlstapper
import (
"bufio"
"debug/elf"
"fmt"
"os"
"runtime"
"github.com/Masterminds/semver"
"github.com/cilium/ebpf/link"
"github.com/knightsc/gapstone"
"github.com/up9inc/mizu/logger"
)
type goOffsets struct {
GoWriteOffset *goExtendedOffset
GoReadOffset *goExtendedOffset
}
type goExtendedOffset struct {
enter uint64
exits []uint64
}
const (
minimumSupportedGoVersion = "1.17.0"
goVersionSymbol = "runtime.buildVersion.str"
goWriteSymbol = "crypto/tls.(*Conn).Write"
goReadSymbol = "crypto/tls.(*Conn).Read"
)
func findGoOffsets(filePath string) (goOffsets, error) {
offsets, err := getOffsets(filePath)
if err != nil {
return goOffsets{}, err
}
goVersionOffset, err := getOffset(offsets, goVersionSymbol)
if err != nil {
return goOffsets{}, err
}
passed, goVersion, err := checkGoVersion(filePath, goVersionOffset)
if err != nil {
return goOffsets{}, fmt.Errorf("Checking Go version: %s", err)
}
if !passed {
return goOffsets{}, fmt.Errorf("Unsupported Go version: %s", goVersion)
}
writeOffset, err := getOffset(offsets, goWriteSymbol)
if err != nil {
return goOffsets{}, fmt.Errorf("reading offset [%s]: %s", goWriteSymbol, err)
}
readOffset, err := getOffset(offsets, goReadSymbol)
if err != nil {
return goOffsets{}, fmt.Errorf("reading offset [%s]: %s", goReadSymbol, err)
}
return goOffsets{
GoWriteOffset: writeOffset,
GoReadOffset: readOffset,
}, nil
}
func getOffsets(filePath string) (offsets map[string]*goExtendedOffset, err error) {
var engine gapstone.Engine
switch runtime.GOARCH {
case "amd64":
engine, err = gapstone.New(
gapstone.CS_ARCH_X86,
gapstone.CS_MODE_64,
)
case "arm64":
engine, err = gapstone.New(
gapstone.CS_ARCH_ARM64,
gapstone.CS_MODE_ARM,
)
default:
err = fmt.Errorf("Unsupported architecture: %v", runtime.GOARCH)
}
if err != nil {
return
}
offsets = make(map[string]*goExtendedOffset)
var fd *os.File
fd, err = os.Open(filePath)
if err != nil {
return
}
defer fd.Close()
var se *elf.File
se, err = elf.NewFile(fd)
if err != nil {
return
}
textSection := se.Section(".text")
if textSection == nil {
err = fmt.Errorf("No text section")
return
}
// extract the raw bytes from the .text section
var textSectionData []byte
textSectionData, err = textSection.Data()
if err != nil {
return
}
var syms []elf.Symbol
syms, err = se.Symbols()
if err != nil {
return
}
for _, sym := range syms {
offset := sym.Value
var lastProg *elf.Prog
for _, prog := range se.Progs {
if prog.Vaddr <= sym.Value && sym.Value < (prog.Vaddr+prog.Memsz) {
offset = sym.Value - prog.Vaddr + prog.Off
lastProg = prog
break
}
}
extendedOffset := &goExtendedOffset{enter: offset}
// source: https://gist.github.com/grantseltzer/3efa8ecc5de1fb566e8091533050d608
// skip over any symbols that aren't functions/methods
if sym.Info != byte(2) && sym.Info != byte(18) {
offsets[sym.Name] = extendedOffset
continue
}
// skip over empty symbols
if sym.Size == 0 {
offsets[sym.Name] = extendedOffset
continue
}
// calculate starting and ending index of the symbol within the text section
symStartingIndex := sym.Value - textSection.Addr
symEndingIndex := symStartingIndex + sym.Size
// collect the bytes of the symbol
textSectionDataLen := uint64(len(textSectionData) - 1)
if symEndingIndex > textSectionDataLen {
logger.Log.Warningf(
"Skipping symbol %v, ending index %v is bigger than text section data length %v",
sym.Name,
symEndingIndex,
textSectionDataLen,
)
continue
}
symBytes := textSectionData[symStartingIndex:symEndingIndex]
// disassemble the symbol
var instructions []gapstone.Instruction
instructions, err = engine.Disasm(symBytes, sym.Value, 0)
if err != nil {
return
}
// iterate over each instruction and if the mnemonic is `ret` then that's an exit offset
for _, ins := range instructions {
if ins.Mnemonic == "ret" {
extendedOffset.exits = append(extendedOffset.exits, uint64(ins.Address)-lastProg.Vaddr+lastProg.Off)
}
}
offsets[sym.Name] = extendedOffset
}
return
}
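To make the exit-offset arithmetic above concrete, here is a toy calculation with invented numbers (purely illustrative, not taken from the change):
// Suppose a `ret` is disassembled at virtual address 0x4a1237 inside a
// program header with Vaddr = 0x400000 and file offset Off = 0x1000.
// The value appended to extendedOffset.exits - and later handed to
// link.UprobeOptions{Offset: ...} - is the file offset of that instruction:
exitOffset := uint64(0x4a1237) - 0x400000 + 0x1000 // 0xa2237
_ = exitOffset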
func getOffset(offsets map[string]*goExtendedOffset, symbol string) (*goExtendedOffset, error) {
if offset, ok := offsets[symbol]; ok {
return offset, nil
}
return nil, fmt.Errorf("symbol %s: %w", symbol, link.ErrNoSymbol)
}
func checkGoVersion(filePath string, offset *goExtendedOffset) (bool, string, error) {
fd, err := os.Open(filePath)
if err != nil {
return false, "", err
}
defer fd.Close()
reader := bufio.NewReader(fd)
_, err = reader.Discard(int(offset.enter))
if err != nil {
return false, "", err
}
line, err := reader.ReadString(0)
if err != nil {
return false, "", err
}
if len(line) < 3 {
return false, "", fmt.Errorf("ELF data segment read error (corrupted result)")
}
goVersionStr := line[2 : len(line)-1]
goVersion, err := semver.NewVersion(goVersionStr)
if err != nil {
return false, goVersionStr, err
}
goVersionConstraint, err := semver.NewConstraint(fmt.Sprintf(">= %s", minimumSupportedGoVersion))
if err != nil {
return false, goVersionStr, err
}
return goVersionConstraint.Check(goVersion), goVersionStr, nil
}

View File

@@ -46,7 +46,7 @@ func findLibraryByPid(procfs string, pid uint32, libraryName string) (string, er
filepath := parts[5]
if libraryName != "" && !strings.Contains(filepath, libraryName) {
if !strings.Contains(filepath, libraryName) {
continue
}

View File

@@ -20,10 +20,8 @@ import (
"github.com/up9inc/mizu/tap/api"
)
const (
fdCachedItemAvgSize = 40
fdCacheMaxItems = 500000 / fdCachedItemAvgSize
)
const fdCachedItemAvgSize = 40
const fdCacheMaxItems = 500000 / fdCachedItemAvgSize
type tlsPoller struct {
tls *TlsTapper
@@ -34,7 +32,7 @@ type tlsPoller struct {
extension *api.Extension
procfs string
pidToNamespace sync.Map
fdCache *simplelru.LRU // Actual type is map[string]addressPair
fdCache *simplelru.LRU // Actual typs is map[string]addressPair
evictedCounter int
}
@@ -76,8 +74,7 @@ func (p *tlsPoller) close() error {
}
func (p *tlsPoller) poll(emitter api.Emitter, options *api.TrafficFilteringOptions, streamsMap api.TcpStreamMap) {
// tlsTapperTlsChunk is generated by bpf2go.
chunks := make(chan *tlsTapperTlsChunk)
chunks := make(chan *tlsChunk)
go p.pollChunksPerfBuffer(chunks)
@@ -97,7 +94,7 @@ func (p *tlsPoller) poll(emitter api.Emitter, options *api.TrafficFilteringOptio
}
}
func (p *tlsPoller) pollChunksPerfBuffer(chunks chan<- *tlsTapperTlsChunk) {
func (p *tlsPoller) pollChunksPerfBuffer(chunks chan<- *tlsChunk) {
logger.Log.Infof("Start polling for tls events")
for {
@@ -121,7 +118,7 @@ func (p *tlsPoller) pollChunksPerfBuffer(chunks chan<- *tlsTapperTlsChunk) {
buffer := bytes.NewReader(record.RawSample)
var chunk tlsTapperTlsChunk
var chunk tlsChunk
if err := binary.Read(buffer, binary.LittleEndian, &chunk); err != nil {
LogError(errors.Errorf("Error parsing chunk %v", err))
@@ -132,7 +129,7 @@ func (p *tlsPoller) pollChunksPerfBuffer(chunks chan<- *tlsTapperTlsChunk) {
}
}
func (p *tlsPoller) handleTlsChunk(chunk *tlsTapperTlsChunk, extension *api.Extension, emitter api.Emitter,
func (p *tlsPoller) handleTlsChunk(chunk *tlsChunk, extension *api.Extension, emitter api.Emitter,
options *api.TrafficFilteringOptions, streamsMap api.TcpStreamMap) error {
address, err := p.getSockfdAddressPair(chunk)
@@ -161,11 +158,11 @@ func (p *tlsPoller) handleTlsChunk(chunk *tlsTapperTlsChunk, extension *api.Exte
return nil
}
func (p *tlsPoller) startNewTlsReader(chunk *tlsTapperTlsChunk, address *addressPair, key string,
func (p *tlsPoller) startNewTlsReader(chunk *tlsChunk, address *addressPair, key string,
emitter api.Emitter, extension *api.Extension, options *api.TrafficFilteringOptions,
streamsMap api.TcpStreamMap) *tlsReader {
tcpid := p.buildTcpId(address)
tcpid := p.buildTcpId(chunk, address)
doneHandler := func(r *tlsReader) {
p.closeReader(key, r)
@@ -178,7 +175,7 @@ func (p *tlsPoller) startNewTlsReader(chunk *tlsTapperTlsChunk, address *address
reader := &tlsReader{
key: key,
chunks: make(chan *tlsTapperTlsChunk, 1),
chunks: make(chan *tlsChunk, 1),
doneHandler: doneHandler,
progress: &api.ReadProgress{},
tcpID: &tcpid,
@@ -201,7 +198,7 @@ func (p *tlsPoller) startNewTlsReader(chunk *tlsTapperTlsChunk, address *address
return reader
}
func dissect(extension *api.Extension, reader api.TcpReader, options *api.TrafficFilteringOptions) {
func dissect(extension *api.Extension, reader *tlsReader, options *api.TrafficFilteringOptions) {
b := bufio.NewReader(reader)
err := extension.Dissector.Dissect(b, reader, options)
@@ -216,7 +213,7 @@ func (p *tlsPoller) closeReader(key string, r *tlsReader) {
p.closedReaders <- key
}
func (p *tlsPoller) getSockfdAddressPair(chunk *tlsTapperTlsChunk) (addressPair, error) {
func (p *tlsPoller) getSockfdAddressPair(chunk *tlsChunk) (addressPair, error) {
address, err := getAddressBySockfd(p.procfs, chunk.Pid, chunk.Fd)
fdCacheKey := fmt.Sprintf("%d:%d", chunk.Pid, chunk.Fd)
@@ -255,7 +252,7 @@ func buildTlsKey(address addressPair) string {
return fmt.Sprintf("%s:%d>%s:%d", address.srcIp, address.srcPort, address.dstIp, address.dstPort)
}
func (p *tlsPoller) buildTcpId(address *addressPair) api.TcpID {
func (p *tlsPoller) buildTcpId(chunk *tlsChunk, address *addressPair) api.TcpID {
return api.TcpID{
SrcIP: address.srcIp.String(),
DstIP: address.dstIp.String(),
@@ -292,7 +289,7 @@ func (p *tlsPoller) clearPids() {
})
}
func (p *tlsPoller) logTls(chunk *tlsTapperTlsChunk, key string, reader *tlsReader) {
func (p *tlsPoller) logTls(chunk *tlsChunk, key string, reader *tlsReader) {
var flagsStr string
if chunk.isClient() {
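Two details of this file are easy to miss in the diff: the socket-address lookups are memoised in an LRU keyed by "pid:fd" (see getSockfdAddressPair above), and the cache is sized so that roughly 500 kB of ~40-byte entries fit. A small sketch of that cache setup, assuming hashicorp's simplelru package (which is what the *simplelru.LRU field type points to); the addressPair stand-in and helper names are illustrative.

package sketch

import (
    "fmt"
    "net"

    "github.com/hashicorp/golang-lru/simplelru" // assumed import path for *simplelru.LRU
)

// Same sizing rule as the constants above: ~500 kB budget / ~40 B per entry.
const fdCachedItemAvgSize = 40
const fdCacheMaxItems = 500000 / fdCachedItemAvgSize // 12500 entries

// addressPair is a simplified stand-in for the real cached value.
type addressPair struct {
    srcIp, dstIp     net.IP
    srcPort, dstPort uint16
}

func newFdCache() (*simplelru.LRU, error) {
    return simplelru.NewLRU(fdCacheMaxItems, nil) // nil: no eviction callback
}

// cacheKey uses the same "pid:fd" format as getSockfdAddressPair above.
func cacheKey(pid, fd uint32) string {
    return fmt.Sprintf("%d:%d", pid, fd)
}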

View File

@@ -28,11 +28,7 @@ func UpdateTapTargets(tls *TlsTapper, pods *[]v1.Pod, procfs string) error {
tls.ClearPids()
for pid, pod := range containerPids {
if err := tls.AddSsllibPid(procfs, pid, pod.Namespace); err != nil {
LogError(err)
}
if err := tls.AddGoPid(procfs, pid, pod.Namespace); err != nil {
if err := tls.AddPid(procfs, pid, pod.Namespace); err != nil {
LogError(err)
}
}

View File

@@ -9,7 +9,7 @@ import (
type tlsReader struct {
key string
chunks chan *tlsTapperTlsChunk
chunks chan *tlsChunk
seenChunks int
data []byte
doneHandler func(r *tlsReader)
@@ -24,14 +24,14 @@ type tlsReader struct {
reqResMatcher api.RequestResponseMatcher
}
func (r *tlsReader) newChunk(chunk *tlsTapperTlsChunk) {
func (r *tlsReader) newChunk(chunk *tlsChunk) {
r.captureTime = time.Now()
r.seenChunks = r.seenChunks + 1
r.chunks <- chunk
}
func (r *tlsReader) Read(p []byte) (int, error) {
var chunk *tlsTapperTlsChunk
var chunk *tlsChunk
for len(r.data) == 0 {
var ok bool
@@ -42,7 +42,7 @@ func (r *tlsReader) Read(p []byte) (int, error) {
}
r.data = chunk.getRecordedData()
case <-time.After(time.Second * 120):
case <-time.After(time.Second * 3):
r.doneHandler(r)
return 0, io.EOF
}
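The Read implementation above turns a channel of eBPF chunks into an io.Reader: it blocks until a chunk arrives, copies out of an internal buffer, and returns io.EOF if the channel stays quiet past the timeout (reduced from 120 s to 3 s in this change). A self-contained sketch of the same pattern with a plain byte-slice channel; names are illustrative.

package sketch

import (
    "io"
    "time"
)

// chanReader adapts a channel of byte chunks to io.Reader, mirroring
// tlsReader.Read: refill the buffer from the channel, time out with io.EOF.
type chanReader struct {
    chunks  <-chan []byte
    data    []byte
    timeout time.Duration // e.g. 3 * time.Second after this change
}

func (r *chanReader) Read(p []byte) (int, error) {
    for len(r.data) == 0 {
        select {
        case chunk, ok := <-r.chunks:
            if !ok {
                return 0, io.EOF // channel closed by the producer
            }
            r.data = chunk
        case <-time.After(r.timeout):
            return 0, io.EOF // no data for too long: end the stream
        }
    }
    n := copy(p, r.data)
    r.data = r.data[n:]
    return n, nil
}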

View File

@@ -1,7 +1,6 @@
package tlstapper
import (
"strconv"
"sync"
"github.com/cilium/ebpf/rlimit"
@@ -12,13 +11,12 @@ import (
const GLOABL_TAP_PID = 0
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go@0d0727ef53e2f53b1731c73f4c61e0f58693083a -target $BPF_TARGET -cflags $BPF_CFLAGS -type tls_chunk tlsTapper bpf/tls_tapper.c
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go tlsTapper bpf/tls_tapper.c -- -O2 -g -D__TARGET_ARCH_x86
type TlsTapper struct {
bpfObjects tlsTapperObjects
syscallHooks syscallHooks
sslHooksStructs []sslHooks
goHooksStructs []goHooks
poller *tlsPoller
bpfLogger *bpfLogger
registeredPids sync.Map
@@ -66,20 +64,11 @@ func (t *TlsTapper) PollForLogging() {
t.bpfLogger.poll()
}
func (t *TlsTapper) GlobalSsllibTap(sslLibrary string) error {
return t.tapSsllibPid(GLOABL_TAP_PID, sslLibrary, api.UNKNOWN_NAMESPACE)
func (t *TlsTapper) GlobalTap(sslLibrary string) error {
return t.tapPid(GLOABL_TAP_PID, sslLibrary, api.UNKNOWN_NAMESPACE)
}
func (t *TlsTapper) GlobalGoTap(procfs string, pid string) error {
_pid, err := strconv.Atoi(pid)
if err != nil {
return err
}
return t.tapGoPid(procfs, uint32(_pid), api.UNKNOWN_NAMESPACE)
}
func (t *TlsTapper) AddSsllibPid(procfs string, pid uint32, namespace string) error {
func (t *TlsTapper) AddPid(procfs string, pid uint32, namespace string) error {
sslLibrary, err := findSsllib(procfs, pid)
if err != nil {
@@ -87,11 +76,7 @@ func (t *TlsTapper) AddSsllibPid(procfs string, pid uint32, namespace string) er
return nil // hide the error on purpose, it's OK for a process to not use libssl.so
}
return t.tapSsllibPid(pid, sslLibrary, namespace)
}
func (t *TlsTapper) AddGoPid(procfs string, pid uint32, namespace string) error {
return t.tapGoPid(procfs, pid, namespace)
return t.tapPid(pid, sslLibrary, namespace)
}
func (t *TlsTapper) RemovePid(pid uint32) error {
@@ -135,10 +120,6 @@ func (t *TlsTapper) Close() []error {
errors = append(errors, sslHooks.close()...)
}
for _, goHooks := range t.goHooksStructs {
errors = append(errors, goHooks.close()...)
}
if err := t.bpfLogger.close(); err != nil {
errors = append(errors, err)
}
@@ -160,15 +141,15 @@ func setupRLimit() error {
return nil
}
func (t *TlsTapper) tapSsllibPid(pid uint32, sslLibrary string, namespace string) error {
func (t *TlsTapper) tapPid(pid uint32, sslLibrary string, namespace string) error {
logger.Log.Infof("Tapping TLS (pid: %v) (sslLibrary: %v)", pid, sslLibrary)
newSsl := sslHooks{}
if err := newSsl.installUprobes(&t.bpfObjects, sslLibrary); err != nil {
return err
}
logger.Log.Infof("Tapping TLS (pid: %v) (sslLibrary: %v)", pid, sslLibrary)
t.sslHooksStructs = append(t.sslHooksStructs, newSsl)
t.poller.addPid(pid, namespace)
@@ -184,35 +165,6 @@ func (t *TlsTapper) tapSsllibPid(pid uint32, sslLibrary string, namespace string
return nil
}
func (t *TlsTapper) tapGoPid(procfs string, pid uint32, namespace string) error {
exePath, err := findLibraryByPid(procfs, pid, "")
if err != nil {
return err
}
hooks := goHooks{}
if err := hooks.installUprobes(&t.bpfObjects, exePath); err != nil {
return err
}
logger.Log.Infof("Tapping TLS (pid: %v) (Go: %v)", pid, exePath)
t.goHooksStructs = append(t.goHooksStructs, hooks)
t.poller.addPid(pid, namespace)
pids := t.bpfObjects.tlsTapperMaps.PidsMap
if err := pids.Put(pid, uint32(1)); err != nil {
return errors.Wrap(err, 0)
}
t.registeredPids.Store(pid, true)
return nil
}
func LogError(err error) {
var e *errors.Error
if errors.As(err, &e) {
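With this change the go:generate line above drops the -target/-cflags/-type arguments and bpf2go is invoked with fixed x86 flags; the generated loadTlsTapperObjects/tlsTapperObjects symbols (visible further down) are then loaded after lifting the locked-memory limit, which is presumably what the rlimit import and setupRLimit are for. The sketch below shows that bootstrap as it would sit next to the generated file in the tlstapper package; apart from the generated identifiers and rlimit.RemoveMemlock, everything here is illustrative.

// Sketch only: assumes it lives beside the bpf2go-generated file in the
// tlstapper package so that tlsTapperObjects/loadTlsTapperObjects resolve.
package tlstapper

import (
    "github.com/cilium/ebpf/rlimit"
)

func loadTapperObjects() (*tlsTapperObjects, error) {
    // Lift RLIMIT_MEMLOCK first; older kernels charge eBPF maps against it.
    // (The real code wraps this step in setupRLimit.)
    if err := rlimit.RemoveMemlock(); err != nil {
        return nil, err
    }
    objs := tlsTapperObjects{}
    if err := loadTlsTapperObjects(&objs, nil); err != nil {
        return nil, err
    }
    // The caller installs syscall hooks and SSL uprobes, then eventually
    // calls objs.Close().
    return &objs, nil
}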

View File

@@ -1,6 +1,5 @@
// Code generated by bpf2go; DO NOT EDIT.
//go:build arm64
// +build arm64
// +build arm64be armbe mips mips64 mips64p32 ppc64 s390 s390x sparc sparc64
package tlstapper
@@ -13,18 +12,6 @@ import (
"github.com/cilium/ebpf"
)
type tlsTapperTlsChunk struct {
Pid uint32
Tgid uint32
Len uint32
Start uint32
Recorded uint32
Fd uint32
Flags uint32
Address [16]uint8
Data [4096]uint8
}
// loadTlsTapper returns the embedded CollectionSpec for tlsTapper.
func loadTlsTapper() (*ebpf.CollectionSpec, error) {
reader := bytes.NewReader(_TlsTapperBytes)
@@ -66,24 +53,20 @@ type tlsTapperSpecs struct {
//
// It can be passed ebpf.CollectionSpec.Assign.
type tlsTapperProgramSpecs struct {
GoCryptoTlsRead *ebpf.ProgramSpec `ebpf:"go_crypto_tls_read"`
GoCryptoTlsReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_read_ex"`
GoCryptoTlsWrite *ebpf.ProgramSpec `ebpf:"go_crypto_tls_write"`
GoCryptoTlsWriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_write_ex"`
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
}
// tlsTapperMapSpecs contains maps before they are loaded into the kernel.
@@ -94,13 +77,11 @@ type tlsTapperMapSpecs struct {
ChunksBuffer *ebpf.MapSpec `ebpf:"chunks_buffer"`
ConnectSyscallInfo *ebpf.MapSpec `ebpf:"connect_syscall_info"`
FileDescriptorToIpv4 *ebpf.MapSpec `ebpf:"file_descriptor_to_ipv4"`
GoReadContext *ebpf.MapSpec `ebpf:"go_read_context"`
GoWriteContext *ebpf.MapSpec `ebpf:"go_write_context"`
Heap *ebpf.MapSpec `ebpf:"heap"`
LogBuffer *ebpf.MapSpec `ebpf:"log_buffer"`
OpensslReadContext *ebpf.MapSpec `ebpf:"openssl_read_context"`
OpensslWriteContext *ebpf.MapSpec `ebpf:"openssl_write_context"`
PidsMap *ebpf.MapSpec `ebpf:"pids_map"`
SslReadContext *ebpf.MapSpec `ebpf:"ssl_read_context"`
SslWriteContext *ebpf.MapSpec `ebpf:"ssl_write_context"`
}
// tlsTapperObjects contains all objects after they have been loaded into the kernel.
@@ -126,13 +107,11 @@ type tlsTapperMaps struct {
ChunksBuffer *ebpf.Map `ebpf:"chunks_buffer"`
ConnectSyscallInfo *ebpf.Map `ebpf:"connect_syscall_info"`
FileDescriptorToIpv4 *ebpf.Map `ebpf:"file_descriptor_to_ipv4"`
GoReadContext *ebpf.Map `ebpf:"go_read_context"`
GoWriteContext *ebpf.Map `ebpf:"go_write_context"`
Heap *ebpf.Map `ebpf:"heap"`
LogBuffer *ebpf.Map `ebpf:"log_buffer"`
OpensslReadContext *ebpf.Map `ebpf:"openssl_read_context"`
OpensslWriteContext *ebpf.Map `ebpf:"openssl_write_context"`
PidsMap *ebpf.Map `ebpf:"pids_map"`
SslReadContext *ebpf.Map `ebpf:"ssl_read_context"`
SslWriteContext *ebpf.Map `ebpf:"ssl_write_context"`
}
func (m *tlsTapperMaps) Close() error {
@@ -141,13 +120,11 @@ func (m *tlsTapperMaps) Close() error {
m.ChunksBuffer,
m.ConnectSyscallInfo,
m.FileDescriptorToIpv4,
m.GoReadContext,
m.GoWriteContext,
m.Heap,
m.LogBuffer,
m.OpensslReadContext,
m.OpensslWriteContext,
m.PidsMap,
m.SslReadContext,
m.SslWriteContext,
)
}
@@ -155,32 +132,24 @@ func (m *tlsTapperMaps) Close() error {
//
// It can be passed to loadTlsTapperObjects or ebpf.CollectionSpec.LoadAndAssign.
type tlsTapperPrograms struct {
GoCryptoTlsRead *ebpf.Program `ebpf:"go_crypto_tls_read"`
GoCryptoTlsReadEx *ebpf.Program `ebpf:"go_crypto_tls_read_ex"`
GoCryptoTlsWrite *ebpf.Program `ebpf:"go_crypto_tls_write"`
GoCryptoTlsWriteEx *ebpf.Program `ebpf:"go_crypto_tls_write_ex"`
SslRead *ebpf.Program `ebpf:"ssl_read"`
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
SslWrite *ebpf.Program `ebpf:"ssl_write"`
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
SslRead *ebpf.Program `ebpf:"ssl_read"`
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
SslWrite *ebpf.Program `ebpf:"ssl_write"`
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
}
func (p *tlsTapperPrograms) Close() error {
return _TlsTapperClose(
p.GoCryptoTlsRead,
p.GoCryptoTlsReadEx,
p.GoCryptoTlsWrite,
p.GoCryptoTlsWriteEx,
p.SslRead,
p.SslReadEx,
p.SslRetRead,
@@ -208,5 +177,5 @@ func _TlsTapperClose(closers ...io.Closer) error {
}
// Do not access this directly.
//go:embed tlstapper_bpfel_arm64.o
//go:embed tlstapper_bpfeb.o
var _TlsTapperBytes []byte

Binary file not shown.

View File

@@ -1,6 +1,5 @@
// Code generated by bpf2go; DO NOT EDIT.
//go:build 386 || amd64
// +build 386 amd64
// +build 386 amd64 amd64p32 arm arm64 mips64le mips64p32le mipsle ppc64le riscv64
package tlstapper
@@ -13,18 +12,6 @@ import (
"github.com/cilium/ebpf"
)
type tlsTapperTlsChunk struct {
Pid uint32
Tgid uint32
Len uint32
Start uint32
Recorded uint32
Fd uint32
Flags uint32
Address [16]uint8
Data [4096]uint8
}
// loadTlsTapper returns the embedded CollectionSpec for tlsTapper.
func loadTlsTapper() (*ebpf.CollectionSpec, error) {
reader := bytes.NewReader(_TlsTapperBytes)
@@ -66,24 +53,20 @@ type tlsTapperSpecs struct {
//
// It can be passed ebpf.CollectionSpec.Assign.
type tlsTapperProgramSpecs struct {
GoCryptoTlsRead *ebpf.ProgramSpec `ebpf:"go_crypto_tls_read"`
GoCryptoTlsReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_read_ex"`
GoCryptoTlsWrite *ebpf.ProgramSpec `ebpf:"go_crypto_tls_write"`
GoCryptoTlsWriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_write_ex"`
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
}
// tlsTapperMapSpecs contains maps before they are loaded into the kernel.
@@ -94,13 +77,11 @@ type tlsTapperMapSpecs struct {
ChunksBuffer *ebpf.MapSpec `ebpf:"chunks_buffer"`
ConnectSyscallInfo *ebpf.MapSpec `ebpf:"connect_syscall_info"`
FileDescriptorToIpv4 *ebpf.MapSpec `ebpf:"file_descriptor_to_ipv4"`
GoReadContext *ebpf.MapSpec `ebpf:"go_read_context"`
GoWriteContext *ebpf.MapSpec `ebpf:"go_write_context"`
Heap *ebpf.MapSpec `ebpf:"heap"`
LogBuffer *ebpf.MapSpec `ebpf:"log_buffer"`
OpensslReadContext *ebpf.MapSpec `ebpf:"openssl_read_context"`
OpensslWriteContext *ebpf.MapSpec `ebpf:"openssl_write_context"`
PidsMap *ebpf.MapSpec `ebpf:"pids_map"`
SslReadContext *ebpf.MapSpec `ebpf:"ssl_read_context"`
SslWriteContext *ebpf.MapSpec `ebpf:"ssl_write_context"`
}
// tlsTapperObjects contains all objects after they have been loaded into the kernel.
@@ -126,13 +107,11 @@ type tlsTapperMaps struct {
ChunksBuffer *ebpf.Map `ebpf:"chunks_buffer"`
ConnectSyscallInfo *ebpf.Map `ebpf:"connect_syscall_info"`
FileDescriptorToIpv4 *ebpf.Map `ebpf:"file_descriptor_to_ipv4"`
GoReadContext *ebpf.Map `ebpf:"go_read_context"`
GoWriteContext *ebpf.Map `ebpf:"go_write_context"`
Heap *ebpf.Map `ebpf:"heap"`
LogBuffer *ebpf.Map `ebpf:"log_buffer"`
OpensslReadContext *ebpf.Map `ebpf:"openssl_read_context"`
OpensslWriteContext *ebpf.Map `ebpf:"openssl_write_context"`
PidsMap *ebpf.Map `ebpf:"pids_map"`
SslReadContext *ebpf.Map `ebpf:"ssl_read_context"`
SslWriteContext *ebpf.Map `ebpf:"ssl_write_context"`
}
func (m *tlsTapperMaps) Close() error {
@@ -141,13 +120,11 @@ func (m *tlsTapperMaps) Close() error {
m.ChunksBuffer,
m.ConnectSyscallInfo,
m.FileDescriptorToIpv4,
m.GoReadContext,
m.GoWriteContext,
m.Heap,
m.LogBuffer,
m.OpensslReadContext,
m.OpensslWriteContext,
m.PidsMap,
m.SslReadContext,
m.SslWriteContext,
)
}
@@ -155,32 +132,24 @@ func (m *tlsTapperMaps) Close() error {
//
// It can be passed to loadTlsTapperObjects or ebpf.CollectionSpec.LoadAndAssign.
type tlsTapperPrograms struct {
GoCryptoTlsRead *ebpf.Program `ebpf:"go_crypto_tls_read"`
GoCryptoTlsReadEx *ebpf.Program `ebpf:"go_crypto_tls_read_ex"`
GoCryptoTlsWrite *ebpf.Program `ebpf:"go_crypto_tls_write"`
GoCryptoTlsWriteEx *ebpf.Program `ebpf:"go_crypto_tls_write_ex"`
SslRead *ebpf.Program `ebpf:"ssl_read"`
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
SslWrite *ebpf.Program `ebpf:"ssl_write"`
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
SslRead *ebpf.Program `ebpf:"ssl_read"`
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
SslWrite *ebpf.Program `ebpf:"ssl_write"`
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
}
func (p *tlsTapperPrograms) Close() error {
return _TlsTapperClose(
p.GoCryptoTlsRead,
p.GoCryptoTlsReadEx,
p.GoCryptoTlsWrite,
p.GoCryptoTlsWriteEx,
p.SslRead,
p.SslReadEx,
p.SslRetRead,
@@ -208,5 +177,5 @@ func _TlsTapperClose(closers ...io.Closer) error {
}
// Do not access this directly.
//go:embed tlstapper_bpfel_x86.o
//go:embed tlstapper_bpfel.o
var _TlsTapperBytes []byte

Binary file not shown.

Binary file not shown.

File diff suppressed because it is too large.

View File

@@ -24,9 +24,12 @@
},
"peerDependencies": {
"@craco/craco": "^6.4.3",
"@material-ui/core": "^4.12.4",
"@material-ui/icons": "^4.11.3",
"@material-ui/lab": "^4.0.0-alpha.60",
"@types/jest": "^26.0.24",
"@types/node": "^12.20.54",
"sass": "^1.52.3",
"node-sass": "^6.0.1",
"react": "^17.0.2",
"react-copy-to-clipboard": "^5.1.0",
"react-dom": "^17.0.2",
@@ -34,12 +37,6 @@
},
"dependencies": {
"@craco/craco": "^6.4.3",
"@emotion/react": "^11.9.0",
"@emotion/styled": "^11.8.1",
"@mui/icons-material": "^5.8.2",
"@mui/lab": "^5.0.0-alpha.84",
"@mui/material": "^5.8.2",
"@mui/styles": "^5.8.0",
"@types/lodash": "^4.14.182",
"@uiw/react-textarea-code-editor": "^1.6.0",
"axios": "^0.27.2",
@@ -60,7 +57,6 @@
"react-scrollable-feed-virtualized": "^1.4.9",
"react-syntax-highlighter": "^15.5.0",
"react-toastify": "^8.2.0",
"recharts": "^2.1.10",
"redoc": "^2.0.0-rc.71",
"styled-components": "^5.3.5",
"web-vitals": "^2.1.4",

View File

@@ -1,4 +1,4 @@
@import '../../../variables.module'
@import '../../variables.module.scss'
.closeIcon
cursor: pointer

View File

@@ -1,4 +1,4 @@
import { Box, Fade, FormControl, Modal, Backdrop } from "@mui/material";
import { Box, Fade, FormControl, Modal, Backdrop } from "@material-ui/core";
import { useCallback, useEffect, useState } from "react";
import { RedocStandalone } from "redoc";
import closeIcon from "assets/closeIcon.svg";
@@ -7,8 +7,8 @@ import style from './OasModal.module.sass';
import openApiLogo from 'assets/openApiLogo.png'
import { redocThemeOptions } from "./redocThemeOptions";
import React from "react";
import { TOAST_CONTAINER_ID } from "../../../configs/Consts";
import SearchableDropdown from "../../UI/SearchableDropdown/SearchableDropdown";
import { TOAST_CONTAINER_ID } from "../../configs/Consts";
import SearchableDropdown from "../UI/SearchableDropdown/SearchableDropdown";
const modalStyle = {
@@ -112,4 +112,4 @@ const OasModal = ({ openModal, handleCloseModal, getOasServices, getOasByService
);
};
export default OasModal;
export default OasModal;

View File

@@ -1,5 +1,5 @@
@import "../../../variables.module"
@import "../../../components"
@import "../../variables.module"
@import "../../components"
.closeIcon
position: absolute
@@ -94,8 +94,3 @@
.servicesFilterList
height: calc(100% - 30px - 52px)
.spinnerContainer
display: flex
justify-content: center
margin-bottom: 10px

View File

@@ -1,21 +1,22 @@
import React, { useState, useEffect, useCallback, useMemo } from "react";
import { Box, Fade, Modal, Backdrop, Button } from "@mui/material";
import { Box, Fade, Modal, Backdrop, Button } from "@material-ui/core";
import { toast } from "react-toastify";
import spinnerStyle from '../UI/style/Spinner.module.sass';
import spinnerImg from 'assets/spinner.svg';
import Graph from "react-graph-vis";
import debounce from 'lodash/debounce';
import ServiceMapOptions from './ServiceMapOptions'
import { useCommonStyles } from "../../../helpers/commonStyle";
import { useCommonStyles } from "../../helpers/commonStyle";
import refreshIcon from "assets/refresh.svg";
import filterIcon from "assets/filter-icon.svg";
import filterIconClicked from "assets/filter-icon-clicked.svg";
import closeIcon from "assets/close.svg"
import styles from './ServiceMapModal.module.sass'
import SelectList from "../../UI/SelectList/SelectList";
import SelectList from "../UI/SelectList";
import { GraphData, ServiceMapGraph } from "./ServiceMapModalTypes"
import { Utils } from "../../../helpers/Utils";
import { TOAST_CONTAINER_ID } from "../../../configs/Consts";
import Resizeable from "../../UI/Resizeable/Resizeable"
import { Utils } from "../../helpers/Utils";
import { TOAST_CONTAINER_ID } from "../../configs/Consts";
import Resizeable from "../UI/Resizeable"
const modalStyle = {
position: 'absolute',
@@ -197,14 +198,14 @@ export const ServiceMapModal: React.FC<ServiceMapModalProps> = ({ isOpen, onClos
<Fade in={isOpen}>
<Box sx={modalStyle}>
<div className={styles.closeIcon}>
<img src={closeIcon} alt="close" onClick={() => onClose()} style={{ cursor: "pointer", userSelect: "none" }}/>
<img src={closeIcon} alt="close" onClick={() => onClose()} style={{ cursor: "pointer", userSelect: "none" }}></img>
</div>
<div className={styles.headerContainer}>
<div className={styles.headerSection}>
<span className={styles.title}>Services</span>
<Button size="medium"
variant="contained"
startIcon={<img src={isFilterClicked ? filterIconClicked : filterIcon} className="custom" alt="refresh" style={{ height: "26px", width: "26px" }}/>}
startIcon={<img src={isFilterClicked ? filterIconClicked : filterIcon} className="custom" alt="refresh" style={{ height: "26px", width: "26px" }}></img>}
className={commonClasses.outlinedButton + " " + commonClasses.imagedButton + ` ${isFilterClicked ? commonClasses.clickedButton : ""}`}
onClick={() => setIsFilterClicked(prevState => !prevState)}
style={{ textTransform: 'unset' }}>
@@ -243,7 +244,7 @@ export const ServiceMapModal: React.FC<ServiceMapModalProps> = ({ isOpen, onClos
<div className={styles.graphSection}>
<div style={{ display: "flex", justifyContent: "space-between" }}>
</div>
{isLoading && <div className={styles.spinnerContainer}>
{isLoading && <div className={spinnerStyle.spinnerContainer}>
<img alt="spinner" src={spinnerImg} style={{ height: 50 }} />
</div>}
{!isLoading && <div style={{ height: "100%", width: "100%" }}>

View File

@@ -1,15 +1,15 @@
import React, {useCallback, useEffect, useMemo, useState} from "react";
import styles from './EntriesList.module.sass';
import styles from '../style/EntriesList.module.sass';
import ScrollableFeedVirtualized from "react-scrollable-feed-virtualized";
import Moment from 'moment';
import {EntryItem} from "../EntryListItem/EntryListItem";
import {EntryItem} from "./EntryListItem/EntryListItem";
import down from "assets/downImg.svg";
import spinner from 'assets/spinner.svg';
import {RecoilState, useRecoilState, useRecoilValue, useSetRecoilState} from "recoil";
import entriesAtom from "../../recoil/entries";
import queryAtom from "../../recoil/query";
import TrafficViewerApiAtom from "../../recoil/TrafficViewerApi";
import TrafficViewerApi from "../TrafficViewer/TrafficViewerApi";
import TrafficViewerApi from "./TrafficViewerApi";
import focusedEntryIdAtom from "../../recoil/focusedEntryId";
import {toast} from "react-toastify";
import {MAX_ENTRIES, TOAST_CONTAINER_ID} from "../../configs/Consts";

Some files were not shown because too many files have changed in this diff.