Mirror of https://github.com/kubeshark/kubeshark.git (synced 2026-02-15 10:29:55 +00:00)

Compare commits: 30.0-dev7 ... 30.0-dev33 (29 commits)
Commits (SHA1):

- c45a869b75
- 0a793cd9e0
- 8d19080c11
- 319c3c7a8d
- 0e7704eb15
- dbcb776139
- a3de34f544
- 99667984d6
- 763b0e7362
- e07e04377f
- 3c8f63ed92
- 11a2246cb9
- a2595afd5e
- 0f4710918f
- 4bdda920d5
- 59e6268ddd
- 2513e9099f
- a5c35d7d90
- 41a7587088
- 12f46da5c6
- 17f7879cff
- bc7776cbd3
- 2a31739100
- 308fa78955
- cff5987ed4
- 7893b4596d
- 774f07fccd
- 482e5c8b69
- 21902b5f86
.github/workflows/test.yml (vendored, 8 changes)
@@ -34,14 +34,6 @@ jobs:
run: |
sudo apt-get install libpcap-dev

- id: 'auth'
uses: 'google-github-actions/auth@v0'
with:
credentials_json: '${{ secrets.GCR_JSON_KEY }}'

- name: 'Set up Cloud SDK'
uses: 'google-github-actions/setup-gcloud@v0'

- name: Check CLI modified files
id: cli_modified_files
run: devops/check_modified_files.sh cli/

Dockerfile (16 changes)
@@ -6,8 +6,7 @@ FROM node:16 AS front-end

WORKDIR /app/ui-build

COPY ui/package.json .
COPY ui/package-lock.json .
COPY ui/package.json ui/package-lock.json ./
RUN npm i
COPY ui .
RUN npm run build
@@ -15,7 +14,7 @@ RUN npm run build
### Base builder image for native builds architecture
FROM golang:1.17-alpine AS builder-native-base
ENV CGO_ENABLED=1 GOOS=linux
RUN apk add libpcap-dev g++ perl-utils
RUN apk add --no-cache libpcap-dev g++ perl-utils


### Intermediate builder image for x86-64 to x86-64 native builds
@@ -77,17 +76,16 @@ RUN go build -ldflags="-extldflags=-static -s -w \
-X 'github.com/up9inc/mizu/agent/pkg/version.Ver=${VER}'" -o mizuagent .

# Download Basenine executable, verify the sha1sum
ADD https://github.com/up9inc/basenine/releases/download/v0.6.3/basenine_linux_${GOARCH} ./basenine_linux_${GOARCH}
ADD https://github.com/up9inc/basenine/releases/download/v0.6.3/basenine_linux_${GOARCH}.sha256 ./basenine_linux_${GOARCH}.sha256
RUN shasum -a 256 -c basenine_linux_${GOARCH}.sha256
RUN chmod +x ./basenine_linux_${GOARCH}
RUN mv ./basenine_linux_${GOARCH} ./basenine
ADD https://github.com/up9inc/basenine/releases/download/v0.6.6/basenine_linux_${GOARCH} ./basenine_linux_${GOARCH}
ADD https://github.com/up9inc/basenine/releases/download/v0.6.6/basenine_linux_${GOARCH}.sha256 ./basenine_linux_${GOARCH}.sha256

RUN shasum -a 256 -c basenine_linux_"${GOARCH}".sha256 && \
chmod +x ./basenine_linux_"${GOARCH}" && \
mv ./basenine_linux_"${GOARCH}" ./basenine

### The shipped image
ARG TARGETARCH=amd64
FROM ${TARGETARCH}/busybox:latest

# gin-gonic runs in debug mode without this
ENV GIN_MODE=release


Makefile (2 changes)
@@ -73,7 +73,7 @@ clean-agent: ## Clean agent.
clean-cli: ## Clean CLI.
@(cd cli; make clean ; echo "CLI cleanup done" )

clean-docker: ## Run clen docker
clean-docker: ## Run clean docker
@(echo "DOCKER cleanup - NOT IMPLEMENTED YET " )

test-lint: ## Run lint on all modules

@@ -142,7 +142,9 @@ function deepCheck(generalDict, protocolDict, methodDict, entry) {
|
||||
|
||||
if (value) {
|
||||
if (value.tab === valueTabs.response)
|
||||
cy.contains('Response').click();
|
||||
// temporary fix, change to some "data-cy" attribute,
|
||||
// this will fix the issue that happen because we have "response:" in the header of the right side
|
||||
cy.get('#rightSideContainer > :nth-child(3)').contains('Response').click();
|
||||
cy.get(Cypress.env('bodyJsonClass')).then(text => {
|
||||
expect(text.text()).to.match(value.regex)
|
||||
});
|
||||
|
||||
@@ -40,32 +40,23 @@ it('filtering guide check', function () {
|
||||
});
|
||||
|
||||
it('right side sanity test', function () {
|
||||
cy.get('#entryDetailedTitleBodySize').then(sizeTopLine => {
|
||||
const sizeOnTopLine = sizeTopLine.text().replace(' B', '');
|
||||
cy.contains('Response').click();
|
||||
cy.contains('Body Size (bytes)').parent().next().then(size => {
|
||||
const bodySizeByDetails = size.text();
|
||||
expect(sizeOnTopLine).to.equal(bodySizeByDetails, 'The body size in the top line should match the details in the response');
|
||||
cy.get('#entryDetailedTitleElapsedTime').then(timeInMs => {
|
||||
const time = timeInMs.text();
|
||||
if (time < '0ms') {
|
||||
throw new Error(`The time in the top line cannot be negative ${time}`);
|
||||
}
|
||||
});
|
||||
|
||||
if (parseInt(bodySizeByDetails) < 0) {
|
||||
throw new Error(`The body size cannot be negative. got the size: ${bodySizeByDetails}`)
|
||||
}
|
||||
// temporary fix, change to some "data-cy" attribute,
|
||||
// this will fix the issue that happen because we have "response:" in the header of the right side
|
||||
cy.get('#rightSideContainer > :nth-child(3)').contains('Response').click();
|
||||
|
||||
cy.get('#entryDetailedTitleElapsedTime').then(timeInMs => {
|
||||
const time = timeInMs.text();
|
||||
if (time < '0ms') {
|
||||
throw new Error(`The time in the top line cannot be negative ${time}`);
|
||||
}
|
||||
cy.get('#rightSideContainer [title="Status Code"]').then(status => {
|
||||
const statusCode = status.text();
|
||||
cy.contains('Status').parent().next().then(statusInDetails => {
|
||||
const statusCodeInDetails = statusInDetails.text();
|
||||
|
||||
cy.get('#rightSideContainer [title="Status Code"]').then(status => {
|
||||
const statusCode = status.text();
|
||||
cy.contains('Status').parent().next().then(statusInDetails => {
|
||||
const statusCodeInDetails = statusInDetails.text();
|
||||
|
||||
expect(statusCode).to.equal(statusCodeInDetails, 'The status code in the top line should match the status code in details');
|
||||
});
|
||||
});
|
||||
});
|
||||
expect(statusCode).to.equal(statusCodeInDetails, 'The status code in the top line should match the status code in details');
|
||||
});
|
||||
});
|
||||
});
|
||||
@@ -252,7 +243,9 @@ function deeperChcek(leftSidePath, rightSidePath, filterName, leftSideExpectedTe
|
||||
}
|
||||
|
||||
function checkRightSideResponseBody() {
|
||||
cy.contains('Response').click();
|
||||
// temporary fix, change to some "data-cy" attribute,
|
||||
// this will fix the issue that happen because we have "response:" in the header of the right side
|
||||
cy.get('#rightSideContainer > :nth-child(3)').contains('Response').click();
|
||||
clickCheckbox('Decode Base64');
|
||||
|
||||
cy.get(`${Cypress.env('bodyJsonClass')}`).then(value => {
|
||||
|
||||
agent/go.mod (35 changes)
@@ -20,7 +20,7 @@ require (
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/up9inc/basenine/client/go v0.0.0-20220315070758-3a76cfc4378e
|
||||
github.com/up9inc/basenine/client/go v0.0.0-20220326121918-785f3061c8ce
|
||||
github.com/up9inc/mizu/shared v0.0.0
|
||||
github.com/up9inc/mizu/tap v0.0.0
|
||||
github.com/up9inc/mizu/tap/api v0.0.0
|
||||
@@ -37,53 +37,79 @@ require (
|
||||
|
||||
require (
|
||||
cloud.google.com/go/compute v1.2.0 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.24 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/PuerkitoBio/purell v1.1.1 // indirect
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
|
||||
github.com/beevik/etree v1.1.0 // indirect
|
||||
github.com/bradleyfalzon/tlsx v0.0.0-20170624122154-28fd0e59bac4 // indirect
|
||||
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
|
||||
github.com/chanced/dynamic v0.0.0-20211210164248-f8fadb1d735b // indirect
|
||||
github.com/cilium/ebpf v0.8.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/fatih/camelcase v1.0.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||
github.com/fvbommel/sortorder v1.0.2 // indirect
|
||||
github.com/ghodss/yaml v1.0.0 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-logr/logr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.19.6 // indirect
|
||||
github.com/go-openapi/swag v0.21.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/go-cmp v0.5.7 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/gopacket v1.1.19 // indirect
|
||||
github.com/google/martian v2.1.0+incompatible // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.14.2 // indirect
|
||||
github.com/leodido/go-urn v1.2.1 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-isatty v0.0.14 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/ohler55/ojg v1.12.12 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/russross/blackfriday v1.6.0 // indirect
|
||||
github.com/santhosh-tekuri/jsonschema/v5 v5.0.0 // indirect
|
||||
github.com/segmentio/kafka-go v0.4.27 // indirect
|
||||
github.com/spf13/cobra v1.3.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/tidwall/gjson v1.14.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.0 // indirect
|
||||
github.com/tidwall/sjson v1.2.4 // indirect
|
||||
github.com/ugorji/go/codec v1.2.6 // indirect
|
||||
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 // indirect
|
||||
github.com/xlab/treeprint v1.1.0 // indirect
|
||||
go.starlark.net v0.0.0-20220203230714-bb14e151c28f // indirect
|
||||
golang.org/x/crypto v0.0.0-20220208050332-20e1d8d225ab // indirect
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
|
||||
@@ -96,10 +122,15 @@ require (
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
k8s.io/cli-runtime v0.23.3 // indirect
|
||||
k8s.io/component-base v0.23.3 // indirect
|
||||
k8s.io/klog/v2 v2.40.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf // indirect
|
||||
k8s.io/kubectl v0.23.3 // indirect
|
||||
k8s.io/utils v0.0.0-20220127004650-9b3446523e65 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.11.1 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.13.3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
)
|
||||
|
||||
agent/go.sum (38 changes)
@@ -53,6 +53,7 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
@@ -74,10 +75,13 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
|
||||
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
|
||||
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
@@ -111,6 +115,7 @@ github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=
|
||||
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
|
||||
github.com/chanced/cmpjson v0.0.0-20210415035445-da9262c1f20a h1:zG6t+4krPXcCKtLbjFvAh+fKN1d0qfD+RaCj+680OU8=
|
||||
github.com/chanced/cmpjson v0.0.0-20210415035445-da9262c1f20a/go.mod h1:yhcmlFk1hxuZ+5XZbupzT/cEm/eE4ZvWbmsW1+Q/aZE=
|
||||
@@ -147,6 +152,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
|
||||
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
@@ -163,6 +169,7 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/elastic/go-elasticsearch/v7 v7.17.0 h1:0fcSh4qeC/i1+7QU1KXpmq2iUAdMk4l0/vmbtW1+KJM=
|
||||
github.com/elastic/go-elasticsearch/v7 v7.17.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
@@ -184,6 +191,7 @@ github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
|
||||
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
|
||||
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
|
||||
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
|
||||
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
|
||||
@@ -201,6 +209,7 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
|
||||
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
|
||||
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
|
||||
github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
|
||||
github.com/fvbommel/sortorder v1.0.2 h1:mV4o8B2hKboCdkJm+a7uX/SIpZob4JzUpc5GGnM45eo=
|
||||
github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
|
||||
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
|
||||
github.com/getkin/kin-openapi v0.89.0 h1:p4nagHchUKGn85z/f+pse4aSh50nIBOYjOhMIku2hiA=
|
||||
@@ -237,6 +246,7 @@ github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUe
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
|
||||
github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
|
||||
github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
@@ -303,6 +313,7 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
|
||||
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
@@ -344,6 +355,7 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@@ -363,6 +375,7 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
@@ -410,7 +423,9 @@ github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
|
||||
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
@@ -451,6 +466,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
|
||||
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
|
||||
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
|
||||
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
|
||||
@@ -486,6 +502,7 @@ github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
@@ -493,8 +510,10 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
|
||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
@@ -503,10 +522,12 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/nav-inc/datetime v0.1.3 h1:PaybPUsScX+Cd3TEa1tYpfwU61deCEhMTlCO2hONm1c=
|
||||
github.com/nav-inc/datetime v0.1.3/go.mod h1:gKGf5G+cW7qkTo5TC/sieNyz6lYdrA9cf1PNV+pXIOE=
|
||||
@@ -538,6 +559,7 @@ github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTK
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
|
||||
@@ -583,6 +605,7 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE
|
||||
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
|
||||
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
@@ -593,6 +616,7 @@ github.com/santhosh-tekuri/jsonschema/v5 v5.0.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/segmentio/kafka-go v0.4.27 h1:sIhEozeL/TLN2mZ5dkG462vcGEWYKS+u31sXPjKhAM4=
|
||||
github.com/segmentio/kafka-go v0.4.27/go.mod h1:XzMcoMjSzDGHcIwpWUI7GB43iKZ2fTVmryPSGLf/MPg=
|
||||
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
@@ -611,6 +635,7 @@ github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
|
||||
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
|
||||
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
|
||||
github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0=
|
||||
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
@@ -624,6 +649,7 @@ github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy
|
||||
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
@@ -655,8 +681,8 @@ github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn
|
||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ=
|
||||
github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw=
|
||||
github.com/up9inc/basenine/client/go v0.0.0-20220315070758-3a76cfc4378e h1:/9dFXqvRDHcwPQdIGHP6iz6M0iAWBPOxYf6C+Ntq5w0=
|
||||
github.com/up9inc/basenine/client/go v0.0.0-20220315070758-3a76cfc4378e/go.mod h1:SvJGPoa/6erhUQV7kvHBwM/0x5LyO6XaG2lUaCaKiUI=
|
||||
github.com/up9inc/basenine/client/go v0.0.0-20220326121918-785f3061c8ce h1:vMTCpKItc9OyTLJXocNaq2NcBU5EnurJgTVOYb8W8dw=
|
||||
github.com/up9inc/basenine/client/go v0.0.0-20220326121918-785f3061c8ce/go.mod h1:SvJGPoa/6erhUQV7kvHBwM/0x5LyO6XaG2lUaCaKiUI=
|
||||
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg=
|
||||
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
github.com/wI2L/jsondiff v0.1.1 h1:r2TkoEet7E4JMO5+s1RCY2R0LrNPNHY6hbDeow2hRHw=
|
||||
@@ -665,6 +691,7 @@ github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhe
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
|
||||
github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk=
|
||||
github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY=
|
||||
@@ -701,6 +728,7 @@ go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4
|
||||
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
|
||||
go.starlark.net v0.0.0-20220203230714-bb14e151c28f h1:aW4TkS39/naJa9wPSbIXtZUQOlvuUh8gxCsLRrJoByU=
|
||||
go.starlark.net v0.0.0-20220203230714-bb14e151c28f/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
@@ -1205,6 +1233,7 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
@@ -1217,10 +1246,12 @@ k8s.io/api v0.23.3 h1:KNrME8KHGr12Ozjf8ytOewKzZh6hl/hHUZeHddT3a38=
|
||||
k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ=
|
||||
k8s.io/apimachinery v0.23.3 h1:7IW6jxNzrXTsP0c8yXz2E5Yx/WTzVPTsHIx/2Vm0cIk=
|
||||
k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
||||
k8s.io/cli-runtime v0.23.3 h1:aJiediw+uUbxkfO6BNulcAMTUoU9Om43g3R7rIkYqcw=
|
||||
k8s.io/cli-runtime v0.23.3/go.mod h1:yA00O5pDqnjkBh8fkuugBbfIfjB1nOpz+aYLotbnOfc=
|
||||
k8s.io/client-go v0.23.3 h1:23QYUmCQ/W6hW78xIwm3XqZrrKZM+LWDqW2zfo+szJs=
|
||||
k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE=
|
||||
k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
|
||||
k8s.io/component-base v0.23.3 h1:q+epprVdylgecijVGVdf4MbizEL2feW4ssd7cdo6LVY=
|
||||
k8s.io/component-base v0.23.3/go.mod h1:1Smc4C60rWG7d3HjSYpIwEbySQ3YWg0uzH5a2AtaTLg=
|
||||
k8s.io/component-helpers v0.23.3/go.mod h1:SH+W/WPTaTenbWyDEeY7iytAQiMh45aqKxkvlqQ57cg=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
@@ -1234,6 +1265,7 @@ k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2R
|
||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
|
||||
k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf h1:M9XBsiMslw2lb2ZzglC0TOkBPK5NQi0/noUrdnoFwUg=
|
||||
k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
|
||||
k8s.io/kubectl v0.23.3 h1:gJsF7cahkWDPYlNvYKK+OrBZLAJUBzCym+Zsi+dfi1E=
|
||||
k8s.io/kubectl v0.23.3/go.mod h1:VBeeXNgLhSabu4/k0O7Q0YujgnA3+CLTUE0RcmF73yY=
|
||||
k8s.io/metrics v0.23.3/go.mod h1:Ut8TvkbsO4oMVeUzaTArvPrcw9QRFLs2XNzUlORjdYE=
|
||||
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
@@ -1247,10 +1279,12 @@ sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNza
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y=
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY=
|
||||
sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8=
|
||||
sigs.k8s.io/kustomize/api v0.11.1 h1:/Vutu+gAqVo8skw1xCZrsZD39SN4Adg+z7FrSTw9pds=
|
||||
sigs.k8s.io/kustomize/api v0.11.1/go.mod h1:GZuhith5YcqxIDe0GnRJNx5xxPTjlwaLTt/e+ChUtJA=
|
||||
sigs.k8s.io/kustomize/cmd/config v0.10.2/go.mod h1:K2aW7nXJ0AaT+VA/eO0/dzFLxmpFcTzudmAgDwPY1HQ=
|
||||
sigs.k8s.io/kustomize/kustomize/v4 v4.4.1/go.mod h1:qOKJMMz2mBP+vcS7vK+mNz4HBLjaQSWRY22EF6Tb7Io=
|
||||
sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E=
|
||||
sigs.k8s.io/kustomize/kyaml v0.13.3 h1:tNNQIC+8cc+aXFTVg+RtQAOsjwUdYBZRAgYOVI3RBc4=
|
||||
sigs.k8s.io/kustomize/kyaml v0.13.3/go.mod h1:/ya3Gk4diiQzlE4mBh7wykyLRFZNvqlbh+JnwQ9Vhrc=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
|
||||
|
||||
@@ -30,8 +30,6 @@ import (
|
||||
"github.com/up9inc/mizu/agent/pkg/app"
|
||||
"github.com/up9inc/mizu/agent/pkg/config"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/gorilla/websocket"
|
||||
"github.com/op/go-logging"
|
||||
"github.com/up9inc/mizu/shared"
|
||||
@@ -47,7 +45,6 @@ var apiServerAddress = flag.String("api-server-address", "", "Address of mizu AP
|
||||
var namespace = flag.String("namespace", "", "Resolve IPs if they belong to resources in this namespace (default is all)")
|
||||
var harsReaderMode = flag.Bool("hars-read", false, "Run in hars-read mode")
|
||||
var harsDir = flag.String("hars-dir", "", "Directory to read hars from")
|
||||
var startTime int64
|
||||
|
||||
const (
|
||||
socketConnectionRetries = 30
|
||||
@@ -110,7 +107,7 @@ func hostApi(socketHarOutputChannel chan<- *tapApi.OutputChannelItem) *gin.Engin
|
||||
|
||||
app.Use(middlewares.CORSMiddleware()) // This has to be called after the static middleware, does not work if its called before
|
||||
|
||||
api.WebSocketRoutes(app, &eventHandlers, startTime)
|
||||
api.WebSocketRoutes(app, &eventHandlers)
|
||||
|
||||
if config.Config.OAS {
|
||||
routes.OASRoutes(app)
|
||||
@@ -124,6 +121,7 @@ func hostApi(socketHarOutputChannel chan<- *tapApi.OutputChannelItem) *gin.Engin
|
||||
routes.EntriesRoutes(app)
|
||||
routes.MetadataRoutes(app)
|
||||
routes.StatusRoutes(app)
|
||||
routes.DbRoutes(app)
|
||||
|
||||
return app
|
||||
}
|
||||
@@ -133,7 +131,6 @@ func runInApiServerMode(namespace string) *gin.Engine {
|
||||
logger.Log.Fatalf("Error loading config file %v", err)
|
||||
}
|
||||
app.ConfigureBasenineServer(shared.BasenineHost, shared.BaseninePort, config.Config.MaxDBSizeBytes, config.Config.LogLevel, config.Config.InsertionFilter)
|
||||
startTime = time.Now().UnixNano() / int64(time.Millisecond)
|
||||
api.StartResolving(namespace)
|
||||
|
||||
enableExpFeatureIfNeeded()
|
||||
@@ -156,11 +153,6 @@ func runInTapperMode() {
|
||||
|
||||
hostMode := os.Getenv(shared.HostModeEnvVar) == "1"
|
||||
tapOpts := &tap.TapOpts{HostMode: hostMode}
|
||||
tapTargets := getTapTargets()
|
||||
if tapTargets != nil {
|
||||
tapOpts.FilterAuthorities = tapTargets
|
||||
logger.Log.Infof("Filtering for the following authorities: %v", tapOpts.FilterAuthorities)
|
||||
}
|
||||
|
||||
filteredOutputItemsChannel := make(chan *tapApi.OutputChannelItem)
|
||||
|
||||
@@ -258,28 +250,6 @@ func setUIFlags(uiIndexPath string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseEnvVar(env string) map[string][]v1.Pod {
|
||||
var mapOfList map[string][]v1.Pod
|
||||
|
||||
val, present := os.LookupEnv(env)
|
||||
|
||||
if !present {
|
||||
return mapOfList
|
||||
}
|
||||
|
||||
err := json.Unmarshal([]byte(val), &mapOfList)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("env var %s's value of %v is invalid! must be map[string][]v1.Pod %v", env, mapOfList, err))
|
||||
}
|
||||
return mapOfList
|
||||
}
|
||||
|
||||
func getTapTargets() []v1.Pod {
|
||||
nodeName := os.Getenv(shared.NodeNameEnvVar)
|
||||
tappedAddressesPerNodeDict := parseEnvVar(shared.TappedAddressesPerNodeDictEnvVar)
|
||||
return tappedAddressesPerNodeDict[nodeName]
|
||||
}
|
||||
|
||||
func getTrafficFilteringOptions() *tapApi.TrafficFilteringOptions {
|
||||
filteringOptionsJson := os.Getenv(shared.MizuFilteringOptionsEnvVar)
|
||||
if filteringOptionsJson == "" {
|
||||
@@ -382,6 +352,14 @@ func handleIncomingMessageAsTapper(socketConnection *websocket.Conn) {
|
||||
} else {
|
||||
tap.UpdateTapTargets(tapConfigMessage.TapTargets)
|
||||
}
|
||||
case shared.WebSocketMessageTypeUpdateTappedPods:
|
||||
var tappedPodsMessage shared.WebSocketTappedPodsMessage
|
||||
if err := json.Unmarshal(message, &tappedPodsMessage); err != nil {
|
||||
logger.Log.Infof("Could not unmarshal message of message type %s %v", socketMessageBase.MessageType, err)
|
||||
return
|
||||
}
|
||||
nodeName := os.Getenv(shared.NodeNameEnvVar)
|
||||
tap.UpdateTapTargets(tappedPodsMessage.NodeToTappedPodMap[nodeName])
|
||||
default:
|
||||
logger.Log.Warningf("Received socket message of type %s for which no handlers are defined", socketMessageBase.MessageType)
|
||||
}
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/up9inc/mizu/agent/pkg/models"
|
||||
"github.com/up9inc/mizu/agent/pkg/utils"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/gorilla/websocket"
|
||||
@@ -59,13 +60,13 @@ func init() {
|
||||
connectedWebsockets = make(map[int]*SocketConnection)
|
||||
}
|
||||
|
||||
func WebSocketRoutes(app *gin.Engine, eventHandlers EventHandlers, startTime int64) {
|
||||
func WebSocketRoutes(app *gin.Engine, eventHandlers EventHandlers) {
|
||||
SocketGetBrowserHandler = func(c *gin.Context) {
|
||||
websocketHandler(c.Writer, c.Request, eventHandlers, false, startTime)
|
||||
websocketHandler(c.Writer, c.Request, eventHandlers, false)
|
||||
}
|
||||
|
||||
SocketGetTapperHandler = func(c *gin.Context) {
|
||||
websocketHandler(c.Writer, c.Request, eventHandlers, true, startTime)
|
||||
websocketHandler(c.Writer, c.Request, eventHandlers, true)
|
||||
}
|
||||
|
||||
app.GET("/ws", func(c *gin.Context) {
|
||||
@@ -77,7 +78,7 @@ func WebSocketRoutes(app *gin.Engine, eventHandlers EventHandlers, startTime int
|
||||
})
|
||||
}
|
||||
|
||||
func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers EventHandlers, isTapper bool, startTime int64) {
|
||||
func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers EventHandlers, isTapper bool) {
|
||||
ws, err := websocketUpgrader.Upgrade(w, r, nil)
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Failed to set websocket upgrade: %v", err)
|
||||
@@ -99,7 +100,9 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even
|
||||
if !isTapper {
|
||||
connection, err = basenine.NewConnection(shared.BasenineHost, shared.BaseninePort)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
logger.Log.Errorf("Failed to establish a connection to Basenine: %v", err)
|
||||
socketCleanup(socketId, connectedWebsockets[socketId])
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -115,7 +118,7 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even
|
||||
|
||||
eventHandlers.WebSocketConnect(socketId, isTapper)
|
||||
|
||||
startTimeBytes, _ := models.CreateWebsocketStartTimeMessage(startTime)
|
||||
startTimeBytes, _ := models.CreateWebsocketStartTimeMessage(utils.StartTime)
|
||||
|
||||
if err = SendToSocket(socketId, startTimeBytes); err != nil {
|
||||
logger.Log.Error(err)
|
||||
@@ -137,7 +140,8 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even
|
||||
|
||||
if !isTapper && !isQuerySet {
|
||||
if err := json.Unmarshal(msg, ¶ms); err != nil {
|
||||
logger.Log.Errorf("Error: %v", socketId, err)
|
||||
logger.Log.Errorf("Error unmarshalling parameters: %v", socketId, err)
|
||||
continue
|
||||
}
|
||||
|
||||
query := params.Query
|
||||
@@ -166,6 +170,10 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even
|
||||
|
||||
var entry *tapApi.Entry
|
||||
err = json.Unmarshal(bytes, &entry)
|
||||
if err != nil {
|
||||
logger.Log.Debugf("Error unmarshalling entry: %v", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
var message []byte
|
||||
if params.EnableFullEntries {
|
||||
@@ -193,7 +201,8 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even
|
||||
var metadata *basenine.Metadata
|
||||
err = json.Unmarshal(bytes, &metadata)
|
||||
if err != nil {
|
||||
logger.Log.Debugf("Error recieving metadata: %v", err.Error())
|
||||
logger.Log.Debugf("Error unmarshalling metadata: %v", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
metadataBytes, _ := models.CreateWebsocketQueryMetadataMessage(metadata)
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/up9inc/mizu/agent/pkg/models"
|
||||
"github.com/up9inc/mizu/agent/pkg/providers"
|
||||
"github.com/up9inc/mizu/agent/pkg/providers/tappedPods"
|
||||
"github.com/up9inc/mizu/agent/pkg/providers/tappers"
|
||||
"github.com/up9inc/mizu/agent/pkg/up9"
|
||||
|
||||
@@ -17,6 +18,7 @@ import (
|
||||
)
|
||||
|
||||
var browserClientSocketUUIDs = make([]int, 0)
|
||||
var tapperClientSocketUUIDs = make([]int, 0)
|
||||
var socketListLock = sync.Mutex{}
|
||||
|
||||
type RoutesEventHandlers struct {
|
||||
@@ -32,6 +34,13 @@ func (h *RoutesEventHandlers) WebSocketConnect(socketId int, isTapper bool) {
|
||||
if isTapper {
|
||||
logger.Log.Infof("Websocket event - Tapper connected, socket ID: %d", socketId)
|
||||
tappers.Connected()
|
||||
|
||||
socketListLock.Lock()
|
||||
tapperClientSocketUUIDs = append(tapperClientSocketUUIDs, socketId)
|
||||
socketListLock.Unlock()
|
||||
|
||||
nodeToTappedPodMap := tappedPods.GetNodeToTappedPodMap()
|
||||
SendTappedPods(socketId, nodeToTappedPodMap)
|
||||
} else {
|
||||
logger.Log.Infof("Websocket event - Browser socket connected, socket ID: %d", socketId)
|
||||
|
||||
@@ -47,6 +56,10 @@ func (h *RoutesEventHandlers) WebSocketDisconnect(socketId int, isTapper bool) {
|
||||
if isTapper {
|
||||
logger.Log.Infof("Websocket event - Tapper disconnected, socket ID: %d", socketId)
|
||||
tappers.Disconnected()
|
||||
|
||||
socketListLock.Lock()
|
||||
removeSocketUUIDFromTapperSlice(socketId)
|
||||
socketListLock.Unlock()
|
||||
} else {
|
||||
logger.Log.Infof("Websocket event - Browser socket disconnected, socket ID: %d", socketId)
|
||||
socketListLock.Lock()
|
||||
@@ -65,6 +78,16 @@ func BroadcastToBrowserClients(message []byte) {
|
||||
}
|
||||
}
|
||||
|
||||
func BroadcastToTapperClients(message []byte) {
|
||||
for _, socketId := range tapperClientSocketUUIDs {
|
||||
go func(socketId int) {
|
||||
if err := SendToSocket(socketId, message); err != nil {
|
||||
logger.Log.Error(err)
|
||||
}
|
||||
}(socketId)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *RoutesEventHandlers) WebSocketMessage(_ int, message []byte) {
|
||||
var socketMessageBase shared.WebSocketMessageMetadata
|
||||
err := json.Unmarshal(message, &socketMessageBase)
|
||||
@@ -135,3 +158,13 @@ func removeSocketUUIDFromBrowserSlice(uuidToRemove int) {
|
||||
}
|
||||
browserClientSocketUUIDs = newUUIDSlice
|
||||
}
|
||||
|
||||
func removeSocketUUIDFromTapperSlice(uuidToRemove int) {
|
||||
newUUIDSlice := make([]int, 0, len(tapperClientSocketUUIDs))
|
||||
for _, uuid := range tapperClientSocketUUIDs {
|
||||
if uuid != uuidToRemove {
|
||||
newUUIDSlice = append(newUUIDSlice, uuid)
|
||||
}
|
||||
}
|
||||
tapperClientSocketUUIDs = newUUIDSlice
|
||||
}
|
||||
|
||||
@@ -18,3 +18,23 @@ func BroadcastTappedPodsStatus() {
|
||||
BroadcastToBrowserClients(jsonBytes)
|
||||
}
|
||||
}
|
||||
|
||||
func SendTappedPods(socketId int, nodeToTappedPodMap shared.NodeToPodsMap) {
|
||||
message := shared.CreateWebSocketTappedPodsMessage(nodeToTappedPodMap)
|
||||
if jsonBytes, err := json.Marshal(message); err != nil {
|
||||
logger.Log.Errorf("Could not Marshal message %v", err)
|
||||
} else {
|
||||
if err := SendToSocket(socketId, jsonBytes); err != nil {
|
||||
logger.Log.Error(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BroadcastTappedPodsToTappers(nodeToTappedPodMap shared.NodeToPodsMap) {
|
||||
message := shared.CreateWebSocketTappedPodsMessage(nodeToTappedPodMap)
|
||||
if jsonBytes, err := json.Marshal(message); err != nil {
|
||||
logger.Log.Errorf("Could not Marshal message %v", err)
|
||||
} else {
|
||||
BroadcastToTapperClients(jsonBytes)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
"github.com/op/go-logging"
|
||||
basenine "github.com/up9inc/basenine/client/go"
|
||||
"github.com/up9inc/mizu/agent/pkg/api"
|
||||
"github.com/up9inc/mizu/agent/pkg/controllers"
|
||||
"github.com/up9inc/mizu/agent/pkg/utils"
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
tapApi "github.com/up9inc/mizu/tap/api"
|
||||
amqpExt "github.com/up9inc/mizu/tap/extensions/amqp"
|
||||
@@ -59,7 +59,6 @@ func LoadExtensions() {
|
||||
return Extensions[i].Protocol.Priority < Extensions[j].Protocol.Priority
|
||||
})
|
||||
|
||||
controllers.InitExtensionsMap(ExtensionsMap)
|
||||
api.InitExtensionsMap(ExtensionsMap)
|
||||
}
|
||||
|
||||
@@ -92,6 +91,8 @@ func ConfigureBasenineServer(host string, port string, dbSize int64, logLevel lo
|
||||
if err := basenine.InsertionFilter(host, port, insertionFilter); err != nil {
|
||||
logger.Log.Errorf("Error while setting the insertion filter: %v", err)
|
||||
}
|
||||
|
||||
utils.StartTime = time.Now().UnixNano() / int64(time.Millisecond)
|
||||
}
|
||||
|
||||
func GetEntryInputChannel() chan *tapApi.OutputChannelItem {
|
||||
|
||||
agent/pkg/controllers/db_controller.go (new normal file, 28 lines)
@@ -0,0 +1,28 @@
package controllers

import (
	"net/http"

	"github.com/gin-gonic/gin"
	basenine "github.com/up9inc/basenine/client/go"
	"github.com/up9inc/mizu/agent/pkg/app"
	"github.com/up9inc/mizu/agent/pkg/config"
	"github.com/up9inc/mizu/shared"
)

func Flush(c *gin.Context) {
	if err := basenine.Flush(shared.BasenineHost, shared.BaseninePort); err != nil {
		c.JSON(http.StatusBadRequest, err)
	} else {
		c.JSON(http.StatusOK, "Flushed.")
	}
}

func Reset(c *gin.Context) {
	if err := basenine.Reset(shared.BasenineHost, shared.BaseninePort); err != nil {
		c.JSON(http.StatusBadRequest, err)
	} else {
		app.ConfigureBasenineServer(shared.BasenineHost, shared.BaseninePort, config.Config.MaxDBSizeBytes, config.Config.LogLevel, config.Config.InsertionFilter)
		c.JSON(http.StatusOK, "Resetted.")
	}
}
@@ -6,6 +6,7 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/up9inc/mizu/agent/pkg/app"
|
||||
"github.com/up9inc/mizu/agent/pkg/har"
|
||||
"github.com/up9inc/mizu/agent/pkg/models"
|
||||
"github.com/up9inc/mizu/agent/pkg/validation"
|
||||
@@ -18,12 +19,6 @@ import (
|
||||
tapApi "github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
var extensionsMap map[string]*tapApi.Extension // global
|
||||
|
||||
func InitExtensionsMap(ref map[string]*tapApi.Extension) {
|
||||
extensionsMap = ref
|
||||
}
|
||||
|
||||
func Error(c *gin.Context, err error) bool {
|
||||
if err != nil {
|
||||
logger.Log.Errorf("Error getting entry: %v", err)
|
||||
@@ -77,7 +72,7 @@ func GetEntries(c *gin.Context) {
|
||||
return // exit
|
||||
}
|
||||
|
||||
extension := extensionsMap[entry.Protocol.Name]
|
||||
extension := app.ExtensionsMap[entry.Protocol.Name]
|
||||
base := extension.Dissector.Summarize(entry)
|
||||
|
||||
dataSlice = append(dataSlice, base)
|
||||
@@ -123,9 +118,19 @@ func GetEntry(c *gin.Context) {
|
||||
return // exit
|
||||
}
|
||||
|
||||
extension := extensionsMap[entry.Protocol.Name]
|
||||
extension := app.ExtensionsMap[entry.Protocol.Name]
|
||||
base := extension.Dissector.Summarize(entry)
|
||||
representation, bodySize, _ := extension.Dissector.Represent(entry.Request, entry.Response)
|
||||
var representation []byte
|
||||
representation, err = extension.Dissector.Represent(entry.Request, entry.Response)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusNotFound, gin.H{
|
||||
"error": true,
|
||||
"type": "error",
|
||||
"autoClose": "5000",
|
||||
"msg": err.Error(),
|
||||
})
|
||||
return // exit
|
||||
}
|
||||
|
||||
var rules []map[string]interface{}
|
||||
var isRulesEnabled bool
|
||||
@@ -142,7 +147,6 @@ func GetEntry(c *gin.Context) {
|
||||
c.JSON(http.StatusOK, tapApi.EntryWrapper{
|
||||
Protocol: entry.Protocol,
|
||||
Representation: string(representation),
|
||||
BodySize: bodySize,
|
||||
Data: entry,
|
||||
Base: base,
|
||||
Rules: rules,
|
||||
|
||||
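The two hunks above change GetEntry: Represent now returns only the representation bytes and an error (handled with a 404 JSON response instead of being discarded), and the EntryWrapper response consequently drops its BodySize field. Below is a minimal, self-contained sketch of the new call pattern; the real Dissector interface lives in the tap API and is not part of this compare, so the interface and types here are illustrative stand-ins inferred from the call sites.

```go
package main

import (
	"errors"
	"fmt"
)

// dissector stands in for the tap API's Dissector; after this change the
// Represent call returns only the representation bytes and an error.
type dissector interface {
	Represent(request, response interface{}) ([]byte, error)
}

type fakeDissector struct{}

func (fakeDissector) Represent(request, response interface{}) ([]byte, error) {
	if request == nil && response == nil {
		return nil, errors.New("nothing to represent")
	}
	return []byte(fmt.Sprintf(`{"request":%q,"response":%q}`, request, response)), nil
}

func main() {
	var d dissector = fakeDissector{}

	// Mirrors the new GetEntry flow: check the error and bail out instead of
	// silently ignoring it (the old call also returned a bodySize value,
	// which is gone along with EntryWrapper.BodySize).
	representation, err := d.Represent("GET /health", "200 OK")
	if err != nil {
		fmt.Println("represent failed:", err)
		return
	}
	fmt.Println(string(representation))
}
```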
@@ -2,6 +2,7 @@ package controllers
 
 import (
 	"net/http"
+	core "k8s.io/api/core/v1"
 
 	"github.com/gin-gonic/gin"
 	"github.com/up9inc/mizu/agent/pkg/api"
@@ -12,6 +13,7 @@ import (
 	"github.com/up9inc/mizu/agent/pkg/up9"
 	"github.com/up9inc/mizu/agent/pkg/validation"
 	"github.com/up9inc/mizu/shared"
+	"github.com/up9inc/mizu/shared/kubernetes"
 	"github.com/up9inc/mizu/shared/logger"
 )
 
@@ -30,15 +32,21 @@ func HealthCheck(c *gin.Context) {
 }
 
 func PostTappedPods(c *gin.Context) {
-	var requestTappedPods []*shared.PodInfo
+	var requestTappedPods []core.Pod
 	if err := c.Bind(&requestTappedPods); err != nil {
 		c.JSON(http.StatusBadRequest, err)
 		return
 	}
 
+	podInfos := kubernetes.GetPodInfosForPods(requestTappedPods)
+
 	logger.Log.Infof("[Status] POST request: %d tapped pods", len(requestTappedPods))
-	tappedPods.Set(requestTappedPods)
+	tappedPods.Set(podInfos)
 	api.BroadcastTappedPodsStatus()
+
+	nodeToTappedPodMap := kubernetes.GetNodeHostToTappedPodsMap(requestTappedPods)
+	tappedPods.SetNodeToTappedPodMap(nodeToTappedPodMap)
+	api.BroadcastTappedPodsToTappers(nodeToTappedPodMap)
 }
 
 func PostTapperStatus(c *gin.Context) {

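With this change the CLI posts the full core.Pod objects it already tracks and the agent converts them once with kubernetes.GetPodInfosForPods, which also lets the agent derive the node-to-tapped-pods map and broadcast it to the tappers instead of having the CLI pre-compute PodInfo values (see the matching CLI-side hunk further down).
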
@@ -14,9 +14,10 @@ import (
 const FilePath = shared.DataDirPath + "tapped-pods.json"
 
 var (
-	lock       = &sync.Mutex{}
-	syncOnce   sync.Once
-	tappedPods []*shared.PodInfo
+	lock                    = &sync.Mutex{}
+	syncOnce                sync.Once
+	tappedPods              []*shared.PodInfo
+	nodeHostToTappedPodsMap shared.NodeToPodsMap
 )
 
 func Get() []*shared.PodInfo {
@@ -55,3 +56,14 @@ func GetTappedPodsStatus() []shared.TappedPodStatus {
 
 	return tappedPodsStatus
 }
+
+func SetNodeToTappedPodMap(nodeToTappedPodsMap shared.NodeToPodsMap) {
+	summary := nodeToTappedPodsMap.Summary()
+	logger.Log.Infof("Setting node to tapped pods map to %v", summary)
+
+	nodeHostToTappedPodsMap = nodeToTappedPodsMap
+}
+
+func GetNodeToTappedPodMap() shared.NodeToPodsMap {
+	return nodeHostToTappedPodsMap
+}

15
agent/pkg/routes/db_routes.go
Normal file
@@ -0,0 +1,15 @@
+package routes
+
+import (
+	"github.com/up9inc/mizu/agent/pkg/controllers"
+
+	"github.com/gin-gonic/gin"
+)
+
+// DbRoutes defines the group of database routes.
+func DbRoutes(app *gin.Engine) {
+	routeGroup := app.Group("/db")
+
+	routeGroup.GET("/flush", controllers.Flush)
+	routeGroup.GET("/reset", controllers.Reset)
+}
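Both handlers are registered as plain GET endpoints under the /db group, so once the agent is running they can be exercised with a simple HTTP GET to /db/flush or /db/reset; the reset handler reconfigures Basenine with the current size limit and insertion filter, as shown in the controller hunk above.
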
@@ -17,6 +17,10 @@ import (
 	"github.com/up9inc/mizu/shared/logger"
 )
 
+var (
+	StartTime int64 // global
+)
+
 // StartServer starts the server with a graceful shutdown
 func StartServer(app *gin.Engine) {
 	signals := make(chan os.Signal, 2)

@@ -10,8 +10,6 @@ import (
 	"net/url"
 	"time"
 
-	"github.com/up9inc/mizu/shared/kubernetes"
-
 	"github.com/up9inc/mizu/cli/config"
 	"github.com/up9inc/mizu/shared"
 	"github.com/up9inc/mizu/shared/logger"
@@ -83,15 +81,13 @@ func (provider *Provider) ReportTapperStatus(tapperStatus shared.TapperStatus) e
 func (provider *Provider) ReportTappedPods(pods []core.Pod) error {
 	tappedPodsUrl := fmt.Sprintf("%s/status/tappedPods", provider.url)
 
-	podInfos := kubernetes.GetPodInfosForPods(pods)
-
-	if jsonValue, err := json.Marshal(podInfos); err != nil {
+	if jsonValue, err := json.Marshal(pods); err != nil {
 		return fmt.Errorf("failed to marshal the tapped pods %w", err)
 	} else {
 		if _, err := utils.Post(tappedPodsUrl, "application/json", bytes.NewBuffer(jsonValue), provider.client); err != nil {
 			return fmt.Errorf("failed to send the tapped pods to the API server %w", err)
 		} else {
-			logger.Log.Debugf("Reported to server API about %d tapped pods successfully", len(podInfos))
+			logger.Log.Debugf("Reported to server API about %d tapped pods successfully", len(pods))
 			return nil
 		}
 	}

@@ -27,4 +27,6 @@ func init() {
 	}
 
 	checkCmd.Flags().Bool(configStructs.PreTapCheckName, defaultCheckConfig.PreTap, "Check pre-tap Mizu installation for potential problems")
+	checkCmd.Flags().Bool(configStructs.PreInstallCheckName, defaultCheckConfig.PreInstall, "Check pre-install Mizu installation for potential problems")
+	checkCmd.Flags().Bool(configStructs.ImagePullCheckName, defaultCheckConfig.ImagePull, "Test connectivity to container image registry by creating and removing a temporary pod in 'default' namespace")
 }
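
Assuming the flag names resolve to the constants introduced later in this change ("pre-tap", "pre-install", "image-pull"), a typical invocation would look like mizu check --pre-install --image-pull; the runner below treats pre-tap and pre-install as alternatives, so only one of the two permission checks runs in a single invocation.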
|
||||
|
||||
102
cli/cmd/check/imagePullInCluster.go
Normal file
102
cli/cmd/check/imagePullInCluster.go
Normal file
@@ -0,0 +1,102 @@
|
||||
package check
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"github.com/up9inc/mizu/shared/kubernetes"
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
core "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"regexp"
|
||||
"time"
|
||||
)
|
||||
|
||||
func ImagePullInCluster(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
|
||||
logger.Log.Infof("\nimage-pull-in-cluster\n--------------------")
|
||||
|
||||
namespace := "default"
|
||||
podName := "mizu-test"
|
||||
|
||||
defer func() {
|
||||
if err := kubernetesProvider.RemovePod(ctx, namespace, podName); err != nil {
|
||||
logger.Log.Errorf("%v error while removing test pod in cluster, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := createImagePullInClusterPod(ctx, kubernetesProvider, namespace, podName); err != nil {
|
||||
logger.Log.Errorf("%v error while creating test pod in cluster, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return false
|
||||
}
|
||||
|
||||
if err := checkImagePulled(ctx, kubernetesProvider, namespace, podName); err != nil {
|
||||
logger.Log.Errorf("%v cluster is not able to pull mizu containers from docker hub, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Log.Infof("%v cluster is able to pull mizu containers from docker hub", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
return true
|
||||
}
|
||||
|
||||
func checkImagePulled(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespace string, podName string) error {
|
||||
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", podName))
|
||||
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
|
||||
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{namespace}, podWatchHelper)
|
||||
|
||||
timeAfter := time.After(30 * time.Second)
|
||||
|
||||
for {
|
||||
select {
|
||||
case wEvent, ok := <-eventChan:
|
||||
if !ok {
|
||||
eventChan = nil
|
||||
continue
|
||||
}
|
||||
|
||||
pod, err := wEvent.ToPod()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pod.Status.Phase == core.PodRunning {
|
||||
return nil
|
||||
}
|
||||
case err, ok := <-errorChan:
|
||||
if !ok {
|
||||
errorChan = nil
|
||||
continue
|
||||
}
|
||||
|
||||
return err
|
||||
case <-timeAfter:
|
||||
return fmt.Errorf("image not pulled in time")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createImagePullInClusterPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespace string, podName string) error {
|
||||
var zero int64
|
||||
pod := &core.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: core.PodSpec{
|
||||
Containers: []core.Container{
|
||||
{
|
||||
Name: "probe",
|
||||
Image: "up9inc/busybox",
|
||||
ImagePullPolicy: "Always",
|
||||
Command: []string{"cat"},
|
||||
Stdin: true,
|
||||
},
|
||||
},
|
||||
TerminationGracePeriodSeconds: &zero,
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := kubernetesProvider.CreatePod(ctx, namespace, pod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
31
cli/cmd/check/kubernetesApi.go
Normal file
31
cli/cmd/check/kubernetesApi.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package check
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/cli/config"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"github.com/up9inc/mizu/shared/kubernetes"
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
"github.com/up9inc/mizu/shared/semver"
|
||||
)
|
||||
|
||||
|
||||
func KubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
|
||||
logger.Log.Infof("\nkubernetes-api\n--------------------")
|
||||
|
||||
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.KubeContext)
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v can't initialize the client, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return nil, nil, false
|
||||
}
|
||||
logger.Log.Infof("%v can initialize the client", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
|
||||
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v can't query the Kubernetes API, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return nil, nil, false
|
||||
}
|
||||
logger.Log.Infof("%v can query the Kubernetes API", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
|
||||
return kubernetesProvider, kubernetesVersion, true
|
||||
}
|
||||
131
cli/cmd/check/kubernetesPermissions.go
Normal file
131
cli/cmd/check/kubernetesPermissions.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package check
|
||||
|
||||
import (
|
||||
"context"
|
||||
"embed"
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/cli/bucket"
|
||||
"github.com/up9inc/mizu/cli/config"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"github.com/up9inc/mizu/shared/kubernetes"
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
rbac "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func TapKubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool {
|
||||
logger.Log.Infof("\nkubernetes-permissions\n--------------------")
|
||||
|
||||
var filePath string
|
||||
if config.Config.IsNsRestrictedMode() {
|
||||
filePath = "permissionFiles/permissions-ns-tap.yaml"
|
||||
} else {
|
||||
filePath = "permissionFiles/permissions-all-namespaces-tap.yaml"
|
||||
}
|
||||
|
||||
data, err := embedFS.ReadFile(filePath)
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return false
|
||||
}
|
||||
|
||||
decode := scheme.Codecs.UniversalDeserializer().Decode
|
||||
obj, _, err := decode(data, nil, nil)
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return false
|
||||
}
|
||||
|
||||
switch resource := obj.(type) {
|
||||
case *rbac.Role:
|
||||
return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.MizuResourcesNamespace)
|
||||
case *rbac.ClusterRole:
|
||||
return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, "")
|
||||
}
|
||||
|
||||
logger.Log.Errorf("%v error while checking kubernetes permissions, err: resource of type 'Role' or 'ClusterRole' not found in permission files", fmt.Sprintf(uiUtils.Red, "✗"))
|
||||
return false
|
||||
}
|
||||
|
||||
func InstallKubernetesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
|
||||
logger.Log.Infof("\nkubernetes-permissions\n--------------------")
|
||||
|
||||
bucketProvider := bucket.NewProvider(config.Config.Install.TemplateUrl, bucket.DefaultTimeout)
|
||||
installTemplate, err := bucketProvider.GetInstallTemplate(config.Config.Install.TemplateName)
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return false
|
||||
}
|
||||
|
||||
resourcesTemplate := strings.Split(installTemplate, "---")[1:]
|
||||
|
||||
permissionsExist := true
|
||||
|
||||
decode := scheme.Codecs.UniversalDeserializer().Decode
|
||||
for _, resourceTemplate := range resourcesTemplate {
|
||||
obj, _, err := decode([]byte(resourceTemplate), nil, nil)
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return false
|
||||
}
|
||||
|
||||
groupVersionKind := obj.GetObjectKind().GroupVersionKind()
|
||||
resource := fmt.Sprintf("%vs", strings.ToLower(groupVersionKind.Kind))
|
||||
permissionsExist = checkCreatePermission(ctx, kubernetesProvider, resource, groupVersionKind.Group, obj.(metav1.Object).GetNamespace()) && permissionsExist
|
||||
|
||||
switch resourceObj := obj.(type) {
|
||||
case *rbac.Role:
|
||||
permissionsExist = checkRulesPermissions(ctx, kubernetesProvider, resourceObj.Rules, resourceObj.Namespace) && permissionsExist
|
||||
case *rbac.ClusterRole:
|
||||
permissionsExist = checkRulesPermissions(ctx, kubernetesProvider, resourceObj.Rules, "") && permissionsExist
|
||||
}
|
||||
}
|
||||
|
||||
return permissionsExist
|
||||
}
|
||||
|
||||
func checkCreatePermission(ctx context.Context, kubernetesProvider *kubernetes.Provider, resource string, group string, namespace string) bool {
|
||||
exist, err := kubernetesProvider.CanI(ctx, namespace, resource, "create", group)
|
||||
return checkPermissionExist(group, resource, "create", namespace, exist, err)
|
||||
}
|
||||
|
||||
func checkRulesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider, rules []rbac.PolicyRule, namespace string) bool {
|
||||
permissionsExist := true
|
||||
|
||||
for _, rule := range rules {
|
||||
for _, group := range rule.APIGroups {
|
||||
for _, resource := range rule.Resources {
|
||||
for _, verb := range rule.Verbs {
|
||||
exist, err := kubernetesProvider.CanI(ctx, namespace, resource, verb, group)
|
||||
permissionsExist = checkPermissionExist(group, resource, verb, namespace, exist, err) && permissionsExist
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return permissionsExist
|
||||
}
|
||||
|
||||
func checkPermissionExist(group string, resource string, verb string, namespace string, exist bool, err error) bool {
|
||||
var groupAndNamespace string
|
||||
if group != "" && namespace != "" {
|
||||
groupAndNamespace = fmt.Sprintf("in api group '%v' and namespace '%v'", group, namespace)
|
||||
} else if group != "" {
|
||||
groupAndNamespace = fmt.Sprintf("in api group '%v'", group)
|
||||
} else if namespace != "" {
|
||||
groupAndNamespace = fmt.Sprintf("in namespace '%v'", namespace)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v error checking permission for %v %v %v, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), verb, resource, groupAndNamespace, err)
|
||||
return false
|
||||
} else if !exist {
|
||||
logger.Log.Errorf("%v can't %v %v %v", fmt.Sprintf(uiUtils.Red, "✗"), verb, resource, groupAndNamespace)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Log.Infof("%v can %v %v %v", fmt.Sprintf(uiUtils.Green, "√"), verb, resource, groupAndNamespace)
|
||||
return true
|
||||
}
|
||||
95
cli/cmd/check/kubernetesResources.go
Normal file
95
cli/cmd/check/kubernetesResources.go
Normal file
@@ -0,0 +1,95 @@
|
||||
package check
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/cli/config"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"github.com/up9inc/mizu/shared/kubernetes"
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
)
|
||||
|
||||
func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
|
||||
logger.Log.Infof("\nk8s-components\n--------------------")
|
||||
|
||||
exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.MizuResourcesNamespace)
|
||||
allResourcesExist := checkResourceExist(config.Config.MizuResourcesNamespace, "namespace", exist, err)
|
||||
|
||||
exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ConfigMapName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.ConfigMapName, "config map", exist, err) && allResourcesExist
|
||||
|
||||
exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ServiceAccountName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
|
||||
|
||||
if config.Config.IsNsRestrictedMode() {
|
||||
exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
|
||||
|
||||
exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleBindingName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
|
||||
} else {
|
||||
exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err) && allResourcesExist
|
||||
|
||||
exist, err = kubernetesProvider.DoesClusterRoleBindingExist(ctx, kubernetes.ClusterRoleBindingName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
|
||||
}
|
||||
|
||||
exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.ApiServerPodName, "service", exist, err) && allResourcesExist
|
||||
|
||||
allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
|
||||
|
||||
return allResourcesExist
|
||||
}
|
||||
|
||||
func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
|
||||
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName); err != nil {
|
||||
logger.Log.Errorf("%v error checking if '%v' pod is running, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName, err)
|
||||
return false
|
||||
} else if len(pods) == 0 {
|
||||
logger.Log.Errorf("%v '%v' pod doesn't exist", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName)
|
||||
return false
|
||||
} else if !kubernetes.IsPodRunning(&pods[0]) {
|
||||
logger.Log.Errorf("%v '%v' pod not running", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Log.Infof("%v '%v' pod running", fmt.Sprintf(uiUtils.Green, "√"), kubernetes.ApiServerPodName)
|
||||
|
||||
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.MizuResourcesNamespace, kubernetes.TapperPodName); err != nil {
|
||||
logger.Log.Errorf("%v error checking if '%v' pods are running, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.TapperPodName, err)
|
||||
return false
|
||||
} else {
|
||||
tappers := 0
|
||||
notRunningTappers := 0
|
||||
|
||||
for _, pod := range pods {
|
||||
tappers += 1
|
||||
if !kubernetes.IsPodRunning(&pod) {
|
||||
notRunningTappers += 1
|
||||
}
|
||||
}
|
||||
|
||||
if notRunningTappers > 0 {
|
||||
logger.Log.Errorf("%v '%v' %v/%v pods are not running", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.TapperPodName, notRunningTappers, tappers)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Log.Infof("%v '%v' %v pods running", fmt.Sprintf(uiUtils.Green, "√"), kubernetes.TapperPodName, tappers)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool {
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v error checking if '%v' %v exists, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), resourceName, resourceType, err)
|
||||
return false
|
||||
} else if !exist {
|
||||
logger.Log.Errorf("%v '%v' %v doesn't exist", fmt.Sprintf(uiUtils.Red, "✗"), resourceName, resourceType)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Log.Infof("%v '%v' %v exists", fmt.Sprintf(uiUtils.Green, "√"), resourceName, resourceType)
|
||||
return true
|
||||
}
|
||||
21
cli/cmd/check/kubernetesVersion.go
Normal file
21
cli/cmd/check/kubernetesVersion.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package check
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"github.com/up9inc/mizu/shared/kubernetes"
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
"github.com/up9inc/mizu/shared/semver"
|
||||
)
|
||||
|
||||
func KubernetesVersion(kubernetesVersion *semver.SemVersion) bool {
|
||||
logger.Log.Infof("\nkubernetes-version\n--------------------")
|
||||
|
||||
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
|
||||
logger.Log.Errorf("%v not running the minimum Kubernetes API version, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Log.Infof("%v is running the minimum Kubernetes API version", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
return true
|
||||
}
|
||||
83
cli/cmd/check/serverConnection.go
Normal file
83
cli/cmd/check/serverConnection.go
Normal file
@@ -0,0 +1,83 @@
|
||||
package check
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/cli/apiserver"
|
||||
"github.com/up9inc/mizu/cli/config"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"github.com/up9inc/mizu/shared/kubernetes"
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
|
||||
logger.Log.Infof("\nAPI-server-connectivity\n--------------------")
|
||||
|
||||
serverUrl := fmt.Sprintf("http://%s", kubernetes.GetMizuApiServerProxiedHostAndPath(config.Config.Tap.GuiPort))
|
||||
|
||||
apiServerProvider := apiserver.NewProvider(serverUrl, 1, apiserver.DefaultTimeout)
|
||||
if err := apiServerProvider.TestConnection(); err == nil {
|
||||
logger.Log.Infof("%v found Mizu server tunnel available and connected successfully to API server", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
return true
|
||||
}
|
||||
|
||||
connectedToApiServer := false
|
||||
|
||||
if err := checkProxy(serverUrl, kubernetesProvider); err != nil {
|
||||
logger.Log.Errorf("%v couldn't connect to API server using proxy, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
} else {
|
||||
connectedToApiServer = true
|
||||
logger.Log.Infof("%v connected successfully to API server using proxy", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
}
|
||||
|
||||
if err := checkPortForward(serverUrl, kubernetesProvider); err != nil {
|
||||
logger.Log.Errorf("%v couldn't connect to API server using port-forward, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
} else {
|
||||
connectedToApiServer = true
|
||||
logger.Log.Infof("%v connected successfully to API server using port-forward", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
}
|
||||
|
||||
return connectedToApiServer
|
||||
}
|
||||
|
||||
func checkProxy(serverUrl string, kubernetesProvider *kubernetes.Provider) error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Tap.GuiPort, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName, cancel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
apiServerProvider := apiserver.NewProvider(serverUrl, apiserver.DefaultRetries, apiserver.DefaultTimeout)
|
||||
if err := apiServerProvider.TestConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := httpServer.Shutdown(ctx); err != nil {
|
||||
logger.Log.Debugf("Error occurred while stopping proxy, err: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkPortForward(serverUrl string, kubernetesProvider *kubernetes.Provider) error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
podRegex, _ := regexp.Compile(kubernetes.ApiServerPodName)
|
||||
forwarder, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.MizuResourcesNamespace, podRegex, config.Config.Tap.GuiPort, ctx, cancel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
apiServerProvider := apiserver.NewProvider(serverUrl, apiserver.DefaultRetries, apiserver.DefaultTimeout)
|
||||
if err := apiServerProvider.TestConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
forwarder.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -4,20 +4,10 @@ import (
|
||||
"context"
|
||||
"embed"
|
||||
"fmt"
|
||||
core "k8s.io/api/core/v1"
|
||||
rbac "k8s.io/api/rbac/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/up9inc/mizu/cli/apiserver"
|
||||
"github.com/up9inc/mizu/cli/cmd/check"
|
||||
"github.com/up9inc/mizu/cli/config"
|
||||
"github.com/up9inc/mizu/cli/uiUtils"
|
||||
"github.com/up9inc/mizu/shared/kubernetes"
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
"github.com/up9inc/mizu/shared/semver"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -31,27 +21,35 @@ func runMizuCheck() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel() // cancel will be called when this function exits
|
||||
|
||||
kubernetesProvider, kubernetesVersion, checkPassed := checkKubernetesApi()
|
||||
kubernetesProvider, kubernetesVersion, checkPassed := check.KubernetesApi()
|
||||
|
||||
if checkPassed {
|
||||
checkPassed = checkKubernetesVersion(kubernetesVersion)
|
||||
checkPassed = check.KubernetesVersion(kubernetesVersion)
|
||||
}
|
||||
|
||||
if config.Config.Check.PreTap {
|
||||
if checkPassed {
|
||||
checkPassed = checkK8sTapPermissions(ctx, kubernetesProvider)
|
||||
if config.Config.Check.PreTap || config.Config.Check.PreInstall || config.Config.Check.ImagePull {
|
||||
if config.Config.Check.PreTap {
|
||||
if checkPassed {
|
||||
checkPassed = check.TapKubernetesPermissions(ctx, embedFS, kubernetesProvider)
|
||||
}
|
||||
} else if config.Config.Check.PreInstall {
|
||||
if checkPassed {
|
||||
checkPassed = check.InstallKubernetesPermissions(ctx, kubernetesProvider)
|
||||
}
|
||||
}
|
||||
|
||||
if checkPassed {
|
||||
checkPassed = checkImagePullInCluster(ctx, kubernetesProvider)
|
||||
if config.Config.Check.ImagePull {
|
||||
if checkPassed {
|
||||
checkPassed = check.ImagePullInCluster(ctx, kubernetesProvider)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if checkPassed {
|
||||
checkPassed = checkK8sResources(ctx, kubernetesProvider)
|
||||
checkPassed = check.KubernetesResources(ctx, kubernetesProvider)
|
||||
}
|
||||
|
||||
if checkPassed {
|
||||
checkPassed = checkServerConnection(kubernetesProvider)
|
||||
checkPassed = check.ServerConnection(kubernetesProvider)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -61,365 +59,3 @@ func runMizuCheck() {
|
||||
logger.Log.Errorf("\nStatus check results are %v", fmt.Sprintf(uiUtils.Red, "✗"))
|
||||
}
|
||||
}
|
||||
|
||||
func checkKubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
|
||||
logger.Log.Infof("\nkubernetes-api\n--------------------")
|
||||
|
||||
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.KubeContext)
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v can't initialize the client, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return nil, nil, false
|
||||
}
|
||||
logger.Log.Infof("%v can initialize the client", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
|
||||
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v can't query the Kubernetes API, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return nil, nil, false
|
||||
}
|
||||
logger.Log.Infof("%v can query the Kubernetes API", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
|
||||
return kubernetesProvider, kubernetesVersion, true
|
||||
}
|
||||
|
||||
func checkKubernetesVersion(kubernetesVersion *semver.SemVersion) bool {
|
||||
logger.Log.Infof("\nkubernetes-version\n--------------------")
|
||||
|
||||
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
|
||||
logger.Log.Errorf("%v not running the minimum Kubernetes API version, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Log.Infof("%v is running the minimum Kubernetes API version", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
return true
|
||||
}
|
||||
|
||||
func checkServerConnection(kubernetesProvider *kubernetes.Provider) bool {
|
||||
logger.Log.Infof("\nAPI-server-connectivity\n--------------------")
|
||||
|
||||
serverUrl := GetApiServerUrl(config.Config.Tap.GuiPort)
|
||||
|
||||
apiServerProvider := apiserver.NewProvider(serverUrl, 1, apiserver.DefaultTimeout)
|
||||
if err := apiServerProvider.TestConnection(); err == nil {
|
||||
logger.Log.Infof("%v found Mizu server tunnel available and connected successfully to API server", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
return true
|
||||
}
|
||||
|
||||
connectedToApiServer := false
|
||||
|
||||
if err := checkProxy(serverUrl, kubernetesProvider); err != nil {
|
||||
logger.Log.Errorf("%v couldn't connect to API server using proxy, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
} else {
|
||||
connectedToApiServer = true
|
||||
logger.Log.Infof("%v connected successfully to API server using proxy", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
}
|
||||
|
||||
if err := checkPortForward(serverUrl, kubernetesProvider); err != nil {
|
||||
logger.Log.Errorf("%v couldn't connect to API server using port-forward, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
} else {
|
||||
connectedToApiServer = true
|
||||
logger.Log.Infof("%v connected successfully to API server using port-forward", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
}
|
||||
|
||||
return connectedToApiServer
|
||||
}
|
||||
|
||||
func checkProxy(serverUrl string, kubernetesProvider *kubernetes.Provider) error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Tap.GuiPort, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName, cancel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
apiServerProvider := apiserver.NewProvider(serverUrl, apiserver.DefaultRetries, apiserver.DefaultTimeout)
|
||||
if err := apiServerProvider.TestConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := httpServer.Shutdown(ctx); err != nil {
|
||||
logger.Log.Debugf("Error occurred while stopping proxy, err: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkPortForward(serverUrl string, kubernetesProvider *kubernetes.Provider) error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
podRegex, _ := regexp.Compile(kubernetes.ApiServerPodName)
|
||||
forwarder, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.MizuResourcesNamespace, podRegex, config.Config.Tap.GuiPort, ctx, cancel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
apiServerProvider := apiserver.NewProvider(serverUrl, apiserver.DefaultRetries, apiserver.DefaultTimeout)
|
||||
if err := apiServerProvider.TestConnection(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
forwarder.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkK8sResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
|
||||
logger.Log.Infof("\nk8s-components\n--------------------")
|
||||
|
||||
exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.MizuResourcesNamespace)
|
||||
allResourcesExist := checkResourceExist(config.Config.MizuResourcesNamespace, "namespace", exist, err)
|
||||
|
||||
exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ConfigMapName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.ConfigMapName, "config map", exist, err) && allResourcesExist
|
||||
|
||||
exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ServiceAccountName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
|
||||
|
||||
if config.Config.IsNsRestrictedMode() {
|
||||
exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
|
||||
|
||||
exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleBindingName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
|
||||
} else {
|
||||
exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err) && allResourcesExist
|
||||
|
||||
exist, err = kubernetesProvider.DoesClusterRoleBindingExist(ctx, kubernetes.ClusterRoleBindingName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
|
||||
}
|
||||
|
||||
exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName)
|
||||
allResourcesExist = checkResourceExist(kubernetes.ApiServerPodName, "service", exist, err) && allResourcesExist
|
||||
|
||||
allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
|
||||
|
||||
return allResourcesExist
|
||||
}
|
||||
|
||||
func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
|
||||
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName); err != nil {
|
||||
logger.Log.Errorf("%v error checking if '%v' pod is running, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName, err)
|
||||
return false
|
||||
} else if len(pods) == 0 {
|
||||
logger.Log.Errorf("%v '%v' pod doesn't exist", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName)
|
||||
return false
|
||||
} else if !kubernetes.IsPodRunning(&pods[0]) {
|
||||
logger.Log.Errorf("%v '%v' pod not running", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Log.Infof("%v '%v' pod running", fmt.Sprintf(uiUtils.Green, "√"), kubernetes.ApiServerPodName)
|
||||
|
||||
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.MizuResourcesNamespace, kubernetes.TapperPodName); err != nil {
|
||||
logger.Log.Errorf("%v error checking if '%v' pods are running, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.TapperPodName, err)
|
||||
return false
|
||||
} else {
|
||||
tappers := 0
|
||||
notRunningTappers := 0
|
||||
|
||||
for _, pod := range pods {
|
||||
tappers += 1
|
||||
if !kubernetes.IsPodRunning(&pod) {
|
||||
notRunningTappers += 1
|
||||
}
|
||||
}
|
||||
|
||||
if notRunningTappers > 0 {
|
||||
logger.Log.Errorf("%v '%v' %v/%v pods are not running", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.TapperPodName, notRunningTappers, tappers)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Log.Infof("%v '%v' %v pods running", fmt.Sprintf(uiUtils.Green, "√"), kubernetes.TapperPodName, tappers)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool {
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v error checking if '%v' %v exists, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), resourceName, resourceType, err)
|
||||
return false
|
||||
} else if !exist {
|
||||
logger.Log.Errorf("%v '%v' %v doesn't exist", fmt.Sprintf(uiUtils.Red, "✗"), resourceName, resourceType)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Log.Infof("%v '%v' %v exists", fmt.Sprintf(uiUtils.Green, "√"), resourceName, resourceType)
|
||||
return true
|
||||
}
|
||||
|
||||
func checkK8sTapPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
|
||||
logger.Log.Infof("\nkubernetes-permissions\n--------------------")
|
||||
|
||||
var filePath string
|
||||
if config.Config.IsNsRestrictedMode() {
|
||||
filePath = "permissionFiles/permissions-ns-tap.yaml"
|
||||
} else {
|
||||
filePath = "permissionFiles/permissions-all-namespaces-tap.yaml"
|
||||
}
|
||||
|
||||
data, err := embedFS.ReadFile(filePath)
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return false
|
||||
}
|
||||
|
||||
obj, err := getDecodedObject(data)
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return false
|
||||
}
|
||||
|
||||
var rules []rbac.PolicyRule
|
||||
if config.Config.IsNsRestrictedMode() {
|
||||
rules = obj.(*rbac.Role).Rules
|
||||
} else {
|
||||
rules = obj.(*rbac.ClusterRole).Rules
|
||||
}
|
||||
|
||||
return checkPermissions(ctx, kubernetesProvider, rules)
|
||||
}
|
||||
|
||||
func getDecodedObject(data []byte) (runtime.Object, error) {
|
||||
decode := scheme.Codecs.UniversalDeserializer().Decode
|
||||
|
||||
obj, _, err := decode(data, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func checkPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider, rules []rbac.PolicyRule) bool {
|
||||
permissionsExist := true
|
||||
|
||||
for _, rule := range rules {
|
||||
for _, group := range rule.APIGroups {
|
||||
for _, resource := range rule.Resources {
|
||||
for _, verb := range rule.Verbs {
|
||||
exist, err := kubernetesProvider.CanI(ctx, config.Config.MizuResourcesNamespace, resource, verb, group)
|
||||
permissionsExist = checkPermissionExist(group, resource, verb, exist, err) && permissionsExist
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return permissionsExist
|
||||
}
|
||||
|
||||
func checkPermissionExist(group string, resource string, verb string, exist bool, err error) bool {
|
||||
if err != nil {
|
||||
logger.Log.Errorf("%v error checking permission for %v %v in group '%v', err: %v", fmt.Sprintf(uiUtils.Red, "✗"), verb, resource, group, err)
|
||||
return false
|
||||
} else if !exist {
|
||||
logger.Log.Errorf("%v can't %v %v in group '%v'", fmt.Sprintf(uiUtils.Red, "✗"), verb, resource, group)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Log.Infof("%v can %v %v in group '%v'", fmt.Sprintf(uiUtils.Green, "√"), verb, resource, group)
|
||||
return true
|
||||
}
|
||||
|
||||
func checkImagePullInCluster(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
|
||||
logger.Log.Infof("\nimage-pull-in-cluster\n--------------------")
|
||||
|
||||
podName := "image-pull-in-cluster"
|
||||
|
||||
defer removeImagePullInClusterResources(ctx, kubernetesProvider, podName)
|
||||
if err := createImagePullInClusterResources(ctx, kubernetesProvider, podName); err != nil {
|
||||
logger.Log.Errorf("%v error while creating image pull in cluster resources, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return false
|
||||
}
|
||||
|
||||
if err := checkImagePulled(ctx, kubernetesProvider, podName); err != nil {
|
||||
logger.Log.Errorf("%v cluster is not able to pull mizu containers from docker hub, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Log.Infof("%v cluster is able to pull mizu containers from docker hub", fmt.Sprintf(uiUtils.Green, "√"))
|
||||
return true
|
||||
}
|
||||
|
||||
func checkImagePulled(ctx context.Context, kubernetesProvider *kubernetes.Provider, podName string) error {
|
||||
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", podName))
|
||||
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
|
||||
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.MizuResourcesNamespace}, podWatchHelper)
|
||||
|
||||
timeAfter := time.After(30 * time.Second)
|
||||
|
||||
for {
|
||||
select {
|
||||
case wEvent, ok := <-eventChan:
|
||||
if !ok {
|
||||
eventChan = nil
|
||||
continue
|
||||
}
|
||||
|
||||
pod, err := wEvent.ToPod()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if pod.Status.Phase == core.PodRunning {
|
||||
return nil
|
||||
}
|
||||
case err, ok := <-errorChan:
|
||||
if !ok {
|
||||
errorChan = nil
|
||||
continue
|
||||
}
|
||||
|
||||
return err
|
||||
case <-timeAfter:
|
||||
return fmt.Errorf("image not pulled in time")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeImagePullInClusterResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, podName string) {
|
||||
if err := kubernetesProvider.RemovePod(ctx, config.Config.MizuResourcesNamespace, podName); err != nil {
|
||||
logger.Log.Debugf("error while removing image pull in cluster resources, err: %v", err)
|
||||
}
|
||||
|
||||
if !config.Config.IsNsRestrictedMode() {
|
||||
if err := kubernetesProvider.RemoveNamespace(ctx, config.Config.MizuResourcesNamespace); err != nil {
|
||||
logger.Log.Debugf("error while removing image pull in cluster resources, err: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createImagePullInClusterResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, podName string) error {
|
||||
if !config.Config.IsNsRestrictedMode() {
|
||||
if _, err := kubernetesProvider.CreateNamespace(ctx, config.Config.MizuResourcesNamespace); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var zero int64
|
||||
pod := &core.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: core.PodSpec{
|
||||
Containers: []core.Container{
|
||||
{
|
||||
Name: "probe",
|
||||
Image: "up9inc/busybox",
|
||||
ImagePullPolicy: "Always",
|
||||
Command: []string{"cat"},
|
||||
Stdin: true,
|
||||
},
|
||||
},
|
||||
TerminationGracePeriodSeconds: &zero,
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := kubernetesProvider.CreatePod(ctx, config.Config.MizuResourcesNamespace, pod); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -40,7 +40,7 @@ type ConfigStruct struct {
 	HeadlessMode bool                 `yaml:"headless" default:"false"`
 	LogLevelStr  string               `yaml:"log-level,omitempty" default:"INFO" readonly:""`
 	ServiceMap   bool                 `yaml:"service-map" default:"true"`
-	OAS          bool                 `yaml:"oas,omitempty" default:"false" readonly:""`
+	OAS          bool                 `yaml:"oas" default:"true"`
 	Elastic      shared.ElasticConfig `yaml:"elastic"`
 }

@@ -1,9 +1,13 @@
 package configStructs
 
 const (
-	PreTapCheckName = "pre-tap"
+	PreTapCheckName     = "pre-tap"
+	PreInstallCheckName = "pre-install"
+	ImagePullCheckName  = "image-pull"
 )
 
 type CheckConfig struct {
-	PreTap bool `yaml:"pre-tap"`
+	PreTap     bool `yaml:"pre-tap"`
+	PreInstall bool `yaml:"pre-install"`
+	ImagePull  bool `yaml:"image-pull"`
 }

@@ -11,7 +11,7 @@ require (
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
|
||||
github.com/spf13/cobra v1.3.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/up9inc/basenine/server/lib v0.0.0-20220315070758-3a76cfc4378e
|
||||
github.com/up9inc/basenine/server/lib v0.0.0-20220326121918-785f3061c8ce
|
||||
github.com/up9inc/mizu/shared v0.0.0
|
||||
github.com/up9inc/mizu/tap/api v0.0.0
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
|
||||
|
||||
@@ -600,8 +600,8 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/up9inc/basenine/server/lib v0.0.0-20220315070758-3a76cfc4378e h1:reG/QwyxdfvGObfdrae7DZc3rTMiGwQ6S/4PRkwtBoE=
|
||||
github.com/up9inc/basenine/server/lib v0.0.0-20220315070758-3a76cfc4378e/go.mod h1:ZIkxWiJm65jYQIso9k+OZKhR7gQ1we2jNyE2kQX9IQI=
|
||||
github.com/up9inc/basenine/server/lib v0.0.0-20220326121918-785f3061c8ce h1:PypqybjmuxftGkX4NmP4JAUyEykZj2r6W4r9lnRZ/kE=
|
||||
github.com/up9inc/basenine/server/lib v0.0.0-20220326121918-785f3061c8ce/go.mod h1:ZIkxWiJm65jYQIso9k+OZKhR7gQ1we2jNyE2kQX9IQI=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
|
||||
github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk=
|
||||
|
||||
@@ -1,18 +1,20 @@
|
||||
FROM dockcross/linux-arm64-musl:latest AS builder-from-amd64-to-arm64v8
|
||||
|
||||
# Install Go
|
||||
RUN curl https://go.dev/dl/go1.17.6.linux-amd64.tar.gz -Lo ./go.linux-amd64.tar.gz
|
||||
RUN curl https://go.dev/dl/go1.17.6.linux-amd64.tar.gz.asc -Lo ./go.linux-amd64.tar.gz.asc
|
||||
RUN curl https://dl.google.com/dl/linux/linux_signing_key.pub -Lo linux_signing_key.pub
|
||||
RUN gpg --import linux_signing_key.pub && gpg --verify ./go.linux-amd64.tar.gz.asc ./go.linux-amd64.tar.gz
|
||||
RUN rm -rf /usr/local/go && tar -C /usr/local -xzf go.linux-amd64.tar.gz
|
||||
RUN curl https://go.dev/dl/go1.17.6.linux-amd64.tar.gz -Lo ./go.linux-amd64.tar.gz \
|
||||
&& curl https://go.dev/dl/go1.17.6.linux-amd64.tar.gz.asc -Lo ./go.linux-amd64.tar.gz.asc \
|
||||
&& curl https://dl.google.com/dl/linux/linux_signing_key.pub -Lo linux_signing_key.pub \
|
||||
&& gpg --import linux_signing_key.pub && gpg --verify ./go.linux-amd64.tar.gz.asc ./go.linux-amd64.tar.gz \
|
||||
&& rm -rf /usr/local/go && tar -C /usr/local -xzf go.linux-amd64.tar.gz
|
||||
ENV PATH "$PATH:/usr/local/go/bin"
|
||||
|
||||
# Compile libpcap from source
|
||||
RUN curl https://www.tcpdump.org/release/libpcap-1.10.1.tar.gz -Lo ./libpcap.tar.gz
|
||||
RUN curl https://www.tcpdump.org/release/libpcap-1.10.1.tar.gz.sig -Lo ./libpcap.tar.gz.sig
|
||||
RUN curl https://www.tcpdump.org/release/signing-key.asc -Lo ./signing-key.asc
|
||||
RUN gpg --import signing-key.asc && gpg --verify libpcap.tar.gz.sig libpcap.tar.gz
|
||||
RUN tar -xzf libpcap.tar.gz && mv ./libpcap-* ./libpcap
|
||||
RUN cd ./libpcap && ./configure --host=arm && make
|
||||
RUN cp /work/libpcap/libpcap.a /usr/xcc/aarch64-linux-musl-cross/lib/gcc/aarch64-linux-musl/*/
|
||||
RUN curl https://www.tcpdump.org/release/libpcap-1.10.1.tar.gz -Lo ./libpcap.tar.gz \
|
||||
&& curl https://www.tcpdump.org/release/libpcap-1.10.1.tar.gz.sig -Lo ./libpcap.tar.gz.sig \
|
||||
&& curl https://www.tcpdump.org/release/signing-key.asc -Lo ./signing-key.asc \
|
||||
&& gpg --import signing-key.asc && gpg --verify libpcap.tar.gz.sig libpcap.tar.gz \
|
||||
&& tar -xzf libpcap.tar.gz && mv ./libpcap-* ./libpcap
|
||||
WORKDIR /work/libpcap
|
||||
RUN ./configure --host=arm && make \
|
||||
&& cp /work/libpcap/libpcap.a /usr/xcc/aarch64-linux-musl-cross/lib/gcc/aarch64-linux-musl/*/
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ const (
|
||||
SyncEntriesConfigEnvVar = "SYNC_ENTRIES_CONFIG"
|
||||
HostModeEnvVar = "HOST_MODE"
|
||||
NodeNameEnvVar = "NODE_NAME"
|
||||
TappedAddressesPerNodeDictEnvVar = "TAPPED_ADDRESSES_PER_HOST"
|
||||
ConfigDirPath = "/app/config/"
|
||||
DataDirPath = "/app/data/"
|
||||
ValidationRulesFileName = "validation-rules.yaml"
|
||||
|
||||
@@ -31,7 +31,8 @@ type MizuTapperSyncer struct {
|
||||
TapPodChangesOut chan TappedPodChangeEvent
|
||||
TapperStatusChangedOut chan shared.TapperStatus
|
||||
ErrorOut chan K8sTapManagerError
|
||||
nodeToTappedPodMap map[string][]core.Pod
|
||||
nodeToTappedPodMap shared.NodeToPodsMap
|
||||
tappedNodes []string
|
||||
}
|
||||
|
||||
type TapperSyncerConfig struct {
|
||||
@@ -94,10 +95,6 @@ func (tapperSyncer *MizuTapperSyncer) watchTapperPods() {
|
||||
continue
|
||||
}
|
||||
|
||||
if tapperSyncer.startTime.After(pod.CreationTimestamp.Time) {
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Log.Debugf("Watching tapper pods loop, tapper: %v, node: %v, status: %v", pod.Name, pod.Spec.NodeName, pod.Status.Phase)
|
||||
if pod.Spec.NodeName != "" {
|
||||
tapperStatus := shared.TapperStatus{TapperName: pod.Name, NodeName: pod.Spec.NodeName, Status: string(pod.Status.Phase)}
|
||||
@@ -137,10 +134,6 @@ func (tapperSyncer *MizuTapperSyncer) watchTapperEvents() {
|
||||
continue
|
||||
}
|
||||
|
||||
if tapperSyncer.startTime.After(event.CreationTimestamp.Time) {
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Log.Debugf(
|
||||
fmt.Sprintf("Watching tapper events loop, event %s, time: %v, resource: %s (%s), reason: %s, note: %s",
|
||||
event.Name,
|
||||
@@ -185,7 +178,7 @@ func (tapperSyncer *MizuTapperSyncer) watchPodsForTapping() {
|
||||
podWatchHelper := NewPodWatchHelper(tapperSyncer.kubernetesProvider, &tapperSyncer.config.PodFilterRegex)
|
||||
eventChan, errorChan := FilteredWatch(tapperSyncer.context, podWatchHelper, tapperSyncer.config.TargetNamespaces, podWatchHelper)
|
||||
|
||||
restartTappers := func() {
|
||||
handleChangeInPods := func() {
|
||||
err, changeFound := tapperSyncer.updateCurrentlyTappedPods()
|
||||
if err != nil {
|
||||
tapperSyncer.ErrorOut <- K8sTapManagerError{
|
||||
@@ -205,7 +198,7 @@ func (tapperSyncer *MizuTapperSyncer) watchPodsForTapping() {
|
||||
}
|
||||
}
|
||||
}
|
||||
restartTappersDebouncer := debounce.NewDebouncer(updateTappersDelay, restartTappers)
|
||||
restartTappersDebouncer := debounce.NewDebouncer(updateTappersDelay, handleChangeInPods)
|
||||
|
||||
for {
|
||||
select {
|
||||
@@ -303,6 +296,20 @@ func (tapperSyncer *MizuTapperSyncer) updateCurrentlyTappedPods() (err error, ch
|
||||
}
|
||||
|
||||
func (tapperSyncer *MizuTapperSyncer) updateMizuTappers() error {
|
||||
nodesToTap := make([]string, len(tapperSyncer.nodeToTappedPodMap))
|
||||
i := 0
|
||||
for node := range tapperSyncer.nodeToTappedPodMap {
|
||||
nodesToTap[i] = node
|
||||
i++
|
||||
}
|
||||
|
||||
if shared.EqualStringSlices(nodesToTap, tapperSyncer.tappedNodes) {
|
||||
logger.Log.Debug("Skipping apply, DaemonSet is up to date")
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Log.Debugf("Updating DaemonSet to run on nodes: %v", nodesToTap)
|
||||
|
||||
if len(tapperSyncer.nodeToTappedPodMap) > 0 {
|
||||
var serviceAccountName string
|
||||
if tapperSyncer.config.MizuServiceAccountExists {
|
||||
@@ -311,6 +318,11 @@ func (tapperSyncer *MizuTapperSyncer) updateMizuTappers() error {
|
||||
serviceAccountName = ""
|
||||
}
|
||||
|
||||
nodeNames := make([]string, 0, len(tapperSyncer.nodeToTappedPodMap))
|
||||
for nodeName := range tapperSyncer.nodeToTappedPodMap {
|
||||
nodeNames = append(nodeNames, nodeName)
|
||||
}
|
||||
|
||||
if err := tapperSyncer.kubernetesProvider.ApplyMizuTapperDaemonSet(
|
||||
tapperSyncer.context,
|
||||
tapperSyncer.config.MizuResourcesNamespace,
|
||||
@@ -318,7 +330,7 @@ func (tapperSyncer *MizuTapperSyncer) updateMizuTappers() error {
|
||||
tapperSyncer.config.AgentImage,
|
||||
TapperPodName,
|
||||
fmt.Sprintf("%s.%s.svc.cluster.local", ApiServerPodName, tapperSyncer.config.MizuResourcesNamespace),
|
||||
tapperSyncer.nodeToTappedPodMap,
|
||||
nodeNames,
|
||||
serviceAccountName,
|
||||
tapperSyncer.config.TapperResources,
|
||||
tapperSyncer.config.ImagePullPolicy,
|
||||
@@ -343,5 +355,7 @@ func (tapperSyncer *MizuTapperSyncer) updateMizuTappers() error {
|
||||
logger.Log.Debugf("Successfully reset tapper daemon set")
|
||||
}
|
||||
|
||||
tapperSyncer.tappedNodes = nodesToTap
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -708,18 +708,13 @@ func (provider *Provider) CreateConfigMap(ctx context.Context, namespace string,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, apiServerPodIp string, nodeToTappedPodMap map[string][]core.Pod, serviceAccountName string, resources shared.Resources, imagePullPolicy core.PullPolicy, mizuApiFilteringOptions api.TrafficFilteringOptions, logLevel logging.Level, serviceMesh bool, tls bool) error {
|
||||
logger.Log.Debugf("Applying %d tapper daemon sets, ns: %s, daemonSetName: %s, podImage: %s, tapperPodName: %s", len(nodeToTappedPodMap), namespace, daemonSetName, podImage, tapperPodName)
|
||||
func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, apiServerPodIp string, nodeNames []string, serviceAccountName string, resources shared.Resources, imagePullPolicy core.PullPolicy, mizuApiFilteringOptions api.TrafficFilteringOptions, logLevel logging.Level, serviceMesh bool, tls bool) error {
|
||||
logger.Log.Debugf("Applying %d tapper daemon sets, ns: %s, daemonSetName: %s, podImage: %s, tapperPodName: %s", len(nodeNames), namespace, daemonSetName, podImage, tapperPodName)
|
||||
|
||||
if len(nodeToTappedPodMap) == 0 {
|
||||
if len(nodeNames) == 0 {
|
||||
return fmt.Errorf("daemon set %s must tap at least 1 pod", daemonSetName)
|
||||
}
|
||||
|
||||
nodeToTappedPodMapJsonStr, err := json.Marshal(nodeToTappedPodMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mizuApiFilteringOptionsJsonStr, err := json.Marshal(mizuApiFilteringOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -773,7 +768,6 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
|
||||
agentContainer.WithEnv(
|
||||
applyconfcore.EnvVar().WithName(shared.LogLevelEnvVar).WithValue(logLevel.String()),
|
||||
applyconfcore.EnvVar().WithName(shared.HostModeEnvVar).WithValue("1"),
|
||||
applyconfcore.EnvVar().WithName(shared.TappedAddressesPerNodeDictEnvVar).WithValue(string(nodeToTappedPodMapJsonStr)),
|
||||
applyconfcore.EnvVar().WithName(shared.GoGCEnvVar).WithValue("12800"),
|
||||
applyconfcore.EnvVar().WithName(shared.MizuFilteringOptionsEnvVar).WithValue(string(mizuApiFilteringOptionsJsonStr)),
|
||||
)
|
||||
@@ -811,10 +805,6 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
|
||||
agentResources := applyconfcore.ResourceRequirements().WithRequests(agentResourceRequests).WithLimits(agentResourceLimits)
|
||||
agentContainer.WithResources(agentResources)
|
||||
|
||||
nodeNames := make([]string, 0, len(nodeToTappedPodMap))
|
||||
for nodeName := range nodeToTappedPodMap {
|
||||
nodeNames = append(nodeNames, nodeName)
|
||||
}
|
||||
nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
|
||||
nodeSelectorRequirement.WithKey("kubernetes.io/hostname")
|
||||
nodeSelectorRequirement.WithOperator(core.NodeSelectorOpIn)
|
||||
|
||||
@@ -5,12 +5,11 @@ import (
|
||||
|
||||
"github.com/up9inc/mizu/shared"
|
||||
core "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func GetNodeHostToTappedPodsMap(tappedPods []core.Pod) map[string][]core.Pod {
|
||||
nodeToTappedPodMap := make(map[string][]core.Pod)
|
||||
func GetNodeHostToTappedPodsMap(tappedPods []core.Pod) shared.NodeToPodsMap {
|
||||
nodeToTappedPodMap := make(shared.NodeToPodsMap)
|
||||
for _, pod := range tappedPods {
|
||||
minimizedPod := getMinimizedPod(pod)
|
||||
|
||||
@@ -29,18 +28,18 @@ func getMinimizedPod(fullPod core.Pod) core.Pod {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fullPod.Name,
|
||||
},
|
||||
Status: v1.PodStatus{
|
||||
Status: core.PodStatus{
|
||||
PodIP: fullPod.Status.PodIP,
|
||||
ContainerStatuses: getMinimizedContainerStatuses(fullPod),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func getMinimizedContainerStatuses(fullPod core.Pod) []v1.ContainerStatus {
|
||||
result := make([]v1.ContainerStatus, len(fullPod.Status.ContainerStatuses))
|
||||
func getMinimizedContainerStatuses(fullPod core.Pod) []core.ContainerStatus {
|
||||
result := make([]core.ContainerStatus, len(fullPod.Status.ContainerStatuses))
|
||||
|
||||
for i, container := range fullPod.Status.ContainerStatuses {
|
||||
result[i] = v1.ContainerStatus{
|
||||
result[i] = core.ContainerStatus{
|
||||
ContainerID: container.ContainerID,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,24 +6,25 @@ import (

"github.com/op/go-logging"
"github.com/up9inc/mizu/shared/logger"
v1 "k8s.io/api/core/v1"

"gopkg.in/yaml.v3"
v1 "k8s.io/api/core/v1"
)

type WebSocketMessageType string

const (
WebSocketMessageTypeEntry WebSocketMessageType = "entry"
WebSocketMessageTypeFullEntry WebSocketMessageType = "fullEntry"
WebSocketMessageTypeTappedEntry WebSocketMessageType = "tappedEntry"
WebSocketMessageTypeUpdateStatus WebSocketMessageType = "status"
WebSocketMessageTypeAnalyzeStatus WebSocketMessageType = "analyzeStatus"
WebsocketMessageTypeOutboundLink WebSocketMessageType = "outboundLink"
WebSocketMessageTypeToast WebSocketMessageType = "toast"
WebSocketMessageTypeQueryMetadata WebSocketMessageType = "queryMetadata"
WebSocketMessageTypeStartTime WebSocketMessageType = "startTime"
WebSocketMessageTypeTapConfig WebSocketMessageType = "tapConfig"
WebSocketMessageTypeEntry WebSocketMessageType = "entry"
WebSocketMessageTypeFullEntry WebSocketMessageType = "fullEntry"
WebSocketMessageTypeTappedEntry WebSocketMessageType = "tappedEntry"
WebSocketMessageTypeUpdateStatus WebSocketMessageType = "status"
WebSocketMessageTypeUpdateTappedPods WebSocketMessageType = "tappedPods"
WebSocketMessageTypeAnalyzeStatus WebSocketMessageType = "analyzeStatus"
WebsocketMessageTypeOutboundLink WebSocketMessageType = "outboundLink"
WebSocketMessageTypeToast WebSocketMessageType = "toast"
WebSocketMessageTypeQueryMetadata WebSocketMessageType = "queryMetadata"
WebSocketMessageTypeStartTime WebSocketMessageType = "startTime"
WebSocketMessageTypeTapConfig WebSocketMessageType = "tapConfig"
)

type Resources struct {
@@ -75,11 +76,29 @@ type WebSocketStatusMessage struct {
TappingStatus []TappedPodStatus `json:"tappingStatus"`
}

type WebSocketTappedPodsMessage struct {
*WebSocketMessageMetadata
NodeToTappedPodMap NodeToPodsMap `json:"nodeToTappedPodMap"`
}

type WebSocketTapConfigMessage struct {
*WebSocketMessageMetadata
TapTargets []v1.Pod `json:"pods"`
}

type NodeToPodsMap map[string][]v1.Pod

func (np NodeToPodsMap) Summary() map[string][]string {
summary := make(map[string][]string)
for node, pods := range np {
for _, pod := range pods {
summary[node] = append(summary[node], pod.Namespace + "/" + pod.Name)
}
}

return summary
}

type TapperStatus struct {
TapperName string `json:"tapperName"`
NodeName string `json:"nodeName"`
@@ -121,6 +140,15 @@ func CreateWebSocketStatusMessage(tappedPodsStatus []TappedPodStatus) WebSocketS
}
}

func CreateWebSocketTappedPodsMessage(nodeToTappedPodMap NodeToPodsMap) WebSocketTappedPodsMessage {
return WebSocketTappedPodsMessage{
WebSocketMessageMetadata: &WebSocketMessageMetadata{
MessageType: WebSocketMessageTypeUpdateTappedPods,
},
NodeToTappedPodMap: nodeToTappedPodMap,
}
}

func CreateWebSocketMessageTypeAnalyzeStatus(analyzeStatus AnalyzeStatus) WebSocketAnalyzeStatusMessage {
return WebSocketAnalyzeStatusMessage{
WebSocketMessageMetadata: &WebSocketMessageMetadata{
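For orientation, not part of the diff above: the new NodeToPodsMap type and its Summary method can be exercised on their own. The sketch below copies the type and method from the hunk; the node and pod names in main are invented for illustration.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// NodeToPodsMap and Summary are copied verbatim from the hunk above.
type NodeToPodsMap map[string][]v1.Pod

func (np NodeToPodsMap) Summary() map[string][]string {
    summary := make(map[string][]string)
    for node, pods := range np {
        for _, pod := range pods {
            summary[node] = append(summary[node], pod.Namespace+"/"+pod.Name)
        }
    }
    return summary
}

func main() {
    // Node and pod names below are made up.
    np := NodeToPodsMap{
        "node-a": {{ObjectMeta: metav1.ObjectMeta{Namespace: "sock-shop", Name: "carts-5f4b8b9c"}}},
    }
    fmt.Println(np.Summary()) // map[node-a:[sock-shop/carts-5f4b8b9c]]
}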
@@ -32,3 +32,17 @@ func Unique(slice []string) []string {

return list
}

func EqualStringSlices(slice1 []string, slice2 []string) bool {
if len(slice1) != len(slice2) {
return false
}

for _, v := range slice1 {
if !Contains(slice2, v) {
return false
}
}

return true
}
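For orientation, not part of the diff: EqualStringSlices compares by length plus membership, so it is order-insensitive. A minimal sketch follows; Contains is assumed to be the existing membership helper in the same file and is re-implemented here only to make the sketch self-contained.

package main

import "fmt"

// Assumed stand-in for the package's existing Contains helper.
func Contains(slice []string, value string) bool {
    for _, v := range slice {
        if v == value {
            return true
        }
    }
    return false
}

// Copied from the hunk above.
func EqualStringSlices(slice1 []string, slice2 []string) bool {
    if len(slice1) != len(slice2) {
        return false
    }
    for _, v := range slice1 {
        if !Contains(slice2, v) {
            return false
        }
    }
    return true
}

func main() {
    fmt.Println(EqualStringSlices([]string{"a", "b"}, []string{"b", "a"})) // true: the check is order-insensitive
    fmt.Println(EqualStringSlices([]string{"a"}, []string{"a", "b"}))      // false: lengths differ
}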
@@ -19,7 +19,7 @@ import (

const mizuTestEnvVar = "MIZU_TEST"

var UnknownIp net.IP = net.IP{0, 0, 0, 0}
var UnknownIp net.IP = net.IP{0, 0, 0, 0}
var UnknownPort uint16 = 0

type Protocol struct {
@@ -83,6 +83,7 @@ type CounterPair struct {
type GenericMessage struct {
IsRequest bool `json:"isRequest"`
CaptureTime time.Time `json:"captureTime"`
CaptureSize int `json:"captureSize"`
Payload interface{} `json:"payload"`
}

@@ -110,13 +111,27 @@ type SuperIdentifier struct {
IsClosedOthers bool
}

type ReadProgress struct {
readBytes int
lastCurrent int
}

func (p *ReadProgress) Feed(n int) {
p.readBytes += n
}

func (p *ReadProgress) Current() (n int) {
p.lastCurrent = p.readBytes - p.lastCurrent
return p.lastCurrent
}

type Dissector interface {
Register(*Extension)
Ping()
Dissect(b *bufio.Reader, capture Capture, isClient bool, tcpID *TcpID, counterPair *CounterPair, superTimer *SuperTimer, superIdentifier *SuperIdentifier, emitter Emitter, options *TrafficFilteringOptions, reqResMatcher RequestResponseMatcher) error
Dissect(b *bufio.Reader, progress *ReadProgress, capture Capture, isClient bool, tcpID *TcpID, counterPair *CounterPair, superTimer *SuperTimer, superIdentifier *SuperIdentifier, emitter Emitter, options *TrafficFilteringOptions, reqResMatcher RequestResponseMatcher) error
Analyze(item *OutputChannelItem, resolvedSource string, resolvedDestination string, namespace string) *Entry
Summarize(entry *Entry) *BaseEntry
Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error)
Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, err error)
Macros() map[string]string
NewResponseRequestMatcher() RequestResponseMatcher
}
@@ -152,6 +167,8 @@ type Entry struct {
StartTime time.Time `json:"startTime"`
Request map[string]interface{} `json:"request"`
Response map[string]interface{} `json:"response"`
RequestSize int `json:"requestSize"`
ResponseSize int `json:"responseSize"`
ElapsedTime int64 `json:"elapsedTime"`
Rules ApplicableRules `json:"rules,omitempty"`
ContractStatus ContractStatus `json:"contractStatus,omitempty"`
@@ -164,7 +181,6 @@ type Entry struct {
type EntryWrapper struct {
Protocol Protocol `json:"protocol"`
Representation string `json:"representation"`
BodySize int64 `json:"bodySize"`
Data *Entry `json:"data"`
Base *BaseEntry `json:"base"`
Rules []map[string]interface{} `json:"rulesMatched,omitempty"`
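For orientation, not part of the diff: the new ReadProgress counter is the thread that ties this change set together. The sketch below copies the type from the hunk above and shows the intended call pattern (the TCP reader feeds every byte it copies, and a dissector samples Current once per parsed message to fill GenericMessage.CaptureSize and, via Analyze, Entry.RequestSize/ResponseSize); main and the byte counts are invented for illustration.

package main

import "fmt"

// ReadProgress is copied verbatim from the hunk above.
type ReadProgress struct {
    readBytes   int
    lastCurrent int
}

func (p *ReadProgress) Feed(n int) {
    p.readBytes += n
}

func (p *ReadProgress) Current() (n int) {
    p.lastCurrent = p.readBytes - p.lastCurrent
    return p.lastCurrent
}

func main() {
    // Invented byte counts: Feed is called for every chunk handed to the
    // dissector, Current is sampled once per registered request/response.
    p := &ReadProgress{}
    p.Feed(512)              // bytes consumed while reading a request
    fmt.Println(p.Current()) // 512
    p.Feed(2048)             // bytes consumed while reading the response
    fmt.Println(p.Current()) // 2048
}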
@@ -13,4 +13,4 @@ test-pull-bin:

test-pull-expect:
@mkdir -p expect
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect4/amqp/\* expect
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect5/amqp/\* expect

@@ -94,7 +94,7 @@ type AMQPWrapper struct {
Details interface{} `json:"details"`
}

func emitAMQP(event interface{}, _type string, method string, connectionInfo *api.ConnectionInfo, captureTime time.Time, emitter api.Emitter, capture api.Capture) {
func emitAMQP(event interface{}, _type string, method string, connectionInfo *api.ConnectionInfo, captureTime time.Time, captureSize int, emitter api.Emitter, capture api.Capture) {
request := &api.GenericMessage{
IsRequest: true,
CaptureTime: captureTime,

@@ -39,7 +39,7 @@ func (d dissecting) Ping() {

const amqpRequest string = "amqp_request"

func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
r := AmqpReader{b}

var remaining int
@@ -113,11 +113,11 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
case *BasicPublish:
eventBasicPublish.Body = f.Body
superIdentifier.Protocol = &protocol
emitAMQP(*eventBasicPublish, amqpRequest, basicMethodMap[40], connectionInfo, superTimer.CaptureTime, emitter, capture)
emitAMQP(*eventBasicPublish, amqpRequest, basicMethodMap[40], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
case *BasicDeliver:
eventBasicDeliver.Body = f.Body
superIdentifier.Protocol = &protocol
emitAMQP(*eventBasicDeliver, amqpRequest, basicMethodMap[60], connectionInfo, superTimer.CaptureTime, emitter, capture)
emitAMQP(*eventBasicDeliver, amqpRequest, basicMethodMap[60], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
}

case *MethodFrame:
@@ -138,7 +138,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
Arguments: m.Arguments,
}
superIdentifier.Protocol = &protocol
emitAMQP(*eventQueueBind, amqpRequest, queueMethodMap[20], connectionInfo, superTimer.CaptureTime, emitter, capture)
emitAMQP(*eventQueueBind, amqpRequest, queueMethodMap[20], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)

case *BasicConsume:
eventBasicConsume := &BasicConsume{
@@ -151,7 +151,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
Arguments: m.Arguments,
}
superIdentifier.Protocol = &protocol
emitAMQP(*eventBasicConsume, amqpRequest, basicMethodMap[20], connectionInfo, superTimer.CaptureTime, emitter, capture)
emitAMQP(*eventBasicConsume, amqpRequest, basicMethodMap[20], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)

case *BasicDeliver:
eventBasicDeliver.ConsumerTag = m.ConsumerTag
@@ -171,7 +171,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
Arguments: m.Arguments,
}
superIdentifier.Protocol = &protocol
emitAMQP(*eventQueueDeclare, amqpRequest, queueMethodMap[10], connectionInfo, superTimer.CaptureTime, emitter, capture)
emitAMQP(*eventQueueDeclare, amqpRequest, queueMethodMap[10], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)

case *ExchangeDeclare:
eventExchangeDeclare := &ExchangeDeclare{
@@ -185,7 +185,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
Arguments: m.Arguments,
}
superIdentifier.Protocol = &protocol
emitAMQP(*eventExchangeDeclare, amqpRequest, exchangeMethodMap[10], connectionInfo, superTimer.CaptureTime, emitter, capture)
emitAMQP(*eventExchangeDeclare, amqpRequest, exchangeMethodMap[10], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)

case *ConnectionStart:
eventConnectionStart := &ConnectionStart{
@@ -196,7 +196,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
Locales: m.Locales,
}
superIdentifier.Protocol = &protocol
emitAMQP(*eventConnectionStart, amqpRequest, connectionMethodMap[10], connectionInfo, superTimer.CaptureTime, emitter, capture)
emitAMQP(*eventConnectionStart, amqpRequest, connectionMethodMap[10], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)

case *ConnectionClose:
eventConnectionClose := &ConnectionClose{
@@ -206,7 +206,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
MethodId: m.MethodId,
}
superIdentifier.Protocol = &protocol
emitAMQP(*eventConnectionClose, amqpRequest, connectionMethodMap[50], connectionInfo, superTimer.CaptureTime, emitter, capture)
emitAMQP(*eventConnectionClose, amqpRequest, connectionMethodMap[50], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
}

default:
@@ -236,6 +236,7 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
Namespace: namespace,
Outgoing: item.ConnectionInfo.IsOutgoing,
Request: reqDetails,
RequestSize: item.Pair.Request.CaptureSize,
Timestamp: item.Timestamp,
StartTime: item.Pair.Request.CaptureTime,
ElapsedTime: 0,
@@ -301,8 +302,7 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
}
}

func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error) {
bodySize = 0
func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, err error) {
representation := make(map[string]interface{})
var repRequest []interface{}
switch request["method"].(string) {
@@ -122,7 +122,7 @@ func TestDissect(t *testing.T) {
DstPort: "2",
}
reqResMatcher := dissector.NewResponseRequestMatcher()
err = dissector.Dissect(bufferClient, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
err = dissector.Dissect(bufferClient, &api.ReadProgress{}, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
panic(err)
}
@@ -140,7 +140,7 @@ func TestDissect(t *testing.T) {
SrcPort: "2",
DstPort: "1",
}
err = dissector.Dissect(bufferServer, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
err = dissector.Dissect(bufferServer, &api.ReadProgress{}, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
panic(err)
}
@@ -319,7 +319,7 @@ func TestRepresent(t *testing.T) {

var objects []string
for _, entry := range entries {
object, _, err := dissector.Represent(entry.Request, entry.Response)
object, err := dissector.Represent(entry.Request, entry.Response)
assert.Nil(t, err)
objects = append(objects, string(object))
}

@@ -13,4 +13,4 @@ test-pull-bin:

test-pull-expect:
@mkdir -p expect
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect4/http/\* expect
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect5/http/\* expect
@@ -47,7 +47,7 @@ func replaceForwardedFor(item *api.OutputChannelItem) {
item.ConnectionInfo.ClientPort = ""
}

func handleHTTP2Stream(http2Assembler *Http2Assembler, capture api.Capture, tcpID *api.TcpID, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) error {
func handleHTTP2Stream(http2Assembler *Http2Assembler, progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) error {
streamID, messageHTTP1, isGrpc, err := http2Assembler.readMessage()
if err != nil {
return err
@@ -66,7 +66,7 @@ func handleHTTP2Stream(http2Assembler *Http2Assembler, capture api.Capture, tcpI
streamID,
"HTTP2",
)
item = reqResMatcher.registerRequest(ident, &messageHTTP1, superTimer.CaptureTime, messageHTTP1.ProtoMinor)
item = reqResMatcher.registerRequest(ident, &messageHTTP1, superTimer.CaptureTime, progress.Current(), messageHTTP1.ProtoMinor)
if item != nil {
item.ConnectionInfo = &api.ConnectionInfo{
ClientIP: tcpID.SrcIP,
@@ -86,7 +86,7 @@ func handleHTTP2Stream(http2Assembler *Http2Assembler, capture api.Capture, tcpI
streamID,
"HTTP2",
)
item = reqResMatcher.registerResponse(ident, &messageHTTP1, superTimer.CaptureTime, messageHTTP1.ProtoMinor)
item = reqResMatcher.registerResponse(ident, &messageHTTP1, superTimer.CaptureTime, progress.Current(), messageHTTP1.ProtoMinor)
if item != nil {
item.ConnectionInfo = &api.ConnectionInfo{
ClientIP: tcpID.DstIP,
@@ -111,7 +111,7 @@ func handleHTTP2Stream(http2Assembler *Http2Assembler, capture api.Capture, tcpI
return nil
}

func handleHTTP1ClientStream(b *bufio.Reader, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, req *http.Request, err error) {
func handleHTTP1ClientStream(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, req *http.Request, err error) {
req, err = http.ReadRequest(b)
if err != nil {
return
@@ -139,7 +139,7 @@ func handleHTTP1ClientStream(b *bufio.Reader, capture api.Capture, tcpID *api.Tc
requestCounter,
"HTTP1",
)
item := reqResMatcher.registerRequest(ident, req, superTimer.CaptureTime, req.ProtoMinor)
item := reqResMatcher.registerRequest(ident, req, superTimer.CaptureTime, progress.Current(), req.ProtoMinor)
if item != nil {
item.ConnectionInfo = &api.ConnectionInfo{
ClientIP: tcpID.SrcIP,
@@ -154,7 +154,7 @@ func handleHTTP1ClientStream(b *bufio.Reader, capture api.Capture, tcpID *api.Tc
return
}

func handleHTTP1ServerStream(b *bufio.Reader, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, err error) {
func handleHTTP1ServerStream(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, err error) {
var res *http.Response
res, err = http.ReadResponse(b, nil)
if err != nil {
@@ -183,7 +183,7 @@ func handleHTTP1ServerStream(b *bufio.Reader, capture api.Capture, tcpID *api.Tc
responseCounter,
"HTTP1",
)
item := reqResMatcher.registerResponse(ident, res, superTimer.CaptureTime, res.ProtoMinor)
item := reqResMatcher.registerResponse(ident, res, superTimer.CaptureTime, progress.Current(), res.ProtoMinor)
if item != nil {
item.ConnectionInfo = &api.ConnectionInfo{
ClientIP: tcpID.DstIP,
@@ -86,7 +86,7 @@ func (d dissecting) Ping() {
log.Printf("pong %s", http11protocol.Name)
}

func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
reqResMatcher := _reqResMatcher.(*requestResponseMatcher)

var err error
@@ -121,7 +121,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
}

if isHTTP2 {
err = handleHTTP2Stream(http2Assembler, capture, tcpID, superTimer, emitter, options, reqResMatcher)
err = handleHTTP2Stream(http2Assembler, progress, capture, tcpID, superTimer, emitter, options, reqResMatcher)
if err == io.EOF || err == io.ErrUnexpectedEOF {
break
} else if err != nil {
@@ -130,7 +130,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
superIdentifier.Protocol = &http11protocol
} else if isClient {
var req *http.Request
switchingProtocolsHTTP2, req, err = handleHTTP1ClientStream(b, capture, tcpID, counterPair, superTimer, emitter, options, reqResMatcher)
switchingProtocolsHTTP2, req, err = handleHTTP1ClientStream(b, progress, capture, tcpID, counterPair, superTimer, emitter, options, reqResMatcher)
if err == io.EOF || err == io.ErrUnexpectedEOF {
break
} else if err != nil {
@@ -148,7 +148,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
tcpID.DstPort,
"HTTP2",
)
item := reqResMatcher.registerRequest(ident, req, superTimer.CaptureTime, req.ProtoMinor)
item := reqResMatcher.registerRequest(ident, req, superTimer.CaptureTime, progress.Current(), req.ProtoMinor)
if item != nil {
item.ConnectionInfo = &api.ConnectionInfo{
ClientIP: tcpID.SrcIP,
@@ -162,7 +162,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
}
}
} else {
switchingProtocolsHTTP2, err = handleHTTP1ServerStream(b, capture, tcpID, counterPair, superTimer, emitter, options, reqResMatcher)
switchingProtocolsHTTP2, err = handleHTTP1ServerStream(b, progress, capture, tcpID, counterPair, superTimer, emitter, options, reqResMatcher)
if err == io.EOF || err == io.ErrUnexpectedEOF {
break
} else if err != nil {
@@ -271,14 +271,16 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
IP: item.ConnectionInfo.ServerIP,
Port: item.ConnectionInfo.ServerPort,
},
Namespace: namespace,
Outgoing: item.ConnectionInfo.IsOutgoing,
Request: reqDetails,
Response: resDetails,
Timestamp: item.Timestamp,
StartTime: item.Pair.Request.CaptureTime,
ElapsedTime: elapsedTime,
HTTPPair: string(httpPair),
Namespace: namespace,
Outgoing: item.ConnectionInfo.IsOutgoing,
Request: reqDetails,
Response: resDetails,
RequestSize: item.Pair.Request.CaptureSize,
ResponseSize: item.Pair.Response.CaptureSize,
Timestamp: item.Timestamp,
StartTime: item.Pair.Request.CaptureTime,
ElapsedTime: elapsedTime,
HTTPPair: string(httpPair),
}
}

@@ -410,11 +412,9 @@ func representRequest(request map[string]interface{}) (repRequest []interface{})
return
}

func representResponse(response map[string]interface{}) (repResponse []interface{}, bodySize int64) {
func representResponse(response map[string]interface{}) (repResponse []interface{}) {
repResponse = make([]interface{}, 0)

bodySize = int64(response["bodySize"].(float64))

details, _ := json.Marshal([]api.TableData{
{
Name: "Status",
@@ -428,7 +428,7 @@ func representResponse(response map[string]interface{}) (repResponse []interface
},
{
Name: "Body Size (bytes)",
Value: bodySize,
Value: int64(response["bodySize"].(float64)),
Selector: `response.bodySize`,
},
})
@@ -471,10 +471,10 @@ func representResponse(response map[string]interface{}) (repResponse []interface
return
}

func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error) {
func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, err error) {
representation := make(map[string]interface{})
repRequest := representRequest(request)
repResponse, bodySize := representResponse(response)
repResponse := representResponse(response)
representation["request"] = repRequest
representation["response"] = repResponse
object, err = json.Marshal(representation)
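For orientation, not part of the diff: representResponse no longer returns the body size; the "Body Size (bytes)" row now reads it straight from the decoded response map. The snippet below is only a reminder of why the float64 assertion is there: numbers decoded into map[string]interface{} by encoding/json arrive as float64, hence the float64-to-int64 conversion. The payload is made up.

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    // Invented payload: JSON numbers land in the map as float64.
    var response map[string]interface{}
    _ = json.Unmarshal([]byte(`{"bodySize": 137}`), &response)
    fmt.Println(int64(response["bodySize"].(float64))) // 137
}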
@@ -124,7 +124,7 @@ func TestDissect(t *testing.T) {
DstPort: "2",
}
reqResMatcher := dissector.NewResponseRequestMatcher()
err = dissector.Dissect(bufferClient, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
err = dissector.Dissect(bufferClient, &api.ReadProgress{}, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
panic(err)
}
@@ -142,7 +142,7 @@ func TestDissect(t *testing.T) {
SrcPort: "2",
DstPort: "1",
}
err = dissector.Dissect(bufferServer, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
err = dissector.Dissect(bufferServer, &api.ReadProgress{}, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
panic(err)
}
@@ -321,7 +321,7 @@ func TestRepresent(t *testing.T) {

var objects []string
for _, entry := range entries {
object, _, err := dissector.Represent(entry.Request, entry.Response)
object, err := dissector.Represent(entry.Request, entry.Response)
assert.Nil(t, err)
objects = append(objects, string(object))
}

@@ -24,10 +24,11 @@ func (matcher *requestResponseMatcher) GetMap() *sync.Map {
func (matcher *requestResponseMatcher) SetMaxTry(value int) {
}

func (matcher *requestResponseMatcher) registerRequest(ident string, request *http.Request, captureTime time.Time, protoMinor int) *api.OutputChannelItem {
func (matcher *requestResponseMatcher) registerRequest(ident string, request *http.Request, captureTime time.Time, captureSize int, protoMinor int) *api.OutputChannelItem {
requestHTTPMessage := api.GenericMessage{
IsRequest: true,
CaptureTime: captureTime,
CaptureSize: captureSize,
Payload: api.HTTPPayload{
Type: TypeHttpRequest,
Data: request,
@@ -47,10 +48,11 @@ func (matcher *requestResponseMatcher) registerRequest(ident string, request *ht
return nil
}

func (matcher *requestResponseMatcher) registerResponse(ident string, response *http.Response, captureTime time.Time, protoMinor int) *api.OutputChannelItem {
func (matcher *requestResponseMatcher) registerResponse(ident string, response *http.Response, captureTime time.Time, captureSize int, protoMinor int) *api.OutputChannelItem {
responseHTTPMessage := api.GenericMessage{
IsRequest: false,
CaptureTime: captureTime,
CaptureSize: captureSize,
Payload: api.HTTPPayload{
Type: TypeHttpResponse,
Data: response,

@@ -13,4 +13,4 @@ test-pull-bin:

test-pull-expect:
@mkdir -p expect
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect4/kafka/\* expect
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect5/kafka/\* expect
@@ -35,7 +35,7 @@ func (d dissecting) Ping() {
log.Printf("pong %s", _protocol.Name)
}

func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
reqResMatcher := _reqResMatcher.(*requestResponseMatcher)
for {
if superIdentifier.Protocol != nil && superIdentifier.Protocol != &_protocol {
@@ -79,13 +79,15 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
IP: item.ConnectionInfo.ServerIP,
Port: item.ConnectionInfo.ServerPort,
},
Namespace: namespace,
Outgoing: item.ConnectionInfo.IsOutgoing,
Request: reqDetails,
Response: item.Pair.Response.Payload.(map[string]interface{})["details"].(map[string]interface{}),
Timestamp: item.Timestamp,
StartTime: item.Pair.Request.CaptureTime,
ElapsedTime: elapsedTime,
Namespace: namespace,
Outgoing: item.ConnectionInfo.IsOutgoing,
Request: reqDetails,
Response: item.Pair.Response.Payload.(map[string]interface{})["details"].(map[string]interface{}),
RequestSize: item.Pair.Request.CaptureSize,
ResponseSize: item.Pair.Response.CaptureSize,
Timestamp: item.Timestamp,
StartTime: item.Pair.Request.CaptureTime,
ElapsedTime: elapsedTime,
}
}

@@ -208,8 +210,7 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
}
}

func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error) {
bodySize = 0
func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, err error) {
representation := make(map[string]interface{})

apiKey := ApiKey(request["apiKey"].(float64))

@@ -123,7 +123,7 @@ func TestDissect(t *testing.T) {
}
reqResMatcher := dissector.NewResponseRequestMatcher()
reqResMatcher.SetMaxTry(10)
err = dissector.Dissect(bufferClient, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
err = dissector.Dissect(bufferClient, &api.ReadProgress{}, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
log.Println(err)
}
@@ -141,7 +141,7 @@ func TestDissect(t *testing.T) {
SrcPort: "2",
DstPort: "1",
}
err = dissector.Dissect(bufferServer, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
err = dissector.Dissect(bufferServer, &api.ReadProgress{}, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
log.Println(err)
}
@@ -320,7 +320,7 @@ func TestRepresent(t *testing.T) {

var objects []string
for _, entry := range entries {
object, _, err := dissector.Represent(entry.Request, entry.Response)
object, err := dissector.Represent(entry.Request, entry.Response)
assert.Nil(t, err)
objects = append(objects, string(object))
}

@@ -265,6 +265,7 @@ func ReadResponse(r io.Reader, capture api.Capture, tcpID *api.TcpID, counterPai
Request: api.GenericMessage{
IsRequest: true,
CaptureTime: reqResPair.Request.CaptureTime,
CaptureSize: int(reqResPair.Request.Size),
Payload: KafkaPayload{
Data: &KafkaWrapper{
Method: apiNames[apiKey],
@@ -276,6 +277,7 @@ func ReadResponse(r io.Reader, capture api.Capture, tcpID *api.TcpID, counterPai
Response: api.GenericMessage{
IsRequest: false,
CaptureTime: reqResPair.Response.CaptureTime,
CaptureSize: int(reqResPair.Response.Size),
Payload: KafkaPayload{
Data: &KafkaWrapper{
Method: apiNames[apiKey],

@@ -13,4 +13,4 @@ test-pull-bin:

test-pull-expect:
@mkdir -p expect
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect4/redis/\* expect
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect5/redis/\* expect
@@ -6,7 +6,7 @@ import (
"github.com/up9inc/mizu/tap/api"
)

func handleClientStream(capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, request *RedisPacket, reqResMatcher *requestResponseMatcher) error {
func handleClientStream(progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, request *RedisPacket, reqResMatcher *requestResponseMatcher) error {
counterPair.Lock()
counterPair.Request++
requestCounter := counterPair.Request
@@ -21,7 +21,7 @@ func handleClientStream(capture api.Capture, tcpID *api.TcpID, counterPair *api.
requestCounter,
)

item := reqResMatcher.registerRequest(ident, request, superTimer.CaptureTime)
item := reqResMatcher.registerRequest(ident, request, superTimer.CaptureTime, progress.Current())
if item != nil {
item.Capture = capture
item.ConnectionInfo = &api.ConnectionInfo{
@@ -36,7 +36,7 @@ func handleClientStream(capture api.Capture, tcpID *api.TcpID, counterPair *api.
return nil
}

func handleServerStream(capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, response *RedisPacket, reqResMatcher *requestResponseMatcher) error {
func handleServerStream(progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, response *RedisPacket, reqResMatcher *requestResponseMatcher) error {
counterPair.Lock()
counterPair.Response++
responseCounter := counterPair.Response
@@ -51,7 +51,7 @@ func handleServerStream(capture api.Capture, tcpID *api.TcpID, counterPair *api.
responseCounter,
)

item := reqResMatcher.registerResponse(ident, response, superTimer.CaptureTime)
item := reqResMatcher.registerResponse(ident, response, superTimer.CaptureTime, progress.Current())
if item != nil {
item.Capture = capture
item.ConnectionInfo = &api.ConnectionInfo{

@@ -34,7 +34,7 @@ func (d dissecting) Ping() {
log.Printf("pong %s", protocol.Name)
}

func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
reqResMatcher := _reqResMatcher.(*requestResponseMatcher)
is := &RedisInputStream{
Reader: b,
@@ -48,9 +48,9 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
}

if isClient {
err = handleClientStream(capture, tcpID, counterPair, superTimer, emitter, redisPacket, reqResMatcher)
err = handleClientStream(progress, capture, tcpID, counterPair, superTimer, emitter, redisPacket, reqResMatcher)
} else {
err = handleServerStream(capture, tcpID, counterPair, superTimer, emitter, redisPacket, reqResMatcher)
err = handleServerStream(progress, capture, tcpID, counterPair, superTimer, emitter, redisPacket, reqResMatcher)
}

if err != nil {
@@ -82,13 +82,15 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
IP: item.ConnectionInfo.ServerIP,
Port: item.ConnectionInfo.ServerPort,
},
Namespace: namespace,
Outgoing: item.ConnectionInfo.IsOutgoing,
Request: reqDetails,
Response: resDetails,
Timestamp: item.Timestamp,
StartTime: item.Pair.Request.CaptureTime,
ElapsedTime: elapsedTime,
Namespace: namespace,
Outgoing: item.ConnectionInfo.IsOutgoing,
Request: reqDetails,
Response: resDetails,
RequestSize: item.Pair.Request.CaptureSize,
ResponseSize: item.Pair.Response.CaptureSize,
Timestamp: item.Timestamp,
StartTime: item.Pair.Request.CaptureTime,
ElapsedTime: elapsedTime,
}

}
@@ -131,8 +133,7 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
}
}

func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error) {
bodySize = 0
func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, err error) {
representation := make(map[string]interface{})
repRequest := representGeneric(request, `request.`)
repResponse := representGeneric(response, `response.`)

@@ -123,7 +123,7 @@ func TestDissect(t *testing.T) {
DstPort: "2",
}
reqResMatcher := dissector.NewResponseRequestMatcher()
err = dissector.Dissect(bufferClient, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
err = dissector.Dissect(bufferClient, &api.ReadProgress{}, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && reflect.TypeOf(err) != reflect.TypeOf(&ConnectError{}) && err != io.EOF && err != io.ErrUnexpectedEOF {
log.Println(err)
}
@@ -141,7 +141,7 @@ func TestDissect(t *testing.T) {
SrcPort: "2",
DstPort: "1",
}
err = dissector.Dissect(bufferServer, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
err = dissector.Dissect(bufferServer, &api.ReadProgress{}, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && reflect.TypeOf(err) != reflect.TypeOf(&ConnectError{}) && err != io.EOF && err != io.ErrUnexpectedEOF {
log.Println(err)
}
@@ -320,7 +320,7 @@ func TestRepresent(t *testing.T) {

var objects []string
for _, entry := range entries {
object, _, err := dissector.Represent(entry.Request, entry.Response)
object, err := dissector.Represent(entry.Request, entry.Response)
assert.Nil(t, err)
objects = append(objects, string(object))
}

@@ -22,10 +22,11 @@ func (matcher *requestResponseMatcher) GetMap() *sync.Map {
func (matcher *requestResponseMatcher) SetMaxTry(value int) {
}

func (matcher *requestResponseMatcher) registerRequest(ident string, request *RedisPacket, captureTime time.Time) *api.OutputChannelItem {
func (matcher *requestResponseMatcher) registerRequest(ident string, request *RedisPacket, captureTime time.Time, captureSize int) *api.OutputChannelItem {
requestRedisMessage := api.GenericMessage{
IsRequest: true,
CaptureTime: captureTime,
CaptureSize: captureSize,
Payload: RedisPayload{
Data: &RedisWrapper{
Method: string(request.Command),
@@ -48,10 +49,11 @@ func (matcher *requestResponseMatcher) registerRequest(ident string, request *Re
return nil
}

func (matcher *requestResponseMatcher) registerResponse(ident string, response *RedisPacket, captureTime time.Time) *api.OutputChannelItem {
func (matcher *requestResponseMatcher) registerResponse(ident string, response *RedisPacket, captureTime time.Time, captureSize int) *api.OutputChannelItem {
responseRedisMessage := api.GenericMessage{
IsRequest: false,
CaptureTime: captureTime,
CaptureSize: captureSize,
Payload: RedisPayload{
Data: &RedisWrapper{
Method: string(response.Command),
@@ -59,7 +59,6 @@ var memprofile = flag.String("memprofile", "", "Write memory profile")

type TapOpts struct {
HostMode bool
FilterAuthorities []v1.Pod
}

var extensions []*api.Extension // global
@@ -67,6 +66,7 @@ var filteringOptions *api.TrafficFilteringOptions // global
var tapTargets []v1.Pod // global
var packetSourceManager *source.PacketSourceManager // global
var mainPacketInputChan chan source.TcpPacketInfo // global
var tlsTapperInstance *tlstapper.TlsTapper // global

func inArrayInt(arr []int, valueToCheck int) bool {
for _, value := range arr {
@@ -90,16 +90,10 @@ func StartPassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem,
extensions = extensionsRef
filteringOptions = options

if opts.FilterAuthorities == nil {
tapTargets = []v1.Pod{}
} else {
tapTargets = opts.FilterAuthorities
}

if *tls {
for _, e := range extensions {
if e.Protocol.Name == "http" {
startTlsTapper(e, outputItems, options)
tlsTapperInstance = startTlsTapper(e, outputItems, options)
break
}
}
@@ -109,24 +103,39 @@ func StartPassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem,
diagnose.StartMemoryProfiler(os.Getenv(MemoryProfilingDumpPath), os.Getenv(MemoryProfilingTimeIntervalSeconds))
}

go startPassiveTapper(opts, outputItems)
streamsMap, assembler := initializePassiveTapper(opts, outputItems)
go startPassiveTapper(streamsMap, assembler)
}

func UpdateTapTargets(newTapTargets []v1.Pod) {
success := true

tapTargets = newTapTargets
if err := initializePacketSources(); err != nil {
logger.Log.Fatal(err)

packetSourceManager.UpdatePods(tapTargets)

if tlsTapperInstance != nil {
if err := tlstapper.UpdateTapTargets(tlsTapperInstance, &tapTargets, *procfs); err != nil {
tlstapper.LogError(err)
success = false
}
}
printNewTapTargets()

printNewTapTargets(success)
}

func printNewTapTargets() {
func printNewTapTargets(success bool) {
printStr := ""
for _, tapTarget := range tapTargets {
printStr += fmt.Sprintf("%s (%s), ", tapTarget.Status.PodIP, tapTarget.Name)
}
printStr = strings.TrimRight(printStr, ", ")
logger.Log.Infof("Now tapping: %s", printStr)

if success {
logger.Log.Infof("Now tapping: %s", printStr)
} else {
logger.Log.Errorf("Failed to start tapping: %s", printStr)
}
}

func printPeriodicStats(cleaner *Cleaner) {
@@ -197,9 +206,8 @@ func initializePacketSources() error {
}
}

func startPassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem) {
func initializePassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem) (*tcpStreamMap, *tcpAssembler) {
streamsMap := NewTcpStreamMap()
go streamsMap.closeTimedoutTcpStreamChannels()

diagnose.InitializeErrorsMap(*debug, *verbose, *quiet)
diagnose.InitializeTapperInternalStats()
@@ -212,6 +220,12 @@ func startPassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem)

assembler := NewTcpAssembler(outputItems, streamsMap, opts)

return streamsMap, assembler
}

func startPassiveTapper(streamsMap *tcpStreamMap, assembler *tcpAssembler) {
go streamsMap.closeTimedoutTcpStreamChannels()

diagnose.AppStats.SetStartTime(time.Now())

staleConnectionTimeout := time.Second * time.Duration(*staleTimeoutSeconds)
@@ -243,13 +257,18 @@ func startPassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem)
logger.Log.Infof("AppStats: %v", diagnose.AppStats)
}

func startTlsTapper(extension *api.Extension, outputItems chan *api.OutputChannelItem, options *api.TrafficFilteringOptions) {
func startTlsTapper(extension *api.Extension, outputItems chan *api.OutputChannelItem, options *api.TrafficFilteringOptions) *tlstapper.TlsTapper {
tls := tlstapper.TlsTapper{}
tlsPerfBufferSize := os.Getpagesize() * 100

if err := tls.Init(tlsPerfBufferSize, *procfs, extension); err != nil {
tlstapper.LogError(err)
return
return nil
}

if err := tlstapper.UpdateTapTargets(&tls, &tapTargets, *procfs); err != nil {
tlstapper.LogError(err)
return nil
}

// A quick way to instrument libssl.so without PID filtering - used for debuging and troubleshooting
@@ -257,19 +276,16 @@ func startTlsTapper(extension *api.Extension, outputItems chan *api.OutputChanne
if os.Getenv("MIZU_GLOBAL_SSL_LIBRARY") != "" {
if err := tls.GlobalTap(os.Getenv("MIZU_GLOBAL_SSL_LIBRARY")); err != nil {
tlstapper.LogError(err)
return
return nil
}
}

if err := tlstapper.UpdateTapTargets(&tls, &tapTargets, *procfs); err != nil {
tlstapper.LogError(err)
return
}

var emitter api.Emitter = &api.Emitting{
AppStats: &diagnose.AppStats,
OutputChannel: outputItems,
}

go tls.Poll(emitter, options)

return &tls
}
@@ -11,8 +11,16 @@ import (
const bpfFilterMaxPods = 150
const hostSourcePid = "0"

type PacketSourceManagerConfig struct {
mtls bool
procfs string
interfaceName string
behaviour TcpPacketSourceBehaviour
}

type PacketSourceManager struct {
sources map[string]*tcpPacketSource
config PacketSourceManagerConfig
}

func NewPacketSourceManager(procfs string, filename string, interfaceName string,
@@ -28,7 +36,14 @@ func NewPacketSourceManager(procfs string, filename string, interfaceName string
},
}

sourceManager.UpdatePods(mtls, procfs, pods, interfaceName, behaviour)
sourceManager.config = PacketSourceManagerConfig{
mtls: mtls,
procfs: procfs,
interfaceName: interfaceName,
behaviour: behaviour,
}

sourceManager.UpdatePods(pods)
return sourceManager, nil
}

@@ -49,10 +64,9 @@ func newHostPacketSource(filename string, interfaceName string,
return source, nil
}

func (m *PacketSourceManager) UpdatePods(mtls bool, procfs string, pods []v1.Pod,
interfaceName string, behaviour TcpPacketSourceBehaviour) {
if mtls {
m.updateMtlsPods(procfs, pods, interfaceName, behaviour)
func (m *PacketSourceManager) UpdatePods(pods []v1.Pod) {
if m.config.mtls {
m.updateMtlsPods(m.config.procfs, pods, m.config.interfaceName, m.config.behaviour)
}

m.setBPFFilter(pods)
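For orientation, not part of the diff: the point of PacketSourceManagerConfig is that construction-time settings are captured once, so later UpdatePods calls only need the new pod list. The sketch below is a stripped-down stand-in, not the real tap/source package; every type and value in it is invented.

package main

import "fmt"

// Invented stand-in: settings captured at construction time.
type managerConfig struct {
    mtls          bool
    procfs        string
    interfaceName string
}

type packetSourceManager struct {
    config managerConfig
}

// Later refreshes only need the new pod list, mirroring the simplified
// UpdatePods signature in the hunk above.
func (m *packetSourceManager) UpdatePods(pods []string) {
    if m.config.mtls {
        fmt.Printf("refreshing per-pod sources via %s on %s for %v\n",
            m.config.procfs, m.config.interfaceName, pods)
    }
    fmt.Println("re-applying BPF filter for", pods)
}

func main() {
    m := &packetSourceManager{config: managerConfig{mtls: true, procfs: "/proc", interfaceName: "eth0"}}
    m.UpdatePods([]string{"default/httpbin"})
}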
@@ -40,6 +40,7 @@ type tcpReader struct {
isOutgoing bool
msgQueue chan tcpReaderDataMsg // Channel of captured reassembled tcp payload
data []byte
progress *api.ReadProgress
superTimer *api.SuperTimer
parent *tcpStream
packetsSeen uint
@@ -80,6 +81,8 @@ func (h *tcpReader) Read(p []byte) (int, error) {

l := copy(p, h.data)
h.data = h.data[l:]
h.progress.Feed(l)

return l, nil
}

@@ -96,7 +99,7 @@ func (h *tcpReader) run(wg *sync.WaitGroup) {
defer wg.Done()
b := bufio.NewReader(h)
// TODO: Add api.Pcap, api.Envoy and api.Linkerd distinction by refactoring NewPacketSourceManager method
err := h.extension.Dissector.Dissect(b, api.Pcap, h.isClient, h.tcpID, h.counterPair, h.superTimer, h.parent.superIdentifier, h.emitter, filteringOptions, h.reqResMatcher)
err := h.extension.Dissector.Dissect(b, h.progress, api.Pcap, h.isClient, h.tcpID, h.counterPair, h.superTimer, h.parent.superIdentifier, h.emitter, filteringOptions, h.reqResMatcher)
if err != nil {
_, err = io.Copy(ioutil.Discard, b)
if err != nil {

@@ -89,6 +89,7 @@ func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.T
}
stream.clients = append(stream.clients, tcpReader{
msgQueue: make(chan tcpReaderDataMsg),
progress: &api.ReadProgress{},
superTimer: &api.SuperTimer{},
ident: fmt.Sprintf("%s %s", net, transport),
tcpID: &api.TcpID{
@@ -108,6 +109,7 @@ func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.T
})
stream.servers = append(stream.servers, tcpReader{
msgQueue: make(chan tcpReaderDataMsg),
progress: &api.ReadProgress{},
superTimer: &api.SuperTimer{},
ident: fmt.Sprintf("%s %s", net, transport),
tcpID: &api.TcpID{
@@ -1,5 +1,7 @@
#!/bin/bash

pushd "$(dirname "$0")" || exit 1

MIZU_HOME=$(realpath ../../../)

docker build -t mizu-ebpf-builder . || exit 1
@@ -7,6 +9,7 @@ docker build -t mizu-ebpf-builder . || exit 1
docker run --rm \
--name mizu-ebpf-builder \
-v $MIZU_HOME:/mizu \
-v $(go env GOPATH):/root/go \
-it mizu-ebpf-builder \
sh -c "
go generate tap/tlstapper/tls_tapper.go
@@ -15,3 +18,5 @@ docker run --rm \
chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfel.go
chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfel.o
" || exit 1

popd
@@ -69,7 +69,7 @@ void sys_exit_accept4(struct sys_exit_accept4_ctx *ctx) {

struct accept_info *infoPtr = bpf_map_lookup_elem(&accept_syscall_context, &id);

if (infoPtr == 0) {
if (infoPtr == NULL) {
return;
}

@@ -175,7 +175,7 @@ void sys_exit_connect(struct sys_exit_connect_ctx *ctx) {

struct connect_info *infoPtr = bpf_map_lookup_elem(&connect_syscall_info, &id);

if (infoPtr == 0) {
if (infoPtr == NULL) {
return;
}

@@ -28,7 +28,7 @@ void sys_enter_read(struct sys_enter_read_ctx *ctx) {

struct ssl_info *infoPtr = bpf_map_lookup_elem(&ssl_read_context, &id);

if (infoPtr == 0) {
if (infoPtr == NULL) {
return;
}

@@ -71,7 +71,7 @@ void sys_enter_write(struct sys_enter_write_ctx *ctx) {

struct ssl_info *infoPtr = bpf_map_lookup_elem(&ssl_write_context, &id);

if (infoPtr == 0) {
if (infoPtr == NULL) {
return;
}

@@ -10,6 +10,12 @@ Copyright (C) UP9 Inc.
#define FLAGS_IS_CLIENT_BIT (1 << 0)
#define FLAGS_IS_READ_BIT (1 << 1)

#define CHUNK_SIZE (1 << 12)
#define MAX_CHUNKS_PER_OPERATION (8)

// One minute in nano seconds. Chosen by gut feeling.
#define SSL_INFO_MAX_TTL_NANO (1000000000l * 60l)

// The same struct can be found in chunk.go
//
// Be careful when editing, alignment and padding should be exactly the same in go/c.
@@ -18,16 +24,18 @@ struct tlsChunk {
__u32 pid;
__u32 tgid;
__u32 len;
__u32 start;
__u32 recorded;
__u32 fd;
__u32 flags;
__u8 address[16];
__u8 data[4096]; // Must be N^2
__u8 data[CHUNK_SIZE]; // Must be N^2
};

struct ssl_info {
void* buffer;
__u32 fd;
__u64 created_at_nano;

// for ssl_write and ssl_read must be zero
// for ssl_write_ex and ssl_read_ex save the *written/*readbytes pointer.
@@ -53,10 +61,13 @@ struct fd_info {

#define BPF_PERF_OUTPUT(_name) \
BPF_MAP(_name, BPF_MAP_TYPE_PERF_EVENT_ARRAY, int, __u32, 1024)

#define BPF_LRU_HASH(_name, _key_type, _value_type) \
BPF_MAP(_name, BPF_MAP_TYPE_LRU_HASH, _key_type, _value_type, 16384)

BPF_HASH(pids_map, __u32, __u32);
BPF_HASH(ssl_write_context, __u64, struct ssl_info);
BPF_HASH(ssl_read_context, __u64, struct ssl_info);
BPF_LRU_HASH(ssl_write_context, __u64, struct ssl_info);
BPF_LRU_HASH(ssl_read_context, __u64, struct ssl_info);
BPF_HASH(file_descriptor_to_ipv4, __u64, struct fd_info);
BPF_PERF_OUTPUT(chunks_buffer);
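For orientation, not part of the diff: with CHUNK_SIZE defined as 1 << 12 (4096 bytes) and MAX_CHUNKS_PER_OPERATION as 8, a single SSL read or write can be forwarded as at most 8 perf-buffer chunks, i.e. 4096 * 8 = 32768 bytes per operation; larger buffers are rejected with the "Buffer too big" trace message in the uprobe code that follows. The per-call SSL contexts also move from plain hashes to the new 16384-entry LRU maps, so stale entries are evicted rather than leaked.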
@@ -18,16 +18,166 @@ struct {
|
||||
__type(value, struct tlsChunk);
|
||||
} heap SEC(".maps");
|
||||
|
||||
static __always_inline int ssl_uprobe(void* ssl, void* buffer, int num, struct bpf_map_def* map_fd, size_t *count_ptr) {
|
||||
__u64 id = bpf_get_current_pid_tgid();
|
||||
static __always_inline int get_count_bytes(struct pt_regs *ctx, struct ssl_info* info, __u64 id) {
|
||||
int returnValue = PT_REGS_RC(ctx);
|
||||
|
||||
if (!should_tap(id >> 32)) {
|
||||
if (info->count_ptr == NULL) {
|
||||
// ssl_read and ssl_write return the number of bytes written/read
|
||||
//
|
||||
return returnValue;
|
||||
}
|
||||
|
||||
// ssl_read_ex and ssl_write_ex return 1 for success
|
||||
//
|
||||
if (returnValue != 1) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
// ssl_read_ex and ssl_write_ex write the number of bytes to an arg named *count
|
||||
//
|
||||
size_t countBytes;
|
||||
long err = bpf_probe_read(&countBytes, sizeof(size_t), (void*) info->count_ptr);
|
||||
|
||||
if (err != 0) {
|
||||
char msg[] = "Error reading bytes count of _ex (id: %ld) (err: %ld)";
|
||||
bpf_trace_printk(msg, sizeof(msg), id, err);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return countBytes;
|
||||
}
|
||||
|
||||
static __always_inline void add_address_to_chunk(struct tlsChunk* chunk, __u64 id, __u32 fd) {
|
||||
__u32 pid = id >> 32;
|
||||
__u64 key = (__u64) pid << 32 | fd;
|
||||
|
||||
struct fd_info *fdinfo = bpf_map_lookup_elem(&file_descriptor_to_ipv4, &key);
|
||||
|
||||
if (fdinfo == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
int err = bpf_probe_read(chunk->address, sizeof(chunk->address), fdinfo->ipv4_addr);
|
||||
chunk->flags |= (fdinfo->flags & FLAGS_IS_CLIENT_BIT);
|
||||
|
||||
if (err != 0) {
|
||||
char msg[] = "Error reading from fd address %ld - %ld";
|
||||
bpf_trace_printk(msg, sizeof(msg), id, err);
|
||||
}
|
||||
}
|
||||
|
||||
static __always_inline void send_chunk_part(struct pt_regs *ctx, __u8* buffer, __u64 id,
|
||||
struct tlsChunk* chunk, int start, int end) {
|
||||
size_t recorded = MIN(end - start, sizeof(chunk->data));
|
||||
|
||||
if (recorded <= 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
chunk->recorded = recorded;
|
||||
chunk->start = start;
|
||||
|
||||
// This ugly trick is to keep the eBPF verifier happy
|
||||
//
|
||||
long err = 0;
|
||||
if (chunk->recorded == sizeof(chunk->data)) {
|
||||
err = bpf_probe_read(chunk->data, sizeof(chunk->data), buffer + start);
|
||||
} else {
|
||||
recorded &= (sizeof(chunk->data) - 1); // Buffer size must be a power of 2
|
||||
err = bpf_probe_read(chunk->data, recorded, buffer + start);
|
||||
}
|
||||
|
||||
if (err != 0) {
|
||||
char msg[] = "Error reading from ssl buffer %ld - %ld";
|
||||
bpf_trace_printk(msg, sizeof(msg), id, err);
|
||||
return;
|
||||
}
|
||||
|
||||
bpf_perf_event_output(ctx, &chunks_buffer, BPF_F_CURRENT_CPU, chunk, sizeof(struct tlsChunk));
|
||||
}
|
||||
|
||||
static __always_inline void send_chunk(struct pt_regs *ctx, __u8* buffer, __u64 id, struct tlsChunk* chunk) {
|
||||
// ebpf loops must be bounded at compile time, we can't use (i < chunk->len / CHUNK_SIZE)
|
||||
//
|
||||
// https://lwn.net/Articles/794934/
|
||||
//
|
||||
// If we want to compile in kernel older than 5.3, we should add "#pragma unroll" to this loop
|
||||
//
|
||||
for (int i = 0; i < MAX_CHUNKS_PER_OPERATION; i++) {
|
||||
if (chunk->len <= (CHUNK_SIZE * i)) {
|
||||
break;
|
||||
}
|
||||
|
||||
send_chunk_part(ctx, buffer, id, chunk, CHUNK_SIZE * i, chunk->len);
|
||||
}
|
||||
}
|
||||
|
||||
static __always_inline void output_ssl_chunk(struct pt_regs *ctx, struct ssl_info* info, __u64 id, __u32 flags) {
|
||||
int countBytes = get_count_bytes(ctx, info, id);
|
||||
|
||||
if (countBytes <= 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (countBytes > (CHUNK_SIZE * MAX_CHUNKS_PER_OPERATION)) {
|
||||
char msg[] = "Buffer too big %d (id: %ld)";
|
||||
bpf_trace_printk(msg, sizeof(msg), countBytes, id);
|
||||
return;
|
||||
}
|
||||
|
||||
struct tlsChunk* chunk;
|
||||
int zero = 0;
|
||||
|
||||
// If another thread running on the same CPU gets to this point at the same time as us (context switch),
// the data will be corrupted - protection may be added in the future
|
||||
//
|
||||
chunk = bpf_map_lookup_elem(&heap, &zero);
|
||||
|
||||
if (!chunk) {
|
||||
char msg[] = "Unable to allocate chunk (id: %ld)";
|
||||
bpf_trace_printk(msg, sizeof(msg), id);
|
||||
return;
|
||||
}
|
||||
|
||||
chunk->flags = flags;
|
||||
chunk->pid = id >> 32;
|
||||
chunk->tgid = id;
|
||||
chunk->len = countBytes;
|
||||
chunk->fd = info->fd;
|
||||
|
||||
add_address_to_chunk(chunk, id, chunk->fd);
|
||||
send_chunk(ctx, info->buffer, id, chunk);
|
||||
}
|
||||
|
||||
static __always_inline void ssl_uprobe(void* ssl, void* buffer, int num, struct bpf_map_def* map_fd, size_t *count_ptr) {
|
||||
__u64 id = bpf_get_current_pid_tgid();
|
||||
|
||||
if (!should_tap(id >> 32)) {
|
||||
return;
|
||||
}
|
||||
|
||||
struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &id);
|
||||
struct ssl_info info = {};
|
||||
|
||||
info.fd = -1;
|
||||
if (infoPtr == NULL) {
|
||||
info.fd = -1;
|
||||
info.created_at_nano = bpf_ktime_get_ns();
|
||||
} else {
|
||||
long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);
|
||||
|
||||
if (err != 0) {
|
||||
char msg[] = "Error reading old ssl context (id: %ld) (err: %ld)";
|
||||
bpf_trace_printk(msg, sizeof(msg), id, err);
|
||||
}
|
||||
|
||||
if ((bpf_ktime_get_ns() - info.created_at_nano) > SSL_INFO_MAX_TTL_NANO) {
|
||||
// If the ssl info is too old, we don't want to use its info because it may be incorrect.
|
||||
//
|
||||
info.fd = -1;
|
||||
info.created_at_nano = bpf_ktime_get_ns();
|
||||
}
|
||||
}
|
||||
|
||||
info.count_ptr = count_ptr;
|
||||
info.buffer = buffer;
|
||||
|
||||
@@ -36,163 +186,90 @@ static __always_inline int ssl_uprobe(void* ssl, void* buffer, int num, struct b
|
||||
if (err != 0) {
|
||||
char msg[] = "Error putting ssl context (id: %ld) (err: %ld)";
|
||||
bpf_trace_printk(msg, sizeof(msg), id, err);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static __always_inline int ssl_uretprobe(struct pt_regs *ctx, struct bpf_map_def* map_fd, __u32 flags) {
|
||||
static __always_inline void ssl_uretprobe(struct pt_regs *ctx, struct bpf_map_def* map_fd, __u32 flags) {
|
||||
__u64 id = bpf_get_current_pid_tgid();
|
||||
|
||||
if (!should_tap(id >> 32)) {
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
|
||||
struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &id);
|
||||
|
||||
if (infoPtr == 0) {
|
||||
if (infoPtr == NULL) {
|
||||
char msg[] = "Error getting ssl context info (id: %ld)";
|
||||
bpf_trace_printk(msg, sizeof(msg), id);
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
|
||||
struct ssl_info info;
|
||||
long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);
|
||||
|
||||
bpf_map_delete_elem(map_fd, &id);
|
||||
// Do not clean the map on purpose: sometimes there are two calls to ssl_read in a row,
// where only the first call actually reads from the socket and gives us the chance
// to find the fd. The second call already has all the information, but gives us no
// chance to find the fd.
//
// There are two risks in keeping the map items:
// 1. The map gets full - we solve it by using BPF_MAP_TYPE_LRU_HASH with a hard limit
// 2. We get wrong info from an old call - we solve it by comparing the timestamp
//    info before using it
|
||||
//
|
||||
// bpf_map_delete_elem(map_fd, &id);
|
||||
|
||||
if (err != 0) {
|
||||
char msg[] = "Error reading ssl context (id: %ld) (err: %ld)";
|
||||
bpf_trace_printk(msg, sizeof(msg), id, err);
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
|
||||
if (info.fd == -1) {
|
||||
char msg[] = "File descriptor is missing from ssl info (id: %ld)";
|
||||
bpf_trace_printk(msg, sizeof(msg), id);
|
||||
return 0;
|
||||
return;
|
||||
}
|
||||
|
||||
int countBytes = PT_REGS_RC(ctx);
|
||||
|
||||
if (info.count_ptr != 0) {
|
||||
// ssl_read_ex and ssl_write_ex return 1 for success
|
||||
//
|
||||
if (countBytes != 1) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
size_t tempCount;
|
||||
long err = bpf_probe_read(&tempCount, sizeof(size_t), (void*) info.count_ptr);
|
||||
|
||||
if (err != 0) {
|
||||
char msg[] = "Error reading bytes count of _ex (id: %ld) (err: %ld)";
|
||||
bpf_trace_printk(msg, sizeof(msg), id, err);
|
||||
return 0;
|
||||
}
|
||||
|
||||
countBytes = tempCount;
|
||||
}
|
||||
|
||||
if (countBytes <= 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct tlsChunk* c;
|
||||
int zero = 0;
|
||||
|
||||
// If another thread running on the same CPU gets to this point at the same time as us
// the data will be corrupted - protection may be added in the future
|
||||
//
|
||||
c = bpf_map_lookup_elem(&heap, &zero);
|
||||
|
||||
if (!c) {
|
||||
char msg[] = "Unable to allocate chunk (id: %ld)";
|
||||
bpf_trace_printk(msg, sizeof(msg), id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
size_t recorded = MIN(countBytes, sizeof(c->data));
|
||||
|
||||
c->flags = flags;
|
||||
c->pid = id >> 32;
|
||||
c->tgid = id;
|
||||
c->len = countBytes;
|
||||
c->recorded = recorded;
|
||||
c->fd = info.fd;
|
||||
|
||||
// This ugly trick is for the ebpf verifier happiness
|
||||
//
|
||||
if (recorded == sizeof(c->data)) {
|
||||
err = bpf_probe_read(c->data, sizeof(c->data), info.buffer);
|
||||
} else {
|
||||
recorded &= sizeof(c->data) - 1; // Buffer must be N^2
|
||||
err = bpf_probe_read(c->data, recorded, info.buffer);
|
||||
}
|
||||
|
||||
if (err != 0) {
|
||||
char msg[] = "Error reading from ssl buffer %ld - %ld";
|
||||
bpf_trace_printk(msg, sizeof(msg), id, err);
|
||||
return 0;
|
||||
}
|
||||
|
||||
__u32 pid = id >> 32;
|
||||
__u32 fd = info.fd;
|
||||
__u64 key = (__u64) pid << 32 | fd;
|
||||
|
||||
struct fd_info *fdinfo = bpf_map_lookup_elem(&file_descriptor_to_ipv4, &key);
|
||||
|
||||
if (fdinfo != 0) {
|
||||
err = bpf_probe_read(c->address, sizeof(c->address), fdinfo->ipv4_addr);
|
||||
c->flags |= (fdinfo->flags & FLAGS_IS_CLIENT_BIT);
|
||||
|
||||
if (err != 0) {
|
||||
char msg[] = "Error reading from fd address %ld - %ld";
|
||||
bpf_trace_printk(msg, sizeof(msg), id, err);
|
||||
}
|
||||
}
|
||||
|
||||
bpf_perf_event_output(ctx, &chunks_buffer, BPF_F_CURRENT_CPU, c, sizeof(struct tlsChunk));
|
||||
return 0;
|
||||
output_ssl_chunk(ctx, &info, id, flags);
|
||||
}
|
||||
|
||||
SEC("uprobe/ssl_write")
|
||||
int BPF_KPROBE(ssl_write, void* ssl, void* buffer, int num) {
|
||||
return ssl_uprobe(ssl, buffer, num, &ssl_write_context, 0);
|
||||
void BPF_KPROBE(ssl_write, void* ssl, void* buffer, int num) {
|
||||
ssl_uprobe(ssl, buffer, num, &ssl_write_context, 0);
|
||||
}
|
||||
|
||||
SEC("uretprobe/ssl_write")
|
||||
int BPF_KPROBE(ssl_ret_write) {
|
||||
return ssl_uretprobe(ctx, &ssl_write_context, 0);
|
||||
void BPF_KPROBE(ssl_ret_write) {
|
||||
ssl_uretprobe(ctx, &ssl_write_context, 0);
|
||||
}
|
||||
|
||||
SEC("uprobe/ssl_read")
|
||||
int BPF_KPROBE(ssl_read, void* ssl, void* buffer, int num) {
|
||||
return ssl_uprobe(ssl, buffer, num, &ssl_read_context, 0);
|
||||
void BPF_KPROBE(ssl_read, void* ssl, void* buffer, int num) {
|
||||
ssl_uprobe(ssl, buffer, num, &ssl_read_context, 0);
|
||||
}
|
||||
|
||||
SEC("uretprobe/ssl_read")
|
||||
int BPF_KPROBE(ssl_ret_read) {
|
||||
return ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT);
|
||||
void BPF_KPROBE(ssl_ret_read) {
|
||||
ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT);
|
||||
}
|
||||
|
||||
SEC("uprobe/ssl_write_ex")
|
||||
int BPF_KPROBE(ssl_write_ex, void* ssl, void* buffer, size_t num, size_t *written) {
|
||||
return ssl_uprobe(ssl, buffer, num, &ssl_write_context, written);
|
||||
void BPF_KPROBE(ssl_write_ex, void* ssl, void* buffer, size_t num, size_t *written) {
|
||||
ssl_uprobe(ssl, buffer, num, &ssl_write_context, written);
|
||||
}
|
||||
|
||||
SEC("uretprobe/ssl_write_ex")
|
||||
int BPF_KPROBE(ssl_ret_write_ex) {
|
||||
return ssl_uretprobe(ctx, &ssl_write_context, 0);
|
||||
void BPF_KPROBE(ssl_ret_write_ex) {
|
||||
ssl_uretprobe(ctx, &ssl_write_context, 0);
|
||||
}
|
||||
|
||||
SEC("uprobe/ssl_read_ex")
|
||||
int BPF_KPROBE(ssl_read_ex, void* ssl, void* buffer, size_t num, size_t *readbytes) {
|
||||
return ssl_uprobe(ssl, buffer, num, &ssl_read_context, readbytes);
|
||||
void BPF_KPROBE(ssl_read_ex, void* ssl, void* buffer, size_t num, size_t *readbytes) {
|
||||
ssl_uprobe(ssl, buffer, num, &ssl_read_context, readbytes);
|
||||
}
|
||||
|
||||
SEC("uretprobe/ssl_read_ex")
|
||||
int BPF_KPROBE(ssl_ret_read_ex) {
|
||||
return ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT);
|
||||
void BPF_KPROBE(ssl_ret_read_ex) {
|
||||
ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT);
|
||||
}
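The probes above consistently derive the process id as the upper 32 bits of bpf_get_current_pid_tgid() and index file_descriptor_to_ipv4 by packing pid and fd into one 64-bit key. A minimal Go sketch of the same packing, as a userspace consumer might use it to correlate chunks with fd info; the helper names below are illustrative and not part of this change:

package main

import "fmt"

// splitPidTgid mirrors `id >> 32` / `(__u32) id` from the eBPF code above.
func splitPidTgid(id uint64) (pid uint32, tgid uint32) {
	return uint32(id >> 32), uint32(id)
}

// fdKey mirrors `(__u64) pid << 32 | fd`, the key used for file_descriptor_to_ipv4.
func fdKey(pid uint32, fd uint32) uint64 {
	return uint64(pid)<<32 | uint64(fd)
}

func main() {
	pid, tgid := splitPidTgid(0x0000123400005678)
	fmt.Printf("pid=%d tgid=%d key=%#x\n", pid, tgid, fdKey(pid, 42))
}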
|
||||
|
||||
@@ -16,14 +16,15 @@ const FLAGS_IS_READ_BIT uint32 = (1 << 1)
|
||||
// Be careful when editing, alignment and padding should be exactly the same in go/c.
|
||||
//
|
||||
type tlsChunk struct {
	Pid uint32
	Tgid uint32
	Len uint32
	Recorded uint32
	Fd uint32
	Flags uint32
	Address [16]byte
	Data [4096]byte
	Pid uint32 // process id
	Tgid uint32 // thread id inside the process
	Len uint32 // the size of the native buffer used to read/write the tls data (may be bigger than tlsChunk.Data[])
	Start uint32 // the start offset within the native buffer
	Recorded uint32 // number of bytes copied from the native buffer to tlsChunk.Data[]
	Fd uint32 // the file descriptor used to read/write the tls data (probably socket file descriptor)
	Flags uint32 // bitwise flags
	Address [16]byte // ipv4 address and port
	Data [4096]byte // actual tls data
}
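Assuming the perf payload matches this layout byte for byte (little-endian, no padding, which holds for these field sizes), a raw sample can be decoded with nothing but the standard library. This is an illustrative sketch, not code from the repository; decodeChunk is a hypothetical helper:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

type tlsChunk struct {
	Pid      uint32
	Tgid     uint32
	Len      uint32
	Start    uint32
	Recorded uint32
	Fd       uint32
	Flags    uint32
	Address  [16]byte
	Data     [4096]byte
}

// decodeChunk parses one raw perf sample into a tlsChunk and returns the
// recorded payload (Data holds only Recorded valid bytes).
func decodeChunk(raw []byte) (*tlsChunk, []byte, error) {
	var c tlsChunk
	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &c); err != nil {
		return nil, nil, err
	}
	return &c, c.Data[:c.Recorded], nil
}

func main() {
	// Fabricated sample buffer, for illustration only.
	var c tlsChunk
	c.Recorded = 5
	copy(c.Data[:], "hello")
	buf := new(bytes.Buffer)
	_ = binary.Write(buf, binary.LittleEndian, &c)

	parsed, payload, _ := decodeChunk(buf.Bytes())
	fmt.Println(parsed.Recorded, string(payload)) // 5 hello
}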
|
||||
|
||||
func (c *tlsChunk) getAddress() (net.IP, uint16, error) {
|
||||
|
||||
@@ -146,6 +146,7 @@ func (p *tlsPoller) startNewTlsReader(chunk *tlsChunk, ip net.IP, port uint16, k
|
||||
doneHandler: func(r *tlsReader) {
|
||||
p.closeReader(key, r)
|
||||
},
|
||||
progress: &api.ReadProgress{},
|
||||
}
|
||||
|
||||
tcpid := p.buildTcpId(chunk, ip, port)
|
||||
@@ -158,7 +159,7 @@ func dissect(extension *api.Extension, reader *tlsReader, isRequest bool, tcpid
|
||||
emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher api.RequestResponseMatcher) {
|
||||
b := bufio.NewReader(reader)
|
||||
|
||||
err := extension.Dissector.Dissect(b, api.Ebpf, isRequest, tcpid, &api.CounterPair{},
|
||||
err := extension.Dissector.Dissect(b, reader.progress, api.Ebpf, isRequest, tcpid, &api.CounterPair{},
|
||||
&api.SuperTimer{}, &api.SuperIdentifier{}, emitter, options, reqResMatcher)
|
||||
|
||||
if err != nil {
|
||||
@@ -224,8 +225,8 @@ func (p *tlsPoller) logTls(chunk *tlsChunk, ip net.IP, port uint16) {
|
||||
|
||||
str := strings.ReplaceAll(strings.ReplaceAll(string(chunk.Data[0:chunk.Recorded]), "\n", " "), "\r", "")
|
||||
|
||||
logger.Log.Infof("PID: %v (tid: %v) (fd: %v) (client: %v) (addr: %v:%v) (fdaddr %v:%v>%v:%v) (recorded %v out of %v) - %v - %v",
|
||||
logger.Log.Infof("PID: %v (tid: %v) (fd: %v) (client: %v) (addr: %v:%v) (fdaddr %v:%v>%v:%v) (recorded %v out of %v starting at %v) - %v - %v",
|
||||
chunk.Pid, chunk.Tgid, chunk.Fd, flagsStr, ip, port,
|
||||
srcIp, srcPort, dstIp, dstPort,
|
||||
chunk.Recorded, chunk.Len, str, hex.EncodeToString(chunk.Data[0:chunk.Recorded]))
|
||||
chunk.Recorded, chunk.Len, chunk.Start, str, hex.EncodeToString(chunk.Data[0:chunk.Recorded]))
|
||||
}
|
||||
|
||||
@@ -24,6 +24,8 @@ func UpdateTapTargets(tls *TlsTapper, pods *[]v1.Pod, procfs string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tls.ClearPids()
|
||||
|
||||
for _, pid := range containerPids {
|
||||
if err := tls.AddPid(procfs, pid); err != nil {
|
||||
|
||||
@@ -3,6 +3,8 @@ package tlstapper
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
type tlsReader struct {
|
||||
@@ -10,6 +12,7 @@ type tlsReader struct {
|
||||
chunks chan *tlsChunk
|
||||
data []byte
|
||||
doneHandler func(r *tlsReader)
|
||||
progress *api.ReadProgress
|
||||
}
|
||||
|
||||
func (r *tlsReader) Read(p []byte) (int, error) {
|
||||
@@ -36,6 +39,7 @@ func (r *tlsReader) Read(p []byte) (int, error) {
|
||||
|
||||
l := copy(p, r.data)
|
||||
r.data = r.data[l:]
|
||||
r.progress.Feed(l)
|
||||
|
||||
return l, nil
|
||||
}
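The Read implementation above feeds every copied byte into an api.ReadProgress. That type's implementation is not part of this diff; a stand-in with the same Feed semantics could look roughly like this (illustrative only):

package main

import (
	"fmt"
	"sync/atomic"
)

// ReadProgress counts how many bytes a reader has handed to the dissector.
type ReadProgress struct {
	current uint64
}

// Feed records that n more bytes were consumed.
func (p *ReadProgress) Feed(n int) {
	atomic.AddUint64(&p.current, uint64(n))
}

// Current returns the total number of bytes consumed so far.
func (p *ReadProgress) Current() int {
	return int(atomic.LoadUint64(&p.current))
}

func main() {
	p := &ReadProgress{}
	p.Feed(10)
	p.Feed(5)
	fmt.Println(p.Current()) // 15
}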
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"github.com/go-errors/errors"
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
"sync"
|
||||
)
|
||||
|
||||
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go tlsTapper bpf/tls_tapper.c -- -O2 -g -D__TARGET_ARCH_x86
|
||||
@@ -14,6 +15,7 @@ type TlsTapper struct {
|
||||
syscallHooks syscallHooks
|
||||
sslHooksStructs []sslHooks
|
||||
poller *tlsPoller
|
||||
registeredPids sync.Map
|
||||
}
|
||||
|
||||
func (t *TlsTapper) Init(bufferSize int, procfs string, extension *api.Extension) error {
|
||||
@@ -70,6 +72,16 @@ func (t *TlsTapper) RemovePid(pid uint32) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *TlsTapper) ClearPids() {
|
||||
t.registeredPids.Range(func(key, v interface{}) bool {
|
||||
if err := t.RemovePid(key.(uint32)); err != nil {
|
||||
LogError(err)
|
||||
}
|
||||
t.registeredPids.Delete(key)
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (t *TlsTapper) Close() []error {
|
||||
errors := make([]error, 0)
|
||||
|
||||
@@ -116,6 +128,8 @@ func (t *TlsTapper) tapPid(pid uint32, sslLibrary string) error {
|
||||
if err := pids.Put(pid, uint32(1)); err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
t.registeredPids.Store(pid, true)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
import TrafficViewer,{useWS, DEFAULT_QUERY} from '@up9/mizu-common';
|
||||
import TrafficViewer,{useWS, DEFAULT_QUERY, OasModal} from '@up9/mizu-common';
|
||||
import "@up9/mizu-common/dist/index.css"
|
||||
import {useEffect} from 'react';
|
||||
import Api, {getWebsocketUrl} from "./api";
|
||||
@@ -17,8 +17,7 @@ const App = () => {
|
||||
},[])
|
||||
|
||||
return <>
|
||||
<TrafficViewer message={message} error={error} isWebSocketOpen={isOpen}
|
||||
trafficViewerApiProp={trafficViewerApi} ></TrafficViewer>
|
||||
|
||||
</>
|
||||
}
|
||||
|
||||
|
||||
ui-common/package-lock.json (generated)
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "@up9/mizu-common",
|
||||
"version": "1.0.10",
|
||||
"version": "1.0.135",
|
||||
"lockfileVersion": 2,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@up9/mizu-common",
|
||||
"version": "1.0.10",
|
||||
"version": "1.0.135",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@craco/craco": "^6.4.3",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@up9/mizu-common",
|
||||
"version": "1.0.128",
|
||||
"version": "1.0.139",
|
||||
"description": "Made with create-react-library",
|
||||
"author": "",
|
||||
"license": "MIT",
|
||||
|
||||
@@ -6,23 +6,32 @@
|
||||
padding: 10px
|
||||
|
||||
.selectHeader
|
||||
font-family: Source Sans Pro,Lucida Grande,Tahoma,sans-serif
|
||||
display: flex
|
||||
align-items: center
|
||||
font-weight: 900
|
||||
width: 100%
|
||||
margin-top: -1%
|
||||
|
||||
.openApilogo
|
||||
width: 36px
|
||||
width: 43px
|
||||
|
||||
.title
|
||||
color:#494677
|
||||
font-family: Lato
|
||||
font-size: 20px
|
||||
font-family: Source Sans Pro,Lucida Grande,Tahoma,sans-serif
|
||||
font-size: 28px
|
||||
font-weight: 600
|
||||
|
||||
.selectContainer
|
||||
margin-left: 1%
|
||||
width: 14%
|
||||
margin-bottom: 1%
|
||||
margin-top: 1%
|
||||
|
||||
.redoc
|
||||
height: 98%
|
||||
overflow-y: scroll
|
||||
height: 85%
|
||||
overflow-y: scroll
|
||||
|
||||
.borderLine
|
||||
border-top: 1px solid #dee6fe
|
||||
margin-bottom: 1%
|
||||
|
||||
|
||||
@@ -7,8 +7,10 @@ import style from './OasModal.module.sass';
|
||||
import openApiLogo from 'assets/openApiLogo.png'
|
||||
import { redocThemeOptions } from "./redocThemeOptions";
|
||||
import React from "react";
|
||||
import { TOAST_CONTAINER_ID } from "../../configs/Consts";
|
||||
import { Select } from "../UI/Select";
|
||||
|
||||
|
||||
const modalStyle = {
|
||||
position: 'absolute',
|
||||
top: '6%',
|
||||
@@ -25,34 +27,34 @@ const modalStyle = {
|
||||
|
||||
const ipAddressWithPortRegex = new RegExp('([0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}.[0-9]{1,3}):([0-9]{1,5})');
|
||||
|
||||
const OasModal = ({ openModal, handleCloseModal, getOasServices, getOasByService}) => {
|
||||
const OasModal = ({ openModal, handleCloseModal, getOasServices, getOasByService }) => {
|
||||
const [oasServices, setOasServices] = useState([] as string[])
|
||||
const [selectedServiceName, setSelectedServiceName] = useState("");
|
||||
const [selectedServiceSpec, setSelectedServiceSpec] = useState(null);
|
||||
const [resolvedServices, setResolvedServices] = useState([]);
|
||||
const [unResolvedServices, setUnResolvedServices] = useState([]);
|
||||
|
||||
const onSelectedOASService = useCallback( async (selectedService) => {
|
||||
if (!!selectedService){
|
||||
const onSelectedOASService = useCallback(async (selectedService) => {
|
||||
if (!!selectedService) {
|
||||
setSelectedServiceName(selectedService);
|
||||
if(oasServices.length === 0){
|
||||
if (oasServices.length === 0) {
|
||||
return
|
||||
}
|
||||
try {
|
||||
const data = await getOasByService(selectedService);
|
||||
setSelectedServiceSpec(data);
|
||||
} catch (e) {
|
||||
toast.error("Error occurred while fetching service OAS spec");
|
||||
toast.error("Error occurred while fetching service OAS spec", { containerId: TOAST_CONTAINER_ID });
|
||||
console.error(e);
|
||||
}
|
||||
}
|
||||
},[oasServices.length])
|
||||
}, [oasServices.length])
|
||||
|
||||
const resolvedArrayBuilder = useCallback(async(services) => {
|
||||
const resolvedArrayBuilder = useCallback(async (services) => {
|
||||
const resServices = [];
|
||||
const unResServices = [];
|
||||
services.forEach(s => {
|
||||
if(ipAddressWithPortRegex.test(s)){
|
||||
if (ipAddressWithPortRegex.test(s)) {
|
||||
unResServices.push(s);
|
||||
}
|
||||
else {
|
||||
@@ -62,10 +64,16 @@ const OasModal = ({ openModal, handleCloseModal, getOasServices, getOasByService
|
||||
|
||||
resServices.sort();
|
||||
unResServices.sort();
|
||||
onSelectedOASService(resServices[0]);
|
||||
if (resServices.length > 0) {
|
||||
onSelectedOASService(resServices[0]);
|
||||
}
|
||||
else {
|
||||
onSelectedOASService(unResServices[0]);
|
||||
}
|
||||
|
||||
setResolvedServices(resServices);
|
||||
setUnResolvedServices(unResServices);
|
||||
},[onSelectedOASService])
|
||||
}, [onSelectedOASService])
|
||||
|
||||
useEffect(() => {
|
||||
(async () => {
|
||||
@@ -77,8 +85,7 @@ const OasModal = ({ openModal, handleCloseModal, getOasServices, getOasByService
|
||||
console.error(e);
|
||||
}
|
||||
})();
|
||||
}, [openModal,resolvedArrayBuilder]);
|
||||
|
||||
}, [openModal, resolvedArrayBuilder]);
|
||||
|
||||
|
||||
return (
|
||||
@@ -90,48 +97,48 @@ const OasModal = ({ openModal, handleCloseModal, getOasServices, getOasByService
|
||||
closeAfterTransition
|
||||
BackdropComponent={Backdrop}
|
||||
BackdropProps={{
|
||||
timeout: 500,
|
||||
timeout: 500,
|
||||
}}
|
||||
>
|
||||
<Fade in={openModal}>
|
||||
<Box sx={modalStyle}>
|
||||
<div className={style.boxContainer}>
|
||||
<div className={style.selectHeader}>
|
||||
<div><img src={openApiLogo} alt="openApi" className={style.openApilogo}/></div>
|
||||
<div className={style.title}>OpenAPI selected service: </div>
|
||||
<div className={style.selectContainer} >
|
||||
<FormControl>
|
||||
<Select
|
||||
labelId="service-select-label"
|
||||
id="service-select"
|
||||
placeholder="Show OAS"
|
||||
value={selectedServiceName}
|
||||
onChangeCb={onSelectedOASService}
|
||||
>
|
||||
<ListSubheader disableSticky={true}>Resolved</ListSubheader>
|
||||
{resolvedServices.map((service) => (
|
||||
<MenuItem key={service} value={service}>
|
||||
{service}
|
||||
</MenuItem>
|
||||
))}
|
||||
<ListSubheader disableSticky={true}>UnResolved</ListSubheader>
|
||||
{unResolvedServices.map((service) => (
|
||||
<MenuItem key={service} value={service}>
|
||||
{service}
|
||||
</MenuItem>
|
||||
))}
|
||||
</Select>
|
||||
</FormControl>
|
||||
</div>
|
||||
<div><img src={openApiLogo} alt="openApi" className={style.openApilogo} /></div>
|
||||
<div className={style.title}>OpenApi </div>
|
||||
</div>
|
||||
<div style={{ cursor: "pointer" }}>
|
||||
<img src={closeIcon} alt="close" onClick={handleCloseModal} />
|
||||
</div>
|
||||
</div>
|
||||
<div className={style.selectContainer} >
|
||||
<FormControl>
|
||||
<Select
|
||||
labelId="service-select-label"
|
||||
id="service-select"
|
||||
value={selectedServiceName}
|
||||
onChangeCb={onSelectedOASService}
|
||||
>
|
||||
<ListSubheader disableSticky={true}>Resolved</ListSubheader>
|
||||
{resolvedServices.map((service) => (
|
||||
<MenuItem key={service} value={service}>
|
||||
{service}
|
||||
</MenuItem>
|
||||
))}
|
||||
<ListSubheader disableSticky={true}>UnResolved</ListSubheader>
|
||||
{unResolvedServices.map((service) => (
|
||||
<MenuItem key={service} value={service}>
|
||||
{service}
|
||||
</MenuItem>
|
||||
))}
|
||||
</Select>
|
||||
</FormControl>
|
||||
</div>
|
||||
<div className={style.borderLine}></div>
|
||||
<div className={style.redoc}>
|
||||
{selectedServiceSpec && <RedocStandalone
|
||||
spec={selectedServiceSpec}
|
||||
options={redocThemeOptions}/>}
|
||||
{selectedServiceSpec && <RedocStandalone
|
||||
spec={selectedServiceSpec}
|
||||
options={redocThemeOptions} />}
|
||||
</div>
|
||||
</Box>
|
||||
</Fade>
|
||||
|
||||
@@ -1,35 +1,52 @@
|
||||
export const redocThemeOptions = {
|
||||
theme:{
|
||||
codeBlock:{
|
||||
backgroundColor:"#11171a",
|
||||
},
|
||||
colors:{
|
||||
responses:{
|
||||
error:{
|
||||
tabTextColor:"#1b1b29"
|
||||
},
|
||||
info:{
|
||||
tabTextColor:"#1b1b29",
|
||||
},
|
||||
success:{
|
||||
tabTextColor:"#0c0b1a"
|
||||
},
|
||||
},
|
||||
text:{
|
||||
primary:"#1b1b29",
|
||||
secondary:"#4d4d4d"
|
||||
}
|
||||
},
|
||||
rightPanel:{
|
||||
backgroundColor:"#253237",
|
||||
},
|
||||
sidebar:{
|
||||
backgroundColor:"#ffffff"
|
||||
},
|
||||
typography:{
|
||||
code:{
|
||||
color:"#0c0b1a"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
const fontFamilyVar = "Source Sans Pro, Lucida Grande, Tahoma, sans-serif"
|
||||
|
||||
export const redocThemeOptions = {
|
||||
theme: {
|
||||
codeBlock: {
|
||||
backgroundColor: "#14161c",
|
||||
},
|
||||
components: {
|
||||
buttons: {
|
||||
fontFamily: fontFamilyVar,
|
||||
},
|
||||
httpBadges: {
|
||||
fontFamily: fontFamilyVar,
|
||||
}
|
||||
},
|
||||
colors: {
|
||||
responses: {
|
||||
error: {
|
||||
tabTextColor: "#1b1b29"
|
||||
},
|
||||
info: {
|
||||
tabTextColor: "#1b1b29",
|
||||
},
|
||||
success: {
|
||||
tabTextColor: "#0c0b1a"
|
||||
},
|
||||
},
|
||||
text: {
|
||||
primary: "#1b1b29",
|
||||
secondary: "#4d4d4d"
|
||||
}
|
||||
},
|
||||
rightPanel: {
|
||||
backgroundColor: "#0D0B1D",
|
||||
},
|
||||
sidebar: {
|
||||
backgroundColor: "#ffffff"
|
||||
},
|
||||
typography: {
|
||||
code: {
|
||||
color: "#0c0b1a",
|
||||
fontFamily: fontFamilyVar
|
||||
},
|
||||
fontFamily: fontFamilyVar,
|
||||
fontSize: "90%",
|
||||
fontWeight: "normal",
|
||||
headings: {
|
||||
fontFamily: fontFamilyVar
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5,10 +5,8 @@ import Moment from 'moment';
|
||||
import {EntryItem} from "./EntryListItem/EntryListItem";
|
||||
import down from "assets/downImg.svg";
|
||||
import spinner from 'assets/spinner.svg';
|
||||
|
||||
import {RecoilState, useRecoilState, useRecoilValue} from "recoil";
|
||||
import entriesAtom from "../../recoil/entries";
|
||||
import wsConnectionAtom, {WsConnectionStatus} from "../../recoil/wsConnection";
|
||||
import queryAtom from "../../recoil/query";
|
||||
import TrafficViewerApiAtom from "../../recoil/TrafficViewerApi";
|
||||
import TrafficViewerApi from "./TrafficViewerApi";
|
||||
@@ -32,14 +30,15 @@ interface EntriesListProps {
|
||||
truncatedTimestamp: number;
|
||||
setTruncatedTimestamp: any;
|
||||
scrollableRef: any;
|
||||
ws: any;
|
||||
}
|
||||
|
||||
export const EntriesList: React.FC<EntriesListProps> = ({listEntryREF, onSnapBrokenEvent, isSnappedToBottom, setIsSnappedToBottom, queriedCurrent, setQueriedCurrent, queriedTotal, setQueriedTotal, startTime, noMoreDataTop, setNoMoreDataTop, leftOffTop, setLeftOffTop, openWebSocket, leftOffBottom, truncatedTimestamp, setTruncatedTimestamp, scrollableRef}) => {
|
||||
export const EntriesList: React.FC<EntriesListProps> = ({listEntryREF, onSnapBrokenEvent, isSnappedToBottom, setIsSnappedToBottom, queriedCurrent, setQueriedCurrent, queriedTotal, setQueriedTotal, startTime, noMoreDataTop, setNoMoreDataTop, leftOffTop, setLeftOffTop, openWebSocket, leftOffBottom, truncatedTimestamp, setTruncatedTimestamp, scrollableRef, ws}) => {
|
||||
|
||||
const [entries, setEntries] = useRecoilState(entriesAtom);
|
||||
const wsConnection = useRecoilValue(wsConnectionAtom);
|
||||
const query = useRecoilValue(queryAtom);
|
||||
const isWsConnectionClosed = wsConnection === WsConnectionStatus.Closed;
|
||||
const isWsConnectionClosed = ws?.current?.readyState !== WebSocket.OPEN;
|
||||
|
||||
const trafficViewerApi = useRecoilValue(TrafficViewerApiAtom as RecoilState<TrafficViewerApi>)
|
||||
|
||||
const [loadMoreTop, setLoadMoreTop] = useState(false);
|
||||
|
||||
@@ -1,16 +1,18 @@
|
||||
import React, {useEffect, useState} from "react";
|
||||
import React, { useEffect, useState } from "react";
|
||||
import EntryViewer from "./EntryDetailed/EntryViewer";
|
||||
import {EntryItem} from "./EntryListItem/EntryListItem";
|
||||
import {makeStyles} from "@material-ui/core";
|
||||
import { EntryItem } from "./EntryListItem/EntryListItem";
|
||||
import { makeStyles } from "@material-ui/core";
|
||||
import Protocol from "../UI/Protocol"
|
||||
import Queryable from "../UI/Queryable";
|
||||
import {toast} from "react-toastify";
|
||||
import {RecoilState, useRecoilState, useRecoilValue} from "recoil";
|
||||
import { toast } from "react-toastify";
|
||||
import { RecoilState, useRecoilState, useRecoilValue } from "recoil";
|
||||
import focusedEntryIdAtom from "../../recoil/focusedEntryId";
|
||||
import trafficViewerApi from "../../recoil/TrafficViewerApi";
|
||||
import TrafficViewerApi from "./TrafficViewerApi";
|
||||
import TrafficViewerApiAtom from "../../recoil/TrafficViewerApi/atom";
|
||||
import queryAtom from "../../recoil/query/atom";
|
||||
import useWindowDimensions, { useRequestTextByWidth } from "../../hooks/WindowDimensionsHook";
|
||||
import { TOAST_CONTAINER_ID } from "../../configs/Consts";
|
||||
|
||||
const useStyles = makeStyles(() => ({
|
||||
entryTitle: {
|
||||
@@ -35,43 +37,59 @@ const useStyles = makeStyles(() => ({
|
||||
}));
|
||||
|
||||
export const formatSize = (n: number) => n > 1000 ? `${Math.round(n / 1000)}KB` : `${n} B`;
|
||||
|
||||
const EntryTitle: React.FC<any> = ({protocol, data, bodySize, elapsedTime}) => {
|
||||
const minSizeDisplayRequestSize = 880;
|
||||
const EntryTitle: React.FC<any> = ({ protocol, data, elapsedTime }) => {
|
||||
const classes = useStyles();
|
||||
const request = data.request;
|
||||
const response = data.response;
|
||||
|
||||
const { width } = useWindowDimensions();
|
||||
const { requestText, responseText, elapsedTimeText } = useRequestTextByWidth(width)
|
||||
|
||||
return <div className={classes.entryTitle}>
|
||||
<Protocol protocol={protocol} horizontal={true}/>
|
||||
<div style={{right: "30px", position: "absolute", display: "flex"}}>
|
||||
{response && <Queryable
|
||||
query={`response.bodySize == ${bodySize}`}
|
||||
style={{margin: "0 18px"}}
|
||||
<Protocol protocol={protocol} horizontal={true} />
|
||||
{(width > minSizeDisplayRequestSize) && <div style={{ right: "30px", position: "absolute", display: "flex" }}>
|
||||
{request && <Queryable
|
||||
query={`requestSize == ${data.requestSize}`}
|
||||
style={{ margin: "0 18px" }}
|
||||
displayIconOnMouseOver={true}
|
||||
>
|
||||
<div
|
||||
style={{opacity: 0.5}}
|
||||
id="entryDetailedTitleBodySize"
|
||||
style={{ opacity: 0.5 }}
|
||||
id="entryDetailedTitleRequestSize"
|
||||
>
|
||||
{formatSize(bodySize)}
|
||||
{`${requestText}${formatSize(data.requestSize)}`}
|
||||
</div>
|
||||
</Queryable>}
|
||||
{response && <Queryable
|
||||
query={`responseSize == ${data.responseSize}`}
|
||||
style={{ margin: "0 18px" }}
|
||||
displayIconOnMouseOver={true}
|
||||
>
|
||||
<div
|
||||
style={{ opacity: 0.5 }}
|
||||
id="entryDetailedTitleResponseSize"
|
||||
>
|
||||
{`${responseText}${formatSize(data.responseSize)}`}
|
||||
</div>
|
||||
</Queryable>}
|
||||
{response && <Queryable
|
||||
query={`elapsedTime >= ${elapsedTime}`}
|
||||
style={{marginRight: 18}}
|
||||
style={{ margin: "0 0 0 18px" }}
|
||||
displayIconOnMouseOver={true}
|
||||
>
|
||||
<div
|
||||
style={{opacity: 0.5}}
|
||||
style={{ opacity: 0.5 }}
|
||||
id="entryDetailedTitleElapsedTime"
|
||||
>
|
||||
{Math.round(elapsedTime)}ms
|
||||
{`${elapsedTimeText}${Math.round(elapsedTime)}ms`}
|
||||
</div>
|
||||
</Queryable>}
|
||||
</div>
|
||||
</div>}
|
||||
</div>;
|
||||
};
|
||||
|
||||
const EntrySummary: React.FC<any> = ({entry}) => {
|
||||
const EntrySummary: React.FC<any> = ({ entry }) => {
|
||||
return <EntryItem
|
||||
key={`entry-${entry.id}`}
|
||||
entry={entry}
|
||||
@@ -108,7 +126,7 @@ export const EntryDetailed = () => {
|
||||
pauseOnHover: true,
|
||||
draggable: true,
|
||||
progress: undefined,
|
||||
});
|
||||
}, { containerId: TOAST_CONTAINER_ID });
|
||||
}
|
||||
console.error(error);
|
||||
}
|
||||
@@ -120,10 +138,9 @@ export const EntryDetailed = () => {
|
||||
{entryData && <EntryTitle
|
||||
protocol={entryData.protocol}
|
||||
data={entryData.data}
|
||||
bodySize={entryData.bodySize}
|
||||
elapsedTime={entryData.data.elapsedTime}
|
||||
/>}
|
||||
{entryData && <EntrySummary entry={entryData.base}/>}
|
||||
{entryData && <EntrySummary entry={entryData.base} />}
|
||||
<React.Fragment>
|
||||
{entryData && <EntryViewer
|
||||
representation={entryData.representation}
|
||||
|
||||
@@ -4,7 +4,7 @@ import SwapHorizIcon from '@material-ui/icons/SwapHoriz';
|
||||
import styles from './EntryListItem.module.sass';
|
||||
import StatusCode, {getClassification, StatusCodeClassification} from "../../UI/StatusCode";
|
||||
import Protocol, {ProtocolInterface} from "../../UI/Protocol"
|
||||
import eBPFLogo from '../assets/ebpf.png';
|
||||
import eBPFLogo from 'assets/ebpf.png';
|
||||
import {Summary} from "../../UI/Summary";
|
||||
import Queryable from "../../UI/Queryable";
|
||||
import ingoingIconSuccess from "assets/ingoing-traffic-success.svg"
|
||||
|
||||
|
@@ -1,4 +1,4 @@
|
||||
import React, { useCallback, useEffect, useMemo, useRef, useState } from "react";
|
||||
import React, { useEffect, useMemo, useRef, useState } from "react";
|
||||
import { Filters } from "./Filters";
|
||||
import { EntriesList } from "./EntriesList";
|
||||
import { makeStyles } from "@material-ui/core";
|
||||
@@ -8,19 +8,18 @@ import { EntryDetailed } from "./EntryDetailed";
|
||||
import playIcon from 'assets/run.svg';
|
||||
import pauseIcon from 'assets/pause.svg';
|
||||
import variables from '../../variables.module.scss';
|
||||
import { toast,ToastContainer } from 'react-toastify';
|
||||
import 'react-toastify/dist/ReactToastify.css';
|
||||
import { toast, ToastContainer } from 'react-toastify';
|
||||
import debounce from 'lodash/debounce';
|
||||
import { RecoilRoot, RecoilState, useRecoilState, useRecoilValue, useSetRecoilState } from "recoil";
|
||||
import entriesAtom from "../../recoil/entries";
|
||||
import focusedEntryIdAtom from "../../recoil/focusedEntryId";
|
||||
import websocketConnectionAtom, { WsConnectionStatus } from "../../recoil/wsConnection";
|
||||
import queryAtom from "../../recoil/query";
|
||||
import { TLSWarning } from "../TLSWarning/TLSWarning";
|
||||
import trafficViewerApiAtom from "../../recoil/TrafficViewerApi"
|
||||
import TrafficViewerApi from "./TrafficViewerApi";
|
||||
import { StatusBar } from "../UI/StatusBar";
|
||||
import tappingStatusAtom from "../../recoil/tappingStatus/atom";
|
||||
import { TOAST_CONTAINER_ID } from "../../configs/Consts";
|
||||
|
||||
const useLayoutStyles = makeStyles(() => ({
|
||||
details: {
|
||||
@@ -48,21 +47,25 @@ interface TrafficViewerProps {
|
||||
trafficViewerApiProp: TrafficViewerApi,
|
||||
actionButtons?: JSX.Element,
|
||||
isShowStatusBar?: boolean,
|
||||
webSocketUrl : string
|
||||
webSocketUrl: string,
|
||||
isCloseWebSocket: boolean,
|
||||
isDemoBannerView: boolean
|
||||
}
|
||||
|
||||
export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus, trafficViewerApiProp, actionButtons,isShowStatusBar,webSocketUrl}) => {
|
||||
export const TrafficViewer: React.FC<TrafficViewerProps> = ({ setAnalyzeStatus, trafficViewerApiProp,
|
||||
actionButtons, isShowStatusBar, webSocketUrl,
|
||||
isCloseWebSocket, isDemoBannerView }) => {
|
||||
|
||||
const classes = useLayoutStyles();
|
||||
|
||||
const [entries, setEntries] = useRecoilState(entriesAtom);
|
||||
const [focusedEntryId, setFocusedEntryId] = useRecoilState(focusedEntryIdAtom);
|
||||
const [wsConnection, setWsConnection] = useRecoilState(websocketConnectionAtom);
|
||||
const query = useRecoilValue(queryAtom);
|
||||
const setTrafficViewerApiState = useSetRecoilState(trafficViewerApiAtom as RecoilState<TrafficViewerApi>)
|
||||
const [tappingStatus, setTappingStatus] = useRecoilState(tappingStatusAtom);
|
||||
const [noMoreDataTop, setNoMoreDataTop] = useState(false);
|
||||
const [isSnappedToBottom, setIsSnappedToBottom] = useState(true);
|
||||
const [forceRender, setForceRender] = useState(0);
|
||||
|
||||
const [queryBackgroundColor, setQueryBackgroundColor] = useState("#f5f5f5");
|
||||
|
||||
@@ -103,6 +106,10 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
|
||||
handleQueryChange(query);
|
||||
}, [query, handleQueryChange]);
|
||||
|
||||
useEffect(() => {
|
||||
isCloseWebSocket && closeWebSocket()
|
||||
}, [isCloseWebSocket])
|
||||
|
||||
const ws = useRef(null);
|
||||
|
||||
const listEntry = useRef(null);
|
||||
@@ -114,34 +121,44 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
|
||||
setLeftOffTop(null);
|
||||
setNoMoreDataTop(false);
|
||||
}
|
||||
ws.current = new WebSocket(webSocketUrl);
|
||||
ws.current.onopen = () => {
|
||||
setWsConnection(WsConnectionStatus.Connected);
|
||||
try {
|
||||
ws.current = new WebSocket(webSocketUrl);
|
||||
sendQueryWhenWsOpen(query);
|
||||
}
|
||||
ws.current.onclose = () => {
|
||||
setWsConnection(WsConnectionStatus.Closed);
|
||||
}
|
||||
ws.current.onerror = (event) => {
|
||||
console.error("WebSocket error:", event);
|
||||
if (query) {
|
||||
openWebSocket(`(${query}) and leftOff(${leftOffBottom})`, false);
|
||||
} else {
|
||||
openWebSocket(`leftOff(${leftOffBottom})`, false);
|
||||
|
||||
ws.current.onclose = () => {
|
||||
if (window.location.pathname === "/")
|
||||
setForceRender(forceRender + 1);
|
||||
}
|
||||
}
|
||||
ws.current.onerror = (event) => {
|
||||
console.error("WebSocket error:", event);
|
||||
if (ws?.current?.readyState === WebSocket.OPEN) {
|
||||
ws.current.close();
|
||||
}
|
||||
if (query) {
|
||||
openWebSocket(`(${query}) and leftOff(${leftOffBottom})`, false);
|
||||
} else {
|
||||
openWebSocket(`leftOff(${leftOffBottom})`, false);
|
||||
}
|
||||
}
|
||||
} catch (e) { }
|
||||
}
|
||||
|
||||
const sendQueryWhenWsOpen = (query) => {
|
||||
setTimeout(() => {
|
||||
if (ws?.current?.readyState === WebSocket.OPEN) {
|
||||
ws.current.send(JSON.stringify({"query": query, "enableFullEntries": false}));
|
||||
ws.current.send(JSON.stringify({ "query": query, "enableFullEntries": false }));
|
||||
} else {
|
||||
sendQueryWhenWsOpen(query);
|
||||
}
|
||||
}, 500)
|
||||
}
|
||||
|
||||
const closeWebSocket = () => {
|
||||
if (ws?.current?.readyState === WebSocket.OPEN) {
|
||||
ws.current.close();
|
||||
}
|
||||
}
|
||||
|
||||
if (ws.current) {
|
||||
ws.current.onmessage = (e) => {
|
||||
if (!e?.data) return;
|
||||
@@ -177,7 +194,7 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
|
||||
pauseOnHover: true,
|
||||
draggable: true,
|
||||
progress: undefined,
|
||||
});
|
||||
}, { containerId: TOAST_CONTAINER_ID });
|
||||
break;
|
||||
case "queryMetadata":
|
||||
setQueriedCurrent(queriedCurrent + message.data.current);
|
||||
@@ -200,13 +217,13 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
setTrafficViewerApiState({...trafficViewerApiProp, webSocket : {close : () => ws.current.close()}});
|
||||
setTrafficViewerApiState({ ...trafficViewerApiProp, webSocket: { close: closeWebSocket } });
|
||||
(async () => {
|
||||
openWebSocket("leftOff(-1)", true);
|
||||
try{
|
||||
try {
|
||||
const tapStatusResponse = await trafficViewerApiProp.tapStatus();
|
||||
setTappingStatus(tapStatusResponse);
|
||||
if(setAnalyzeStatus) {
|
||||
if (setAnalyzeStatus) {
|
||||
const analyzeStatusResponse = await trafficViewerApiProp.analyzeStatus();
|
||||
setAnalyzeStatus(analyzeStatusResponse);
|
||||
}
|
||||
@@ -218,8 +235,9 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
|
||||
}, []);
|
||||
|
||||
const toggleConnection = () => {
|
||||
ws.current.close();
|
||||
if (wsConnection !== WsConnectionStatus.Connected) {
|
||||
if (ws?.current?.readyState === WebSocket.OPEN) {
|
||||
ws?.current?.close();
|
||||
} else {
|
||||
if (query) {
|
||||
openWebSocket(`(${query}) and leftOff(-1)`, true);
|
||||
} else {
|
||||
@@ -230,6 +248,12 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
|
||||
}
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
return () => {
|
||||
ws.current.close();
|
||||
};
|
||||
}, []);
|
||||
|
||||
const onTLSDetected = (destAddress: string) => {
|
||||
addressesWithTLS.add(destAddress);
|
||||
setAddressesWithTLS(new Set(addressesWithTLS));
|
||||
@@ -240,8 +264,8 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
|
||||
};
|
||||
|
||||
const getConnectionIndicator = () => {
|
||||
switch (wsConnection) {
|
||||
case WsConnectionStatus.Connected:
|
||||
switch (ws?.current?.readyState) {
|
||||
case WebSocket.OPEN:
|
||||
return <div className={`${TrafficViewerStyles.indicatorContainer} ${TrafficViewerStyles.greenIndicatorContainer}`}>
|
||||
<div className={`${TrafficViewerStyles.indicator} ${TrafficViewerStyles.greenIndicator}`} />
|
||||
</div>
|
||||
@@ -253,8 +277,8 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
|
||||
}
|
||||
|
||||
const getConnectionTitle = () => {
|
||||
switch (wsConnection) {
|
||||
case WsConnectionStatus.Connected:
|
||||
switch (ws?.current?.readyState) {
|
||||
case WebSocket.OPEN:
|
||||
return "streaming live traffic"
|
||||
default:
|
||||
return "streaming paused";
|
||||
@@ -263,19 +287,19 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
|
||||
|
||||
const onSnapBrokenEvent = () => {
|
||||
setIsSnappedToBottom(false);
|
||||
if (wsConnection === WsConnectionStatus.Connected) {
|
||||
if (ws?.current?.readyState === WebSocket.OPEN) {
|
||||
ws.current.close();
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<div className={TrafficViewerStyles.TrafficPage}>
|
||||
{tappingStatus && isShowStatusBar && <StatusBar />}
|
||||
{tappingStatus && isShowStatusBar && <StatusBar isDemoBannerView={isDemoBannerView} />}
|
||||
<div className={TrafficViewerStyles.TrafficPageHeader}>
|
||||
<div className={TrafficViewerStyles.TrafficPageStreamStatus}>
|
||||
<img className={TrafficViewerStyles.playPauseIcon} style={{ visibility: wsConnection === WsConnectionStatus.Connected ? "visible" : "hidden" }} alt="pause"
|
||||
<img className={TrafficViewerStyles.playPauseIcon} style={{ visibility: ws?.current?.readyState === WebSocket.OPEN ? "visible" : "hidden" }} alt="pause"
|
||||
src={pauseIcon} onClick={toggleConnection} />
|
||||
<img className={TrafficViewerStyles.playPauseIcon} style={{ position: "absolute", visibility: wsConnection === WsConnectionStatus.Connected ? "hidden" : "visible" }} alt="play"
|
||||
<img className={TrafficViewerStyles.playPauseIcon} style={{ position: "absolute", visibility: ws?.current?.readyState === WebSocket.OPEN ? "hidden" : "visible" }} alt="play"
|
||||
src={playIcon} onClick={toggleConnection} />
|
||||
<div className={TrafficViewerStyles.connectionText}>
|
||||
{getConnectionTitle()}
|
||||
@@ -311,6 +335,7 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
|
||||
truncatedTimestamp={truncatedTimestamp}
|
||||
setTruncatedTimestamp={setTruncatedTimestamp}
|
||||
scrollableRef={scrollableRef}
|
||||
ws={ws}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
@@ -324,16 +349,28 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
|
||||
setAddressesWithTLS={setAddressesWithTLS}
|
||||
userDismissedTLSWarning={userDismissedTLSWarning}
|
||||
setUserDismissedTLSWarning={setUserDismissedTLSWarning} />
|
||||
<ToastContainer/>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
const MemoiedTrafficViewer = React.memo(TrafficViewer)
|
||||
const TrafficViewerContainer: React.FC<TrafficViewerProps> = ({ setAnalyzeStatus, trafficViewerApiProp, actionButtons, isShowStatusBar = true ,webSocketUrl}) => {
|
||||
const TrafficViewerContainer: React.FC<TrafficViewerProps> = ({ setAnalyzeStatus, trafficViewerApiProp,
|
||||
actionButtons, isShowStatusBar = true,
|
||||
webSocketUrl, isCloseWebSocket, isDemoBannerView }) => {
|
||||
return <RecoilRoot>
|
||||
<MemoiedTrafficViewer actionButtons={actionButtons} isShowStatusBar={isShowStatusBar} webSocketUrl={webSocketUrl}
|
||||
trafficViewerApiProp={trafficViewerApiProp} setAnalyzeStatus={setAnalyzeStatus} />
|
||||
isCloseWebSocket={isCloseWebSocket} trafficViewerApiProp={trafficViewerApiProp}
|
||||
setAnalyzeStatus={setAnalyzeStatus} isDemoBannerView={isDemoBannerView} />
|
||||
<ToastContainer enableMultiContainer containerId={TOAST_CONTAINER_ID}
|
||||
position="bottom-right"
|
||||
autoClose={5000}
|
||||
hideProgressBar={false}
|
||||
newestOnTop={false}
|
||||
closeOnClick
|
||||
rtl={false}
|
||||
pauseOnFocusLoss
|
||||
draggable
|
||||
pauseOnHover />
|
||||
</RecoilRoot>
|
||||
}
|
||||
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
type TrafficViewerApi = {
|
||||
validateQuery : (query: any) => any
|
||||
tapStatus : () => any
|
||||
analyzeStatus : () => any
|
||||
fetchEntries : (leftOff: any, direction: number, query: any, limit: number, timeoutMs: number) => any
|
||||
getEntry : (entryId : any, query:string) => any
|
||||
getRecentTLSLinks : () => any,
|
||||
webSocket : {
|
||||
close : () => {}
|
||||
}
|
||||
validateQuery: (query: any) => any
|
||||
tapStatus: () => any
|
||||
analyzeStatus: () => any
|
||||
fetchEntries: (leftOff: any, direction: number, query: any, limit: number, timeoutMs: number) => any
|
||||
getEntry: (entryId: any, query: string) => any
|
||||
getRecentTLSLinks: () => any,
|
||||
webSocket: {
|
||||
close: () => void
|
||||
}
|
||||
}
|
||||
|
||||
export default TrafficViewerApi
|
||||
export default TrafficViewerApi
|
||||
|
||||
@@ -6,11 +6,11 @@ export interface Props {
|
||||
disabled?: boolean;
|
||||
}
|
||||
|
||||
const Checkbox: React.FC<Props> = ({checked, onToggle, disabled}) => {
|
||||
const Checkbox: React.FC<Props> = ({checked, onToggle, disabled, ...props}) => {
|
||||
|
||||
return (
|
||||
<div>
|
||||
<input style={!disabled ? {cursor: "pointer"}: {}} type="checkbox" checked={checked} disabled={disabled} onChange={(event) => onToggle(event.target.checked)}/>
|
||||
<input style={!disabled ? {cursor: "pointer"}: {}} type="checkbox" checked={checked} disabled={disabled} onChange={(event) => onToggle(event.target.checked)} {...props}/>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
ui-common/src/components/UI/InformationIcon.tsx (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
import React, { CSSProperties } from "react";
|
||||
import infoImg from 'assets/info.svg';
|
||||
import styles from "./style/InformationIcon.module.sass"
|
||||
|
||||
const DEFAULT_LINK = "https://getmizu.io/docs"
|
||||
|
||||
export interface InformationIconProps{
|
||||
link?: string,
|
||||
style? : CSSProperties
|
||||
}
|
||||
|
||||
export const InformationIcon: React.FC<InformationIconProps> = ({link,style}) => {
|
||||
return <React.Fragment>
|
||||
<a href={link ? link : DEFAULT_LINK} style={style} className={styles.flex} title="documentation" target="_blank">
|
||||
<img className="headerIcon" src={infoImg} alt="Info icon"/>
|
||||
</a>
|
||||
</React.Fragment>
|
||||
}
|
||||
|
||||
|
||||
@@ -19,7 +19,7 @@ const menuProps: any = {
|
||||
};
|
||||
|
||||
// icons styles are not overwritten from the Props, only as a separate object
|
||||
const classes = {icon: styles.icon, selectMenu: styles.list};
|
||||
const classes = {icon: styles.icon, selectMenu: styles.list, select: styles.oasSelect, root:styles.root};
|
||||
|
||||
const defaultProps = {
|
||||
MenuProps: menuProps,
|
||||
|
||||
@@ -10,12 +10,16 @@ const pluralize = (noun: string, amount: number) => {
|
||||
return `${noun}${amount !== 1 ? 's' : ''}`
|
||||
}
|
||||
|
||||
export const StatusBar = () => {
|
||||
interface StatusBarProps {
|
||||
isDemoBannerView: boolean;
|
||||
}
|
||||
|
||||
export const StatusBar = ({isDemoBannerView}) => {
|
||||
const tappingStatus = useRecoilValue(tappingStatusAtom);
|
||||
const [expandedBar, setExpandedBar] = useState(false);
|
||||
const {uniqueNamespaces, amountOfPods, amountOfTappedPods, amountOfUntappedPods} = useRecoilValue(tappingStatusDetails);
|
||||
|
||||
return <div className={`${style.statusBar} ${(expandedBar ? `${style.expandedStatusBar}` : "")}`} onMouseOver={() => setExpandedBar(true)} onMouseLeave={() => setExpandedBar(false)} data-cy="expandedStatusBar">
|
||||
return <div className={`${isDemoBannerView ? `${style.banner}` : ''} ${style.statusBar} ${(expandedBar ? `${style.expandedStatusBar}` : "")}`} onMouseOver={() => setExpandedBar(true)} onMouseLeave={() => setExpandedBar(false)} data-cy="expandedStatusBar">
|
||||
<div className={style.podsCount}>
|
||||
{tappingStatus.some(pod => !pod.isTapped) && <img src={warningIcon} alt="warning"/>}
|
||||
<span className={style.podsCountText} data-cy="podsCountText">
|
||||
|
||||
@@ -15,7 +15,7 @@ interface Props {
|
||||
classes?: any,
|
||||
tabs: Tab[],
|
||||
currentTab: string,
|
||||
color: string,
|
||||
color?: string,
|
||||
onChange: (string) => void,
|
||||
leftAligned?: boolean,
|
||||
dark?: boolean,
|
||||
@@ -76,6 +76,7 @@ const Tabs: React.FC<Props> = ({classes={}, tabs, currentTab, color, onChange, l
|
||||
{tabs.map(({tab, disabled, disabledMessage, highlight, badge}, index) => {
|
||||
const active = currentTab === tab;
|
||||
const tabLink = <span
|
||||
data-cy={"tab-" + tab}
|
||||
key={tab}
|
||||
className={`${_classes.tab} ${active ? _classes.active : ''} ${disabled ? _classes.disabled : ''} ${highlight ? _classes.highlight : ''} ${dark ? 'dark' : ''}`}
|
||||
onClick={() => !disabled && onChange(tab)}
|
||||
|
||||
ui-common/src/components/UI/assets/info.svg (new file, 5 lines)
@@ -0,0 +1,5 @@
|
||||
<svg width="24" height="24" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M19 21H6.14286C5.07143 21 4 20.32 4 18.96C4 17.6 5.07143 16.92 6.14286 16.92H19V4H6.14286C5.07143 4 4 5.02 4 6.04V18.96M16.8571 17.6V20.32V17.6Z" stroke="#627EF7" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
|
||||
<rect x="8" y="7" width="7" height="2" fill="#627EF7"/>
|
||||
<rect x="8" y="11" width="4" height="2" fill="#627EF7"/>
|
||||
</svg>
|
||||
|
@@ -5,7 +5,8 @@ import Tooltip from "./Tooltip";
|
||||
import Checkbox from "./Checkbox"
|
||||
import { StatusBar } from "./StatusBar";
|
||||
import CustomModal from "./CustomModal";
|
||||
import { InformationIcon } from "./InformationIcon";
|
||||
|
||||
|
||||
export {LoadingOverlay,Select,Tabs,Tooltip,Checkbox,CustomModal}
|
||||
export {LoadingOverlay,Select,Tabs,Tooltip,Checkbox,CustomModal,InformationIcon}
|
||||
export {StatusBar}
|
||||
@@ -0,0 +1,2 @@
|
||||
.flex
|
||||
display: flex
|
||||
@@ -3,3 +3,15 @@
|
||||
|
||||
.list
|
||||
margin-top: 8px
|
||||
|
||||
.oasSelect
|
||||
font-weight: normal
|
||||
padding: 8px 4px 8px 12px !important
|
||||
border: 1px solid #9d9d9d !important
|
||||
border-radius: 9px !important
|
||||
font-family: Source Sans Pro, Lucida Grande, Tahoma, sans-serif !important
|
||||
width: 216px !important
|
||||
|
||||
.root
|
||||
font-family: Source Sans Pro, Lucida Grande, Tahoma, sans-serif !important
|
||||
|
||||
|
||||
@@ -19,6 +19,9 @@
|
||||
overflow: hidden
|
||||
max-width: clamp(150px,50%,600px)
|
||||
|
||||
&.banner
|
||||
top: 53px
|
||||
|
||||
.podsCount
|
||||
display: flex
|
||||
justify-content: center
|
||||
|
||||
ui-common/src/configs/Consts.ts (new file, 1 line)
@@ -0,0 +1 @@
|
||||
export const TOAST_CONTAINER_ID = "Common";
|
||||
ui-common/src/hooks/WindowDimensionsHook.tsx (new file, 43 lines)
@@ -0,0 +1,43 @@
|
||||
import { useState, useEffect } from 'react';
|
||||
|
||||
function getWindowDimensions() {
|
||||
const { innerWidth: width, innerHeight: height } = window;
|
||||
return {
|
||||
width,
|
||||
height
|
||||
};
|
||||
}
|
||||
|
||||
export function useRequestTextByWidth(windowWidth){
|
||||
|
||||
let requestText = "Request: "
|
||||
let responseText = "Response: "
|
||||
let elapsedTimeText = "Elapsed Time: "
|
||||
|
||||
if (windowWidth < 1078) {
|
||||
requestText = ""
|
||||
responseText = ""
|
||||
elapsedTimeText = ""
|
||||
} else if (windowWidth < 1356) {
|
||||
requestText = "Req: "
|
||||
responseText = "Res: "
|
||||
elapsedTimeText = "ET: "
|
||||
}
|
||||
|
||||
return {requestText, responseText, elapsedTimeText}
|
||||
}
|
||||
|
||||
export default function useWindowDimensions() {
|
||||
const [windowDimensions, setWindowDimensions] = useState(getWindowDimensions());
|
||||
|
||||
useEffect(() => {
|
||||
function handleResize() {
|
||||
setWindowDimensions(getWindowDimensions());
|
||||
}
|
||||
|
||||
window.addEventListener('resize', handleResize);
|
||||
return () => window.removeEventListener('resize', handleResize);
|
||||
}, []);
|
||||
|
||||
return windowDimensions;
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
import { atom } from "recoil";
|
||||
|
||||
const wsConnectionAtom = atom({
|
||||
key: "wsConnectionAtom",
|
||||
default: 0
|
||||
});
|
||||
|
||||
export default wsConnectionAtom;
|
||||
@@ -1,10 +0,0 @@
|
||||
import atom from "./atom";
|
||||
|
||||
enum WsConnectionStatus {
|
||||
Closed,
|
||||
Connected,
|
||||
}
|
||||
|
||||
export {WsConnectionStatus};
|
||||
|
||||
export default atom
|
||||
@@ -13,7 +13,7 @@
|
||||
"@types/jest": "^26.0.22",
|
||||
"@types/node": "^12.20.10",
|
||||
"@uiw/react-textarea-code-editor": "^1.4.12",
|
||||
"@up9/mizu-common": "^1.0.128",
|
||||
"@up9/mizu-common": "1.0.139",
|
||||
"axios": "^0.25.0",
|
||||
"core-js": "^3.20.2",
|
||||
"craco-babel-loader": "^1.0.3",
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.