Compare commits

..

11 Commits

Author SHA1 Message Date
Igor Gov
41a7587088 Remove redundant Google auth from test workflow (#911)
* Remove google auth for test workflow
2022-03-23 11:51:42 +02:00
David Levanon
12f46da5c6 Support TLS big buffers (#893) 2022-03-23 11:27:06 +02:00
Adam Kol
17f7879cff Fixing Tabs for Ent (#909)
Co-authored-by: gadotroee <55343099+gadotroee@users.noreply.github.com>
2022-03-22 16:04:04 +02:00
lirazyehezkel
bc7776cbd3 Fix ws errors and warnings (#908)
* Fix ws errors and warnings

* versioning
2022-03-22 15:43:05 +02:00
gadotroee
2a31739100 Fix acceptance test (body size and right side pane changes) (#907) 2022-03-22 09:56:34 +02:00
M. Mert Yıldıran
308fa78955 TRA-4383 Calculate request and response sizes and display them instead of BodySize field (#897)
* Define `ReadProgress` struct and update `Dissector` interface such that the `bufio.Reader` progress can be learned on item emitting

* Display the `requestSize` and `responseSize` fields in the UI

* Update the tests

* publish ui-common version 1.0.130 and bump to this version in ui/package.json file

Co-authored-by: gadotroee <55343099+gadotroee@users.noreply.github.com>
Co-authored-by: Roee Gadot <roee.gadot@up9.com>
2022-03-21 19:34:59 +02:00
RoyUP9
cff5987ed4 Added check pre install (#905) 2022-03-21 17:19:04 +02:00
Adam Kol
7893b4596d closing ws on modal open (#904)
Co-authored-by: gadotroee <55343099+gadotroee@users.noreply.github.com>
2022-03-21 16:44:40 +02:00
M. Mert Yıldıran
774f07fccd Add /db/flush and /db/reset API endpoints (#896)
* Add `/db/flush` and `/db/reset` API endpoints

* Handle the unmarshalling errors better in the WebSocket

* Handle Basenine connection error better in the WebSocket

* Upgrade to Basenine `v0.6.5`

* Fix the duplicated `StartTime` state

Co-authored-by: gadotroee <55343099+gadotroee@users.noreply.github.com>
2022-03-21 15:54:36 +02:00
RoyUP9
482e5c8b69 Added check pull image flag (#899)
Co-authored-by: gadotroee <55343099+gadotroee@users.noreply.github.com>
2022-03-21 15:24:03 +02:00
RamiBerm
21902b5f86 Fix tapping status falling out of sync (#898)
Co-authored-by: gadotroee <55343099+gadotroee@users.noreply.github.com>
2022-03-21 14:54:25 +02:00
68 changed files with 1067 additions and 775 deletions

View File

@@ -34,14 +34,6 @@ jobs:
run: | run: |
sudo apt-get install libpcap-dev sudo apt-get install libpcap-dev
- id: 'auth'
uses: 'google-github-actions/auth@v0'
with:
credentials_json: '${{ secrets.GCR_JSON_KEY }}'
- name: 'Set up Cloud SDK'
uses: 'google-github-actions/setup-gcloud@v0'
- name: Check CLI modified files - name: Check CLI modified files
id: cli_modified_files id: cli_modified_files
run: devops/check_modified_files.sh cli/ run: devops/check_modified_files.sh cli/

View File

@@ -77,8 +77,8 @@ RUN go build -ldflags="-extldflags=-static -s -w \
-X 'github.com/up9inc/mizu/agent/pkg/version.Ver=${VER}'" -o mizuagent . -X 'github.com/up9inc/mizu/agent/pkg/version.Ver=${VER}'" -o mizuagent .
# Download Basenine executable, verify the sha1sum # Download Basenine executable, verify the sha1sum
ADD https://github.com/up9inc/basenine/releases/download/v0.6.3/basenine_linux_${GOARCH} ./basenine_linux_${GOARCH} ADD https://github.com/up9inc/basenine/releases/download/v0.6.5/basenine_linux_${GOARCH} ./basenine_linux_${GOARCH}
ADD https://github.com/up9inc/basenine/releases/download/v0.6.3/basenine_linux_${GOARCH}.sha256 ./basenine_linux_${GOARCH}.sha256 ADD https://github.com/up9inc/basenine/releases/download/v0.6.5/basenine_linux_${GOARCH}.sha256 ./basenine_linux_${GOARCH}.sha256
RUN shasum -a 256 -c basenine_linux_${GOARCH}.sha256 RUN shasum -a 256 -c basenine_linux_${GOARCH}.sha256
RUN chmod +x ./basenine_linux_${GOARCH} RUN chmod +x ./basenine_linux_${GOARCH}
RUN mv ./basenine_linux_${GOARCH} ./basenine RUN mv ./basenine_linux_${GOARCH} ./basenine

View File

@@ -142,7 +142,9 @@ function deepCheck(generalDict, protocolDict, methodDict, entry) {
if (value) { if (value) {
if (value.tab === valueTabs.response) if (value.tab === valueTabs.response)
cy.contains('Response').click(); // temporary fix, change to some "data-cy" attribute,
// this will fix the issue that happen because we have "response:" in the header of the right side
cy.get('#rightSideContainer > :nth-child(3)').contains('Response').click();
cy.get(Cypress.env('bodyJsonClass')).then(text => { cy.get(Cypress.env('bodyJsonClass')).then(text => {
expect(text.text()).to.match(value.regex) expect(text.text()).to.match(value.regex)
}); });

View File

@@ -40,32 +40,23 @@ it('filtering guide check', function () {
}); });
it('right side sanity test', function () { it('right side sanity test', function () {
cy.get('#entryDetailedTitleBodySize').then(sizeTopLine => { cy.get('#entryDetailedTitleElapsedTime').then(timeInMs => {
const sizeOnTopLine = sizeTopLine.text().replace(' B', ''); const time = timeInMs.text();
cy.contains('Response').click(); if (time < '0ms') {
cy.contains('Body Size (bytes)').parent().next().then(size => { throw new Error(`The time in the top line cannot be negative ${time}`);
const bodySizeByDetails = size.text(); }
expect(sizeOnTopLine).to.equal(bodySizeByDetails, 'The body size in the top line should match the details in the response'); });
if (parseInt(bodySizeByDetails) < 0) { // temporary fix, change to some "data-cy" attribute,
throw new Error(`The body size cannot be negative. got the size: ${bodySizeByDetails}`) // this will fix the issue that happen because we have "response:" in the header of the right side
} cy.get('#rightSideContainer > :nth-child(3)').contains('Response').click();
cy.get('#entryDetailedTitleElapsedTime').then(timeInMs => { cy.get('#rightSideContainer [title="Status Code"]').then(status => {
const time = timeInMs.text(); const statusCode = status.text();
if (time < '0ms') { cy.contains('Status').parent().next().then(statusInDetails => {
throw new Error(`The time in the top line cannot be negative ${time}`); const statusCodeInDetails = statusInDetails.text();
}
cy.get('#rightSideContainer [title="Status Code"]').then(status => { expect(statusCode).to.equal(statusCodeInDetails, 'The status code in the top line should match the status code in details');
const statusCode = status.text();
cy.contains('Status').parent().next().then(statusInDetails => {
const statusCodeInDetails = statusInDetails.text();
expect(statusCode).to.equal(statusCodeInDetails, 'The status code in the top line should match the status code in details');
});
});
});
}); });
}); });
}); });
@@ -252,7 +243,9 @@ function deeperChcek(leftSidePath, rightSidePath, filterName, leftSideExpectedTe
} }
function checkRightSideResponseBody() { function checkRightSideResponseBody() {
cy.contains('Response').click(); // temporary fix, change to some "data-cy" attribute,
// this will fix the issue that happen because we have "response:" in the header of the right side
cy.get('#rightSideContainer > :nth-child(3)').contains('Response').click();
clickCheckbox('Decode Base64'); clickCheckbox('Decode Base64');
cy.get(`${Cypress.env('bodyJsonClass')}`).then(value => { cy.get(`${Cypress.env('bodyJsonClass')}`).then(value => {

View File

@@ -20,7 +20,7 @@ require (
github.com/orcaman/concurrent-map v1.0.0 github.com/orcaman/concurrent-map v1.0.0
github.com/patrickmn/go-cache v2.1.0+incompatible github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/stretchr/testify v1.7.0 github.com/stretchr/testify v1.7.0
github.com/up9inc/basenine/client/go v0.0.0-20220315070758-3a76cfc4378e github.com/up9inc/basenine/client/go v0.0.0-20220317230530-8472d80307f6
github.com/up9inc/mizu/shared v0.0.0 github.com/up9inc/mizu/shared v0.0.0
github.com/up9inc/mizu/tap v0.0.0 github.com/up9inc/mizu/tap v0.0.0
github.com/up9inc/mizu/tap/api v0.0.0 github.com/up9inc/mizu/tap/api v0.0.0

View File

@@ -655,8 +655,8 @@ github.com/ugorji/go v1.2.6/go.mod h1:anCg0y61KIhDlPZmnH+so+RQbysYVyDko0IMgJv0Nn
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ= github.com/ugorji/go/codec v1.2.6 h1:7kbGefxLoDBuYXOms4yD7223OpNMMPNPZxXk5TvFcyQ=
github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw= github.com/ugorji/go/codec v1.2.6/go.mod h1:V6TCNZ4PHqoHGFZuSG1W8nrCzzdgA2DozYxWFFpvxTw=
github.com/up9inc/basenine/client/go v0.0.0-20220315070758-3a76cfc4378e h1:/9dFXqvRDHcwPQdIGHP6iz6M0iAWBPOxYf6C+Ntq5w0= github.com/up9inc/basenine/client/go v0.0.0-20220317230530-8472d80307f6 h1:c0aVbLKYeFDAg246+NDgie2y484bsc20NaKLo8ODV3E=
github.com/up9inc/basenine/client/go v0.0.0-20220315070758-3a76cfc4378e/go.mod h1:SvJGPoa/6erhUQV7kvHBwM/0x5LyO6XaG2lUaCaKiUI= github.com/up9inc/basenine/client/go v0.0.0-20220317230530-8472d80307f6/go.mod h1:SvJGPoa/6erhUQV7kvHBwM/0x5LyO6XaG2lUaCaKiUI=
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg= github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74 h1:gga7acRE695APm9hlsSMoOoE65U4/TcqNj90mc69Rlg=
github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/wI2L/jsondiff v0.1.1 h1:r2TkoEet7E4JMO5+s1RCY2R0LrNPNHY6hbDeow2hRHw= github.com/wI2L/jsondiff v0.1.1 h1:r2TkoEet7E4JMO5+s1RCY2R0LrNPNHY6hbDeow2hRHw=

View File

@@ -47,7 +47,6 @@ var apiServerAddress = flag.String("api-server-address", "", "Address of mizu AP
var namespace = flag.String("namespace", "", "Resolve IPs if they belong to resources in this namespace (default is all)") var namespace = flag.String("namespace", "", "Resolve IPs if they belong to resources in this namespace (default is all)")
var harsReaderMode = flag.Bool("hars-read", false, "Run in hars-read mode") var harsReaderMode = flag.Bool("hars-read", false, "Run in hars-read mode")
var harsDir = flag.String("hars-dir", "", "Directory to read hars from") var harsDir = flag.String("hars-dir", "", "Directory to read hars from")
var startTime int64
const ( const (
socketConnectionRetries = 30 socketConnectionRetries = 30
@@ -110,7 +109,7 @@ func hostApi(socketHarOutputChannel chan<- *tapApi.OutputChannelItem) *gin.Engin
app.Use(middlewares.CORSMiddleware()) // This has to be called after the static middleware, does not work if its called before app.Use(middlewares.CORSMiddleware()) // This has to be called after the static middleware, does not work if its called before
api.WebSocketRoutes(app, &eventHandlers, startTime) api.WebSocketRoutes(app, &eventHandlers)
if config.Config.OAS { if config.Config.OAS {
routes.OASRoutes(app) routes.OASRoutes(app)
@@ -124,6 +123,7 @@ func hostApi(socketHarOutputChannel chan<- *tapApi.OutputChannelItem) *gin.Engin
routes.EntriesRoutes(app) routes.EntriesRoutes(app)
routes.MetadataRoutes(app) routes.MetadataRoutes(app)
routes.StatusRoutes(app) routes.StatusRoutes(app)
routes.DbRoutes(app)
return app return app
} }
@@ -133,7 +133,6 @@ func runInApiServerMode(namespace string) *gin.Engine {
logger.Log.Fatalf("Error loading config file %v", err) logger.Log.Fatalf("Error loading config file %v", err)
} }
app.ConfigureBasenineServer(shared.BasenineHost, shared.BaseninePort, config.Config.MaxDBSizeBytes, config.Config.LogLevel, config.Config.InsertionFilter) app.ConfigureBasenineServer(shared.BasenineHost, shared.BaseninePort, config.Config.MaxDBSizeBytes, config.Config.LogLevel, config.Config.InsertionFilter)
startTime = time.Now().UnixNano() / int64(time.Millisecond)
api.StartResolving(namespace) api.StartResolving(namespace)
enableExpFeatureIfNeeded() enableExpFeatureIfNeeded()

View File

@@ -8,6 +8,7 @@ import (
"time" "time"
"github.com/up9inc/mizu/agent/pkg/models" "github.com/up9inc/mizu/agent/pkg/models"
"github.com/up9inc/mizu/agent/pkg/utils"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
"github.com/gorilla/websocket" "github.com/gorilla/websocket"
@@ -59,13 +60,13 @@ func init() {
connectedWebsockets = make(map[int]*SocketConnection) connectedWebsockets = make(map[int]*SocketConnection)
} }
func WebSocketRoutes(app *gin.Engine, eventHandlers EventHandlers, startTime int64) { func WebSocketRoutes(app *gin.Engine, eventHandlers EventHandlers) {
SocketGetBrowserHandler = func(c *gin.Context) { SocketGetBrowserHandler = func(c *gin.Context) {
websocketHandler(c.Writer, c.Request, eventHandlers, false, startTime) websocketHandler(c.Writer, c.Request, eventHandlers, false)
} }
SocketGetTapperHandler = func(c *gin.Context) { SocketGetTapperHandler = func(c *gin.Context) {
websocketHandler(c.Writer, c.Request, eventHandlers, true, startTime) websocketHandler(c.Writer, c.Request, eventHandlers, true)
} }
app.GET("/ws", func(c *gin.Context) { app.GET("/ws", func(c *gin.Context) {
@@ -77,7 +78,7 @@ func WebSocketRoutes(app *gin.Engine, eventHandlers EventHandlers, startTime int
}) })
} }
func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers EventHandlers, isTapper bool, startTime int64) { func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers EventHandlers, isTapper bool) {
ws, err := websocketUpgrader.Upgrade(w, r, nil) ws, err := websocketUpgrader.Upgrade(w, r, nil)
if err != nil { if err != nil {
logger.Log.Errorf("Failed to set websocket upgrade: %v", err) logger.Log.Errorf("Failed to set websocket upgrade: %v", err)
@@ -99,7 +100,9 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even
if !isTapper { if !isTapper {
connection, err = basenine.NewConnection(shared.BasenineHost, shared.BaseninePort) connection, err = basenine.NewConnection(shared.BasenineHost, shared.BaseninePort)
if err != nil { if err != nil {
panic(err) logger.Log.Errorf("Failed to establish a connection to Basenine: %v", err)
socketCleanup(socketId, connectedWebsockets[socketId])
return
} }
} }
@@ -115,7 +118,7 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even
eventHandlers.WebSocketConnect(socketId, isTapper) eventHandlers.WebSocketConnect(socketId, isTapper)
startTimeBytes, _ := models.CreateWebsocketStartTimeMessage(startTime) startTimeBytes, _ := models.CreateWebsocketStartTimeMessage(utils.StartTime)
if err = SendToSocket(socketId, startTimeBytes); err != nil { if err = SendToSocket(socketId, startTimeBytes); err != nil {
logger.Log.Error(err) logger.Log.Error(err)
@@ -137,7 +140,8 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even
if !isTapper && !isQuerySet { if !isTapper && !isQuerySet {
if err := json.Unmarshal(msg, &params); err != nil { if err := json.Unmarshal(msg, &params); err != nil {
logger.Log.Errorf("Error: %v", socketId, err) logger.Log.Errorf("Error unmarshalling parameters: %v", socketId, err)
continue
} }
query := params.Query query := params.Query
@@ -166,6 +170,10 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even
var entry *tapApi.Entry var entry *tapApi.Entry
err = json.Unmarshal(bytes, &entry) err = json.Unmarshal(bytes, &entry)
if err != nil {
logger.Log.Debugf("Error unmarshalling entry: %v", err.Error())
continue
}
var message []byte var message []byte
if params.EnableFullEntries { if params.EnableFullEntries {
@@ -193,7 +201,8 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even
var metadata *basenine.Metadata var metadata *basenine.Metadata
err = json.Unmarshal(bytes, &metadata) err = json.Unmarshal(bytes, &metadata)
if err != nil { if err != nil {
logger.Log.Debugf("Error recieving metadata: %v", err.Error()) logger.Log.Debugf("Error unmarshalling metadata: %v", err.Error())
continue
} }
metadataBytes, _ := models.CreateWebsocketQueryMetadataMessage(metadata) metadataBytes, _ := models.CreateWebsocketQueryMetadataMessage(metadata)

View File

@@ -9,7 +9,7 @@ import (
"github.com/op/go-logging" "github.com/op/go-logging"
basenine "github.com/up9inc/basenine/client/go" basenine "github.com/up9inc/basenine/client/go"
"github.com/up9inc/mizu/agent/pkg/api" "github.com/up9inc/mizu/agent/pkg/api"
"github.com/up9inc/mizu/agent/pkg/controllers" "github.com/up9inc/mizu/agent/pkg/utils"
"github.com/up9inc/mizu/shared/logger" "github.com/up9inc/mizu/shared/logger"
tapApi "github.com/up9inc/mizu/tap/api" tapApi "github.com/up9inc/mizu/tap/api"
amqpExt "github.com/up9inc/mizu/tap/extensions/amqp" amqpExt "github.com/up9inc/mizu/tap/extensions/amqp"
@@ -59,7 +59,6 @@ func LoadExtensions() {
return Extensions[i].Protocol.Priority < Extensions[j].Protocol.Priority return Extensions[i].Protocol.Priority < Extensions[j].Protocol.Priority
}) })
controllers.InitExtensionsMap(ExtensionsMap)
api.InitExtensionsMap(ExtensionsMap) api.InitExtensionsMap(ExtensionsMap)
} }
@@ -92,6 +91,8 @@ func ConfigureBasenineServer(host string, port string, dbSize int64, logLevel lo
if err := basenine.InsertionFilter(host, port, insertionFilter); err != nil { if err := basenine.InsertionFilter(host, port, insertionFilter); err != nil {
logger.Log.Errorf("Error while setting the insertion filter: %v", err) logger.Log.Errorf("Error while setting the insertion filter: %v", err)
} }
utils.StartTime = time.Now().UnixNano() / int64(time.Millisecond)
} }
func GetEntryInputChannel() chan *tapApi.OutputChannelItem { func GetEntryInputChannel() chan *tapApi.OutputChannelItem {

View File

@@ -0,0 +1,28 @@
package controllers
import (
"net/http"
"github.com/gin-gonic/gin"
basenine "github.com/up9inc/basenine/client/go"
"github.com/up9inc/mizu/agent/pkg/app"
"github.com/up9inc/mizu/agent/pkg/config"
"github.com/up9inc/mizu/shared"
)
func Flush(c *gin.Context) {
if err := basenine.Flush(shared.BasenineHost, shared.BaseninePort); err != nil {
c.JSON(http.StatusBadRequest, err)
} else {
c.JSON(http.StatusOK, "Flushed.")
}
}
func Reset(c *gin.Context) {
if err := basenine.Reset(shared.BasenineHost, shared.BaseninePort); err != nil {
c.JSON(http.StatusBadRequest, err)
} else {
app.ConfigureBasenineServer(shared.BasenineHost, shared.BaseninePort, config.Config.MaxDBSizeBytes, config.Config.LogLevel, config.Config.InsertionFilter)
c.JSON(http.StatusOK, "Resetted.")
}
}

View File

@@ -6,6 +6,7 @@ import (
"strconv" "strconv"
"time" "time"
"github.com/up9inc/mizu/agent/pkg/app"
"github.com/up9inc/mizu/agent/pkg/har" "github.com/up9inc/mizu/agent/pkg/har"
"github.com/up9inc/mizu/agent/pkg/models" "github.com/up9inc/mizu/agent/pkg/models"
"github.com/up9inc/mizu/agent/pkg/validation" "github.com/up9inc/mizu/agent/pkg/validation"
@@ -18,12 +19,6 @@ import (
tapApi "github.com/up9inc/mizu/tap/api" tapApi "github.com/up9inc/mizu/tap/api"
) )
var extensionsMap map[string]*tapApi.Extension // global
func InitExtensionsMap(ref map[string]*tapApi.Extension) {
extensionsMap = ref
}
func Error(c *gin.Context, err error) bool { func Error(c *gin.Context, err error) bool {
if err != nil { if err != nil {
logger.Log.Errorf("Error getting entry: %v", err) logger.Log.Errorf("Error getting entry: %v", err)
@@ -77,7 +72,7 @@ func GetEntries(c *gin.Context) {
return // exit return // exit
} }
extension := extensionsMap[entry.Protocol.Name] extension := app.ExtensionsMap[entry.Protocol.Name]
base := extension.Dissector.Summarize(entry) base := extension.Dissector.Summarize(entry)
dataSlice = append(dataSlice, base) dataSlice = append(dataSlice, base)
@@ -123,9 +118,19 @@ func GetEntry(c *gin.Context) {
return // exit return // exit
} }
extension := extensionsMap[entry.Protocol.Name] extension := app.ExtensionsMap[entry.Protocol.Name]
base := extension.Dissector.Summarize(entry) base := extension.Dissector.Summarize(entry)
representation, bodySize, _ := extension.Dissector.Represent(entry.Request, entry.Response) var representation []byte
representation, err = extension.Dissector.Represent(entry.Request, entry.Response)
if err != nil {
c.JSON(http.StatusNotFound, gin.H{
"error": true,
"type": "error",
"autoClose": "5000",
"msg": err.Error(),
})
return // exit
}
var rules []map[string]interface{} var rules []map[string]interface{}
var isRulesEnabled bool var isRulesEnabled bool
@@ -142,7 +147,6 @@ func GetEntry(c *gin.Context) {
c.JSON(http.StatusOK, tapApi.EntryWrapper{ c.JSON(http.StatusOK, tapApi.EntryWrapper{
Protocol: entry.Protocol, Protocol: entry.Protocol,
Representation: string(representation), Representation: string(representation),
BodySize: bodySize,
Data: entry, Data: entry,
Base: base, Base: base,
Rules: rules, Rules: rules,

View File

@@ -0,0 +1,15 @@
package routes
import (
"github.com/up9inc/mizu/agent/pkg/controllers"
"github.com/gin-gonic/gin"
)
// DdRoutes defines the group of database routes.
func DbRoutes(app *gin.Engine) {
routeGroup := app.Group("/db")
routeGroup.GET("/flush", controllers.Flush)
routeGroup.GET("/reset", controllers.Reset)
}

View File

@@ -17,6 +17,10 @@ import (
"github.com/up9inc/mizu/shared/logger" "github.com/up9inc/mizu/shared/logger"
) )
var (
StartTime int64 // global
)
// StartServer starts the server with a graceful shutdown // StartServer starts the server with a graceful shutdown
func StartServer(app *gin.Engine) { func StartServer(app *gin.Engine) {
signals := make(chan os.Signal, 2) signals := make(chan os.Signal, 2)

View File

@@ -27,4 +27,6 @@ func init() {
} }
checkCmd.Flags().Bool(configStructs.PreTapCheckName, defaultCheckConfig.PreTap, "Check pre-tap Mizu installation for potential problems") checkCmd.Flags().Bool(configStructs.PreTapCheckName, defaultCheckConfig.PreTap, "Check pre-tap Mizu installation for potential problems")
checkCmd.Flags().Bool(configStructs.PreInstallCheckName, defaultCheckConfig.PreInstall, "Check pre-install Mizu installation for potential problems")
checkCmd.Flags().Bool(configStructs.ImagePullCheckName, defaultCheckConfig.ImagePull, "Test connectivity to container image registry by creating and removing a temporary pod in 'default' namespace")
} }

View File

@@ -0,0 +1,102 @@
package check
import (
"context"
"fmt"
"github.com/up9inc/mizu/cli/uiUtils"
"github.com/up9inc/mizu/shared/kubernetes"
"github.com/up9inc/mizu/shared/logger"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"regexp"
"time"
)
func ImagePullInCluster(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
logger.Log.Infof("\nimage-pull-in-cluster\n--------------------")
namespace := "default"
podName := "mizu-test"
defer func() {
if err := kubernetesProvider.RemovePod(ctx, namespace, podName); err != nil {
logger.Log.Errorf("%v error while removing test pod in cluster, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
}
}()
if err := createImagePullInClusterPod(ctx, kubernetesProvider, namespace, podName); err != nil {
logger.Log.Errorf("%v error while creating test pod in cluster, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
if err := checkImagePulled(ctx, kubernetesProvider, namespace, podName); err != nil {
logger.Log.Errorf("%v cluster is not able to pull mizu containers from docker hub, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
logger.Log.Infof("%v cluster is able to pull mizu containers from docker hub", fmt.Sprintf(uiUtils.Green, "√"))
return true
}
func checkImagePulled(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespace string, podName string) error {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", podName))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{namespace}, podWatchHelper)
timeAfter := time.After(30 * time.Second)
for {
select {
case wEvent, ok := <-eventChan:
if !ok {
eventChan = nil
continue
}
pod, err := wEvent.ToPod()
if err != nil {
return err
}
if pod.Status.Phase == core.PodRunning {
return nil
}
case err, ok := <-errorChan:
if !ok {
errorChan = nil
continue
}
return err
case <-timeAfter:
return fmt.Errorf("image not pulled in time")
}
}
}
func createImagePullInClusterPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespace string, podName string) error {
var zero int64
pod := &core.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: core.PodSpec{
Containers: []core.Container{
{
Name: "probe",
Image: "up9inc/busybox",
ImagePullPolicy: "Always",
Command: []string{"cat"},
Stdin: true,
},
},
TerminationGracePeriodSeconds: &zero,
},
}
if _, err := kubernetesProvider.CreatePod(ctx, namespace, pod); err != nil {
return err
}
return nil
}

View File

@@ -0,0 +1,31 @@
package check
import (
"fmt"
"github.com/up9inc/mizu/cli/config"
"github.com/up9inc/mizu/cli/uiUtils"
"github.com/up9inc/mizu/shared/kubernetes"
"github.com/up9inc/mizu/shared/logger"
"github.com/up9inc/mizu/shared/semver"
)
func KubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
logger.Log.Infof("\nkubernetes-api\n--------------------")
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.KubeContext)
if err != nil {
logger.Log.Errorf("%v can't initialize the client, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return nil, nil, false
}
logger.Log.Infof("%v can initialize the client", fmt.Sprintf(uiUtils.Green, "√"))
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
logger.Log.Errorf("%v can't query the Kubernetes API, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return nil, nil, false
}
logger.Log.Infof("%v can query the Kubernetes API", fmt.Sprintf(uiUtils.Green, "√"))
return kubernetesProvider, kubernetesVersion, true
}

View File

@@ -0,0 +1,131 @@
package check
import (
"context"
"embed"
"fmt"
"github.com/up9inc/mizu/cli/bucket"
"github.com/up9inc/mizu/cli/config"
"github.com/up9inc/mizu/cli/uiUtils"
"github.com/up9inc/mizu/shared/kubernetes"
"github.com/up9inc/mizu/shared/logger"
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
"strings"
)
func TapKubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool {
logger.Log.Infof("\nkubernetes-permissions\n--------------------")
var filePath string
if config.Config.IsNsRestrictedMode() {
filePath = "permissionFiles/permissions-ns-tap.yaml"
} else {
filePath = "permissionFiles/permissions-all-namespaces-tap.yaml"
}
data, err := embedFS.ReadFile(filePath)
if err != nil {
logger.Log.Errorf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
decode := scheme.Codecs.UniversalDeserializer().Decode
obj, _, err := decode(data, nil, nil)
if err != nil {
logger.Log.Errorf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
switch resource := obj.(type) {
case *rbac.Role:
return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.MizuResourcesNamespace)
case *rbac.ClusterRole:
return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, "")
}
logger.Log.Errorf("%v error while checking kubernetes permissions, err: resource of type 'Role' or 'ClusterRole' not found in permission files", fmt.Sprintf(uiUtils.Red, "✗"))
return false
}
func InstallKubernetesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
logger.Log.Infof("\nkubernetes-permissions\n--------------------")
bucketProvider := bucket.NewProvider(config.Config.Install.TemplateUrl, bucket.DefaultTimeout)
installTemplate, err := bucketProvider.GetInstallTemplate(config.Config.Install.TemplateName)
if err != nil {
logger.Log.Errorf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
resourcesTemplate := strings.Split(installTemplate, "---")[1:]
permissionsExist := true
decode := scheme.Codecs.UniversalDeserializer().Decode
for _, resourceTemplate := range resourcesTemplate {
obj, _, err := decode([]byte(resourceTemplate), nil, nil)
if err != nil {
logger.Log.Errorf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
groupVersionKind := obj.GetObjectKind().GroupVersionKind()
resource := fmt.Sprintf("%vs", strings.ToLower(groupVersionKind.Kind))
permissionsExist = checkCreatePermission(ctx, kubernetesProvider, resource, groupVersionKind.Group, obj.(metav1.Object).GetNamespace()) && permissionsExist
switch resourceObj := obj.(type) {
case *rbac.Role:
permissionsExist = checkRulesPermissions(ctx, kubernetesProvider, resourceObj.Rules, resourceObj.Namespace) && permissionsExist
case *rbac.ClusterRole:
permissionsExist = checkRulesPermissions(ctx, kubernetesProvider, resourceObj.Rules, "") && permissionsExist
}
}
return permissionsExist
}
func checkCreatePermission(ctx context.Context, kubernetesProvider *kubernetes.Provider, resource string, group string, namespace string) bool {
exist, err := kubernetesProvider.CanI(ctx, namespace, resource, "create", group)
return checkPermissionExist(group, resource, "create", namespace, exist, err)
}
func checkRulesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider, rules []rbac.PolicyRule, namespace string) bool {
permissionsExist := true
for _, rule := range rules {
for _, group := range rule.APIGroups {
for _, resource := range rule.Resources {
for _, verb := range rule.Verbs {
exist, err := kubernetesProvider.CanI(ctx, namespace, resource, verb, group)
permissionsExist = checkPermissionExist(group, resource, verb, namespace, exist, err) && permissionsExist
}
}
}
}
return permissionsExist
}
func checkPermissionExist(group string, resource string, verb string, namespace string, exist bool, err error) bool {
var groupAndNamespace string
if group != "" && namespace != "" {
groupAndNamespace = fmt.Sprintf("in group '%v' and namespace '%v'", group, namespace)
} else if group != "" {
groupAndNamespace = fmt.Sprintf("in group '%v'", group)
} else if namespace != "" {
groupAndNamespace = fmt.Sprintf("in namespace '%v'", namespace)
}
if err != nil {
logger.Log.Errorf("%v error checking permission for %v %v %v, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), verb, resource, groupAndNamespace, err)
return false
} else if !exist {
logger.Log.Errorf("%v can't %v %v %v", fmt.Sprintf(uiUtils.Red, "✗"), verb, resource, groupAndNamespace)
return false
}
logger.Log.Infof("%v can %v %v %v", fmt.Sprintf(uiUtils.Green, "√"), verb, resource, groupAndNamespace)
return true
}

View File

@@ -0,0 +1,95 @@
package check
import (
"context"
"fmt"
"github.com/up9inc/mizu/cli/config"
"github.com/up9inc/mizu/cli/uiUtils"
"github.com/up9inc/mizu/shared/kubernetes"
"github.com/up9inc/mizu/shared/logger"
)
func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
logger.Log.Infof("\nk8s-components\n--------------------")
exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.MizuResourcesNamespace)
allResourcesExist := checkResourceExist(config.Config.MizuResourcesNamespace, "namespace", exist, err)
exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ConfigMapName)
allResourcesExist = checkResourceExist(kubernetes.ConfigMapName, "config map", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ServiceAccountName)
allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
if config.Config.IsNsRestrictedMode() {
exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleName)
allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleBindingName)
allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
} else {
exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesClusterRoleBindingExist(ctx, kubernetes.ClusterRoleBindingName)
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
}
exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName)
allResourcesExist = checkResourceExist(kubernetes.ApiServerPodName, "service", exist, err) && allResourcesExist
allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
return allResourcesExist
}
// checkPodResourcesExist verifies that the Mizu API server pod exists and is
// running, and that every tapper pod in the Mizu namespace is running.
// Each outcome is logged as a ✗/√ check line; returns true only if all pass.
func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
	// API server: there must be at least one pod with the app label, and the
	// first one returned must be in Running phase.
	apiServerPods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName)
	if err != nil {
		logger.Log.Errorf("%v error checking if '%v' pod is running, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName, err)
		return false
	}
	if len(apiServerPods) == 0 {
		logger.Log.Errorf("%v '%v' pod doesn't exist", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName)
		return false
	}
	if !kubernetes.IsPodRunning(&apiServerPods[0]) {
		logger.Log.Errorf("%v '%v' pod not running", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName)
		return false
	}
	logger.Log.Infof("%v '%v' pod running", fmt.Sprintf(uiUtils.Green, "√"), kubernetes.ApiServerPodName)

	// Tappers: count how many of the listed pods are not running.
	tapperPods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.MizuResourcesNamespace, kubernetes.TapperPodName)
	if err != nil {
		logger.Log.Errorf("%v error checking if '%v' pods are running, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.TapperPodName, err)
		return false
	}
	tappers := len(tapperPods)
	notRunningTappers := 0
	for i := range tapperPods {
		if !kubernetes.IsPodRunning(&tapperPods[i]) {
			notRunningTappers++
		}
	}
	if notRunningTappers > 0 {
		logger.Log.Errorf("%v '%v' %v/%v pods are not running", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.TapperPodName, notRunningTappers, tappers)
		return false
	}
	logger.Log.Infof("%v '%v' %v pods running", fmt.Sprintf(uiUtils.Green, "√"), kubernetes.TapperPodName, tappers)
	return true
}
// checkResourceExist logs a ✗/√ check line for a single Kubernetes resource
// lookup result and reports whether the resource was found without error.
// resourceName/resourceType are only used for log output; exist/err are the
// outcome of the caller's existence query.
func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool {
	switch {
	case err != nil:
		logger.Log.Errorf("%v error checking if '%v' %v exists, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), resourceName, resourceType, err)
		return false
	case !exist:
		logger.Log.Errorf("%v '%v' %v doesn't exist", fmt.Sprintf(uiUtils.Red, "✗"), resourceName, resourceType)
		return false
	default:
		logger.Log.Infof("%v '%v' %v exists", fmt.Sprintf(uiUtils.Green, "√"), resourceName, resourceType)
		return true
	}
}

View File

@@ -0,0 +1,21 @@
package check
import (
"fmt"
"github.com/up9inc/mizu/cli/uiUtils"
"github.com/up9inc/mizu/shared/kubernetes"
"github.com/up9inc/mizu/shared/logger"
"github.com/up9inc/mizu/shared/semver"
)
// KubernetesVersion checks that the cluster's API version satisfies Mizu's
// minimum supported Kubernetes version, logging a ✗/√ check line either way.
func KubernetesVersion(kubernetesVersion *semver.SemVersion) bool {
	logger.Log.Infof("\nkubernetes-version\n--------------------")
	err := kubernetes.ValidateKubernetesVersion(kubernetesVersion)
	if err == nil {
		logger.Log.Infof("%v is running the minimum Kubernetes API version", fmt.Sprintf(uiUtils.Green, "√"))
		return true
	}
	logger.Log.Errorf("%v not running the minimum Kubernetes API version, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
	return false
}

View File

@@ -0,0 +1,83 @@
package check
import (
"context"
"fmt"
"github.com/up9inc/mizu/cli/apiserver"
"github.com/up9inc/mizu/cli/config"
"github.com/up9inc/mizu/cli/uiUtils"
"github.com/up9inc/mizu/shared/kubernetes"
"github.com/up9inc/mizu/shared/logger"
"regexp"
)
// ServerConnection reports whether the Mizu API server is reachable.
// It first probes an already-available tunnel with a single attempt; failing
// that, it tries both a proxy connection and a port-forward connection, and
// succeeds if at least one of the two works. Each outcome is logged.
func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
	logger.Log.Infof("\nAPI-server-connectivity\n--------------------")
	serverUrl := fmt.Sprintf("http://%s", kubernetes.GetMizuApiServerProxiedHostAndPath(config.Config.Tap.GuiPort))

	// Fast path: a tunnel may already be open (one attempt, no retries).
	if apiserver.NewProvider(serverUrl, 1, apiserver.DefaultTimeout).TestConnection() == nil {
		logger.Log.Infof("%v found Mizu server tunnel available and connected successfully to API server", fmt.Sprintf(uiUtils.Green, "√"))
		return true
	}

	connected := false

	if err := checkProxy(serverUrl, kubernetesProvider); err == nil {
		connected = true
		logger.Log.Infof("%v connected successfully to API server using proxy", fmt.Sprintf(uiUtils.Green, "√"))
	} else {
		logger.Log.Errorf("%v couldn't connect to API server using proxy, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
	}

	if err := checkPortForward(serverUrl, kubernetesProvider); err == nil {
		connected = true
		logger.Log.Infof("%v connected successfully to API server using port-forward", fmt.Sprintf(uiUtils.Green, "√"))
	} else {
		logger.Log.Errorf("%v couldn't connect to API server using port-forward, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
	}

	return connected
}
// checkProxy starts a local HTTP proxy to the Mizu API server pod and runs a
// connectivity test through it. Returns nil when the test succeeds.
//
// Fix: the proxy server is now shut down on every return path via defer —
// previously it was only shut down on success, leaking the listener whenever
// TestConnection failed.
func checkProxy(serverUrl string, kubernetesProvider *kubernetes.Provider) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Tap.GuiPort, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName, cancel)
	if err != nil {
		return err
	}
	// Deferred calls run LIFO, so this executes before the cancel() above and
	// ctx is still valid while the server shuts down. A shutdown failure is
	// only debug-logged, matching the previous behavior.
	defer func() {
		if err := httpServer.Shutdown(ctx); err != nil {
			logger.Log.Debugf("Error occurred while stopping proxy, err: %v", err)
		}
	}()

	apiServerProvider := apiserver.NewProvider(serverUrl, apiserver.DefaultRetries, apiserver.DefaultTimeout)
	return apiServerProvider.TestConnection()
}
// checkPortForward establishes a port-forward to the Mizu API server pod and
// runs a connectivity test through it. Returns nil when the test succeeds.
//
// Fix: the forwarder is now closed on every return path via defer —
// previously it was only closed on success, leaking the forwarding session
// whenever TestConnection failed.
func checkPortForward(serverUrl string, kubernetesProvider *kubernetes.Provider) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// ApiServerPodName is a known-valid pattern, so the compile error is ignored.
	podRegex, _ := regexp.Compile(kubernetes.ApiServerPodName)
	forwarder, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.MizuResourcesNamespace, podRegex, config.Config.Tap.GuiPort, ctx, cancel)
	if err != nil {
		return err
	}
	defer forwarder.Close()

	apiServerProvider := apiserver.NewProvider(serverUrl, apiserver.DefaultRetries, apiserver.DefaultTimeout)
	return apiServerProvider.TestConnection()
}

View File

@@ -4,20 +4,10 @@ import (
"context" "context"
"embed" "embed"
"fmt" "fmt"
core "k8s.io/api/core/v1" "github.com/up9inc/mizu/cli/cmd/check"
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
"regexp"
"time"
"github.com/up9inc/mizu/cli/apiserver"
"github.com/up9inc/mizu/cli/config" "github.com/up9inc/mizu/cli/config"
"github.com/up9inc/mizu/cli/uiUtils" "github.com/up9inc/mizu/cli/uiUtils"
"github.com/up9inc/mizu/shared/kubernetes"
"github.com/up9inc/mizu/shared/logger" "github.com/up9inc/mizu/shared/logger"
"github.com/up9inc/mizu/shared/semver"
) )
var ( var (
@@ -31,27 +21,33 @@ func runMizuCheck() {
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancel will be called when this function exits defer cancel() // cancel will be called when this function exits
kubernetesProvider, kubernetesVersion, checkPassed := checkKubernetesApi() kubernetesProvider, kubernetesVersion, checkPassed := check.KubernetesApi()
if checkPassed { if checkPassed {
checkPassed = checkKubernetesVersion(kubernetesVersion) checkPassed = check.KubernetesVersion(kubernetesVersion)
} }
if config.Config.Check.PreTap { if config.Config.Check.PreTap {
if checkPassed { if checkPassed {
checkPassed = checkK8sTapPermissions(ctx, kubernetesProvider) checkPassed = check.TapKubernetesPermissions(ctx, embedFS, kubernetesProvider)
} }
} else if config.Config.Check.PreInstall {
if checkPassed { if checkPassed {
checkPassed = checkImagePullInCluster(ctx, kubernetesProvider) checkPassed = check.InstallKubernetesPermissions(ctx, kubernetesProvider)
} }
} else { } else {
if checkPassed { if checkPassed {
checkPassed = checkK8sResources(ctx, kubernetesProvider) checkPassed = check.KubernetesResources(ctx, kubernetesProvider)
} }
if checkPassed { if checkPassed {
checkPassed = checkServerConnection(kubernetesProvider) checkPassed = check.ServerConnection(kubernetesProvider)
}
}
if config.Config.Check.ImagePull {
if checkPassed {
checkPassed = check.ImagePullInCluster(ctx, kubernetesProvider)
} }
} }
@@ -61,365 +57,3 @@ func runMizuCheck() {
logger.Log.Errorf("\nStatus check results are %v", fmt.Sprintf(uiUtils.Red, "✗")) logger.Log.Errorf("\nStatus check results are %v", fmt.Sprintf(uiUtils.Red, "✗"))
} }
} }
// checkKubernetesApi builds a Kubernetes client from the configured kubeconfig
// path and context, then verifies connectivity by querying the cluster version.
// Returns (provider, version, true) on success; (nil, nil, false) otherwise.
func checkKubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
logger.Log.Infof("\nkubernetes-api\n--------------------")
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.KubeContext)
if err != nil {
logger.Log.Errorf("%v can't initialize the client, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return nil, nil, false
}
logger.Log.Infof("%v can initialize the client", fmt.Sprintf(uiUtils.Green, "√"))
// The version query doubles as a reachability probe for the API server.
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
logger.Log.Errorf("%v can't query the Kubernetes API, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return nil, nil, false
}
logger.Log.Infof("%v can query the Kubernetes API", fmt.Sprintf(uiUtils.Green, "√"))
return kubernetesProvider, kubernetesVersion, true
}
// checkKubernetesVersion verifies the cluster meets Mizu's minimum supported
// Kubernetes API version, logging a ✗/√ check line either way.
func checkKubernetesVersion(kubernetesVersion *semver.SemVersion) bool {
logger.Log.Infof("\nkubernetes-version\n--------------------")
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
logger.Log.Errorf("%v not running the minimum Kubernetes API version, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
logger.Log.Infof("%v is running the minimum Kubernetes API version", fmt.Sprintf(uiUtils.Green, "√"))
return true
}
// checkServerConnection reports whether the Mizu API server is reachable:
// first via an already-open tunnel (single attempt), then via proxy and
// port-forward. Returns true if at least one method connects.
func checkServerConnection(kubernetesProvider *kubernetes.Provider) bool {
logger.Log.Infof("\nAPI-server-connectivity\n--------------------")
serverUrl := GetApiServerUrl(config.Config.Tap.GuiPort)
// Single retry (1): this is just a probe for an existing tunnel.
apiServerProvider := apiserver.NewProvider(serverUrl, 1, apiserver.DefaultTimeout)
if err := apiServerProvider.TestConnection(); err == nil {
logger.Log.Infof("%v found Mizu server tunnel available and connected successfully to API server", fmt.Sprintf(uiUtils.Green, "√"))
return true
}
// Both fallbacks are attempted and logged; either succeeding is enough.
connectedToApiServer := false
if err := checkProxy(serverUrl, kubernetesProvider); err != nil {
logger.Log.Errorf("%v couldn't connect to API server using proxy, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
} else {
connectedToApiServer = true
logger.Log.Infof("%v connected successfully to API server using proxy", fmt.Sprintf(uiUtils.Green, "√"))
}
if err := checkPortForward(serverUrl, kubernetesProvider); err != nil {
logger.Log.Errorf("%v couldn't connect to API server using port-forward, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
} else {
connectedToApiServer = true
logger.Log.Infof("%v connected successfully to API server using port-forward", fmt.Sprintf(uiUtils.Green, "√"))
}
return connectedToApiServer
}
// checkProxy starts a local HTTP proxy to the Mizu API server pod and runs a
// connectivity test through it. Returns nil when the test succeeds.
// NOTE(review): httpServer.Shutdown is only reached on the success path — if
// TestConnection fails, the proxy listener is not shut down here; confirm
// whether the ctx cancel is relied on for cleanup.
func checkProxy(serverUrl string, kubernetesProvider *kubernetes.Provider) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Tap.GuiPort, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName, cancel)
if err != nil {
return err
}
apiServerProvider := apiserver.NewProvider(serverUrl, apiserver.DefaultRetries, apiserver.DefaultTimeout)
if err := apiServerProvider.TestConnection(); err != nil {
return err
}
// Shutdown failure is non-fatal: the connectivity test already passed.
if err := httpServer.Shutdown(ctx); err != nil {
logger.Log.Debugf("Error occurred while stopping proxy, err: %v", err)
}
return nil
}
// checkPortForward establishes a port-forward to the Mizu API server pod and
// runs a connectivity test through it. Returns nil when the test succeeds.
// NOTE(review): forwarder.Close is only reached on the success path — if
// TestConnection fails, the forwarder is not closed here; confirm whether the
// ctx cancel is relied on for cleanup.
func checkPortForward(serverUrl string, kubernetesProvider *kubernetes.Provider) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// ApiServerPodName is a known pattern, so the compile error is ignored.
podRegex, _ := regexp.Compile(kubernetes.ApiServerPodName)
forwarder, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.MizuResourcesNamespace, podRegex, config.Config.Tap.GuiPort, ctx, cancel)
if err != nil {
return err
}
apiServerProvider := apiserver.NewProvider(serverUrl, apiserver.DefaultRetries, apiserver.DefaultTimeout)
if err := apiServerProvider.TestConnection(); err != nil {
return err
}
forwarder.Close()
return nil
}
// checkK8sResources verifies that every Kubernetes resource Mizu depends on
// exists: namespace, config map, service account, RBAC (namespaced Role/
// RoleBinding in restricted mode, otherwise ClusterRole/ClusterRoleBinding),
// the API server service, and the pods. Each check is logged individually;
// returns true only if all resources exist.
func checkK8sResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
logger.Log.Infof("\nk8s-components\n--------------------")
exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.MizuResourcesNamespace)
allResourcesExist := checkResourceExist(config.Config.MizuResourcesNamespace, "namespace", exist, err)
// checkResourceExist is deliberately the left operand of && so every
// resource is still checked and logged after an earlier failure.
exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ConfigMapName)
allResourcesExist = checkResourceExist(kubernetes.ConfigMapName, "config map", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ServiceAccountName)
allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
if config.Config.IsNsRestrictedMode() {
exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleName)
allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleBindingName)
allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
} else {
exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesClusterRoleBindingExist(ctx, kubernetes.ClusterRoleBindingName)
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
}
exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName)
allResourcesExist = checkResourceExist(kubernetes.ApiServerPodName, "service", exist, err) && allResourcesExist
allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
return allResourcesExist
}
// checkPodResourcesExist verifies that the Mizu API server pod exists and is
// running, and that every tapper pod in the Mizu namespace is running.
// Each outcome is logged as a ✗/√ check line; returns true only if all pass.
func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
// Only the first pod matching the API-server app label is checked.
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName); err != nil {
logger.Log.Errorf("%v error checking if '%v' pod is running, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName, err)
return false
} else if len(pods) == 0 {
logger.Log.Errorf("%v '%v' pod doesn't exist", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName)
return false
} else if !kubernetes.IsPodRunning(&pods[0]) {
logger.Log.Errorf("%v '%v' pod not running", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName)
return false
}
logger.Log.Infof("%v '%v' pod running", fmt.Sprintf(uiUtils.Green, "√"), kubernetes.ApiServerPodName)
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.MizuResourcesNamespace, kubernetes.TapperPodName); err != nil {
logger.Log.Errorf("%v error checking if '%v' pods are running, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.TapperPodName, err)
return false
} else {
// Count total tappers and how many of them are not in Running phase.
tappers := 0
notRunningTappers := 0
for _, pod := range pods {
tappers += 1
if !kubernetes.IsPodRunning(&pod) {
notRunningTappers += 1
}
}
if notRunningTappers > 0 {
logger.Log.Errorf("%v '%v' %v/%v pods are not running", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.TapperPodName, notRunningTappers, tappers)
return false
}
// NOTE(review): zero tapper pods also passes this check — confirm intended.
logger.Log.Infof("%v '%v' %v pods running", fmt.Sprintf(uiUtils.Green, "√"), kubernetes.TapperPodName, tappers)
return true
}
}
// checkResourceExist logs a ✗/√ check line for a single Kubernetes resource
// lookup result and reports whether the resource was found without error.
// resourceName/resourceType are only used for log output; exist/err are the
// outcome of the caller's existence query.
func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool {
if err != nil {
logger.Log.Errorf("%v error checking if '%v' %v exists, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), resourceName, resourceType, err)
return false
} else if !exist {
logger.Log.Errorf("%v '%v' %v doesn't exist", fmt.Sprintf(uiUtils.Red, "✗"), resourceName, resourceType)
return false
}
logger.Log.Infof("%v '%v' %v exists", fmt.Sprintf(uiUtils.Green, "√"), resourceName, resourceType)
return true
}
// checkK8sTapPermissions loads the RBAC rules Mizu tap requires from an
// embedded YAML manifest (namespaced or cluster-wide depending on restricted
// mode) and checks each rule against the cluster via self-subject access
// review. Returns true only if every required permission is granted.
func checkK8sTapPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
logger.Log.Infof("\nkubernetes-permissions\n--------------------")
var filePath string
if config.Config.IsNsRestrictedMode() {
filePath = "permissionFiles/permissions-ns-tap.yaml"
} else {
filePath = "permissionFiles/permissions-all-namespaces-tap.yaml"
}
data, err := embedFS.ReadFile(filePath)
if err != nil {
logger.Log.Errorf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
obj, err := getDecodedObject(data)
if err != nil {
logger.Log.Errorf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
// NOTE(review): these type assertions panic if the embedded manifest's kind
// doesn't match the mode (Role vs ClusterRole) — confirm files stay in sync.
var rules []rbac.PolicyRule
if config.Config.IsNsRestrictedMode() {
rules = obj.(*rbac.Role).Rules
} else {
rules = obj.(*rbac.ClusterRole).Rules
}
return checkPermissions(ctx, kubernetesProvider, rules)
}
// getDecodedObject decodes a Kubernetes manifest (YAML/JSON bytes) into a
// runtime.Object using the client-go scheme's universal deserializer.
func getDecodedObject(data []byte) (runtime.Object, error) {
decode := scheme.Codecs.UniversalDeserializer().Decode
obj, _, err := decode(data, nil, nil)
if err != nil {
return nil, err
}
return obj, nil
}
// checkPermissions expands each RBAC policy rule into every
// (group, resource, verb) combination and checks it with CanI against the
// Mizu namespace. All combinations are checked and logged even after a
// failure; returns true only if every permission exists.
func checkPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider, rules []rbac.PolicyRule) bool {
permissionsExist := true
for _, rule := range rules {
for _, group := range rule.APIGroups {
for _, resource := range rule.Resources {
for _, verb := range rule.Verbs {
exist, err := kubernetesProvider.CanI(ctx, config.Config.MizuResourcesNamespace, resource, verb, group)
// checkPermissionExist is the left operand of && so it always runs and logs.
permissionsExist = checkPermissionExist(group, resource, verb, exist, err) && permissionsExist
}
}
}
}
return permissionsExist
}
// checkPermissionExist logs a ✗/√ check line for one permission probe result
// and reports whether the (verb, resource, group) permission is granted.
func checkPermissionExist(group string, resource string, verb string, exist bool, err error) bool {
if err != nil {
logger.Log.Errorf("%v error checking permission for %v %v in group '%v', err: %v", fmt.Sprintf(uiUtils.Red, "✗"), verb, resource, group, err)
return false
} else if !exist {
logger.Log.Errorf("%v can't %v %v in group '%v'", fmt.Sprintf(uiUtils.Red, "✗"), verb, resource, group)
return false
}
logger.Log.Infof("%v can %v %v in group '%v'", fmt.Sprintf(uiUtils.Green, "√"), verb, resource, group)
return true
}
// checkImagePullInCluster verifies the cluster can pull Mizu images from
// Docker Hub by creating a throwaway probe pod and waiting for it to reach
// Running phase. Resources are cleaned up via defer on every path.
func checkImagePullInCluster(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
logger.Log.Infof("\nimage-pull-in-cluster\n--------------------")
podName := "image-pull-in-cluster"
// Deferred cleanup runs even when creation partially succeeded.
defer removeImagePullInClusterResources(ctx, kubernetesProvider, podName)
if err := createImagePullInClusterResources(ctx, kubernetesProvider, podName); err != nil {
logger.Log.Errorf("%v error while creating image pull in cluster resources, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
if err := checkImagePulled(ctx, kubernetesProvider, podName); err != nil {
logger.Log.Errorf("%v cluster is not able to pull mizu containers from docker hub, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
logger.Log.Infof("%v cluster is able to pull mizu containers from docker hub", fmt.Sprintf(uiUtils.Green, "√"))
return true
}
// checkImagePulled watches the probe pod until it reaches Running phase.
// Returns nil on success, the watch error on failure, or a timeout error if
// the pod doesn't start within 30 seconds.
func checkImagePulled(ctx context.Context, kubernetesProvider *kubernetes.Provider, podName string) error {
// Anchored regex so only the exact probe pod is watched.
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", podName))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.MizuResourcesNamespace}, podWatchHelper)
timeAfter := time.After(30 * time.Second)
for {
select {
case wEvent, ok := <-eventChan:
if !ok {
// Closed channel: set to nil so this select case is disabled.
eventChan = nil
continue
}
pod, err := wEvent.ToPod()
if err != nil {
return err
}
if pod.Status.Phase == core.PodRunning {
return nil
}
case err, ok := <-errorChan:
if !ok {
errorChan = nil
continue
}
return err
case <-timeAfter:
return fmt.Errorf("image not pulled in time")
}
}
}
// removeImagePullInClusterResources tears down the image-pull probe: deletes
// the probe pod and, when not in namespace-restricted mode, the Mizu
// namespace itself (it was created by createImagePullInClusterResources).
// Failures are best-effort and only debug-logged.
func removeImagePullInClusterResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, podName string) {
if err := kubernetesProvider.RemovePod(ctx, config.Config.MizuResourcesNamespace, podName); err != nil {
logger.Log.Debugf("error while removing image pull in cluster resources, err: %v", err)
}
if !config.Config.IsNsRestrictedMode() {
// NOTE(review): this deletes the whole Mizu namespace — confirm this check
// is never run against a namespace holding live Mizu resources.
if err := kubernetesProvider.RemoveNamespace(ctx, config.Config.MizuResourcesNamespace); err != nil {
logger.Log.Debugf("error while removing image pull in cluster resources, err: %v", err)
}
}
}
// createImagePullInClusterResources creates the resources for the image-pull
// probe: the Mizu namespace (unless in namespace-restricted mode, where it is
// assumed to exist) and a minimal busybox pod that forces an image pull.
func createImagePullInClusterResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, podName string) error {
if !config.Config.IsNsRestrictedMode() {
if _, err := kubernetesProvider.CreateNamespace(ctx, config.Config.MizuResourcesNamespace); err != nil {
return err
}
}
// Zero grace period so cleanup deletes the probe pod immediately.
var zero int64
pod := &core.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: core.PodSpec{
Containers: []core.Container{
{
Name: "probe",
Image: "up9inc/busybox",
// Always pull, so the check actually exercises registry access.
ImagePullPolicy: "Always",
// `cat` with stdin attached keeps the container alive once pulled.
Command: []string{"cat"},
Stdin: true,
},
},
TerminationGracePeriodSeconds: &zero,
},
}
if _, err := kubernetesProvider.CreatePod(ctx, config.Config.MizuResourcesNamespace, pod); err != nil {
return err
}
return nil
}

View File

@@ -1,9 +1,13 @@
package configStructs package configStructs
const ( const (
PreTapCheckName = "pre-tap" PreTapCheckName = "pre-tap"
PreInstallCheckName = "pre-install"
ImagePullCheckName = "image-pull"
) )
type CheckConfig struct { type CheckConfig struct {
PreTap bool `yaml:"pre-tap"` PreTap bool `yaml:"pre-tap"`
PreInstall bool `yaml:"pre-install"`
ImagePull bool `yaml:"image-pull"`
} }

View File

@@ -11,7 +11,7 @@ require (
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
github.com/spf13/cobra v1.3.0 github.com/spf13/cobra v1.3.0
github.com/spf13/pflag v1.0.5 github.com/spf13/pflag v1.0.5
github.com/up9inc/basenine/server/lib v0.0.0-20220315070758-3a76cfc4378e github.com/up9inc/basenine/server/lib v0.0.0-20220317230530-8472d80307f6
github.com/up9inc/mizu/shared v0.0.0 github.com/up9inc/mizu/shared v0.0.0
github.com/up9inc/mizu/tap/api v0.0.0 github.com/up9inc/mizu/tap/api v0.0.0
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8

View File

@@ -600,8 +600,8 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/up9inc/basenine/server/lib v0.0.0-20220315070758-3a76cfc4378e h1:reG/QwyxdfvGObfdrae7DZc3rTMiGwQ6S/4PRkwtBoE= github.com/up9inc/basenine/server/lib v0.0.0-20220317230530-8472d80307f6 h1:+RZTD+HdfIW2SMbc65yWkruTY+g5/1Av074m62A74ls=
github.com/up9inc/basenine/server/lib v0.0.0-20220315070758-3a76cfc4378e/go.mod h1:ZIkxWiJm65jYQIso9k+OZKhR7gQ1we2jNyE2kQX9IQI= github.com/up9inc/basenine/server/lib v0.0.0-20220317230530-8472d80307f6/go.mod h1:ZIkxWiJm65jYQIso9k+OZKhR7gQ1we2jNyE2kQX9IQI=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk= github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk=

View File

@@ -94,10 +94,6 @@ func (tapperSyncer *MizuTapperSyncer) watchTapperPods() {
continue continue
} }
if tapperSyncer.startTime.After(pod.CreationTimestamp.Time) {
continue
}
logger.Log.Debugf("Watching tapper pods loop, tapper: %v, node: %v, status: %v", pod.Name, pod.Spec.NodeName, pod.Status.Phase) logger.Log.Debugf("Watching tapper pods loop, tapper: %v, node: %v, status: %v", pod.Name, pod.Spec.NodeName, pod.Status.Phase)
if pod.Spec.NodeName != "" { if pod.Spec.NodeName != "" {
tapperStatus := shared.TapperStatus{TapperName: pod.Name, NodeName: pod.Spec.NodeName, Status: string(pod.Status.Phase)} tapperStatus := shared.TapperStatus{TapperName: pod.Name, NodeName: pod.Spec.NodeName, Status: string(pod.Status.Phase)}
@@ -137,10 +133,6 @@ func (tapperSyncer *MizuTapperSyncer) watchTapperEvents() {
continue continue
} }
if tapperSyncer.startTime.After(event.CreationTimestamp.Time) {
continue
}
logger.Log.Debugf( logger.Log.Debugf(
fmt.Sprintf("Watching tapper events loop, event %s, time: %v, resource: %s (%s), reason: %s, note: %s", fmt.Sprintf("Watching tapper events loop, event %s, time: %v, resource: %s (%s), reason: %s, note: %s",
event.Name, event.Name,

View File

@@ -19,7 +19,7 @@ import (
const mizuTestEnvVar = "MIZU_TEST" const mizuTestEnvVar = "MIZU_TEST"
var UnknownIp net.IP = net.IP{0, 0, 0, 0} var UnknownIp net.IP = net.IP{0, 0, 0, 0}
var UnknownPort uint16 = 0 var UnknownPort uint16 = 0
type Protocol struct { type Protocol struct {
@@ -83,6 +83,7 @@ type CounterPair struct {
type GenericMessage struct { type GenericMessage struct {
IsRequest bool `json:"isRequest"` IsRequest bool `json:"isRequest"`
CaptureTime time.Time `json:"captureTime"` CaptureTime time.Time `json:"captureTime"`
CaptureSize int `json:"captureSize"`
Payload interface{} `json:"payload"` Payload interface{} `json:"payload"`
} }
@@ -110,13 +111,27 @@ type SuperIdentifier struct {
IsClosedOthers bool IsClosedOthers bool
} }
type ReadProgress struct {
readBytes int
lastCurrent int
}
func (p *ReadProgress) Feed(n int) {
p.readBytes += n
}
func (p *ReadProgress) Current() (n int) {
p.lastCurrent = p.readBytes - p.lastCurrent
return p.lastCurrent
}
type Dissector interface { type Dissector interface {
Register(*Extension) Register(*Extension)
Ping() Ping()
Dissect(b *bufio.Reader, capture Capture, isClient bool, tcpID *TcpID, counterPair *CounterPair, superTimer *SuperTimer, superIdentifier *SuperIdentifier, emitter Emitter, options *TrafficFilteringOptions, reqResMatcher RequestResponseMatcher) error Dissect(b *bufio.Reader, progress *ReadProgress, capture Capture, isClient bool, tcpID *TcpID, counterPair *CounterPair, superTimer *SuperTimer, superIdentifier *SuperIdentifier, emitter Emitter, options *TrafficFilteringOptions, reqResMatcher RequestResponseMatcher) error
Analyze(item *OutputChannelItem, resolvedSource string, resolvedDestination string, namespace string) *Entry Analyze(item *OutputChannelItem, resolvedSource string, resolvedDestination string, namespace string) *Entry
Summarize(entry *Entry) *BaseEntry Summarize(entry *Entry) *BaseEntry
Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, err error)
Macros() map[string]string Macros() map[string]string
NewResponseRequestMatcher() RequestResponseMatcher NewResponseRequestMatcher() RequestResponseMatcher
} }
@@ -152,6 +167,8 @@ type Entry struct {
StartTime time.Time `json:"startTime"` StartTime time.Time `json:"startTime"`
Request map[string]interface{} `json:"request"` Request map[string]interface{} `json:"request"`
Response map[string]interface{} `json:"response"` Response map[string]interface{} `json:"response"`
RequestSize int `json:"requestSize"`
ResponseSize int `json:"responseSize"`
ElapsedTime int64 `json:"elapsedTime"` ElapsedTime int64 `json:"elapsedTime"`
Rules ApplicableRules `json:"rules,omitempty"` Rules ApplicableRules `json:"rules,omitempty"`
ContractStatus ContractStatus `json:"contractStatus,omitempty"` ContractStatus ContractStatus `json:"contractStatus,omitempty"`
@@ -164,7 +181,6 @@ type Entry struct {
type EntryWrapper struct { type EntryWrapper struct {
Protocol Protocol `json:"protocol"` Protocol Protocol `json:"protocol"`
Representation string `json:"representation"` Representation string `json:"representation"`
BodySize int64 `json:"bodySize"`
Data *Entry `json:"data"` Data *Entry `json:"data"`
Base *BaseEntry `json:"base"` Base *BaseEntry `json:"base"`
Rules []map[string]interface{} `json:"rulesMatched,omitempty"` Rules []map[string]interface{} `json:"rulesMatched,omitempty"`

View File

@@ -13,4 +13,4 @@ test-pull-bin:
test-pull-expect: test-pull-expect:
@mkdir -p expect @mkdir -p expect
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect4/amqp/\* expect @[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect5/amqp/\* expect

View File

@@ -94,7 +94,7 @@ type AMQPWrapper struct {
Details interface{} `json:"details"` Details interface{} `json:"details"`
} }
func emitAMQP(event interface{}, _type string, method string, connectionInfo *api.ConnectionInfo, captureTime time.Time, emitter api.Emitter, capture api.Capture) { func emitAMQP(event interface{}, _type string, method string, connectionInfo *api.ConnectionInfo, captureTime time.Time, captureSize int, emitter api.Emitter, capture api.Capture) {
request := &api.GenericMessage{ request := &api.GenericMessage{
IsRequest: true, IsRequest: true,
CaptureTime: captureTime, CaptureTime: captureTime,

View File

@@ -39,7 +39,7 @@ func (d dissecting) Ping() {
const amqpRequest string = "amqp_request" const amqpRequest string = "amqp_request"
func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error { func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
r := AmqpReader{b} r := AmqpReader{b}
var remaining int var remaining int
@@ -113,11 +113,11 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
case *BasicPublish: case *BasicPublish:
eventBasicPublish.Body = f.Body eventBasicPublish.Body = f.Body
superIdentifier.Protocol = &protocol superIdentifier.Protocol = &protocol
emitAMQP(*eventBasicPublish, amqpRequest, basicMethodMap[40], connectionInfo, superTimer.CaptureTime, emitter, capture) emitAMQP(*eventBasicPublish, amqpRequest, basicMethodMap[40], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
case *BasicDeliver: case *BasicDeliver:
eventBasicDeliver.Body = f.Body eventBasicDeliver.Body = f.Body
superIdentifier.Protocol = &protocol superIdentifier.Protocol = &protocol
emitAMQP(*eventBasicDeliver, amqpRequest, basicMethodMap[60], connectionInfo, superTimer.CaptureTime, emitter, capture) emitAMQP(*eventBasicDeliver, amqpRequest, basicMethodMap[60], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
} }
case *MethodFrame: case *MethodFrame:
@@ -138,7 +138,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
Arguments: m.Arguments, Arguments: m.Arguments,
} }
superIdentifier.Protocol = &protocol superIdentifier.Protocol = &protocol
emitAMQP(*eventQueueBind, amqpRequest, queueMethodMap[20], connectionInfo, superTimer.CaptureTime, emitter, capture) emitAMQP(*eventQueueBind, amqpRequest, queueMethodMap[20], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
case *BasicConsume: case *BasicConsume:
eventBasicConsume := &BasicConsume{ eventBasicConsume := &BasicConsume{
@@ -151,7 +151,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
Arguments: m.Arguments, Arguments: m.Arguments,
} }
superIdentifier.Protocol = &protocol superIdentifier.Protocol = &protocol
emitAMQP(*eventBasicConsume, amqpRequest, basicMethodMap[20], connectionInfo, superTimer.CaptureTime, emitter, capture) emitAMQP(*eventBasicConsume, amqpRequest, basicMethodMap[20], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
case *BasicDeliver: case *BasicDeliver:
eventBasicDeliver.ConsumerTag = m.ConsumerTag eventBasicDeliver.ConsumerTag = m.ConsumerTag
@@ -171,7 +171,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
Arguments: m.Arguments, Arguments: m.Arguments,
} }
superIdentifier.Protocol = &protocol superIdentifier.Protocol = &protocol
emitAMQP(*eventQueueDeclare, amqpRequest, queueMethodMap[10], connectionInfo, superTimer.CaptureTime, emitter, capture) emitAMQP(*eventQueueDeclare, amqpRequest, queueMethodMap[10], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
case *ExchangeDeclare: case *ExchangeDeclare:
eventExchangeDeclare := &ExchangeDeclare{ eventExchangeDeclare := &ExchangeDeclare{
@@ -185,7 +185,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
Arguments: m.Arguments, Arguments: m.Arguments,
} }
superIdentifier.Protocol = &protocol superIdentifier.Protocol = &protocol
emitAMQP(*eventExchangeDeclare, amqpRequest, exchangeMethodMap[10], connectionInfo, superTimer.CaptureTime, emitter, capture) emitAMQP(*eventExchangeDeclare, amqpRequest, exchangeMethodMap[10], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
case *ConnectionStart: case *ConnectionStart:
eventConnectionStart := &ConnectionStart{ eventConnectionStart := &ConnectionStart{
@@ -196,7 +196,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
Locales: m.Locales, Locales: m.Locales,
} }
superIdentifier.Protocol = &protocol superIdentifier.Protocol = &protocol
emitAMQP(*eventConnectionStart, amqpRequest, connectionMethodMap[10], connectionInfo, superTimer.CaptureTime, emitter, capture) emitAMQP(*eventConnectionStart, amqpRequest, connectionMethodMap[10], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
case *ConnectionClose: case *ConnectionClose:
eventConnectionClose := &ConnectionClose{ eventConnectionClose := &ConnectionClose{
@@ -206,7 +206,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
MethodId: m.MethodId, MethodId: m.MethodId,
} }
superIdentifier.Protocol = &protocol superIdentifier.Protocol = &protocol
emitAMQP(*eventConnectionClose, amqpRequest, connectionMethodMap[50], connectionInfo, superTimer.CaptureTime, emitter, capture) emitAMQP(*eventConnectionClose, amqpRequest, connectionMethodMap[50], connectionInfo, superTimer.CaptureTime, progress.Current(), emitter, capture)
} }
default: default:
@@ -236,6 +236,7 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
Namespace: namespace, Namespace: namespace,
Outgoing: item.ConnectionInfo.IsOutgoing, Outgoing: item.ConnectionInfo.IsOutgoing,
Request: reqDetails, Request: reqDetails,
RequestSize: item.Pair.Request.CaptureSize,
Timestamp: item.Timestamp, Timestamp: item.Timestamp,
StartTime: item.Pair.Request.CaptureTime, StartTime: item.Pair.Request.CaptureTime,
ElapsedTime: 0, ElapsedTime: 0,
@@ -301,8 +302,7 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
} }
} }
func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error) { func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, err error) {
bodySize = 0
representation := make(map[string]interface{}) representation := make(map[string]interface{})
var repRequest []interface{} var repRequest []interface{}
switch request["method"].(string) { switch request["method"].(string) {

View File

@@ -122,7 +122,7 @@ func TestDissect(t *testing.T) {
DstPort: "2", DstPort: "2",
} }
reqResMatcher := dissector.NewResponseRequestMatcher() reqResMatcher := dissector.NewResponseRequestMatcher()
err = dissector.Dissect(bufferClient, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher) err = dissector.Dissect(bufferClient, &api.ReadProgress{}, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
panic(err) panic(err)
} }
@@ -140,7 +140,7 @@ func TestDissect(t *testing.T) {
SrcPort: "2", SrcPort: "2",
DstPort: "1", DstPort: "1",
} }
err = dissector.Dissect(bufferServer, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher) err = dissector.Dissect(bufferServer, &api.ReadProgress{}, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
panic(err) panic(err)
} }
@@ -319,7 +319,7 @@ func TestRepresent(t *testing.T) {
var objects []string var objects []string
for _, entry := range entries { for _, entry := range entries {
object, _, err := dissector.Represent(entry.Request, entry.Response) object, err := dissector.Represent(entry.Request, entry.Response)
assert.Nil(t, err) assert.Nil(t, err)
objects = append(objects, string(object)) objects = append(objects, string(object))
} }

View File

@@ -13,4 +13,4 @@ test-pull-bin:
test-pull-expect: test-pull-expect:
@mkdir -p expect @mkdir -p expect
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect4/http/\* expect @[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect5/http/\* expect

View File

@@ -47,7 +47,7 @@ func replaceForwardedFor(item *api.OutputChannelItem) {
item.ConnectionInfo.ClientPort = "" item.ConnectionInfo.ClientPort = ""
} }
func handleHTTP2Stream(http2Assembler *Http2Assembler, capture api.Capture, tcpID *api.TcpID, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) error { func handleHTTP2Stream(http2Assembler *Http2Assembler, progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) error {
streamID, messageHTTP1, isGrpc, err := http2Assembler.readMessage() streamID, messageHTTP1, isGrpc, err := http2Assembler.readMessage()
if err != nil { if err != nil {
return err return err
@@ -66,7 +66,7 @@ func handleHTTP2Stream(http2Assembler *Http2Assembler, capture api.Capture, tcpI
streamID, streamID,
"HTTP2", "HTTP2",
) )
item = reqResMatcher.registerRequest(ident, &messageHTTP1, superTimer.CaptureTime, messageHTTP1.ProtoMinor) item = reqResMatcher.registerRequest(ident, &messageHTTP1, superTimer.CaptureTime, progress.Current(), messageHTTP1.ProtoMinor)
if item != nil { if item != nil {
item.ConnectionInfo = &api.ConnectionInfo{ item.ConnectionInfo = &api.ConnectionInfo{
ClientIP: tcpID.SrcIP, ClientIP: tcpID.SrcIP,
@@ -86,7 +86,7 @@ func handleHTTP2Stream(http2Assembler *Http2Assembler, capture api.Capture, tcpI
streamID, streamID,
"HTTP2", "HTTP2",
) )
item = reqResMatcher.registerResponse(ident, &messageHTTP1, superTimer.CaptureTime, messageHTTP1.ProtoMinor) item = reqResMatcher.registerResponse(ident, &messageHTTP1, superTimer.CaptureTime, progress.Current(), messageHTTP1.ProtoMinor)
if item != nil { if item != nil {
item.ConnectionInfo = &api.ConnectionInfo{ item.ConnectionInfo = &api.ConnectionInfo{
ClientIP: tcpID.DstIP, ClientIP: tcpID.DstIP,
@@ -111,7 +111,7 @@ func handleHTTP2Stream(http2Assembler *Http2Assembler, capture api.Capture, tcpI
return nil return nil
} }
func handleHTTP1ClientStream(b *bufio.Reader, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, req *http.Request, err error) { func handleHTTP1ClientStream(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, req *http.Request, err error) {
req, err = http.ReadRequest(b) req, err = http.ReadRequest(b)
if err != nil { if err != nil {
return return
@@ -139,7 +139,7 @@ func handleHTTP1ClientStream(b *bufio.Reader, capture api.Capture, tcpID *api.Tc
requestCounter, requestCounter,
"HTTP1", "HTTP1",
) )
item := reqResMatcher.registerRequest(ident, req, superTimer.CaptureTime, req.ProtoMinor) item := reqResMatcher.registerRequest(ident, req, superTimer.CaptureTime, progress.Current(), req.ProtoMinor)
if item != nil { if item != nil {
item.ConnectionInfo = &api.ConnectionInfo{ item.ConnectionInfo = &api.ConnectionInfo{
ClientIP: tcpID.SrcIP, ClientIP: tcpID.SrcIP,
@@ -154,7 +154,7 @@ func handleHTTP1ClientStream(b *bufio.Reader, capture api.Capture, tcpID *api.Tc
return return
} }
func handleHTTP1ServerStream(b *bufio.Reader, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, err error) { func handleHTTP1ServerStream(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, err error) {
var res *http.Response var res *http.Response
res, err = http.ReadResponse(b, nil) res, err = http.ReadResponse(b, nil)
if err != nil { if err != nil {
@@ -183,7 +183,7 @@ func handleHTTP1ServerStream(b *bufio.Reader, capture api.Capture, tcpID *api.Tc
responseCounter, responseCounter,
"HTTP1", "HTTP1",
) )
item := reqResMatcher.registerResponse(ident, res, superTimer.CaptureTime, res.ProtoMinor) item := reqResMatcher.registerResponse(ident, res, superTimer.CaptureTime, progress.Current(), res.ProtoMinor)
if item != nil { if item != nil {
item.ConnectionInfo = &api.ConnectionInfo{ item.ConnectionInfo = &api.ConnectionInfo{
ClientIP: tcpID.DstIP, ClientIP: tcpID.DstIP,

View File

@@ -86,7 +86,7 @@ func (d dissecting) Ping() {
log.Printf("pong %s", http11protocol.Name) log.Printf("pong %s", http11protocol.Name)
} }
func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error { func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
reqResMatcher := _reqResMatcher.(*requestResponseMatcher) reqResMatcher := _reqResMatcher.(*requestResponseMatcher)
var err error var err error
@@ -121,7 +121,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
} }
if isHTTP2 { if isHTTP2 {
err = handleHTTP2Stream(http2Assembler, capture, tcpID, superTimer, emitter, options, reqResMatcher) err = handleHTTP2Stream(http2Assembler, progress, capture, tcpID, superTimer, emitter, options, reqResMatcher)
if err == io.EOF || err == io.ErrUnexpectedEOF { if err == io.EOF || err == io.ErrUnexpectedEOF {
break break
} else if err != nil { } else if err != nil {
@@ -130,7 +130,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
superIdentifier.Protocol = &http11protocol superIdentifier.Protocol = &http11protocol
} else if isClient { } else if isClient {
var req *http.Request var req *http.Request
switchingProtocolsHTTP2, req, err = handleHTTP1ClientStream(b, capture, tcpID, counterPair, superTimer, emitter, options, reqResMatcher) switchingProtocolsHTTP2, req, err = handleHTTP1ClientStream(b, progress, capture, tcpID, counterPair, superTimer, emitter, options, reqResMatcher)
if err == io.EOF || err == io.ErrUnexpectedEOF { if err == io.EOF || err == io.ErrUnexpectedEOF {
break break
} else if err != nil { } else if err != nil {
@@ -148,7 +148,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
tcpID.DstPort, tcpID.DstPort,
"HTTP2", "HTTP2",
) )
item := reqResMatcher.registerRequest(ident, req, superTimer.CaptureTime, req.ProtoMinor) item := reqResMatcher.registerRequest(ident, req, superTimer.CaptureTime, progress.Current(), req.ProtoMinor)
if item != nil { if item != nil {
item.ConnectionInfo = &api.ConnectionInfo{ item.ConnectionInfo = &api.ConnectionInfo{
ClientIP: tcpID.SrcIP, ClientIP: tcpID.SrcIP,
@@ -162,7 +162,7 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
} }
} }
} else { } else {
switchingProtocolsHTTP2, err = handleHTTP1ServerStream(b, capture, tcpID, counterPair, superTimer, emitter, options, reqResMatcher) switchingProtocolsHTTP2, err = handleHTTP1ServerStream(b, progress, capture, tcpID, counterPair, superTimer, emitter, options, reqResMatcher)
if err == io.EOF || err == io.ErrUnexpectedEOF { if err == io.EOF || err == io.ErrUnexpectedEOF {
break break
} else if err != nil { } else if err != nil {
@@ -271,14 +271,16 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
IP: item.ConnectionInfo.ServerIP, IP: item.ConnectionInfo.ServerIP,
Port: item.ConnectionInfo.ServerPort, Port: item.ConnectionInfo.ServerPort,
}, },
Namespace: namespace, Namespace: namespace,
Outgoing: item.ConnectionInfo.IsOutgoing, Outgoing: item.ConnectionInfo.IsOutgoing,
Request: reqDetails, Request: reqDetails,
Response: resDetails, Response: resDetails,
Timestamp: item.Timestamp, RequestSize: item.Pair.Request.CaptureSize,
StartTime: item.Pair.Request.CaptureTime, ResponseSize: item.Pair.Response.CaptureSize,
ElapsedTime: elapsedTime, Timestamp: item.Timestamp,
HTTPPair: string(httpPair), StartTime: item.Pair.Request.CaptureTime,
ElapsedTime: elapsedTime,
HTTPPair: string(httpPair),
} }
} }
@@ -410,11 +412,9 @@ func representRequest(request map[string]interface{}) (repRequest []interface{})
return return
} }
func representResponse(response map[string]interface{}) (repResponse []interface{}, bodySize int64) { func representResponse(response map[string]interface{}) (repResponse []interface{}) {
repResponse = make([]interface{}, 0) repResponse = make([]interface{}, 0)
bodySize = int64(response["bodySize"].(float64))
details, _ := json.Marshal([]api.TableData{ details, _ := json.Marshal([]api.TableData{
{ {
Name: "Status", Name: "Status",
@@ -428,7 +428,7 @@ func representResponse(response map[string]interface{}) (repResponse []interface
}, },
{ {
Name: "Body Size (bytes)", Name: "Body Size (bytes)",
Value: bodySize, Value: int64(response["bodySize"].(float64)),
Selector: `response.bodySize`, Selector: `response.bodySize`,
}, },
}) })
@@ -471,10 +471,10 @@ func representResponse(response map[string]interface{}) (repResponse []interface
return return
} }
func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error) { func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, err error) {
representation := make(map[string]interface{}) representation := make(map[string]interface{})
repRequest := representRequest(request) repRequest := representRequest(request)
repResponse, bodySize := representResponse(response) repResponse := representResponse(response)
representation["request"] = repRequest representation["request"] = repRequest
representation["response"] = repResponse representation["response"] = repResponse
object, err = json.Marshal(representation) object, err = json.Marshal(representation)

View File

@@ -124,7 +124,7 @@ func TestDissect(t *testing.T) {
DstPort: "2", DstPort: "2",
} }
reqResMatcher := dissector.NewResponseRequestMatcher() reqResMatcher := dissector.NewResponseRequestMatcher()
err = dissector.Dissect(bufferClient, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher) err = dissector.Dissect(bufferClient, &api.ReadProgress{}, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
panic(err) panic(err)
} }
@@ -142,7 +142,7 @@ func TestDissect(t *testing.T) {
SrcPort: "2", SrcPort: "2",
DstPort: "1", DstPort: "1",
} }
err = dissector.Dissect(bufferServer, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher) err = dissector.Dissect(bufferServer, &api.ReadProgress{}, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
panic(err) panic(err)
} }
@@ -321,7 +321,7 @@ func TestRepresent(t *testing.T) {
var objects []string var objects []string
for _, entry := range entries { for _, entry := range entries {
object, _, err := dissector.Represent(entry.Request, entry.Response) object, err := dissector.Represent(entry.Request, entry.Response)
assert.Nil(t, err) assert.Nil(t, err)
objects = append(objects, string(object)) objects = append(objects, string(object))
} }

View File

@@ -24,10 +24,11 @@ func (matcher *requestResponseMatcher) GetMap() *sync.Map {
func (matcher *requestResponseMatcher) SetMaxTry(value int) { func (matcher *requestResponseMatcher) SetMaxTry(value int) {
} }
func (matcher *requestResponseMatcher) registerRequest(ident string, request *http.Request, captureTime time.Time, protoMinor int) *api.OutputChannelItem { func (matcher *requestResponseMatcher) registerRequest(ident string, request *http.Request, captureTime time.Time, captureSize int, protoMinor int) *api.OutputChannelItem {
requestHTTPMessage := api.GenericMessage{ requestHTTPMessage := api.GenericMessage{
IsRequest: true, IsRequest: true,
CaptureTime: captureTime, CaptureTime: captureTime,
CaptureSize: captureSize,
Payload: api.HTTPPayload{ Payload: api.HTTPPayload{
Type: TypeHttpRequest, Type: TypeHttpRequest,
Data: request, Data: request,
@@ -47,10 +48,11 @@ func (matcher *requestResponseMatcher) registerRequest(ident string, request *ht
return nil return nil
} }
func (matcher *requestResponseMatcher) registerResponse(ident string, response *http.Response, captureTime time.Time, protoMinor int) *api.OutputChannelItem { func (matcher *requestResponseMatcher) registerResponse(ident string, response *http.Response, captureTime time.Time, captureSize int, protoMinor int) *api.OutputChannelItem {
responseHTTPMessage := api.GenericMessage{ responseHTTPMessage := api.GenericMessage{
IsRequest: false, IsRequest: false,
CaptureTime: captureTime, CaptureTime: captureTime,
CaptureSize: captureSize,
Payload: api.HTTPPayload{ Payload: api.HTTPPayload{
Type: TypeHttpResponse, Type: TypeHttpResponse,
Data: response, Data: response,

View File

@@ -13,4 +13,4 @@ test-pull-bin:
test-pull-expect: test-pull-expect:
@mkdir -p expect @mkdir -p expect
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect4/kafka/\* expect @[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect5/kafka/\* expect

View File

@@ -35,7 +35,7 @@ func (d dissecting) Ping() {
log.Printf("pong %s", _protocol.Name) log.Printf("pong %s", _protocol.Name)
} }
func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error { func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
reqResMatcher := _reqResMatcher.(*requestResponseMatcher) reqResMatcher := _reqResMatcher.(*requestResponseMatcher)
for { for {
if superIdentifier.Protocol != nil && superIdentifier.Protocol != &_protocol { if superIdentifier.Protocol != nil && superIdentifier.Protocol != &_protocol {
@@ -79,13 +79,15 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
IP: item.ConnectionInfo.ServerIP, IP: item.ConnectionInfo.ServerIP,
Port: item.ConnectionInfo.ServerPort, Port: item.ConnectionInfo.ServerPort,
}, },
Namespace: namespace, Namespace: namespace,
Outgoing: item.ConnectionInfo.IsOutgoing, Outgoing: item.ConnectionInfo.IsOutgoing,
Request: reqDetails, Request: reqDetails,
Response: item.Pair.Response.Payload.(map[string]interface{})["details"].(map[string]interface{}), Response: item.Pair.Response.Payload.(map[string]interface{})["details"].(map[string]interface{}),
Timestamp: item.Timestamp, RequestSize: item.Pair.Request.CaptureSize,
StartTime: item.Pair.Request.CaptureTime, ResponseSize: item.Pair.Response.CaptureSize,
ElapsedTime: elapsedTime, Timestamp: item.Timestamp,
StartTime: item.Pair.Request.CaptureTime,
ElapsedTime: elapsedTime,
} }
} }
@@ -208,8 +210,7 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
} }
} }
func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error) { func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, err error) {
bodySize = 0
representation := make(map[string]interface{}) representation := make(map[string]interface{})
apiKey := ApiKey(request["apiKey"].(float64)) apiKey := ApiKey(request["apiKey"].(float64))

View File

@@ -123,7 +123,7 @@ func TestDissect(t *testing.T) {
} }
reqResMatcher := dissector.NewResponseRequestMatcher() reqResMatcher := dissector.NewResponseRequestMatcher()
reqResMatcher.SetMaxTry(10) reqResMatcher.SetMaxTry(10)
err = dissector.Dissect(bufferClient, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher) err = dissector.Dissect(bufferClient, &api.ReadProgress{}, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
log.Println(err) log.Println(err)
} }
@@ -141,7 +141,7 @@ func TestDissect(t *testing.T) {
SrcPort: "2", SrcPort: "2",
DstPort: "1", DstPort: "1",
} }
err = dissector.Dissect(bufferServer, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher) err = dissector.Dissect(bufferServer, &api.ReadProgress{}, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
log.Println(err) log.Println(err)
} }
@@ -320,7 +320,7 @@ func TestRepresent(t *testing.T) {
var objects []string var objects []string
for _, entry := range entries { for _, entry := range entries {
object, _, err := dissector.Represent(entry.Request, entry.Response) object, err := dissector.Represent(entry.Request, entry.Response)
assert.Nil(t, err) assert.Nil(t, err)
objects = append(objects, string(object)) objects = append(objects, string(object))
} }

View File

@@ -265,6 +265,7 @@ func ReadResponse(r io.Reader, capture api.Capture, tcpID *api.TcpID, counterPai
Request: api.GenericMessage{ Request: api.GenericMessage{
IsRequest: true, IsRequest: true,
CaptureTime: reqResPair.Request.CaptureTime, CaptureTime: reqResPair.Request.CaptureTime,
CaptureSize: int(reqResPair.Request.Size),
Payload: KafkaPayload{ Payload: KafkaPayload{
Data: &KafkaWrapper{ Data: &KafkaWrapper{
Method: apiNames[apiKey], Method: apiNames[apiKey],
@@ -276,6 +277,7 @@ func ReadResponse(r io.Reader, capture api.Capture, tcpID *api.TcpID, counterPai
Response: api.GenericMessage{ Response: api.GenericMessage{
IsRequest: false, IsRequest: false,
CaptureTime: reqResPair.Response.CaptureTime, CaptureTime: reqResPair.Response.CaptureTime,
CaptureSize: int(reqResPair.Response.Size),
Payload: KafkaPayload{ Payload: KafkaPayload{
Data: &KafkaWrapper{ Data: &KafkaWrapper{
Method: apiNames[apiKey], Method: apiNames[apiKey],

View File

@@ -13,4 +13,4 @@ test-pull-bin:
test-pull-expect: test-pull-expect:
@mkdir -p expect @mkdir -p expect
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect4/redis/\* expect @[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect5/redis/\* expect

View File

@@ -6,7 +6,7 @@ import (
"github.com/up9inc/mizu/tap/api" "github.com/up9inc/mizu/tap/api"
) )
func handleClientStream(capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, request *RedisPacket, reqResMatcher *requestResponseMatcher) error { func handleClientStream(progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, request *RedisPacket, reqResMatcher *requestResponseMatcher) error {
counterPair.Lock() counterPair.Lock()
counterPair.Request++ counterPair.Request++
requestCounter := counterPair.Request requestCounter := counterPair.Request
@@ -21,7 +21,7 @@ func handleClientStream(capture api.Capture, tcpID *api.TcpID, counterPair *api.
requestCounter, requestCounter,
) )
item := reqResMatcher.registerRequest(ident, request, superTimer.CaptureTime) item := reqResMatcher.registerRequest(ident, request, superTimer.CaptureTime, progress.Current())
if item != nil { if item != nil {
item.Capture = capture item.Capture = capture
item.ConnectionInfo = &api.ConnectionInfo{ item.ConnectionInfo = &api.ConnectionInfo{
@@ -36,7 +36,7 @@ func handleClientStream(capture api.Capture, tcpID *api.TcpID, counterPair *api.
return nil return nil
} }
func handleServerStream(capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, response *RedisPacket, reqResMatcher *requestResponseMatcher) error { func handleServerStream(progress *api.ReadProgress, capture api.Capture, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, response *RedisPacket, reqResMatcher *requestResponseMatcher) error {
counterPair.Lock() counterPair.Lock()
counterPair.Response++ counterPair.Response++
responseCounter := counterPair.Response responseCounter := counterPair.Response
@@ -51,7 +51,7 @@ func handleServerStream(capture api.Capture, tcpID *api.TcpID, counterPair *api.
responseCounter, responseCounter,
) )
item := reqResMatcher.registerResponse(ident, response, superTimer.CaptureTime) item := reqResMatcher.registerResponse(ident, response, superTimer.CaptureTime, progress.Current())
if item != nil { if item != nil {
item.Capture = capture item.Capture = capture
item.ConnectionInfo = &api.ConnectionInfo{ item.ConnectionInfo = &api.ConnectionInfo{

View File

@@ -34,7 +34,7 @@ func (d dissecting) Ping() {
log.Printf("pong %s", protocol.Name) log.Printf("pong %s", protocol.Name)
} }
func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error { func (d dissecting) Dissect(b *bufio.Reader, progress *api.ReadProgress, capture api.Capture, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
reqResMatcher := _reqResMatcher.(*requestResponseMatcher) reqResMatcher := _reqResMatcher.(*requestResponseMatcher)
is := &RedisInputStream{ is := &RedisInputStream{
Reader: b, Reader: b,
@@ -48,9 +48,9 @@ func (d dissecting) Dissect(b *bufio.Reader, capture api.Capture, isClient bool,
} }
if isClient { if isClient {
err = handleClientStream(capture, tcpID, counterPair, superTimer, emitter, redisPacket, reqResMatcher) err = handleClientStream(progress, capture, tcpID, counterPair, superTimer, emitter, redisPacket, reqResMatcher)
} else { } else {
err = handleServerStream(capture, tcpID, counterPair, superTimer, emitter, redisPacket, reqResMatcher) err = handleServerStream(progress, capture, tcpID, counterPair, superTimer, emitter, redisPacket, reqResMatcher)
} }
if err != nil { if err != nil {
@@ -82,13 +82,15 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
IP: item.ConnectionInfo.ServerIP, IP: item.ConnectionInfo.ServerIP,
Port: item.ConnectionInfo.ServerPort, Port: item.ConnectionInfo.ServerPort,
}, },
Namespace: namespace, Namespace: namespace,
Outgoing: item.ConnectionInfo.IsOutgoing, Outgoing: item.ConnectionInfo.IsOutgoing,
Request: reqDetails, Request: reqDetails,
Response: resDetails, Response: resDetails,
Timestamp: item.Timestamp, RequestSize: item.Pair.Request.CaptureSize,
StartTime: item.Pair.Request.CaptureTime, ResponseSize: item.Pair.Response.CaptureSize,
ElapsedTime: elapsedTime, Timestamp: item.Timestamp,
StartTime: item.Pair.Request.CaptureTime,
ElapsedTime: elapsedTime,
} }
} }
@@ -131,8 +133,7 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
} }
} }
func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error) { func (d dissecting) Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, err error) {
bodySize = 0
representation := make(map[string]interface{}) representation := make(map[string]interface{})
repRequest := representGeneric(request, `request.`) repRequest := representGeneric(request, `request.`)
repResponse := representGeneric(response, `response.`) repResponse := representGeneric(response, `response.`)

View File

@@ -123,7 +123,7 @@ func TestDissect(t *testing.T) {
DstPort: "2", DstPort: "2",
} }
reqResMatcher := dissector.NewResponseRequestMatcher() reqResMatcher := dissector.NewResponseRequestMatcher()
err = dissector.Dissect(bufferClient, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher) err = dissector.Dissect(bufferClient, &api.ReadProgress{}, api.Pcap, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && reflect.TypeOf(err) != reflect.TypeOf(&ConnectError{}) && err != io.EOF && err != io.ErrUnexpectedEOF { if err != nil && reflect.TypeOf(err) != reflect.TypeOf(&ConnectError{}) && err != io.EOF && err != io.ErrUnexpectedEOF {
log.Println(err) log.Println(err)
} }
@@ -141,7 +141,7 @@ func TestDissect(t *testing.T) {
SrcPort: "2", SrcPort: "2",
DstPort: "1", DstPort: "1",
} }
err = dissector.Dissect(bufferServer, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher) err = dissector.Dissect(bufferServer, &api.ReadProgress{}, api.Pcap, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
if err != nil && reflect.TypeOf(err) != reflect.TypeOf(&ConnectError{}) && err != io.EOF && err != io.ErrUnexpectedEOF { if err != nil && reflect.TypeOf(err) != reflect.TypeOf(&ConnectError{}) && err != io.EOF && err != io.ErrUnexpectedEOF {
log.Println(err) log.Println(err)
} }
@@ -320,7 +320,7 @@ func TestRepresent(t *testing.T) {
var objects []string var objects []string
for _, entry := range entries { for _, entry := range entries {
object, _, err := dissector.Represent(entry.Request, entry.Response) object, err := dissector.Represent(entry.Request, entry.Response)
assert.Nil(t, err) assert.Nil(t, err)
objects = append(objects, string(object)) objects = append(objects, string(object))
} }

View File

@@ -22,10 +22,11 @@ func (matcher *requestResponseMatcher) GetMap() *sync.Map {
func (matcher *requestResponseMatcher) SetMaxTry(value int) { func (matcher *requestResponseMatcher) SetMaxTry(value int) {
} }
func (matcher *requestResponseMatcher) registerRequest(ident string, request *RedisPacket, captureTime time.Time) *api.OutputChannelItem { func (matcher *requestResponseMatcher) registerRequest(ident string, request *RedisPacket, captureTime time.Time, captureSize int) *api.OutputChannelItem {
requestRedisMessage := api.GenericMessage{ requestRedisMessage := api.GenericMessage{
IsRequest: true, IsRequest: true,
CaptureTime: captureTime, CaptureTime: captureTime,
CaptureSize: captureSize,
Payload: RedisPayload{ Payload: RedisPayload{
Data: &RedisWrapper{ Data: &RedisWrapper{
Method: string(request.Command), Method: string(request.Command),
@@ -48,10 +49,11 @@ func (matcher *requestResponseMatcher) registerRequest(ident string, request *Re
return nil return nil
} }
func (matcher *requestResponseMatcher) registerResponse(ident string, response *RedisPacket, captureTime time.Time) *api.OutputChannelItem { func (matcher *requestResponseMatcher) registerResponse(ident string, response *RedisPacket, captureTime time.Time, captureSize int) *api.OutputChannelItem {
responseRedisMessage := api.GenericMessage{ responseRedisMessage := api.GenericMessage{
IsRequest: false, IsRequest: false,
CaptureTime: captureTime, CaptureTime: captureTime,
CaptureSize: captureSize,
Payload: RedisPayload{ Payload: RedisPayload{
Data: &RedisWrapper{ Data: &RedisWrapper{
Method: string(response.Command), Method: string(response.Command),

View File

@@ -40,6 +40,7 @@ type tcpReader struct {
isOutgoing bool isOutgoing bool
msgQueue chan tcpReaderDataMsg // Channel of captured reassembled tcp payload msgQueue chan tcpReaderDataMsg // Channel of captured reassembled tcp payload
data []byte data []byte
progress *api.ReadProgress
superTimer *api.SuperTimer superTimer *api.SuperTimer
parent *tcpStream parent *tcpStream
packetsSeen uint packetsSeen uint
@@ -80,6 +81,8 @@ func (h *tcpReader) Read(p []byte) (int, error) {
l := copy(p, h.data) l := copy(p, h.data)
h.data = h.data[l:] h.data = h.data[l:]
h.progress.Feed(l)
return l, nil return l, nil
} }
@@ -96,7 +99,7 @@ func (h *tcpReader) run(wg *sync.WaitGroup) {
defer wg.Done() defer wg.Done()
b := bufio.NewReader(h) b := bufio.NewReader(h)
// TODO: Add api.Pcap, api.Envoy and api.Linkerd distinction by refactoring NewPacketSourceManager method // TODO: Add api.Pcap, api.Envoy and api.Linkerd distinction by refactoring NewPacketSourceManager method
err := h.extension.Dissector.Dissect(b, api.Pcap, h.isClient, h.tcpID, h.counterPair, h.superTimer, h.parent.superIdentifier, h.emitter, filteringOptions, h.reqResMatcher) err := h.extension.Dissector.Dissect(b, h.progress, api.Pcap, h.isClient, h.tcpID, h.counterPair, h.superTimer, h.parent.superIdentifier, h.emitter, filteringOptions, h.reqResMatcher)
if err != nil { if err != nil {
_, err = io.Copy(ioutil.Discard, b) _, err = io.Copy(ioutil.Discard, b)
if err != nil { if err != nil {

View File

@@ -89,6 +89,7 @@ func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.T
} }
stream.clients = append(stream.clients, tcpReader{ stream.clients = append(stream.clients, tcpReader{
msgQueue: make(chan tcpReaderDataMsg), msgQueue: make(chan tcpReaderDataMsg),
progress: &api.ReadProgress{},
superTimer: &api.SuperTimer{}, superTimer: &api.SuperTimer{},
ident: fmt.Sprintf("%s %s", net, transport), ident: fmt.Sprintf("%s %s", net, transport),
tcpID: &api.TcpID{ tcpID: &api.TcpID{
@@ -108,6 +109,7 @@ func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.T
}) })
stream.servers = append(stream.servers, tcpReader{ stream.servers = append(stream.servers, tcpReader{
msgQueue: make(chan tcpReaderDataMsg), msgQueue: make(chan tcpReaderDataMsg),
progress: &api.ReadProgress{},
superTimer: &api.SuperTimer{}, superTimer: &api.SuperTimer{},
ident: fmt.Sprintf("%s %s", net, transport), ident: fmt.Sprintf("%s %s", net, transport),
tcpID: &api.TcpID{ tcpID: &api.TcpID{

View File

@@ -1,5 +1,7 @@
#!/bin/bash #!/bin/bash
pushd "$(dirname "$0")" || exit 1
MIZU_HOME=$(realpath ../../../) MIZU_HOME=$(realpath ../../../)
docker build -t mizu-ebpf-builder . || exit 1 docker build -t mizu-ebpf-builder . || exit 1
@@ -7,6 +9,7 @@ docker build -t mizu-ebpf-builder . || exit 1
docker run --rm \ docker run --rm \
--name mizu-ebpf-builder \ --name mizu-ebpf-builder \
-v $MIZU_HOME:/mizu \ -v $MIZU_HOME:/mizu \
-v $(go env GOPATH):/root/go \
-it mizu-ebpf-builder \ -it mizu-ebpf-builder \
sh -c " sh -c "
go generate tap/tlstapper/tls_tapper.go go generate tap/tlstapper/tls_tapper.go
@@ -15,3 +18,5 @@ docker run --rm \
chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfel.go chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfel.go
chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfel.o chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfel.o
" || exit 1 " || exit 1
popd

View File

@@ -69,7 +69,7 @@ void sys_exit_accept4(struct sys_exit_accept4_ctx *ctx) {
struct accept_info *infoPtr = bpf_map_lookup_elem(&accept_syscall_context, &id); struct accept_info *infoPtr = bpf_map_lookup_elem(&accept_syscall_context, &id);
if (infoPtr == 0) { if (infoPtr == NULL) {
return; return;
} }
@@ -175,7 +175,7 @@ void sys_exit_connect(struct sys_exit_connect_ctx *ctx) {
struct connect_info *infoPtr = bpf_map_lookup_elem(&connect_syscall_info, &id); struct connect_info *infoPtr = bpf_map_lookup_elem(&connect_syscall_info, &id);
if (infoPtr == 0) { if (infoPtr == NULL) {
return; return;
} }

View File

@@ -28,7 +28,7 @@ void sys_enter_read(struct sys_enter_read_ctx *ctx) {
struct ssl_info *infoPtr = bpf_map_lookup_elem(&ssl_read_context, &id); struct ssl_info *infoPtr = bpf_map_lookup_elem(&ssl_read_context, &id);
if (infoPtr == 0) { if (infoPtr == NULL) {
return; return;
} }
@@ -71,7 +71,7 @@ void sys_enter_write(struct sys_enter_write_ctx *ctx) {
struct ssl_info *infoPtr = bpf_map_lookup_elem(&ssl_write_context, &id); struct ssl_info *infoPtr = bpf_map_lookup_elem(&ssl_write_context, &id);
if (infoPtr == 0) { if (infoPtr == NULL) {
return; return;
} }

View File

@@ -10,6 +10,12 @@ Copyright (C) UP9 Inc.
#define FLAGS_IS_CLIENT_BIT (1 << 0) #define FLAGS_IS_CLIENT_BIT (1 << 0)
#define FLAGS_IS_READ_BIT (1 << 1) #define FLAGS_IS_READ_BIT (1 << 1)
#define CHUNK_SIZE (1 << 12)
#define MAX_CHUNKS_PER_OPERATION (8)
// One minute in nano seconds. Chosen by gut feeling.
#define SSL_INFO_MAX_TTL_NANO (1000000000l * 60l)
// The same struct can be found in chunk.go // The same struct can be found in chunk.go
// //
// Be careful when editing, alignment and padding should be exactly the same in go/c. // Be careful when editing, alignment and padding should be exactly the same in go/c.
@@ -18,16 +24,18 @@ struct tlsChunk {
__u32 pid; __u32 pid;
__u32 tgid; __u32 tgid;
__u32 len; __u32 len;
__u32 start;
__u32 recorded; __u32 recorded;
__u32 fd; __u32 fd;
__u32 flags; __u32 flags;
__u8 address[16]; __u8 address[16];
__u8 data[4096]; // Must be N^2 __u8 data[CHUNK_SIZE]; // Must be N^2
}; };
struct ssl_info { struct ssl_info {
void* buffer; void* buffer;
__u32 fd; __u32 fd;
__u64 created_at_nano;
// for ssl_write and ssl_read must be zero // for ssl_write and ssl_read must be zero
// for ssl_write_ex and ssl_read_ex save the *written/*readbytes pointer. // for ssl_write_ex and ssl_read_ex save the *written/*readbytes pointer.
@@ -53,10 +61,13 @@ struct fd_info {
#define BPF_PERF_OUTPUT(_name) \ #define BPF_PERF_OUTPUT(_name) \
BPF_MAP(_name, BPF_MAP_TYPE_PERF_EVENT_ARRAY, int, __u32, 1024) BPF_MAP(_name, BPF_MAP_TYPE_PERF_EVENT_ARRAY, int, __u32, 1024)
#define BPF_LRU_HASH(_name, _key_type, _value_type) \
BPF_MAP(_name, BPF_MAP_TYPE_LRU_HASH, _key_type, _value_type, 16384)
BPF_HASH(pids_map, __u32, __u32); BPF_HASH(pids_map, __u32, __u32);
BPF_HASH(ssl_write_context, __u64, struct ssl_info); BPF_LRU_HASH(ssl_write_context, __u64, struct ssl_info);
BPF_HASH(ssl_read_context, __u64, struct ssl_info); BPF_LRU_HASH(ssl_read_context, __u64, struct ssl_info);
BPF_HASH(file_descriptor_to_ipv4, __u64, struct fd_info); BPF_HASH(file_descriptor_to_ipv4, __u64, struct fd_info);
BPF_PERF_OUTPUT(chunks_buffer); BPF_PERF_OUTPUT(chunks_buffer);

View File

@@ -18,16 +18,166 @@ struct {
__type(value, struct tlsChunk); __type(value, struct tlsChunk);
} heap SEC(".maps"); } heap SEC(".maps");
static __always_inline int ssl_uprobe(void* ssl, void* buffer, int num, struct bpf_map_def* map_fd, size_t *count_ptr) { static __always_inline int get_count_bytes(struct pt_regs *ctx, struct ssl_info* info, __u64 id) {
__u64 id = bpf_get_current_pid_tgid(); int returnValue = PT_REGS_RC(ctx);
if (!should_tap(id >> 32)) { if (info->count_ptr == NULL) {
// ssl_read and ssl_write return the number of bytes written/read
//
return returnValue;
}
// ssl_read_ex and ssl_write_ex return 1 for success
//
if (returnValue != 1) {
return 0; return 0;
} }
// ssl_read_ex and ssl_write_ex write the number of bytes to an arg named *count
//
size_t countBytes;
long err = bpf_probe_read(&countBytes, sizeof(size_t), (void*) info->count_ptr);
if (err != 0) {
char msg[] = "Error reading bytes count of _ex (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
return 0;
}
return countBytes;
}
static __always_inline void add_address_to_chunk(struct tlsChunk* chunk, __u64 id, __u32 fd) {
__u32 pid = id >> 32;
__u64 key = (__u64) pid << 32 | fd;
struct fd_info *fdinfo = bpf_map_lookup_elem(&file_descriptor_to_ipv4, &key);
if (fdinfo == NULL) {
return;
}
int err = bpf_probe_read(chunk->address, sizeof(chunk->address), fdinfo->ipv4_addr);
chunk->flags |= (fdinfo->flags & FLAGS_IS_CLIENT_BIT);
if (err != 0) {
char msg[] = "Error reading from fd address %ld - %ld";
bpf_trace_printk(msg, sizeof(msg), id, err);
}
}
static __always_inline void send_chunk_part(struct pt_regs *ctx, __u8* buffer, __u64 id,
struct tlsChunk* chunk, int start, int end) {
size_t recorded = MIN(end - start, sizeof(chunk->data));
if (recorded <= 0) {
return;
}
chunk->recorded = recorded;
chunk->start = start;
// This ugly trick is for the ebpf verifier happiness
//
long err = 0;
if (chunk->recorded == sizeof(chunk->data)) {
err = bpf_probe_read(chunk->data, sizeof(chunk->data), buffer + start);
} else {
recorded &= (sizeof(chunk->data) - 1); // Buffer must be N^2
err = bpf_probe_read(chunk->data, recorded, buffer + start);
}
if (err != 0) {
char msg[] = "Error reading from ssl buffer %ld - %ld";
bpf_trace_printk(msg, sizeof(msg), id, err);
return;
}
bpf_perf_event_output(ctx, &chunks_buffer, BPF_F_CURRENT_CPU, chunk, sizeof(struct tlsChunk));
}
static __always_inline void send_chunk(struct pt_regs *ctx, __u8* buffer, __u64 id, struct tlsChunk* chunk) {
// ebpf loops must be bounded at compile time, we can't use (i < chunk->len / CHUNK_SIZE)
//
// https://lwn.net/Articles/794934/
//
// If we want to compile in kernel older than 5.3, we should add "#pragma unroll" to this loop
//
for (int i = 0; i < MAX_CHUNKS_PER_OPERATION; i++) {
if (chunk->len <= (CHUNK_SIZE * i)) {
break;
}
send_chunk_part(ctx, buffer, id, chunk, CHUNK_SIZE * i, chunk->len);
}
}
static __always_inline void output_ssl_chunk(struct pt_regs *ctx, struct ssl_info* info, __u64 id, __u32 flags) {
int countBytes = get_count_bytes(ctx, info, id);
if (countBytes <= 0) {
return;
}
if (countBytes > (CHUNK_SIZE * MAX_CHUNKS_PER_OPERATION)) {
char msg[] = "Buffer too big %d (id: %ld)";
bpf_trace_printk(msg, sizeof(msg), countBytes, id);
return;
}
struct tlsChunk* chunk;
int zero = 0;
// If other thread, running on the same CPU get to this point at the same time like us (context switch)
// the data will be corrupted - protection may be added in the future
//
chunk = bpf_map_lookup_elem(&heap, &zero);
if (!chunk) {
char msg[] = "Unable to allocate chunk (id: %ld)";
bpf_trace_printk(msg, sizeof(msg), id);
return;
}
chunk->flags = flags;
chunk->pid = id >> 32;
chunk->tgid = id;
chunk->len = countBytes;
chunk->fd = info->fd;
add_address_to_chunk(chunk, id, chunk->fd);
send_chunk(ctx, info->buffer, id, chunk);
}
static __always_inline void ssl_uprobe(void* ssl, void* buffer, int num, struct bpf_map_def* map_fd, size_t *count_ptr) {
__u64 id = bpf_get_current_pid_tgid();
if (!should_tap(id >> 32)) {
return;
}
struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &id);
struct ssl_info info = {}; struct ssl_info info = {};
info.fd = -1; if (infoPtr == NULL) {
info.fd = -1;
info.created_at_nano = bpf_ktime_get_ns();
} else {
long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);
if (err != 0) {
char msg[] = "Error reading old ssl context (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
}
if ((bpf_ktime_get_ns() - info.created_at_nano) > SSL_INFO_MAX_TTL_NANO) {
// If the ssl info is too old, we don't want to use its info because it may be incorrect.
//
info.fd = -1;
info.created_at_nano = bpf_ktime_get_ns();
}
}
info.count_ptr = count_ptr; info.count_ptr = count_ptr;
info.buffer = buffer; info.buffer = buffer;
@@ -36,163 +186,90 @@ static __always_inline int ssl_uprobe(void* ssl, void* buffer, int num, struct b
if (err != 0) { if (err != 0) {
char msg[] = "Error putting ssl context (id: %ld) (err: %ld)"; char msg[] = "Error putting ssl context (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err); bpf_trace_printk(msg, sizeof(msg), id, err);
return 0;
} }
return 0;
} }
static __always_inline int ssl_uretprobe(struct pt_regs *ctx, struct bpf_map_def* map_fd, __u32 flags) { static __always_inline void ssl_uretprobe(struct pt_regs *ctx, struct bpf_map_def* map_fd, __u32 flags) {
__u64 id = bpf_get_current_pid_tgid(); __u64 id = bpf_get_current_pid_tgid();
if (!should_tap(id >> 32)) { if (!should_tap(id >> 32)) {
return 0; return;
} }
struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &id); struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &id);
if (infoPtr == 0) { if (infoPtr == NULL) {
char msg[] = "Error getting ssl context info (id: %ld)"; char msg[] = "Error getting ssl context info (id: %ld)";
bpf_trace_printk(msg, sizeof(msg), id); bpf_trace_printk(msg, sizeof(msg), id);
return 0; return;
} }
struct ssl_info info; struct ssl_info info;
long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr); long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);
bpf_map_delete_elem(map_fd, &id); // Do not clean map on purpose, sometimes there are two calls to ssl_read in a raw
// while the first call actually goes to read from socket, and we get the chance
// to find the fd. The other call already have all the information and we don't
// have the chance to get the fd.
//
// There are two risks keeping the map items
// 1. It gets full - we solve it by using BPF_MAP_TYPE_LRU_HASH with hard limit
// 2. We get wrong info of an old call - we solve it by comparing the timestamp
// info before using it
//
// bpf_map_delete_elem(map_fd, &id);
if (err != 0) { if (err != 0) {
char msg[] = "Error reading ssl context (id: %ld) (err: %ld)"; char msg[] = "Error reading ssl context (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err); bpf_trace_printk(msg, sizeof(msg), id, err);
return 0; return;
} }
if (info.fd == -1) { if (info.fd == -1) {
char msg[] = "File descriptor is missing from ssl info (id: %ld)"; char msg[] = "File descriptor is missing from ssl info (id: %ld)";
bpf_trace_printk(msg, sizeof(msg), id); bpf_trace_printk(msg, sizeof(msg), id);
return 0; return;
} }
int countBytes = PT_REGS_RC(ctx); output_ssl_chunk(ctx, &info, id, flags);
if (info.count_ptr != 0) {
// ssl_read_ex and ssl_write_ex return 1 for success
//
if (countBytes != 1) {
return 0;
}
size_t tempCount;
long err = bpf_probe_read(&tempCount, sizeof(size_t), (void*) info.count_ptr);
if (err != 0) {
char msg[] = "Error reading bytes count of _ex (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
return 0;
}
countBytes = tempCount;
}
if (countBytes <= 0) {
return 0;
}
struct tlsChunk* c;
int zero = 0;
// If other thread, running on the same CPU get to this point at the same time like us
// the data will be corrupted - protection may be added in the future
//
c = bpf_map_lookup_elem(&heap, &zero);
if (!c) {
char msg[] = "Unable to allocate chunk (id: %ld)";
bpf_trace_printk(msg, sizeof(msg), id);
return 0;
}
size_t recorded = MIN(countBytes, sizeof(c->data));
c->flags = flags;
c->pid = id >> 32;
c->tgid = id;
c->len = countBytes;
c->recorded = recorded;
c->fd = info.fd;
// This ugly trick is for the ebpf verifier happiness
//
if (recorded == sizeof(c->data)) {
err = bpf_probe_read(c->data, sizeof(c->data), info.buffer);
} else {
recorded &= sizeof(c->data) - 1; // Buffer must be N^2
err = bpf_probe_read(c->data, recorded, info.buffer);
}
if (err != 0) {
char msg[] = "Error reading from ssl buffer %ld - %ld";
bpf_trace_printk(msg, sizeof(msg), id, err);
return 0;
}
__u32 pid = id >> 32;
__u32 fd = info.fd;
__u64 key = (__u64) pid << 32 | fd;
struct fd_info *fdinfo = bpf_map_lookup_elem(&file_descriptor_to_ipv4, &key);
if (fdinfo != 0) {
err = bpf_probe_read(c->address, sizeof(c->address), fdinfo->ipv4_addr);
c->flags |= (fdinfo->flags & FLAGS_IS_CLIENT_BIT);
if (err != 0) {
char msg[] = "Error reading from fd address %ld - %ld";
bpf_trace_printk(msg, sizeof(msg), id, err);
}
}
bpf_perf_event_output(ctx, &chunks_buffer, BPF_F_CURRENT_CPU, c, sizeof(struct tlsChunk));
return 0;
} }
SEC("uprobe/ssl_write") SEC("uprobe/ssl_write")
int BPF_KPROBE(ssl_write, void* ssl, void* buffer, int num) { void BPF_KPROBE(ssl_write, void* ssl, void* buffer, int num) {
return ssl_uprobe(ssl, buffer, num, &ssl_write_context, 0); ssl_uprobe(ssl, buffer, num, &ssl_write_context, 0);
} }
SEC("uretprobe/ssl_write") SEC("uretprobe/ssl_write")
int BPF_KPROBE(ssl_ret_write) { void BPF_KPROBE(ssl_ret_write) {
return ssl_uretprobe(ctx, &ssl_write_context, 0); ssl_uretprobe(ctx, &ssl_write_context, 0);
} }
SEC("uprobe/ssl_read") SEC("uprobe/ssl_read")
int BPF_KPROBE(ssl_read, void* ssl, void* buffer, int num) { void BPF_KPROBE(ssl_read, void* ssl, void* buffer, int num) {
return ssl_uprobe(ssl, buffer, num, &ssl_read_context, 0); ssl_uprobe(ssl, buffer, num, &ssl_read_context, 0);
} }
SEC("uretprobe/ssl_read") SEC("uretprobe/ssl_read")
int BPF_KPROBE(ssl_ret_read) { void BPF_KPROBE(ssl_ret_read) {
return ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT); ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT);
} }
SEC("uprobe/ssl_write_ex") SEC("uprobe/ssl_write_ex")
int BPF_KPROBE(ssl_write_ex, void* ssl, void* buffer, size_t num, size_t *written) { void BPF_KPROBE(ssl_write_ex, void* ssl, void* buffer, size_t num, size_t *written) {
return ssl_uprobe(ssl, buffer, num, &ssl_write_context, written); ssl_uprobe(ssl, buffer, num, &ssl_write_context, written);
} }
SEC("uretprobe/ssl_write_ex") SEC("uretprobe/ssl_write_ex")
int BPF_KPROBE(ssl_ret_write_ex) { void BPF_KPROBE(ssl_ret_write_ex) {
return ssl_uretprobe(ctx, &ssl_write_context, 0); ssl_uretprobe(ctx, &ssl_write_context, 0);
} }
SEC("uprobe/ssl_read_ex") SEC("uprobe/ssl_read_ex")
int BPF_KPROBE(ssl_read_ex, void* ssl, void* buffer, size_t num, size_t *readbytes) { void BPF_KPROBE(ssl_read_ex, void* ssl, void* buffer, size_t num, size_t *readbytes) {
return ssl_uprobe(ssl, buffer, num, &ssl_read_context, readbytes); ssl_uprobe(ssl, buffer, num, &ssl_read_context, readbytes);
} }
SEC("uretprobe/ssl_read_ex") SEC("uretprobe/ssl_read_ex")
int BPF_KPROBE(ssl_ret_read_ex) { void BPF_KPROBE(ssl_ret_read_ex) {
return ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT); ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT);
} }

View File

@@ -16,14 +16,15 @@ const FLAGS_IS_READ_BIT uint32 = (1 << 1)
// Be careful when editing, alignment and padding should be exactly the same in go/c. // Be careful when editing, alignment and padding should be exactly the same in go/c.
// //
type tlsChunk struct { type tlsChunk struct {
Pid uint32 Pid uint32 // process id
Tgid uint32 Tgid uint32 // thread id inside the process
Len uint32 Len uint32 // the size of the native buffer used to read/write the tls data (may be bigger than tlsChunk.Data[])
Recorded uint32 Start uint32 // the start offset withing the native buffer
Fd uint32 Recorded uint32 // number of bytes copied from the native buffer to tlsChunk.Data[]
Flags uint32 Fd uint32 // the file descriptor used to read/write the tls data (probably socket file descriptor)
Address [16]byte Flags uint32 // bitwise flags
Data [4096]byte Address [16]byte // ipv4 address and port
Data [4096]byte // actual tls data
} }
func (c *tlsChunk) getAddress() (net.IP, uint16, error) { func (c *tlsChunk) getAddress() (net.IP, uint16, error) {

View File

@@ -146,6 +146,7 @@ func (p *tlsPoller) startNewTlsReader(chunk *tlsChunk, ip net.IP, port uint16, k
doneHandler: func(r *tlsReader) { doneHandler: func(r *tlsReader) {
p.closeReader(key, r) p.closeReader(key, r)
}, },
progress: &api.ReadProgress{},
} }
tcpid := p.buildTcpId(chunk, ip, port) tcpid := p.buildTcpId(chunk, ip, port)
@@ -158,7 +159,7 @@ func dissect(extension *api.Extension, reader *tlsReader, isRequest bool, tcpid
emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher api.RequestResponseMatcher) { emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher api.RequestResponseMatcher) {
b := bufio.NewReader(reader) b := bufio.NewReader(reader)
err := extension.Dissector.Dissect(b, api.Ebpf, isRequest, tcpid, &api.CounterPair{}, err := extension.Dissector.Dissect(b, reader.progress, api.Ebpf, isRequest, tcpid, &api.CounterPair{},
&api.SuperTimer{}, &api.SuperIdentifier{}, emitter, options, reqResMatcher) &api.SuperTimer{}, &api.SuperIdentifier{}, emitter, options, reqResMatcher)
if err != nil { if err != nil {
@@ -224,8 +225,8 @@ func (p *tlsPoller) logTls(chunk *tlsChunk, ip net.IP, port uint16) {
str := strings.ReplaceAll(strings.ReplaceAll(string(chunk.Data[0:chunk.Recorded]), "\n", " "), "\r", "") str := strings.ReplaceAll(strings.ReplaceAll(string(chunk.Data[0:chunk.Recorded]), "\n", " "), "\r", "")
logger.Log.Infof("PID: %v (tid: %v) (fd: %v) (client: %v) (addr: %v:%v) (fdaddr %v:%v>%v:%v) (recorded %v out of %v) - %v - %v", logger.Log.Infof("PID: %v (tid: %v) (fd: %v) (client: %v) (addr: %v:%v) (fdaddr %v:%v>%v:%v) (recorded %v out of %v starting at %v) - %v - %v",
chunk.Pid, chunk.Tgid, chunk.Fd, flagsStr, ip, port, chunk.Pid, chunk.Tgid, chunk.Fd, flagsStr, ip, port,
srcIp, srcPort, dstIp, dstPort, srcIp, srcPort, dstIp, dstPort,
chunk.Recorded, chunk.Len, str, hex.EncodeToString(chunk.Data[0:chunk.Recorded])) chunk.Recorded, chunk.Len, chunk.Start, str, hex.EncodeToString(chunk.Data[0:chunk.Recorded]))
} }

View File

@@ -3,6 +3,8 @@ package tlstapper
import ( import (
"io" "io"
"time" "time"
"github.com/up9inc/mizu/tap/api"
) )
type tlsReader struct { type tlsReader struct {
@@ -10,6 +12,7 @@ type tlsReader struct {
chunks chan *tlsChunk chunks chan *tlsChunk
data []byte data []byte
doneHandler func(r *tlsReader) doneHandler func(r *tlsReader)
progress *api.ReadProgress
} }
func (r *tlsReader) Read(p []byte) (int, error) { func (r *tlsReader) Read(p []byte) (int, error) {
@@ -36,6 +39,7 @@ func (r *tlsReader) Read(p []byte) (int, error) {
l := copy(p, r.data) l := copy(p, r.data)
r.data = r.data[l:] r.data = r.data[l:]
r.progress.Feed(l)
return l, nil return l, nil
} }

Binary file not shown.

Binary file not shown.

View File

@@ -1,6 +1,6 @@
{ {
"name": "@up9/mizu-common", "name": "@up9/mizu-common",
"version": "1.0.128", "version": "1.0.132",
"description": "Made with create-react-library", "description": "Made with create-react-library",
"author": "", "author": "",
"license": "MIT", "license": "MIT",

View File

@@ -5,10 +5,8 @@ import Moment from 'moment';
import {EntryItem} from "./EntryListItem/EntryListItem"; import {EntryItem} from "./EntryListItem/EntryListItem";
import down from "assets/downImg.svg"; import down from "assets/downImg.svg";
import spinner from 'assets/spinner.svg'; import spinner from 'assets/spinner.svg';
import {RecoilState, useRecoilState, useRecoilValue} from "recoil"; import {RecoilState, useRecoilState, useRecoilValue} from "recoil";
import entriesAtom from "../../recoil/entries"; import entriesAtom from "../../recoil/entries";
import wsConnectionAtom, {WsConnectionStatus} from "../../recoil/wsConnection";
import queryAtom from "../../recoil/query"; import queryAtom from "../../recoil/query";
import TrafficViewerApiAtom from "../../recoil/TrafficViewerApi"; import TrafficViewerApiAtom from "../../recoil/TrafficViewerApi";
import TrafficViewerApi from "./TrafficViewerApi"; import TrafficViewerApi from "./TrafficViewerApi";
@@ -32,14 +30,15 @@ interface EntriesListProps {
truncatedTimestamp: number; truncatedTimestamp: number;
setTruncatedTimestamp: any; setTruncatedTimestamp: any;
scrollableRef: any; scrollableRef: any;
ws: any;
} }
export const EntriesList: React.FC<EntriesListProps> = ({listEntryREF, onSnapBrokenEvent, isSnappedToBottom, setIsSnappedToBottom, queriedCurrent, setQueriedCurrent, queriedTotal, setQueriedTotal, startTime, noMoreDataTop, setNoMoreDataTop, leftOffTop, setLeftOffTop, openWebSocket, leftOffBottom, truncatedTimestamp, setTruncatedTimestamp, scrollableRef}) => { export const EntriesList: React.FC<EntriesListProps> = ({listEntryREF, onSnapBrokenEvent, isSnappedToBottom, setIsSnappedToBottom, queriedCurrent, setQueriedCurrent, queriedTotal, setQueriedTotal, startTime, noMoreDataTop, setNoMoreDataTop, leftOffTop, setLeftOffTop, openWebSocket, leftOffBottom, truncatedTimestamp, setTruncatedTimestamp, scrollableRef, ws}) => {
const [entries, setEntries] = useRecoilState(entriesAtom); const [entries, setEntries] = useRecoilState(entriesAtom);
const wsConnection = useRecoilValue(wsConnectionAtom);
const query = useRecoilValue(queryAtom); const query = useRecoilValue(queryAtom);
const isWsConnectionClosed = wsConnection === WsConnectionStatus.Closed; const isWsConnectionClosed = ws?.current?.readyState !== WebSocket.OPEN;
const trafficViewerApi = useRecoilValue(TrafficViewerApiAtom as RecoilState<TrafficViewerApi>) const trafficViewerApi = useRecoilValue(TrafficViewerApiAtom as RecoilState<TrafficViewerApi>)
const [loadMoreTop, setLoadMoreTop] = useState(false); const [loadMoreTop, setLoadMoreTop] = useState(false);

View File

@@ -36,23 +36,36 @@ const useStyles = makeStyles(() => ({
export const formatSize = (n: number) => n > 1000 ? `${Math.round(n / 1000)}KB` : `${n} B`; export const formatSize = (n: number) => n > 1000 ? `${Math.round(n / 1000)}KB` : `${n} B`;
const EntryTitle: React.FC<any> = ({protocol, data, bodySize, elapsedTime}) => { const EntryTitle: React.FC<any> = ({protocol, data, elapsedTime}) => {
const classes = useStyles(); const classes = useStyles();
const request = data.request;
const response = data.response; const response = data.response;
return <div className={classes.entryTitle}> return <div className={classes.entryTitle}>
<Protocol protocol={protocol} horizontal={true}/> <Protocol protocol={protocol} horizontal={true}/>
<div style={{right: "30px", position: "absolute", display: "flex"}}> <div style={{right: "30px", position: "absolute", display: "flex"}}>
{response && <Queryable {request && <Queryable
query={`response.bodySize == ${bodySize}`} query={`requestSize == ${data.requestSize}`}
style={{margin: "0 18px"}} style={{margin: "0 18px"}}
displayIconOnMouseOver={true} displayIconOnMouseOver={true}
> >
<div <div
style={{opacity: 0.5}} style={{opacity: 0.5}}
id="entryDetailedTitleBodySize" id="entryDetailedTitleRequestSize"
> >
{formatSize(bodySize)} {`Request: ${formatSize(data.requestSize)}`}
</div>
</Queryable>}
{response && <Queryable
query={`responseSize == ${data.responseSize}`}
style={{margin: "0 18px"}}
displayIconOnMouseOver={true}
>
<div
style={{opacity: 0.5}}
id="entryDetailedTitleResponseSize"
>
{`Response: ${formatSize(data.responseSize)}`}
</div> </div>
</Queryable>} </Queryable>}
{response && <Queryable {response && <Queryable
@@ -64,7 +77,7 @@ const EntryTitle: React.FC<any> = ({protocol, data, bodySize, elapsedTime}) => {
style={{opacity: 0.5}} style={{opacity: 0.5}}
id="entryDetailedTitleElapsedTime" id="entryDetailedTitleElapsedTime"
> >
{Math.round(elapsedTime)}ms {`Elapsed Time: ${Math.round(elapsedTime)}ms`}
</div> </div>
</Queryable>} </Queryable>}
</div> </div>
@@ -120,7 +133,6 @@ export const EntryDetailed = () => {
{entryData && <EntryTitle {entryData && <EntryTitle
protocol={entryData.protocol} protocol={entryData.protocol}
data={entryData.data} data={entryData.data}
bodySize={entryData.bodySize}
elapsedTime={entryData.data.elapsedTime} elapsedTime={entryData.data.elapsedTime}
/>} />}
{entryData && <EntrySummary entry={entryData.base}/>} {entryData && <EntrySummary entry={entryData.base}/>}

View File

@@ -4,7 +4,7 @@ import SwapHorizIcon from '@material-ui/icons/SwapHoriz';
import styles from './EntryListItem.module.sass'; import styles from './EntryListItem.module.sass';
import StatusCode, {getClassification, StatusCodeClassification} from "../../UI/StatusCode"; import StatusCode, {getClassification, StatusCodeClassification} from "../../UI/StatusCode";
import Protocol, {ProtocolInterface} from "../../UI/Protocol" import Protocol, {ProtocolInterface} from "../../UI/Protocol"
import eBPFLogo from '../assets/ebpf.png'; import eBPFLogo from '../../assets/ebpf.png';
import {Summary} from "../../UI/Summary"; import {Summary} from "../../UI/Summary";
import Queryable from "../../UI/Queryable"; import Queryable from "../../UI/Queryable";
import ingoingIconSuccess from "assets/ingoing-traffic-success.svg" import ingoingIconSuccess from "assets/ingoing-traffic-success.svg"

View File

@@ -1,4 +1,4 @@
import React, { useCallback, useEffect, useMemo, useRef, useState } from "react"; import React, { useEffect, useMemo, useRef, useState } from "react";
import { Filters } from "./Filters"; import { Filters } from "./Filters";
import { EntriesList } from "./EntriesList"; import { EntriesList } from "./EntriesList";
import { makeStyles } from "@material-ui/core"; import { makeStyles } from "@material-ui/core";
@@ -14,7 +14,6 @@ import debounce from 'lodash/debounce';
import { RecoilRoot, RecoilState, useRecoilState, useRecoilValue, useSetRecoilState } from "recoil"; import { RecoilRoot, RecoilState, useRecoilState, useRecoilValue, useSetRecoilState } from "recoil";
import entriesAtom from "../../recoil/entries"; import entriesAtom from "../../recoil/entries";
import focusedEntryIdAtom from "../../recoil/focusedEntryId"; import focusedEntryIdAtom from "../../recoil/focusedEntryId";
import websocketConnectionAtom, { WsConnectionStatus } from "../../recoil/wsConnection";
import queryAtom from "../../recoil/query"; import queryAtom from "../../recoil/query";
import { TLSWarning } from "../TLSWarning/TLSWarning"; import { TLSWarning } from "../TLSWarning/TLSWarning";
import trafficViewerApiAtom from "../../recoil/TrafficViewerApi" import trafficViewerApiAtom from "../../recoil/TrafficViewerApi"
@@ -48,21 +47,24 @@ interface TrafficViewerProps {
trafficViewerApiProp: TrafficViewerApi, trafficViewerApiProp: TrafficViewerApi,
actionButtons?: JSX.Element, actionButtons?: JSX.Element,
isShowStatusBar?: boolean, isShowStatusBar?: boolean,
webSocketUrl : string webSocketUrl : string,
isCloseWebSocket : boolean
} }
export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus, trafficViewerApiProp, actionButtons,isShowStatusBar,webSocketUrl}) => { export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus, trafficViewerApiProp,
actionButtons,isShowStatusBar,webSocketUrl,
isCloseWebSocket}) => {
const classes = useLayoutStyles(); const classes = useLayoutStyles();
const [entries, setEntries] = useRecoilState(entriesAtom); const [entries, setEntries] = useRecoilState(entriesAtom);
const [focusedEntryId, setFocusedEntryId] = useRecoilState(focusedEntryIdAtom); const [focusedEntryId, setFocusedEntryId] = useRecoilState(focusedEntryIdAtom);
const [wsConnection, setWsConnection] = useRecoilState(websocketConnectionAtom);
const query = useRecoilValue(queryAtom); const query = useRecoilValue(queryAtom);
const setTrafficViewerApiState = useSetRecoilState(trafficViewerApiAtom as RecoilState<TrafficViewerApi>) const setTrafficViewerApiState = useSetRecoilState(trafficViewerApiAtom as RecoilState<TrafficViewerApi>)
const [tappingStatus, setTappingStatus] = useRecoilState(tappingStatusAtom); const [tappingStatus, setTappingStatus] = useRecoilState(tappingStatusAtom);
const [noMoreDataTop, setNoMoreDataTop] = useState(false); const [noMoreDataTop, setNoMoreDataTop] = useState(false);
const [isSnappedToBottom, setIsSnappedToBottom] = useState(true); const [isSnappedToBottom, setIsSnappedToBottom] = useState(true);
const [forceRender, setForceRender] = useState(0);
const [queryBackgroundColor, setQueryBackgroundColor] = useState("#f5f5f5"); const [queryBackgroundColor, setQueryBackgroundColor] = useState("#f5f5f5");
@@ -103,6 +105,10 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
handleQueryChange(query); handleQueryChange(query);
}, [query, handleQueryChange]); }, [query, handleQueryChange]);
useEffect(()=>{
isCloseWebSocket && closeWebSocket()
},[isCloseWebSocket])
const ws = useRef(null); const ws = useRef(null);
const listEntry = useRef(null); const listEntry = useRef(null);
@@ -114,22 +120,26 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
setLeftOffTop(null); setLeftOffTop(null);
setNoMoreDataTop(false); setNoMoreDataTop(false);
} }
ws.current = new WebSocket(webSocketUrl); try {
ws.current.onopen = () => { ws.current = new WebSocket(webSocketUrl);
setWsConnection(WsConnectionStatus.Connected);
sendQueryWhenWsOpen(query); sendQueryWhenWsOpen(query);
}
ws.current.onclose = () => { ws.current.onclose = () => {
setWsConnection(WsConnectionStatus.Closed); if(window.location.pathname === "/")
} setForceRender(forceRender + 1);
ws.current.onerror = (event) => {
console.error("WebSocket error:", event);
if (query) {
openWebSocket(`(${query}) and leftOff(${leftOffBottom})`, false);
} else {
openWebSocket(`leftOff(${leftOffBottom})`, false);
} }
} ws.current.onerror = (event) => {
console.error("WebSocket error:", event);
if (ws?.current?.readyState === WebSocket.OPEN) {
ws.current.close();
}
if (query) {
openWebSocket(`(${query}) and leftOff(${leftOffBottom})`, false);
} else {
openWebSocket(`leftOff(${leftOffBottom})`, false);
}
}
} catch (e) {}
} }
const sendQueryWhenWsOpen = (query) => { const sendQueryWhenWsOpen = (query) => {
@@ -142,6 +152,12 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
}, 500) }, 500)
} }
const closeWebSocket = () => {
if(ws?.current?.readyState === WebSocket.OPEN) {
ws.current.close();
}
}
if (ws.current) { if (ws.current) {
ws.current.onmessage = (e) => { ws.current.onmessage = (e) => {
if (!e?.data) return; if (!e?.data) return;
@@ -200,7 +216,7 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
} }
useEffect(() => { useEffect(() => {
setTrafficViewerApiState({...trafficViewerApiProp, webSocket : {close : () => ws.current.close()}}); setTrafficViewerApiState({...trafficViewerApiProp, webSocket : {close : closeWebSocket}});
(async () => { (async () => {
openWebSocket("leftOff(-1)", true); openWebSocket("leftOff(-1)", true);
try{ try{
@@ -218,8 +234,9 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
}, []); }, []);
const toggleConnection = () => { const toggleConnection = () => {
ws.current.close(); if(ws?.current?.readyState === WebSocket.OPEN) {
if (wsConnection !== WsConnectionStatus.Connected) { ws?.current?.close();
} else {
if (query) { if (query) {
openWebSocket(`(${query}) and leftOff(-1)`, true); openWebSocket(`(${query}) and leftOff(-1)`, true);
} else { } else {
@@ -230,6 +247,12 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
} }
} }
useEffect(() => {
return () => {
ws.current.close();
};
}, []);
const onTLSDetected = (destAddress: string) => { const onTLSDetected = (destAddress: string) => {
addressesWithTLS.add(destAddress); addressesWithTLS.add(destAddress);
setAddressesWithTLS(new Set(addressesWithTLS)); setAddressesWithTLS(new Set(addressesWithTLS));
@@ -240,8 +263,8 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
}; };
const getConnectionIndicator = () => { const getConnectionIndicator = () => {
switch (wsConnection) { switch (ws?.current?.readyState) {
case WsConnectionStatus.Connected: case WebSocket.OPEN:
return <div className={`${TrafficViewerStyles.indicatorContainer} ${TrafficViewerStyles.greenIndicatorContainer}`}> return <div className={`${TrafficViewerStyles.indicatorContainer} ${TrafficViewerStyles.greenIndicatorContainer}`}>
<div className={`${TrafficViewerStyles.indicator} ${TrafficViewerStyles.greenIndicator}`} /> <div className={`${TrafficViewerStyles.indicator} ${TrafficViewerStyles.greenIndicator}`} />
</div> </div>
@@ -253,8 +276,8 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
} }
const getConnectionTitle = () => { const getConnectionTitle = () => {
switch (wsConnection) { switch (ws?.current?.readyState) {
case WsConnectionStatus.Connected: case WebSocket.OPEN:
return "streaming live traffic" return "streaming live traffic"
default: default:
return "streaming paused"; return "streaming paused";
@@ -263,7 +286,7 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
const onSnapBrokenEvent = () => { const onSnapBrokenEvent = () => {
setIsSnappedToBottom(false); setIsSnappedToBottom(false);
if (wsConnection === WsConnectionStatus.Connected) { if (ws?.current?.readyState === WebSocket.OPEN) {
ws.current.close(); ws.current.close();
} }
} }
@@ -273,9 +296,9 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
{tappingStatus && isShowStatusBar && <StatusBar />} {tappingStatus && isShowStatusBar && <StatusBar />}
<div className={TrafficViewerStyles.TrafficPageHeader}> <div className={TrafficViewerStyles.TrafficPageHeader}>
<div className={TrafficViewerStyles.TrafficPageStreamStatus}> <div className={TrafficViewerStyles.TrafficPageStreamStatus}>
<img className={TrafficViewerStyles.playPauseIcon} style={{ visibility: wsConnection === WsConnectionStatus.Connected ? "visible" : "hidden" }} alt="pause" <img className={TrafficViewerStyles.playPauseIcon} style={{ visibility: ws?.current?.readyState === WebSocket.OPEN ? "visible" : "hidden" }} alt="pause"
src={pauseIcon} onClick={toggleConnection} /> src={pauseIcon} onClick={toggleConnection} />
<img className={TrafficViewerStyles.playPauseIcon} style={{ position: "absolute", visibility: wsConnection === WsConnectionStatus.Connected ? "hidden" : "visible" }} alt="play" <img className={TrafficViewerStyles.playPauseIcon} style={{ position: "absolute", visibility: ws?.current?.readyState === WebSocket.OPEN ? "hidden" : "visible" }} alt="play"
src={playIcon} onClick={toggleConnection} /> src={playIcon} onClick={toggleConnection} />
<div className={TrafficViewerStyles.connectionText}> <div className={TrafficViewerStyles.connectionText}>
{getConnectionTitle()} {getConnectionTitle()}
@@ -311,6 +334,7 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
truncatedTimestamp={truncatedTimestamp} truncatedTimestamp={truncatedTimestamp}
setTruncatedTimestamp={setTruncatedTimestamp} setTruncatedTimestamp={setTruncatedTimestamp}
scrollableRef={scrollableRef} scrollableRef={scrollableRef}
ws={ws}
/> />
</div> </div>
</div> </div>
@@ -330,10 +354,13 @@ export const TrafficViewer : React.FC<TrafficViewerProps> = ({setAnalyzeStatus,
}; };
const MemoiedTrafficViewer = React.memo(TrafficViewer) const MemoiedTrafficViewer = React.memo(TrafficViewer)
const TrafficViewerContainer: React.FC<TrafficViewerProps> = ({ setAnalyzeStatus, trafficViewerApiProp, actionButtons, isShowStatusBar = true ,webSocketUrl}) => { const TrafficViewerContainer: React.FC<TrafficViewerProps> = ({ setAnalyzeStatus, trafficViewerApiProp,
actionButtons, isShowStatusBar = true ,
webSocketUrl, isCloseWebSocket}) => {
return <RecoilRoot> return <RecoilRoot>
<MemoiedTrafficViewer actionButtons={actionButtons} isShowStatusBar={isShowStatusBar} webSocketUrl={webSocketUrl} <MemoiedTrafficViewer actionButtons={actionButtons} isShowStatusBar={isShowStatusBar} webSocketUrl={webSocketUrl}
trafficViewerApiProp={trafficViewerApiProp} setAnalyzeStatus={setAnalyzeStatus} /> isCloseWebSocket={isCloseWebSocket} trafficViewerApiProp={trafficViewerApiProp}
setAnalyzeStatus={setAnalyzeStatus} />
</RecoilRoot> </RecoilRoot>
} }

View File

@@ -1,13 +1,13 @@
type TrafficViewerApi = { type TrafficViewerApi = {
validateQuery : (query: any) => any validateQuery: (query: any) => any
tapStatus : () => any tapStatus: () => any
analyzeStatus : () => any analyzeStatus: () => any
fetchEntries : (leftOff: any, direction: number, query: any, limit: number, timeoutMs: number) => any fetchEntries: (leftOff: any, direction: number, query: any, limit: number, timeoutMs: number) => any
getEntry : (entryId : any, query:string) => any getEntry: (entryId: any, query: string) => any
getRecentTLSLinks : () => any, getRecentTLSLinks: () => any,
webSocket : { webSocket: {
close : () => {} close: () => void
}
} }
}
export default TrafficViewerApi export default TrafficViewerApi

View File

@@ -15,7 +15,7 @@ interface Props {
classes?: any, classes?: any,
tabs: Tab[], tabs: Tab[],
currentTab: string, currentTab: string,
color: string, color?: string,
onChange: (string) => void, onChange: (string) => void,
leftAligned?: boolean, leftAligned?: boolean,
dark?: boolean, dark?: boolean,

View File

Before

Width:  |  Height:  |  Size: 21 KiB

After

Width:  |  Height:  |  Size: 21 KiB

View File

@@ -1,8 +0,0 @@
import { atom } from "recoil";
const wsConnectionAtom = atom({
key: "wsConnectionAtom",
default: 0
});
export default wsConnectionAtom;

View File

@@ -1,10 +0,0 @@
import atom from "./atom";
enum WsConnectionStatus {
Closed,
Connected,
}
export {WsConnectionStatus};
export default atom

View File

@@ -13,7 +13,7 @@
"@types/jest": "^26.0.22", "@types/jest": "^26.0.22",
"@types/node": "^12.20.10", "@types/node": "^12.20.10",
"@uiw/react-textarea-code-editor": "^1.4.12", "@uiw/react-textarea-code-editor": "^1.4.12",
"@up9/mizu-common": "^1.0.128", "@up9/mizu-common": "1.0.131",
"axios": "^0.25.0", "axios": "^0.25.0",
"core-js": "^3.20.2", "core-js": "^3.20.2",
"craco-babel-loader": "^1.0.3", "craco-babel-loader": "^1.0.3",

View File

@@ -1,4 +1,4 @@
import React, {useEffect} from "react"; import React, {useEffect, useState} from "react";
import { Button } from "@material-ui/core"; import { Button } from "@material-ui/core";
import Api,{getWebsocketUrl} from "../../../helpers/api"; import Api,{getWebsocketUrl} from "../../../helpers/api";
import debounce from 'lodash/debounce'; import debounce from 'lodash/debounce';
@@ -8,8 +8,8 @@ import serviceMapModalOpenAtom from "../../../recoil/serviceMapModalOpen";
import TrafficViewer from "@up9/mizu-common" import TrafficViewer from "@up9/mizu-common"
import "@up9/mizu-common/dist/index.css" import "@up9/mizu-common/dist/index.css"
import oasModalOpenAtom from "../../../recoil/oasModalOpen/atom"; import oasModalOpenAtom from "../../../recoil/oasModalOpen/atom";
import serviceMap from "../../assets/serviceMap.svg"; import serviceMap from "../../assets/serviceMap.svg";
import services from "../../assets/services.svg"; import services from "../../assets/services.svg";
interface TrafficPageProps { interface TrafficPageProps {
setAnalyzeStatus?: (status: any) => void; setAnalyzeStatus?: (status: any) => void;
@@ -21,38 +21,40 @@ export const TrafficPage: React.FC<TrafficPageProps> = ({setAnalyzeStatus}) => {
const commonClasses = useCommonStyles(); const commonClasses = useCommonStyles();
const setServiceMapModalOpen = useSetRecoilState(serviceMapModalOpenAtom); const setServiceMapModalOpen = useSetRecoilState(serviceMapModalOpenAtom);
const [openOasModal, setOpenOasModal] = useRecoilState(oasModalOpenAtom); const [openOasModal, setOpenOasModal] = useRecoilState(oasModalOpenAtom);
const [openWebSocket, setOpenWebSocket] = useState(true);
const trafficViewerApi = {...api} const trafficViewerApi = {...api}
const handleOpenOasModal = () => { const handleOpenOasModal = () => {
//closeSocket() -- Todo: Add Close webSocket setOpenWebSocket(false)
setOpenOasModal(true); setOpenOasModal(true);
} }
const openServiceMapModalDebounce = debounce(() => { const openServiceMapModalDebounce = debounce(() => {
setOpenWebSocket(false)
setServiceMapModalOpen(true) setServiceMapModalOpen(true)
}, 500); }, 500);
const actionButtons = (window["isOasEnabled"] || window["isServiceMapEnabled"]) && const actionButtons = (window["isOasEnabled"] || window["isServiceMapEnabled"]) &&
<div style={{ display: 'flex', height: "100%" }}> <div style={{ display: 'flex', height: "100%" }}>
{window["isOasEnabled"] && <Button {window["isOasEnabled"] && <Button
startIcon={<img className="custom" src={services} alt="services"></img>} startIcon={<img className="custom" src={services} alt="services"></img>}
size="large" size="large"
type="submit" type="submit"
variant="contained" variant="contained"
className={commonClasses.outlinedButton + " " + commonClasses.imagedButton} className={commonClasses.outlinedButton + " " + commonClasses.imagedButton}
style={{ marginRight: 25 }} style={{ marginRight: 25 }}
onClick={handleOpenOasModal}> onClick={handleOpenOasModal}>
Show OAS Show OAS
</Button>} </Button>}
{window["isServiceMapEnabled"] && <Button {window["isServiceMapEnabled"] && <Button
startIcon={<img src={serviceMap} className="custom" alt="service-map" style={{marginRight:"8%"}}></img>} startIcon={<img src={serviceMap} className="custom" alt="service-map" style={{marginRight:"8%"}}></img>}
size="large" size="large"
variant="contained" variant="contained"
className={commonClasses.outlinedButton + " " + commonClasses.imagedButton} className={commonClasses.outlinedButton + " " + commonClasses.imagedButton}
onClick={openServiceMapModalDebounce}> onClick={openServiceMapModalDebounce}>
Service Map Service Map
</Button>} </Button>}
</div> </div>
useEffect(() => { useEffect(() => {
@@ -61,9 +63,9 @@ const trafficViewerApi = {...api}
} }
},[]) },[])
return ( return (
<> <>
<TrafficViewer setAnalyzeStatus={setAnalyzeStatus} webSocketUrl={getWebsocketUrl()} <TrafficViewer setAnalyzeStatus={setAnalyzeStatus} webSocketUrl={getWebsocketUrl()} isCloseWebSocket={!openWebSocket}
trafficViewerApiProp={trafficViewerApi} actionButtons={actionButtons} isShowStatusBar={!openOasModal}/> trafficViewerApiProp={trafficViewerApi} actionButtons={actionButtons} isShowStatusBar={!openOasModal}/>
</> </>
); );