Mirror of https://github.com/kubeshark/kubeshark.git (synced 2026-02-15 02:19:54 +00:00)
Compare commits (31 commits)
Commit SHA1s in this comparison (author and date columns were empty in the captured view):
7abf8b83e3, 12d873d344, 672accba0c, 566eab3527, 0f52533cd8, eef58496b5, 1137f9386b, 93714ab902,
fc03ba2eda, 3662fbcdf6, b762e3c194, 35ef211477, feb386ba1f, ed4a818a53, fa733025dc, 5f603e3291,
b84c698c1a, c59aadb221, 6aaee4b519, 6f47ad862e, f18f3da99c, 3e32c889d9, f604a3a35d, 5d205b5082,
756f5f5720, 9a1c17cc61, 64253cd919, accad7c058, 485bc7fd2b, bc3efc6d4c, 135b1a5e1e
33  .github/workflows/publish-cli.yml (vendored)
@@ -2,8 +2,8 @@ name: public-cli
on:
  push:
    branches:
      - 'develop'
      - 'main'
      - develop
      - main
jobs:
  docker:
    runs-on: ubuntu-latest
@@ -15,5 +15,32 @@ jobs:
        with:
          service_account_key: ${{ secrets.GCR_JSON_KEY }}
          export_default_credentials: true
      - uses: haya14busa/action-cond@v1
        id: condval
        with:
          cond: ${{ github.ref == 'refs/heads/main' }}
          if_true: "minor"
          if_false: "patch"
      - name: Auto Increment Semver Action
        uses: MCKanpolat/auto-semver-action@1.0.5
        id: versioning
        with:
          releaseType: ${{ steps.condval.outputs.value }}
          github_token: ${{ secrets.GITHUB_TOKEN }}
      - name: Get base image name
        shell: bash
        run: |
          echo "##[set-output name=build_timestamp;]$(echo $(date +%s))"
          echo "##[set-output name=branch;]$(echo ${GITHUB_REF#refs/heads/})"
        id: version_parameters
      - name: Build and Push CLI
        run: make push-cli
        run: make push-cli SEM_VER='${{ steps.versioning.outputs.version }}' BUILD_TIMESTAMP='${{ steps.version_parameters.outputs.build_timestamp }}'
      - name: publish
        uses: ncipollo/release-action@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          artifacts: "cli/bin/*"
          commit: ${{ steps.version_parameters.outputs.branch }}
          tag: ${{ steps.versioning.outputs.version }}
          prerelease: ${{ github.ref != 'refs/heads/main' }}
          bodyFile: 'cli/bin/README.md'
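The updated step passes SEM_VER and BUILD_TIMESTAMP into `make push-cli`. The Makefile itself is not part of this comparison, so the following is only a hedged sketch of how such values are commonly injected into a Go CLI at build time with `-ldflags -X`; the variable names are illustrative and not taken from the mizu build.

```go
package main

import "fmt"

// Hypothetical wiring: these variables are illustrative only. They would be
// populated at build time, for example:
//   go build -ldflags "-X main.semVer=$(SEM_VER) -X main.buildTimestamp=$(BUILD_TIMESTAMP)"
var (
	semVer         = "0.0.0-dev"
	buildTimestamp = "unknown"
)

func printVersion() {
	fmt.Printf("mizu version %s (built at %s)\n", semVer, buildTimestamp)
}

func main() { printVersion() }
```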
@@ -18,12 +18,14 @@ WORKDIR /app/api-build

COPY api/go.mod api/go.sum ./
COPY shared/go.mod shared/go.mod ../shared/
COPY tap/go.mod tap/go.mod ../tap/
RUN go mod download
# cheap trick to make the build faster (as long as go.mod wasn't changed)
RUN go list -f '{{.Path}}@{{.Version}}' -m all | sed 1d | grep -e 'go-cache' -e 'sqlite' | xargs go get

# Copy and build api code
COPY shared ../shared
COPY tap ../tap
COPY api .
RUN go build -ldflags="-s -w" -o mizuagent .
64  README.md
@@ -1,18 +1,70 @@
# 水 mizu
standalone web app traffic viewer for Kubernetes
A simple-yet-powerful API traffic viewer for Kubernetes to help you troubleshoot and debug your microservices. Think TCPDump and Chrome Dev Tools combined.

## Download

Download `mizu` for your platform as
Download `mizu` for your platform and operating system

* for MacOS - `curl -o mizu https://static.up9.com/mizu/mizu-darwin-amd64 && chmod 755 mizu`
* for Linux - `curl -o mizu https://static.up9.com/mizu/mizu-linux-amd64 && chmod 755 mizu`
### Latest stable release

## Run
* for MacOS - Intel
```
curl -Lo mizu \
https://github.com/up9inc/mizu/releases/latest/download/mizu_darwin_amd64 \
&& chmod 755 mizu
```

* for Linux - Intel 64bit
```
curl -Lo mizu \
https://github.com/up9inc/mizu/releases/latest/download/mizu_linux_amd64 \
&& chmod 755 mizu
```

SHA256 checksums are available on the [Releases](https://github.com/up9inc/mizu/releases) page.

### Development (unstable) build
Pick one from the [Releases](https://github.com/up9inc/mizu/releases) page.

## How to run

1. Find the pod you'd like to tap in your Kubernetes cluster
2. Run `mizu --pod podname`
2. Run `mizu tap PODNAME` or `mizu tap REGEX`
3. Open a browser at `http://localhost:8899` as instructed ..
4. Watch the WebAPI traffic flowing ..
5. Type ^C to stop

## Examples

Run `mizu help` for usage options

To tap a specific pod -
```
$ kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
front-end-649fc5fd6-kqbtn   2/2     Running   0          7m
..

$ mizu tap front-end-649fc5fd6-kqbtn
+front-end-649fc5fd6-kqbtn
Web interface is now available at http://localhost:8899
^C
```

To tap multiple pods using a regex -
```
$ kubectl get pods
NAME                        READY   STATUS    RESTARTS   AGE
carts-66c77f5fbb-fq65r      2/2     Running   0          20m
catalogue-5f4cb7cf5-7zrmn   2/2     Running   0          20m
front-end-649fc5fd6-kqbtn   2/2     Running   0          20m
..

$ mizu tap "^ca.*"
+carts-66c77f5fbb-fq65r
+catalogue-5f4cb7cf5-7zrmn
Web interface is now available at http://localhost:8899
^C
```
@@ -11,15 +11,12 @@ require (
	github.com/go-playground/universal-translator v0.17.0
	github.com/go-playground/validator/v10 v10.5.0
	github.com/gofiber/fiber/v2 v2.8.0
	github.com/google/gopacket v1.1.19
	github.com/google/martian v2.1.0+incompatible
	github.com/gorilla/websocket v1.4.2
	github.com/leodido/go-urn v1.2.1 // indirect
	github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231
	github.com/patrickmn/go-cache v2.1.0+incompatible
	github.com/up9inc/mizu/shared v0.0.0
	github.com/up9inc/mizu/tap v0.0.0
	go.mongodb.org/mongo-driver v1.5.1
	golang.org/x/net v0.0.0-20210421230115-4e50805a0758
	gorm.io/driver/sqlite v1.1.4
	gorm.io/gorm v1.21.8
	k8s.io/api v0.21.0
@@ -28,3 +25,5 @@ require (
)

replace github.com/up9inc/mizu/shared v0.0.0 => ../shared

replace github.com/up9inc/mizu/tap v0.0.0 => ../tap
@@ -251,7 +251,6 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231 h1:fa50YL1pzKW+1SsBnJDOHppJN9stOEwS+CRWyUtyYGU=
github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
31  api/main.go
@@ -7,12 +7,12 @@ import (
	"github.com/gofiber/fiber/v2"
	"github.com/gorilla/websocket"
	"github.com/up9inc/mizu/shared"
	"github.com/up9inc/mizu/tap"
	"mizuserver/pkg/api"
	"mizuserver/pkg/middleware"
	"mizuserver/pkg/models"
	"mizuserver/pkg/routes"
	"mizuserver/pkg/sensitiveDataFiltering"
	"mizuserver/pkg/tap"
	"mizuserver/pkg/utils"
	"os"
	"os/signal"
@@ -26,16 +26,21 @@ var aggregatorAddress = flag.String("aggregator-address", "", "Address of mizu c

func main() {
	flag.Parse()
	hostMode := os.Getenv(shared.HostModeEnvVar) == "1"
	tapOpts := &tap.TapOpts{HostMode: hostMode}

	if !*shouldTap && !*aggregator && !*standalone {
		panic("One of the flags --tap, --api or --standalone must be provided")
	}

	if *standalone {
		harOutputChannel := tap.StartPassiveTapper()
		harOutputChannel, outboundLinkOutputChannel := tap.StartPassiveTapper(tapOpts)
		filteredHarChannel := make(chan *tap.OutputChannelItem)
		go filterHarHeaders(harOutputChannel, filteredHarChannel, getTrafficFilteringOptions())

		go filterHarItems(harOutputChannel, filteredHarChannel, getTrafficFilteringOptions())
		go api.StartReadingEntries(filteredHarChannel, nil)
		go api.StartReadingOutbound(outboundLinkOutputChannel)

		hostApi(nil)
	} else if *shouldTap {
		if *aggregatorAddress == "" {
@@ -44,21 +49,26 @@ func main() {

		tapTargets := getTapTargets()
		if tapTargets != nil {
			tap.HostAppAddresses = tapTargets
			fmt.Println("Filtering for the following addresses:", tap.HostAppAddresses)
			tap.SetFilterAuthorities(tapTargets)
			fmt.Println("Filtering for the following authorities:", tap.GetFilterIPs())
		}

		harOutputChannel := tap.StartPassiveTapper()
		harOutputChannel, outboundLinkOutputChannel := tap.StartPassiveTapper(tapOpts)

		socketConnection, err := shared.ConnectToSocketServer(*aggregatorAddress, shared.DEFAULT_SOCKET_RETRIES, shared.DEFAULT_SOCKET_RETRY_SLEEP_TIME, false)
		if err != nil {
			panic(fmt.Sprintf("Error connecting to socket server at %s %v", *aggregatorAddress, err))
		}

		go pipeChannelToSocket(socketConnection, harOutputChannel)
		go api.StartReadingOutbound(outboundLinkOutputChannel)
	} else if *aggregator {
		socketHarOutChannel := make(chan *tap.OutputChannelItem, 1000)
		filteredHarChannel := make(chan *tap.OutputChannelItem)

		go filterHarItems(socketHarOutChannel, filteredHarChannel, getTrafficFilteringOptions())
		go api.StartReadingEntries(filteredHarChannel, nil)
		go filterHarHeaders(socketHarOutChannel, filteredHarChannel, getTrafficFilteringOptions())

		hostApi(socketHarOutChannel)
	}

@@ -115,9 +125,14 @@ func getTrafficFilteringOptions() *shared.TrafficFilteringOptions {
	return &filteringOptions
}

func filterHarHeaders(inChannel <- chan *tap.OutputChannelItem, outChannel chan *tap.OutputChannelItem, filterOptions *shared.TrafficFilteringOptions) {
func filterHarItems(inChannel <- chan *tap.OutputChannelItem, outChannel chan *tap.OutputChannelItem, filterOptions *shared.TrafficFilteringOptions) {
	for message := range inChannel {
		if message.ConnectionInfo.IsOutgoing && api.CheckIsServiceIP(message.ConnectionInfo.ServerIP) {
			continue
		}

		sensitiveDataFiltering.FilterSensitiveInfoFromHarRequest(message, filterOptions)

		outChannel <- message
	}
}
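The new `filterHarItems` goroutine above drops outgoing traffic whose destination is a cluster service IP and scrubs sensitive data before forwarding entries. Below is a minimal, self-contained sketch of that channel-filter pattern; `Item` and `isServiceIP` are simplified stand-ins for `tap.OutputChannelItem` and `api.CheckIsServiceIP`, not the real types.

```go
package sketch

// Item is a simplified stand-in for tap.OutputChannelItem.
type Item struct {
	IsOutgoing bool
	ServerIP   string
	Masked     bool
}

// filterItems mirrors the shape of filterHarItems: skip outgoing calls whose
// destination is a cluster service IP (they are captured again on the server
// side), scrub the rest, and forward downstream.
func filterItems(in <-chan *Item, out chan<- *Item, isServiceIP func(string) bool) {
	for item := range in {
		if item.IsOutgoing && isServiceIP(item.ServerIP) {
			continue
		}
		item.Masked = true // placeholder for the sensitive-data filtering step
		out <- item
	}
}
```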
@@ -5,18 +5,21 @@ import (
	"context"
	"encoding/json"
	"fmt"
	"github.com/google/martian/har"
	"go.mongodb.org/mongo-driver/bson/primitive"
	"mizuserver/pkg/database"
	"mizuserver/pkg/models"
	"mizuserver/pkg/resolver"
	"mizuserver/pkg/tap"
	"mizuserver/pkg/utils"
	"net/url"
	"os"
	"path"
	"sort"
	"strings"
	"time"

	"github.com/google/martian/har"
	"github.com/up9inc/mizu/tap"
	"go.mongodb.org/mongo-driver/bson/primitive"

	"mizuserver/pkg/database"
	"mizuserver/pkg/models"
	"mizuserver/pkg/resolver"
	"mizuserver/pkg/utils"
)

var k8sResolver *resolver.Resolver
@@ -57,14 +60,21 @@ func startReadingFiles(workingDir string) {
	for true {
		dir, _ := os.Open(workingDir)
		dirFiles, _ := dir.Readdir(-1)
		sort.Sort(utils.ByModTime(dirFiles))

		if len(dirFiles) == 0 {
		var harFiles []os.FileInfo
		for _, fileInfo := range dirFiles {
			if strings.HasSuffix(fileInfo.Name(), ".har") {
				harFiles = append(harFiles, fileInfo)
			}
		}
		sort.Sort(utils.ByModTime(harFiles))

		if len(harFiles) == 0 {
			fmt.Printf("Waiting for new files\n")
			time.Sleep(3 * time.Second)
			continue
		}
		fileInfo := dirFiles[0]
		fileInfo := harFiles[0]
		inputFilePath := path.Join(workingDir, fileInfo.Name())
		file, err := os.Open(inputFilePath)
		utils.CheckErr(err)
@@ -75,7 +85,14 @@ func startReadingFiles(workingDir string) {

		for _, entry := range inputHar.Log.Entries {
			time.Sleep(time.Millisecond * 250)
			saveHarToDb(entry, fileInfo.Name())
			connectionInfo := &tap.ConnectionInfo{
				ClientIP: fileInfo.Name(),
				ClientPort: "",
				ServerIP: "",
				ServerPort: "",
				IsOutgoing: false,
			}
			saveHarToDb(entry, connectionInfo)
		}
		rmErr := os.Remove(inputFilePath)
		utils.CheckErr(rmErr)
@@ -88,21 +105,29 @@ func startReadingChannel(outputItems <-chan *tap.OutputChannelItem) {
	}

	for item := range outputItems {
		saveHarToDb(item.HarEntry, item.RequestSenderIp)
		saveHarToDb(item.HarEntry, item.ConnectionInfo)
	}
}

func saveHarToDb(entry *har.Entry, sender string) {
func StartReadingOutbound(outboundLinkChannel <-chan *tap.OutboundLink) {
	// tcpStreamFactory will block on write to channel. Empty channel to unblock.
	// TODO: Make write to channel optional.
	for range outboundLinkChannel {
	}
}

func saveHarToDb(entry *har.Entry, connectionInfo *tap.ConnectionInfo) {
	entryBytes, _ := json.Marshal(entry)
	serviceName, urlPath, serviceHostName := getServiceNameFromUrl(entry.Request.URL)
	serviceName, urlPath := getServiceNameFromUrl(entry.Request.URL)
	entryId := primitive.NewObjectID().Hex()
	var (
		resolvedSource string
		resolvedDestination string
	)
	if k8sResolver != nil {
		resolvedSource = k8sResolver.Resolve(sender)
		resolvedDestination = k8sResolver.Resolve(serviceHostName)
		resolvedSource = k8sResolver.Resolve(connectionInfo.ClientIP)
		resolvedDestination = k8sResolver.Resolve(fmt.Sprintf("%s:%s", connectionInfo.ServerIP, connectionInfo.ServerPort))
	}
	mizuEntry := models.MizuEntry{
		EntryId: entryId,
@@ -112,20 +137,28 @@ func saveHarToDb(entry *har.Entry, sender string) {
		Path: urlPath,
		Method: entry.Request.Method,
		Status: entry.Response.Status,
		RequestSenderIp: sender,
		RequestSenderIp: connectionInfo.ClientIP,
		Timestamp: entry.StartedDateTime.UnixNano() / int64(time.Millisecond),
		ResolvedSource: resolvedSource,
		ResolvedDestination: resolvedDestination,
		IsOutgoing: connectionInfo.IsOutgoing,
	}
	database.GetEntriesTable().Create(&mizuEntry)

	baseEntry := utils.GetResolvedBaseEntry(mizuEntry)
	baseEntry := models.BaseEntryDetails{}
	if err := models.GetEntry(&mizuEntry, &baseEntry); err != nil {
		return
	}
	baseEntryBytes, _ := models.CreateBaseEntryWebSocketMessage(&baseEntry)
	broadcastToBrowserClients(baseEntryBytes)
}

func getServiceNameFromUrl(inputUrl string) (string, string, string) {
func getServiceNameFromUrl(inputUrl string) (string, string) {
	parsed, err := url.Parse(inputUrl)
	utils.CheckErr(err)
	return fmt.Sprintf("%s://%s", parsed.Scheme, parsed.Host), parsed.Path, parsed.Host
	return fmt.Sprintf("%s://%s", parsed.Scheme, parsed.Host), parsed.Path
}

func CheckIsServiceIP(address string) bool {
	return k8sResolver.CheckIsServiceIP(address)
}
@@ -5,10 +5,11 @@ import (
	"fmt"
	"github.com/antoniodipinto/ikisocket"
	"github.com/up9inc/mizu/shared"
	"github.com/up9inc/mizu/tap"
	"mizuserver/pkg/controllers"
	"mizuserver/pkg/models"
	"mizuserver/pkg/routes"
	"mizuserver/pkg/tap"
	"mizuserver/pkg/up9"
)

var browserClientSocketUUIDs = make([]string, 0)
@@ -18,6 +19,9 @@ type RoutesEventHandlers struct {
	SocketHarOutChannel chan<- *tap.OutputChannelItem
}

func init() {
	go up9.UpdateAnalyzeStatus(broadcastToBrowserClients)
}

func (h *RoutesEventHandlers) WebSocketConnect(ep *ikisocket.EventPayload) {
	if ep.Kws.GetAttribute("is_tapper") == true {
@@ -84,7 +88,6 @@ func (h *RoutesEventHandlers) WebSocketMessage(ep *ikisocket.EventPayload) {
	}
}

func removeSocketUUIDFromBrowserSlice(uuidToRemove string) {
	newUUIDSlice := make([]string, 0, len(browserClientSocketUUIDs))
	for _, uuid := range browserClientSocketUUIDs {
@@ -7,26 +7,11 @@ import (
	"github.com/google/martian/har"
	"mizuserver/pkg/database"
	"mizuserver/pkg/models"
	"mizuserver/pkg/up9"
	"mizuserver/pkg/utils"
	"mizuserver/pkg/validation"
)

const (
	OrderDesc = "desc"
	OrderAsc = "asc"
	LT = "lt"
	GT = "gt"
)

var (
	operatorToSymbolMapping = map[string]string{
		LT: "<",
		GT: ">",
	}
	operatorToOrderMapping = map[string]string{
		LT: OrderDesc,
		GT: OrderAsc,
	}
	"strings"
	"time"
)

func GetEntries(c *fiber.Ctx) error {
@@ -40,8 +25,8 @@ func GetEntries(c *fiber.Ctx) error {
		return c.Status(fiber.StatusBadRequest).JSON(err)
	}

	order := operatorToOrderMapping[entriesFilter.Operator]
	operatorSymbol := operatorToSymbolMapping[entriesFilter.Operator]
	order := database.OperatorToOrderMapping[entriesFilter.Operator]
	operatorSymbol := database.OperatorToSymbolMapping[entriesFilter.Operator]
	var entries []models.MizuEntry
	database.GetEntriesTable().
		Order(fmt.Sprintf("timestamp %s", order)).
@@ -50,15 +35,18 @@ func GetEntries(c *fiber.Ctx) error {
		Limit(entriesFilter.Limit).
		Find(&entries)

	if len(entries) > 0 && order == OrderDesc {
	if len(entries) > 0 && order == database.OrderDesc {
		// the entries are always ordered from oldest to newest, so we should reverse
		utils.ReverseSlice(entries)
	}

	// Convert to base entries
	baseEntries := make([]models.BaseEntryDetails, 0, entriesFilter.Limit)
	for _, entry := range entries {
		baseEntries = append(baseEntries, utils.GetResolvedBaseEntry(entry))
	baseEntries := make([]models.BaseEntryDetails, 0)
	for _, data := range entries {
		harEntry := models.BaseEntryDetails{}
		if err := models.GetEntry(&data, &harEntry); err != nil {
			continue
		}
		baseEntries = append(baseEntries, harEntry)
	}

	return c.Status(fiber.StatusOK).JSON(baseEntries)
@@ -66,7 +54,7 @@ func GetEntries(c *fiber.Ctx) error {

func GetHARs(c *fiber.Ctx) error {
	entriesFilter := &models.HarFetchRequestBody{}
	order := OrderDesc
	order := database.OrderDesc
	if err := c.QueryParser(entriesFilter); err != nil {
		return c.Status(fiber.StatusBadRequest).JSON(err)
	}
@@ -75,11 +63,23 @@ func GetHARs(c *fiber.Ctx) error {
		return c.Status(fiber.StatusBadRequest).JSON(err)
	}

	var timestampFrom, timestampTo int64

	if entriesFilter.From < 0 {
		timestampFrom = 0
	} else {
		timestampFrom = entriesFilter.From
	}
	if entriesFilter.To <= 0 {
		timestampTo = time.Now().UnixNano() / int64(time.Millisecond)
	} else {
		timestampTo = entriesFilter.To
	}

	var entries []models.MizuEntry
	database.GetEntriesTable().
		Where(fmt.Sprintf("timestamp BETWEEN %v AND %v", timestampFrom, timestampTo)).
		Order(fmt.Sprintf("timestamp %s", order)).
		// Where(fmt.Sprintf("timestamp %s %v", operatorSymbol, entriesFilter.Timestamp)).
		Limit(1000).
		Find(&entries)

	if len(entries) > 0 {
@@ -92,9 +92,20 @@ func GetHARs(c *fiber.Ctx) error {
	for _, entryData := range entries {
		var harEntry har.Entry
		_ = json.Unmarshal([]byte(entryData.Entry), &harEntry)
		if entryData.ResolvedDestination != "" {
			harEntry.Request.URL = utils.SetHostname(harEntry.Request.URL, entryData.ResolvedDestination)
		}

		var fileName string
		sourceOfEntry := entryData.ResolvedSource
		fileName := fmt.Sprintf("%s.har", sourceOfEntry)
		if sourceOfEntry != "" {
			// naively assumes the proper service source is http
			sourceOfEntry = fmt.Sprintf("http://%s", sourceOfEntry)
			// replace "/" in the file name, otherwise it ends up creating a corrupted folder
			fileName = fmt.Sprintf("%s.har", strings.ReplaceAll(sourceOfEntry, "/", "_"))
		} else {
			fileName = "unknown_source.har"
		}
		if harOfSource, ok := harsObject[fileName]; ok {
			harOfSource.Log.Entries = append(harOfSource.Log.Entries, &harEntry)
		} else {
@@ -108,11 +119,14 @@ func GetHARs(c *fiber.Ctx) error {
					Name: "mizu",
					Version: "0.0.2",
				},
				Source: sourceOfEntry,
			},
			Entries: entriesHar,
		},
	}
	// leave undefined when no source is present, otherwise modeler assumes source is empty string ""
	if sourceOfEntry != "" {
		harsObject[fileName].Log.Creator.Source = &sourceOfEntry
	}
	}
}

@@ -125,21 +139,72 @@ func GetHARs(c *fiber.Ctx) error {
	return c.Status(fiber.StatusOK).SendStream(buffer)
}

func UploadEntries(c *fiber.Ctx) error {
	uploadRequestBody := &models.UploadEntriesRequestBody{}
	if err := c.QueryParser(uploadRequestBody); err != nil {
		return c.Status(fiber.StatusBadRequest).JSON(err)
	}
	if err := validation.Validate(uploadRequestBody); err != nil {
		return c.Status(fiber.StatusBadRequest).JSON(err)
	}
	if up9.GetAnalyzeInfo().IsAnalyzing {
		return c.Status(fiber.StatusBadRequest).SendString("Cannot analyze, mizu is already analyzing")
	}

	token, _ := up9.CreateAnonymousToken(uploadRequestBody.Dest)
	go up9.UploadEntriesImpl(token.Token, token.Model, uploadRequestBody.Dest)
	return c.Status(fiber.StatusOK).SendString("OK")
}

func GetFullEntries(c *fiber.Ctx) error {
	entriesFilter := &models.HarFetchRequestBody{}
	if err := c.QueryParser(entriesFilter); err != nil {
		return c.Status(fiber.StatusBadRequest).JSON(err)
	}
	err := validation.Validate(entriesFilter)
	if err != nil {
		return c.Status(fiber.StatusBadRequest).JSON(err)
	}

	var timestampFrom, timestampTo int64

	if entriesFilter.From < 0 {
		timestampFrom = 0
	} else {
		timestampFrom = entriesFilter.From
	}
	if entriesFilter.To <= 0 {
		timestampTo = time.Now().UnixNano() / int64(time.Millisecond)
	} else {
		timestampTo = entriesFilter.To
	}

	entriesArray := database.GetEntriesFromDb(timestampFrom, timestampTo)
	result := make([]models.FullEntryDetails, 0)
	for _, data := range entriesArray {
		harEntry := models.FullEntryDetails{}
		if err := models.GetEntry(&data, &harEntry); err != nil {
			continue
		}
		result = append(result, harEntry)
	}

	return c.Status(fiber.StatusOK).JSON(result)
}

func GetEntry(c *fiber.Ctx) error {
	var entryData models.EntryData
	var entryData models.MizuEntry
	database.GetEntriesTable().
		Select("entry", "resolvedDestination").
		Where(map[string]string{"entryId": c.Params("entryId")}).
		First(&entryData)

	var fullEntry har.Entry
	unmarshallErr := json.Unmarshal([]byte(entryData.Entry), &fullEntry)
	utils.CheckErr(unmarshallErr)

	if entryData.ResolvedDestination != "" {
		fullEntry.Request.URL = utils.SetHostname(fullEntry.Request.URL, entryData.ResolvedDestination)
	fullEntry := models.FullEntryDetails{}
	if err := models.GetEntry(&entryData, &fullEntry); err != nil {
		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
			"error": true,
			"msg": "Can't get entry details",
		})
	}

	return c.Status(fiber.StatusOK).JSON(fullEntry)
}
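Both `GetHARs` and `GetFullEntries` above clamp the requested window the same way: a negative `from` falls back to 0 and a non-positive `to` falls back to the current time in Unix milliseconds. The helper below is only a sketch of that shared logic; the handlers in the diff inline it rather than calling such a function.

```go
package sketch

import "time"

// entriesTimeWindow mirrors the clamping done in GetHARs/GetFullEntries:
// from < 0 falls back to 0, to <= 0 falls back to "now" in Unix milliseconds.
func entriesTimeWindow(from, to int64) (int64, int64) {
	if from < 0 {
		from = 0
	}
	if to <= 0 {
		to = time.Now().UnixNano() / int64(time.Millisecond)
	}
	return from, to
}
```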
@@ -3,6 +3,7 @@ package controllers
import (
	"github.com/gofiber/fiber/v2"
	"github.com/up9inc/mizu/shared"
	"mizuserver/pkg/up9"
)

var TapStatus shared.TapStatus
@@ -10,3 +11,7 @@ var TapStatus shared.TapStatus
func GetTappingStatus(c *fiber.Ctx) error {
	return c.Status(fiber.StatusOK).JSON(TapStatus)
}

func AnalyzeInformation(c *fiber.Ctx) error {
	return c.Status(fiber.StatusOK).JSON(up9.GetAnalyzeInfo())
}
@@ -1,9 +1,11 @@
package database

import (
	"fmt"
	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
	"mizuserver/pkg/models"
	"mizuserver/pkg/utils"
)

const (
@@ -14,6 +16,24 @@ var (
	DB = initDataBase(DBPath)
)

const (
	OrderDesc = "desc"
	OrderAsc = "asc"
	LT = "lt"
	GT = "gt"
)

var (
	OperatorToSymbolMapping = map[string]string{
		LT: "<",
		GT: ">",
	}
	OperatorToOrderMapping = map[string]string{
		LT: OrderDesc,
		GT: OrderAsc,
	}
)

func GetEntriesTable() *gorm.DB {
	return DB.Table("mizu_entries")
}
@@ -23,3 +43,20 @@ func initDataBase(databasePath string) *gorm.DB {
	_ = temp.AutoMigrate(&models.MizuEntry{}) // this will ensure table is created
	return temp
}

func GetEntriesFromDb(timestampFrom int64, timestampTo int64) []models.MizuEntry {
	order := OrderDesc
	var entries []models.MizuEntry
	GetEntriesTable().
		Where(fmt.Sprintf("timestamp BETWEEN %v AND %v", timestampFrom, timestampTo)).
		Order(fmt.Sprintf("timestamp %s", order)).
		Find(&entries)

	if len(entries) > 0 {
		// the entries are always ordered from oldest to newest, so we should reverse
		utils.ReverseSlice(entries)
	}
	return entries
}
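A hedged usage sketch of the new `database.GetEntriesFromDb` helper: fetch a time window and iterate the returned entries, which come back ordered oldest to newest after the reverse. The import path matches the diff, but the surrounding function and its output are illustrative only.

```go
package sketch

import (
	"fmt"
	"time"

	"mizuserver/pkg/database"
)

// dumpLastHour fetches everything recorded in the last hour and prints a
// one-line summary per entry. Timestamps are Unix milliseconds, matching the
// column written by saveHarToDb.
func dumpLastHour() {
	to := time.Now().UnixNano() / int64(time.Millisecond)
	from := to - int64(time.Hour/time.Millisecond)
	for _, entry := range database.GetEntriesFromDb(from, to) {
		fmt.Println(entry.EntryId, entry.Url, entry.Status)
	}
}
```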
@@ -4,25 +4,35 @@ import (
	"encoding/json"
	"github.com/google/martian/har"
	"github.com/up9inc/mizu/shared"
	"mizuserver/pkg/tap"
	"github.com/up9inc/mizu/tap"
	"mizuserver/pkg/utils"
	"time"
)

type DataUnmarshaler interface {
	UnmarshalData(*MizuEntry) error
}

func GetEntry(r *MizuEntry, v DataUnmarshaler) error {
	return v.UnmarshalData(r)
}

type MizuEntry struct {
	ID uint `gorm:"primarykey"`
	CreatedAt time.Time
	UpdatedAt time.Time
	Entry string `json:"entry,omitempty" gorm:"column:entry"`
	EntryId string `json:"entryId" gorm:"column:entryId"`
	Url string `json:"url" gorm:"column:url"`
	Method string `json:"method" gorm:"column:method"`
	Status int `json:"status" gorm:"column:status"`
	RequestSenderIp string `json:"requestSenderIp" gorm:"column:requestSenderIp"`
	Service string `json:"service" gorm:"column:service"`
	Timestamp int64 `json:"timestamp" gorm:"column:timestamp"`
	Path string `json:"path" gorm:"column:path"`
	Entry string `json:"entry,omitempty" gorm:"column:entry"`
	EntryId string `json:"entryId" gorm:"column:entryId"`
	Url string `json:"url" gorm:"column:url"`
	Method string `json:"method" gorm:"column:method"`
	Status int `json:"status" gorm:"column:status"`
	RequestSenderIp string `json:"requestSenderIp" gorm:"column:requestSenderIp"`
	Service string `json:"service" gorm:"column:service"`
	Timestamp int64 `json:"timestamp" gorm:"column:timestamp"`
	Path string `json:"path" gorm:"column:path"`
	ResolvedSource string `json:"resolvedSource,omitempty" gorm:"column:resolvedSource"`
	ResolvedDestination string `json:"resolvedDestination,omitempty" gorm:"column:resolvedDestination"`
	IsOutgoing bool `json:"isOutgoing,omitempty" gorm:"column:isOutgoing"`
}

type BaseEntryDetails struct {
@@ -34,10 +44,66 @@ type BaseEntryDetails struct {
	StatusCode int `json:"statusCode,omitempty"`
	Method string `json:"method,omitempty"`
	Timestamp int64 `json:"timestamp,omitempty"`
	IsOutgoing bool `json:"isOutgoing,omitempty"`
}

type FullEntryDetails struct {
	har.Entry
}

type FullEntryDetailsExtra struct {
	har.Entry
}

func (bed *BaseEntryDetails) UnmarshalData(entry *MizuEntry) error {
	entryUrl := entry.Url
	service := entry.Service
	if entry.ResolvedDestination != "" {
		entryUrl = utils.SetHostname(entryUrl, entry.ResolvedDestination)
		service = utils.SetHostname(service, entry.ResolvedDestination)
	}
	bed.Id = entry.EntryId
	bed.Url = entryUrl
	bed.Service = service
	bed.Path = entry.Path
	bed.StatusCode = entry.Status
	bed.Method = entry.Method
	bed.Timestamp = entry.Timestamp
	bed.RequestSenderIp = entry.RequestSenderIp
	bed.IsOutgoing = entry.IsOutgoing
	return nil
}

func (fed *FullEntryDetails) UnmarshalData(entry *MizuEntry) error {
	if err := json.Unmarshal([]byte(entry.Entry), &fed.Entry); err != nil {
		return err
	}

	if entry.ResolvedDestination != "" {
		fed.Entry.Request.URL = utils.SetHostname(fed.Entry.Request.URL, entry.ResolvedDestination)
	}
	return nil
}

func (fedex *FullEntryDetailsExtra) UnmarshalData(entry *MizuEntry) error {
	if err := json.Unmarshal([]byte(entry.Entry), &fedex.Entry); err != nil {
		return err
	}

	if entry.ResolvedSource != "" {
		fedex.Entry.Request.Headers = append(fedex.Request.Headers, har.Header{Name: "x-mizu-source", Value: entry.ResolvedSource})
	}
	if entry.ResolvedDestination != "" {
		fedex.Entry.Request.Headers = append(fedex.Entry.Request.Headers, har.Header{Name: "x-mizu-destination", Value: entry.ResolvedDestination})
		fedex.Entry.Request.URL = utils.SetHostname(fedex.Entry.Request.URL, entry.ResolvedDestination)
	}
	return nil
}

type EntryData struct {
	Entry string `json:"entry,omitempty"`
	Entry string `json:"entry,omitempty"`
	ResolvedDestination string `json:"resolvedDestination,omitempty" gorm:"column:resolvedDestination"`
}

@@ -47,8 +113,13 @@ type EntriesFilter struct {
	Timestamp int64 `query:"timestamp" validate:"required,min=1"`
}

type UploadEntriesRequestBody struct {
	Dest string `query:"dest"`
}

type HarFetchRequestBody struct {
	Limit int `query:"limit"`
	From int64 `query:"from"`
	To int64 `query:"to"`
}

type WebSocketEntryMessage struct {
@@ -56,7 +127,6 @@ type WebSocketEntryMessage struct {
	Data *BaseEntryDetails `json:"data,omitempty"`
}

type WebSocketTappedEntryMessage struct {
	*shared.WebSocketMessageMetadata
	Data *tap.OutputChannelItem
@@ -82,7 +152,6 @@ func CreateWebsocketTappedEntryMessage(base *tap.OutputChannelItem) ([]byte, err
	return json.Marshal(message)
}

// ExtendedHAR is the top level object of a HAR log.
type ExtendedHAR struct {
	Log *ExtendedLog `json:"log"`
@@ -100,5 +169,5 @@ type ExtendedLog struct {

type ExtendedCreator struct {
	*har.Creator
	Source string `json:"_source"`
}
	Source *string `json:"_source"`
}
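The models package now funnels every view through one polymorphic decode step: any type that implements `UnmarshalData(*MizuEntry) error` can be filled from a stored row via `models.GetEntry`. A short usage sketch follows, assuming the package shown above is importable as `mizuserver/pkg/models`.

```go
package sketch

import "mizuserver/pkg/models"

// toViews decodes one stored row into the two view types used by the API.
// Both targets implement UnmarshalData, so the same call shape serves the
// thin listing view and the full HAR view.
func toViews(entry *models.MizuEntry) (*models.BaseEntryDetails, *models.FullEntryDetails, error) {
	base := &models.BaseEntryDetails{}
	if err := models.GetEntry(entry, base); err != nil {
		return nil, nil, err
	}
	full := &models.FullEntryDetails{}
	if err := models.GetEntry(entry, full); err != nil {
		return nil, nil, err
	}
	return base, full, nil
}
```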
@@ -21,7 +21,7 @@ func NewFromInCluster(errOut chan error) (*Resolver, error) {
	if err != nil {
		return nil, err
	}
	return &Resolver{clientConfig: config, clientSet: clientset, nameMap: make(map[string]string), errOut: errOut}, nil
	return &Resolver{clientConfig: config, clientSet: clientset, nameMap: make(map[string]string), serviceMap: make(map[string]string), errOut: errOut}, nil
}

func NewFromOutOfCluster(kubeConfigPath string, errOut chan error) (*Resolver, error) {
@@ -53,9 +53,9 @@ func NewFromOutOfCluster(kubeConfigPath string, errOut chan error) (*Resolver, e
		return nil, err
	}

	return &Resolver{clientConfig: clientConfig, clientSet: clientset, nameMap: make(map[string]string), errOut: errOut}, nil
	return &Resolver{clientConfig: clientConfig, clientSet: clientset, nameMap: make(map[string]string), serviceMap: make(map[string]string), errOut: errOut}, nil
}

func NewFromExisting(clientConfig *restclient.Config, clientSet *kubernetes.Clientset, errOut chan error) *Resolver {
	return &Resolver{clientConfig: clientConfig, clientSet: clientSet, nameMap: make(map[string]string), errOut: errOut}
	return &Resolver{clientConfig: clientConfig, clientSet: clientSet, nameMap: make(map[string]string), serviceMap: make(map[string]string), errOut: errOut}
}

@@ -20,6 +20,7 @@ type Resolver struct {
	clientConfig *restclient.Config
	clientSet *kubernetes.Clientset
	nameMap map[string]string
	serviceMap map[string]string
	isStarted bool
	errOut chan error
}
@@ -41,6 +42,11 @@ func (resolver *Resolver) Resolve(name string) string {
	return resolvedName
}

func (resolver *Resolver) CheckIsServiceIP(address string) bool {
	_, isFound := resolver.serviceMap[address]
	return isFound
}

func (resolver *Resolver) watchPods(ctx context.Context) error {
	// empty namespace makes the client watch all namespaces
	watcher, err := resolver.clientSet.CoreV1().Pods("").Watch(ctx, metav1.ListOptions{Watch: true})
@@ -124,6 +130,7 @@ func (resolver *Resolver) watchServices(ctx context.Context) error {
	serviceHostname := fmt.Sprintf("%s.%s", service.Name, service.Namespace)
	if service.Spec.ClusterIP != "" && service.Spec.ClusterIP != kubClientNullString {
		resolver.saveResolvedName(service.Spec.ClusterIP, serviceHostname, event.Type)
		resolver.saveServiceIP(service.Spec.ClusterIP, serviceHostname, event.Type)
	}
	if service.Status.LoadBalancer.Ingress != nil {
		for _, ingress := range service.Status.LoadBalancer.Ingress {
@@ -147,6 +154,14 @@ func (resolver *Resolver) saveResolvedName(key string, resolved string, eventTyp
	}
}

func (resolver *Resolver) saveServiceIP(key string, resolved string, eventType watch.EventType) {
	if eventType == watch.Deleted {
		delete(resolver.serviceMap, key)
	} else {
		resolver.serviceMap[key] = resolved
	}
}

func (resolver *Resolver) infiniteErrorHandleRetryFunc(ctx context.Context, fun func(ctx context.Context) error) {
	for {
		err := fun(ctx)
@@ -11,10 +11,14 @@ func EntriesRoutes(fiberApp *fiber.App) {

	routeGroup.Get("/entries", controllers.GetEntries) // get entries (base/thin entries)
	routeGroup.Get("/entries/:entryId", controllers.GetEntry) // get single (full) entry
	routeGroup.Get("/exportEntries", controllers.GetFullEntries)
	routeGroup.Get("/uploadEntries", controllers.UploadEntries)

	routeGroup.Get("/har", controllers.GetHARs)

	routeGroup.Get("/resetDB", controllers.DeleteAllEntries) // reset the DB (delete all entries)
	routeGroup.Get("/generalStats", controllers.GetGeneralStats) // get general stats about entries in DB

	routeGroup.Get("/tapStatus", controllers.GetTappingStatus) // get tapping status
	routeGroup.Get("/analyzeStatus", controllers.AnalyzeInformation)
}
@@ -5,7 +5,7 @@ import (
	"encoding/xml"
	"errors"
	"fmt"
	"mizuserver/pkg/tap"
	"github.com/up9inc/mizu/tap"
	"net/url"
	"strings"

@@ -15,8 +15,8 @@ import (
)

func FilterSensitiveInfoFromHarRequest(harOutputItem *tap.OutputChannelItem, options *shared.TrafficFilteringOptions) {
	filterHarHeaders(harOutputItem.HarEntry.Request.Headers)
	filterHarHeaders(harOutputItem.HarEntry.Response.Headers)
	harOutputItem.HarEntry.Request.Headers = filterHarHeaders(harOutputItem.HarEntry.Request.Headers)
	harOutputItem.HarEntry.Response.Headers = filterHarHeaders(harOutputItem.HarEntry.Response.Headers)

	harOutputItem.HarEntry.Request.Cookies = make([]har.Cookie, 0, 0)
	harOutputItem.HarEntry.Response.Cookies = make([]har.Cookie, 0, 0)
@@ -44,12 +44,19 @@ func FilterSensitiveInfoFromHarRequest(harOutputItem *tap.OutputChannelItem, opt
	}
}

func filterHarHeaders(headers []har.Header) {
func filterHarHeaders(headers []har.Header) []har.Header {
	newHeaders := make([]har.Header, 0)
	for i, header := range headers {
		if isFieldNameSensitive(header.Name) {
		if strings.ToLower(header.Name) == "cookie" {
			continue
		} else if isFieldNameSensitive(header.Name) {
			newHeaders = append(newHeaders, har.Header{Name: header.Name, Value: maskedFieldPlaceholderValue})
			headers[i].Value = maskedFieldPlaceholderValue
		} else {
			newHeaders = append(newHeaders, header)
		}
	}
	return newHeaders
}

func getContentTypeHeaderValue(headers []har.Header) string {
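`filterHarHeaders` now rebuilds the header slice instead of mutating it in place: cookie headers are dropped, sensitive names are masked, and everything else is copied through. The standalone sketch below demonstrates the same rebuild pattern with a local `Header` type and an illustrative placeholder value rather than the real `har.Header` and `maskedFieldPlaceholderValue`.

```go
package sketch

import "strings"

// Header is a stand-in for har.Header; the placeholder value is illustrative.
type Header struct {
	Name  string
	Value string
}

const masked = "[REDACTED]"

// filterHeaders returns a new slice: cookies are dropped entirely, sensitive
// names keep their name but get a masked value, all other headers pass through.
func filterHeaders(headers []Header, isSensitive func(string) bool) []Header {
	out := make([]Header, 0, len(headers))
	for _, h := range headers {
		switch {
		case strings.EqualFold(h.Name, "cookie"):
			continue
		case isSensitive(h.Name):
			out = append(out, Header{Name: h.Name, Value: masked})
		default:
			out = append(out, h)
		}
	}
	return out
}
```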
@@ -1,209 +0,0 @@
package tap

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/orcaman/concurrent-map"
)

type requestResponsePair struct {
	Request httpMessage `json:"request"`
	Response httpMessage `json:"response"`
}

type envoyMessageWrapper struct {
	HttpBufferedTrace requestResponsePair `json:"http_buffered_trace"`
}

type headerKeyVal struct {
	Key string `json:"key"`
	Value string `json:"value"`
}

type messageBody struct {
	Truncated bool `json:"truncated"`
	AsBytes string `json:"as_bytes"`
}

type httpMessage struct {
	IsRequest bool
	Headers []headerKeyVal `json:"headers"`
	HTTPVersion string `json:"httpVersion"`
	Body messageBody `json:"body"`
	captureTime time.Time
	orig interface {}
	requestSenderIp string
}

// Key is {client_addr}:{client_port}->{dest_addr}:{dest_port}
type requestResponseMatcher struct {
	openMessagesMap cmap.ConcurrentMap
}

func createResponseRequestMatcher() requestResponseMatcher {
	newMatcher := &requestResponseMatcher{openMessagesMap: cmap.New()}
	return *newMatcher
}

func (matcher *requestResponseMatcher) registerRequest(ident string, request *http.Request, captureTime time.Time, body string, isHTTP2 bool) *envoyMessageWrapper {
	split := splitIdent(ident)
	key := genKey(split)

	messageExtraHeaders := []headerKeyVal{
		{Key: "x-up9-source", Value: split[0]},
		{Key: "x-up9-destination", Value: split[1] + ":" + split[3]},
	}

	requestHTTPMessage := requestToMessage(request, captureTime, body, &messageExtraHeaders, isHTTP2, split[0])

	if response, found := matcher.openMessagesMap.Pop(key); found {
		// Type assertion always succeeds because all of the map's values are of httpMessage type
		responseHTTPMessage := response.(*httpMessage)
		if responseHTTPMessage.IsRequest {
			SilentError("Request-Duplicate", "Got duplicate request with same identifier\n")
			return nil
		}
		Debug("Matched open Response for %s\n", key)
		return matcher.preparePair(&requestHTTPMessage, responseHTTPMessage)
	}

	matcher.openMessagesMap.Set(key, &requestHTTPMessage)
	Debug("Registered open Request for %s\n", key)
	return nil
}

func (matcher *requestResponseMatcher) registerResponse(ident string, response *http.Response, captureTime time.Time, body string, isHTTP2 bool) *envoyMessageWrapper {
	split := splitIdent(ident)
	key := genKey(split)

	responseHTTPMessage := responseToMessage(response, captureTime, body, isHTTP2)

	if request, found := matcher.openMessagesMap.Pop(key); found {
		// Type assertion always succeeds because all of the map's values are of httpMessage type
		requestHTTPMessage := request.(*httpMessage)
		if !requestHTTPMessage.IsRequest {
			SilentError("Response-Duplicate", "Got duplicate response with same identifier\n")
			return nil
		}
		Debug("Matched open Request for %s\n", key)
		return matcher.preparePair(requestHTTPMessage, &responseHTTPMessage)
	}

	matcher.openMessagesMap.Set(key, &responseHTTPMessage)
	Debug("Registered open Response for %s\n", key)
	return nil
}

func (matcher *requestResponseMatcher) preparePair(requestHTTPMessage *httpMessage, responseHTTPMessage *httpMessage) *envoyMessageWrapper {
	matcher.addDuration(requestHTTPMessage, responseHTTPMessage)

	return &envoyMessageWrapper{
		HttpBufferedTrace: requestResponsePair{
			Request: *requestHTTPMessage,
			Response: *responseHTTPMessage,
		},
	}
}

func requestToMessage(request *http.Request, captureTime time.Time, body string, messageExtraHeaders *[]headerKeyVal, isHTTP2 bool, requestSenderIp string) httpMessage {
	messageHeaders := make([]headerKeyVal, 0)

	for key, value := range request.Header {
		messageHeaders = append(messageHeaders, headerKeyVal{Key: key, Value: value[0]})
	}

	if !isHTTP2 {
		messageHeaders = append(messageHeaders, headerKeyVal{Key: ":method", Value: request.Method})
		messageHeaders = append(messageHeaders, headerKeyVal{Key: ":path", Value: request.RequestURI})
		messageHeaders = append(messageHeaders, headerKeyVal{Key: ":authority", Value: request.Host})
		messageHeaders = append(messageHeaders, headerKeyVal{Key: ":scheme", Value: "http"})
	}

	messageHeaders = append(messageHeaders, headerKeyVal{Key: "x-request-start", Value: fmt.Sprintf("%.3f", float64(captureTime.UnixNano()) / float64(1000000000))})

	messageHeaders = append(messageHeaders, *messageExtraHeaders...)

	httpVersion := request.Proto

	requestBody := messageBody{Truncated: false, AsBytes: body}

	return httpMessage{
		IsRequest: true,
		Headers: messageHeaders,
		HTTPVersion: httpVersion,
		Body: requestBody,
		captureTime: captureTime,
		orig: request,
		requestSenderIp: requestSenderIp,
	}
}

func responseToMessage(response *http.Response, captureTime time.Time, body string, isHTTP2 bool) httpMessage {
	messageHeaders := make([]headerKeyVal, 0)

	for key, value := range response.Header {
		messageHeaders = append(messageHeaders, headerKeyVal{Key: key, Value: value[0]})
	}

	if !isHTTP2 {
		messageHeaders = append(messageHeaders, headerKeyVal{Key: ":status", Value: strconv.Itoa(response.StatusCode)})
	}

	httpVersion := response.Proto

	requestBody := messageBody{Truncated: false, AsBytes: body}

	return httpMessage{
		IsRequest: false,
		Headers: messageHeaders,
		HTTPVersion: httpVersion,
		Body: requestBody,
		captureTime: captureTime,
		orig: response,
	}
}

func (matcher *requestResponseMatcher) addDuration(requestHTTPMessage *httpMessage, responseHTTPMessage *httpMessage) {
	durationMs := float64(responseHTTPMessage.captureTime.UnixNano() / 1000000) - float64(requestHTTPMessage.captureTime.UnixNano() / 1000000)
	if durationMs < 1 {
		durationMs = 1
	}

	responseHTTPMessage.Headers = append(responseHTTPMessage.Headers, headerKeyVal{Key: "x-up9-duration-ms", Value: fmt.Sprintf("%.0f", durationMs)})
}

func splitIdent(ident string) []string {
	ident = strings.Replace(ident, "->", " ", -1)
	return strings.Split(ident, " ")
}

func genKey(split []string) string {
	key := fmt.Sprintf("%s:%s->%s:%s,%s", split[0], split[2], split[1], split[3], split[4])
	return key
}

func (matcher *requestResponseMatcher) deleteOlderThan(t time.Time) int {
	keysToPop := make([]string, 0)
	for item := range matcher.openMessagesMap.IterBuffered() {
		// Map only contains values of type httpMessage
		message, _ := item.Val.(*httpMessage)

		if message.captureTime.Before(t) {
			keysToPop = append(keysToPop, item.Key)
		}
	}

	numDeleted := len(keysToPop)

	for _, key := range keysToPop {
		_, _ = matcher.openMessagesMap.Pop(key)
	}

	return numDeleted
}
@@ -1,239 +0,0 @@
package tap

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/gorilla/websocket"
	"github.com/patrickmn/go-cache"
)

const (
	// Time allowed to write a message to the peer.
	writeWait = 10 * time.Second

	// Time allowed to read the next pong message from the peer.
	pongWait = 60 * time.Second

	// Send pings to peer with this period. Must be less than pongWait.
	pingPeriod = (pongWait * 9) / 10

	// Maximum message size allowed from peer.
	maxMessageSize = 512
)

var (
	newline = []byte{'\n'}
	space = []byte{' '}
	hub *Hub
	outboundSocketNotifyExpiringCache = cache.New(outboundThrottleCacheExpiryPeriod, outboundThrottleCacheExpiryPeriod)
)

var upgrader = websocket.Upgrader{
	ReadBufferSize: 1024,
	WriteBufferSize: 1024,
	CheckOrigin: func (_ *http.Request) bool { return true },
}

// Client is a middleman between the websocket connection and the hub.
type Client struct {
	hub *Hub

	// The websocket connection.
	conn *websocket.Conn

	// Buffered channel of outbound messages.
	send chan []byte
}

type OutBoundLinkMessage struct {
	SourceIP string `json:"sourceIP"`
	IP string `json:"ip"`
	Port int `json:"port"`
	Type string `json:"type"`
}

// readPump pumps messages from the websocket connection to the hub.
//
// The application runs readPump in a per-connection goroutine. The application
// ensures that there is at most one reader on a connection by executing all
// reads from this goroutine.
func (c *Client) readPump() {
	defer func() {
		c.hub.unregister <- c
		c.conn.Close()
	}()
	c.conn.SetReadLimit(maxMessageSize)
	c.conn.SetReadDeadline(time.Now().Add(pongWait))
	c.conn.SetPongHandler(func(string) error { c.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })
	for {
		_, message, err := c.conn.ReadMessage()
		if err != nil {
			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
				log.Printf("error: %v", err)
			}
			break
		}
		message = bytes.TrimSpace(bytes.Replace(message, newline, space, -1))
		c.hub.onMessageCallback(message)
	}
}

// writePump pumps messages from the hub to the websocket connection.
//
// A goroutine running writePump is started for each connection. The
// application ensures that there is at most one writer to a connection by
// executing all writes from this goroutine.
func (c *Client) writePump() {
	ticker := time.NewTicker(pingPeriod)
	defer func() {
		ticker.Stop()
		c.conn.Close()
	}()
	for {
		select {
		case message, ok := <-c.send:
			c.conn.SetWriteDeadline(time.Now().Add(writeWait))
			if !ok {
				// The hub closed the channel.
				c.conn.WriteMessage(websocket.CloseMessage, []byte{})
				return
			}

			w, err := c.conn.NextWriter(websocket.TextMessage)
			if err != nil {
				return
			}
			w.Write(message)

			if err := w.Close(); err != nil {
				return
			}
		case <-ticker.C:
			c.conn.SetWriteDeadline(time.Now().Add(writeWait))
			if err := c.conn.WriteMessage(websocket.PingMessage, nil); err != nil {
				return
			}
		}
	}
}

type Hub struct {
	// Registered clients.
	clients map[*Client]bool

	// Inbound messages from the clients.
	broadcast chan []byte

	// Register requests from the clients.
	register chan *Client

	// Unregister requests from clients.
	unregister chan *Client

	// Handle messages from client
	onMessageCallback func([]byte)
}

func newHub(onMessageCallback func([]byte)) *Hub {
	return &Hub{
		broadcast: make(chan []byte),
		register: make(chan *Client),
		unregister: make(chan *Client),
		clients: make(map[*Client]bool),
		onMessageCallback: onMessageCallback,
	}
}

func (h *Hub) run() {
	for {
		select {
		case client := <-h.register:
			h.clients[client] = true
		case client := <-h.unregister:
			if _, ok := h.clients[client]; ok {
				delete(h.clients, client)
				close(client.send)
			}
		case message := <-h.broadcast:
			// matched messages counter is incremented in this thread instead of in multiple http reader
			// threads in order to reduce contention.
			statsTracker.incMatchedMessages()

			for client := range h.clients {
				select {
				case client.send <- message:
				default:
					close(client.send)
					delete(h.clients, client)
				}
			}
		}
	}
}

// serveWs handles websocket requests from the peer.
func serveWs(hub *Hub, w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println(err)
		return
	}
	client := &Client{hub: hub, conn: conn, send: make(chan []byte, 256)}
	client.hub.register <- client

	// Allow collection of memory referenced by the caller by doing all work in
	// new goroutines.
	go client.writePump()
	go client.readPump()
}

func startOutputServer(port string, messageCallback func([]byte)) {
	hub = newHub(messageCallback)
	go hub.run()
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		serveWs(hub, w, r)
	})
	err := http.ListenAndServe("0.0.0.0:" + port, nil)
	if err != nil {
		log.Fatal("Output server error: ", err)
	}
}

func broadcastReqResPair(reqResJson []byte) {
	hub.broadcast <- reqResJson
}

func broadcastOutboundLink(srcIP string, dstIP string, dstPort int) {
	cacheKey := fmt.Sprintf("%s -> %s:%d", srcIP, dstIP, dstPort)
	_, isInCache := outboundSocketNotifyExpiringCache.Get(cacheKey)
	if isInCache {
		return
	} else {
		outboundSocketNotifyExpiringCache.SetDefault(cacheKey, true)
	}

	socketMessage := OutBoundLinkMessage{
		SourceIP: srcIP,
		IP: dstIP,
		Port: dstPort,
		Type: "outboundSocketDetected",
	}

	jsonStr, err := json.Marshal(socketMessage)
	if err != nil {
		log.Printf("error marshalling outbound socket detection object: %v", err)
	} else {
		hub.broadcast <- jsonStr
	}
}
188  api/pkg/up9/main.go (Normal file)
@@ -0,0 +1,188 @@
|
||||
package up9
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/zlib"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/shared"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"mizuserver/pkg/database"
|
||||
"mizuserver/pkg/models"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
||||
const (
|
||||
AnalyzeCheckSleepTime = 5 * time.Second
|
||||
)
|
||||
|
||||
type GuestToken struct {
|
||||
Token string `json:"token"`
|
||||
Model string `json:"model"`
|
||||
}
|
||||
|
||||
type ModelStatus struct {
|
||||
LastMajorGeneration float64 `json:"lastMajorGeneration"`
|
||||
}
|
||||
|
||||
func getGuestToken(url string, target *GuestToken) error {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return json.NewDecoder(resp.Body).Decode(target)
|
||||
}
|
||||
|
||||
func CreateAnonymousToken(envPrefix string) (*GuestToken, error) {
|
||||
tokenUrl := fmt.Sprintf("https://trcc.%v/anonymous/token", envPrefix)
|
||||
token := &GuestToken{}
|
||||
if err := getGuestToken(tokenUrl, token); err != nil {
|
||||
fmt.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func GetRemoteUrl(analyzeDestination string, analyzeToken string) string {
|
||||
return fmt.Sprintf("https://%s/share/%s", analyzeDestination, analyzeToken)
|
||||
}
|
||||
|
||||
func CheckIfModelReady(analyzeDestination string, analyzeModel string, analyzeToken string) bool {
|
||||
statusUrl, _ := url.Parse(fmt.Sprintf("https://trcc.%s/models/%s/status", analyzeDestination, analyzeModel))
|
||||
req := &http.Request{
|
||||
Method: http.MethodGet,
|
||||
URL: statusUrl,
|
||||
Header: map[string][]string{
|
||||
"Content-Type": {"application/json"},
|
||||
"Guest-Auth": {analyzeToken},
|
||||
},
|
||||
}
|
||||
statusResp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer statusResp.Body.Close()
|
||||
|
||||
target := &ModelStatus{}
|
||||
_ = json.NewDecoder(statusResp.Body).Decode(&target)
|
||||
|
||||
return target.LastMajorGeneration > 0
|
||||
}
|
||||
|
||||
func GetTrafficDumpUrl(analyzeDestination string, analyzeModel string) *url.URL {
|
||||
postUrl, _ := url.Parse(fmt.Sprintf("https://traffic.%s/dumpTrafficBulk/%s", analyzeDestination, analyzeModel))
|
||||
return postUrl
|
||||
}
|
||||
|
||||
type AnalyzeInformation struct {
|
||||
IsAnalyzing bool
|
||||
AnalyzedModel string
|
||||
AnalyzeToken string
|
||||
AnalyzeDestination string
|
||||
}
|
||||
|
||||
func (info *AnalyzeInformation) Reset() {
|
||||
info.IsAnalyzing = false
|
||||
info.AnalyzedModel = ""
|
||||
info.AnalyzeToken = ""
|
||||
info.AnalyzeDestination = ""
|
||||
}
|
||||
|
||||
var analyzeInformation = &AnalyzeInformation{}
|
||||
|
||||
func GetAnalyzeInfo() *shared.AnalyzeStatus {
|
||||
return &shared.AnalyzeStatus{
|
||||
IsAnalyzing: analyzeInformation.IsAnalyzing,
|
||||
RemoteUrl: GetRemoteUrl(analyzeInformation.AnalyzeDestination, analyzeInformation.AnalyzeToken),
|
||||
IsRemoteReady: CheckIfModelReady(analyzeInformation.AnalyzeDestination, analyzeInformation.AnalyzedModel, analyzeInformation.AnalyzeToken),
|
||||
}
|
||||
}
|
||||
|
||||
func UploadEntriesImpl(token string, model string, envPrefix string) {
|
||||
analyzeInformation.IsAnalyzing = true
|
||||
analyzeInformation.AnalyzedModel = model
|
||||
analyzeInformation.AnalyzeToken = token
|
||||
analyzeInformation.AnalyzeDestination = envPrefix
|
||||
|
||||
sleepTime := time.Second * 10
|
||||
|
||||
var timestampFrom int64 = 0
|
||||
|
||||
for {
|
||||
timestampTo := time.Now().UnixNano() / int64(time.Millisecond)
|
||||
fmt.Printf("Getting entries from %v, to %v\n", timestampFrom, timestampTo)
|
||||
entriesArray := database.GetEntriesFromDb(timestampFrom, timestampTo)
|
||||
|
||||
if len(entriesArray) > 0 {
|
||||
|
||||
fullEntriesExtra := make([]models.FullEntryDetailsExtra, 0)
|
||||
for _, data := range entriesArray {
|
||||
harEntry := models.FullEntryDetailsExtra{}
|
||||
if err := models.GetEntry(&data, &harEntry); err != nil {
|
||||
continue
|
||||
}
|
||||
fullEntriesExtra = append(fullEntriesExtra, harEntry)
|
||||
}
|
||||
fmt.Printf("About to upload %v entries\n", len(fullEntriesExtra))
|
||||
|
||||
body, jMarshalErr := json.Marshal(fullEntriesExtra)
|
||||
if jMarshalErr != nil {
|
||||
analyzeInformation.Reset()
|
||||
fmt.Println("Stopping analyzing")
|
||||
log.Fatal(jMarshalErr)
|
||||
}
|
||||
|
||||
var in bytes.Buffer
|
||||
w := zlib.NewWriter(&in)
|
||||
_, _ = w.Write(body)
|
||||
_ = w.Close()
|
||||
reqBody := ioutil.NopCloser(bytes.NewReader(in.Bytes()))
|
||||
|
||||
req := &http.Request{
|
||||
Method: http.MethodPost,
|
||||
URL: GetTrafficDumpUrl(envPrefix, model),
|
||||
Header: map[string][]string{
|
||||
"Content-Encoding": {"deflate"},
|
||||
"Content-Type": {"application/octet-stream"},
|
||||
"Guest-Auth": {token},
|
||||
},
|
||||
Body: reqBody,
|
||||
}
|
||||
|
||||
if _, postErr := http.DefaultClient.Do(req); postErr != nil {
|
||||
analyzeInformation.Reset()
|
||||
log.Println("Stopping analyzing")
|
||||
log.Fatal(postErr)
|
||||
}
|
||||
fmt.Printf("Finish uploading %v entries to %s\n", len(entriesArray), GetTrafficDumpUrl(envPrefix, model))
|
||||
|
||||
} else {
|
||||
fmt.Println("Nothing to upload")
|
||||
}
|
||||
|
||||
fmt.Printf("Sleeping for %v...\n", sleepTime)
|
||||
time.Sleep(sleepTime)
|
||||
timestampFrom = timestampTo
|
||||
}
|
||||
}
|
||||
|
||||
func UpdateAnalyzeStatus(callback func(data []byte)) {
|
||||
for {
|
||||
if !analyzeInformation.IsAnalyzing {
|
||||
time.Sleep(AnalyzeCheckSleepTime)
|
||||
continue
|
||||
}
|
||||
analyzeStatus := GetAnalyzeInfo()
|
||||
socketMessage := shared.CreateWebSocketMessageTypeAnalyzeStatus(*analyzeStatus)
|
||||
|
||||
jsonMessage, _ := json.Marshal(socketMessage)
|
||||
callback(jsonMessage)
|
||||
time.Sleep(AnalyzeCheckSleepTime)
|
||||
}
|
||||
}
|
||||
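The upload loop in UploadEntriesImpl above serializes the collected entries to JSON, zlib-compresses the payload and POSTs it with a "Content-Encoding: deflate" header plus the guest token. A minimal, self-contained sketch of that request flow; the URL and token here are placeholders, not the real UP9 endpoint:

package main

import (
	"bytes"
	"compress/zlib"
	"encoding/json"
	"fmt"
	"net/http"
)

// postCompressed mirrors the pattern used above:
// marshal -> zlib compress -> POST with "deflate" content encoding.
func postCompressed(url string, token string, payload interface{}) error {
	body, err := json.Marshal(payload)
	if err != nil {
		return err
	}

	var compressed bytes.Buffer
	w := zlib.NewWriter(&compressed)
	if _, err := w.Write(body); err != nil {
		return err
	}
	if err := w.Close(); err != nil {
		return err
	}

	req, err := http.NewRequest(http.MethodPost, url, &compressed)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Encoding", "deflate")
	req.Header.Set("Content-Type", "application/octet-stream")
	req.Header.Set("Guest-Auth", token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("upload failed with status %s", resp.Status)
	}
	return nil
}

func main() {
	entries := []map[string]string{{"path": "/health"}}
	// Placeholder destination and token, for illustration only.
	if err := postCompressed("https://traffic.example.invalid/dumpTrafficBulk/model", "guest-token", entries); err != nil {
		fmt.Println("upload failed:", err)
	}
}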
@@ -1,11 +1,9 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"github.com/gofiber/fiber/v2"
|
||||
"log"
|
||||
"mizuserver/pkg/models"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
@@ -61,27 +59,3 @@ func SetHostname(address, newHostname string) string {
|
||||
return replacedUrl.String()
|
||||
|
||||
}
|
||||
|
||||
func GetResolvedBaseEntry(entry models.MizuEntry) models.BaseEntryDetails {
|
||||
entryUrl := entry.Url
|
||||
service := entry.Service
|
||||
if entry.ResolvedDestination != "" {
|
||||
entryUrl = SetHostname(entryUrl, entry.ResolvedDestination)
|
||||
service = SetHostname(service, entry.ResolvedDestination)
|
||||
}
|
||||
return models.BaseEntryDetails{
|
||||
Id: entry.EntryId,
|
||||
Url: entryUrl,
|
||||
Service: service,
|
||||
Path: entry.Path,
|
||||
StatusCode: entry.Status,
|
||||
Method: entry.Method,
|
||||
Timestamp: entry.Timestamp,
|
||||
RequestSenderIp: entry.RequestSenderIp,
|
||||
}
|
||||
}
|
||||
|
||||
func GetBytesFromStruct(v interface{}) []byte{
|
||||
a, _ := json.Marshal(v)
|
||||
return a
|
||||
}
|
||||
|
||||
14 cli/Makefile
@@ -1,6 +1,8 @@
FOLDER=$(GOOS).$(GOARCH)
SUFFIX=$(GOOS)_$(GOARCH)
COMMIT_HASH=$(shell git rev-parse HEAD)
GIT_BRANCH=$(shell git branch --show-current | tr '[:upper:]' '[:lower:]')
GIT_VERSION=$(shell git branch --show-current | tr '[:upper:]' '[:lower:]')
BUILD_TIMESTAMP=$(shell date +%s)

.PHONY: help
.DEFAULT_GOAL := help
@@ -12,16 +14,22 @@ install:
go install mizu.go

build: ## build mizu CLI binary (select platform via GOOS / GOARCH env variables)
go build -ldflags="-X 'github.com/up9inc/mizu/cli/mizu.GitCommitHash=$(COMMIT_HASH)' -X 'github.com/up9inc/mizu/cli/mizu.Branch=$(GIT_BRANCH)'" -o bin/$(FOLDER)/mizu mizu.go
go build -ldflags="-X 'github.com/up9inc/mizu/cli/mizu.GitCommitHash=$(COMMIT_HASH)' \
-X 'github.com/up9inc/mizu/cli/mizu.Branch=$(GIT_BRANCH)' \
-X 'github.com/up9inc/mizu/cli/mizu.BuildTimestamp=$(BUILD_TIMESTAMP)' \
-X 'github.com/up9inc/mizu/cli/mizu.SemVer=$(SEM_VER)'" \
-o bin/mizu_$(SUFFIX) mizu.go
(cd bin && shasum -a 256 mizu_${SUFFIX} > mizu_${SUFFIX}.sha256)

build-all: ## build for all supported platforms
@echo "Compiling for every OS and Platform"
@mkdir -p bin && echo "SHA256 checksums available for compiled binaries \n\nRun \`shasum -a 256 -c mizu_OS_ARCH.sha256\` to verify\n\n" > bin/README.md
@$(MAKE) build GOOS=darwin GOARCH=amd64
@$(MAKE) build GOOS=linux GOARCH=amd64
@# $(MAKE) build GOOS=darwin GOARCH=arm64
@# $(MAKE) GOOS=windows GOARCH=amd64
@# $(MAKE) GOOS=linux GOARCH=386
@# $(MAKE) GOOS=windows GOARCH=386
@# $(MAKE) GOOS=darwin GOARCH=arm64
@# $(MAKE) GOOS=linux GOARCH=arm64
@# $(MAKE) GOOS=windows GOARCH=arm64
@echo "---------"
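The -X linker flags in the build target above overwrite package-level string variables of the cli/mizu package at link time, which is how SemVer, Branch, GitCommitHash and BuildTimestamp get their real values. A minimal sketch of the mechanism, using a hypothetical main-package variable rather than the real cli/mizu path:

package main

import "fmt"

// SemVer is overridden at build time, e.g.:
//   go build -ldflags="-X 'main.SemVer=0.2.0'" .
// Only package-level string variables can be set this way.
var SemVer = "0.0.0-dev"

func main() {
	fmt.Println("version:", SemVer)
}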
@@ -5,8 +5,10 @@ import (
|
||||
)
|
||||
|
||||
type MizuFetchOptions struct {
|
||||
Limit uint16
|
||||
Directory string
|
||||
FromTimestamp int64
|
||||
ToTimestamp int64
|
||||
Directory string
|
||||
MizuPort uint
|
||||
}
|
||||
|
||||
var mizuFetchOptions = MizuFetchOptions{}
|
||||
@@ -23,6 +25,8 @@ var fetchCmd = &cobra.Command{
|
||||
func init() {
|
||||
rootCmd.AddCommand(fetchCmd)
|
||||
|
||||
fetchCmd.Flags().Uint16VarP(&mizuFetchOptions.Limit, "limit", "l", 1000, "Provide a custom limit for entries to fetch")
|
||||
fetchCmd.Flags().StringVarP(&mizuFetchOptions.Directory, "directory", "d", ".", "Provide a custom directory for fetched entries")
|
||||
fetchCmd.Flags().Int64Var(&mizuFetchOptions.FromTimestamp, "from", 0, "Custom start timestamp for fetched entries")
|
||||
fetchCmd.Flags().Int64Var(&mizuFetchOptions.ToTimestamp, "to", 0, "Custom end timestamp fetched entries")
|
||||
fetchCmd.Flags().UintVarP(&mizuFetchOptions.MizuPort, "port", "p", 8899, "Custom port for mizu")
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@ import (
|
||||
)
|
||||
|
||||
func RunMizuFetch(fetch *MizuFetchOptions) {
|
||||
resp, err := http.Get(fmt.Sprintf("http://localhost:8899/api/har?limit=%v", fetch.Limit))
|
||||
resp, err := http.Get(fmt.Sprintf("http://localhost:%v/api/har?from=%v&to=%v", fetch.MizuPort, fetch.FromTimestamp, fetch.ToTimestamp))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
@@ -53,7 +53,7 @@ func Unzip(reader *zip.Reader, dest string) error {
path := filepath.Join(dest, f.Name)

// Check for ZipSlip (Directory traversal)
if !strings.HasPrefix(path, filepath.Clean(dest) + string(os.PathSeparator)) {
if !strings.HasPrefix(path, filepath.Clean(dest)+string(os.PathSeparator)) {
return fmt.Errorf("illegal file path: %s", path)
}

@@ -61,7 +61,7 @@ func Unzip(reader *zip.Reader, dest string) error {
_ = os.MkdirAll(path, f.Mode())
} else {
_ = os.MkdirAll(filepath.Dir(path), f.Mode())
fmt.Print("writing HAR file [ ", path, " ] .. ")
fmt.Print("writing HAR file [ ", path, " ] .. ")
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
if err != nil {
return err
@@ -70,7 +70,7 @@ func Unzip(reader *zip.Reader, dest string) error {
if err := f.Close(); err != nil {
panic(err)
}
fmt.Println(" done")
fmt.Println(" done")
}()

_, err = io.Copy(f, rc)
@@ -90,5 +90,3 @@ func Unzip(reader *zip.Reader, dest string) error {

return nil
}
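The strings.HasPrefix check in Unzip above is the standard guard against ZipSlip: an archive entry named something like "../../etc/passwd" would otherwise escape the destination directory. A small standalone sketch of the same guard, with a hypothetical helper name:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// safeJoin resolves an archive entry name under dest and rejects
// entries that would escape dest (directory traversal / ZipSlip).
func safeJoin(dest, name string) (string, error) {
	path := filepath.Join(dest, name)
	if !strings.HasPrefix(path, filepath.Clean(dest)+string(os.PathSeparator)) {
		return "", fmt.Errorf("illegal file path: %s", path)
	}
	return path, nil
}

func main() {
	if _, err := safeJoin("/tmp/out", "../../etc/passwd"); err != nil {
		fmt.Println(err) // rejected
	}
}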
@@ -3,8 +3,10 @@ package cmd
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/cli/mizu"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/up9inc/mizu/cli/mizu"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -13,20 +15,23 @@ type MizuTapOptions struct {
|
||||
GuiPort uint16
|
||||
Namespace string
|
||||
AllNamespaces bool
|
||||
Analyze bool
|
||||
AnalyzeDestination string
|
||||
KubeConfigPath string
|
||||
MizuImage string
|
||||
MizuPodPort uint16
|
||||
PlainTextFilterRegexes []string
|
||||
TapOutgoing bool
|
||||
}
|
||||
|
||||
|
||||
var mizuTapOptions = &MizuTapOptions{}
|
||||
var direction string
|
||||
|
||||
var tapCmd = &cobra.Command{
|
||||
Use: "tap [POD REGEX]",
|
||||
Short: "Record ingoing traffic of a kubernetes pod",
|
||||
Long: `Record the ingoing traffic of a kubernetes pod.
|
||||
Supported protocols are HTTP and gRPC.`,
|
||||
Supported protocols are HTTP and gRPC.`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
return errors.New("POD REGEX argument is required")
|
||||
@@ -39,6 +44,15 @@ var tapCmd = &cobra.Command{
|
||||
return errors.New(fmt.Sprintf("%s is not a valid regex %s", args[0], err))
|
||||
}
|
||||
|
||||
directionLowerCase := strings.ToLower(direction)
|
||||
if directionLowerCase == "any" {
|
||||
mizuTapOptions.TapOutgoing = true
|
||||
} else if directionLowerCase == "in" {
|
||||
mizuTapOptions.TapOutgoing = false
|
||||
} else {
|
||||
return errors.New(fmt.Sprintf("%s is not a valid value for flag --direction. Acceptable values are in/any.", direction))
|
||||
}
|
||||
|
||||
RunMizuTap(regex, mizuTapOptions)
|
||||
return nil
|
||||
},
|
||||
@@ -49,9 +63,12 @@ func init() {
|
||||
|
||||
tapCmd.Flags().Uint16VarP(&mizuTapOptions.GuiPort, "gui-port", "p", 8899, "Provide a custom port for the web interface webserver")
|
||||
tapCmd.Flags().StringVarP(&mizuTapOptions.Namespace, "namespace", "n", "", "Namespace selector")
|
||||
tapCmd.Flags().BoolVar(&mizuTapOptions.Analyze, "analyze", false, "Uploads traffic to UP9 cloud for further analysis (Beta)")
|
||||
tapCmd.Flags().StringVar(&mizuTapOptions.AnalyzeDestination, "dest", "up9.app", "Destination environment")
|
||||
tapCmd.Flags().BoolVarP(&mizuTapOptions.AllNamespaces, "all-namespaces", "A", false, "Tap all namespaces")
|
||||
tapCmd.Flags().StringVarP(&mizuTapOptions.KubeConfigPath, "kube-config", "k", "", "Path to kube-config file")
|
||||
tapCmd.Flags().StringVarP(&mizuTapOptions.MizuImage, "mizu-image", "", fmt.Sprintf("gcr.io/up9-docker-hub/mizu/%s:latest", mizu.Branch), "Custom image for mizu collector")
|
||||
tapCmd.Flags().Uint16VarP(&mizuTapOptions.MizuPodPort, "mizu-port", "", 8899, "Port which mizu cli will attempt to forward from the mizu collector pod")
|
||||
tapCmd.Flags().StringArrayVarP(&mizuTapOptions.PlainTextFilterRegexes, "regex-masking", "r", nil, "List of regex expressions that are used to filter matching values from text/plain http bodies")
|
||||
tapCmd.Flags().StringVarP(&direction, "direction", "", "in", "Record traffic that goes in this direction (relative to the tapped pod): in/any")
|
||||
}
|
||||
|
||||
@@ -3,13 +3,15 @@ package cmd
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/shared"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"regexp"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/up9inc/mizu/shared"
|
||||
|
||||
core "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/up9inc/mizu/cli/debounce"
|
||||
@@ -38,12 +40,29 @@ func RunMizuTap(podRegexQuery *regexp.Regexp, tappingOptions *MizuTapOptions) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel() // cancel will be called when this function exits
|
||||
|
||||
if matchingPods, err := kubernetesProvider.GetAllPodsMatchingRegex(ctx, podRegexQuery); err != nil {
|
||||
targetNamespace := getNamespace(tappingOptions, kubernetesProvider)
|
||||
if matchingPods, err := kubernetesProvider.GetAllPodsMatchingRegex(ctx, podRegexQuery, targetNamespace); err != nil {
|
||||
return
|
||||
} else {
|
||||
currentlyTappedPods = matchingPods
|
||||
}
|
||||
|
||||
var namespacesStr string
|
||||
if targetNamespace != mizu.K8sAllNamespaces {
|
||||
namespacesStr = fmt.Sprintf("namespace \"%s\"", targetNamespace)
|
||||
} else {
|
||||
namespacesStr = "all namespaces"
|
||||
}
|
||||
fmt.Printf("Tapping pods in %s\n", namespacesStr)
|
||||
|
||||
if len(currentlyTappedPods) == 0 {
|
||||
var suggestionStr string
|
||||
if targetNamespace != mizu.K8sAllNamespaces {
|
||||
suggestionStr = "\nSelect a different namespace with -n or tap all namespaces with -A"
|
||||
}
|
||||
fmt.Printf("Did not find any pods matching the regex argument%s\n", suggestionStr)
|
||||
}
|
||||
|
||||
nodeToTappedPodIPMap, err := getNodeHostToTappedPodIpsMap(currentlyTappedPods)
|
||||
if err != nil {
|
||||
return
|
||||
@@ -59,16 +78,15 @@ func RunMizuTap(podRegexQuery *regexp.Regexp, tappingOptions *MizuTapOptions) {
|
||||
|
||||
//block until exit signal or error
|
||||
waitForFinish(ctx, cancel)
|
||||
|
||||
// TODO handle incoming traffic from tapper using a channel
|
||||
}
|
||||
|
||||
|
||||
func createMizuResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, nodeToTappedPodIPMap map[string][]string, tappingOptions *MizuTapOptions, mizuApiFilteringOptions *shared.TrafficFilteringOptions) error {
|
||||
if err := createMizuAggregator(ctx, kubernetesProvider, tappingOptions, mizuApiFilteringOptions); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := createMizuTappers(ctx, kubernetesProvider, nodeToTappedPodIPMap, tappingOptions); err != nil {
|
||||
if err := updateMizuTappers(ctx, kubernetesProvider, nodeToTappedPodIPMap, tappingOptions); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -112,19 +130,27 @@ func getMizuApiFilteringOptions(tappingOptions *MizuTapOptions) (*shared.Traffic
|
||||
return &shared.TrafficFilteringOptions{PlainTextMaskingRegexes: compiledRegexSlice}, nil
|
||||
}
|
||||
|
||||
func createMizuTappers(ctx context.Context, kubernetesProvider *kubernetes.Provider, nodeToTappedPodIPMap map[string][]string, tappingOptions *MizuTapOptions) error {
|
||||
if err := kubernetesProvider.ApplyMizuTapperDaemonSet(
|
||||
ctx,
|
||||
mizu.ResourcesNamespace,
|
||||
mizu.TapperDaemonSetName,
|
||||
tappingOptions.MizuImage,
|
||||
mizu.TapperPodName,
|
||||
fmt.Sprintf("%s.%s.svc.cluster.local", aggregatorService.Name, aggregatorService.Namespace),
|
||||
nodeToTappedPodIPMap,
|
||||
mizuServiceAccountExists,
|
||||
); err != nil {
|
||||
fmt.Printf("Error creating mizu tapper daemonset: %v\n", err)
|
||||
return err
|
||||
func updateMizuTappers(ctx context.Context, kubernetesProvider *kubernetes.Provider, nodeToTappedPodIPMap map[string][]string, tappingOptions *MizuTapOptions) error {
|
||||
if len(nodeToTappedPodIPMap) > 0 {
|
||||
if err := kubernetesProvider.ApplyMizuTapperDaemonSet(
|
||||
ctx,
|
||||
mizu.ResourcesNamespace,
|
||||
mizu.TapperDaemonSetName,
|
||||
tappingOptions.MizuImage,
|
||||
mizu.TapperPodName,
|
||||
fmt.Sprintf("%s.%s.svc.cluster.local", aggregatorService.Name, aggregatorService.Namespace),
|
||||
nodeToTappedPodIPMap,
|
||||
mizuServiceAccountExists,
|
||||
tappingOptions.TapOutgoing,
|
||||
); err != nil {
|
||||
fmt.Printf("Error creating mizu tapper daemonset: %v\n", err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := kubernetesProvider.RemoveDaemonSet(ctx, mizu.ResourcesNamespace, mizu.TapperDaemonSetName); err != nil {
|
||||
fmt.Printf("Error deleting mizu tapper daemonset: %v\n", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -146,10 +172,12 @@ func cleanUpMizuResources(kubernetesProvider *kubernetes.Provider) {
|
||||
}
|
||||
|
||||
func watchPodsForTapping(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc, podRegex *regexp.Regexp, tappingOptions *MizuTapOptions) {
|
||||
added, modified, removed, errorChan := kubernetes.FilteredWatch(ctx, kubernetesProvider.GetPodWatcher(ctx, getNamespace(tappingOptions, kubernetesProvider)), podRegex)
|
||||
targetNamespace := getNamespace(tappingOptions, kubernetesProvider)
|
||||
|
||||
added, modified, removed, errorChan := kubernetes.FilteredWatch(ctx, kubernetesProvider.GetPodWatcher(ctx, targetNamespace), podRegex)
|
||||
|
||||
restartTappers := func() {
|
||||
if matchingPods, err := kubernetesProvider.GetAllPodsMatchingRegex(ctx, podRegex); err != nil {
|
||||
if matchingPods, err := kubernetesProvider.GetAllPodsMatchingRegex(ctx, podRegex, targetNamespace); err != nil {
|
||||
fmt.Printf("Error getting pods by regex: %s (%v,%+v)\n", err, err, err)
|
||||
cancel()
|
||||
} else {
|
||||
@@ -162,7 +190,7 @@ func watchPodsForTapping(ctx context.Context, kubernetesProvider *kubernetes.Pro
|
||||
cancel()
|
||||
}
|
||||
|
||||
if err := createMizuTappers(ctx, kubernetesProvider, nodeToTappedPodIPMap, tappingOptions); err != nil {
|
||||
if err := updateMizuTappers(ctx, kubernetesProvider, nodeToTappedPodIPMap, tappingOptions); err != nil {
|
||||
fmt.Printf("Error updating daemonset: %s (%v,%+v)\n", err, err, err)
|
||||
cancel()
|
||||
}
|
||||
@@ -172,10 +200,10 @@ func watchPodsForTapping(ctx context.Context, kubernetesProvider *kubernetes.Pro
|
||||
for {
|
||||
select {
|
||||
case newTarget := <-added:
|
||||
fmt.Printf("+%s\n", newTarget.Name)
|
||||
fmt.Printf(mizu.Green, fmt.Sprintf("+%s\n", newTarget.Name))
|
||||
|
||||
case removedTarget := <-removed:
|
||||
fmt.Printf("-%s\n", removedTarget.Name)
|
||||
fmt.Printf(mizu.Red, fmt.Sprintf("-%s\n", removedTarget.Name))
|
||||
restartTappersDebouncer.SetOn()
|
||||
|
||||
case modifiedTarget := <-modified:
|
||||
@@ -215,12 +243,21 @@ func portForwardApiPod(ctx context.Context, kubernetesProvider *kubernetes.Provi
|
||||
case modifiedPod := <-modified:
|
||||
if modifiedPod.Status.Phase == "Running" && !isPodReady {
|
||||
isPodReady = true
|
||||
var err error
|
||||
portForward, err = kubernetes.NewPortForward(kubernetesProvider, mizu.ResourcesNamespace, mizu.AggregatorPodName, tappingOptions.GuiPort, tappingOptions.MizuPodPort, cancel)
|
||||
fmt.Printf("Web interface is now available at http://localhost:%d\n", tappingOptions.GuiPort)
|
||||
if err != nil {
|
||||
fmt.Printf("error forwarding port to pod %s\n", err)
|
||||
var portForwardCreateError error
|
||||
if portForward, portForwardCreateError = kubernetes.NewPortForward(kubernetesProvider, mizu.ResourcesNamespace, mizu.AggregatorPodName, tappingOptions.GuiPort, tappingOptions.MizuPodPort, cancel); portForwardCreateError != nil {
|
||||
fmt.Printf("error forwarding port to pod %s\n", portForwardCreateError)
|
||||
cancel()
|
||||
} else {
|
||||
fmt.Printf("Web interface is now available at http://localhost:%d\n", tappingOptions.GuiPort)
|
||||
time.Sleep(time.Second * 5) // Waiting to be sure port forwarding finished
|
||||
if tappingOptions.Analyze {
|
||||
if _, err := http.Get(fmt.Sprintf("http://localhost:%d/api/uploadEntries?dest=%s", tappingOptions.GuiPort, tappingOptions.AnalyzeDestination)); err != nil {
|
||||
fmt.Println(err)
|
||||
} else {
|
||||
fmt.Printf(mizu.Purple, "Traffic is uploading to UP9 cloud for further analsys")
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -249,11 +286,7 @@ func createRBACIfNecessary(ctx context.Context, kubernetesProvider *kubernetes.P
|
||||
return false
|
||||
}
|
||||
if !mizuRBACExists {
|
||||
var versionString = mizu.Version
|
||||
if mizu.GitCommitHash != "" {
|
||||
versionString += "-" + mizu.GitCommitHash
|
||||
}
|
||||
err := kubernetesProvider.CreateMizuRBAC(ctx, mizu.ResourcesNamespace, versionString)
|
||||
err := kubernetesProvider.CreateMizuRBAC(ctx, mizu.ResourcesNamespace, mizu.RBACVersion)
|
||||
if err != nil {
|
||||
fmt.Printf("warning: could not create mizu rbac resources %v\n", err)
|
||||
return false
|
||||
|
||||
@@ -3,19 +3,38 @@ package cmd
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/cli/mizu"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type MizuVersionOptions struct {
|
||||
DebugInfo bool
|
||||
}
|
||||
|
||||
|
||||
var mizuVersionOptions = &MizuVersionOptions{}
|
||||
|
||||
var versionCmd = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print version info",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("%s (%s) %s\n", mizu.Version, mizu.Branch, mizu.GitCommitHash)
|
||||
if mizuVersionOptions.DebugInfo {
|
||||
timeStampInt, _ := strconv.ParseInt(mizu.BuildTimestamp, 10, 0)
|
||||
fmt.Printf("Version: %s \nBranch: %s (%s) \n", mizu.SemVer, mizu.Branch, mizu.GitCommitHash)
|
||||
fmt.Printf("Build Time: %s (%s)\n", mizu.BuildTimestamp, time.Unix(timeStampInt, 0))
|
||||
|
||||
} else {
|
||||
fmt.Printf("Version: %s (%s)\n", mizu.SemVer, mizu.Branch)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(versionCmd)
|
||||
|
||||
versionCmd.Flags().BoolVarP(&mizuVersionOptions.DebugInfo, "debug", "d", false, "Provide all information about version")
|
||||
|
||||
}
|
||||
|
||||
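The debug branch of the version command above parses BuildTimestamp (a Unix epoch in seconds injected via ldflags) back into a time.Time before printing it. A minimal sketch of that conversion, with an example epoch value standing in for the real build-time string:

package main

import (
	"fmt"
	"strconv"
	"time"
)

func main() {
	buildTimestamp := "1625097600" // example epoch seconds; the real value is set at build time
	ts, err := strconv.ParseInt(buildTimestamp, 10, 64)
	if err != nil {
		fmt.Println("invalid build timestamp:", err)
		return
	}
	fmt.Printf("Build Time: %s (%s)\n", buildTimestamp, time.Unix(ts, 0))
}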
@@ -6,7 +6,6 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/up9inc/mizu/cli/mizu"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
@@ -103,7 +102,6 @@ func (provider *Provider) CreateMizuAggregatorPod(ctx context.Context, namespace
|
||||
},
|
||||
DNSPolicy: core.DNSClusterFirstWithHostNet,
|
||||
TerminationGracePeriodSeconds: new(int64),
|
||||
// Affinity: TODO: define node selector for all relevant nodes for this mizu instance
|
||||
},
|
||||
}
|
||||
//define the service account only when it exists to prevent pod crash
|
||||
@@ -216,30 +214,117 @@ func (provider *Provider) CreateMizuRBAC(ctx context.Context, namespace string,
|
||||
}
|
||||
|
||||
func (provider *Provider) RemovePod(ctx context.Context, namespace string, podName string) error {
|
||||
if isFound, err := provider.CheckPodExists(ctx, namespace, podName);
|
||||
err != nil {
|
||||
return err
|
||||
} else if !isFound {
|
||||
return nil
|
||||
}
|
||||
|
||||
return provider.clientSet.CoreV1().Pods(namespace).Delete(ctx, podName, metav1.DeleteOptions{})
|
||||
}
|
||||
|
||||
func (provider *Provider) RemoveService(ctx context.Context, namespace string, serviceName string) error {
|
||||
if isFound, err := provider.CheckServiceExists(ctx, namespace, serviceName);
|
||||
err != nil {
|
||||
return err
|
||||
} else if !isFound {
|
||||
return nil
|
||||
}
|
||||
|
||||
return provider.clientSet.CoreV1().Services(namespace).Delete(ctx, serviceName, metav1.DeleteOptions{})
|
||||
}
|
||||
|
||||
func (provider *Provider) RemoveDaemonSet(ctx context.Context, namespace string, daemonSetName string) error {
|
||||
if isFound, err := provider.CheckDaemonSetExists(ctx, namespace, daemonSetName);
|
||||
err != nil {
|
||||
return err
|
||||
} else if !isFound {
|
||||
return nil
|
||||
}
|
||||
|
||||
return provider.clientSet.AppsV1().DaemonSets(namespace).Delete(ctx, daemonSetName, metav1.DeleteOptions{})
|
||||
}
|
||||
|
||||
func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, aggregatorPodIp string, nodeToTappedPodIPMap map[string][]string, linkServiceAccount bool) error {
|
||||
func (provider *Provider) CheckPodExists(ctx context.Context, namespace string, name string) (bool, error) {
|
||||
listOptions := metav1.ListOptions{
|
||||
FieldSelector: fmt.Sprintf("metadata.name=%s", name),
|
||||
Limit: 1,
|
||||
}
|
||||
resourceList, err := provider.clientSet.CoreV1().Pods(namespace).List(ctx, listOptions)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if len(resourceList.Items) > 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (provider *Provider) CheckServiceExists(ctx context.Context, namespace string, name string) (bool, error) {
|
||||
listOptions := metav1.ListOptions{
|
||||
FieldSelector: fmt.Sprintf("metadata.name=%s", name),
|
||||
Limit: 1,
|
||||
}
|
||||
resourceList, err := provider.clientSet.CoreV1().Services(namespace).List(ctx, listOptions)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if len(resourceList.Items) > 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (provider *Provider) CheckDaemonSetExists(ctx context.Context, namespace string, name string) (bool, error) {
|
||||
listOptions := metav1.ListOptions{
|
||||
FieldSelector: fmt.Sprintf("metadata.name=%s", name),
|
||||
Limit: 1,
|
||||
}
|
||||
resourceList, err := provider.clientSet.AppsV1().DaemonSets(namespace).List(ctx, listOptions)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if len(resourceList.Items) > 0 {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, aggregatorPodIp string, nodeToTappedPodIPMap map[string][]string, linkServiceAccount bool, tapOutgoing bool) error {
|
||||
if len(nodeToTappedPodIPMap) == 0 {
|
||||
return fmt.Errorf("Daemon set %s must tap at least 1 pod", daemonSetName)
|
||||
}
|
||||
|
||||
nodeToTappedPodIPMapJsonStr, err := json.Marshal(nodeToTappedPodIPMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mizuCmd := []string{
|
||||
"./mizuagent",
|
||||
"-i", "any",
|
||||
"--tap",
|
||||
"--hardump",
|
||||
"--aggregator-address", fmt.Sprintf("ws://%s/wsTapper", aggregatorPodIp),
|
||||
}
|
||||
if tapOutgoing {
|
||||
mizuCmd = append(mizuCmd, "--anydirection")
|
||||
}
|
||||
|
||||
privileged := true
|
||||
agentContainer := applyconfcore.Container()
|
||||
agentContainer.WithName(tapperPodName)
|
||||
agentContainer.WithImage(podImage)
|
||||
agentContainer.WithImagePullPolicy(core.PullAlways)
|
||||
agentContainer.WithSecurityContext(applyconfcore.SecurityContext().WithPrivileged(privileged))
|
||||
agentContainer.WithCommand("./mizuagent", "-i", "any", "--tap", "--hardump", "--aggregator-address", fmt.Sprintf("ws://%s/wsTapper", aggregatorPodIp))
|
||||
agentContainer.WithCommand(mizuCmd...)
|
||||
agentContainer.WithEnv(
|
||||
applyconfcore.EnvVar().WithName(shared.HostModeEnvVar).WithValue("1"),
|
||||
applyconfcore.EnvVar().WithName(shared.TappedAddressesPerNodeDictEnvVar).WithValue(string(nodeToTappedPodIPMapJsonStr)),
|
||||
@@ -301,8 +386,8 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
|
||||
return err
|
||||
}
|
||||
|
||||
func (provider *Provider) GetAllPodsMatchingRegex(ctx context.Context, regex *regexp.Regexp) ([]core.Pod, error) {
|
||||
pods, err := provider.clientSet.CoreV1().Pods(mizu.K8sAllNamespaces).List(ctx, metav1.ListOptions{})
|
||||
func (provider *Provider) GetAllPodsMatchingRegex(ctx context.Context, regex *regexp.Regexp, namespace string) ([]core.Pod, error) {
|
||||
pods, err := provider.clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
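GetAllPodsMatchingRegex above now lists pods in the given namespace and keeps only those whose names match the regex. The filtering step on its own looks roughly like the sketch below; the list call itself and the types come from client-go / k8s.io/api as used in the file above, and the pod names are made up:

package main

import (
	"fmt"
	"regexp"

	core "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// filterPodsByName keeps only the pods whose metadata.name matches regex,
// mirroring the selection done after the List call above.
func filterPodsByName(pods []core.Pod, regex *regexp.Regexp) []core.Pod {
	matching := make([]core.Pod, 0)
	for _, pod := range pods {
		if regex.MatchString(pod.Name) {
			matching = append(matching, pod)
		}
	}
	return matching
}

func main() {
	pods := []core.Pod{
		{ObjectMeta: metav1.ObjectMeta{Name: "front-end-7d89f"}},
		{ObjectMeta: metav1.ObjectMeta{Name: "carts-db-55f6c"}},
	}
	re := regexp.MustCompile("front")
	fmt.Println(len(filterPodsByName(pods, re)), "pod(s) matched")
}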
@@ -1,9 +1,11 @@
package mizu

var (
Version = "v0.0.1"
Branch = "develop"
GitCommitHash = "" // this var is overridden using ldflags in makefile when building
SemVer = "0.0.1"
Branch = "develop"
GitCommitHash = "" // this var is overridden using ldflags in makefile when building
BuildTimestamp = "" // this var is overridden using ldflags in makefile when building
RBACVersion = "v1"
)

const (
@@ -13,3 +15,14 @@ const (
TapperPodName = "mizu-tapper"
K8sAllNamespaces = ""
)

const (
Black = "\033[1;30m%s\033[0m"
Red = "\033[1;31m%s\033[0m"
Green = "\033[1;32m%s\033[0m"
Yellow = "\033[1;33m%s\033[0m"
Purple = "\033[1;34m%s\033[0m"
Magenta = "\033[1;35m%s\033[0m"
Teal = "\033[1;36m%s\033[0m"
White = "\033[1;37m%s\033[0m"
)
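The color constants added above are fmt format strings that wrap their argument in ANSI escape codes; this is how the tap command prints the green "+pod" and red "-pod" lines. Usage is simply a Printf with the constant as the format:

package main

import "fmt"

const (
	Green = "\033[1;32m%s\033[0m"
	Red   = "\033[1;31m%s\033[0m"
)

func main() {
	fmt.Printf(Green, "+front-end-7d89f\n") // printed in green
	fmt.Printf(Red, "-carts-db-55f6c\n")    // printed in red
}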
@@ -1,28 +1,41 @@
|
||||
package shared
|
||||
|
||||
type WebSocketMessageType string
|
||||
|
||||
const (
|
||||
WebSocketMessageTypeEntry WebSocketMessageType = "entry"
|
||||
WebSocketMessageTypeTappedEntry WebSocketMessageType = "tappedEntry"
|
||||
WebSocketMessageTypeUpdateStatus WebSocketMessageType = "status"
|
||||
WebSocketMessageTypeEntry WebSocketMessageType = "entry"
|
||||
WebSocketMessageTypeTappedEntry WebSocketMessageType = "tappedEntry"
|
||||
WebSocketMessageTypeUpdateStatus WebSocketMessageType = "status"
|
||||
WebSocketMessageTypeAnalyzeStatus WebSocketMessageType = "analyzeStatus"
|
||||
)
|
||||
|
||||
type WebSocketMessageMetadata struct {
|
||||
MessageType WebSocketMessageType `json:"messageType,omitempty"`
|
||||
}
|
||||
|
||||
type WebSocketAnalyzeStatusMessage struct {
|
||||
*WebSocketMessageMetadata
|
||||
AnalyzeStatus AnalyzeStatus `json:"analyzeStatus"`
|
||||
}
|
||||
|
||||
type AnalyzeStatus struct {
|
||||
IsAnalyzing bool `json:"isAnalyzing"`
|
||||
RemoteUrl string `json:"remoteUrl"`
|
||||
IsRemoteReady bool `json:"isRemoteReady"`
|
||||
}
|
||||
|
||||
type WebSocketStatusMessage struct {
|
||||
*WebSocketMessageMetadata
|
||||
TappingStatus TapStatus `json:"tappingStatus"`
|
||||
}
|
||||
|
||||
type TapStatus struct {
|
||||
Pods []PodInfo `json:"pods"`
|
||||
Pods []PodInfo `json:"pods"`
|
||||
}
|
||||
|
||||
type PodInfo struct {
|
||||
Namespace string `json:"namespace"`
|
||||
Name string `json:"name"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
func CreateWebSocketStatusMessage(tappingStatus TapStatus) WebSocketStatusMessage {
|
||||
@@ -34,6 +47,15 @@ func CreateWebSocketStatusMessage(tappingStatus TapStatus) WebSocketStatusMessag
|
||||
}
|
||||
}
|
||||
|
||||
func CreateWebSocketMessageTypeAnalyzeStatus(analyzeStatus AnalyzeStatus) WebSocketAnalyzeStatusMessage {
|
||||
return WebSocketAnalyzeStatusMessage{
|
||||
WebSocketMessageMetadata: &WebSocketMessageMetadata{
|
||||
MessageType: WebSocketMessageTypeAnalyzeStatus,
|
||||
},
|
||||
AnalyzeStatus: analyzeStatus,
|
||||
}
|
||||
}
|
||||
|
||||
type TrafficFilteringOptions struct {
|
||||
PlainTextMaskingRegexes []*SerializableRegexp
|
||||
}
|
||||
|
||||
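Every WebSocket payload embeds WebSocketMessageMetadata, so clients can switch on messageType before decoding the rest. A sketch of what the new analyze-status message looks like on the wire, using standalone copies of the structs above marshalled with encoding/json (the URL value is a placeholder):

package main

import (
	"encoding/json"
	"fmt"
)

type WebSocketMessageMetadata struct {
	MessageType string `json:"messageType,omitempty"`
}

type AnalyzeStatus struct {
	IsAnalyzing   bool   `json:"isAnalyzing"`
	RemoteUrl     string `json:"remoteUrl"`
	IsRemoteReady bool   `json:"isRemoteReady"`
}

type WebSocketAnalyzeStatusMessage struct {
	*WebSocketMessageMetadata
	AnalyzeStatus AnalyzeStatus `json:"analyzeStatus"`
}

func main() {
	msg := WebSocketAnalyzeStatusMessage{
		WebSocketMessageMetadata: &WebSocketMessageMetadata{MessageType: "analyzeStatus"},
		AnalyzeStatus:            AnalyzeStatus{IsAnalyzing: true, RemoteUrl: "https://example.invalid/share/abc", IsRemoteReady: false},
	}
	out, _ := json.Marshal(msg)
	fmt.Println(string(out))
	// {"messageType":"analyzeStatus","analyzeStatus":{"isAnalyzing":true,"remoteUrl":"https://example.invalid/share/abc","isRemoteReady":false}}
}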
12 tap/go.mod Normal file
@@ -0,0 +1,12 @@
module github.com/up9inc/mizu/tap

go 1.16

require (
github.com/google/gopacket v1.1.19
github.com/google/martian v2.1.0+incompatible
github.com/gorilla/websocket v1.4.2
github.com/orcaman/concurrent-map v0.0.0-20210106121528-16402b402231
github.com/patrickmn/go-cache v2.1.0+incompatible
golang.org/x/net v0.0.0-20210421230115-4e50805a0758
)
31 tap/go.sum Normal file
@@ -0,0 +1,31 @@
|
||||
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/orcaman/concurrent-map v0.0.0-20210501183033-44dafcb38ecc h1:Ak86L+yDSOzKFa7WM5bf5itSOo1e3Xh8bm5YCMUXIjQ=
|
||||
github.com/orcaman/concurrent-map v0.0.0-20210501183033-44dafcb38ecc/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da h1:b3NXsE2LusjYGGjL5bxEVZZORm/YEFFrWFjR8eFrw/c=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -84,14 +84,14 @@ type GrpcAssembler struct {
|
||||
framer *http2.Framer
|
||||
}
|
||||
|
||||
func (ga *GrpcAssembler) readMessage() (uint32, interface{}, string, error) {
|
||||
func (ga *GrpcAssembler) readMessage() (uint32, interface{}, error) {
|
||||
// Exactly one Framer is used for each half connection.
|
||||
// (Instead of creating a new Framer for each ReadFrame operation)
|
||||
// This is needed in order to decompress the headers,
|
||||
// because the compression context is updated with each request/response.
|
||||
frame, err := ga.framer.ReadFrame()
|
||||
if err != nil {
|
||||
return 0, nil, "", err
|
||||
return 0, nil, err
|
||||
}
|
||||
|
||||
streamID := frame.Header().StreamID
|
||||
@@ -99,7 +99,7 @@ func (ga *GrpcAssembler) readMessage() (uint32, interface{}, string, error) {
|
||||
ga.fragmentsByStream.appendFrame(streamID, frame)
|
||||
|
||||
if !(ga.isStreamEnd(frame)) {
|
||||
return 0, nil, "", nil
|
||||
return 0, nil, nil
|
||||
}
|
||||
|
||||
headers, data := ga.fragmentsByStream.pop(streamID)
|
||||
@@ -137,10 +137,10 @@ func (ga *GrpcAssembler) readMessage() (uint32, interface{}, string, error) {
|
||||
ContentLength: int64(len(dataString)),
|
||||
}
|
||||
} else {
|
||||
return 0, nil, "", errors.New("Failed to assemble stream: neither a request nor a message")
|
||||
return 0, nil, errors.New("Failed to assemble stream: neither a request nor a message")
|
||||
}
|
||||
|
||||
return streamID, messageHTTP1, dataString, nil
|
||||
return streamID, messageHTTP1, nil
|
||||
}
|
||||
|
||||
func (ga *GrpcAssembler) isStreamEnd(frame http2.Frame) bool {
|
||||
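As the comment in readMessage notes, a single http2.Framer must be reused per half-connection because the HPACK compression context carries over between header blocks. A stripped-down sketch of the per-stream read loop (frame handling and actual hpack header decoding are omitted, and the END_STREAM check is simplified):

package tapsketch

import (
	"io"

	"golang.org/x/net/http2"
)

// readUntilStreamEnd reads frames with one long-lived Framer and returns
// once it sees a frame for the given stream carrying END_STREAM.
// Real decoding (HEADERS via hpack, DATA reassembly) is omitted here.
func readUntilStreamEnd(r io.Reader, streamID uint32) error {
	framer := http2.NewFramer(nil, r) // write side unused in this sketch
	for {
		frame, err := framer.ReadFrame()
		if err != nil {
			return err
		}
		fh := frame.Header()
		if fh.StreamID != streamID {
			continue
		}
		if fh.Flags.Has(http2.FlagDataEndStream) || fh.Flags.Has(http2.FlagHeadersEndStream) {
			return nil
		}
	}
}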
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -15,7 +16,8 @@ import (
|
||||
)
|
||||
|
||||
const readPermission = 0644
|
||||
const tempFilenamePrefix = "har_writer"
|
||||
const harFilenameSuffix = ".har"
|
||||
const tempFilenameSuffix = ".har.tmp"
|
||||
|
||||
type PairChanItem struct {
|
||||
Request *http.Request
|
||||
@@ -23,12 +25,13 @@ type PairChanItem struct {
|
||||
Response *http.Response
|
||||
ResponseTime time.Time
|
||||
RequestSenderIp string
|
||||
ConnectionInfo *ConnectionInfo
|
||||
}
|
||||
|
||||
func openNewHarFile(filename string) *HarFile {
|
||||
file, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, readPermission)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to open output file: %s (%v,%+v)", err, err, err))
|
||||
log.Panicf("Failed to open output file: %s (%v,%+v)", err, err, err)
|
||||
}
|
||||
|
||||
harFile := HarFile{file: file, entryCount: 0}
|
||||
@@ -45,13 +48,13 @@ type HarFile struct {
|
||||
func NewEntry(request *http.Request, requestTime time.Time, response *http.Response, responseTime time.Time) (*har.Entry, error) {
|
||||
harRequest, err := har.NewRequest(request, true)
|
||||
if err != nil {
|
||||
SilentError("convert-request-to-har", "Failed converting request to HAR %s (%v,%+v)\n", err, err, err)
|
||||
SilentError("convert-request-to-har", "Failed converting request to HAR %s (%v,%+v)", err, err, err)
|
||||
return nil, errors.New("Failed converting request to HAR")
|
||||
}
|
||||
|
||||
harResponse, err := har.NewResponse(response, true)
|
||||
if err != nil {
|
||||
SilentError("convert-response-to-har", "Failed converting response to HAR %s (%v,%+v)\n", err, err, err)
|
||||
SilentError("convert-response-to-har", "Failed converting response to HAR %s (%v,%+v)", err, err, err)
|
||||
return nil, errors.New("Failed converting response to HAR")
|
||||
}
|
||||
|
||||
@@ -62,7 +65,7 @@ func NewEntry(request *http.Request, requestTime time.Time, response *http.Respo
|
||||
|
||||
status, err := strconv.Atoi(response.Header.Get(":status"))
|
||||
if err != nil {
|
||||
SilentError("convert-response-status-for-har", "Failed converting status to int %s (%v,%+v)\n", err, err, err)
|
||||
SilentError("convert-response-status-for-har", "Failed converting status to int %s (%v,%+v)", err, err, err)
|
||||
return nil, errors.New("Failed converting response status to int for HAR")
|
||||
}
|
||||
harResponse.Status = status
|
||||
@@ -102,7 +105,7 @@ func NewEntry(request *http.Request, requestTime time.Time, response *http.Respo
|
||||
func (f *HarFile) WriteEntry(harEntry *har.Entry) {
|
||||
harEntryJson, err := json.Marshal(harEntry)
|
||||
if err != nil {
|
||||
SilentError("har-entry-marshal", "Failed converting har entry object to JSON%s (%v,%+v)\n", err, err, err)
|
||||
SilentError("har-entry-marshal", "Failed converting har entry object to JSON%s (%v,%+v)", err, err, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -116,7 +119,7 @@ func (f *HarFile) WriteEntry(harEntry *har.Entry) {
|
||||
harEntryString := append([]byte(separator), harEntryJson...)
|
||||
|
||||
if _, err := f.file.Write(harEntryString); err != nil {
|
||||
panic(fmt.Sprintf("Failed to write to output file: %s (%v,%+v)", err, err, err))
|
||||
log.Panicf("Failed to write to output file: %s (%v,%+v)", err, err, err)
|
||||
}
|
||||
|
||||
f.entryCount++
|
||||
@@ -131,21 +134,21 @@ func (f *HarFile) Close() {
|
||||
|
||||
err := f.file.Close()
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to close output file: %s (%v,%+v)", err, err, err))
|
||||
log.Panicf("Failed to close output file: %s (%v,%+v)", err, err, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (f*HarFile) writeHeader() {
|
||||
header := []byte(`{"log": {"version": "1.2", "creator": {"name": "Mizu", "version": "0.0.1"}, "entries": [`)
|
||||
if _, err := f.file.Write(header); err != nil {
|
||||
panic(fmt.Sprintf("Failed to write header to output file: %s (%v,%+v)", err, err, err))
|
||||
log.Panicf("Failed to write header to output file: %s (%v,%+v)", err, err, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (f*HarFile) writeTrailer() {
|
||||
trailer := []byte("]}}")
|
||||
if _, err := f.file.Write(trailer); err != nil {
|
||||
panic(fmt.Sprintf("Failed to write trailer to output file: %s (%v,%+v)", err, err, err))
|
||||
log.Panicf("Failed to write trailer to output file: %s (%v,%+v)", err, err, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -161,8 +164,8 @@ func NewHarWriter(outputDir string, maxEntries int) *HarWriter {
|
||||
}
|
||||
|
||||
type OutputChannelItem struct {
|
||||
HarEntry *har.Entry
|
||||
RequestSenderIp string
|
||||
HarEntry *har.Entry
|
||||
ConnectionInfo *ConnectionInfo
|
||||
}
|
||||
|
||||
type HarWriter struct {
|
||||
@@ -174,20 +177,20 @@ type HarWriter struct {
|
||||
done chan bool
|
||||
}
|
||||
|
||||
func (hw *HarWriter) WritePair(request *http.Request, requestTime time.Time, response *http.Response, responseTime time.Time, requestSenderIp string) {
|
||||
func (hw *HarWriter) WritePair(request *http.Request, requestTime time.Time, response *http.Response, responseTime time.Time, connectionInfo *ConnectionInfo) {
|
||||
hw.PairChan <- &PairChanItem{
|
||||
Request: request,
|
||||
RequestTime: requestTime,
|
||||
Response: response,
|
||||
ResponseTime: responseTime,
|
||||
RequestSenderIp: requestSenderIp,
|
||||
Request: request,
|
||||
RequestTime: requestTime,
|
||||
Response: response,
|
||||
ResponseTime: responseTime,
|
||||
ConnectionInfo: connectionInfo,
|
||||
}
|
||||
}
|
||||
|
||||
func (hw *HarWriter) Start() {
|
||||
if hw.OutputDirPath != "" {
|
||||
if err := os.MkdirAll(hw.OutputDirPath, os.ModePerm); err != nil {
|
||||
panic(fmt.Sprintf("Failed to create output directory: %s (%v,%+v)", err, err, err))
|
||||
log.Panicf("Failed to create output directory: %s (%v,%+v)", err, err, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -210,8 +213,8 @@ func (hw *HarWriter) Start() {
|
||||
}
|
||||
} else {
|
||||
hw.OutChan <- &OutputChannelItem{
|
||||
HarEntry: harEntry,
|
||||
RequestSenderIp: pair.RequestSenderIp,
|
||||
HarEntry: harEntry,
|
||||
ConnectionInfo: pair.ConnectionInfo,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -226,10 +229,11 @@ func (hw *HarWriter) Start() {
|
||||
func (hw *HarWriter) Stop() {
|
||||
close(hw.PairChan)
|
||||
<-hw.done
|
||||
close(hw.OutChan)
|
||||
}
|
||||
|
||||
func (hw *HarWriter) openNewFile() {
|
||||
filename := filepath.Join(os.TempDir(), fmt.Sprintf("%s_%d", tempFilenamePrefix, time.Now().UnixNano()))
|
||||
filename := buildFilename(hw.OutputDirPath, time.Now(), tempFilenameSuffix)
|
||||
hw.currentFile = openNewHarFile(filename)
|
||||
}
|
||||
|
||||
@@ -238,15 +242,15 @@ func (hw *HarWriter) closeFile() {
|
||||
tmpFilename := hw.currentFile.file.Name()
|
||||
hw.currentFile = nil
|
||||
|
||||
filename := buildFilename(hw.OutputDirPath, time.Now())
|
||||
filename := buildFilename(hw.OutputDirPath, time.Now(), harFilenameSuffix)
|
||||
err := os.Rename(tmpFilename, filename)
|
||||
if err != nil {
|
||||
SilentError("Rename-file", "cannot rename file: %s (%v,%+v)\n", err, err, err)
|
||||
SilentError("Rename-file", "cannot rename file: %s (%v,%+v)", err, err, err)
|
||||
}
|
||||
}
|
||||
|
||||
func buildFilename(dir string, t time.Time) string {
|
||||
func buildFilename(dir string, t time.Time, suffix string) string {
|
||||
// (epoch time in nanoseconds)__(YYYY_Month_DD__hh-mm-ss).har
|
||||
filename := fmt.Sprintf("%d__%s.har", t.UnixNano(), t.Format("2006_Jan_02__15-04-05"))
|
||||
filename := fmt.Sprintf("%d__%s%s", t.UnixNano(), t.Format("2006_Jan_02__15-04-05"), suffix)
|
||||
return filepath.Join(dir, filename)
|
||||
}
|
||||
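buildFilename above uses Go's reference-time layout ("2006_Jan_02__15-04-05") plus the epoch in nanoseconds, and the new suffix argument lets the writer create .har.tmp files that are renamed to .har on close. A quick standalone example of the resulting names (the directory and timestamp are arbitrary):

package main

import (
	"fmt"
	"path/filepath"
	"time"
)

func buildFilename(dir string, t time.Time, suffix string) string {
	// (epoch time in nanoseconds)__(YYYY_Month_DD__hh-mm-ss)<suffix>
	filename := fmt.Sprintf("%d__%s%s", t.UnixNano(), t.Format("2006_Jan_02__15-04-05"), suffix)
	return filepath.Join(dir, filename)
}

func main() {
	now := time.Date(2021, time.June, 1, 12, 30, 0, 0, time.UTC)
	fmt.Println(buildFilename("/tmp/har", now, ".har.tmp"))
	// /tmp/har/1622550600000000000__2021_Jun_01__12-30-00.har.tmp
}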
122 tap/http_matcher.go Normal file
@@ -0,0 +1,122 @@
|
||||
package tap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/orcaman/concurrent-map"
|
||||
)
|
||||
|
||||
type requestResponsePair struct {
|
||||
Request httpMessage `json:"request"`
|
||||
Response httpMessage `json:"response"`
|
||||
}
|
||||
|
||||
type httpMessage struct {
|
||||
isRequest bool
|
||||
captureTime time.Time
|
||||
orig interface{}
|
||||
}
|
||||
|
||||
|
||||
// Key is {client_addr}:{client_port}->{dest_addr}:{dest_port}
|
||||
type requestResponseMatcher struct {
|
||||
openMessagesMap cmap.ConcurrentMap
|
||||
|
||||
}
|
||||
|
||||
func createResponseRequestMatcher() requestResponseMatcher {
|
||||
newMatcher := &requestResponseMatcher{openMessagesMap: cmap.New()}
|
||||
return *newMatcher
|
||||
}
|
||||
|
||||
func (matcher *requestResponseMatcher) registerRequest(ident string, request *http.Request, captureTime time.Time) *requestResponsePair {
|
||||
split := splitIdent(ident)
|
||||
key := genKey(split)
|
||||
|
||||
requestHTTPMessage := httpMessage{
|
||||
isRequest: true,
|
||||
captureTime: captureTime,
|
||||
orig: request,
|
||||
}
|
||||
|
||||
if response, found := matcher.openMessagesMap.Pop(key); found {
|
||||
// Type assertion always succeeds because all of the map's values are of httpMessage type
|
||||
responseHTTPMessage := response.(*httpMessage)
|
||||
if responseHTTPMessage.isRequest {
|
||||
SilentError("Request-Duplicate", "Got duplicate request with same identifier")
|
||||
return nil
|
||||
}
|
||||
Debug("Matched open Response for %s", key)
|
||||
return matcher.preparePair(&requestHTTPMessage, responseHTTPMessage)
|
||||
}
|
||||
|
||||
matcher.openMessagesMap.Set(key, &requestHTTPMessage)
|
||||
Debug("Registered open Request for %s", key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (matcher *requestResponseMatcher) registerResponse(ident string, response *http.Response, captureTime time.Time) *requestResponsePair {
|
||||
split := splitIdent(ident)
|
||||
key := genKey(split)
|
||||
|
||||
responseHTTPMessage := httpMessage{
|
||||
isRequest: false,
|
||||
captureTime: captureTime,
|
||||
orig: response,
|
||||
}
|
||||
|
||||
if request, found := matcher.openMessagesMap.Pop(key); found {
|
||||
// Type assertion always succeeds because all of the map's values are of httpMessage type
|
||||
requestHTTPMessage := request.(*httpMessage)
|
||||
if !requestHTTPMessage.isRequest {
|
||||
SilentError("Response-Duplicate", "Got duplicate response with same identifier")
|
||||
return nil
|
||||
}
|
||||
Debug("Matched open Request for %s", key)
|
||||
return matcher.preparePair(requestHTTPMessage, &responseHTTPMessage)
|
||||
}
|
||||
|
||||
matcher.openMessagesMap.Set(key, &responseHTTPMessage)
|
||||
Debug("Registered open Response for %s", key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (matcher *requestResponseMatcher) preparePair(requestHTTPMessage *httpMessage, responseHTTPMessage *httpMessage) *requestResponsePair {
|
||||
return &requestResponsePair{
|
||||
Request: *requestHTTPMessage,
|
||||
Response: *responseHTTPMessage,
|
||||
}
|
||||
}
|
||||
|
||||
func splitIdent(ident string) []string {
|
||||
ident = strings.Replace(ident, "->", " ", -1)
|
||||
return strings.Split(ident, " ")
|
||||
}
|
||||
|
||||
func genKey(split []string) string {
|
||||
key := fmt.Sprintf("%s:%s->%s:%s,%s", split[0], split[2], split[1], split[3], split[4])
|
||||
return key
|
||||
}
|
||||
|
||||
func (matcher *requestResponseMatcher) deleteOlderThan(t time.Time) int {
|
||||
keysToPop := make([]string, 0)
|
||||
for item := range matcher.openMessagesMap.IterBuffered() {
|
||||
// Map only contains values of type httpMessage
|
||||
message, _ := item.Val.(*httpMessage)
|
||||
|
||||
if message.captureTime.Before(t) {
|
||||
keysToPop = append(keysToPop, item.Key)
|
||||
}
|
||||
}
|
||||
|
||||
numDeleted := len(keysToPop)
|
||||
|
||||
for _, key := range keysToPop {
|
||||
_, _ = matcher.openMessagesMap.Pop(key)
|
||||
}
|
||||
|
||||
return numDeleted
|
||||
}
|
||||
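The matcher above stores each unmatched half of an exchange in a concurrent map, keyed by a normalized "client:port->server:port,counter" string, so the response registered later pops the request registered earlier (or vice versa). A small sketch of the key normalization done by splitIdent and genKey, folded into one hypothetical helper; the addresses are made up:

package main

import (
	"fmt"
	"strings"
)

// identKey combines splitIdent and genKey from http_matcher.go: an ident of
// the form "srcIP->dstIP srcPort->dstPort counter" is split and re-ordered
// into "srcIP:srcPort->dstIP:dstPort,counter".
func identKey(ident string) string {
	split := strings.Split(strings.Replace(ident, "->", " ", -1), " ")
	return fmt.Sprintf("%s:%s->%s:%s,%s", split[0], split[2], split[1], split[3], split[4])
}

func main() {
	// Both handleHTTP1ClientStream and handleHTTP1ServerStream build the ident
	// in client->server orientation, so a request and its response share a key.
	ident := "10.0.0.5->10.0.0.9 51234->8080 1"
	fmt.Println(identKey(ident)) // 10.0.0.5:51234->10.0.0.9:8080,1
}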
@@ -3,10 +3,7 @@ package tap
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
b64 "encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -27,6 +24,14 @@ type tcpID struct {
|
||||
dstPort string
|
||||
}
|
||||
|
||||
type ConnectionInfo struct {
|
||||
ClientIP string
|
||||
ClientPort string
|
||||
ServerIP string
|
||||
ServerPort string
|
||||
IsOutgoing bool
|
||||
}
|
||||
|
||||
func (tid *tcpID) String() string {
|
||||
return fmt.Sprintf("%s->%s %s->%s", tid.srcIP, tid.dstIP, tid.srcPort, tid.dstPort)
|
||||
}
|
||||
@@ -41,6 +46,7 @@ type httpReader struct {
|
||||
tcpID tcpID
|
||||
isClient bool
|
||||
isHTTP2 bool
|
||||
isOutgoing bool
|
||||
msgQueue chan httpReaderDataMsg // Channel of captured reassembled tcp payload
|
||||
data []byte
|
||||
captureTime time.Time
|
||||
@@ -73,7 +79,7 @@ func (h *httpReader) run(wg *sync.WaitGroup) {
|
||||
b := bufio.NewReader(h)
|
||||
|
||||
if isHTTP2, err := checkIsHTTP2Connection(b, h.isClient); err != nil {
|
||||
SilentError("HTTP/2-Prepare-Connection", "stream %s Failed to check if client is HTTP/2: %s (%v,%+v)\n", h.ident, err, err, err)
|
||||
SilentError("HTTP/2-Prepare-Connection", "stream %s Failed to check if client is HTTP/2: %s (%v,%+v)", h.ident, err, err, err)
|
||||
// Do something?
|
||||
} else {
|
||||
h.isHTTP2 = isHTTP2
|
||||
@@ -82,7 +88,7 @@ func (h *httpReader) run(wg *sync.WaitGroup) {
|
||||
if h.isHTTP2 {
|
||||
err := prepareHTTP2Connection(b, h.isClient)
|
||||
if err != nil {
|
||||
SilentError("HTTP/2-Prepare-Connection-After-Check", "stream %s error: %s (%v,%+v)\n", h.ident, err, err, err)
|
||||
SilentError("HTTP/2-Prepare-Connection-After-Check", "stream %s error: %s (%v,%+v)", h.ident, err, err, err)
|
||||
}
|
||||
h.grpcAssembler = createGrpcAssembler(b)
|
||||
}
|
||||
@@ -93,7 +99,7 @@ func (h *httpReader) run(wg *sync.WaitGroup) {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
SilentError("HTTP/2", "stream %s error: %s (%v,%+v)\n", h.ident, err, err, err)
|
||||
SilentError("HTTP/2", "stream %s error: %s (%v,%+v)", h.ident, err, err, err)
|
||||
continue
|
||||
}
|
||||
} else if h.isClient {
|
||||
@@ -101,7 +107,7 @@ func (h *httpReader) run(wg *sync.WaitGroup) {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
SilentError("HTTP-request", "stream %s Request error: %s (%v,%+v)\n", h.ident, err, err, err)
|
||||
SilentError("HTTP-request", "stream %s Request error: %s (%v,%+v)", h.ident, err, err, err)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
@@ -109,7 +115,7 @@ func (h *httpReader) run(wg *sync.WaitGroup) {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
SilentError("HTTP-response", "stream %s Response error: %s (%v,%+v)\n", h.ident, err, err, err)
|
||||
SilentError("HTTP-response", "stream %s Response error: %s (%v,%+v)", h.ident, err, err, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -117,38 +123,49 @@ func (h *httpReader) run(wg *sync.WaitGroup) {
|
||||
}
|
||||
|
||||
func (h *httpReader) handleHTTP2Stream() error {
|
||||
streamID, messageHTTP1, body, err := h.grpcAssembler.readMessage()
|
||||
streamID, messageHTTP1, err := h.grpcAssembler.readMessage()
|
||||
h.messageCount++
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var reqResPair *envoyMessageWrapper
|
||||
var reqResPair *requestResponsePair
|
||||
var connectionInfo *ConnectionInfo
|
||||
|
||||
switch messageHTTP1 := messageHTTP1.(type) {
|
||||
case http.Request:
|
||||
ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.srcIP, h.tcpID.dstIP, h.tcpID.srcPort, h.tcpID.dstPort, streamID)
|
||||
reqResPair = reqResMatcher.registerRequest(ident, &messageHTTP1, h.captureTime, body, true)
|
||||
connectionInfo = &ConnectionInfo{
|
||||
ClientIP: h.tcpID.srcIP,
|
||||
ClientPort: h.tcpID.srcPort,
|
||||
ServerIP: h.tcpID.dstIP,
|
||||
ServerPort: h.tcpID.dstPort,
|
||||
IsOutgoing: h.isOutgoing,
|
||||
}
|
||||
reqResPair = reqResMatcher.registerRequest(ident, &messageHTTP1, h.captureTime)
|
||||
case http.Response:
|
||||
ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.dstIP, h.tcpID.srcIP, h.tcpID.dstPort, h.tcpID.srcPort, streamID)
|
||||
reqResPair = reqResMatcher.registerResponse(ident, &messageHTTP1, h.captureTime, body, true)
|
||||
connectionInfo = &ConnectionInfo{
|
||||
ClientIP: h.tcpID.dstIP,
|
||||
ClientPort: h.tcpID.dstPort,
|
||||
ServerIP: h.tcpID.srcIP,
|
||||
ServerPort: h.tcpID.srcPort,
|
||||
IsOutgoing: h.isOutgoing,
|
||||
}
|
||||
reqResPair = reqResMatcher.registerResponse(ident, &messageHTTP1, h.captureTime)
|
||||
}
|
||||
|
||||
if reqResPair != nil {
|
||||
statsTracker.incMatchedMessages()
|
||||
|
||||
if h.harWriter != nil {
|
||||
h.harWriter.WritePair(
|
||||
reqResPair.HttpBufferedTrace.Request.orig.(*http.Request),
|
||||
reqResPair.HttpBufferedTrace.Request.captureTime,
|
||||
reqResPair.HttpBufferedTrace.Response.orig.(*http.Response),
|
||||
reqResPair.HttpBufferedTrace.Response.captureTime,
|
||||
reqResPair.HttpBufferedTrace.Request.requestSenderIp,
|
||||
reqResPair.Request.orig.(*http.Request),
|
||||
reqResPair.Request.captureTime,
|
||||
reqResPair.Response.orig.(*http.Response),
|
||||
reqResPair.Response.captureTime,
|
||||
connectionInfo,
|
||||
)
|
||||
} else {
|
||||
jsonStr, err := json.Marshal(reqResPair)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
broadcastReqResPair(jsonStr)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -165,37 +182,35 @@ func (h *httpReader) handleHTTP1ClientStream(b *bufio.Reader) error {
|
||||
req.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
|
||||
s := len(body)
|
||||
if err != nil {
|
||||
SilentError("HTTP-request-body", "stream %s Got body err: %s\n", h.ident, err)
|
||||
SilentError("HTTP-request-body", "stream %s Got body err: %s", h.ident, err)
|
||||
} else if h.hexdump {
|
||||
Info("Body(%d/0x%x)\n%s\n", len(body), len(body), hex.Dump(body))
|
||||
Info("Body(%d/0x%x) - %s", len(body), len(body), hex.Dump(body))
|
||||
}
|
||||
if err := req.Body.Close(); err != nil {
|
||||
SilentError("HTTP-request-body-close", "stream %s Failed to close request body: %s\n", h.ident, err)
|
||||
SilentError("HTTP-request-body-close", "stream %s Failed to close request body: %s", h.ident, err)
|
||||
}
|
||||
encoding := req.Header["Content-Encoding"]
|
||||
bodyStr, err := readBody(body, encoding)
|
||||
if err != nil {
|
||||
SilentError("HTTP-request-body-decode", "stream %s Failed to decode body: %s\n", h.ident, err)
|
||||
}
|
||||
Info("HTTP/%s Request: %s %s (Body:%d)\n", h.ident, req.Method, req.URL, s)
|
||||
Info("HTTP/1 Request: %s %s %s (Body:%d) -> %s", h.ident, req.Method, req.URL, s, encoding)
|
||||
|
||||
ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.srcIP, h.tcpID.dstIP, h.tcpID.srcPort, h.tcpID.dstPort, h.messageCount)
|
||||
reqResPair := reqResMatcher.registerRequest(ident, req, h.captureTime, bodyStr, false)
|
||||
reqResPair := reqResMatcher.registerRequest(ident, req, h.captureTime)
|
||||
if reqResPair != nil {
|
||||
statsTracker.incMatchedMessages()
|
||||
|
||||
if h.harWriter != nil {
|
||||
h.harWriter.WritePair(
|
||||
reqResPair.HttpBufferedTrace.Request.orig.(*http.Request),
|
||||
reqResPair.HttpBufferedTrace.Request.captureTime,
|
||||
reqResPair.HttpBufferedTrace.Response.orig.(*http.Response),
|
||||
reqResPair.HttpBufferedTrace.Response.captureTime,
|
||||
reqResPair.HttpBufferedTrace.Request.requestSenderIp,
|
||||
reqResPair.Request.orig.(*http.Request),
|
||||
reqResPair.Request.captureTime,
|
||||
reqResPair.Response.orig.(*http.Response),
|
||||
reqResPair.Response.captureTime,
|
||||
&ConnectionInfo{
|
||||
ClientIP: h.tcpID.srcIP,
|
||||
ClientPort: h.tcpID.srcPort,
|
||||
ServerIP: h.tcpID.dstIP,
|
||||
ServerPort: h.tcpID.dstPort,
|
||||
IsOutgoing: h.isOutgoing,
|
||||
},
|
||||
)
|
||||
} else {
|
||||
jsonStr, err := json.Marshal(reqResPair)
|
||||
if err != nil {
|
||||
SilentError("HTTP-marshal", "stream %s Error convert request response to json: %s\n", h.ident, err)
|
||||
}
|
||||
broadcastReqResPair(jsonStr)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -224,13 +239,13 @@ func (h *httpReader) handleHTTP1ServerStream(b *bufio.Reader) error {
res.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
s := len(body)
if err != nil {
SilentError("HTTP-response-body", "HTTP/%s: failed to get body(parsed len:%d): %s\n", h.ident, s, err)
SilentError("HTTP-response-body", "HTTP/%s: failed to get body(parsed len:%d): %s", h.ident, s, err)
}
if h.hexdump {
Info("Body(%d/0x%x)\n%s\n", len(body), len(body), hex.Dump(body))
Info("Body(%d/0x%x) - %s", len(body), len(body), hex.Dump(body))
}
if err := res.Body.Close(); err != nil {
SilentError("HTTP-response-body-close", "HTTP/%s: failed to close body(parsed len:%d): %s\n", h.ident, s, err)
SilentError("HTTP-response-body-close", "HTTP/%s: failed to close body(parsed len:%d): %s", h.ident, s, err)
}
sym := ","
if res.ContentLength > 0 && res.ContentLength != int64(s) {
@@ -241,54 +256,29 @@ func (h *httpReader) handleHTTP1ServerStream(b *bufio.Reader) error {
contentType = []string{http.DetectContentType(body)}
}
encoding := res.Header["Content-Encoding"]
Info("HTTP/%s Response: %s URL:%s (%d%s%d%s) -> %s\n", h.ident, res.Status, req, res.ContentLength, sym, s, contentType, encoding)
bodyStr, err := readBody(body, encoding)
if err != nil {
SilentError("HTTP-response-body-decode", "stream %s Failed to decode body: %s\n", h.ident, err)
}
Info("HTTP/1 Response: %s %s URL:%s (%d%s%d%s) -> %s", h.ident, res.Status, req, res.ContentLength, sym, s, contentType, encoding)

ident := fmt.Sprintf("%s->%s %s->%s %d", h.tcpID.dstIP, h.tcpID.srcIP, h.tcpID.dstPort, h.tcpID.srcPort, h.messageCount)
reqResPair := reqResMatcher.registerResponse(ident, res, h.captureTime, bodyStr, false)
reqResPair := reqResMatcher.registerResponse(ident, res, h.captureTime)
if reqResPair != nil {
statsTracker.incMatchedMessages()

if h.harWriter != nil {
h.harWriter.WritePair(
reqResPair.HttpBufferedTrace.Request.orig.(*http.Request),
reqResPair.HttpBufferedTrace.Request.captureTime,
reqResPair.HttpBufferedTrace.Response.orig.(*http.Response),
reqResPair.HttpBufferedTrace.Response.captureTime,
reqResPair.HttpBufferedTrace.Request.requestSenderIp,
reqResPair.Request.orig.(*http.Request),
reqResPair.Request.captureTime,
reqResPair.Response.orig.(*http.Response),
reqResPair.Response.captureTime,
&ConnectionInfo{
ClientIP: h.tcpID.dstIP,
ClientPort: h.tcpID.dstPort,
ServerIP: h.tcpID.srcIP,
ServerPort: h.tcpID.srcPort,
IsOutgoing: h.isOutgoing,
},
)
} else {
jsonStr, err := json.Marshal(reqResPair)
if err != nil {
SilentError("HTTP-marshal", "stream %s Error convert request response to json: %s\n", h.ident, err)
}
broadcastReqResPair(jsonStr)
}
}

return nil
}

func readBody(bodyBytes []byte, encoding []string) (string, error) {
var bodyBuffer io.Reader
bodyBuffer = bytes.NewBuffer(bodyBytes)
var err error
if len(encoding) > 0 && (encoding[0] == "gzip" || encoding[0] == "deflate") {
bodyBuffer, err = gzip.NewReader(bodyBuffer)
if err != nil {
SilentError("HTTP-gunzip", "Failed to gzip decode: %s\n", err)
return "", err
}
}
if _, ok := bodyBuffer.(*gzip.Reader); ok {
err = bodyBuffer.(*gzip.Reader).Close()
if err != nil {
return "", err
}
}

buf := new(bytes.Buffer)
_, err = buf.ReadFrom(bodyBuffer)
return b64.StdEncoding.EncodeToString(buf.Bytes()), err
}
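The readBody helper above normalizes a captured HTTP body: if the Content-Encoding hint says gzip or deflate it decompresses first, and it always returns the result base64-encoded for transport. A minimal sketch of the plain (no Content-Encoding) path, not part of the diff; readBody is unexported, so the sketch assumes it lives alongside the helper in package tap:

```go
package tap

import (
	b64 "encoding/base64"
	"log"
)

// Sketch only, not part of the diff.
func exampleReadBodyPlain() {
	raw := []byte(`{"ok":true}`)

	// With no Content-Encoding the bytes are passed through untouched
	// and returned base64-encoded for transport.
	encoded, err := readBody(raw, nil)
	if err != nil {
		log.Printf("readBody failed: %v", err)
		return
	}

	plain, _ := b64.StdEncoding.DecodeString(encoded)
	log.Printf("round-tripped body: %s", plain) // prints {"ok":true}
}
```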
tap/outboundlinks.go (Normal file, 29 lines)
@@ -0,0 +1,29 @@
package tap

type OutboundLink struct {
Src string
DstIP string
DstPort int
}

func NewOutboundLinkWriter() *OutboundLinkWriter {
return &OutboundLinkWriter{
OutChan: make(chan *OutboundLink),
}
}

type OutboundLinkWriter struct {
OutChan chan *OutboundLink
}

func (olw *OutboundLinkWriter) WriteOutboundLink(src string, DstIP string, DstPort int) {
olw.OutChan <- &OutboundLink{
Src: src,
DstIP: DstIP,
DstPort: DstPort,
}
}

func (olw *OutboundLinkWriter) Stop() {
close(olw.OutChan)
}
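The new OutboundLinkWriter is a thin channel wrapper: WriteOutboundLink publishes a detected outbound connection on OutChan, and Stop closes the channel. A rough consumer-side sketch, not part of the diff; the real reader is wired up by whoever calls StartPassiveTapper:

```go
package tap

import "log"

// Sketch only, not part of the diff: one way a consumer could drain
// OutboundLinkWriter.OutChan until Stop() closes it.
func exampleConsumeOutboundLinks() {
	olw := NewOutboundLinkWriter()

	done := make(chan struct{})
	go func() {
		for link := range olw.OutChan {
			log.Printf("outbound link: %s -> %s:%d", link.Src, link.DstIP, link.DstPort)
		}
		close(done)
	}()

	// Producer side, e.g. the stream factory when it sees a remote destination.
	olw.WriteOutboundLink("10.0.0.12", "93.184.216.34", 443)

	olw.Stop() // closes OutChan and ends the range loop above
	<-done
}
```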
@@ -10,10 +10,8 @@ package tap

import (
"encoding/hex"
"encoding/json"
"flag"
"fmt"
"github.com/up9inc/mizu/shared"
"log"
"os"
"os/signal"
@@ -33,12 +31,10 @@ import (
)

const AppPortsEnvVar = "APP_PORTS"
const OutPortEnvVar = "WEB_SOCKET_PORT"
const maxHTTP2DataLenEnvVar = "HTTP2_DATA_SIZE_LIMIT"
// default is 1MB, more than the max size accepted by collector and traffic-dumper
const maxHTTP2DataLenDefault = 1 * 1024 * 1024
const cleanPeriod = time.Second * 10
const outboundThrottleCacheExpiryPeriod = time.Minute * 15
var remoteOnlyOutboundPorts = []int { 80, 443 }

func parseAppPorts(appPortsList string) []int {
@@ -46,7 +42,7 @@ func parseAppPorts(appPortsList string) []int {
for _, portStr := range strings.Split(appPortsList, ",") {
parsedInt, parseError := strconv.Atoi(portStr)
if parseError != nil {
fmt.Println("Provided app port ", portStr, " is not a valid number!")
log.Printf("Provided app port %v is not a valid number!", portStr)
} else {
ports = append(ports, parsedInt)
}
@@ -54,13 +50,6 @@ func parseAppPorts(appPortsList string) []int {
return ports
}

func parseHostAppAddresses(hostAppAddressesString string) []string {
if len(hostAppAddressesString) == 0 {
return []string{}
}
return strings.Split(hostAppAddressesString, ",")
}

var maxcount = flag.Int("c", -1, "Only grab this many packets, then exit")
var decoder = flag.String("decoder", "", "Name of the decoder to use (default: guess from capture)")
var statsevery = flag.Int("stats", 60, "Output statistics every N seconds")
@@ -90,7 +79,6 @@ var tstype = flag.String("timestamp_type", "", "Type of timestamps to use")
var promisc = flag.Bool("promisc", true, "Set promiscuous mode")
var anydirection = flag.Bool("anydirection", false, "Capture http requests to other hosts")
var staleTimeoutSeconds = flag.Int("staletimout", 120, "Max time in seconds to keep connections which don't transmit data")
var hostAppAddressesString = flag.String("targets", "", "Comma separated list of ip:ports to tap")

var memprofile = flag.String("memprofile", "", "Write memory profile")

@@ -121,24 +109,20 @@ var stats struct {
overlapPackets int
}

type CollectorMessage struct {
MessageType string
Ports *[]int `json:"ports,omitempty"`
Addresses *[]string `json:"addresses,omitempty"`
type TapOpts struct {
HostMode bool
}

var outputLevel int
var errorsMap map[string]uint
var errorsMapMutex sync.Mutex
var nErrors uint
var appPorts []int // global
var ownIps []string //global
var hostMode bool //global
var HostAppAddresses []string //global
var ownIps []string // global
var hostMode bool // global

/* minOutputLevel: Error will be printed only if outputLevel is above this value
* t: key for errorsMap (counting errors)
* s, a: arguments fmt.Printf
* s, a: arguments log.Printf
* Note: Too bad for perf that a... is evaluated
*/
func logError(minOutputLevel int, t string, s string, a ...interface{}) {
@@ -149,7 +133,7 @@ func logError(minOutputLevel int, t string, s string, a ...interface{}) {
errorsMapMutex.Unlock()
if outputLevel >= minOutputLevel {
formatStr := fmt.Sprintf("%s: %s", t, s)
fmt.Printf(formatStr, a...)
log.Printf(formatStr, a...)
}
}
func Error(t string, s string, a ...interface{}) {
@@ -160,12 +144,12 @@ func SilentError(t string, s string, a ...interface{}) {
}
func Info(s string, a ...interface{}) {
if outputLevel >= 1 {
fmt.Printf(s, a...)
log.Printf(s, a...)
}
}
func Debug(s string, a ...interface{}) {
if outputLevel >= 2 {
fmt.Printf(s, a...)
log.Printf(s, a...)
}
}

@@ -187,9 +171,8 @@ func inArrayString(arr []string, valueToCheck string) bool {
return false
}

/*
* The assembler context
*/
// Context
// The assembler context
type Context struct {
CaptureInfo gopacket.CaptureInfo
}
@@ -198,22 +181,27 @@ func (c *Context) GetCaptureInfo() gopacket.CaptureInfo {
return c.CaptureInfo
}

func StartPassiveTapper() <-chan *OutputChannelItem {
func StartPassiveTapper(opts *TapOpts) (<-chan *OutputChannelItem, <-chan *OutboundLink) {
hostMode = opts.HostMode

var harWriter *HarWriter
if *dumpToHar {
harWriter = NewHarWriter(*HarOutputDir, *harEntriesPerFile)
}
outboundLinkWriter := NewOutboundLinkWriter()

go startPassiveTapper(harWriter)
go startPassiveTapper(harWriter, outboundLinkWriter)

if harWriter != nil {
return harWriter.OutChan
return harWriter.OutChan, outboundLinkWriter.OutChan
}

return nil
return nil, outboundLinkWriter.OutChan
}

func startPassiveTapper(harWriter *HarWriter) {
func startPassiveTapper(harWriter *HarWriter, outboundLinkWriter *OutboundLinkWriter) {
log.SetFlags(log.LstdFlags | log.LUTC | log.Lshortfile)

defer util.Run()()
if *debug {
outputLevel = 2
@@ -226,68 +214,43 @@ func startPassiveTapper(harWriter *HarWriter) {

if localhostIPs, err := getLocalhostIPs(); err != nil {
// TODO: think this over
fmt.Println("Failed to get self IP addresses")
Error("Getting-Self-Address", "Error getting self ip address: %s (%v,%+v)\n", err, err, err)
log.Println("Failed to get self IP addresses")
Error("Getting-Self-Address", "Error getting self ip address: %s (%v,%+v)", err, err, err)
ownIps = make([]string, 0)
} else {
ownIps = localhostIPs
}

appPortsStr := os.Getenv(AppPortsEnvVar)
var appPorts []int
if appPortsStr == "" {
fmt.Println("Received empty/no APP_PORTS env var! only listening to http on port 80!")
log.Println("Received empty/no APP_PORTS env var! only listening to http on port 80!")
appPorts = make([]int, 0)
} else {
appPorts = parseAppPorts(appPortsStr)
}
tapOutputPort := os.Getenv(OutPortEnvVar)
if tapOutputPort == "" {
fmt.Println("Received empty/no WEB_SOCKET_PORT env var! falling back to port 8080")
tapOutputPort = "8080"
}
SetFilterPorts(appPorts)
envVal := os.Getenv(maxHTTP2DataLenEnvVar)
if envVal == "" {
fmt.Println("Received empty/no HTTP2_DATA_SIZE_LIMIT env var! falling back to", maxHTTP2DataLenDefault)
log.Println("Received empty/no HTTP2_DATA_SIZE_LIMIT env var! falling back to", maxHTTP2DataLenDefault)
maxHTTP2DataLen = maxHTTP2DataLenDefault
} else {
if convertedInt, err := strconv.Atoi(envVal); err != nil {
fmt.Println("Received invalid HTTP2_DATA_SIZE_LIMIT env var! falling back to", maxHTTP2DataLenDefault)
log.Println("Received invalid HTTP2_DATA_SIZE_LIMIT env var! falling back to", maxHTTP2DataLenDefault)
maxHTTP2DataLen = maxHTTP2DataLenDefault
} else {
fmt.Println("Received HTTP2_DATA_SIZE_LIMIT env var:", maxHTTP2DataLenDefault)
log.Println("Received HTTP2_DATA_SIZE_LIMIT env var:", maxHTTP2DataLenDefault)
maxHTTP2DataLen = convertedInt
}
}
hostMode = os.Getenv(shared.HostModeEnvVar) == "1"

fmt.Printf("App Ports: %v\n", appPorts)
fmt.Printf("Tap output websocket port: %s\n", tapOutputPort)

var onCollectorMessage = func(message []byte) {
var parsedMessage CollectorMessage
err := json.Unmarshal(message, &parsedMessage)
if err == nil {

if parsedMessage.MessageType == "setPorts" {
Debug("Got message from collector. Type: %s, Ports: %v\n", parsedMessage.MessageType, parsedMessage.Ports)
appPorts = *parsedMessage.Ports
} else if parsedMessage.MessageType == "setAddresses" {
Debug("Got message from collector. Type: %s, IPs: %v\n", parsedMessage.MessageType, parsedMessage.Addresses)
HostAppAddresses = *parsedMessage.Addresses
Info("Filtering for the following addresses: %s\n", HostAppAddresses)
}
} else {
Error("Collector-Message-Parsing", "Error parsing message from collector: %s (%v,%+v)\n", err, err, err)
}
}

go startOutputServer(tapOutputPort, onCollectorMessage)
log.Printf("App Ports: %v", gSettings.filterPorts)

var handle *pcap.Handle
var err error
if *fname != "" {
if handle, err = pcap.OpenOffline(*fname); err != nil {
log.Fatal("PCAP OpenOffline error:", err)
log.Fatalf("PCAP OpenOffline error: %v", err)
}
} else {
// This is a little complicated because we want to allow all possible options
@@ -313,15 +276,15 @@ func startPassiveTapper(harWriter *HarWriter) {
}
}
if handle, err = inactive.Activate(); err != nil {
log.Fatal("PCAP Activate error:", err)
log.Fatalf("PCAP Activate error: %v", err)
}
defer handle.Close()
}
if len(flag.Args()) > 0 {
bpffilter := strings.Join(flag.Args(), " ")
Info("Using BPF filter %q\n", bpffilter)
Info("Using BPF filter %q", bpffilter)
if err = handle.SetBPFFilter(bpffilter); err != nil {
log.Fatal("BPF filter error:", err)
log.Fatalf("BPF filter error: %v", err)
}
}

@@ -329,6 +292,7 @@ func startPassiveTapper(harWriter *HarWriter) {
harWriter.Start()
defer harWriter.Stop()
}
defer outboundLinkWriter.Stop()

var dec gopacket.Decoder
var ok bool
@@ -342,13 +306,18 @@ func startPassiveTapper(harWriter *HarWriter) {
source := gopacket.NewPacketSource(handle, dec)
source.Lazy = *lazy
source.NoCopy = true
Info("Starting to read packets\n")
Info("Starting to read packets")
count := 0
bytes := int64(0)
start := time.Now()
defragger := ip4defrag.NewIPv4Defragmenter()

streamFactory := &tcpStreamFactory{doHTTP: !*nohttp, harWriter: harWriter}
streamFactory := &tcpStreamFactory{
doHTTP: !*nohttp,
harWriter: harWriter,
outbountLinkWriter: outboundLinkWriter,
}
streamPool := reassembly.NewStreamPool(streamFactory)
assembler := reassembly.NewAssembler(streamPool)
var assemblerMutex sync.Mutex
@@ -378,7 +347,7 @@ func startPassiveTapper(harWriter *HarWriter) {
errorMapLen := len(errorsMap)
errorsSummery := fmt.Sprintf("%v", errorsMap)
errorsMapMutex.Unlock()
fmt.Printf("Processed %v packets (%v bytes) in %v (errors: %v, errTypes:%v)\nErrors Summary: %s\n",
log.Printf("Processed %v packets (%v bytes) in %v (errors: %v, errTypes:%v) - Errors Summary: %s",
count,
bytes,
time.Since(start),
@@ -390,8 +359,8 @@ func startPassiveTapper(harWriter *HarWriter) {
// At this moment
memStats := runtime.MemStats{}
runtime.ReadMemStats(&memStats)
fmt.Printf(
"mem: %d, goroutines: %d, unmatched messages: %d\n",
log.Printf(
"mem: %d, goroutines: %d, unmatched messages: %d",
memStats.HeapAlloc,
runtime.NumGoroutine(),
reqResMatcher.openMessagesMap.Count(),
@@ -400,8 +369,8 @@ func startPassiveTapper(harWriter *HarWriter) {
// Since the last print
cleanStats := cleaner.dumpStats()
appStats := statsTracker.dumpStats()
fmt.Printf(
"flushed connections %d, closed connections: %d, deleted messages: %d, matched messages: %d\n",
log.Printf(
"flushed connections %d, closed connections: %d, deleted messages: %d, matched messages: %d",
cleanStats.flushed,
cleanStats.closed,
cleanStats.deleted,
@@ -412,11 +381,11 @@ func startPassiveTapper(harWriter *HarWriter) {

for packet := range source.Packets() {
count++
Debug("PACKET #%d\n", count)
Debug("PACKET #%d", count)
data := packet.Data()
bytes += int64(len(data))
if *hexdumppkt {
Debug("Packet content (%d/0x%x)\n%s\n", len(data), len(data), hex.Dump(data))
Debug("Packet content (%d/0x%x) - %s", len(data), len(data), hex.Dump(data))
}

// defrag the IPv4 packet if required
@@ -431,18 +400,18 @@ func startPassiveTapper(harWriter *HarWriter) {
if err != nil {
log.Fatalln("Error while de-fragmenting", err)
} else if newip4 == nil {
Debug("Fragment...\n")
Debug("Fragment...")
continue // packet fragment, we don't have whole packet yet.
}
if newip4.Length != l {
stats.ipdefrag++
Debug("Decoding re-assembled packet: %s\n", newip4.NextLayerType())
Debug("Decoding re-assembled packet: %s", newip4.NextLayerType())
pb, ok := packet.(gopacket.PacketBuilder)
if !ok {
panic("Not a PacketBuilder")
log.Panic("Not a PacketBuilder")
}
nextDecoder := newip4.NextLayerType()
nextDecoder.Decode(newip4.Payload, pb)
_ = nextDecoder.Decode(newip4.Payload, pb)
}
}

@@ -459,7 +428,7 @@ func startPassiveTapper(harWriter *HarWriter) {
CaptureInfo: packet.Metadata().CaptureInfo,
}
stats.totalsz += len(tcp.Payload)
//fmt.Println(packet.NetworkLayer().NetworkFlow().Src(), ":", tcp.SrcPort, " -> ", packet.NetworkLayer().NetworkFlow().Dst(), ":", tcp.DstPort)
// log.Println(packet.NetworkLayer().NetworkFlow().Src(), ":", tcp.SrcPort, " -> ", packet.NetworkLayer().NetworkFlow().Dst(), ":", tcp.DstPort)
assemblerMutex.Lock()
assembler.AssembleWithContext(packet.NetworkLayer().NetworkFlow(), tcp, &c)
assemblerMutex.Unlock()
@@ -470,11 +439,11 @@ func startPassiveTapper(harWriter *HarWriter) {
errorsMapMutex.Lock()
errorMapLen := len(errorsMap)
errorsMapMutex.Unlock()
fmt.Fprintf(os.Stderr, "Processed %v packets (%v bytes) in %v (errors: %v, errTypes:%v)\n", count, bytes, time.Since(start), nErrors, errorMapLen)
log.Printf("Processed %v packets (%v bytes) in %v (errors: %v, errTypes:%v)", count, bytes, time.Since(start), nErrors, errorMapLen)
}
select {
case <-signalChan:
fmt.Fprintf(os.Stderr, "\nCaught SIGINT: aborting\n")
log.Printf("Caught SIGINT: aborting")
done = true
default:
// NOP: continue
@@ -497,34 +466,34 @@ func startPassiveTapper(harWriter *HarWriter) {
if err != nil {
log.Fatal(err)
}
pprof.WriteHeapProfile(f)
f.Close()
_ = pprof.WriteHeapProfile(f)
_ = f.Close()
}

streamFactory.WaitGoRoutines()
assemblerMutex.Lock()
Debug("%s\n", assembler.Dump())
Debug("%s", assembler.Dump())
assemblerMutex.Unlock()
if !*nodefrag {
fmt.Printf("IPdefrag:\t\t%d\n", stats.ipdefrag)
log.Printf("IPdefrag:\t\t%d", stats.ipdefrag)
}
fmt.Printf("TCP stats:\n")
fmt.Printf(" missed bytes:\t\t%d\n", stats.missedBytes)
fmt.Printf(" total packets:\t\t%d\n", stats.pkt)
fmt.Printf(" rejected FSM:\t\t%d\n", stats.rejectFsm)
fmt.Printf(" rejected Options:\t%d\n", stats.rejectOpt)
fmt.Printf(" reassembled bytes:\t%d\n", stats.sz)
fmt.Printf(" total TCP bytes:\t%d\n", stats.totalsz)
fmt.Printf(" conn rejected FSM:\t%d\n", stats.rejectConnFsm)
fmt.Printf(" reassembled chunks:\t%d\n", stats.reassembled)
fmt.Printf(" out-of-order packets:\t%d\n", stats.outOfOrderPackets)
fmt.Printf(" out-of-order bytes:\t%d\n", stats.outOfOrderBytes)
fmt.Printf(" biggest-chunk packets:\t%d\n", stats.biggestChunkPackets)
fmt.Printf(" biggest-chunk bytes:\t%d\n", stats.biggestChunkBytes)
fmt.Printf(" overlap packets:\t%d\n", stats.overlapPackets)
fmt.Printf(" overlap bytes:\t\t%d\n", stats.overlapBytes)
fmt.Printf("Errors: %d\n", nErrors)
log.Printf("TCP stats:")
log.Printf(" missed bytes:\t\t%d", stats.missedBytes)
log.Printf(" total packets:\t\t%d", stats.pkt)
log.Printf(" rejected FSM:\t\t%d", stats.rejectFsm)
log.Printf(" rejected Options:\t%d", stats.rejectOpt)
log.Printf(" reassembled bytes:\t%d", stats.sz)
log.Printf(" total TCP bytes:\t%d", stats.totalsz)
log.Printf(" conn rejected FSM:\t%d", stats.rejectConnFsm)
log.Printf(" reassembled chunks:\t%d", stats.reassembled)
log.Printf(" out-of-order packets:\t%d", stats.outOfOrderPackets)
log.Printf(" out-of-order bytes:\t%d", stats.outOfOrderBytes)
log.Printf(" biggest-chunk packets:\t%d", stats.biggestChunkPackets)
log.Printf(" biggest-chunk bytes:\t%d", stats.biggestChunkBytes)
log.Printf(" overlap packets:\t%d", stats.overlapPackets)
log.Printf(" overlap bytes:\t\t%d", stats.overlapBytes)
log.Printf("Errors: %d", nErrors)
for e := range errorsMap {
fmt.Printf(" %s:\t\t%d\n", e, errorsMap[e])
log.Printf(" %s:\t\t%d", e, errorsMap[e])
}
}
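Taken together, the hunks above change StartPassiveTapper to accept a TapOpts struct and to return two channels: HAR output items (nil unless HAR dumping is enabled) and outbound links. A hedged caller sketch, not part of the diff; the import path is assumed from the repository layout:

```go
package main

import (
	"log"

	"github.com/up9inc/mizu/tap" // assumed import path
)

// Sketch only, not part of the diff: a minimal consumer of the two
// channels returned by the new StartPassiveTapper signature.
func main() {
	harItems, outboundLinks := tap.StartPassiveTapper(&tap.TapOpts{HostMode: false})

	go func() {
		for link := range outboundLinks {
			log.Printf("outbound: %s -> %s:%d", link.Src, link.DstIP, link.DstPort)
		}
	}()

	if harItems != nil {
		for item := range harItems {
			_ = item // the real agent forwards these to its websocket clients
		}
		return
	}
	// HAR dumping disabled; keep the process alive for the tapper.
	select {}
}
```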
tap/settings.go (Normal file, 31 lines)
@@ -0,0 +1,31 @@
package tap

type globalSettings struct {
filterPorts []int
filterAuthorities []string
}

var gSettings = &globalSettings{
filterPorts: []int{},
filterAuthorities: []string{},
}

func SetFilterPorts(ports []int) {
gSettings.filterPorts = ports
}

func GetFilterPorts() []int {
ports := make([]int, len(gSettings.filterPorts))
copy(ports, gSettings.filterPorts)
return ports
}

func SetFilterAuthorities(ipAddresses []string) {
gSettings.filterAuthorities = ipAddresses
}

func GetFilterIPs() []string {
addresses := make([]string, len(gSettings.filterAuthorities))
copy(addresses, gSettings.filterAuthorities)
return addresses
}
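tap/settings.go replaces the old appPorts / HostAppAddresses globals with a single gSettings value behind setters and copy-on-read getters. An in-package usage sketch, not part of the diff:

```go
package tap

import "log"

// Sketch only, not part of the diff: the getters hand back copies, so a
// caller cannot mutate the shared filter slices by accident.
func exampleFilterSettings() {
	SetFilterPorts([]int{80, 8080})
	SetFilterAuthorities([]string{"10.0.0.5:8080", "10.0.0.6"})

	ports := GetFilterPorts()
	ports[0] = 9999 // modifies the copy only, not gSettings.filterPorts

	log.Printf("ports: %v, authorities: %v", GetFilterPorts(), GetFilterIPs())
}
```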
@@ -34,7 +34,7 @@ type tcpStream struct {
func (t *tcpStream) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir reassembly.TCPFlowDirection, nextSeq reassembly.Sequence, start *bool, ac reassembly.AssemblerContext) bool {
// FSM
if !t.tcpstate.CheckState(tcp, dir) {
//SilentError("FSM", "%s: Packet rejected by FSM (state:%s)\n", t.ident, t.tcpstate.String())
SilentError("FSM-rejection", "%s: Packet rejected by FSM (state:%s)", t.ident, t.tcpstate.String())
stats.rejectFsm++
if !t.fsmerr {
t.fsmerr = true
@@ -47,7 +47,7 @@ func (t *tcpStream) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir reassem
// Options
err := t.optchecker.Accept(tcp, ci, dir, nextSeq, start)
if err != nil {
//SilentError("OptionChecker", "%s: Packet rejected by OptionChecker: %s\n", t.ident, err)
SilentError("OptionChecker-rejection", "%s: Packet rejected by OptionChecker: %s", t.ident, err)
stats.rejectOpt++
if !*nooptcheck {
return false
@@ -58,10 +58,10 @@ func (t *tcpStream) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir reassem
if *checksum {
c, err := tcp.ComputeChecksum()
if err != nil {
SilentError("ChecksumCompute", "%s: Got error computing checksum: %s\n", t.ident, err)
SilentError("ChecksumCompute", "%s: Got error computing checksum: %s", t.ident, err)
accept = false
} else if c != 0x0 {
SilentError("Checksum", "%s: Invalid checksum: 0x%x\n", t.ident, c)
SilentError("Checksum", "%s: Invalid checksum: 0x%x", t.ident, c)
accept = false
}
}
@@ -95,7 +95,7 @@ func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.Ass
if sgStats.OverlapBytes != 0 && sgStats.OverlapPackets == 0 {
// In the original example this was handled with panic().
// I don't know what this error means or how to handle it properly.
SilentError("Invalid-Overlap", "bytes:%d, pkts:%d\n", sgStats.OverlapBytes, sgStats.OverlapPackets)
SilentError("Invalid-Overlap", "bytes:%d, pkts:%d", sgStats.OverlapBytes, sgStats.OverlapPackets)
}
stats.overlapBytes += sgStats.OverlapBytes
stats.overlapPackets += sgStats.OverlapPackets
@@ -106,7 +106,7 @@ func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.Ass
} else {
ident = fmt.Sprintf("%v %v(%s): ", t.net.Reverse(), t.transport.Reverse(), dir)
}
Debug("%s: SG reassembled packet with %d bytes (start:%v,end:%v,skip:%d,saved:%d,nb:%d,%d,overlap:%d,%d)\n", ident, length, start, end, skip, saved, sgStats.Packets, sgStats.Chunks, sgStats.OverlapBytes, sgStats.OverlapPackets)
Debug("%s: SG reassembled packet with %d bytes (start:%v,end:%v,skip:%d,saved:%d,nb:%d,%d,overlap:%d,%d)", ident, length, start, end, skip, saved, sgStats.Packets, sgStats.Chunks, sgStats.OverlapBytes, sgStats.OverlapPackets)
if skip == -1 && *allowmissinginit {
// this is allowed
} else if skip != 0 {
@@ -125,18 +125,18 @@ func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.Ass
}
dnsSize := binary.BigEndian.Uint16(data[:2])
missing := int(dnsSize) - len(data[2:])
Debug("dnsSize: %d, missing: %d\n", dnsSize, missing)
Debug("dnsSize: %d, missing: %d", dnsSize, missing)
if missing > 0 {
Info("Missing some bytes: %d\n", missing)
Info("Missing some bytes: %d", missing)
sg.KeepFrom(0)
return
}
p := gopacket.NewDecodingLayerParser(layers.LayerTypeDNS, dns)
err := p.DecodeLayers(data[2:], &decoded)
if err != nil {
SilentError("DNS-parser", "Failed to decode DNS: %v\n", err)
SilentError("DNS-parser", "Failed to decode DNS: %v", err)
} else {
Debug("DNS: %s\n", gopacket.LayerDump(dns))
Debug("DNS: %s", gopacket.LayerDump(dns))
}
if len(data) > 2+int(dnsSize) {
sg.KeepFrom(2 + int(dnsSize))
@@ -144,7 +144,7 @@ func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.Ass
} else if t.isHTTP {
if length > 0 {
if *hexdump {
Debug("Feeding http with:\n%s", hex.Dump(data))
Debug("Feeding http with:%s", hex.Dump(data))
}
// This is where we pass the reassembled information onwards
// This channel is read by an httpReader object
@@ -158,7 +158,7 @@ func (t *tcpStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.Ass
}

func (t *tcpStream) ReassemblyComplete(ac reassembly.AssemblerContext) bool {
Debug("%s: Connection closed\n", t.ident)
Debug("%s: Connection closed", t.ident)
if t.isHTTP {
close(t.client.msgQueue)
close(t.server.msgQueue)
@@ -15,24 +15,27 @@ import (
* Generates a new tcp stream for each new tcp connection. Closes the stream when the connection closes.
*/
type tcpStreamFactory struct {
wg sync.WaitGroup
doHTTP bool
harWriter *HarWriter
wg sync.WaitGroup
doHTTP bool
harWriter *HarWriter
outbountLinkWriter *OutboundLinkWriter
}

func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.TCP, ac reassembly.AssemblerContext) reassembly.Stream {
Debug("* NEW: %s %s\n", net, transport)
Debug("* NEW: %s %s", net, transport)
fsmOptions := reassembly.TCPSimpleFSMOptions{
SupportMissingEstablishment: *allowmissinginit,
}
Debug("Current App Ports: %v\n", appPorts)
Debug("Current App Ports: %v", gSettings.filterPorts)
srcIp := net.Src().String()
dstIp := net.Dst().String()
dstPort := int(tcp.DstPort)

if factory.shouldNotifyOnOutboundLink(dstIp, dstPort) {
broadcastOutboundLink(net.Src().String(), dstIp, dstPort)
factory.outbountLinkWriter.WriteOutboundLink(net.Src().String(), dstIp, dstPort)
}
isHTTP := factory.shouldTap(dstIp, dstPort)
props := factory.getStreamProps(srcIp, dstIp, dstPort)
isHTTP := props.isTapTarget
stream := &tcpStream{
net: net,
transport: transport,
@@ -56,6 +59,7 @@ func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.T
hexdump: *hexdump,
parent: stream,
isClient: true,
isOutgoing: props.isOutgoing,
harWriter: factory.harWriter,
}
stream.server = httpReader{
@@ -69,6 +73,7 @@ func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.T
},
hexdump: *hexdump,
parent: stream,
isOutgoing: props.isOutgoing,
harWriter: factory.harWriter,
}
factory.wg.Add(2)
@@ -83,28 +88,29 @@ func (factory *tcpStreamFactory) WaitGoRoutines() {
factory.wg.Wait()
}

func (factory *tcpStreamFactory) shouldTap(dstIP string, dstPort int) bool {
func (factory *tcpStreamFactory) getStreamProps(srcIP string, dstIP string, dstPort int) *streamProps {
if hostMode {
if inArrayString(HostAppAddresses, fmt.Sprintf("%s:%d", dstIP, dstPort)) == true {
return true
} else if inArrayString(HostAppAddresses, dstIP) == true {
return true
if inArrayString(gSettings.filterAuthorities, fmt.Sprintf("%s:%d", dstIP, dstPort)) == true {
return &streamProps{isTapTarget: true, isOutgoing: false}
} else if inArrayString(gSettings.filterAuthorities, dstIP) == true {
return &streamProps{isTapTarget: true, isOutgoing: false}
} else if *anydirection && inArrayString(gSettings.filterAuthorities, srcIP) == true {
return &streamProps{isTapTarget: true, isOutgoing: true}
}
return false
return &streamProps{isTapTarget: false}
} else {
isTappedPort := dstPort == 80 || (appPorts != nil && (inArrayInt(appPorts, dstPort)))
isTappedPort := dstPort == 80 || (gSettings.filterPorts != nil && (inArrayInt(gSettings.filterPorts, dstPort)))
if !isTappedPort {
return false
return &streamProps{isTapTarget: false, isOutgoing: false}
}

if !*anydirection {
isDirectedHere := inArrayString(ownIps, dstIP)
if !isDirectedHere {
return false
}
isOutgoing := !inArrayString(ownIps, dstIP)

if !*anydirection && isOutgoing {
return &streamProps{isTapTarget: false, isOutgoing: isOutgoing}
}

return true
return &streamProps{isTapTarget: true}
}
}

@@ -115,3 +121,9 @@ func (factory *tcpStreamFactory) shouldNotifyOnOutboundLink(dstIP string, dstPor
}
return true
}

type streamProps struct {
isTapTarget bool
isOutgoing bool
}
@@ -10,6 +10,8 @@
display: flex
align-items: center
padding-left: 24px
padding-right: 24px
justify-content: space-between

.title
font-size: 45px
@@ -1,18 +1,41 @@
import React from 'react';
import {HarPage} from "./components/HarPage";
import React, {useState} from 'react';
import './App.sass';
import logo from './components/assets/Mizu.svg';
import {Button} from "@material-ui/core";
import {HarPage} from "./components/HarPage";

const App = () => {
return (
<div className="mizuApp">
<div className="header">
<div className="title"><img src={logo} alt="logo"/></div>
<div className="description">Traffic viewer for Kubernetes</div>

const [analyzeStatus, setAnalyzeStatus] = useState(null);

return (
<div className="mizuApp">
<div className="header">
<div style={{display: "flex", alignItems: "center"}}>
<div className="title"><img src={logo} alt="logo"/></div>
<div className="description">Traffic viewer for Kubernetes</div>
</div>
<div>
{analyzeStatus?.isAnalyzing &&
<div
title={!analyzeStatus?.isRemoteReady ? "Analysis is not ready yet" : "Go To see further analysis"}>
<Button
variant="contained"
color="primary"
disabled={!analyzeStatus?.isRemoteReady}
onClick={() => {
window.open(analyzeStatus?.remoteUrl)
}}>
Analysis
</Button>
</div>
}
</div>
</div>
<HarPage setAnalyzeStatus={setAnalyzeStatus}/>
</div>
<HarPage/>
</div>
);
);
}

export default App;
@@ -1,7 +1,13 @@
import React from "react";
import styles from './style/HarEntry.module.sass';
import StatusCode from "./StatusCode";
import StatusCode, {getClassification, StatusCodeClassification} from "./StatusCode";
import {EndpointPath} from "./EndpointPath";
import ingoingIconSuccess from "./assets/ingoing-traffic-success.svg"
import ingoingIconFailure from "./assets/ingoing-traffic-failure.svg"
import ingoingIconNeutral from "./assets/ingoing-traffic-neutral.svg"
import outgoingIconSuccess from "./assets/outgoing-traffic-success.svg"
import outgoingIconFailure from "./assets/outgoing-traffic-failure.svg"
import outgoingIconNeutral from "./assets/outgoing-traffic-neutral.svg"

interface HAREntry {
method?: string,
@@ -12,6 +18,7 @@ interface HAREntry {
url?: string;
isCurrentRevision?: boolean;
timestamp: Date;
isOutgoing?: boolean;
}

interface HAREntryProps {
@@ -21,6 +28,26 @@ interface HAREntryProps {
}

export const HarEntry: React.FC<HAREntryProps> = ({entry, setFocusedEntryId, isSelected}) => {
const classification = getClassification(entry.statusCode)
let ingoingIcon;
let outgoingIcon;
switch(classification) {
case StatusCodeClassification.SUCCESS: {
ingoingIcon = ingoingIconSuccess;
outgoingIcon = outgoingIconSuccess;
break;
}
case StatusCodeClassification.FAILURE: {
ingoingIcon = ingoingIconFailure;
outgoingIcon = outgoingIconFailure;
break;
}
case StatusCodeClassification.NEUTRAL: {
ingoingIcon = ingoingIconNeutral;
outgoingIcon = outgoingIconNeutral;
break;
}
}

return <>
<div id={entry.id} className={`${styles.row} ${isSelected ? styles.rowSelected : ''}`} onClick={() => setFocusedEntryId(entry.id)}>
@@ -33,7 +60,14 @@ export const HarEntry: React.FC<HAREntryProps> = ({entry, setFocusedEntryId, isS
{entry.service}
</div>
</div>
<div className={styles.directionContainer}>
{entry.isOutgoing ?
<img src={outgoingIcon} alt="outgoing traffic" title="outgoing"/>
:
<img src={ingoingIcon} alt="ingoing traffic" title="ingoing"/>
}
</div>
<div className={styles.timestamp}>{new Date(+entry.timestamp)?.toLocaleString()}</div>
</div>
</>
};
};
@@ -35,7 +35,11 @@ enum ConnectionStatus {
Paused
}

export const HarPage: React.FC = () => {
interface HarPageProps {
setAnalyzeStatus: (status: any) => void;
}

export const HarPage: React.FC<HarPageProps> = ({setAnalyzeStatus}) => {

const classes = useLayoutStyles();

@@ -60,21 +64,21 @@ export const HarPage: React.FC = () => {
ws.current.onclose = () => setConnection(ConnectionStatus.Closed);
}

if(ws.current) {
if (ws.current) {
ws.current.onmessage = e => {
if(!e?.data) return;
if (!e?.data) return;
const message = JSON.parse(e.data);

switch (message.messageType) {
case "entry":
const entry = message.data
if(connection === ConnectionStatus.Paused) {
if (connection === ConnectionStatus.Paused) {
setNoMoreDataBottom(false)
return;
}
if(!focusedEntryId) setFocusedEntryId(entry.id)
if (!focusedEntryId) setFocusedEntryId(entry.id)
let newEntries = [...entries];
if(entries.length === 1000) {
if (entries.length === 1000) {
newEntries = newEntries.splice(1);
setNoMoreDataTop(false);
}
@@ -83,6 +87,9 @@ export const HarPage: React.FC = () => {
case "status":
setTappingStatus(message.tappingStatus);
break
case "analyzeStatus":
setAnalyzeStatus(message.analyzeStatus);
break
default:
console.error(`unsupported websocket message type, Got: ${message.messageType}`)
}
@@ -94,19 +101,23 @@ export const HarPage: React.FC = () => {
fetch(`http://localhost:8899/api/tapStatus`)
.then(response => response.json())
.then(data => setTappingStatus(data));

fetch(`http://localhost:8899/api/analyzeStatus`)
.then(response => response.json())
.then(data => setAnalyzeStatus(data));
}, []);

useEffect(() => {
if(!focusedEntryId) return;
if (!focusedEntryId) return;
setSelectedHarEntry(null)
fetch(`http://localhost:8899/api/entries/${focusedEntryId}`)
.then(response => response.json())
.then(data => setSelectedHarEntry(data));
},[focusedEntryId])
}, [focusedEntryId])

const toggleConnection = () => {
setConnection(connection === ConnectionStatus.Connected ? ConnectionStatus.Paused : ConnectionStatus.Connected );
setConnection(connection === ConnectionStatus.Connected ? ConnectionStatus.Paused : ConnectionStatus.Connected);
}

const getConnectionStatusClass = (isContainer) => {
@@ -135,11 +146,12 @@ export const HarPage: React.FC = () => {
return (
<div className="HarPage">
<div className="harPageHeader">
<img style={{cursor: "pointer", marginRight: 15, height: 30}} alt="pause" src={connection === ConnectionStatus.Connected ? pauseIcon : playIcon} onClick={toggleConnection}/>
<img style={{cursor: "pointer", marginRight: 15, height: 30}} alt="pause"
src={connection === ConnectionStatus.Connected ? pauseIcon : playIcon} onClick={toggleConnection}/>
<div className="connectionText">
{getConnectionTitle()}
<div className={"indicatorContainer " + getConnectionStatusClass(true)}>
<div className={"indicator " + getConnectionStatusClass(false)} />
<div className={"indicator " + getConnectionStatusClass(false)}/>
</div>
</div>
</div>
@@ -169,7 +181,8 @@ export const HarPage: React.FC = () => {
</div>
</div>
<div className={classes.details}>
{selectedHarEntry && <HAREntryDetailed harEntry={selectedHarEntry} classes={{root: classes.harViewer}}/>}
{selectedHarEntry &&
<HAREntryDetailed harEntry={selectedHarEntry} classes={{root: classes.harViewer}}/>}
</div>
</div>}
{tappingStatus?.pods != null && <StatusBar tappingStatus={tappingStatus}/>}
@@ -1,7 +1,7 @@
import React from "react";
import styles from './style/StatusCode.module.sass';

enum StatusCodeClassification {
export enum StatusCodeClassification {
SUCCESS = "success",
FAILURE = "failure",
NEUTRAL = "neutral"
@@ -14,6 +14,12 @@ interface HAREntryProps {

const StatusCode: React.FC<HAREntryProps> = ({statusCode}) => {

const classification = getClassification(statusCode)

return <span className={`${styles[classification]} ${styles.base}`}>{statusCode}</span>
};

export function getClassification(statusCode: number): string {
let classification = StatusCodeClassification.NEUTRAL;

if (statusCode >= 200 && statusCode <= 399) {
@@ -22,7 +28,7 @@ const StatusCode: React.FC<HAREntryProps> = ({statusCode}) => {
classification = StatusCodeClassification.FAILURE;
}

return <span className={`${styles[classification]} ${styles.base}`}>{statusCode}</span>
};
return classification
}

export default StatusCode;
export default StatusCode;
ui/src/components/assets/ingoing-traffic-failure.svg (Normal file, 5 lines, 800 B)
@@ -0,0 +1,5 @@
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M16.5175 11.1465C16.8392 10.8869 17 10.4434 17 10C17 9.55657 16.8392 9.11314 16.5175 8.85348L12.5425 5.64459C13.2682 5.23422 14.1067 5 15 5C17.7614 5 20 7.23858 20 10C20 12.7614 17.7614 15 15 15C14.1067 15 13.2682 14.7658 12.5425 14.3554L16.5175 11.1465Z" fill="#BCCEFD"/>
<path d="M16 10C16 10.3167 15.8749 10.6335 15.6247 10.8189L10.1706 14.8624C9.65543 15.2444 9 14.7858 9 14.0435V5.95652C9 5.21417 9.65543 4.75564 10.1706 5.13758L15.6247 9.18106C15.8749 9.36653 16 9.68326 16 10Z" fill="#EB5757"/>
<path d="M0 10C0 8.89543 0.895431 8 2 8H10C11.1046 8 12 8.89543 12 10C12 11.1046 11.1046 12 10 12H2C0.895431 12 0 11.1046 0 10Z" fill="#EB5757"/>
</svg>

ui/src/components/assets/ingoing-traffic-neutral.svg (Normal file, 5 lines, 794 B)
@@ -0,0 +1,5 @@
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M16.5175 11.1465C16.8392 10.8869 17 10.4434 17 10C17 9.55657 16.8392 9.11314 16.5175 8.85348L12.5425 5.64459C13.2682 5.23422 14.1067 5 15 5C17.7614 5 20 7.23858 20 10C20 12.7614 17.7614 15 15 15C14.1067 15 13.2682 14.7658 12.5425 14.3554L16.5175 11.1465Z" fill="#BCCEFD"/>
<path d="M16 10C16 10.3167 15.8749 10.6335 15.6247 10.8189L10.1706 14.8624C9.65543 15.2444 9 14.7858 9 14.0435V5.95652C9 5.21417 9.65543 4.75564 10.1706 5.13758L15.6247 9.18106C15.8749 9.36653 16 9.68326 16 10Z" fill="gray"/>
<path d="M0 10C0 8.89543 0.895431 8 2 8H10C11.1046 8 12 8.89543 12 10C12 11.1046 11.1046 12 10 12H2C0.895431 12 0 11.1046 0 10Z" fill="gray"/>
</svg>

ui/src/components/assets/ingoing-traffic-success.svg (Normal file, 5 lines, 800 B)
@@ -0,0 +1,5 @@
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M16.5175 11.1465C16.8392 10.8869 17 10.4434 17 10C17 9.55657 16.8392 9.11314 16.5175 8.85348L12.5425 5.64459C13.2682 5.23422 14.1067 5 15 5C17.7614 5 20 7.23858 20 10C20 12.7614 17.7614 15 15 15C14.1067 15 13.2682 14.7658 12.5425 14.3554L16.5175 11.1465Z" fill="#BCCEFD"/>
<path d="M16 10C16 10.3167 15.8749 10.6335 15.6247 10.8189L10.1706 14.8624C9.65543 15.2444 9 14.7858 9 14.0435V5.95652C9 5.21417 9.65543 4.75564 10.1706 5.13758L15.6247 9.18106C15.8749 9.36653 16 9.68326 16 10Z" fill="#27AE60"/>
<path d="M0 10C0 8.89543 0.895431 8 2 8H10C11.1046 8 12 8.89543 12 10C12 11.1046 11.1046 12 10 12H2C0.895431 12 0 11.1046 0 10Z" fill="#27AE60"/>
</svg>

ui/src/components/assets/outgoing-traffic-failure.svg (Normal file, 5 lines, 736 B)
@@ -0,0 +1,5 @@
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M15 15C17.7614 15 20 12.7615 20 10C20 7.23861 17.7614 5.00003 15 5.00003C13.3642 5.00003 11.9118 5.78558 10.9996 7.00003H14C15.6569 7.00003 17 8.34318 17 10C17 11.6569 15.6569 13 14 13H10.9996C11.9118 14.2145 13.3642 15 15 15Z" fill="#BCCEFD"/>
<rect x="4" y="8.00003" width="12" height="4" rx="2" fill="#EB5757"/>
<path d="M5.96244e-08 10C6.34015e-08 9.68329 0.125088 9.36656 0.375266 9.18109L5.82939 5.13761C6.34457 4.75567 7 5.2142 7 5.95655L7 14.0435C7 14.7859 6.34457 15.2444 5.82939 14.8625L0.375266 10.819C0.125088 10.6335 5.58474e-08 10.3168 5.96244e-08 10Z" fill="#EB5757"/>
</svg>

ui/src/components/assets/outgoing-traffic-neutral.svg (Normal file, 5 lines, 730 B)
@@ -0,0 +1,5 @@
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M15 15C17.7614 15 20 12.7615 20 10C20 7.23861 17.7614 5.00003 15 5.00003C13.3642 5.00003 11.9118 5.78558 10.9996 7.00003H14C15.6569 7.00003 17 8.34318 17 10C17 11.6569 15.6569 13 14 13H10.9996C11.9118 14.2145 13.3642 15 15 15Z" fill="#BCCEFD"/>
<rect x="4" y="8.00003" width="12" height="4" rx="2" fill="gray"/>
<path d="M5.96244e-08 10C6.34015e-08 9.68329 0.125088 9.36656 0.375266 9.18109L5.82939 5.13761C6.34457 4.75567 7 5.2142 7 5.95655L7 14.0435C7 14.7859 6.34457 15.2444 5.82939 14.8625L0.375266 10.819C0.125088 10.6335 5.58474e-08 10.3168 5.96244e-08 10Z" fill="gray"/>
</svg>

ui/src/components/assets/outgoing-traffic-success.svg (Normal file, 5 lines, 736 B)
@@ -0,0 +1,5 @@
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M15 15C17.7614 15 20 12.7615 20 10C20 7.23861 17.7614 5.00003 15 5.00003C13.3642 5.00003 11.9118 5.78558 10.9996 7.00003H14C15.6569 7.00003 17 8.34318 17 10C17 11.6569 15.6569 13 14 13H10.9996C11.9118 14.2145 13.3642 15 15 15Z" fill="#BCCEFD"/>
<rect x="4" y="8.00003" width="12" height="4" rx="2" fill="#27AE60"/>
<path d="M5.96244e-08 10C6.34015e-08 9.68329 0.125088 9.36656 0.375266 9.18109L5.82939 5.13761C6.34457 4.75567 7 5.2142 7 5.95655L7 14.0435C7 14.7859 6.34457 15.2444 5.82939 14.8625L0.375266 10.819C0.125088 10.6335 5.58474e-08 10.3168 5.96244e-08 10Z" fill="#27AE60"/>
</svg>
@@ -37,9 +37,10 @@
.timestamp
font-size: 12px
color: $secondary-font-color
padding-left: 8px
padding-right: 8px
padding-left: 12px
flex-shrink: 0
width: 145px
text-align: left

.endpointServiceContainer
display: flex
@@ -47,4 +48,10 @@
overflow: hidden
padding-right: 10px
padding-left: 10px
flex-grow: 1
flex-grow: 1

.directionContainer
display: flex
border-right: 1px solid $data-background-color
padding: 4px
padding-right: 12px

@@ -19,4 +19,4 @@ $blue-gray: #494677;
successColor: $success-color;
failureColor: $failure-color;
blueGray: $blue-gray;
}
}