Compare commits

...

9 Commits

Author SHA1 Message Date
David Levanon
27a73e21fb Read from service mesh network namespaces upon update (#944) 2022-03-30 13:56:37 +03:00
Igor Gov
8eeb0e54c9 Changing unit tests workflow timeout to 30 minutes 2022-03-30 11:52:47 +03:00
Andrey Pokhilko
97db24aeba OAS: rework data feeding + sampleIDs (#917)
* Call OAS feeder

* Don't call old OAS code

* Rework calls

* Work on it

* Put back rules

* Make it compile

* start thinking of test

* Compiles

* Save

* Fixes

* Save

* Fixing

* Trying to fake conn

* add timeout

* Test timeout

* Fix tests

* Only build OAS for HTTP entries

* Remove some dead code

* Adding SampleIDs

* Cosmetics

* lint

* Revert rename

* Sample ID for content

* Cleanup

* Add more sample IDs

* Checking hypothesis

* Move assignment place a bit

* Cosmetics

* Update test.yml

Co-authored-by: undera <undera@undera-old-desktop.home>
Co-authored-by: Igor Gov <iggvrv@gmail.com>
2022-03-30 11:14:25 +03:00
RamiBerm
63cf7ac34e Refactor entries controller logic (#949)
* wip

* Update entries_controller.go and entries_provider.go

* Update entries_controller.go

* change entries provider into a struct + interface

* Update entries_provider.go

* Update entries_provider.go
2022-03-29 18:30:19 +03:00
Igor Gov
e867b7d0f1 Build ui-common part of CI (#914)
* Build ui-common always locally
2022-03-29 14:14:52 +03:00
lirazyehezkel
dcd8a64f43 Hotfix/remove token from community (#948) 2022-03-29 13:16:50 +03:00
lirazyehezkel
bf8d5ed069 Support multiple workspaces (TRA-4365) (#945)
* support multiple workspaces

* reopen by websocket url dep

* open websocket only when websocketURL is changed

* upgrade common version

Co-authored-by: gadotroee <55343099+gadotroee@users.noreply.github.com>
2022-03-29 09:56:59 +03:00
Nimrod Gilboa Markevich
1f6e539590 Add commit message and committer to acceptance tests slack alert (#946)
* Add commit message and committer username to slack alerts
* Use name instead of username
* Use name and email
2022-03-29 09:15:42 +03:00
David Levanon
590fa08c81 EBPF error handling 2022-03-28 14:19:06 +03:00
41 changed files with 898 additions and 447 deletions

View File

@@ -43,7 +43,7 @@ jobs:
with:
status: ${{ job.status }}
notification_title: 'Mizu {workflow} has {status_message}'
message_format: '{emoji} *{workflow}* {status_message} during <{run_url}|run>, after commit: <{commit_url}|{commit_sha}>'
message_format: '{emoji} *{workflow}* {status_message} during <{run_url}|run>, after commit <{commit_url}|{commit_sha} ${{ github.event.head_commit.message }}> ${{ github.event.head_commit.committer.name }} <${{ github.event.head_commit.committer.email }}>'
footer: 'Linked Repo <{repo_url}|{repo}>'
notify_when: 'failure'
env:

View File

@@ -45,7 +45,7 @@ jobs:
- name: Check modified files
id: modified_files
run: devops/check_modified_files.sh agent/ shared/ tap/ ui/ Dockerfile
run: devops/check_modified_files.sh agent/ shared/ tap/ ui/ ui-common/ Dockerfile
- name: Set up Docker Buildx
if: steps.modified_files.outputs.matched == 'true'

View File

@@ -18,6 +18,7 @@ jobs:
run-unit-tests:
name: Unit Tests
runs-on: ubuntu-latest
timeout-minutes: 20
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v2

View File

@@ -1,12 +1,23 @@
ARG BUILDARCH=amd64
ARG TARGETARCH=amd64
### Front-end common
FROM node:16 AS front-end-common
WORKDIR /app/ui-build
COPY ui-common/package.json .
COPY ui-common/package-lock.json .
RUN npm i
COPY ui-common .
RUN npm pack
### Front-end
FROM node:16 AS front-end
WORKDIR /app/ui-build
COPY ui/package.json ui/package-lock.json ./
COPY --from=front-end-common ["/app/ui-build/up9-mizu-common-0.0.0.tgz", "."]
RUN npm i
COPY ui .
RUN npm run build

View File

@@ -18,6 +18,7 @@ import (
"github.com/gin-gonic/gin"
"github.com/up9inc/mizu/agent/pkg/dependency"
"github.com/up9inc/mizu/agent/pkg/elastic"
"github.com/up9inc/mizu/agent/pkg/entries"
"github.com/up9inc/mizu/agent/pkg/middlewares"
"github.com/up9inc/mizu/agent/pkg/models"
"github.com/up9inc/mizu/agent/pkg/oas"
@@ -370,5 +371,6 @@ func handleIncomingMessageAsTapper(socketConnection *websocket.Conn) {
func initializeDependencies() {
dependency.RegisterGenerator(dependency.ServiceMapGeneratorDependency, func() interface{} { return servicemap.GetDefaultServiceMapInstance() })
dependency.RegisterGenerator(dependency.OasGeneratorDependency, func() interface{} { return oas.GetDefaultOasGeneratorInstance() })
dependency.RegisterGenerator(dependency.OasGeneratorDependency, func() interface{} { return oas.GetDefaultOasGeneratorInstance(nil) })
dependency.RegisterGenerator(dependency.EntriesProvider, func() interface{} { return &entries.BasenineEntriesProvider{} })
}

View File

@@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/up9inc/mizu/agent/pkg/models"
"os"
"path"
"sort"
@@ -19,8 +20,6 @@ import (
"github.com/up9inc/mizu/agent/pkg/servicemap"
"github.com/up9inc/mizu/agent/pkg/models"
"github.com/up9inc/mizu/agent/pkg/oas"
"github.com/up9inc/mizu/agent/pkg/resolver"
"github.com/up9inc/mizu/agent/pkg/utils"
@@ -140,20 +139,6 @@ func startReadingChannel(outputItems <-chan *tapApi.OutputChannelItem, extension
rules, _, _ := models.RunValidationRulesState(*harEntry, mizuEntry.Destination.Name)
mizuEntry.Rules = rules
}
entryWSource := oas.EntryWithSource{
Entry: *harEntry,
Source: mizuEntry.Source.Name,
Destination: mizuEntry.Destination.Name,
Id: mizuEntry.Id,
}
if entryWSource.Destination == "" {
entryWSource.Destination = mizuEntry.Destination.IP + ":" + mizuEntry.Destination.Port
}
oasGenerator := dependency.GetInstance(dependency.OasGeneratorDependency).(oas.OasGeneratorSink)
oasGenerator.PushEntry(&entryWSource)
}
data, err := json.Marshal(mizuEntry)

View File

@@ -1,25 +1,20 @@
package controllers
import (
"encoding/json"
"net/http"
"strconv"
"time"
"github.com/up9inc/mizu/agent/pkg/app"
"github.com/up9inc/mizu/agent/pkg/har"
"github.com/up9inc/mizu/agent/pkg/dependency"
"github.com/up9inc/mizu/agent/pkg/entries"
"github.com/up9inc/mizu/agent/pkg/models"
"github.com/up9inc/mizu/agent/pkg/validation"
"github.com/gin-gonic/gin"
basenine "github.com/up9inc/basenine/client/go"
"github.com/up9inc/mizu/shared"
"github.com/up9inc/mizu/shared/logger"
tapApi "github.com/up9inc/mizu/tap/api"
)
func Error(c *gin.Context, err error) bool {
func HandleEntriesError(c *gin.Context, err error) bool {
if err != nil {
logger.Log.Errorf("Error getting entry: %v", err)
_ = c.Error(err)
@@ -49,45 +44,18 @@ func GetEntries(c *gin.Context) {
entriesRequest.TimeoutMs = 3000
}
data, meta, err := basenine.Fetch(shared.BasenineHost, shared.BaseninePort,
entriesRequest.LeftOff, entriesRequest.Direction, entriesRequest.Query,
entriesRequest.Limit, time.Duration(entriesRequest.TimeoutMs)*time.Millisecond)
if err != nil {
c.JSON(http.StatusInternalServerError, validationError)
}
response := &models.EntriesResponse{}
var dataSlice []interface{}
for _, row := range data {
var entry *tapApi.Entry
err = json.Unmarshal(row, &entry)
if err != nil {
c.JSON(http.StatusBadRequest, gin.H{
"error": true,
"type": "error",
"autoClose": "5000",
"msg": string(row),
})
return // exit
entriesProvider := dependency.GetInstance(dependency.EntriesProvider).(entries.EntriesProvider)
entries, metadata, err := entriesProvider.GetEntries(entriesRequest)
if !HandleEntriesError(c, err) {
baseEntries := make([]interface{}, 0)
for _, entry := range entries {
baseEntries = append(baseEntries, entry.Base)
}
extension := app.ExtensionsMap[entry.Protocol.Name]
base := extension.Dissector.Summarize(entry)
dataSlice = append(dataSlice, base)
c.JSON(http.StatusOK, models.EntriesResponse{
Data: baseEntries,
Meta: metadata,
})
}
var metadata *basenine.Metadata
err = json.Unmarshal(meta, &metadata)
if err != nil {
logger.Log.Debugf("Error recieving metadata: %v", err.Error())
}
response.Data = dataSlice
response.Meta = metadata
c.JSON(http.StatusOK, response)
}
func GetEntry(c *gin.Context) {
@@ -102,54 +70,11 @@ func GetEntry(c *gin.Context) {
}
id, _ := strconv.Atoi(c.Param("id"))
var entry *tapApi.Entry
bytes, err := basenine.Single(shared.BasenineHost, shared.BaseninePort, id, singleEntryRequest.Query)
if Error(c, err) {
return // exit
}
err = json.Unmarshal(bytes, &entry)
if err != nil {
c.JSON(http.StatusNotFound, gin.H{
"error": true,
"type": "error",
"autoClose": "5000",
"msg": string(bytes),
})
return // exit
}
extension := app.ExtensionsMap[entry.Protocol.Name]
base := extension.Dissector.Summarize(entry)
var representation []byte
representation, err = extension.Dissector.Represent(entry.Request, entry.Response)
if err != nil {
c.JSON(http.StatusNotFound, gin.H{
"error": true,
"type": "error",
"autoClose": "5000",
"msg": err.Error(),
})
return // exit
}
entriesProvider := dependency.GetInstance(dependency.EntriesProvider).(entries.EntriesProvider)
entry, err := entriesProvider.GetEntry(singleEntryRequest, id)
var rules []map[string]interface{}
var isRulesEnabled bool
if entry.Protocol.Name == "http" {
harEntry, _ := har.NewEntry(entry.Request, entry.Response, entry.StartTime, entry.ElapsedTime)
_, rulesMatched, _isRulesEnabled := models.RunValidationRulesState(*harEntry, entry.Destination.Name)
isRulesEnabled = _isRulesEnabled
inrec, _ := json.Marshal(rulesMatched)
if err := json.Unmarshal(inrec, &rules); err != nil {
logger.Log.Error(err)
}
if !HandleEntriesError(c, err) {
c.JSON(http.StatusOK, entry)
}
c.JSON(http.StatusOK, tapApi.EntryWrapper{
Protocol: entry.Protocol,
Representation: string(representation),
Data: entry,
Base: base,
Rules: rules,
IsRulesEnabled: isRulesEnabled,
})
}

View File

@@ -1,8 +1,12 @@
package controllers
import (
"bytes"
basenine "github.com/up9inc/basenine/client/go"
"net"
"net/http/httptest"
"testing"
"time"
"github.com/up9inc/mizu/agent/pkg/dependency"
"github.com/up9inc/mizu/agent/pkg/oas"
@@ -11,39 +15,55 @@ import (
)
func TestGetOASServers(t *testing.T) {
dependency.RegisterGenerator(dependency.OasGeneratorDependency, func() interface{} { return oas.GetDefaultOasGeneratorInstance() })
recorder := httptest.NewRecorder()
c, _ := gin.CreateTestContext(recorder)
oas.GetDefaultOasGeneratorInstance().Start()
oas.GetDefaultOasGeneratorInstance().GetServiceSpecs().Store("some", oas.NewGen("some"))
recorder, c := getRecorderAndContext()
GetOASServers(c)
t.Logf("Written body: %s", recorder.Body.String())
}
func TestGetOASAllSpecs(t *testing.T) {
dependency.RegisterGenerator(dependency.OasGeneratorDependency, func() interface{} { return oas.GetDefaultOasGeneratorInstance() })
recorder := httptest.NewRecorder()
c, _ := gin.CreateTestContext(recorder)
oas.GetDefaultOasGeneratorInstance().Start()
oas.GetDefaultOasGeneratorInstance().GetServiceSpecs().Store("some", oas.NewGen("some"))
recorder, c := getRecorderAndContext()
GetOASAllSpecs(c)
t.Logf("Written body: %s", recorder.Body.String())
}
func TestGetOASSpec(t *testing.T) {
dependency.RegisterGenerator(dependency.OasGeneratorDependency, func() interface{} { return oas.GetDefaultOasGeneratorInstance() })
recorder := httptest.NewRecorder()
c, _ := gin.CreateTestContext(recorder)
oas.GetDefaultOasGeneratorInstance().Start()
oas.GetDefaultOasGeneratorInstance().GetServiceSpecs().Store("some", oas.NewGen("some"))
recorder, c := getRecorderAndContext()
c.Params = []gin.Param{{Key: "id", Value: "some"}}
GetOASSpec(c)
t.Logf("Written body: %s", recorder.Body.String())
}
type fakeConn struct {
sendBuffer *bytes.Buffer
receiveBuffer *bytes.Buffer
}
func (f fakeConn) Read(p []byte) (int, error) { return f.sendBuffer.Read(p) }
func (f fakeConn) Write(p []byte) (int, error) { return f.receiveBuffer.Write(p) }
func (fakeConn) Close() error { return nil }
func (fakeConn) LocalAddr() net.Addr { return nil }
func (fakeConn) RemoteAddr() net.Addr { return nil }
func (fakeConn) SetDeadline(t time.Time) error { return nil }
func (fakeConn) SetReadDeadline(t time.Time) error { return nil }
func (fakeConn) SetWriteDeadline(t time.Time) error { return nil }
func getRecorderAndContext() (*httptest.ResponseRecorder, *gin.Context) {
dummyConn := new(basenine.Connection)
dummyConn.Conn = fakeConn{
sendBuffer: bytes.NewBufferString("\n"),
receiveBuffer: bytes.NewBufferString("\n"),
}
dependency.RegisterGenerator(dependency.OasGeneratorDependency, func() interface{} {
return oas.GetDefaultOasGeneratorInstance(dummyConn)
})
recorder := httptest.NewRecorder()
c, _ := gin.CreateTestContext(recorder)
oas.GetDefaultOasGeneratorInstance(dummyConn).Start()
oas.GetDefaultOasGeneratorInstance(dummyConn).GetServiceSpecs().Store("some", oas.NewGen("some"))
return recorder, c
}
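
The fakeConn type above stubs every method of Go's net.Conn so a basenine.Connection can be driven from in-memory buffers during tests. A compile-time assertion (a sketch, not part of this diff) is a cheap way to keep the interface satisfied as methods change:

package controllers

import "net"

// Compile-time assertion: fakeConn must implement net.Conn.
// If a method is missing or a signature drifts, this fails to build.
var _ net.Conn = fakeConn{}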

View File

@@ -5,4 +5,5 @@ type DependencyContainerType string
const (
ServiceMapGeneratorDependency = "ServiceMapGeneratorDependency"
OasGeneratorDependency = "OasGeneratorDependency"
EntriesProvider = "EntriesProvider"
)
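
These keys back a tiny service locator: initializeDependencies registers a factory per key, and call sites resolve with GetInstance plus a type assertion, as the entries controller does above. Below is a minimal self-contained sketch of that pattern; the function names mirror the real package, but whether the real container caches instances is not shown in this diff, and this sketch deliberately re-invokes the factory on every lookup. Kind() is a stand-in method for illustration only.

package main

import "fmt"

// registry maps a dependency key to its factory, mirroring
// dependency.RegisterGenerator / dependency.GetInstance.
var registry = map[string]func() interface{}{}

func RegisterGenerator(key string, factory func() interface{}) {
	registry[key] = factory
}

func GetInstance(key string) interface{} {
	return registry[key]()
}

type EntriesProvider interface{ Kind() string }

type BasenineEntriesProvider struct{}

func (b *BasenineEntriesProvider) Kind() string { return "basenine" }

func main() {
	RegisterGenerator("EntriesProvider", func() interface{} { return &BasenineEntriesProvider{} })
	// Resolve by key and assert the expected interface,
	// exactly like GetEntries does in the controller.
	p := GetInstance("EntriesProvider").(EntriesProvider)
	fmt.Println(p.Kind()) // basenine
}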

View File

@@ -0,0 +1,98 @@
package entries
import (
"encoding/json"
"time"
basenine "github.com/up9inc/basenine/client/go"
"github.com/up9inc/mizu/agent/pkg/app"
"github.com/up9inc/mizu/agent/pkg/har"
"github.com/up9inc/mizu/agent/pkg/models"
"github.com/up9inc/mizu/shared"
"github.com/up9inc/mizu/shared/logger"
tapApi "github.com/up9inc/mizu/tap/api"
)
type EntriesProvider interface {
GetEntries(entriesRequest *models.EntriesRequest) ([]*tapApi.EntryWrapper, *basenine.Metadata, error)
GetEntry(singleEntryRequest *models.SingleEntryRequest, entryId int) (*tapApi.EntryWrapper, error)
}
type BasenineEntriesProvider struct{}
func (e *BasenineEntriesProvider) GetEntries(entriesRequest *models.EntriesRequest) ([]*tapApi.EntryWrapper, *basenine.Metadata, error) {
data, meta, err := basenine.Fetch(shared.BasenineHost, shared.BaseninePort,
entriesRequest.LeftOff, entriesRequest.Direction, entriesRequest.Query,
entriesRequest.Limit, time.Duration(entriesRequest.TimeoutMs)*time.Millisecond)
if err != nil {
return nil, nil, err
}
var dataSlice []*tapApi.EntryWrapper
for _, row := range data {
var entry *tapApi.Entry
err = json.Unmarshal(row, &entry)
if err != nil {
return nil, nil, err
}
extension := app.ExtensionsMap[entry.Protocol.Name]
base := extension.Dissector.Summarize(entry)
dataSlice = append(dataSlice, &tapApi.EntryWrapper{
Protocol: entry.Protocol,
Data: entry,
Base: base,
})
}
var metadata *basenine.Metadata
err = json.Unmarshal(meta, &metadata)
if err != nil {
logger.Log.Debugf("Error recieving metadata: %v", err.Error())
}
return dataSlice, metadata, nil
}
func (e *BasenineEntriesProvider) GetEntry(singleEntryRequest *models.SingleEntryRequest, entryId int) (*tapApi.EntryWrapper, error) {
var entry *tapApi.Entry
bytes, err := basenine.Single(shared.BasenineHost, shared.BaseninePort, entryId, singleEntryRequest.Query)
if err != nil {
return nil, err
}
err = json.Unmarshal(bytes, &entry)
if err != nil {
return nil, err
}
extension := app.ExtensionsMap[entry.Protocol.Name]
base := extension.Dissector.Summarize(entry)
var representation []byte
representation, err = extension.Dissector.Represent(entry.Request, entry.Response)
if err != nil {
return nil, err
}
var rules []map[string]interface{}
var isRulesEnabled bool
if entry.Protocol.Name == "http" {
harEntry, _ := har.NewEntry(entry.Request, entry.Response, entry.StartTime, entry.ElapsedTime)
_, rulesMatched, _isRulesEnabled := models.RunValidationRulesState(*harEntry, entry.Destination.Name)
isRulesEnabled = _isRulesEnabled
inrec, _ := json.Marshal(rulesMatched)
if err := json.Unmarshal(inrec, &rules); err != nil {
logger.Log.Error(err)
}
}
return &tapApi.EntryWrapper{
Protocol: entry.Protocol,
Representation: string(representation),
Data: entry,
Base: base,
Rules: rules,
IsRulesEnabled: isRulesEnabled,
}, nil
}
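
A hedged sketch of calling the new provider from agent code follows. It mirrors GetEntries in entries_controller.go above and assumes the running agent's context (extensions registered in app.ExtensionsMap, a reachable Basenine server) plus the request field types, which this diff only shows indirectly:

package main

import (
	"github.com/up9inc/mizu/agent/pkg/dependency"
	"github.com/up9inc/mizu/agent/pkg/entries"
	"github.com/up9inc/mizu/agent/pkg/models"
	"github.com/up9inc/mizu/shared/logger"
)

// fetchRecent resolves the provider the same way the controller does
// and pulls up to 50 entries matching an empty (match-all) query.
func fetchRecent() {
	provider := dependency.GetInstance(dependency.EntriesProvider).(entries.EntriesProvider)
	request := &models.EntriesRequest{Query: "", Limit: 50, TimeoutMs: 3000}
	wrappers, meta, err := provider.GetEntries(request)
	if err != nil {
		logger.Log.Errorf("Error getting entries: %v", err)
		return
	}
	logger.Log.Infof("Fetched %d entries (meta: %+v)", len(wrappers), meta)
}

func main() { fetchRecent() }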

View File

@@ -67,23 +67,23 @@ func fileSize(fname string) int64 {
return fi.Size()
}
func feedEntries(fromFiles []string, isSync bool) (count int, err error) {
func feedEntries(fromFiles []string, isSync bool, gen *defaultOasGenerator) (count uint, err error) {
badFiles := make([]string, 0)
cnt := 0
cnt := uint(0)
for _, file := range fromFiles {
logger.Log.Info("Processing file: " + file)
ext := strings.ToLower(filepath.Ext(file))
eCnt := 0
eCnt := uint(0)
switch ext {
case ".har":
eCnt, err = feedFromHAR(file, isSync)
eCnt, err = feedFromHAR(file, isSync, gen)
if err != nil {
logger.Log.Warning("Failed processing file: " + err.Error())
badFiles = append(badFiles, file)
continue
}
case ".ldjson":
eCnt, err = feedFromLDJSON(file, isSync)
eCnt, err = feedFromLDJSON(file, isSync, gen)
if err != nil {
logger.Log.Warning("Failed processing file: " + err.Error())
badFiles = append(badFiles, file)
@@ -102,7 +102,7 @@ func feedEntries(fromFiles []string, isSync bool) (count int, err error) {
return cnt, nil
}
func feedFromHAR(file string, isSync bool) (int, error) {
func feedFromHAR(file string, isSync bool, gen *defaultOasGenerator) (uint, error) {
fd, err := os.Open(file)
if err != nil {
panic(err)
@@ -121,16 +121,16 @@ func feedFromHAR(file string, isSync bool) (int, error) {
return 0, err
}
cnt := 0
cnt := uint(0)
for _, entry := range harDoc.Log.Entries {
cnt += 1
feedEntry(&entry, "", isSync, file)
feedEntry(&entry, "", file, gen, cnt)
}
return cnt, nil
}
func feedEntry(entry *har.Entry, source string, isSync bool, file string) {
func feedEntry(entry *har.Entry, source string, file string, gen *defaultOasGenerator, cnt uint) {
entry.Comment = file
if entry.Response.Status == 302 {
logger.Log.Debugf("Dropped traffic entry due to permanent redirect status: %s", entry.StartedDateTime)
@@ -145,15 +145,11 @@ func feedEntry(entry *har.Entry, source string, isSync bool, file string) {
logger.Log.Errorf("Failed to parse entry URL: %v, err: %v", entry.Request.URL, err)
}
ews := EntryWithSource{Entry: *entry, Source: source, Destination: u.Host, Id: uint(0)}
if isSync {
GetDefaultOasGeneratorInstance().entriesChan <- ews // blocking variant, right?
} else {
GetDefaultOasGeneratorInstance().PushEntry(&ews)
}
ews := EntryWithSource{Entry: *entry, Source: source, Destination: u.Host, Id: cnt}
gen.handleHARWithSource(&ews)
}
func feedFromLDJSON(file string, isSync bool) (int, error) {
func feedFromLDJSON(file string, isSync bool, gen *defaultOasGenerator) (uint, error) {
fd, err := os.Open(file)
if err != nil {
panic(err)
@@ -165,7 +161,7 @@ func feedFromLDJSON(file string, isSync bool) (int, error) {
var meta map[string]interface{}
buf := strings.Builder{}
cnt := 0
cnt := uint(0)
source := ""
for {
substr, isPrefix, err := reader.ReadLine()
@@ -196,7 +192,7 @@ func feedFromLDJSON(file string, isSync bool) (int, error) {
logger.Log.Warningf("Failed decoding entry: %s", line)
} else {
cnt += 1
feedEntry(&entry, source, isSync, file)
feedEntry(&entry, source, file, gen, cnt)
}
}
}

View File

@@ -3,10 +3,13 @@ package oas
import (
"context"
"encoding/json"
basenine "github.com/up9inc/basenine/client/go"
"github.com/up9inc/mizu/agent/pkg/har"
"github.com/up9inc/mizu/shared"
"github.com/up9inc/mizu/tap/api"
"net/url"
"sync"
"github.com/up9inc/mizu/agent/pkg/har"
"github.com/up9inc/mizu/shared/logger"
)
@@ -15,10 +18,6 @@ var (
instance *defaultOasGenerator
)
type OasGeneratorSink interface {
PushEntry(entryWithSource *EntryWithSource)
}
type OasGenerator interface {
Start()
Stop()
@@ -32,12 +31,20 @@ type defaultOasGenerator struct {
ctx context.Context
cancel context.CancelFunc
serviceSpecs *sync.Map
entriesChan chan EntryWithSource
dbConn *basenine.Connection
}
func GetDefaultOasGeneratorInstance() *defaultOasGenerator {
func GetDefaultOasGeneratorInstance(conn *basenine.Connection) *defaultOasGenerator {
syncOnce.Do(func() {
instance = NewDefaultOasGenerator()
if conn == nil {
c, err := basenine.NewConnection(shared.BasenineHost, shared.BaseninePort)
if err != nil {
panic(err)
}
conn = c
}
instance = NewDefaultOasGenerator(conn)
logger.Log.Debug("OAS Generator Initialized")
})
return instance
@@ -50,7 +57,6 @@ func (g *defaultOasGenerator) Start() {
ctx, cancel := context.WithCancel(context.Background())
g.cancel = cancel
g.ctx = ctx
g.entriesChan = make(chan EntryWithSource, 100) // buffer up to 100 entries for OAS processing
g.serviceSpecs = &sync.Map{}
g.started = true
go g.runGenerator()
@@ -70,80 +76,117 @@ func (g *defaultOasGenerator) IsStarted() bool {
}
func (g *defaultOasGenerator) runGenerator() {
// Make []byte channels to receive the data and the meta
dataChan := make(chan []byte)
metaChan := make(chan []byte)
g.dbConn.Query("", dataChan, metaChan)
for {
select {
case <-g.ctx.Done():
logger.Log.Infof("OAS Generator was canceled")
return
case entryWithSource, ok := <-g.entriesChan:
case metaBytes, ok := <-metaChan:
if !ok {
logger.Log.Infof("OAS Generator - meta channel closed")
break
}
logger.Log.Debugf("Meta: %s", metaBytes)
case dataBytes, ok := <-dataChan:
if !ok {
logger.Log.Infof("OAS Generator - entries channel closed")
break
}
entry := entryWithSource.Entry
u, err := url.Parse(entry.Request.URL)
logger.Log.Debugf("Data: %s", dataBytes)
e := new(api.Entry)
err := json.Unmarshal(dataBytes, e)
if err != nil {
logger.Log.Errorf("Failed to parse entry URL: %v, err: %v", entry.Request.URL, err)
}
val, found := g.serviceSpecs.Load(entryWithSource.Destination)
var gen *SpecGen
if !found {
gen = NewGen(u.Scheme + "://" + entryWithSource.Destination)
g.serviceSpecs.Store(entryWithSource.Destination, gen)
} else {
gen = val.(*SpecGen)
}
opId, err := gen.feedEntry(entryWithSource)
if err != nil {
txt, suberr := json.Marshal(entry)
if suberr == nil {
logger.Log.Debugf("Problematic entry: %s", txt)
}
logger.Log.Warningf("Failed processing entry: %s", err)
continue
}
logger.Log.Debugf("Handled entry %s as opId: %s", entry.Request.URL, opId) // TODO: set opId back to entry?
g.handleEntry(e)
}
}
}
func (g *defaultOasGenerator) handleEntry(mizuEntry *api.Entry) {
if mizuEntry.Protocol.Name == "http" {
entry, err := har.NewEntry(mizuEntry.Request, mizuEntry.Response, mizuEntry.StartTime, mizuEntry.ElapsedTime)
if err != nil {
logger.Log.Warningf("Failed to turn MizuEntry %d into HAR Entry: %s", mizuEntry.Id, err)
return
}
dest := mizuEntry.Destination.Name
if dest == "" {
dest = mizuEntry.Destination.IP + ":" + mizuEntry.Destination.Port
}
entryWSource := &EntryWithSource{
Entry: *entry,
Source: mizuEntry.Source.Name,
Destination: dest,
Id: mizuEntry.Id,
}
g.handleHARWithSource(entryWSource)
} else {
logger.Log.Debugf("OAS: Unsupported protocol in entry %d: %s", mizuEntry.Id, mizuEntry.Protocol.Name)
}
}
func (g *defaultOasGenerator) handleHARWithSource(entryWSource *EntryWithSource) {
entry := entryWSource.Entry
gen := g.getGen(entryWSource.Destination, entry.Request.URL)
opId, err := gen.feedEntry(entryWSource)
if err != nil {
txt, suberr := json.Marshal(entry)
if suberr == nil {
logger.Log.Debugf("Problematic entry: %s", txt)
}
logger.Log.Warningf("Failed processing entry %d: %s", entryWSource.Id, err)
return
}
logger.Log.Debugf("Handled entry %d as opId: %s", entryWSource.Id, opId) // TODO: set opId back to entry?
}
func (g *defaultOasGenerator) getGen(dest string, urlStr string) *SpecGen {
u, err := url.Parse(urlStr)
if err != nil {
logger.Log.Errorf("Failed to parse entry URL: %v, err: %v", urlStr, err)
}
val, found := g.serviceSpecs.Load(dest)
var gen *SpecGen
if !found {
gen = NewGen(u.Scheme + "://" + dest)
g.serviceSpecs.Store(dest, gen)
} else {
gen = val.(*SpecGen)
}
return gen
}
func (g *defaultOasGenerator) Reset() {
g.serviceSpecs = &sync.Map{}
}
func (g *defaultOasGenerator) PushEntry(entryWithSource *EntryWithSource) {
if !g.started {
return
}
select {
case g.entriesChan <- *entryWithSource:
default:
logger.Log.Warningf("OAS Generator - entry wasn't sent to channel because the channel has no buffer or there is no receiver")
}
}
func (g *defaultOasGenerator) GetServiceSpecs() *sync.Map {
return g.serviceSpecs
}
func NewDefaultOasGenerator() *defaultOasGenerator {
func NewDefaultOasGenerator(c *basenine.Connection) *defaultOasGenerator {
return &defaultOasGenerator{
started: false,
ctx: nil,
cancel: nil,
serviceSpecs: nil,
entriesChan: nil,
dbConn: c,
}
}
type EntryWithSource struct {
Source string
Destination string
Entry har.Entry
Id uint
}
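
One behavioral consequence of the reworked singleton is that only the first call to GetDefaultOasGeneratorInstance decides the connection: syncOnce runs the constructor exactly once, so a later caller's conn argument is silently ignored. That is why the tests above inject dummyConn before anything else touches the instance, and why passing nil falls back to dialing Basenine. A stripped-down sketch of that first-call-wins behavior:

package main

import (
	"fmt"
	"sync"
)

var (
	once     sync.Once
	instance string
)

// getInstance mimics GetDefaultOasGeneratorInstance: the first
// argument ever passed wins; later arguments are ignored.
func getInstance(conn string) string {
	once.Do(func() { instance = conn })
	return instance
}

func main() {
	fmt.Println(getInstance("fake-conn")) // fake-conn
	fmt.Println(getInstance("real-conn")) // still fake-conn
}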

View File

@@ -0,0 +1,36 @@
package oas
import (
"encoding/json"
"github.com/up9inc/mizu/agent/pkg/har"
"sync"
"testing"
)
func TestOASGen(t *testing.T) {
gen := new(defaultOasGenerator)
gen.serviceSpecs = &sync.Map{}
e := new(har.Entry)
err := json.Unmarshal([]byte(`{"startedDateTime": "20000101","request": {"url": "https://host/path", "method": "GET"}, "response": {"status": 200}}`), e)
if err != nil {
panic(err)
}
ews := &EntryWithSource{
Destination: "some",
Entry: *e,
}
gen.handleHARWithSource(ews)
g, ok := gen.serviceSpecs.Load("some")
if !ok {
panic("Failed")
}
sg := g.(*SpecGen)
spec, err := sg.GetSpec()
if err != nil {
panic(err)
}
specText, _ := json.Marshal(spec)
t.Log(string(specText))
}

View File

@@ -28,6 +28,13 @@ const CountersTotal = "x-counters-total"
const CountersPerSource = "x-counters-per-source"
const SampleId = "x-sample-entry"
type EntryWithSource struct {
Source string
Destination string
Entry har.Entry
Id uint
}
type reqResp struct { // hello, generics in Go
Req *har.Request
Resp *har.Response
@@ -60,7 +67,7 @@ func (g *SpecGen) StartFromSpec(oas *openapi.OpenAPI) {
g.tree = new(Node)
for pathStr, pathObj := range oas.Paths.Items {
pathSplit := strings.Split(string(pathStr), "/")
g.tree.getOrSet(pathSplit, pathObj)
g.tree.getOrSet(pathSplit, pathObj, 0)
// clean "last entry timestamp" markers from the past
for _, pathAndOp := range g.tree.listOps() {
@@ -69,11 +76,11 @@ func (g *SpecGen) StartFromSpec(oas *openapi.OpenAPI) {
}
}
func (g *SpecGen) feedEntry(entryWithSource EntryWithSource) (string, error) {
func (g *SpecGen) feedEntry(entryWithSource *EntryWithSource) (string, error) {
g.lock.Lock()
defer g.lock.Unlock()
opId, err := g.handlePathObj(&entryWithSource)
opId, err := g.handlePathObj(entryWithSource)
if err != nil {
return "", err
}
@@ -219,7 +226,7 @@ func (g *SpecGen) handlePathObj(entryWithSource *EntryWithSource) (string, error
} else {
split = strings.Split(urlParsed.Path, "/")
}
node := g.tree.getOrSet(split, new(openapi.PathObj))
node := g.tree.getOrSet(split, new(openapi.PathObj), entryWithSource.Id)
opObj, err := handleOpObj(entryWithSource, node.pathObj)
if opObj != nil {
@@ -242,12 +249,12 @@ func handleOpObj(entryWithSource *EntryWithSource, pathObj *openapi.PathObj) (*o
return nil, nil
}
err = handleRequest(&entry.Request, opObj, isSuccess)
err = handleRequest(&entry.Request, opObj, isSuccess, entryWithSource.Id)
if err != nil {
return nil, err
}
err = handleResponse(&entry.Response, opObj, isSuccess)
err = handleResponse(&entry.Response, opObj, isSuccess, entryWithSource.Id)
if err != nil {
return nil, err
}
@@ -257,6 +264,8 @@ func handleOpObj(entryWithSource *EntryWithSource, pathObj *openapi.PathObj) (*o
return nil, err
}
setSampleID(&opObj.Extensions, entryWithSource.Id)
return opObj, nil
}
@@ -329,15 +338,10 @@ func handleCounters(opObj *openapi.Operation, success bool, entryWithSource *Ent
return err
}
err = opObj.Extensions.SetExtension(SampleId, entryWithSource.Id)
if err != nil {
return err
}
return nil
}
func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool) error {
func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool, sampleId uint) error {
// TODO: we don't handle the situation when header/qstr param can be defined on pathObj level. Also the path param defined on opObj
urlParsed, err := url.Parse(req.URL)
if err != nil {
@@ -361,7 +365,7 @@ func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool) e
IsIgnored: func(name string) bool { return false },
GeneralizeName: func(name string) string { return name },
}
handleNameVals(qstrGW, &opObj.Parameters, false)
handleNameVals(qstrGW, &opObj.Parameters, false, sampleId)
hdrGW := nvParams{
In: openapi.InHeader,
@@ -369,7 +373,7 @@ func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool) e
IsIgnored: isHeaderIgnored,
GeneralizeName: strings.ToLower,
}
handleNameVals(hdrGW, &opObj.Parameters, true)
handleNameVals(hdrGW, &opObj.Parameters, true, sampleId)
if isSuccess {
reqBody, err := getRequestBody(req, opObj)
@@ -378,12 +382,14 @@ func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool) e
}
if reqBody != nil {
setSampleID(&reqBody.Extensions, sampleId)
if req.PostData.Text == "" {
reqBody.Required = false
} else {
reqCtype, _ := getReqCtype(req)
reqMedia, err := fillContent(reqResp{Req: req}, reqBody.Content, reqCtype)
reqMedia, err := fillContent(reqResp{Req: req}, reqBody.Content, reqCtype, sampleId)
if err != nil {
return err
}
@@ -395,18 +401,20 @@ func handleRequest(req *har.Request, opObj *openapi.Operation, isSuccess bool) e
return nil
}
func handleResponse(resp *har.Response, opObj *openapi.Operation, isSuccess bool) error {
func handleResponse(resp *har.Response, opObj *openapi.Operation, isSuccess bool, sampleId uint) error {
// TODO: we don't support "default" response
respObj, err := getResponseObj(resp, opObj, isSuccess)
if err != nil {
return err
}
handleRespHeaders(resp.Headers, respObj)
setSampleID(&respObj.Extensions, sampleId)
handleRespHeaders(resp.Headers, respObj, sampleId)
respCtype := getRespCtype(resp)
respContent := respObj.Content
respMedia, err := fillContent(reqResp{Resp: resp}, respContent, respCtype)
respMedia, err := fillContent(reqResp{Resp: resp}, respContent, respCtype, sampleId)
if err != nil {
return err
}
@@ -414,7 +422,7 @@ func handleResponse(resp *har.Response, opObj *openapi.Operation, isSuccess bool
return nil
}
func handleRespHeaders(reqHeaders []har.Header, respObj *openapi.ResponseObj) {
func handleRespHeaders(reqHeaders []har.Header, respObj *openapi.ResponseObj, sampleId uint) {
visited := map[string]*openapi.HeaderObj{}
for _, pair := range reqHeaders {
if isHeaderIgnored(pair.Name) {
@@ -436,6 +444,8 @@ func handleRespHeaders(reqHeaders []har.Header, respObj *openapi.ResponseObj) {
logger.Log.Warningf("Failed to add example to a parameter: %s", err)
}
visited[nameGeneral] = param
setSampleID(&param.Extensions, sampleId)
}
// maintain "required" flag
@@ -456,13 +466,15 @@ func handleRespHeaders(reqHeaders []har.Header, respObj *openapi.ResponseObj) {
}
}
func fillContent(reqResp reqResp, respContent openapi.Content, ctype string) (*openapi.MediaType, error) {
func fillContent(reqResp reqResp, respContent openapi.Content, ctype string, sampleId uint) (*openapi.MediaType, error) {
content, found := respContent[ctype]
if !found {
respContent[ctype] = &openapi.MediaType{}
content = respContent[ctype]
}
setSampleID(&content.Extensions, sampleId)
var text string
var isBinary bool
if reqResp.Req != nil {
@@ -474,10 +486,10 @@ func fillContent(reqResp reqResp, respContent openapi.Content, ctype string) (*o
if !isBinary && text != "" {
var exampleMsg []byte
// try treating it as json
any, isJSON := anyJSON(text)
anyVal, isJSON := anyJSON(text)
if isJSON {
// re-marshal with forced indent
if msg, err := json.MarshalIndent(any, "", "\t"); err != nil {
if msg, err := json.MarshalIndent(anyVal, "", "\t"); err != nil {
panic("Failed to re-marshal value, super-strange")
} else {
exampleMsg = msg

View File

@@ -6,6 +6,7 @@ import (
"os"
"regexp"
"strings"
"sync"
"testing"
"time"
@@ -19,7 +20,7 @@ import (
// if started via env, write file into subdir
func outputSpec(label string, spec *openapi.OpenAPI, t *testing.T) string {
content, err := json.MarshalIndent(spec, "", "\t")
content, err := json.MarshalIndent(spec, "", " ")
if err != nil {
panic(err)
}
@@ -48,14 +49,16 @@ func TestEntries(t *testing.T) {
t.Log(err)
t.FailNow()
}
GetDefaultOasGeneratorInstance().Start()
loadStartingOAS("test_artifacts/catalogue.json", "catalogue")
loadStartingOAS("test_artifacts/trcc.json", "trcc-api-service")
gen := NewDefaultOasGenerator(nil)
gen.serviceSpecs = new(sync.Map)
loadStartingOAS("test_artifacts/catalogue.json", "catalogue", gen.serviceSpecs)
loadStartingOAS("test_artifacts/trcc.json", "trcc-api-service", gen.serviceSpecs)
go func() {
for {
time.Sleep(1 * time.Second)
GetDefaultOasGeneratorInstance().GetServiceSpecs().Range(func(key, val interface{}) bool {
gen.serviceSpecs.Range(func(key, val interface{}) bool {
svc := key.(string)
t.Logf("Getting spec for %s", svc)
gen := val.(*SpecGen)
@@ -68,16 +71,14 @@ func TestEntries(t *testing.T) {
}
}()
cnt, err := feedEntries(files, true)
cnt, err := feedEntries(files, true, gen)
if err != nil {
t.Log(err)
t.Fail()
}
waitQueueProcessed()
svcs := strings.Builder{}
GetDefaultOasGeneratorInstance().GetServiceSpecs().Range(func(key, val interface{}) bool {
gen.serviceSpecs.Range(func(key, val interface{}) bool {
gen := val.(*SpecGen)
svc := key.(string)
svcs.WriteString(svc + ",")
@@ -99,7 +100,7 @@ func TestEntries(t *testing.T) {
return true
})
GetDefaultOasGeneratorInstance().GetServiceSpecs().Range(func(key, val interface{}) bool {
gen.serviceSpecs.Range(func(key, val interface{}) bool {
svc := key.(string)
gen := val.(*SpecGen)
spec, err := gen.GetSpec()
@@ -123,20 +124,18 @@ func TestEntries(t *testing.T) {
}
func TestFileSingle(t *testing.T) {
GetDefaultOasGeneratorInstance().Start()
GetDefaultOasGeneratorInstance().Reset()
gen := NewDefaultOasGenerator(nil)
gen.serviceSpecs = new(sync.Map)
// loadStartingOAS()
file := "test_artifacts/params.har"
files := []string{file}
cnt, err := feedEntries(files, true)
cnt, err := feedEntries(files, true, gen)
if err != nil {
logger.Log.Warning("Failed processing file: " + err.Error())
t.Fail()
}
waitQueueProcessed()
GetDefaultOasGeneratorInstance().GetServiceSpecs().Range(func(key, val interface{}) bool {
gen.serviceSpecs.Range(func(key, val interface{}) bool {
svc := key.(string)
gen := val.(*SpecGen)
spec, err := gen.GetSpec()
@@ -189,18 +188,7 @@ func TestFileSingle(t *testing.T) {
logger.Log.Infof("Processed entries: %d", cnt)
}
func waitQueueProcessed() {
for {
time.Sleep(100 * time.Millisecond)
queue := len(GetDefaultOasGeneratorInstance().entriesChan)
logger.Log.Infof("Queue: %d", queue)
if queue < 1 {
break
}
}
}
func loadStartingOAS(file string, label string) {
func loadStartingOAS(file string, label string, specs *sync.Map) {
fd, err := os.Open(file)
if err != nil {
panic(err)
@@ -222,12 +210,14 @@ func loadStartingOAS(file string, label string) {
gen := NewGen(label)
gen.StartFromSpec(doc)
GetDefaultOasGeneratorInstance().GetServiceSpecs().Store(label, gen)
specs.Store(label, gen)
}
func TestEntriesNegative(t *testing.T) {
gen := NewDefaultOasGenerator(nil)
gen.serviceSpecs = new(sync.Map)
files := []string{"invalid"}
_, err := feedEntries(files, false)
_, err := feedEntries(files, false, gen)
if err == nil {
t.Logf("Should have failed")
t.Fail()
@@ -235,8 +225,10 @@ func TestEntriesNegative(t *testing.T) {
}
func TestEntriesPositive(t *testing.T) {
gen := NewDefaultOasGenerator(nil)
gen.serviceSpecs = new(sync.Map)
files := []string{"test_artifacts/params.har"}
_, err := feedEntries(files, false)
_, err := feedEntries(files, false, gen)
if err != nil {
t.Logf("Failed")
t.Fail()

View File

@@ -21,9 +21,11 @@
"description": "Successful call with status 200",
"content": {
"application/json": {
"example": null
"example": null,
"x-sample-entry": 4
}
}
},
"x-sample-entry": 4
}
},
"x-counters-per-source": {
@@ -45,7 +47,7 @@
"sumDuration": 0
},
"x-last-seen-ts": 1567750580.04,
"x-sample-entry": 0
"x-sample-entry": 4
}
},
"/appears-twice": {
@@ -58,9 +60,11 @@
"description": "Successful call with status 200",
"content": {
"application/json": {
"example": null
"example": null,
"x-sample-entry": 6
}
}
},
"x-sample-entry": 6
}
},
"x-counters-per-source": {
@@ -82,7 +86,7 @@
"sumDuration": 1
},
"x-last-seen-ts": 1567750581.74,
"x-sample-entry": 0
"x-sample-entry": 6
}
},
"/body-optional": {
@@ -94,8 +98,11 @@
"200": {
"description": "Successful call with status 200",
"content": {
"": {}
}
"": {
"x-sample-entry": 12
}
},
"x-sample-entry": 12
}
},
"x-counters-per-source": {
@@ -117,14 +124,16 @@
"sumDuration": 0.01
},
"x-last-seen-ts": 1567750581.75,
"x-sample-entry": 0,
"x-sample-entry": 12,
"requestBody": {
"description": "Generic request body",
"content": {
"application/json": {
"example": "{\"key\", \"val\"}"
"example": "{\"key\", \"val\"}",
"x-sample-entry": 11
}
}
},
"x-sample-entry": 12
}
}
},
@@ -137,8 +146,11 @@
"200": {
"description": "Successful call with status 200",
"content": {
"": {}
}
"": {
"x-sample-entry": 13
}
},
"x-sample-entry": 13
}
},
"x-counters-per-source": {
@@ -160,15 +172,17 @@
"sumDuration": 0
},
"x-last-seen-ts": 1567750581.75,
"x-sample-entry": 0,
"x-sample-entry": 13,
"requestBody": {
"description": "Generic request body",
"content": {
"": {
"example": "body exists"
"example": "body exists",
"x-sample-entry": 13
}
},
"required": true
"required": true,
"x-sample-entry": 13
}
}
},
@@ -182,9 +196,11 @@
"description": "Successful call with status 200",
"content": {
"": {
"example": {}
"example": {},
"x-sample-entry": 9
}
}
},
"x-sample-entry": 9
}
},
"x-counters-per-source": {
@@ -206,7 +222,7 @@
"sumDuration": 0
},
"x-last-seen-ts": 1567750582.74,
"x-sample-entry": 0,
"x-sample-entry": 9,
"requestBody": {
"description": "Generic request body",
"content": {
@@ -233,10 +249,12 @@
}
}
},
"example": "--BOUNDARY\r\nContent-Disposition: form-data; name=\"file\"; filename=\"metadata.json\"\r\nContent-Type: application/json\r\n\r\n{\"functions\": 123}\r\n--BOUNDARY\r\nContent-Disposition: form-data; name=\"path\"\r\n\r\n/content/components\r\n--BOUNDARY--\r\n"
"example": "--BOUNDARY\r\nContent-Disposition: form-data; name=\"file\"; filename=\"metadata.json\"\r\nContent-Type: application/json\r\n\r\n{\"functions\": 123}\r\n--BOUNDARY\r\nContent-Disposition: form-data; name=\"path\"\r\n\r\n/content/components\r\n--BOUNDARY--\r\n",
"x-sample-entry": 9
}
},
"required": true
"required": true,
"x-sample-entry": 9
}
}
},
@@ -249,8 +267,11 @@
"200": {
"description": "Successful call with status 200",
"content": {
"": {}
}
"": {
"x-sample-entry": 8
}
},
"x-sample-entry": 8
}
},
"x-counters-per-source": {
@@ -272,7 +293,7 @@
"sumDuration": 1
},
"x-last-seen-ts": 1567750581.74,
"x-sample-entry": 0,
"x-sample-entry": 8,
"requestBody": {
"description": "Generic request body",
"content": {
@@ -312,10 +333,12 @@
}
}
},
"example": "agent-id=ade\u0026callback-url=\u0026token=sometoken"
"example": "agent-id=ade\u0026callback-url=\u0026token=sometoken",
"x-sample-entry": 8
}
},
"required": true
"required": true,
"x-sample-entry": 8
}
}
},
@@ -331,8 +354,11 @@
"200": {
"description": "Successful call with status 200",
"content": {
"": {}
}
"": {
"x-sample-entry": 14
}
},
"x-sample-entry": 14
}
},
"x-counters-per-source": {
@@ -354,7 +380,7 @@
"sumDuration": 0
},
"x-last-seen-ts": 1567750582,
"x-sample-entry": 0
"x-sample-entry": 14
},
"parameters": [
{
@@ -369,7 +395,8 @@
"example #0": {
"value": "234324"
}
}
},
"x-sample-entry": 14
}
]
},
@@ -385,8 +412,11 @@
"200": {
"description": "Successful call with status 200",
"content": {
"": {}
}
"": {
"x-sample-entry": 18
}
},
"x-sample-entry": 18
}
},
"x-counters-per-source": {
@@ -408,7 +438,7 @@
"sumDuration": 9.53e-7
},
"x-last-seen-ts": 1567750582.00,
"x-sample-entry": 0
"x-sample-entry": 18
},
"parameters": [
{
@@ -436,7 +466,8 @@
"example #4": {
"value": "prefix-gibberish-afterwards"
}
}
},
"x-sample-entry": 19
}
]
},
@@ -452,8 +483,11 @@
"200": {
"description": "Successful call with status 200",
"content": {
"": {}
}
"": {
"x-sample-entry": 15
}
},
"x-sample-entry": 15
}
},
"x-counters-per-source": {
@@ -475,7 +509,7 @@
"sumDuration": 0
},
"x-last-seen-ts": 1567750582.00,
"x-sample-entry": 0
"x-sample-entry": 15
},
"parameters": [
{
@@ -503,7 +537,8 @@
"example #4": {
"value": "prefix-gibberish-afterwards"
}
}
},
"x-sample-entry": 19
}
]
},
@@ -519,8 +554,11 @@
"200": {
"description": "Successful call with status 200",
"content": {
"": {}
}
"": {
"x-sample-entry": 16
}
},
"x-sample-entry": 16
}
},
"x-counters-per-source": {
@@ -542,7 +580,7 @@
"sumDuration": 0
},
"x-last-seen-ts": 1567750582.00,
"x-sample-entry": 0
"x-sample-entry": 16
},
"parameters": [
{
@@ -570,7 +608,8 @@
"example #4": {
"value": "prefix-gibberish-afterwards"
}
}
},
"x-sample-entry": 19
}
]
},
@@ -586,8 +625,11 @@
"200": {
"description": "Successful call with status 200",
"content": {
"": {}
}
"": {
"x-sample-entry": 19
}
},
"x-sample-entry": 19
}
},
"x-counters-per-source": {
@@ -609,7 +651,7 @@
"sumDuration": 0
},
"x-last-seen-ts": 1567750582.00,
"x-sample-entry": 0
"x-sample-entry": 19
},
"parameters": [
{
@@ -624,7 +666,8 @@
"example #0": {
"value": "23421"
}
}
},
"x-sample-entry": 19
},
{
"name": "parampatternId",
@@ -651,7 +694,8 @@
"example #4": {
"value": "prefix-gibberish-afterwards"
}
}
},
"x-sample-entry": 19
}
]
},
@@ -665,9 +709,11 @@
"description": "Successful call with status 200",
"content": {
"application/json": {
"example": null
"example": null,
"x-sample-entry": 3
}
}
},
"x-sample-entry": 3
}
},
"x-counters-per-source": {
@@ -689,7 +735,7 @@
"sumDuration": 0
},
"x-last-seen-ts": 1567750579.74,
"x-sample-entry": 0
"x-sample-entry": 3
},
"parameters": [
{
@@ -707,7 +753,8 @@
"example #1": {
"value": "<UUID4>"
}
}
},
"x-sample-entry": 3
}
]
},
@@ -720,8 +767,11 @@
"200": {
"description": "Successful call with status 200",
"content": {
"text/html": {}
}
"text/html": {
"x-sample-entry": 1
}
},
"x-sample-entry": 1
}
},
"x-counters-per-source": {
@@ -743,7 +793,7 @@
"sumDuration": 0
},
"x-last-seen-ts": 1567750483.86,
"x-sample-entry": 0
"x-sample-entry": 1
},
"parameters": [
{
@@ -761,7 +811,8 @@
"example #1": {
"value": "<UUID4>"
}
}
},
"x-sample-entry": 3
}
]
},
@@ -775,9 +826,11 @@
"description": "Successful call with status 200",
"content": {
"application/json": {
"example": null
"example": null,
"x-sample-entry": 2
}
}
},
"x-sample-entry": 2
}
},
"x-counters-per-source": {
@@ -799,7 +852,7 @@
"sumDuration": 0
},
"x-last-seen-ts": 1567750578.74,
"x-sample-entry": 0
"x-sample-entry": 2
},
"parameters": [
{
@@ -817,7 +870,8 @@
"example #1": {
"value": "<UUID4>"
}
}
},
"x-sample-entry": 3
}
]
}

View File

@@ -20,7 +20,7 @@ type Node struct {
children []*Node
}
func (n *Node) getOrSet(path NodePath, existingPathObj *openapi.PathObj) (node *Node) {
func (n *Node) getOrSet(path NodePath, existingPathObj *openapi.PathObj, sampleId uint) (node *Node) {
if existingPathObj == nil {
panic("Invalid function call")
}
@@ -70,6 +70,10 @@ func (n *Node) getOrSet(path NodePath, existingPathObj *openapi.PathObj) (node *
}
}
if node.pathParam != nil {
setSampleID(&node.pathParam.Extensions, sampleId)
}
// add example if it's a gibberish chunk
if node.pathParam != nil && !chunkIsParam {
exmp := &node.pathParam.Examples
@@ -85,7 +89,7 @@ func (n *Node) getOrSet(path NodePath, existingPathObj *openapi.PathObj) (node *
// TODO: eat up trailing slash, in a smart way: node.pathObj!=nil && path[1]==""
if len(path) > 1 {
return node.getOrSet(path[1:], existingPathObj)
return node.getOrSet(path[1:], existingPathObj, sampleId)
} else if node.pathObj == nil {
node.pathObj = existingPathObj
}

View File

@@ -20,10 +20,10 @@ func TestTree(t *testing.T) {
}
tree := new(Node)
for _, tc := range testCases {
for i, tc := range testCases {
split := strings.Split(tc.inp, "/")
pathObj := new(openapi.PathObj)
node := tree.getOrSet(split, pathObj)
node := tree.getOrSet(split, pathObj, uint(i))
fillPathParams(node, pathObj)

View File

@@ -115,7 +115,7 @@ type nvParams struct {
GeneralizeName func(name string) string
}
func handleNameVals(gw nvParams, params **openapi.ParameterList, checkIgnore bool) {
func handleNameVals(gw nvParams, params **openapi.ParameterList, checkIgnore bool, sampleId uint) {
visited := map[string]*openapi.ParameterObj{}
for _, pair := range gw.Pairs {
if (checkIgnore && gw.IsIgnored(pair.Name)) || pair.Name == "" {
@@ -137,6 +137,8 @@ func handleNameVals(gw nvParams, params **openapi.ParameterList, checkIgnore boo
logger.Log.Warningf("Failed to add example to a parameter: %s", err)
}
visited[nameGeneral] = param
setSampleID(&param.Extensions, sampleId)
}
// maintain "required" flag
@@ -474,3 +476,15 @@ func intersectSliceWithMap(required []string, names map[string]struct{}) []strin
}
return required
}
func setSampleID(extensions *openapi.Extensions, id uint) {
if id > 0 {
if *extensions == nil {
*extensions = openapi.Extensions{}
}
err := (extensions).SetExtension(SampleId, id)
if err != nil {
logger.Log.Warningf("Failed to set sample ID: %s", err)
}
}
}
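
setSampleID allocates the Extensions map lazily behind a pointer and skips id 0 entirely, which is why the "x-sample-entry": 0 placeholders in the test artifact above turn into real entry ids. A tiny runnable sketch of the same guard, with hypothetical names (Extensions stands in for openapi.Extensions):

package main

import "fmt"

// Extensions stands in for openapi.Extensions in this sketch.
type Extensions map[string]interface{}

// setID mirrors setSampleID's two guards: ignore id 0, and
// allocate the map behind the pointer only when first needed.
func setID(ext *Extensions, id uint) {
	if id == 0 {
		return
	}
	if *ext == nil {
		*ext = Extensions{}
	}
	(*ext)["x-sample-entry"] = id
}

func main() {
	var ext Extensions
	setID(&ext, 0)
	fmt.Println(ext) // map[]: id 0 leaves the map untouched
	setID(&ext, 14)
	fmt.Println(ext) // map[x-sample-entry:14]
}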

View File

@@ -112,7 +112,7 @@ func UpdateTapTargets(newTapTargets []v1.Pod) {
tapTargets = newTapTargets
packetSourceManager.UpdatePods(tapTargets)
packetSourceManager.UpdatePods(tapTargets, !*nodefrag, mainPacketInputChan)
if tlsTapperInstance != nil {
if err := tlstapper.UpdateTapTargets(tlsTapperInstance, &tapTargets, *procfs); err != nil {
@@ -198,12 +198,8 @@ func initializePacketSources() error {
}
var err error
if packetSourceManager, err = source.NewPacketSourceManager(*procfs, *fname, *iface, *servicemesh, tapTargets, behaviour); err != nil {
return err
} else {
packetSourceManager.ReadPackets(!*nodefrag, mainPacketInputChan)
return nil
}
packetSourceManager, err = source.NewPacketSourceManager(*procfs, *fname, *iface, *servicemesh, tapTargets, behaviour, !*nodefrag, mainPacketInputChan)
return err
}
func initializePassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem) (*tcpStreamMap, *tcpAssembler) {
@@ -259,9 +255,10 @@ func startPassiveTapper(streamsMap *tcpStreamMap, assembler *tcpAssembler) {
func startTlsTapper(extension *api.Extension, outputItems chan *api.OutputChannelItem, options *api.TrafficFilteringOptions) *tlstapper.TlsTapper {
tls := tlstapper.TlsTapper{}
tlsPerfBufferSize := os.Getpagesize() * 100
chunksBufferSize := os.Getpagesize() * 100
logBufferSize := os.Getpagesize()
if err := tls.Init(tlsPerfBufferSize, *procfs, extension); err != nil {
if err := tls.Init(chunksBufferSize, logBufferSize, *procfs, extension); err != nil {
tlstapper.LogError(err)
return nil
}
@@ -285,6 +282,7 @@ func startTlsTapper(extension *api.Extension, outputItems chan *api.OutputChanne
OutputChannel: outputItems,
}
go tls.PollForLogging()
go tls.Poll(emitter, options)
return &tls

View File

@@ -24,7 +24,7 @@ type PacketSourceManager struct {
}
func NewPacketSourceManager(procfs string, filename string, interfaceName string,
mtls bool, pods []v1.Pod, behaviour TcpPacketSourceBehaviour) (*PacketSourceManager, error) {
mtls bool, pods []v1.Pod, behaviour TcpPacketSourceBehaviour, ipdefrag bool, packets chan<- TcpPacketInfo) (*PacketSourceManager, error) {
hostSource, err := newHostPacketSource(filename, interfaceName, behaviour)
if err != nil {
return nil, err
@@ -43,7 +43,7 @@ func NewPacketSourceManager(procfs string, filename string, interfaceName string
behaviour: behaviour,
}
sourceManager.UpdatePods(pods)
go hostSource.readPackets(ipdefrag, packets)
return sourceManager, nil
}
@@ -64,16 +64,16 @@ func newHostPacketSource(filename string, interfaceName string,
return source, nil
}
func (m *PacketSourceManager) UpdatePods(pods []v1.Pod) {
func (m *PacketSourceManager) UpdatePods(pods []v1.Pod, ipdefrag bool, packets chan<- TcpPacketInfo) {
if m.config.mtls {
m.updateMtlsPods(m.config.procfs, pods, m.config.interfaceName, m.config.behaviour)
m.updateMtlsPods(m.config.procfs, pods, m.config.interfaceName, m.config.behaviour, ipdefrag, packets)
}
m.setBPFFilter(pods)
}
func (m *PacketSourceManager) updateMtlsPods(procfs string, pods []v1.Pod,
interfaceName string, behaviour TcpPacketSourceBehaviour) {
interfaceName string, behaviour TcpPacketSourceBehaviour, ipdefrag bool, packets chan<- TcpPacketInfo) {
relevantPids := m.getRelevantPids(procfs, pods)
logger.Log.Infof("Updating mtls pods (new: %v) (current: %v)", relevantPids, m.sources)
@@ -90,6 +90,7 @@ func (m *PacketSourceManager) updateMtlsPods(procfs string, pods []v1.Pod,
source, err := newNetnsPacketSource(procfs, pid, interfaceName, behaviour)
if err == nil {
go source.readPackets(ipdefrag, packets)
m.sources[pid] = source
}
}
@@ -153,12 +154,6 @@ func (m *PacketSourceManager) setBPFFilter(pods []v1.Pod) {
}
}
func (m *PacketSourceManager) ReadPackets(ipdefrag bool, packets chan<- TcpPacketInfo) {
for _, src := range m.sources {
go src.readPackets(ipdefrag, packets)
}
}
func (m *PacketSourceManager) Close() {
for _, src := range m.sources {
src.close()

View File

@@ -7,8 +7,12 @@ Copyright (C) UP9 Inc.
#include "include/headers.h"
#include "include/util.h"
#include "include/maps.h"
#include "include/log.h"
#include "include/logger_messages.h"
#include "include/pids.h"
#define IPV4_ADDR_LEN (16)
struct accept_info {
__u64* sockaddr;
__u32* addrlen;
@@ -41,9 +45,7 @@ void sys_enter_accept4(struct sys_enter_accept4_ctx *ctx) {
long err = bpf_map_update_elem(&accept_syscall_context, &id, &info, BPF_ANY);
if (err != 0) {
char msg[] = "Error putting accept info (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
return;
log_error(ctx, LOG_ERROR_PUTTING_ACCEPT_INFO, id, err, 0l);
}
}
@@ -70,6 +72,7 @@ void sys_exit_accept4(struct sys_exit_accept4_ctx *ctx) {
struct accept_info *infoPtr = bpf_map_lookup_elem(&accept_syscall_context, &id);
if (infoPtr == NULL) {
log_error(ctx, LOG_ERROR_GETTING_ACCEPT_INFO, id, 0l, 0l);
return;
}
@@ -79,15 +82,14 @@ void sys_exit_accept4(struct sys_exit_accept4_ctx *ctx) {
bpf_map_delete_elem(&accept_syscall_context, &id);
if (err != 0) {
char msg[] = "Error reading accept info from accept syscall (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
log_error(ctx, LOG_ERROR_READING_ACCEPT_INFO, id, err, 0l);
return;
}
__u32 addrlen;
bpf_probe_read(&addrlen, sizeof(__u32), info.addrlen);
if (addrlen != 16) {
if (addrlen != IPV4_ADDR_LEN) {
// Currently only ipv4 is supported, see linux-src/include/linux/inet.h
return;
}
@@ -105,9 +107,7 @@ void sys_exit_accept4(struct sys_exit_accept4_ctx *ctx) {
err = bpf_map_update_elem(&file_descriptor_to_ipv4, &key, &fdinfo, BPF_ANY);
if (err != 0) {
char msg[] = "Error putting fd to address mapping from accept (key: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), key, err);
return;
log_error(ctx, LOG_ERROR_PUTTING_FD_MAPPING, id, err, ORIGIN_SYS_EXIT_ACCEPT4_CODE);
}
}
@@ -145,9 +145,7 @@ void sys_enter_connect(struct sys_enter_connect_ctx *ctx) {
long err = bpf_map_update_elem(&connect_syscall_info, &id, &info, BPF_ANY);
if (err != 0) {
char msg[] = "Error putting connect info (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
return;
log_error(ctx, LOG_ERROR_PUTTING_CONNECT_INFO, id, err, 0l);
}
}
@@ -176,6 +174,7 @@ void sys_exit_connect(struct sys_exit_connect_ctx *ctx) {
struct connect_info *infoPtr = bpf_map_lookup_elem(&connect_syscall_info, &id);
if (infoPtr == NULL) {
log_error(ctx, LOG_ERROR_GETTING_CONNECT_INFO, id, 0l, 0l);
return;
}
@@ -185,12 +184,11 @@ void sys_exit_connect(struct sys_exit_connect_ctx *ctx) {
bpf_map_delete_elem(&connect_syscall_info, &id);
if (err != 0) {
char msg[] = "Error reading connect info from connect syscall (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
log_error(ctx, LOG_ERROR_READING_CONNECT_INFO, id, err, 0l);
return;
}
if (info.addrlen != 16) {
if (info.addrlen != IPV4_ADDR_LEN) {
// Currently only ipv4 is supported, see linux-src/include/linux/inet.h
return;
}
@@ -208,8 +206,6 @@ void sys_exit_connect(struct sys_exit_connect_ctx *ctx) {
err = bpf_map_update_elem(&file_descriptor_to_ipv4, &key, &fdinfo, BPF_ANY);
if (err != 0) {
char msg[] = "Error putting fd to address mapping from connect (key: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), key, err);
return;
log_error(ctx, LOG_ERROR_PUTTING_FD_MAPPING, id, err, ORIGIN_SYS_EXIT_CONNECT_CODE);
}
}

View File

@@ -7,6 +7,8 @@ Copyright (C) UP9 Inc.
#include "include/headers.h"
#include "include/util.h"
#include "include/maps.h"
#include "include/log.h"
#include "include/logger_messages.h"
#include "include/pids.h"
struct sys_enter_read_ctx {
@@ -36,8 +38,7 @@ void sys_enter_read(struct sys_enter_read_ctx *ctx) {
long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);
if (err != 0) {
char msg[] = "Error reading read info from read syscall (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
log_error(ctx, LOG_ERROR_READING_SSL_CONTEXT, id, err, ORIGIN_SYS_ENTER_READ_CODE);
return;
}
@@ -46,9 +47,7 @@ void sys_enter_read(struct sys_enter_read_ctx *ctx) {
err = bpf_map_update_elem(&ssl_read_context, &id, &info, BPF_ANY);
if (err != 0) {
char msg[] = "Error putting file descriptor from read syscall (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
return;
log_error(ctx, LOG_ERROR_PUTTING_FILE_DESCRIPTOR, id, err, ORIGIN_SYS_ENTER_READ_CODE);
}
}
@@ -79,8 +78,7 @@ void sys_enter_write(struct sys_enter_write_ctx *ctx) {
long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);
if (err != 0) {
char msg[] = "Error reading write context from write syscall (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
log_error(ctx, LOG_ERROR_READING_SSL_CONTEXT, id, err, ORIGIN_SYS_ENTER_WRITE_CODE);
return;
}
@@ -89,8 +87,6 @@ void sys_enter_write(struct sys_enter_write_ctx *ctx) {
err = bpf_map_update_elem(&ssl_write_context, &id, &info, BPF_ANY);
if (err != 0) {
char msg[] = "Error putting file descriptor from write syscall (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
return;
log_error(ctx, LOG_ERROR_PUTTING_FILE_DESCRIPTOR, id, err, ORIGIN_SYS_ENTER_WRITE_CODE);
}
}

View File

@@ -0,0 +1,79 @@
/*
Note: This file is licensed differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/
#ifndef __LOG__
#define __LOG__
// The same consts are defined in bpf_logger.go
//
#define LOG_LEVEL_ERROR (0)
#define LOG_LEVEL_INFO (1)
#define LOG_LEVEL_DEBUG (2)
// The same struct can be found in bpf_logger.go
//
// Be careful when editing: alignment and padding must be exactly the same in Go and C.
//
struct log_message {
__u32 level;
__u32 message_code;
__u64 arg1;
__u64 arg2;
__u64 arg3;
};
static __always_inline void log_error(void* ctx, __u16 message_code, __u64 arg1, __u64 arg2, __u64 arg3) {
struct log_message entry = {};
entry.level = LOG_LEVEL_ERROR;
entry.message_code = message_code;
entry.arg1 = arg1;
entry.arg2 = arg2;
entry.arg3 = arg3;
long err = bpf_perf_event_output(ctx, &log_buffer, BPF_F_CURRENT_CPU, &entry, sizeof(struct log_message));
if (err != 0) {
char msg[] = "Error writing log error to perf buffer - %ld";
bpf_trace_printk(msg, sizeof(msg), err);
}
}
static __always_inline void log_info(void* ctx, __u16 message_code, __u64 arg1, __u64 arg2, __u64 arg3) {
struct log_message entry = {};
entry.level = LOG_LEVEL_INFO;
entry.message_code = message_code;
entry.arg1 = arg1;
entry.arg2 = arg2;
entry.arg3 = arg3;
long err = bpf_perf_event_output(ctx, &log_buffer, BPF_F_CURRENT_CPU, &entry, sizeof(struct log_message));
if (err != 0) {
char msg[] = "Error writing log info to perf buffer - %ld";
bpf_trace_printk(msg, sizeof(msg), arg1, err);
}
}
static __always_inline void log_debug(void* ctx, __u16 message_code, __u64 arg1, __u64 arg2, __u64 arg3) {
struct log_message entry = {};
entry.level = LOG_LEVEL_DEBUG;
entry.message_code = message_code;
entry.arg1 = arg1;
entry.arg2 = arg2;
entry.arg3 = arg3;
long err = bpf_perf_event_output(ctx, &log_buffer, BPF_F_CURRENT_CPU, &entry, sizeof(struct log_message));
if (err != 0) {
char msg[] = "Error writing log debug to perf buffer - %ld";
bpf_trace_printk(msg, sizeof(msg), arg1, err);
}
}
#endif /* __LOG__ */
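
On the Go side, a perf reader drains log_buffer and decodes each record into the mirror of struct log_message (the header says that mirror lives in bpf_logger.go, which is not included in this excerpt). The layout is two __u32 fields followed by three __u64 fields, 32 bytes with no padding on common 64-bit targets. A hedged, stdlib-only sketch of the decode step; the Go field names are assumptions:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// logMessage mirrors struct log_message in log.h.
type logMessage struct {
	Level       uint32
	MessageCode uint32
	Arg1        uint64
	Arg2        uint64
	Arg3        uint64
}

// decodeLogMessage parses one raw perf-buffer record. eBPF structs use
// the kernel's native byte order: little-endian on amd64/arm64.
func decodeLogMessage(raw []byte) (logMessage, error) {
	var msg logMessage
	err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &msg)
	return msg, err
}

func main() {
	// A fabricated 32-byte record: level 0 (error),
	// code 13 (LOG_ERROR_PUTTING_FD_MAPPING), args 42, 7, 4.
	raw := make([]byte, 32)
	binary.LittleEndian.PutUint32(raw[0:], 0)
	binary.LittleEndian.PutUint32(raw[4:], 13)
	binary.LittleEndian.PutUint64(raw[8:], 42)
	binary.LittleEndian.PutUint64(raw[16:], 7)
	binary.LittleEndian.PutUint64(raw[24:], 4)
	msg, err := decodeLogMessage(raw)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", msg) // {Level:0 MessageCode:13 Arg1:42 Arg2:7 Arg3:4}
}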

View File

@@ -0,0 +1,42 @@
/*
Note: This file is licensed differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/
#ifndef __LOG_MESSAGES__
#define __LOG_MESSAGES__
// Must be synced with bpf_logger_messages.go
//
#define LOG_ERROR_READING_BYTES_COUNT (0)
#define LOG_ERROR_READING_FD_ADDRESS (1)
#define LOG_ERROR_READING_FROM_SSL_BUFFER (2)
#define LOG_ERROR_BUFFER_TOO_BIG (3)
#define LOG_ERROR_ALLOCATING_CHUNK (4)
#define LOG_ERROR_READING_SSL_CONTEXT (5)
#define LOG_ERROR_PUTTING_SSL_CONTEXT (6)
#define LOG_ERROR_GETTING_SSL_CONTEXT (7)
#define LOG_ERROR_MISSING_FILE_DESCRIPTOR (8)
#define LOG_ERROR_PUTTING_FILE_DESCRIPTOR (9)
#define LOG_ERROR_PUTTING_ACCEPT_INFO (10)
#define LOG_ERROR_GETTING_ACCEPT_INFO (11)
#define LOG_ERROR_READING_ACCEPT_INFO (12)
#define LOG_ERROR_PUTTING_FD_MAPPING (13)
#define LOG_ERROR_PUTTING_CONNECT_INFO (14)
#define LOG_ERROR_GETTING_CONNECT_INFO (15)
#define LOG_ERROR_READING_CONNECT_INFO (16)
// Sometimes we have the same error happening in different locations.
// In order to distinguish between them in the log, we add an
// extra number that identifies the location. The number can be anything,
// but do not give the same number to different origins.
//
#define ORIGIN_SSL_UPROBE_CODE (0l)
#define ORIGIN_SSL_URETPROBE_CODE (1l)
#define ORIGIN_SYS_ENTER_READ_CODE (2l)
#define ORIGIN_SYS_ENTER_WRITE_CODE (3l)
#define ORIGIN_SYS_EXIT_ACCEPT4_CODE (4l)
#define ORIGIN_SYS_EXIT_CONNECT_CODE (5l)
#endif /* __LOG_MESSAGES__ */

View File

@@ -70,5 +70,6 @@ BPF_LRU_HASH(ssl_write_context, __u64, struct ssl_info);
BPF_LRU_HASH(ssl_read_context, __u64, struct ssl_info);
BPF_HASH(file_descriptor_to_ipv4, __u64, struct fd_info);
BPF_PERF_OUTPUT(chunks_buffer);
BPF_PERF_OUTPUT(log_buffer);
#endif /* __MAPS__ */

View File

@@ -7,6 +7,8 @@ Copyright (C) UP9 Inc.
#include "include/headers.h"
#include "include/util.h"
#include "include/maps.h"
#include "include/log.h"
#include "include/logger_messages.h"
#include "include/pids.h"
// Heap-like area for eBPF programs - stack size limited to 512 bytes, we must use maps for bigger (chunk) objects.
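// (The "heap" map itself is declared in maps.h; output_ssl_chunk below takes
// its entry at index zero as scratch space for a struct tlsChunk.)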
@@ -39,15 +41,14 @@ static __always_inline int get_count_bytes(struct pt_regs *ctx, struct ssl_info*
long err = bpf_probe_read(&countBytes, sizeof(size_t), (void*) info->count_ptr);
if (err != 0) {
char msg[] = "Error reading bytes count of _ex (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
log_error(ctx, LOG_ERROR_READING_BYTES_COUNT, id, err, 0l);
return 0;
}
return countBytes;
}
static __always_inline void add_address_to_chunk(struct tlsChunk* chunk, __u64 id, __u32 fd) {
static __always_inline void add_address_to_chunk(struct pt_regs *ctx, struct tlsChunk* chunk, __u64 id, __u32 fd) {
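// ctx is threaded through so that the logging helpers can call
// bpf_perf_event_output, which requires the probe context.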
__u32 pid = id >> 32;
__u64 key = (__u64) pid << 32 | fd;
@@ -61,8 +62,7 @@ static __always_inline void add_address_to_chunk(struct tlsChunk* chunk, __u64 i
chunk->flags |= (fdinfo->flags & FLAGS_IS_CLIENT_BIT);
if (err != 0) {
char msg[] = "Error reading from fd address %ld - %ld";
bpf_trace_printk(msg, sizeof(msg), id, err);
log_error(ctx, LOG_ERROR_READING_FD_ADDRESS, id, err, 0l);
}
}
@@ -88,8 +88,7 @@ static __always_inline void send_chunk_part(struct pt_regs *ctx, __u8* buffer, _
}
if (err != 0) {
char msg[] = "Error reading from ssl buffer %ld - %ld";
bpf_trace_printk(msg, sizeof(msg), id, err);
log_error(ctx, LOG_ERROR_READING_FROM_SSL_BUFFER, id, err, 0l);
return;
}
@@ -101,8 +100,9 @@ static __always_inline void send_chunk(struct pt_regs *ctx, __u8* buffer, __u64
//
// https://lwn.net/Articles/794934/
//
// To compile for kernels older than 5.3, "#pragma unroll" must be added to this loop
// However, since we want to run on kernels older than 5.3, we use "#pragma unroll" anyway
//
#pragma unroll
for (int i = 0; i < MAX_CHUNKS_PER_OPERATION; i++) {
if (chunk->len <= (CHUNK_SIZE * i)) {
break;
@@ -120,8 +120,7 @@ static __always_inline void output_ssl_chunk(struct pt_regs *ctx, struct ssl_inf
}
if (countBytes > (CHUNK_SIZE * MAX_CHUNKS_PER_OPERATION)) {
char msg[] = "Buffer too big %d (id: %ld)";
bpf_trace_printk(msg, sizeof(msg), countBytes, id);
log_error(ctx, LOG_ERROR_BUFFER_TOO_BIG, id, countBytes, 0l);
return;
}
@@ -134,8 +133,7 @@ static __always_inline void output_ssl_chunk(struct pt_regs *ctx, struct ssl_inf
chunk = bpf_map_lookup_elem(&heap, &zero);
if (!chunk) {
char msg[] = "Unable to allocate chunk (id: %ld)";
bpf_trace_printk(msg, sizeof(msg), id);
log_error(ctx, LOG_ERROR_ALLOCATING_CHUNK, id, 0l, 0l);
return;
}
@@ -145,11 +143,11 @@ static __always_inline void output_ssl_chunk(struct pt_regs *ctx, struct ssl_inf
chunk->len = countBytes;
chunk->fd = info->fd;
add_address_to_chunk(chunk, id, chunk->fd);
add_address_to_chunk(ctx, chunk, id, chunk->fd);
send_chunk(ctx, info->buffer, id, chunk);
}
static __always_inline void ssl_uprobe(void* ssl, void* buffer, int num, struct bpf_map_def* map_fd, size_t *count_ptr) {
static __always_inline void ssl_uprobe(struct pt_regs *ctx, void* ssl, void* buffer, int num, struct bpf_map_def* map_fd, size_t *count_ptr) {
__u64 id = bpf_get_current_pid_tgid();
if (!should_tap(id >> 32)) {
@@ -166,8 +164,7 @@ static __always_inline void ssl_uprobe(void* ssl, void* buffer, int num, struct
long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);
if (err != 0) {
char msg[] = "Error reading old ssl context (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
log_error(ctx, LOG_ERROR_READING_SSL_CONTEXT, id, err, ORIGIN_SSL_UPROBE_CODE);
}
if ((bpf_ktime_get_ns() - info.created_at_nano) > SSL_INFO_MAX_TTL_NANO) {
@@ -184,8 +181,7 @@ static __always_inline void ssl_uprobe(void* ssl, void* buffer, int num, struct
long err = bpf_map_update_elem(map_fd, &id, &info, BPF_ANY);
if (err != 0) {
char msg[] = "Error putting ssl context (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
log_error(ctx, LOG_ERROR_PUTTING_SSL_CONTEXT, id, err, 0l);
}
}
@@ -199,8 +195,7 @@ static __always_inline void ssl_uretprobe(struct pt_regs *ctx, struct bpf_map_de
struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &id);
if (infoPtr == NULL) {
char msg[] = "Error getting ssl context info (id: %ld)";
bpf_trace_printk(msg, sizeof(msg), id);
log_error(ctx, LOG_ERROR_GETTING_SSL_CONTEXT, id, 0l, 0l);
return;
}
@@ -220,14 +215,12 @@ static __always_inline void ssl_uretprobe(struct pt_regs *ctx, struct bpf_map_de
// bpf_map_delete_elem(map_fd, &id);
if (err != 0) {
char msg[] = "Error reading ssl context (id: %ld) (err: %ld)";
bpf_trace_printk(msg, sizeof(msg), id, err);
log_error(ctx, LOG_ERROR_READING_SSL_CONTEXT, id, err, ORIGIN_SSL_URETPROBE_CODE);
return;
}
if (info.fd == -1) {
char msg[] = "File descriptor is missing from ssl info (id: %ld)";
bpf_trace_printk(msg, sizeof(msg), id);
log_error(ctx, LOG_ERROR_MISSING_FILE_DESCRIPTOR, id, 0l, 0l);
return;
}
@@ -236,7 +229,7 @@ static __always_inline void ssl_uretprobe(struct pt_regs *ctx, struct bpf_map_de
SEC("uprobe/ssl_write")
void BPF_KPROBE(ssl_write, void* ssl, void* buffer, int num) {
ssl_uprobe(ssl, buffer, num, &ssl_write_context, 0);
ssl_uprobe(ctx, ssl, buffer, num, &ssl_write_context, 0);
}
SEC("uretprobe/ssl_write")
@@ -246,7 +239,7 @@ void BPF_KPROBE(ssl_ret_write) {
SEC("uprobe/ssl_read")
void BPF_KPROBE(ssl_read, void* ssl, void* buffer, int num) {
ssl_uprobe(ssl, buffer, num, &ssl_read_context, 0);
ssl_uprobe(ctx, ssl, buffer, num, &ssl_read_context, 0);
}
SEC("uretprobe/ssl_read")
@@ -256,7 +249,7 @@ void BPF_KPROBE(ssl_ret_read) {
SEC("uprobe/ssl_write_ex")
void BPF_KPROBE(ssl_write_ex, void* ssl, void* buffer, size_t num, size_t *written) {
ssl_uprobe(ssl, buffer, num, &ssl_write_context, written);
ssl_uprobe(ctx, ssl, buffer, num, &ssl_write_context, written);
}
SEC("uretprobe/ssl_write_ex")
@@ -266,7 +259,7 @@ void BPF_KPROBE(ssl_ret_write_ex) {
SEC("uprobe/ssl_read_ex")
void BPF_KPROBE(ssl_read_ex, void* ssl, void* buffer, size_t num, size_t *readbytes) {
ssl_uprobe(ssl, buffer, num, &ssl_read_context, readbytes);
ssl_uprobe(ctx, ssl, buffer, num, &ssl_read_context, readbytes);
}
SEC("uretprobe/ssl_read_ex")

View File

@@ -7,6 +7,8 @@ Copyright (C) UP9 Inc.
#include "include/headers.h"
#include "include/util.h"
#include "include/maps.h"
#include "include/log.h"
#include "include/logger_messages.h"
#include "include/pids.h"
// To avoid multiple .o files

tap/tlstapper/bpf_logger.go Normal file
View File

@@ -0,0 +1,116 @@
package tlstapper
import (
"bytes"
"encoding/binary"
"strings"
"github.com/cilium/ebpf/perf"
"github.com/go-errors/errors"
"github.com/up9inc/mizu/shared/logger"
)
const logPrefix = "[bpf] "
// The same consts are defined in log.h
//
const logLevelError = 0
const logLevelInfo = 1
const logLevelDebug = 2
type logMessage struct {
Level uint32
MessageCode uint32
Arg1 uint64
Arg2 uint64
Arg3 uint64
}
type bpfLogger struct {
logReader *perf.Reader
}
func newBpfLogger() *bpfLogger {
return &bpfLogger{
logReader: nil,
}
}
func (p *bpfLogger) init(bpfObjects *tlsTapperObjects, bufferSize int) error {
var err error
p.logReader, err = perf.NewReader(bpfObjects.LogBuffer, bufferSize)
if err != nil {
return errors.Wrap(err, 0)
}
return nil
}
func (p *bpfLogger) close() error {
return p.logReader.Close()
}
func (p *bpfLogger) poll() {
logger.Log.Infof("Start polling for bpf logs")
for {
record, err := p.logReader.Read()
if err != nil {
if errors.Is(err, perf.ErrClosed) {
return
}
LogError(errors.Errorf("Error reading from bpf logger perf buffer, aboring logger! %w", err))
return
}
if record.LostSamples != 0 {
logger.Log.Infof("Log buffer is full, dropped %d logs", record.LostSamples)
continue
}
buffer := bytes.NewReader(record.RawSample)
var log logMessage
if err := binary.Read(buffer, binary.LittleEndian, &log); err != nil {
LogError(errors.Errorf("Error parsing log %v", err))
continue
}
p.log(&log)
}
}
func (p *bpfLogger) log(log *logMessage) {
if int(log.MessageCode) >= len(bpfLogMessages) {
logger.Log.Errorf("Unknown message code from bpf logger %d", log.MessageCode)
return
}
format := bpfLogMessages[log.MessageCode]
tokensCount := strings.Count(format, "%")
if tokensCount == 0 {
p.logLevel(log.Level, format)
} else if tokensCount == 1 {
p.logLevel(log.Level, format, log.Arg1)
} else if tokensCount == 2 {
p.logLevel(log.Level, format, log.Arg1, log.Arg2)
} else if tokensCount == 3 {
p.logLevel(log.Level, format, log.Arg1, log.Arg2, log.Arg3)
}
}
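// Example: message code 5, "[%d] Unable to read ssl context [err: %d] [origin: %d]",
// contains three verbs, so it is rendered with Arg1, Arg2 and Arg3.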
func (p *bpfLogger) logLevel(level uint32, format string, args ...interface{}) {
if level == logLevelError {
logger.Log.Errorf(logPrefix+format, args...)
} else if level == logLevelInfo {
logger.Log.Infof(logPrefix+format, args...)
} else if level == logLevelDebug {
logger.Log.Debugf(logPrefix+format, args...)
}
}
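For orientation, the lifecycle of this type is: construct, init against the loaded eBPF objects, poll from a dedicated goroutine, and close on shutdown. A minimal sketch of a call site; the helper name startBpfLogging and the 4096-byte buffer size are illustrative, not part of this change:

func startBpfLogging(objs *tlsTapperObjects) (*bpfLogger, error) {
	bpfLog := newBpfLogger()
	if err := bpfLog.init(objs, 4096); err != nil {
		return nil, err // could not open a perf reader on the log_buffer map
	}
	go bpfLog.poll() // returns once the reader is closed
	return bpfLog, nil
}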

View File

@@ -0,0 +1,25 @@
package tlstapper
// Must be synced with logger_messages.h
//
var bpfLogMessages = []string{
/*0000*/ "[%d] Unable to read bytes count from _ex methods [err: %d]",
/*0001*/ "[%d] Unable to read ipv4 address [err: %d]",
/*0002*/ "[%d] Unable to read ssl buffer [err: %d]",
/*0003*/ "[%d] Buffer is too big [size: %d]",
/*0004*/ "[%d] Unable to allocate chunk in bpf heap",
/*0005*/ "[%d] Unable to read ssl context [err: %d] [origin: %d]",
/*0006*/ "[%d] Unable to put ssl context [err: %d]",
/*0007*/ "[%d] Unable to get ssl context",
/*0008*/ "[%d] File descriptor is missing for tls chunk",
/*0009*/ "[%d] Unable to put file descriptor [err: %d] [origin: %d]",
/*0010*/ "[%d] Unable to put accept info [err: %d]",
/*0011*/ "[%d] Unable to get accept info",
/*0012*/ "[%d] Unable to read accept info [err: %d]",
/*0013*/ "[%d] Unable to put file descriptor to address mapping [err: %d] [origin: %d]",
/*0014*/ "[%d] Unable to put connect info [err: %d]",
/*0015*/ "[%d] Unable to get connect info",
/*0016*/ "[%d] Unable to read connect info [err: %d]",
}
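Since the codes in logger_messages.h index directly into this slice, drift between the two files would print the wrong message or hit the bounds check in bpfLogger.log. A hedged sketch of a guard test one could add; the Go-side constant below is hypothetical, mirroring LOG_ERROR_READING_CONNECT_INFO from the C header:

package tlstapper

import "testing"

// Hypothetical mirror of LOG_ERROR_READING_CONNECT_INFO in logger_messages.h.
const logErrorReadingConnectInfo = 16

func TestBpfLogMessagesMatchHeader(t *testing.T) {
	// The highest message code must index the last entry of the slice.
	if got, want := len(bpfLogMessages), logErrorReadingConnectInfo+1; got != want {
		t.Fatalf("bpfLogMessages has %d entries, want %d", got, want)
	}
}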

View File

@@ -8,6 +8,8 @@ import (
"sync"
)
const GLOBAL_TAP_PID = 0
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go tlsTapper bpf/tls_tapper.c -- -O2 -g -D__TARGET_ARCH_x86
type TlsTapper struct {
@@ -15,11 +17,12 @@ type TlsTapper struct {
syscallHooks syscallHooks
sslHooksStructs []sslHooks
poller *tlsPoller
bpfLogger *bpfLogger
registeredPids sync.Map
}
func (t *TlsTapper) Init(bufferSize int, procfs string, extension *api.Extension) error {
logger.Log.Infof("Initializing tls tapper (bufferSize: %v)", bufferSize)
func (t *TlsTapper) Init(chunksBufferSize int, logBufferSize int, procfs string, extension *api.Extension) error {
logger.Log.Infof("Initializing tls tapper (chunksSize: %d) (logSize: %d)", chunksBufferSize, logBufferSize)
if err := setupRLimit(); err != nil {
return err
@@ -37,16 +40,25 @@ func (t *TlsTapper) Init(bufferSize int, procfs string, extension *api.Extension
t.sslHooksStructs = make([]sslHooks, 0)
t.bpfLogger = newBpfLogger()
if err := t.bpfLogger.init(&t.bpfObjects, logBufferSize); err != nil {
return err
}
t.poller = newTlsPoller(t, extension, procfs)
return t.poller.init(&t.bpfObjects, bufferSize)
return t.poller.init(&t.bpfObjects, chunksBufferSize)
}
func (t *TlsTapper) Poll(emitter api.Emitter, options *api.TrafficFilteringOptions) {
t.poller.poll(emitter, options)
}
func (t *TlsTapper) PollForLogging() {
t.bpfLogger.poll()
}
func (t *TlsTapper) GlobalTap(sslLibrary string) error {
return t.tapPid(0, sslLibrary)
return t.tapPid(GLOBAL_TAP_PID, sslLibrary)
}
func (t *TlsTapper) AddPid(procfs string, pid uint32) error {
@@ -74,7 +86,12 @@ func (t *TlsTapper) RemovePid(pid uint32) error {
func (t *TlsTapper) ClearPids() {
t.registeredPids.Range(func(key, v interface{}) bool {
if err := t.RemovePid(key.(uint32)); err != nil {
pid := key.(uint32)
if pid == GLOBAL_TAP_PID {
return true
}
if err := t.RemovePid(pid); err != nil {
LogError(err)
}
t.registeredPids.Delete(key)
@@ -95,6 +112,10 @@ func (t *TlsTapper) Close() []error {
errors = append(errors, sslHooks.close()...)
}
if err := t.bpfLogger.close(); err != nil {
errors = append(errors, err)
}
if err := t.poller.close(); err != nil {
errors = append(errors, err)
}
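Taken together, a caller now supplies both buffer sizes to Init and runs the log poller next to the traffic poller. A hedged sketch of the wiring; the helper name, buffer sizes, and the extension/emitter/options values are illustrative assumptions:

func startTapper(extension *api.Extension, emitter api.Emitter, options *api.TrafficFilteringOptions) (*TlsTapper, error) {
	tapper := &TlsTapper{}
	if err := tapper.Init(4096, 4096, "/proc", extension); err != nil {
		return nil, err
	}
	go tapper.PollForLogging() // drain eBPF log records into the mizu logger
	go tapper.Poll(emitter, options)
	return tapper, nil
}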

View File

@@ -78,6 +78,7 @@ type tlsTapperMapSpecs struct {
ConnectSyscallInfo *ebpf.MapSpec `ebpf:"connect_syscall_info"`
FileDescriptorToIpv4 *ebpf.MapSpec `ebpf:"file_descriptor_to_ipv4"`
Heap *ebpf.MapSpec `ebpf:"heap"`
LogBuffer *ebpf.MapSpec `ebpf:"log_buffer"`
PidsMap *ebpf.MapSpec `ebpf:"pids_map"`
SslReadContext *ebpf.MapSpec `ebpf:"ssl_read_context"`
SslWriteContext *ebpf.MapSpec `ebpf:"ssl_write_context"`
@@ -107,6 +108,7 @@ type tlsTapperMaps struct {
ConnectSyscallInfo *ebpf.Map `ebpf:"connect_syscall_info"`
FileDescriptorToIpv4 *ebpf.Map `ebpf:"file_descriptor_to_ipv4"`
Heap *ebpf.Map `ebpf:"heap"`
LogBuffer *ebpf.Map `ebpf:"log_buffer"`
PidsMap *ebpf.Map `ebpf:"pids_map"`
SslReadContext *ebpf.Map `ebpf:"ssl_read_context"`
SslWriteContext *ebpf.Map `ebpf:"ssl_write_context"`
@@ -119,6 +121,7 @@ func (m *tlsTapperMaps) Close() error {
m.ConnectSyscallInfo,
m.FileDescriptorToIpv4,
m.Heap,
m.LogBuffer,
m.PidsMap,
m.SslReadContext,
m.SslWriteContext,

Binary file not shown.

View File

@@ -78,6 +78,7 @@ type tlsTapperMapSpecs struct {
ConnectSyscallInfo *ebpf.MapSpec `ebpf:"connect_syscall_info"`
FileDescriptorToIpv4 *ebpf.MapSpec `ebpf:"file_descriptor_to_ipv4"`
Heap *ebpf.MapSpec `ebpf:"heap"`
LogBuffer *ebpf.MapSpec `ebpf:"log_buffer"`
PidsMap *ebpf.MapSpec `ebpf:"pids_map"`
SslReadContext *ebpf.MapSpec `ebpf:"ssl_read_context"`
SslWriteContext *ebpf.MapSpec `ebpf:"ssl_write_context"`
@@ -107,6 +108,7 @@ type tlsTapperMaps struct {
ConnectSyscallInfo *ebpf.Map `ebpf:"connect_syscall_info"`
FileDescriptorToIpv4 *ebpf.Map `ebpf:"file_descriptor_to_ipv4"`
Heap *ebpf.Map `ebpf:"heap"`
LogBuffer *ebpf.Map `ebpf:"log_buffer"`
PidsMap *ebpf.Map `ebpf:"pids_map"`
SslReadContext *ebpf.Map `ebpf:"ssl_read_context"`
SslWriteContext *ebpf.Map `ebpf:"ssl_write_context"`
@@ -119,6 +121,7 @@ func (m *tlsTapperMaps) Close() error {
m.ConnectSyscallInfo,
m.FileDescriptorToIpv4,
m.Heap,
m.LogBuffer,
m.PidsMap,
m.SslReadContext,
m.SslWriteContext,

Binary file not shown.

View File

@@ -1,12 +1,12 @@
{
"name": "@up9/mizu-common",
"version": "1.0.135",
"version": "0.0.0",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "@up9/mizu-common",
"version": "1.0.135",
"version": "0.0.0",
"license": "MIT",
"dependencies": {
"@craco/craco": "^6.4.3",

View File

@@ -1,6 +1,6 @@
{
"name": "@up9/mizu-common",
"version": "1.0.144",
"version": "0.0.0",
"description": "Made with create-react-library",
"author": "",
"license": "MIT",
@@ -90,4 +90,4 @@
"files": [
"dist"
]
}
}

View File

@@ -110,8 +110,27 @@ export const TrafficViewer: React.FC<TrafficViewerProps> = ({ setAnalyzeStatus,
isCloseWebSocket && closeWebSocket()
}, [isCloseWebSocket])
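// Whenever the webSocketUrl prop changes, drop the current connection and
// reopen it against the new URL (see reopenConnection below).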
useEffect(() => {
reopenConnection()
}, [webSocketUrl])
const ws = useRef(null);
const openEmptyWebSocket = () => {
if (query) {
openWebSocket(`(${query}) and leftOff(-1)`, true);
} else {
openWebSocket(`leftOff(-1)`, true);
}
}
const closeWebSocket = () => {
if (ws?.current?.readyState === WebSocket.OPEN) {
ws.current.close();
return true;
}
return false;
}
const listEntry = useRef(null);
const openWebSocket = (query: string, resetEntries: boolean) => {
if (resetEntries) {
@@ -153,12 +172,6 @@ export const TrafficViewer: React.FC<TrafficViewerProps> = ({ setAnalyzeStatus,
}, 500)
}
const closeWebSocket = () => {
if (ws?.current?.readyState === WebSocket.OPEN) {
ws.current.close();
}
}
if (ws.current) {
ws.current.onmessage = (e) => {
if (!e?.data) return;
@@ -216,8 +229,7 @@ export const TrafficViewer: React.FC<TrafficViewerProps> = ({ setAnalyzeStatus,
useEffect(() => {
setTrafficViewerApiState({ ...trafficViewerApiProp, webSocket: { close: closeWebSocket } });
(async () => {
openWebSocket("leftOff(-1)", true);
try {
try{
const tapStatusResponse = await trafficViewerApiProp.tapStatus();
setTappingStatus(tapStatusResponse);
if (setAnalyzeStatus) {
@@ -232,19 +244,18 @@ export const TrafficViewer: React.FC<TrafficViewerProps> = ({ setAnalyzeStatus,
}, []);
const toggleConnection = () => {
if (ws?.current?.readyState === WebSocket.OPEN) {
ws?.current?.close();
} else {
if (query) {
openWebSocket(`(${query}) and leftOff(-1)`, true);
} else {
openWebSocket(`leftOff(-1)`, true);
}
if (!closeWebSocket()) {
openEmptyWebSocket();
scrollableRef.current.jumpToBottom();
setIsSnappedToBottom(true);
}
}
const reopenConnection = async () => {
closeWebSocket()
openEmptyWebSocket();
}
useEffect(() => {
return () => {
ws.current.close();

View File

@@ -13,7 +13,7 @@
"@types/jest": "^26.0.22",
"@types/node": "^12.20.10",
"@uiw/react-textarea-code-editor": "^1.4.12",
"@up9/mizu-common": "1.0.144",
"@up9/mizu-common": "file:up9-mizu-common-0.0.0.tgz",
"axios": "^0.25.0",
"core-js": "^3.20.2",
"craco-babel-loader": "^1.0.3",
@@ -75,4 +75,4 @@
"last 1 safari version"
]
}
}
}

View File

@@ -1,6 +1,6 @@
import React, {useEffect, useState} from "react";
import { Button } from "@material-ui/core";
import Api,{getWebsocketUrl} from "../../../helpers/api";
import Api, { MizuWebsocketURL } from "../../../helpers/api";
import debounce from 'lodash/debounce';
import {useSetRecoilState, useRecoilState} from "recoil";
import {useCommonStyles} from "../../../helpers/commonStyle"
@@ -65,7 +65,7 @@ const trafficViewerApi = {...api}
return (
<>
<TrafficViewer setAnalyzeStatus={setAnalyzeStatus} webSocketUrl={getWebsocketUrl()} isCloseWebSocket={!openWebSocket}
<TrafficViewer setAnalyzeStatus={setAnalyzeStatus} webSocketUrl={MizuWebsocketURL} isCloseWebSocket={!openWebSocket}
trafficViewerApiProp={trafficViewerApi} actionButtons={actionButtons} isShowStatusBar={!openOasModal} isDemoBannerView={false}/>
</>
);

View File

@@ -3,13 +3,10 @@ import * as axios from "axios";
export const MizuWebsocketURL = process.env.REACT_APP_OVERRIDE_WS_URL ? process.env.REACT_APP_OVERRIDE_WS_URL :
window.location.protocol === 'https:' ? `wss://${window.location.host}/ws` : `ws://${window.location.host}/ws`;
export const FormValidationErrorType = "formError";
const CancelToken = axios.CancelToken;
const apiURL = process.env.REACT_APP_OVERRIDE_API_URL ? process.env.REACT_APP_OVERRIDE_API_URL : `${window.location.origin}/`;
let token = ""
let client = null
let source = null
@@ -24,8 +21,6 @@ export default class Api {
}
constructor() {
token = localStorage.getItem("token");
client = this.getAxiosClient();
source = null;
}
@@ -125,20 +120,10 @@ export default class Api {
return response.data;
}
persistToken = (tk) => {
token = tk;
client = this.getAxiosClient();
localStorage.setItem('token', token);
}
getAxiosClient = () => {
const headers = {
Accept: "application/json"
}
if (token) {
headers['x-session-token'] = `${token}`; // we use `x-session-token` instead of `Authorization` because the latter is reserved by kubectl proxy, making mizu view not work
}
return axios.create({
baseURL: apiURL,
timeout: 31000,
@@ -146,12 +131,3 @@ export default class Api {
});
}
}
export function getWebsocketUrl() {
let websocketUrl = MizuWebsocketURL;
if (token) {
websocketUrl += `/${token}`;
}
return websocketUrl;
}