kubeshark (mirror of https://github.com/kubeshark/kubeshark.git)
Compare commits: 35.0-dev10...36.0-dev9 (23 commits)
Commits (SHA1):
9aaf3f1423, a2463b739a, c010d336bb, 710411e112, 274fbeb34a, 38c05a6634,
d857935889, ec11b21b51, 52c9251c00, f3a6b3a9d4, 5f73c2d50a, d6944d467c,
57078517a4, b4bc09637c, 302333b4ae, 13ed8eb58a, 48619b3e1c, 3b0b311e1e,
3a9236a381, 2e7fd34210, 01af6aa19c, 2bfae1baae, 2df9fb49db
@@ -30,7 +30,6 @@ require (
	github.com/up9inc/mizu/tap/extensions/kafka v0.0.0
	github.com/up9inc/mizu/tap/extensions/redis v0.0.0
	github.com/wI2L/jsondiff v0.1.1
	github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0
	k8s.io/api v0.23.3
	k8s.io/apimachinery v0.23.3
	k8s.io/client-go v0.23.3
@@ -52,7 +51,7 @@ require (
	github.com/beevik/etree v1.1.0 // indirect
	github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
	github.com/chanced/dynamic v0.0.0-20211210164248-f8fadb1d735b // indirect
	github.com/cilium/ebpf v0.8.1 // indirect
	github.com/cilium/ebpf v0.9.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
@@ -90,6 +89,7 @@ require (
	github.com/mattn/go-isatty v0.0.14 // indirect
	github.com/mertyildiran/gqlparser/v2 v2.4.6 // indirect
	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
	github.com/moby/moby v20.10.17+incompatible // indirect
	github.com/moby/spdystream v0.2.0 // indirect
	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -105,6 +105,7 @@ require (
	github.com/santhosh-tekuri/jsonschema/v5 v5.0.0 // indirect
	github.com/segmentio/kafka-go v0.4.27 // indirect
	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
	github.com/sirupsen/logrus v1.8.1 // indirect
	github.com/spf13/cobra v1.3.0 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/struCoder/pidusage v0.2.1 // indirect
13 agent/go.sum
@@ -128,8 +128,8 @@ github.com/chanced/openapi v0.0.8/go.mod h1:SxE2VMLPw+T7Vq8nwbVVhDF2PigvRF4n5Xyq
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cilium/ebpf v0.8.1 h1:bLSSEbBLqGPXxls55pGr5qWZaTqcmfDJHhou7t254ao=
|
||||
github.com/cilium/ebpf v0.8.1/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
|
||||
github.com/cilium/ebpf v0.9.0 h1:ldiV+FscPCQ/p3mNEV4O02EPbUZJFsoEtHvIr9xLTvk=
|
||||
github.com/cilium/ebpf v0.9.0/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
|
||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
@@ -517,6 +517,8 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/moby v20.10.17+incompatible h1:TJJfyk2fLEgK+RzqVpFNkDkm0oEi+MLUfwt9lEYnp5g=
|
||||
github.com/moby/moby v20.10.17+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
|
||||
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
|
||||
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
|
||||
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
|
||||
@@ -629,6 +631,8 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
@@ -707,8 +711,6 @@ github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6Ut
|
||||
github.com/xlab/treeprint v1.1.0 h1:G/1DjNkPpfZCFt9CSh6b5/nY4VimlbHF3Rh4obvtzDk=
|
||||
github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY=
|
||||
github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
@@ -1251,8 +1253,9 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||
gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo=
|
||||
gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
||||
@@ -131,6 +131,7 @@ func hostApi(socketHarOutputChannel chan<- *tapApi.OutputChannelItem) *gin.Engin
	routes.MetadataRoutes(ginApp)
	routes.StatusRoutes(ginApp)
	routes.DbRoutes(ginApp)
	routes.ReplayRoutes(ginApp)

	return ginApp
}
@@ -155,7 +156,7 @@ func runInTapperMode()

	hostMode := os.Getenv(shared.HostModeEnvVar) == "1"
	tapOpts := &tap.TapOpts{
		HostMode: hostMode,
		HostMode: hostMode,
	}

	filteredOutputItemsChannel := make(chan *tapApi.OutputChannelItem)
@@ -22,7 +22,16 @@ func (e *DefaultEntryStreamerSocketConnector) SendEntry(socketId int, entry *tap
	if params.EnableFullEntries {
		message, _ = models.CreateFullEntryWebSocketMessage(entry)
	} else {
		extension := extensionsMap[entry.Protocol.Name]
		protocol, ok := protocolsMap[entry.Protocol.ToString()]
		if !ok {
			return fmt.Errorf("protocol not found, protocol: %v", protocol)
		}

		extension, ok := extensionsMap[protocol.Name]
		if !ok {
			return fmt.Errorf("extension not found, extension: %v", protocol.Name)
		}

		base := extension.Dissector.Summarize(entry)
		message, _ = models.CreateBaseEntryWebSocketMessage(base)
	}
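The hunk above swaps a single lookup keyed by protocol name for a two-step lookup: the entry's full protocol identifier (as produced by Protocol.ToString()) selects a protocol from protocolsMap, and that protocol's name then selects the dissector extension from extensionsMap. A minimal sketch of the same pattern, using simplified stand-in types rather than the real tapApi definitions (the ToString format shown here is an assumption):

```go
package main

import "fmt"

// Simplified stand-ins for tapApi.Protocol and tapApi.Extension,
// only to illustrate the two-step lookup; the key format is an assumption.
type protocolSummary struct{ Name, Version, Abbreviation string }

func (p protocolSummary) ToString() string {
	return fmt.Sprintf("%s/%s/%s", p.Abbreviation, p.Version, p.Name)
}

type extension struct{ ProtocolName string }

func main() {
	protocolsMap := map[string]protocolSummary{}
	extensionsMap := map[string]*extension{}

	// Registration side: index the protocol by its full key and the extension by name.
	http11 := protocolSummary{Name: "http", Version: "1.1", Abbreviation: "HTTP"}
	protocolsMap[http11.ToString()] = http11
	extensionsMap[http11.Name] = &extension{ProtocolName: "http"}

	// Lookup side: resolve an entry's protocol key back to its extension.
	key := http11.ToString()
	protocol, ok := protocolsMap[key]
	if !ok {
		fmt.Printf("protocol not found: %v\n", key)
		return
	}
	ext, ok := extensionsMap[protocol.Name]
	if !ok {
		fmt.Printf("extension not found: %v\n", protocol.Name)
		return
	}
	fmt.Printf("resolved %q to extension %+v\n", key, ext)
}
```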
@@ -12,7 +12,6 @@ import (
	"time"

	"github.com/up9inc/mizu/agent/pkg/dependency"
	"github.com/up9inc/mizu/agent/pkg/models"
	"github.com/up9inc/mizu/agent/pkg/oas"
	"github.com/up9inc/mizu/agent/pkg/servicemap"
@@ -101,20 +100,13 @@ func startReadingChannel(outputItems <-chan *tapApi.OutputChannelItem, extension
	for item := range outputItems {
		extension := extensionsMap[item.Protocol.Name]
		resolvedSource, resolvedDestionation, namespace := resolveIP(item.ConnectionInfo)
		resolvedSource, resolvedDestination, namespace := resolveIP(item.ConnectionInfo)

		if namespace == "" && item.Namespace != tapApi.UnknownNamespace {
			namespace = item.Namespace
		}

		mizuEntry := extension.Dissector.Analyze(item, resolvedSource, resolvedDestionation, namespace)
		if extension.Protocol.Name == "http" {
			harEntry, err := har.NewEntry(mizuEntry.Request, mizuEntry.Response, mizuEntry.StartTime, mizuEntry.ElapsedTime)
			if err == nil {
				rules, _, _ := models.RunValidationRulesState(*harEntry, mizuEntry.Destination.Name)
				mizuEntry.Rules = rules
			}
		}
		mizuEntry := extension.Dissector.Analyze(item, resolvedSource, resolvedDestination, namespace)

		data, err := json.Marshal(mizuEntry)
		if err != nil {
@@ -14,10 +14,14 @@ import (
	tapApi "github.com/up9inc/mizu/tap/api"
)

var extensionsMap map[string]*tapApi.Extension // global
var (
	extensionsMap map[string]*tapApi.Extension // global
	protocolsMap map[string]*tapApi.Protocol //global
)

func InitExtensionsMap(ref map[string]*tapApi.Extension) {
	extensionsMap = ref
func InitMaps(extensions map[string]*tapApi.Extension, protocols map[string]*tapApi.Protocol) {
	extensionsMap = extensions
	protocolsMap = protocols
}

type EventHandlers interface {
@@ -9,10 +9,11 @@ import (
	"github.com/op/go-logging"
	basenine "github.com/up9inc/basenine/client/go"
	"github.com/up9inc/mizu/agent/pkg/api"
	"github.com/up9inc/mizu/agent/pkg/providers"
	"github.com/up9inc/mizu/agent/pkg/utils"
	"github.com/up9inc/mizu/logger"
	"github.com/up9inc/mizu/tap/dbgctl"
	tapApi "github.com/up9inc/mizu/tap/api"
	"github.com/up9inc/mizu/tap/dbgctl"
	amqpExt "github.com/up9inc/mizu/tap/extensions/amqp"
	httpExt "github.com/up9inc/mizu/tap/extensions/http"
	kafkaExt "github.com/up9inc/mizu/tap/extensions/kafka"
@@ -22,11 +23,13 @@ import (
var (
	Extensions []*tapApi.Extension // global
	ExtensionsMap map[string]*tapApi.Extension // global
	ProtocolsMap map[string]*tapApi.Protocol //global
)

func LoadExtensions() {
	Extensions = make([]*tapApi.Extension, 0)
	ExtensionsMap = make(map[string]*tapApi.Extension)
	ProtocolsMap = make(map[string]*tapApi.Protocol)

	extensionHttp := &tapApi.Extension{}
	dissectorHttp := httpExt.NewDissector()
@@ -34,6 +37,10 @@ func LoadExtensions() {
	extensionHttp.Dissector = dissectorHttp
	Extensions = append(Extensions, extensionHttp)
	ExtensionsMap[extensionHttp.Protocol.Name] = extensionHttp
	protocolsHttp := dissectorHttp.GetProtocols()
	for k, v := range protocolsHttp {
		ProtocolsMap[k] = v
	}

	if !dbgctl.MizuTapperDisableNonHttpExtensions {
		extensionAmqp := &tapApi.Extension{}
@@ -42,6 +49,10 @@ func LoadExtensions() {
		extensionAmqp.Dissector = dissectorAmqp
		Extensions = append(Extensions, extensionAmqp)
		ExtensionsMap[extensionAmqp.Protocol.Name] = extensionAmqp
		protocolsAmqp := dissectorAmqp.GetProtocols()
		for k, v := range protocolsAmqp {
			ProtocolsMap[k] = v
		}

		extensionKafka := &tapApi.Extension{}
		dissectorKafka := kafkaExt.NewDissector()
@@ -49,6 +60,10 @@ func LoadExtensions() {
		extensionKafka.Dissector = dissectorKafka
		Extensions = append(Extensions, extensionKafka)
		ExtensionsMap[extensionKafka.Protocol.Name] = extensionKafka
		protocolsKafka := dissectorKafka.GetProtocols()
		for k, v := range protocolsKafka {
			ProtocolsMap[k] = v
		}

		extensionRedis := &tapApi.Extension{}
		dissectorRedis := redisExt.NewDissector()
@@ -56,13 +71,18 @@ func LoadExtensions() {
		extensionRedis.Dissector = dissectorRedis
		Extensions = append(Extensions, extensionRedis)
		ExtensionsMap[extensionRedis.Protocol.Name] = extensionRedis
		protocolsRedis := dissectorRedis.GetProtocols()
		for k, v := range protocolsRedis {
			ProtocolsMap[k] = v
		}
	}

	sort.Slice(Extensions, func(i, j int) bool {
		return Extensions[i].Protocol.Priority < Extensions[j].Protocol.Priority
	})

	api.InitExtensionsMap(ExtensionsMap)
	api.InitMaps(ExtensionsMap, ProtocolsMap)
	providers.InitProtocolToColor(ProtocolsMap)
}

func ConfigureBasenineServer(host string, port string, dbSize int64, logLevel logging.Level, insertionFilter string) {
34 agent/pkg/controllers/replay_controller.go — new file
@@ -0,0 +1,34 @@
package controllers

import (
	"net/http"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/up9inc/mizu/agent/pkg/replay"
	"github.com/up9inc/mizu/agent/pkg/validation"
	"github.com/up9inc/mizu/logger"
)

const (
	replayTimeout = 10 * time.Second
)

func ReplayRequest(c *gin.Context) {
	logger.Log.Debug("Starting replay")
	replayDetails := &replay.Details{}
	if err := c.Bind(replayDetails); err != nil {
		c.JSON(http.StatusBadRequest, err)
		return
	}

	logger.Log.Debugf("Validating replay, %v", replayDetails)
	if err := validation.Validate(replayDetails); err != nil {
		c.JSON(http.StatusBadRequest, err)
		return
	}

	logger.Log.Debug("Executing replay, %v", replayDetails)
	result := replay.ExecuteRequest(replayDetails, replayTimeout)
	c.JSON(http.StatusOK, result)
}
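A hedged client-side sketch of calling the new endpoint: the JSON field names mirror the replay.Details struct added later in this changeset (method, url, body, headers) and the route is POST /replay/ from replay_routes.go; the agent address and port are assumptions for illustration, not taken from this diff.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// replayDetails mirrors the JSON shape of replay.Details from this changeset.
type replayDetails struct {
	Method  string            `json:"method"`
	URL     string            `json:"url"`
	Body    string            `json:"body"`
	Headers map[string]string `json:"headers"`
}

func main() {
	payload, _ := json.Marshal(replayDetails{
		Method:  "GET",
		URL:     "http://httpbin.org/status/200",
		Headers: map[string]string{},
	})

	// The agent address is an assumption; substitute the real API server URL.
	resp, err := http.Post("http://localhost:8899/replay/", "application/json", bytes.NewReader(payload))
	if err != nil {
		fmt.Println("replay request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("replay returned HTTP", resp.StatusCode)
}
```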
@@ -36,11 +36,13 @@ var (
)

var ProtocolHttp = &tapApi.Protocol{
	Name: "http",
	ProtocolSummary: tapApi.ProtocolSummary{
		Name: "http",
		Version: "1.1",
		Abbreviation: "HTTP",
	},
	LongName: "Hypertext Transfer Protocol -- HTTP/1.1",
	Abbreviation: "HTTP",
	Macro: "http",
	Version: "1.1",
	BackgroundColor: "#205cf5",
	ForegroundColor: "#ffffff",
	FontSize: 12,
@@ -79,13 +79,8 @@ func GetGeneralStats(c *gin.Context) {
	c.JSON(http.StatusOK, providers.GetGeneralStats())
}

func GetAccumulativeStats(c *gin.Context) {
	c.JSON(http.StatusOK, providers.GetAccumulativeStats())
}

func GetAccumulativeStatsTiming(c *gin.Context) {
	// for now hardcoded 10 bars of 5 minutes interval
	c.JSON(http.StatusOK, providers.GetAccumulativeStatsTiming(300, 10))
func GetTrafficStats(c *gin.Context) {
	c.JSON(http.StatusOK, providers.GetTrafficStats())
}

func GetCurrentResolvingInformation(c *gin.Context) {
@@ -3,11 +3,11 @@ package entries
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
basenine "github.com/up9inc/basenine/client/go"
|
||||
"github.com/up9inc/mizu/agent/pkg/app"
|
||||
"github.com/up9inc/mizu/agent/pkg/har"
|
||||
"github.com/up9inc/mizu/agent/pkg/models"
|
||||
"github.com/up9inc/mizu/logger"
|
||||
"github.com/up9inc/mizu/shared"
|
||||
@@ -38,11 +38,20 @@ func (e *BasenineEntriesProvider) GetEntries(entriesRequest *models.EntriesReque
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
extension := app.ExtensionsMap[entry.Protocol.Name]
|
||||
protocol, ok := app.ProtocolsMap[entry.Protocol.ToString()]
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("protocol not found, protocol: %v", protocol)
|
||||
}
|
||||
|
||||
extension, ok := app.ExtensionsMap[protocol.Name]
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("extension not found, extension: %v", protocol.Name)
|
||||
}
|
||||
|
||||
base := extension.Dissector.Summarize(entry)
|
||||
|
||||
dataSlice = append(dataSlice, &tapApi.EntryWrapper{
|
||||
Protocol: entry.Protocol,
|
||||
Protocol: *protocol,
|
||||
Data: entry,
|
||||
Base: base,
|
||||
})
|
||||
@@ -68,7 +77,16 @@ func (e *BasenineEntriesProvider) GetEntry(singleEntryRequest *models.SingleEntr
|
||||
return nil, errors.New(string(bytes))
|
||||
}
|
||||
|
||||
extension := app.ExtensionsMap[entry.Protocol.Name]
|
||||
protocol, ok := app.ProtocolsMap[entry.Protocol.ToString()]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("protocol not found, protocol: %v", protocol)
|
||||
}
|
||||
|
||||
extension, ok := app.ExtensionsMap[protocol.Name]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("extension not found, extension: %v", protocol.Name)
|
||||
}
|
||||
|
||||
base := extension.Dissector.Summarize(entry)
|
||||
var representation []byte
|
||||
representation, err = extension.Dissector.Represent(entry.Request, entry.Response)
|
||||
@@ -76,24 +94,10 @@ func (e *BasenineEntriesProvider) GetEntry(singleEntryRequest *models.SingleEntr
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var rules []map[string]interface{}
|
||||
var isRulesEnabled bool
|
||||
if entry.Protocol.Name == "http" {
|
||||
harEntry, _ := har.NewEntry(entry.Request, entry.Response, entry.StartTime, entry.ElapsedTime)
|
||||
_, rulesMatched, _isRulesEnabled := models.RunValidationRulesState(*harEntry, entry.Destination.Name)
|
||||
isRulesEnabled = _isRulesEnabled
|
||||
inrec, _ := json.Marshal(rulesMatched)
|
||||
if err := json.Unmarshal(inrec, &rules); err != nil {
|
||||
logger.Log.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
return &tapApi.EntryWrapper{
|
||||
Protocol: entry.Protocol,
|
||||
Protocol: *protocol,
|
||||
Representation: string(representation),
|
||||
Data: entry,
|
||||
Base: base,
|
||||
Rules: rules,
|
||||
IsRulesEnabled: isRulesEnabled,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
	"encoding/json"

	"github.com/up9inc/mizu/agent/pkg/har"
	"github.com/up9inc/mizu/agent/pkg/rules"
	tapApi "github.com/up9inc/mizu/tap/api"

	basenine "github.com/up9inc/basenine/client/go"
@@ -143,9 +142,3 @@ type ExtendedCreator struct {
	*har.Creator
	Source *string `json:"_source"`
}

func RunValidationRulesState(harEntry har.Entry, service string) (tapApi.ApplicableRules, []rules.RulesMatched, bool) {
	resultPolicyToSend, isEnabled := rules.MatchRequestPolicy(harEntry, service)
	statusPolicyToSend, latency, numberOfRules := rules.PassedValidationRules(resultPolicyToSend)
	return tapApi.ApplicableRules{Status: statusPolicyToSend, Latency: latency, NumberOfRules: numberOfRules}, resultPolicyToSend, isEnabled
}
@@ -6,9 +6,8 @@ import (
	"sync"

	"github.com/up9inc/mizu/agent/pkg/har"
	"github.com/up9inc/mizu/tap/api"

	"github.com/up9inc/mizu/logger"
	"github.com/up9inc/mizu/tap/api"
)

var (
@@ -1,7 +1,11 @@
package providers

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"reflect"
	"strings"
	"sync"
	"time"
@@ -26,7 +30,6 @@ type TimeFrameStatsValue struct {

type ProtocolStats struct {
	MethodsStats map[string]*SizeAndEntriesCount `json:"methods"`
	Color string `json:"color"`
}

type SizeAndEntriesCount struct {
@@ -36,13 +39,13 @@ type SizeAndEntriesCount struct {

type AccumulativeStatsCounter struct {
	Name string `json:"name"`
	Color string `json:"color"`
	EntriesCount int `json:"entriesCount"`
	VolumeSizeBytes int `json:"volumeSizeBytes"`
}

type AccumulativeStatsProtocol struct {
	AccumulativeStatsCounter
	Color string `json:"color"`
	Methods []*AccumulativeStatsCounter `json:"methods"`
}
@@ -51,46 +54,47 @@ type AccumulativeStatsProtocolTime struct {
|
||||
Time int64 `json:"timestamp"`
|
||||
}
|
||||
|
||||
type TrafficStatsResponse struct {
|
||||
Protocols []string `json:"protocols"`
|
||||
PieStats []*AccumulativeStatsProtocol `json:"pie"`
|
||||
TimelineStats []*AccumulativeStatsProtocolTime `json:"timeline"`
|
||||
}
|
||||
|
||||
var (
|
||||
generalStats = GeneralStats{}
|
||||
bucketsStats = BucketStats{}
|
||||
bucketStatsLocker = sync.Mutex{}
|
||||
protocolToColor = map[string]string{}
|
||||
)
|
||||
|
||||
const (
|
||||
InternalBucketThreshold = time.Minute * 1
|
||||
MaxNumberOfBars = 30
|
||||
)
|
||||
|
||||
func ResetGeneralStats() {
|
||||
generalStats = GeneralStats{}
|
||||
}
|
||||
|
||||
func GetGeneralStats() GeneralStats {
|
||||
return generalStats
|
||||
func GetGeneralStats() *GeneralStats {
|
||||
return &generalStats
|
||||
}
|
||||
|
||||
func GetAccumulativeStats() []*AccumulativeStatsProtocol {
|
||||
bucketStatsCopy := getBucketStatsCopy()
|
||||
if len(bucketStatsCopy) == 0 {
|
||||
return make([]*AccumulativeStatsProtocol, 0)
|
||||
func InitProtocolToColor(protocolMap map[string]*api.Protocol) {
|
||||
for item, value := range protocolMap {
|
||||
splitted := strings.SplitN(item, "/", 3)
|
||||
protocolToColor[splitted[len(splitted)-1]] = value.BackgroundColor
|
||||
}
|
||||
|
||||
methodsPerProtocolAggregated, protocolToColor := getAggregatedStatsAllTime(bucketStatsCopy)
|
||||
|
||||
return convertAccumulativeStatsDictToArray(methodsPerProtocolAggregated, protocolToColor)
|
||||
}
|
||||
|
||||
func GetAccumulativeStatsTiming(intervalSeconds int, numberOfBars int) []*AccumulativeStatsProtocolTime {
|
||||
bucketStatsCopy := getBucketStatsCopy()
|
||||
if len(bucketStatsCopy) == 0 {
|
||||
return make([]*AccumulativeStatsProtocolTime, 0)
|
||||
func GetTrafficStats() *TrafficStatsResponse {
|
||||
bucketsStatsCopy := getBucketStatsCopy()
|
||||
|
||||
return &TrafficStatsResponse{
|
||||
Protocols: getAvailableProtocols(bucketsStatsCopy),
|
||||
PieStats: getAccumulativeStats(bucketsStatsCopy),
|
||||
TimelineStats: getAccumulativeStatsTiming(bucketsStatsCopy),
|
||||
}
|
||||
|
||||
firstBucketTime := getFirstBucketTime(time.Now().UTC(), intervalSeconds, numberOfBars)
|
||||
|
||||
methodsPerProtocolPerTimeAggregated, protocolToColor := getAggregatedResultTimingFromSpecificTime(intervalSeconds, bucketStatsCopy, firstBucketTime)
|
||||
|
||||
return convertAccumulativeStatsTimelineDictToArray(methodsPerProtocolPerTimeAggregated, protocolToColor)
|
||||
}
|
||||
|
||||
func EntryAdded(size int, summery *api.BaseEntry) {
|
||||
@@ -108,6 +112,65 @@ func EntryAdded(size int, summery *api.BaseEntry) {
	generalStats.LastEntryTimestamp = currentTimestamp
}

func calculateInterval(firstTimestamp int64, lastTimestamp int64) time.Duration {
	validDurations := []time.Duration{
		time.Minute,
		time.Minute * 2,
		time.Minute * 3,
		time.Minute * 5,
		time.Minute * 10,
		time.Minute * 15,
		time.Minute * 20,
		time.Minute * 30,
		time.Minute * 45,
		time.Minute * 60,
		time.Minute * 75,
		time.Minute * 90, // 1.5 minutes
		time.Minute * 120, // 2 hours
		time.Minute * 150, // 2.5 hours
		time.Minute * 180, // 3 hours
		time.Minute * 240, // 4 hours
		time.Minute * 300, // 5 hours
		time.Minute * 360, // 6 hours
		time.Minute * 420, // 7 hours
		time.Minute * 480, // 8 hours
		time.Minute * 540, // 9 hours
		time.Minute * 600, // 10 hours
		time.Minute * 660, // 11 hours
		time.Minute * 720, // 12 hours
		time.Minute * 1440, // 24 hours
	}
	duration := time.Duration(lastTimestamp-firstTimestamp) * time.Second / time.Duration(MaxNumberOfBars)
	for _, validDuration := range validDurations {
		if validDuration-duration >= 0 {
			return validDuration
		}
	}
	return duration.Round(validDurations[len(validDurations)-1])

}

func getAccumulativeStats(stats BucketStats) []*AccumulativeStatsProtocol {
	if len(stats) == 0 {
		return make([]*AccumulativeStatsProtocol, 0)
	}

	methodsPerProtocolAggregated := getAggregatedStats(stats)

	return convertAccumulativeStatsDictToArray(methodsPerProtocolAggregated)
}

func getAccumulativeStatsTiming(stats BucketStats) []*AccumulativeStatsProtocolTime {
	if len(stats) == 0 {
		return make([]*AccumulativeStatsProtocolTime, 0)
	}

	interval := calculateInterval(stats[0].BucketTime.Unix(), stats[len(stats)-1].BucketTime.Unix()) // in seconds
	methodsPerProtocolPerTimeAggregated := getAggregatedResultTiming(stats, interval)

	return convertAccumulativeStatsTimelineDictToArray(methodsPerProtocolPerTimeAggregated)
}

func addToBucketStats(size int, summery *api.BaseEntry) {
	entryTimeBucketRounded := getBucketFromTimeStamp(summery.Timestamp)
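As a rough worked example of calculateInterval above: a three-hour window divided by MaxNumberOfBars (30) gives a raw bar width of six minutes, and the first entry in validDurations that is at least that long is ten minutes. The sketch below re-implements only that selection step with a trimmed duration list so it stays self-contained; it is an illustration, not the production function.

```go
package main

import (
	"fmt"
	"time"
)

// maxNumberOfBars mirrors MaxNumberOfBars (30) from the diff above.
const maxNumberOfBars = 30

// pickInterval is a trimmed copy of the selection logic in calculateInterval.
func pickInterval(first, last int64) time.Duration {
	validDurations := []time.Duration{
		time.Minute, 2 * time.Minute, 3 * time.Minute, 5 * time.Minute,
		10 * time.Minute, 15 * time.Minute, 20 * time.Minute, 30 * time.Minute,
	}
	raw := time.Duration(last-first) * time.Second / time.Duration(maxNumberOfBars)
	for _, d := range validDurations {
		if d >= raw {
			return d
		}
	}
	return raw.Round(validDurations[len(validDurations)-1])
}

func main() {
	threeHours := int64(3 * 60 * 60)
	// 3h of data / 30 bars = 6m of raw bar width, so the picker lands on 10m.
	fmt.Println(pickInterval(0, threeHours)) // prints "10m0s"
}
```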
@@ -128,7 +191,6 @@ func addToBucketStats(size int, summery *api.BaseEntry) {
|
||||
if _, found := bucketOfEntry.ProtocolStats[summery.Protocol.Abbreviation]; !found {
|
||||
bucketOfEntry.ProtocolStats[summery.Protocol.Abbreviation] = ProtocolStats{
|
||||
MethodsStats: map[string]*SizeAndEntriesCount{},
|
||||
Color: summery.Protocol.BackgroundColor,
|
||||
}
|
||||
}
|
||||
if _, found := bucketOfEntry.ProtocolStats[summery.Protocol.Abbreviation].MethodsStats[summery.Method]; !found {
|
||||
@@ -147,21 +209,15 @@ func getBucketFromTimeStamp(timestamp int64) time.Time {
|
||||
return entryTimeStampAsTime.Add(-1 * InternalBucketThreshold / 2).Round(InternalBucketThreshold)
|
||||
}
|
||||
|
||||
func getFirstBucketTime(endTime time.Time, intervalSeconds int, numberOfBars int) time.Time {
|
||||
lastBucketTime := endTime.Add(-1 * time.Second * time.Duration(intervalSeconds) / 2).Round(time.Second * time.Duration(intervalSeconds))
|
||||
firstBucketTime := lastBucketTime.Add(-1 * time.Second * time.Duration(intervalSeconds*(numberOfBars-1)))
|
||||
return firstBucketTime
|
||||
}
|
||||
|
||||
func convertAccumulativeStatsTimelineDictToArray(methodsPerProtocolPerTimeAggregated map[time.Time]map[string]map[string]*AccumulativeStatsCounter, protocolToColor map[string]string) []*AccumulativeStatsProtocolTime {
|
||||
func convertAccumulativeStatsTimelineDictToArray(methodsPerProtocolPerTimeAggregated map[time.Time]map[string]map[string]*AccumulativeStatsCounter) []*AccumulativeStatsProtocolTime {
|
||||
finalResult := make([]*AccumulativeStatsProtocolTime, 0)
|
||||
for timeKey, item := range methodsPerProtocolPerTimeAggregated {
|
||||
protocolsData := make([]*AccumulativeStatsProtocol, 0)
|
||||
for protocolName := range item {
|
||||
for protocolName, value := range item {
|
||||
entriesCount := 0
|
||||
volumeSizeBytes := 0
|
||||
methods := make([]*AccumulativeStatsCounter, 0)
|
||||
for _, methodAccData := range methodsPerProtocolPerTimeAggregated[timeKey][protocolName] {
|
||||
for _, methodAccData := range value {
|
||||
entriesCount += methodAccData.EntriesCount
|
||||
volumeSizeBytes += methodAccData.VolumeSizeBytes
|
||||
methods = append(methods, methodAccData)
|
||||
@@ -169,10 +225,10 @@ func convertAccumulativeStatsTimelineDictToArray(methodsPerProtocolPerTimeAggreg
|
||||
protocolsData = append(protocolsData, &AccumulativeStatsProtocol{
|
||||
AccumulativeStatsCounter: AccumulativeStatsCounter{
|
||||
Name: protocolName,
|
||||
Color: protocolToColor[protocolName],
|
||||
EntriesCount: entriesCount,
|
||||
VolumeSizeBytes: volumeSizeBytes,
|
||||
},
|
||||
Color: protocolToColor[protocolName],
|
||||
Methods: methods,
|
||||
})
|
||||
}
|
||||
@@ -184,7 +240,7 @@ func convertAccumulativeStatsTimelineDictToArray(methodsPerProtocolPerTimeAggreg
|
||||
return finalResult
|
||||
}
|
||||
|
||||
func convertAccumulativeStatsDictToArray(methodsPerProtocolAggregated map[string]map[string]*AccumulativeStatsCounter, protocolToColor map[string]string) []*AccumulativeStatsProtocol {
|
||||
func convertAccumulativeStatsDictToArray(methodsPerProtocolAggregated map[string]map[string]*AccumulativeStatsCounter) []*AccumulativeStatsProtocol {
|
||||
protocolsData := make([]*AccumulativeStatsProtocol, 0)
|
||||
for protocolName, value := range methodsPerProtocolAggregated {
|
||||
entriesCount := 0
|
||||
@@ -198,10 +254,10 @@ func convertAccumulativeStatsDictToArray(methodsPerProtocolAggregated map[string
|
||||
protocolsData = append(protocolsData, &AccumulativeStatsProtocol{
|
||||
AccumulativeStatsCounter: AccumulativeStatsCounter{
|
||||
Name: protocolName,
|
||||
Color: protocolToColor[protocolName],
|
||||
EntriesCount: entriesCount,
|
||||
VolumeSizeBytes: volumeSizeBytes,
|
||||
},
|
||||
Color: protocolToColor[protocolName],
|
||||
Methods: methods,
|
||||
})
|
||||
}
|
||||
@@ -219,55 +275,45 @@ func getBucketStatsCopy() BucketStats {
|
||||
return bucketStatsCopy
|
||||
}
|
||||
|
||||
func getAggregatedResultTimingFromSpecificTime(intervalSeconds int, bucketStats BucketStats, firstBucketTime time.Time) (map[time.Time]map[string]map[string]*AccumulativeStatsCounter, map[string]string) {
|
||||
protocolToColor := map[string]string{}
|
||||
func getAggregatedResultTiming(stats BucketStats, interval time.Duration) map[time.Time]map[string]map[string]*AccumulativeStatsCounter {
|
||||
methodsPerProtocolPerTimeAggregated := map[time.Time]map[string]map[string]*AccumulativeStatsCounter{}
|
||||
|
||||
bucketStatsIndex := len(bucketStats) - 1
|
||||
bucketStatsIndex := len(stats) - 1
|
||||
for bucketStatsIndex >= 0 {
|
||||
currentBucketTime := bucketStats[bucketStatsIndex].BucketTime
|
||||
if currentBucketTime.After(firstBucketTime) || currentBucketTime.Equal(firstBucketTime) {
|
||||
resultBucketRoundedKey := currentBucketTime.Add(-1 * time.Second * time.Duration(intervalSeconds) / 2).Round(time.Second * time.Duration(intervalSeconds))
|
||||
currentBucketTime := stats[bucketStatsIndex].BucketTime
|
||||
resultBucketRoundedKey := currentBucketTime.Add(-1 * interval / 2).Round(interval)
|
||||
|
||||
for protocolName, data := range bucketStats[bucketStatsIndex].ProtocolStats {
|
||||
if _, ok := protocolToColor[protocolName]; !ok {
|
||||
protocolToColor[protocolName] = data.Color
|
||||
for protocolName, data := range stats[bucketStatsIndex].ProtocolStats {
|
||||
for methodName, dataOfMethod := range data.MethodsStats {
|
||||
|
||||
if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey]; !ok {
|
||||
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey] = map[string]map[string]*AccumulativeStatsCounter{}
|
||||
}
|
||||
|
||||
for methodName, dataOfMethod := range data.MethodsStats {
|
||||
|
||||
if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey]; !ok {
|
||||
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey] = map[string]map[string]*AccumulativeStatsCounter{}
|
||||
}
|
||||
if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName]; !ok {
|
||||
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName] = map[string]*AccumulativeStatsCounter{}
|
||||
}
|
||||
if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName]; !ok {
|
||||
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName] = &AccumulativeStatsCounter{
|
||||
Name: methodName,
|
||||
EntriesCount: 0,
|
||||
VolumeSizeBytes: 0,
|
||||
}
|
||||
}
|
||||
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName].EntriesCount += dataOfMethod.EntriesCount
|
||||
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName].VolumeSizeBytes += dataOfMethod.VolumeInBytes
|
||||
if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName]; !ok {
|
||||
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName] = map[string]*AccumulativeStatsCounter{}
|
||||
}
|
||||
if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName]; !ok {
|
||||
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName] = &AccumulativeStatsCounter{
|
||||
Name: methodName,
|
||||
Color: getColorForMethod(protocolName, methodName),
|
||||
EntriesCount: 0,
|
||||
VolumeSizeBytes: 0,
|
||||
}
|
||||
}
|
||||
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName].EntriesCount += dataOfMethod.EntriesCount
|
||||
methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName].VolumeSizeBytes += dataOfMethod.VolumeInBytes
|
||||
}
|
||||
}
|
||||
|
||||
bucketStatsIndex--
|
||||
}
|
||||
return methodsPerProtocolPerTimeAggregated, protocolToColor
|
||||
return methodsPerProtocolPerTimeAggregated
|
||||
}
|
||||
|
||||
func getAggregatedStatsAllTime(bucketStatsCopy BucketStats) (map[string]map[string]*AccumulativeStatsCounter, map[string]string) {
|
||||
protocolToColor := make(map[string]string, 0)
|
||||
func getAggregatedStats(stats BucketStats) map[string]map[string]*AccumulativeStatsCounter {
|
||||
methodsPerProtocolAggregated := make(map[string]map[string]*AccumulativeStatsCounter, 0)
|
||||
for _, countersOfTimeFrame := range bucketStatsCopy {
|
||||
for _, countersOfTimeFrame := range stats {
|
||||
for protocolName, value := range countersOfTimeFrame.ProtocolStats {
|
||||
if _, ok := protocolToColor[protocolName]; !ok {
|
||||
protocolToColor[protocolName] = value.Color
|
||||
}
|
||||
|
||||
for method, countersValue := range value.MethodsStats {
|
||||
if _, found := methodsPerProtocolAggregated[protocolName]; !found {
|
||||
methodsPerProtocolAggregated[protocolName] = map[string]*AccumulativeStatsCounter{}
|
||||
@@ -275,6 +321,7 @@ func getAggregatedStatsAllTime(bucketStatsCopy BucketStats) (map[string]map[stri
|
||||
if _, found := methodsPerProtocolAggregated[protocolName][method]; !found {
|
||||
methodsPerProtocolAggregated[protocolName][method] = &AccumulativeStatsCounter{
|
||||
Name: method,
|
||||
Color: getColorForMethod(protocolName, method),
|
||||
EntriesCount: 0,
|
||||
VolumeSizeBytes: 0,
|
||||
}
|
||||
@@ -284,5 +331,27 @@ func getAggregatedStatsAllTime(bucketStatsCopy BucketStats) (map[string]map[stri
|
||||
}
|
||||
}
|
||||
}
|
||||
return methodsPerProtocolAggregated, protocolToColor
|
||||
return methodsPerProtocolAggregated
|
||||
}
|
||||
|
||||
func getColorForMethod(protocolName string, methodName string) string {
|
||||
hash := md5.Sum([]byte(fmt.Sprintf("%v_%v", protocolName, methodName)))
|
||||
input := hex.EncodeToString(hash[:])
|
||||
return fmt.Sprintf("#%v", input[:6])
|
||||
}
|
||||
|
||||
func getAvailableProtocols(stats BucketStats) []string {
|
||||
protocols := map[string]bool{}
|
||||
for _, countersOfTimeFrame := range stats {
|
||||
for protocolName := range countersOfTimeFrame.ProtocolStats {
|
||||
protocols[protocolName] = true
|
||||
}
|
||||
}
|
||||
|
||||
result := make([]string, 0)
|
||||
for protocol := range protocols {
|
||||
result = append(result, protocol)
|
||||
}
|
||||
result = append(result, "ALL")
|
||||
return result
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package providers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
@@ -26,38 +25,6 @@ func TestGetBucketOfTimeStamp(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
type DataForBucketBorderFunction struct {
|
||||
EndTime time.Time
|
||||
IntervalInSeconds int
|
||||
NumberOfBars int
|
||||
}
|
||||
|
||||
func TestGetBucketBorders(t *testing.T) {
|
||||
tests := map[DataForBucketBorderFunction]time.Time{
|
||||
DataForBucketBorderFunction{
|
||||
time.Date(2022, time.Month(1), 1, 10, 34, 45, 0, time.UTC),
|
||||
300,
|
||||
10,
|
||||
}: time.Date(2022, time.Month(1), 1, 9, 45, 0, 0, time.UTC),
|
||||
DataForBucketBorderFunction{
|
||||
time.Date(2022, time.Month(1), 1, 10, 35, 45, 0, time.UTC),
|
||||
60,
|
||||
5,
|
||||
}: time.Date(2022, time.Month(1), 1, 10, 31, 00, 0, time.UTC),
|
||||
}
|
||||
|
||||
for key, value := range tests {
|
||||
t.Run(fmt.Sprintf("%v", key), func(t *testing.T) {
|
||||
|
||||
actual := getFirstBucketTime(key.EndTime, key.IntervalInSeconds, key.NumberOfBars)
|
||||
|
||||
if actual != value {
|
||||
t.Errorf("unexpected result - expected: %v, actual: %v", value, actual)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetAggregatedStatsAllTime(t *testing.T) {
|
||||
bucketStatsForTest := BucketStats{
|
||||
&TimeFrameStatsValue{
|
||||
@@ -140,10 +107,10 @@ func TestGetAggregatedStatsAllTime(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
actual, _ := getAggregatedStatsAllTime(bucketStatsForTest)
|
||||
actual := getAggregatedStats(bucketStatsForTest)
|
||||
|
||||
if !reflect.DeepEqual(actual, expected) {
|
||||
t.Errorf("unexpected result - expected: %v, actual: %v", 3, len(actual))
|
||||
if len(actual) != len(expected) {
|
||||
t.Errorf("unexpected result - expected: %v, actual: %v", len(expected), len(actual))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -227,10 +194,10 @@ func TestGetAggregatedStatsFromSpecificTime(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
actual, _ := getAggregatedResultTimingFromSpecificTime(300, bucketStatsForTest, time.Date(2022, time.Month(1), 1, 10, 00, 00, 0, time.UTC))
|
||||
actual := getAggregatedResultTiming(bucketStatsForTest, time.Minute*5)
|
||||
|
||||
if !reflect.DeepEqual(actual, expected) {
|
||||
t.Errorf("unexpected result - expected: %v, actual: %v", 3, len(actual))
|
||||
if len(actual) != len(expected) {
|
||||
t.Errorf("unexpected result - expected: %v, actual: %v", len(expected), len(actual))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -323,9 +290,9 @@ func TestGetAggregatedStatsFromSpecificTimeMultipleBuckets(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
actual, _ := getAggregatedResultTimingFromSpecificTime(60, bucketStatsForTest, time.Date(2022, time.Month(1), 1, 10, 00, 00, 0, time.UTC))
|
||||
actual := getAggregatedResultTiming(bucketStatsForTest, time.Minute)
|
||||
|
||||
if !reflect.DeepEqual(actual, expected) {
|
||||
t.Errorf("unexpected result - expected: %v, actual: %v", 3, len(actual))
|
||||
if len(actual) != len(expected) {
|
||||
t.Errorf("unexpected result - expected: %v, actual: %v", len(expected), len(actual))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ func TestEntryAddedCount(t *testing.T) {
|
||||
|
||||
entryBucketKey := time.Date(2021, 1, 1, 10, 0, 0, 0, time.UTC)
|
||||
valueLessThanBucketThreshold := time.Second * 130
|
||||
mockSummery := &api.BaseEntry{Protocol: api.Protocol{Name: "mock"}, Method: "mock-method", Timestamp: entryBucketKey.Add(valueLessThanBucketThreshold).UnixNano()}
|
||||
mockSummery := &api.BaseEntry{Protocol: api.Protocol{ProtocolSummary: api.ProtocolSummary{Name: "mock"}}, Method: "mock-method", Timestamp: entryBucketKey.Add(valueLessThanBucketThreshold).UnixNano()}
|
||||
for _, entriesCount := range tests {
|
||||
t.Run(fmt.Sprintf("%d", entriesCount), func(t *testing.T) {
|
||||
for i := 0; i < entriesCount; i++ {
|
||||
@@ -61,7 +61,7 @@ func TestEntryAddedVolume(t *testing.T) {
|
||||
var expectedEntriesCount int
|
||||
var expectedVolumeInGB float64
|
||||
|
||||
mockSummery := &api.BaseEntry{Protocol: api.Protocol{Name: "mock"}, Method: "mock-method", Timestamp: time.Date(2021, 1, 1, 10, 0, 0, 0, time.UTC).UnixNano()}
|
||||
mockSummery := &api.BaseEntry{Protocol: api.Protocol{ProtocolSummary: api.ProtocolSummary{Name: "mock"}}, Method: "mock-method", Timestamp: time.Date(2021, 1, 1, 10, 0, 0, 0, time.UTC).UnixNano()}
|
||||
|
||||
for _, data := range tests {
|
||||
t.Run(fmt.Sprintf("%d", len(data)), func(t *testing.T) {
|
||||
|
||||
@@ -60,7 +60,7 @@ func GetTappedPodsStatus() []shared.TappedPodStatus {
|
||||
|
||||
func SetNodeToTappedPodMap(nodeToTappedPodsMap shared.NodeToPodsMap) {
|
||||
summary := nodeToTappedPodsMap.Summary()
|
||||
logger.Log.Infof("Setting node to tapped pods map to %v", summary)
|
||||
logger.Log.Debugf("Setting node to tapped pods map to %v", summary)
|
||||
|
||||
nodeHostToTappedPodsMap = nodeToTappedPodsMap
|
||||
}
|
||||
|
||||
184 agent/pkg/replay/replay.go — new file
@@ -0,0 +1,184 @@
|
||||
package replay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/up9inc/mizu/agent/pkg/app"
|
||||
tapApi "github.com/up9inc/mizu/tap/api"
|
||||
mizuhttp "github.com/up9inc/mizu/tap/extensions/http"
|
||||
)
|
||||
|
||||
var (
|
||||
inProcessRequestsLocker = sync.Mutex{}
|
||||
inProcessRequests = 0
|
||||
)
|
||||
|
||||
const maxParallelAction = 5
|
||||
|
||||
type Details struct {
|
||||
Method string `json:"method"`
|
||||
Url string `json:"url"`
|
||||
Body string `json:"body"`
|
||||
Headers map[string]string `json:"headers"`
|
||||
}
|
||||
|
||||
type Response struct {
|
||||
Success bool `json:"status"`
|
||||
Data interface{} `json:"data"`
|
||||
ErrorMessage string `json:"errorMessage"`
|
||||
}
|
||||
|
||||
func incrementCounter() bool {
|
||||
result := false
|
||||
inProcessRequestsLocker.Lock()
|
||||
if inProcessRequests < maxParallelAction {
|
||||
inProcessRequests++
|
||||
result = true
|
||||
}
|
||||
inProcessRequestsLocker.Unlock()
|
||||
return result
|
||||
}
|
||||
|
||||
func decrementCounter() {
|
||||
inProcessRequestsLocker.Lock()
|
||||
inProcessRequests--
|
||||
inProcessRequestsLocker.Unlock()
|
||||
}
|
||||
|
||||
func getEntryFromRequestResponse(extension *tapApi.Extension, request *http.Request, response *http.Response) *tapApi.Entry {
|
||||
captureTime := time.Now()
|
||||
|
||||
itemTmp := tapApi.OutputChannelItem{
|
||||
Protocol: *extension.Protocol,
|
||||
ConnectionInfo: &tapApi.ConnectionInfo{
|
||||
ClientIP: "",
|
||||
ClientPort: "1",
|
||||
ServerIP: "",
|
||||
ServerPort: "1",
|
||||
IsOutgoing: false,
|
||||
},
|
||||
Capture: "",
|
||||
Timestamp: time.Now().UnixMilli(),
|
||||
Pair: &tapApi.RequestResponsePair{
|
||||
Request: tapApi.GenericMessage{
|
||||
IsRequest: true,
|
||||
CaptureTime: captureTime,
|
||||
CaptureSize: 0,
|
||||
Payload: &mizuhttp.HTTPPayload{
|
||||
Type: mizuhttp.TypeHttpRequest,
|
||||
Data: request,
|
||||
},
|
||||
},
|
||||
Response: tapApi.GenericMessage{
|
||||
IsRequest: false,
|
||||
CaptureTime: captureTime,
|
||||
CaptureSize: 0,
|
||||
Payload: &mizuhttp.HTTPPayload{
|
||||
Type: mizuhttp.TypeHttpResponse,
|
||||
Data: response,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Analyze is expecting an item that's marshalled and unmarshalled
|
||||
itemMarshalled, err := json.Marshal(itemTmp)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
var finalItem *tapApi.OutputChannelItem
|
||||
if err := json.Unmarshal(itemMarshalled, &finalItem); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return extension.Dissector.Analyze(finalItem, "", "", "")
|
||||
}
|
||||
|
||||
func ExecuteRequest(replayData *Details, timeout time.Duration) *Response {
|
||||
if incrementCounter() {
|
||||
defer decrementCounter()
|
||||
|
||||
client := &http.Client{
|
||||
Timeout: timeout,
|
||||
}
|
||||
|
||||
request, err := http.NewRequest(strings.ToUpper(replayData.Method), replayData.Url, bytes.NewBufferString(replayData.Body))
|
||||
if err != nil {
|
||||
return &Response{
|
||||
Success: false,
|
||||
Data: nil,
|
||||
ErrorMessage: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
for headerKey, headerValue := range replayData.Headers {
|
||||
request.Header.Add(headerKey, headerValue)
|
||||
}
|
||||
request.Header.Add("x-mizu", uuid.New().String())
|
||||
response, requestErr := client.Do(request)
|
||||
|
||||
if requestErr != nil {
|
||||
return &Response{
|
||||
Success: false,
|
||||
Data: nil,
|
||||
ErrorMessage: requestErr.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
extension := app.ExtensionsMap["http"] // # TODO: maybe pass the extension to the function so it can be tested
|
||||
entry := getEntryFromRequestResponse(extension, request, response)
|
||||
base := extension.Dissector.Summarize(entry)
|
||||
var representation []byte
|
||||
|
||||
// Represent is expecting an entry that's marshalled and unmarshalled
|
||||
entryMarshalled, err := json.Marshal(entry)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
Success: false,
|
||||
Data: nil,
|
||||
ErrorMessage: err.Error(),
|
||||
}
|
||||
}
|
||||
var entryUnmarshalled *tapApi.Entry
|
||||
if err := json.Unmarshal(entryMarshalled, &entryUnmarshalled); err != nil {
|
||||
return &Response{
|
||||
Success: false,
|
||||
Data: nil,
|
||||
ErrorMessage: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
representation, err = extension.Dissector.Represent(entryUnmarshalled.Request, entryUnmarshalled.Response)
|
||||
if err != nil {
|
||||
return &Response{
|
||||
Success: false,
|
||||
Data: nil,
|
||||
ErrorMessage: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
return &Response{
|
||||
Success: true,
|
||||
Data: &tapApi.EntryWrapper{
|
||||
Protocol: *extension.Protocol,
|
||||
Representation: string(representation),
|
||||
Data: entryUnmarshalled,
|
||||
Base: base,
|
||||
},
|
||||
ErrorMessage: "",
|
||||
}
|
||||
} else {
|
||||
return &Response{
|
||||
Success: false,
|
||||
Data: nil,
|
||||
ErrorMessage: fmt.Sprintf("reached threshold of %d requests", maxParallelAction),
|
||||
}
|
||||
}
|
||||
}
|
||||
106 agent/pkg/replay/replay_internal_test.go — new file
@@ -0,0 +1,106 @@
|
||||
package replay
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"encoding/json"
|
||||
|
||||
"github.com/google/uuid"
|
||||
tapApi "github.com/up9inc/mizu/tap/api"
|
||||
mizuhttp "github.com/up9inc/mizu/tap/extensions/http"
|
||||
)
|
||||
|
||||
func TestValid(t *testing.T) {
|
||||
client := &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
tests := map[string]*Details{
|
||||
"40x": {
|
||||
Method: "GET",
|
||||
Url: "http://httpbin.org/status/404",
|
||||
Body: "",
|
||||
Headers: map[string]string{},
|
||||
},
|
||||
"20x": {
|
||||
Method: "GET",
|
||||
Url: "http://httpbin.org/status/200",
|
||||
Body: "",
|
||||
Headers: map[string]string{},
|
||||
},
|
||||
"50x": {
|
||||
Method: "GET",
|
||||
Url: "http://httpbin.org/status/500",
|
||||
Body: "",
|
||||
Headers: map[string]string{},
|
||||
},
|
||||
// TODO: this should be fixes, currently not working because of header name with ":"
|
||||
//":path-header": {
|
||||
// Method: "GET",
|
||||
// Url: "http://httpbin.org/get",
|
||||
// Body: "",
|
||||
// Headers: map[string]string{
|
||||
// ":path": "/get",
|
||||
// },
|
||||
// },
|
||||
}
|
||||
|
||||
for testCaseName, replayData := range tests {
|
||||
t.Run(fmt.Sprintf("%+v", testCaseName), func(t *testing.T) {
|
||||
request, err := http.NewRequest(strings.ToUpper(replayData.Method), replayData.Url, bytes.NewBufferString(replayData.Body))
|
||||
if err != nil {
|
||||
t.Errorf("Error executing request")
|
||||
}
|
||||
|
||||
for headerKey, headerValue := range replayData.Headers {
|
||||
request.Header.Add(headerKey, headerValue)
|
||||
}
|
||||
request.Header.Add("x-mizu", uuid.New().String())
|
||||
response, requestErr := client.Do(request)
|
||||
|
||||
if requestErr != nil {
|
||||
t.Errorf("failed: %v, ", requestErr)
|
||||
}
|
||||
|
||||
extensionHttp := &tapApi.Extension{}
|
||||
dissectorHttp := mizuhttp.NewDissector()
|
||||
dissectorHttp.Register(extensionHttp)
|
||||
extensionHttp.Dissector = dissectorHttp
|
||||
extension := extensionHttp
|
||||
|
||||
entry := getEntryFromRequestResponse(extension, request, response)
|
||||
base := extension.Dissector.Summarize(entry)
|
||||
|
||||
// Represent is expecting an entry that's marshalled and unmarshalled
|
||||
entryMarshalled, err := json.Marshal(entry)
|
||||
if err != nil {
|
||||
t.Errorf("failed marshaling entry: %v, ", err)
|
||||
}
|
||||
var entryUnmarshalled *tapApi.Entry
|
||||
if err := json.Unmarshal(entryMarshalled, &entryUnmarshalled); err != nil {
|
||||
t.Errorf("failed unmarshaling entry: %v, ", err)
|
||||
}
|
||||
|
||||
var representation []byte
|
||||
representation, err = extension.Dissector.Represent(entryUnmarshalled.Request, entryUnmarshalled.Response)
|
||||
if err != nil {
|
||||
t.Errorf("failed: %v, ", err)
|
||||
}
|
||||
|
||||
result := &tapApi.EntryWrapper{
|
||||
Protocol: *extension.Protocol,
|
||||
Representation: string(representation),
|
||||
Data: entry,
|
||||
Base: base,
|
||||
}
|
||||
t.Logf("%+v", result)
|
||||
//data, _ := json.MarshalIndent(result, "", " ")
|
||||
//t.Logf("%+v", string(data))
|
||||
})
|
||||
}
|
||||
}
|
||||
13 agent/pkg/routes/replay_routes.go — new file
@@ -0,0 +1,13 @@
package routes

import (
	"github.com/gin-gonic/gin"
	"github.com/up9inc/mizu/agent/pkg/controllers"
)

// ReplayRoutes defines the group of replay routes.
func ReplayRoutes(app *gin.Engine) {
	routeGroup := app.Group("/replay")

	routeGroup.POST("/", controllers.ReplayRequest)
}
@@ -15,9 +15,8 @@ func StatusRoutes(ginApp *gin.Engine) {
	routeGroup.GET("/connectedTappersCount", controllers.GetConnectedTappersCount)
	routeGroup.GET("/tap", controllers.GetTappingStatus)

	routeGroup.GET("/general", controllers.GetGeneralStats) // get general stats about entries in DB
	routeGroup.GET("/accumulative", controllers.GetAccumulativeStats)
	routeGroup.GET("/accumulativeTiming", controllers.GetAccumulativeStatsTiming)
	routeGroup.GET("/general", controllers.GetGeneralStats)
	routeGroup.GET("/trafficStats", controllers.GetTrafficStats)

	routeGroup.GET("/resolving", controllers.GetCurrentResolvingInformation)
}
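A hedged sketch of consuming the new /status/trafficStats route: the top-level JSON keys (protocols, pie, timeline) come from the TrafficStatsResponse struct in this changeset, while the agent address and the decoded value types are assumptions made only for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// trafficStats mirrors only the top-level JSON keys of TrafficStatsResponse.
type trafficStats struct {
	Protocols []string          `json:"protocols"`
	Pie       []json.RawMessage `json:"pie"`
	Timeline  []json.RawMessage `json:"timeline"`
}

func main() {
	// The agent address is an assumption; substitute the real API server URL.
	resp, err := http.Get("http://localhost:8899/status/trafficStats")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	var stats trafficStats
	if err := json.NewDecoder(resp.Body).Decode(&stats); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("protocols: %v, pie buckets: %d, timeline points: %d\n",
		stats.Protocols, len(stats.Pie), len(stats.Timeline))
}
```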
@@ -1,124 +0,0 @@
|
||||
package rules
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/up9inc/mizu/agent/pkg/har"
|
||||
|
||||
"github.com/up9inc/mizu/logger"
|
||||
|
||||
"github.com/up9inc/mizu/shared"
|
||||
"github.com/yalp/jsonpath"
|
||||
)
|
||||
|
||||
type RulesMatched struct {
|
||||
Matched bool `json:"matched"`
|
||||
Rule shared.RulePolicy `json:"rule"`
|
||||
}
|
||||
|
||||
func appendRulesMatched(rulesMatched []RulesMatched, matched bool, rule shared.RulePolicy) []RulesMatched {
|
||||
return append(rulesMatched, RulesMatched{Matched: matched, Rule: rule})
|
||||
}
|
||||
|
||||
func ValidatePath(URLFromRule string, URL string) bool {
|
||||
if URLFromRule != "" {
|
||||
matchPath, err := regexp.MatchString(URLFromRule, URL)
|
||||
if err != nil || !matchPath {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func ValidateService(serviceFromRule string, service string) bool {
|
||||
if serviceFromRule != "" {
|
||||
matchService, err := regexp.MatchString(serviceFromRule, service)
|
||||
if err != nil || !matchService {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func MatchRequestPolicy(harEntry har.Entry, service string) (resultPolicyToSend []RulesMatched, isEnabled bool) {
|
||||
enforcePolicy, err := shared.DecodeEnforcePolicy(fmt.Sprintf("%s%s", shared.ConfigDirPath, shared.ValidationRulesFileName))
|
||||
if err == nil && len(enforcePolicy.Rules) > 0 {
|
||||
isEnabled = true
|
||||
}
|
||||
for _, rule := range enforcePolicy.Rules {
|
||||
if !ValidatePath(rule.Path, harEntry.Request.URL) || !ValidateService(rule.Service, service) {
|
||||
continue
|
||||
}
|
||||
if rule.Type == "json" {
|
||||
var bodyJsonMap interface{}
|
||||
contentTextDecoded, _ := base64.StdEncoding.DecodeString(harEntry.Response.Content.Text)
|
||||
if err := json.Unmarshal(contentTextDecoded, &bodyJsonMap); err != nil {
|
||||
continue
|
||||
}
|
||||
out, err := jsonpath.Read(bodyJsonMap, rule.Key)
|
||||
if err != nil || out == nil {
|
||||
continue
|
||||
}
|
||||
var matchValue bool
|
||||
if reflect.TypeOf(out).Kind() == reflect.String {
|
||||
matchValue, err = regexp.MatchString(rule.Value, out.(string))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
logger.Log.Info(matchValue, rule.Value)
|
||||
} else {
|
||||
val := fmt.Sprint(out)
|
||||
matchValue, err = regexp.MatchString(rule.Value, val)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
resultPolicyToSend = appendRulesMatched(resultPolicyToSend, matchValue, rule)
|
||||
} else if rule.Type == "header" {
|
||||
for j := range harEntry.Response.Headers {
|
||||
matchKey, err := regexp.MatchString(rule.Key, harEntry.Response.Headers[j].Name)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if matchKey {
|
||||
matchValue, err := regexp.MatchString(rule.Value, harEntry.Response.Headers[j].Value)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
resultPolicyToSend = appendRulesMatched(resultPolicyToSend, matchValue, rule)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
resultPolicyToSend = appendRulesMatched(resultPolicyToSend, true, rule)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func PassedValidationRules(rulesMatched []RulesMatched) (bool, int64, int) {
|
||||
var numberOfRulesMatched = len(rulesMatched)
|
||||
var responseTime int64 = -1
|
||||
|
||||
if numberOfRulesMatched == 0 {
|
||||
return false, 0, numberOfRulesMatched
|
||||
}
|
||||
|
||||
for _, rule := range rulesMatched {
|
||||
if !rule.Matched {
|
||||
return false, responseTime, numberOfRulesMatched
|
||||
} else {
|
||||
if strings.ToLower(rule.Rule.Type) == "slo" {
|
||||
if rule.Rule.ResponseTime < responseTime || responseTime == -1 {
|
||||
responseTime = rule.Rule.ResponseTime
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true, responseTime, numberOfRulesMatched
|
||||
}
|
||||
@@ -50,11 +50,13 @@ var (
|
||||
IP: fmt.Sprintf("%s.%s", Ip, UnresolvedNodeName),
|
||||
}
|
||||
ProtocolHttp = &tapApi.Protocol{
|
||||
Name: "http",
|
||||
ProtocolSummary: tapApi.ProtocolSummary{
|
||||
Name: "http",
|
||||
Version: "1.1",
|
||||
Abbreviation: "HTTP",
|
||||
},
|
||||
LongName: "Hypertext Transfer Protocol -- HTTP/1.1",
|
||||
Abbreviation: "HTTP",
|
||||
Macro: "http",
|
||||
Version: "1.1",
|
||||
BackgroundColor: "#205cf5",
|
||||
ForegroundColor: "#ffffff",
|
||||
FontSize: 12,
|
||||
@@ -63,11 +65,13 @@ var (
|
||||
Priority: 0,
|
||||
}
|
||||
ProtocolRedis = &tapApi.Protocol{
|
||||
Name: "redis",
|
||||
ProtocolSummary: tapApi.ProtocolSummary{
|
||||
Name: "redis",
|
||||
Version: "3.x",
|
||||
Abbreviation: "REDIS",
|
||||
},
|
||||
LongName: "Redis Serialization Protocol",
|
||||
Abbreviation: "REDIS",
|
||||
Macro: "redis",
|
||||
Version: "3.x",
|
||||
BackgroundColor: "#a41e11",
|
||||
ForegroundColor: "#ffffff",
|
||||
FontSize: 11,
|
||||
|
||||
@@ -53,8 +53,8 @@ func init() {
	tapCmd.Flags().String(configStructs.HumanMaxEntriesDBSizeTapName, defaultTapConfig.HumanMaxEntriesDBSize, "Override the default max entries db size")
	tapCmd.Flags().String(configStructs.InsertionFilterName, defaultTapConfig.InsertionFilter, "Set the insertion filter. Accepts string or a file path.")
	tapCmd.Flags().Bool(configStructs.DryRunTapName, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
	tapCmd.Flags().String(configStructs.EnforcePolicyFile, defaultTapConfig.EnforcePolicyFile, "Yaml file path with policy rules")
	tapCmd.Flags().Bool(configStructs.ServiceMeshName, defaultTapConfig.ServiceMesh, "Record decrypted traffic if the cluster is configured with a service mesh and with mtls")
	tapCmd.Flags().Bool(configStructs.TlsName, defaultTapConfig.Tls, "Record tls traffic")
	tapCmd.Flags().Bool(configStructs.ProfilerName, defaultTapConfig.Profiler, "Run pprof server")
	tapCmd.Flags().Int(configStructs.MaxLiveStreamsName, defaultTapConfig.MaxLiveStreams, "Maximum live tcp streams to handle concurrently")
}
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/up9inc/mizu/cli/telemetry"
|
||||
"github.com/up9inc/mizu/cli/utils"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
core "k8s.io/api/core/v1"
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
@@ -45,16 +44,6 @@ func RunMizuTap() {
|
||||
|
||||
apiProvider = apiserver.NewProvider(GetApiServerUrl(config.Config.Tap.GuiPort), apiserver.DefaultRetries, apiserver.DefaultTimeout)
|
||||
|
||||
var err error
|
||||
var serializedValidationRules string
|
||||
if config.Config.Tap.EnforcePolicyFile != "" {
|
||||
serializedValidationRules, err = readValidationRules(config.Config.Tap.EnforcePolicyFile)
|
||||
if err != nil {
|
||||
logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error reading policy file: %v", errormessage.FormatError(err)))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
kubernetesProvider, err := getKubernetesProviderForCli()
|
||||
if err != nil {
|
||||
return
|
||||
@@ -98,7 +87,7 @@ func RunMizuTap() {
|
||||
}
|
||||
|
||||
logger.Log.Infof("Waiting for Mizu Agent to start...")
|
||||
if state.mizuServiceAccountExists, err = resources.CreateTapMizuResources(ctx, kubernetesProvider, serializedValidationRules, serializedMizuConfig, config.Config.IsNsRestrictedMode(), config.Config.MizuResourcesNamespace, config.Config.AgentImage, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.ApiServerResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
|
||||
if state.mizuServiceAccountExists, err = resources.CreateTapMizuResources(ctx, kubernetesProvider, serializedMizuConfig, config.Config.IsNsRestrictedMode(), config.Config.MizuResourcesNamespace, config.Config.AgentImage, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.ApiServerResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
|
||||
var statusError *k8serrors.StatusError
|
||||
if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
|
||||
logger.Log.Info("Mizu is already running in this namespace, change the `mizu-resources-namespace` configuration or run `mizu clean` to remove the currently running Mizu instance")
|
||||
@@ -176,6 +165,7 @@ func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider
|
||||
MizuServiceAccountExists: state.mizuServiceAccountExists,
|
||||
ServiceMesh: config.Config.Tap.ServiceMesh,
|
||||
Tls: config.Config.Tap.Tls,
|
||||
MaxLiveStreams: config.Config.Tap.MaxLiveStreams,
|
||||
}, startTime)
|
||||
|
||||
if err != nil {
|
||||
@@ -239,15 +229,6 @@ func getErrorDisplayTextForK8sTapManagerError(err kubernetes.K8sTapManagerError)
|
||||
}
|
||||
}
|
||||
|
||||
func readValidationRules(file string) (string, error) {
|
||||
rules, err := shared.DecodeEnforcePolicy(file)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
newContent, _ := yaml.Marshal(&rules)
|
||||
return string(newContent), nil
|
||||
}
|
||||
|
||||
func getMizuApiFilteringOptions() (*api.TrafficFilteringOptions, error) {
|
||||
var compiledRegexSlice []*api.SerializableRegexp
|
||||
|
||||
|
||||
@@ -23,10 +23,10 @@ const (
|
||||
HumanMaxEntriesDBSizeTapName = "max-entries-db-size"
|
||||
InsertionFilterName = "insertion-filter"
|
||||
DryRunTapName = "dry-run"
|
||||
EnforcePolicyFile = "traffic-validation-file"
|
||||
ServiceMeshName = "service-mesh"
|
||||
TlsName = "tls"
|
||||
ProfilerName = "profiler"
|
||||
MaxLiveStreamsName = "max-live-streams"
|
||||
)
|
||||
|
||||
type TapConfig struct {
|
||||
@@ -41,12 +41,12 @@ type TapConfig struct {
|
||||
HumanMaxEntriesDBSize string `yaml:"max-entries-db-size" default:"200MB"`
|
||||
InsertionFilter string `yaml:"insertion-filter" default:""`
|
||||
DryRun bool `yaml:"dry-run" default:"false"`
|
||||
EnforcePolicyFile string `yaml:"traffic-validation-file"`
|
||||
ApiServerResources shared.Resources `yaml:"api-server-resources"`
|
||||
TapperResources shared.Resources `yaml:"tapper-resources"`
|
||||
ServiceMesh bool `yaml:"service-mesh" default:"false"`
|
||||
Tls bool `yaml:"tls" default:"false"`
|
||||
Profiler bool `yaml:"profiler" default:"false"`
|
||||
MaxLiveStreams int `yaml:"max-live-streams" default:"500"`
|
||||
}
|
||||
|
||||
func (config *TapConfig) PodRegex() *regexp.Regexp {
|
||||
|
||||
@@ -14,14 +14,14 @@ import (
|
||||
core "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
func CreateTapMizuResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedValidationRules string, serializedMizuConfig string, isNsRestrictedMode bool, mizuResourcesNamespace string, agentImage string, maxEntriesDBSizeBytes int64, apiServerResources shared.Resources, imagePullPolicy core.PullPolicy, logLevel logging.Level, profiler bool) (bool, error) {
|
||||
func CreateTapMizuResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedMizuConfig string, isNsRestrictedMode bool, mizuResourcesNamespace string, agentImage string, maxEntriesDBSizeBytes int64, apiServerResources shared.Resources, imagePullPolicy core.PullPolicy, logLevel logging.Level, profiler bool) (bool, error) {
|
||||
if !isNsRestrictedMode {
|
||||
if err := createMizuNamespace(ctx, kubernetesProvider, mizuResourcesNamespace); err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := createMizuConfigmap(ctx, kubernetesProvider, serializedValidationRules, serializedMizuConfig, mizuResourcesNamespace); err != nil {
|
||||
if err := createMizuConfigmap(ctx, kubernetesProvider, serializedMizuConfig, mizuResourcesNamespace); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
@@ -71,8 +71,8 @@ func createMizuNamespace(ctx context.Context, kubernetesProvider *kubernetes.Pro
|
||||
return err
|
||||
}
|
||||
|
||||
func createMizuConfigmap(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedValidationRules string, serializedMizuConfig string, mizuResourcesNamespace string) error {
|
||||
err := kubernetesProvider.CreateConfigMap(ctx, mizuResourcesNamespace, kubernetes.ConfigMapName, serializedValidationRules, serializedMizuConfig)
|
||||
func createMizuConfigmap(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedMizuConfig string, mizuResourcesNamespace string) error {
|
||||
err := kubernetesProvider.CreateConfigMap(ctx, mizuResourcesNamespace, kubernetes.ConfigMapName, serializedMizuConfig)
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@ const (
|
||||
NodeNameEnvVar = "NODE_NAME"
|
||||
ConfigDirPath = "/app/config/"
|
||||
DataDirPath = "/app/data/"
|
||||
ValidationRulesFileName = "validation-rules.yaml"
|
||||
ConfigFileName = "mizu-config.json"
|
||||
DefaultApiServerPort = 8899
|
||||
LogLevelEnvVar = "LOG_LEVEL"
|
||||
|
||||
@@ -4,11 +4,9 @@ go 1.17
|
||||
|
||||
require (
|
||||
github.com/docker/go-units v0.4.0
|
||||
github.com/golang-jwt/jwt/v4 v4.2.0
|
||||
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
|
||||
github.com/up9inc/mizu/logger v0.0.0
|
||||
github.com/up9inc/mizu/tap/api v0.0.0
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
|
||||
k8s.io/api v0.23.3
|
||||
k8s.io/apimachinery v0.23.3
|
||||
k8s.io/client-go v0.23.3
|
||||
@@ -38,11 +36,11 @@ require (
|
||||
github.com/go-openapi/jsonreference v0.19.6 // indirect
|
||||
github.com/go-openapi/swag v0.21.1 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/go-cmp v0.5.7 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/martian v2.1.0+incompatible // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
||||
@@ -81,6 +79,7 @@ require (
|
||||
google.golang.org/protobuf v1.27.1 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
|
||||
k8s.io/cli-runtime v0.23.3 // indirect
|
||||
k8s.io/component-base v0.23.3 // indirect
|
||||
k8s.io/klog/v2 v2.40.1 // indirect
|
||||
|
||||
@@ -282,7 +282,6 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
|
||||
@@ -48,6 +48,7 @@ type TapperSyncerConfig struct {
|
||||
MizuServiceAccountExists bool
|
||||
ServiceMesh bool
|
||||
Tls bool
|
||||
MaxLiveStreams int
|
||||
}
|
||||
|
||||
func CreateAndStartMizuTapperSyncer(ctx context.Context, kubernetesProvider *Provider, config TapperSyncerConfig, startTime time.Time) (*MizuTapperSyncer, error) {
|
||||
@@ -337,7 +338,8 @@ func (tapperSyncer *MizuTapperSyncer) updateMizuTappers() error {
|
||||
tapperSyncer.config.MizuApiFilteringOptions,
|
||||
tapperSyncer.config.LogLevel,
|
||||
tapperSyncer.config.ServiceMesh,
|
||||
tapperSyncer.config.Tls); err != nil {
|
||||
tapperSyncer.config.Tls,
|
||||
tapperSyncer.config.MaxLiveStreams); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
"github.com/op/go-logging"
|
||||
"github.com/up9inc/mizu/logger"
|
||||
@@ -382,11 +383,11 @@ func (provider *Provider) GetMizuApiServerPodObject(opts *ApiServerOptions, moun
|
||||
Tolerations: []core.Toleration{
|
||||
{
|
||||
Operator: core.TolerationOpExists,
|
||||
Effect: core.TaintEffectNoExecute,
|
||||
Effect: core.TaintEffectNoExecute,
|
||||
},
|
||||
{
|
||||
Operator: core.TolerationOpExists,
|
||||
Effect: core.TaintEffectNoSchedule,
|
||||
Effect: core.TaintEffectNoSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -684,11 +685,8 @@ func (provider *Provider) handleRemovalError(err error) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (provider *Provider) CreateConfigMap(ctx context.Context, namespace string, configMapName string, serializedValidationRules string, serializedMizuConfig string) error {
|
||||
func (provider *Provider) CreateConfigMap(ctx context.Context, namespace string, configMapName string, serializedMizuConfig string) error {
|
||||
configMapData := make(map[string]string)
|
||||
if serializedValidationRules != "" {
|
||||
configMapData[shared.ValidationRulesFileName] = serializedValidationRules
|
||||
}
|
||||
configMapData[shared.ConfigFileName] = serializedMizuConfig
|
||||
|
||||
configMap := &core.ConfigMap{
|
||||
@@ -711,7 +709,7 @@ func (provider *Provider) CreateConfigMap(ctx context.Context, namespace string,
|
||||
return nil
|
||||
}
|
||||
|
||||
func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, apiServerPodIp string, nodeNames []string, serviceAccountName string, resources shared.Resources, imagePullPolicy core.PullPolicy, mizuApiFilteringOptions api.TrafficFilteringOptions, logLevel logging.Level, serviceMesh bool, tls bool) error {
|
||||
func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, apiServerPodIp string, nodeNames []string, serviceAccountName string, resources shared.Resources, imagePullPolicy core.PullPolicy, mizuApiFilteringOptions api.TrafficFilteringOptions, logLevel logging.Level, serviceMesh bool, tls bool, maxLiveStreams int) error {
|
||||
logger.Log.Debugf("Applying %d tapper daemon sets, ns: %s, daemonSetName: %s, podImage: %s, tapperPodName: %s", len(nodeNames), namespace, daemonSetName, podImage, tapperPodName)
|
||||
|
||||
if len(nodeNames) == 0 {
|
||||
@@ -729,6 +727,7 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
|
||||
"--tap",
|
||||
"--api-server-address", fmt.Sprintf("ws://%s/wsTapper", apiServerPodIp),
|
||||
"--nodefrag",
|
||||
"--max-live-streams", strconv.Itoa(maxLiveStreams),
|
||||
}
|
||||
|
||||
if serviceMesh {
|
||||
|
||||
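The daemon set hunk above threads the new maxLiveStreams parameter down into the tapper container arguments. A hedged sketch of the resulting argument slice, assuming the default of 500 from TapConfig and an illustrative API server pod IP:

    apiServerPodIp := "10.0.0.12" // illustrative value
    maxLiveStreams := 500         // TapConfig default shown earlier in this diff
    args := []string{
        "--tap",
        "--api-server-address", fmt.Sprintf("ws://%s/wsTapper", apiServerPodIp),
        "--nodefrag",
        "--max-live-streams", strconv.Itoa(maxLiveStreams),
    }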
@@ -1,13 +1,8 @@
package shared

import (
    "io/ioutil"
    "strings"

    "github.com/op/go-logging"
    "github.com/up9inc/mizu/logger"

    "gopkg.in/yaml.v3"
    v1 "k8s.io/api/core/v1"
)

@@ -55,7 +50,6 @@ type WebSocketMessageMetadata struct {
    MessageType WebSocketMessageType `json:"messageType,omitempty"`
}

type WebSocketStatusMessage struct {
    *WebSocketMessageMetadata
    TappingStatus []TappedPodStatus `json:"tappingStatus"`
@@ -136,83 +130,3 @@ type HealthResponse struct {
type VersionResponse struct {
    Ver string `json:"ver"`
}

type RulesPolicy struct {
    Rules []RulePolicy `yaml:"rules"`
}

type RulePolicy struct {
    Type         string `yaml:"type"`
    Service      string `yaml:"service"`
    Path         string `yaml:"path"`
    Method       string `yaml:"method"`
    Key          string `yaml:"key"`
    Value        string `yaml:"value"`
    ResponseTime int64  `yaml:"response-time"`
    Name         string `yaml:"name"`
}

type RulesMatched struct {
    Matched bool       `json:"matched"`
    Rule    RulePolicy `json:"rule"`
}

func (r *RulePolicy) validateType() bool {
    permitedTypes := []string{"json", "header", "slo"}
    _, found := Find(permitedTypes, r.Type)
    if !found {
        logger.Log.Errorf("Only json, header and slo types are supported on rule definition. This rule will be ignored. rule name: %s", r.Name)
        found = false
    }
    if strings.ToLower(r.Type) == "slo" {
        if r.ResponseTime <= 0 {
            logger.Log.Errorf("When rule type is slo, the field response-time should be specified and have a value >= 1. rule name: %s", r.Name)
            found = false
        }
    }
    return found
}

func (rules *RulesPolicy) ValidateRulesPolicy() []int {
    invalidIndex := make([]int, 0)
    for i := range rules.Rules {
        validated := rules.Rules[i].validateType()
        if !validated {
            invalidIndex = append(invalidIndex, i)
        }
    }
    return invalidIndex
}

func Find(slice []string, val string) (int, bool) {
    for i, item := range slice {
        if item == val {
            return i, true
        }
    }
    return -1, false
}

func DecodeEnforcePolicy(path string) (RulesPolicy, error) {
    content, err := ioutil.ReadFile(path)
    enforcePolicy := RulesPolicy{}
    if err != nil {
        return enforcePolicy, err
    }
    err = yaml.Unmarshal(content, &enforcePolicy)
    if err != nil {
        return enforcePolicy, err
    }
    invalidIndex := enforcePolicy.ValidateRulesPolicy()
    var k = 0
    if len(invalidIndex) != 0 {
        for i, rule := range enforcePolicy.Rules {
            if !ContainsInt(invalidIndex, i) {
                enforcePolicy.Rules[k] = rule
                k++
            }
        }
        enforcePolicy.Rules = enforcePolicy.Rules[:k]
    }
    return enforcePolicy, nil
}
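The rules code removed above was driven by a YAML policy file loaded through DecodeEnforcePolicy. For reference, a hedged usage sketch (the path is illustrative, assembled from the ConfigDirPath and ValidationRulesFileName constants that also appear in this diff):

    rules, err := shared.DecodeEnforcePolicy("/app/config/validation-rules.yaml")
    if err != nil {
        logger.Log.Errorf("failed to read validation rules: %v", err)
        return
    }
    for _, r := range rules.Rules {
        logger.Log.Infof("loaded rule %q (type=%s, service=%s)", r.Name, r.Type, r.Service)
    }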
@@ -10,15 +10,6 @@ func Contains(slice []string, containsValue string) bool {
    return false
}

func ContainsInt(slice []int, containsValue int) bool {
    for _, sliceValue := range slice {
        if sliceValue == containsValue {
            return true
        }
    }
    return false
}

func Unique(slice []string) []string {
    keys := make(map[string]bool)
    var list []string

@@ -2,6 +2,7 @@ package api

import (
    "bufio"
    "fmt"
    "net"
    "sync"
    "time"
@@ -14,12 +15,20 @@ const UnknownNamespace = ""
var UnknownIp = net.IP{0, 0, 0, 0}
var UnknownPort uint16 = 0

type ProtocolSummary struct {
    Name         string `json:"name"`
    Version      string `json:"version"`
    Abbreviation string `json:"abbr"`
}

func (protocol *ProtocolSummary) ToString() string {
    return fmt.Sprintf("%s?%s?%s", protocol.Name, protocol.Version, protocol.Abbreviation)
}

type Protocol struct {
    Name string `json:"name"`
    ProtocolSummary
    LongName        string `json:"longName"`
    Abbreviation    string `json:"abbr"`
    Macro           string `json:"macro"`
    Version         string `json:"version"`
    BackgroundColor string `json:"backgroundColor"`
    ForegroundColor string `json:"foregroundColor"`
    FontSize        int8   `json:"fontSize"`
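The embedded ProtocolSummary introduced here gives every protocol a compact identity, and ToString turns it into the "name?version?abbr" key that the per-dissector protocolsMap lookups later in this diff rely on. A small hedged illustration:

    summary := api.ProtocolSummary{Name: "http", Version: "1.1", Abbreviation: "HTTP"}
    key := summary.ToString() // "http?1.1?HTTP"
    full := protocolsMap[key] // resolves back to the complete *api.Protocol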
@@ -91,7 +100,6 @@ type OutputChannelItem struct {
    Timestamp      int64
    ConnectionInfo *ConnectionInfo
    Pair           *RequestResponsePair
    Summary        *BaseEntry
    Namespace      string
}

@@ -116,6 +124,7 @@ func (p *ReadProgress) Reset() {

type Dissector interface {
    Register(*Extension)
    GetProtocols() map[string]*Protocol
    Ping()
    Dissect(b *bufio.Reader, reader TcpReader, options *TrafficFilteringOptions) error
    Analyze(item *OutputChannelItem, resolvedSource string, resolvedDestination string, namespace string) *Entry
@@ -151,7 +160,7 @@ func (e *Emitting) Emit(item *OutputChannelItem) {

type Entry struct {
    Id          string          `json:"id"`
    Protocol    Protocol        `json:"proto"`
    Protocol    ProtocolSummary `json:"protocol"`
    Capture     Capture         `json:"capture"`
    Source      *TCP            `json:"src"`
    Destination *TCP            `json:"dst"`
@@ -164,40 +173,30 @@ type Entry struct {
    RequestSize  int             `json:"requestSize"`
    ResponseSize int             `json:"responseSize"`
    ElapsedTime  int64           `json:"elapsedTime"`
    Rules        ApplicableRules `json:"rules,omitempty"`
}

type EntryWrapper struct {
    Protocol       Protocol                 `json:"protocol"`
    Representation string                   `json:"representation"`
    Data           *Entry                   `json:"data"`
    Base           *BaseEntry               `json:"base"`
    Rules          []map[string]interface{} `json:"rulesMatched,omitempty"`
    IsRulesEnabled bool                     `json:"isRulesEnabled"`
    Protocol       Protocol   `json:"protocol"`
    Representation string     `json:"representation"`
    Data           *Entry     `json:"data"`
    Base           *BaseEntry `json:"base"`
}

type BaseEntry struct {
    Id           string          `json:"id"`
    Protocol     Protocol        `json:"proto,omitempty"`
    Capture      Capture         `json:"capture"`
    Summary      string          `json:"summary,omitempty"`
    SummaryQuery string          `json:"summaryQuery,omitempty"`
    Status       int             `json:"status"`
    StatusQuery  string          `json:"statusQuery"`
    Method       string          `json:"method,omitempty"`
    MethodQuery  string          `json:"methodQuery,omitempty"`
    Timestamp    int64           `json:"timestamp,omitempty"`
    Source       *TCP            `json:"src"`
    Destination  *TCP            `json:"dst"`
    IsOutgoing   bool            `json:"isOutgoing,omitempty"`
    Latency      int64           `json:"latency"`
    Rules        ApplicableRules `json:"rules,omitempty"`
}

type ApplicableRules struct {
    Latency       int64 `json:"latency,omitempty"`
    Status        bool  `json:"status,omitempty"`
    NumberOfRules int   `json:"numberOfRules,omitempty"`
    Id           string   `json:"id"`
    Protocol     Protocol `json:"proto,omitempty"`
    Capture      Capture  `json:"capture"`
    Summary      string   `json:"summary,omitempty"`
    SummaryQuery string   `json:"summaryQuery,omitempty"`
    Status       int      `json:"status"`
    StatusQuery  string   `json:"statusQuery"`
    Method       string   `json:"method,omitempty"`
    MethodQuery  string   `json:"methodQuery,omitempty"`
    Timestamp    int64    `json:"timestamp,omitempty"`
    Source       *TCP     `json:"src"`
    Destination  *TCP     `json:"dst"`
    IsOutgoing   bool     `json:"isOutgoing,omitempty"`
    Latency      int64    `json:"latency"`
}
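This hunk slims Entry down to a ProtocolSummary and drops the per-entry rules fields; the full Protocol is looked up again only when a BaseEntry is built for display, as the dissector Summarize hunks further down show. A hedged sketch of that round trip (values are illustrative):

    entry := &api.Entry{Protocol: http11protocol.ProtocolSummary}
    base := &api.BaseEntry{Protocol: *protocolsMap[entry.Protocol.ToString()]}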
const (

@@ -13,4 +13,4 @@ test-pull-bin:

test-pull-expect:
	@mkdir -p expect
	@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect11/amqp/\* expect
	@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect15/amqp/\* expect

@@ -13,11 +13,13 @@ import (
)

var protocol = api.Protocol{
    Name: "amqp",
    ProtocolSummary: api.ProtocolSummary{
        Name:         "amqp",
        Version:      "0-9-1",
        Abbreviation: "AMQP",
    },
    LongName: "Advanced Message Queuing Protocol 0-9-1",
    Abbreviation: "AMQP",
    Macro: "amqp",
    Version: "0-9-1",
    BackgroundColor: "#ff6600",
    ForegroundColor: "#ffffff",
    FontSize: 12,
@@ -26,12 +28,20 @@ var protocol = api.Protocol{
    Priority: 1,
}

var protocolsMap = map[string]*api.Protocol{
    protocol.ToString(): &protocol,
}

type dissecting string

func (d dissecting) Register(extension *api.Extension) {
    extension.Protocol = &protocol
}

func (d dissecting) GetProtocols() map[string]*api.Protocol {
    return protocolsMap
}

func (d dissecting) Ping() {
    log.Printf("pong %s", protocol.Name)
}
@@ -214,7 +224,7 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,

    reqDetails["method"] = request["method"]
    return &api.Entry{
        Protocol: protocol,
        Protocol: protocol.ProtocolSummary,
        Capture:  item.Capture,
        Source: &api.TCP{
            Name: resolvedSource,
@@ -277,7 +287,7 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {

    return &api.BaseEntry{
        Id:           entry.Id,
        Protocol:     entry.Protocol,
        Protocol:     *protocolsMap[entry.Protocol.ToString()],
        Capture:      entry.Capture,
        Summary:      summary,
        SummaryQuery: summaryQuery,
@@ -290,7 +300,6 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
        Destination: entry.Destination,
        IsOutgoing:  entry.Outgoing,
        Latency:     entry.ElapsedTime,
        Rules:       entry.Rules,
    }
}

@@ -322,7 +331,7 @@ func (d dissecting) Represent(request map[string]interface{}, response map[strin

func (d dissecting) Macros() map[string]string {
    return map[string]string{
        `amqp`: fmt.Sprintf(`proto.name == "%s"`, protocol.Name),
        `amqp`: fmt.Sprintf(`protocol.name == "%s"`, protocol.Name),
    }
}

@@ -44,7 +44,7 @@ func TestRegister(t *testing.T) {

func TestMacros(t *testing.T) {
    expectedMacros := map[string]string{
        "amqp": `proto.name == "amqp"`,
        "amqp": `protocol.name == "amqp"`,
    }
    dissector := NewDissector()
    macros := dissector.Macros()

@@ -13,4 +13,4 @@ test-pull-bin:

test-pull-expect:
	@mkdir -p expect
	@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect12/http/\* expect
	@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect15/http/\* expect

@@ -15,11 +15,13 @@ import (
)

var http10protocol = api.Protocol{
    Name: "http",
    ProtocolSummary: api.ProtocolSummary{
        Name:         "http",
        Version:      "1.0",
        Abbreviation: "HTTP",
    },
    LongName: "Hypertext Transfer Protocol -- HTTP/1.0",
    Abbreviation: "HTTP",
    Macro: "http",
    Version: "1.0",
    BackgroundColor: "#205cf5",
    ForegroundColor: "#ffffff",
    FontSize: 12,
@@ -29,11 +31,13 @@ var http10protocol = api.Protocol{
}

var http11protocol = api.Protocol{
    Name: "http",
    ProtocolSummary: api.ProtocolSummary{
        Name:         "http",
        Version:      "1.1",
        Abbreviation: "HTTP",
    },
    LongName: "Hypertext Transfer Protocol -- HTTP/1.1",
    Abbreviation: "HTTP",
    Macro: "http",
    Version: "1.1",
    BackgroundColor: "#205cf5",
    ForegroundColor: "#ffffff",
    FontSize: 12,
@@ -43,11 +47,13 @@ var http11protocol = api.Protocol{
}

var http2Protocol = api.Protocol{
    Name: "http",
    ProtocolSummary: api.ProtocolSummary{
        Name:         "http",
        Version:      "2.0",
        Abbreviation: "HTTP/2",
    },
    LongName: "Hypertext Transfer Protocol Version 2 (HTTP/2)",
    Abbreviation: "HTTP/2",
    Macro: "http2",
    Version: "2.0",
    BackgroundColor: "#244c5a",
    ForegroundColor: "#ffffff",
    FontSize: 11,
@@ -57,11 +63,13 @@ var http2Protocol = api.Protocol{
}

var grpcProtocol = api.Protocol{
    Name: "http",
    ProtocolSummary: api.ProtocolSummary{
        Name:         "http",
        Version:      "2.0",
        Abbreviation: "gRPC",
    },
    LongName: "Hypertext Transfer Protocol Version 2 (HTTP/2) [ gRPC over HTTP/2 ]",
    Abbreviation: "gRPC",
    Macro: "grpc",
    Version: "2.0",
    BackgroundColor: "#244c5a",
    ForegroundColor: "#ffffff",
    FontSize: 11,
@@ -71,11 +79,13 @@ var grpcProtocol = api.Protocol{
}

var graphQL1Protocol = api.Protocol{
    Name: "http",
    ProtocolSummary: api.ProtocolSummary{
        Name:         "http",
        Version:      "1.1",
        Abbreviation: "GQL",
    },
    LongName: "Hypertext Transfer Protocol -- HTTP/1.1 [ GraphQL over HTTP/1.1 ]",
    Abbreviation: "GQL",
    Macro: "gql",
    Version: "1.1",
    BackgroundColor: "#e10098",
    ForegroundColor: "#ffffff",
    FontSize: 12,
@@ -85,11 +95,13 @@ var graphQL1Protocol = api.Protocol{
}

var graphQL2Protocol = api.Protocol{
    Name: "http",
    ProtocolSummary: api.ProtocolSummary{
        Name:         "http",
        Version:      "2.0",
        Abbreviation: "GQL",
    },
    LongName: "Hypertext Transfer Protocol Version 2 (HTTP/2) [ GraphQL over HTTP/2 ]",
    Abbreviation: "GQL",
    Macro: "gql",
    Version: "2.0",
    BackgroundColor: "#e10098",
    ForegroundColor: "#ffffff",
    FontSize: 12,
@@ -98,6 +110,15 @@ var graphQL2Protocol = api.Protocol{
    Priority: 0,
}

var protocolsMap = map[string]*api.Protocol{
    http10protocol.ToString():   &http10protocol,
    http11protocol.ToString():   &http11protocol,
    http2Protocol.ToString():    &http2Protocol,
    grpcProtocol.ToString():     &grpcProtocol,
    graphQL1Protocol.ToString(): &graphQL1Protocol,
    graphQL2Protocol.ToString(): &graphQL2Protocol,
}

const (
    TypeHttpRequest = iota
    TypeHttpResponse
@@ -109,6 +130,10 @@ func (d dissecting) Register(extension *api.Extension) {
    extension.Protocol = &http11protocol
}

func (d dissecting) GetProtocols() map[string]*api.Protocol {
    return protocolsMap
}

func (d dissecting) Ping() {
    log.Printf("pong %s", http11protocol.Name)
}
@@ -281,7 +306,7 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
    }

    return &api.Entry{
        Protocol: item.Protocol,
        Protocol: item.Protocol.ProtocolSummary,
        Capture:  item.Capture,
        Source: &api.TCP{
            Name: resolvedSource,
@@ -315,7 +340,7 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {

    return &api.BaseEntry{
        Id:           entry.Id,
        Protocol:     entry.Protocol,
        Protocol:     *protocolsMap[entry.Protocol.ToString()],
        Capture:      entry.Capture,
        Summary:      summary,
        SummaryQuery: summaryQuery,
@@ -328,7 +353,6 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
        Destination: entry.Destination,
        IsOutgoing:  entry.Outgoing,
        Latency:     entry.ElapsedTime,
        Rules:       entry.Rules,
    }
}

@@ -503,10 +527,10 @@ func (d dissecting) Represent(request map[string]interface{}, response map[strin

func (d dissecting) Macros() map[string]string {
    return map[string]string{
        `http`:  fmt.Sprintf(`proto.name == "%s" and proto.version.startsWith("%c")`, http11protocol.Name, http11protocol.Version[0]),
        `http2`: fmt.Sprintf(`proto.name == "%s" and proto.version == "%s"`, http11protocol.Name, http2Protocol.Version),
        `grpc`:  fmt.Sprintf(`proto.name == "%s" and proto.version == "%s" and proto.macro == "%s"`, http11protocol.Name, grpcProtocol.Version, grpcProtocol.Macro),
        `gql`:   fmt.Sprintf(`proto.name == "%s" and proto.macro == "%s"`, graphQL1Protocol.Name, graphQL1Protocol.Macro),
        `http`:  fmt.Sprintf(`protocol.abbr == "%s"`, http11protocol.Abbreviation),
        `http2`: fmt.Sprintf(`protocol.abbr == "%s"`, http2Protocol.Abbreviation),
        `grpc`:  fmt.Sprintf(`protocol.abbr == "%s"`, grpcProtocol.Abbreviation),
        `gql`:   fmt.Sprintf(`protocol.abbr == "%s"`, graphQL1Protocol.Abbreviation),
    }
}
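The macro rewrite above collapses each shorthand into a single abbreviation check in the filter language, so the grpc macro, for example, now expands to protocol.abbr == "gRPC" instead of combining name, version and macro. A hedged snippet showing how the new expansions can be inspected:

    macros := NewDissector().Macros()
    fmt.Println(macros["grpc"]) // protocol.abbr == "gRPC"
    fmt.Println(macros["http"]) // protocol.abbr == "HTTP"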
@@ -44,10 +44,10 @@ func TestRegister(t *testing.T) {

func TestMacros(t *testing.T) {
    expectedMacros := map[string]string{
        "http":  `proto.name == "http" and proto.version.startsWith("1")`,
        "http2": `proto.name == "http" and proto.version == "2.0"`,
        "grpc":  `proto.name == "http" and proto.version == "2.0" and proto.macro == "grpc"`,
        "gql":   `proto.name == "http" and proto.macro == "gql"`,
        "http":  `protocol.abbr == "HTTP"`,
        "http2": `protocol.abbr == "HTTP/2"`,
        "grpc":  `protocol.abbr == "gRPC"`,
        "gql":   `protocol.abbr == "GQL"`,
    }
    dissector := NewDissector()
    macros := dissector.Macros()

@@ -13,4 +13,4 @@ test-pull-bin:

test-pull-expect:
	@mkdir -p expect
	@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect11/kafka/\* expect
	@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect15/kafka/\* expect

@@ -11,11 +11,13 @@ import (
)

var _protocol = api.Protocol{
    Name: "kafka",
    ProtocolSummary: api.ProtocolSummary{
        Name:         "kafka",
        Version:      "12",
        Abbreviation: "KAFKA",
    },
    LongName: "Apache Kafka Protocol",
    Abbreviation: "KAFKA",
    Macro: "kafka",
    Version: "12",
    BackgroundColor: "#000000",
    ForegroundColor: "#ffffff",
    FontSize: 11,
@@ -24,12 +26,20 @@ var _protocol = api.Protocol{
    Priority: 2,
}

var protocolsMap = map[string]*api.Protocol{
    _protocol.ToString(): &_protocol,
}

type dissecting string

func (d dissecting) Register(extension *api.Extension) {
    extension.Protocol = &_protocol
}

func (d dissecting) GetProtocols() map[string]*api.Protocol {
    return protocolsMap
}

func (d dissecting) Ping() {
    log.Printf("pong %s", _protocol.Name)
}
@@ -62,7 +72,7 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
        elapsedTime = 0
    }
    return &api.Entry{
        Protocol: _protocol,
        Protocol: _protocol.ProtocolSummary,
        Capture:  item.Capture,
        Source: &api.TCP{
            Name: resolvedSource,
@@ -187,7 +197,7 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {

    return &api.BaseEntry{
        Id:           entry.Id,
        Protocol:     entry.Protocol,
        Protocol:     *protocolsMap[entry.Protocol.ToString()],
        Capture:      entry.Capture,
        Summary:      summary,
        SummaryQuery: summaryQuery,
@@ -200,7 +210,6 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
        Destination: entry.Destination,
        IsOutgoing:  entry.Outgoing,
        Latency:     entry.ElapsedTime,
        Rules:       entry.Rules,
    }
}

@@ -243,7 +252,7 @@ func (d dissecting) Represent(request map[string]interface{}, response map[strin

func (d dissecting) Macros() map[string]string {
    return map[string]string{
        `kafka`: fmt.Sprintf(`proto.name == "%s"`, _protocol.Name),
        `kafka`: fmt.Sprintf(`protocol.name == "%s"`, _protocol.Name),
    }
}

@@ -44,7 +44,7 @@ func TestRegister(t *testing.T) {

func TestMacros(t *testing.T) {
    expectedMacros := map[string]string{
        "kafka": `proto.name == "kafka"`,
        "kafka": `protocol.name == "kafka"`,
    }
    dissector := NewDissector()
    macros := dissector.Macros()

@@ -13,4 +13,4 @@ test-pull-bin:

test-pull-expect:
	@mkdir -p expect
	@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect11/redis/\* expect
	@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect15/redis/\* expect

@@ -11,11 +11,13 @@ import (
)

var protocol = api.Protocol{
    Name: "redis",
    ProtocolSummary: api.ProtocolSummary{
        Name:         "redis",
        Version:      "3.x",
        Abbreviation: "REDIS",
    },
    LongName: "Redis Serialization Protocol",
    Abbreviation: "REDIS",
    Macro: "redis",
    Version: "3.x",
    BackgroundColor: "#a41e11",
    ForegroundColor: "#ffffff",
    FontSize: 11,
@@ -24,12 +26,20 @@ var protocol = api.Protocol{
    Priority: 3,
}

var protocolsMap = map[string]*api.Protocol{
    protocol.ToString(): &protocol,
}

type dissecting string

func (d dissecting) Register(extension *api.Extension) {
    extension.Protocol = &protocol
}

func (d dissecting) GetProtocols() map[string]*api.Protocol {
    return protocolsMap
}

func (d dissecting) Ping() {
    log.Printf("pong %s", protocol.Name)
}
@@ -70,7 +80,7 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
        elapsedTime = 0
    }
    return &api.Entry{
        Protocol: protocol,
        Protocol: protocol.ProtocolSummary,
        Capture:  item.Capture,
        Source: &api.TCP{
            Name: resolvedSource,
@@ -115,7 +125,7 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {

    return &api.BaseEntry{
        Id:           entry.Id,
        Protocol:     entry.Protocol,
        Protocol:     *protocolsMap[entry.Protocol.ToString()],
        Capture:      entry.Capture,
        Summary:      summary,
        SummaryQuery: summaryQuery,
@@ -128,7 +138,6 @@ func (d dissecting) Summarize(entry *api.Entry) *api.BaseEntry {
        Destination: entry.Destination,
        IsOutgoing:  entry.Outgoing,
        Latency:     entry.ElapsedTime,
        Rules:       entry.Rules,
    }
}

@@ -144,7 +153,7 @@ func (d dissecting) Represent(request map[string]interface{}, response map[strin

func (d dissecting) Macros() map[string]string {
    return map[string]string{
        `redis`: fmt.Sprintf(`proto.name == "%s"`, protocol.Name),
        `redis`: fmt.Sprintf(`protocol.name == "%s"`, protocol.Name),
    }
}

@@ -45,7 +45,7 @@ func TestRegister(t *testing.T) {

func TestMacros(t *testing.T) {
    expectedMacros := map[string]string{
        "redis": `proto.name == "redis"`,
        "redis": `protocol.name == "redis"`,
    }
    dissector := NewDissector()
    macros := dissector.Macros()

@@ -4,11 +4,12 @@ go 1.17

require (
    github.com/Masterminds/semver v1.5.0
    github.com/cilium/ebpf v0.8.1
    github.com/cilium/ebpf v0.9.0
    github.com/go-errors/errors v1.4.2
    github.com/google/gopacket v1.1.19
    github.com/hashicorp/golang-lru v0.5.4
    github.com/knightsc/gapstone v0.0.0-20191231144527-6fa5afaf11a9
    github.com/moby/moby v20.10.17+incompatible
    github.com/shirou/gopsutil v3.21.11+incompatible
    github.com/struCoder/pidusage v0.2.1
    github.com/up9inc/mizu/logger v0.0.0
@@ -28,6 +29,7 @@ require (
    github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
    github.com/modern-go/reflect2 v1.0.2 // indirect
    github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 // indirect
    github.com/sirupsen/logrus v1.8.1 // indirect
    github.com/tklauser/go-sysconf v0.3.10 // indirect
    github.com/tklauser/numcpus v0.4.0 // indirect
    github.com/yusufpapurcu/wmi v1.2.2 // indirect
@@ -36,6 +38,7 @@ require (
    golang.org/x/text v0.3.7 // indirect
    gopkg.in/inf.v0 v0.9.1 // indirect
    gopkg.in/yaml.v2 v2.4.0 // indirect
    gotest.tools/v3 v3.3.0 // indirect
    k8s.io/apimachinery v0.23.3 // indirect
    k8s.io/klog/v2 v2.40.1 // indirect
    k8s.io/utils v0.0.0-20220127004650-9b3446523e65 // indirect
tap/go.sum
@@ -7,8 +7,8 @@ github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cilium/ebpf v0.8.1 h1:bLSSEbBLqGPXxls55pGr5qWZaTqcmfDJHhou7t254ao=
github.com/cilium/ebpf v0.8.1/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
github.com/cilium/ebpf v0.9.0 h1:ldiV+FscPCQ/p3mNEV4O02EPbUZJFsoEtHvIr9xLTvk=
github.com/cilium/ebpf v0.9.0/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -94,6 +94,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/moby v20.10.17+incompatible h1:TJJfyk2fLEgK+RzqVpFNkDkm0oEi+MLUfwt9lEYnp5g=
github.com/moby/moby v20.10.17+incompatible/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -123,11 +125,15 @@ github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBO
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
@@ -186,12 +192,14 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -221,6 +229,7 @@ golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -269,6 +278,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo=
gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.23.3 h1:KNrME8KHGr12Ozjf8ytOewKzZh6hl/hHUZeHddT3a38=
@@ -85,7 +85,7 @@ func NewTcpAssembler(outputItems chan *api.OutputChannelItem, streamsMap api.Tcp

    maxBufferedPagesTotal := GetMaxBufferedPagesPerConnection()
    maxBufferedPagesPerConnection := GetMaxBufferedPagesTotal()
    logger.Log.Infof("Assembler options: maxBufferedPagesTotal=%d, maxBufferedPagesPerConnection=%d, opts=%v",
    logger.Log.Infof("Assembler options: maxBufferedPagesTotal=%d, maxBufferedPagesPerConnection=%d, opts=%+v",
        maxBufferedPagesTotal, maxBufferedPagesPerConnection, opts)
    a.Assembler.AssemblerOptions.MaxBufferedPagesTotal = maxBufferedPagesTotal
    a.Assembler.AssemblerOptions.MaxBufferedPagesPerConnection = maxBufferedPagesPerConnection

@@ -21,7 +21,7 @@ docker run --rm \
	-it mizu-ebpf-builder \
	sh -c "
	BPF_TARGET=\"$BPF_TARGET\" BPF_CFLAGS=\"$BPF_CFLAGS\" go generate tap/tlstapper/tls_tapper.go
	chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpf*
	chown $(id -u):$(id -g) tap/tlstapper/tlstapper*_bpf*
	" || exit 1

popd

@@ -12,7 +12,7 @@ Copyright (C) UP9 Inc.
#include "include/common.h"


static __always_inline int add_address_to_chunk(struct pt_regs *ctx, struct tls_chunk* chunk, __u64 id, __u32 fd) {
static __always_inline int add_address_to_chunk(struct pt_regs *ctx, struct tls_chunk* chunk, __u64 id, __u32 fd, struct ssl_info* info) {
    __u32 pid = id >> 32;
    __u64 key = (__u64) pid << 32 | fd;

@@ -22,14 +22,29 @@ static __always_inline int add_address_to_chunk(struct pt_regs *ctx, struct tls_
        return 0;
    }

    int err = bpf_probe_read(chunk->address, sizeof(chunk->address), fdinfo->ipv4_addr);
    chunk->flags |= (fdinfo->flags & FLAGS_IS_CLIENT_BIT);
    int err;

    if (err != 0) {
        log_error(ctx, LOG_ERROR_READING_FD_ADDRESS, id, err, 0l);
        return 0;
    switch (info->address_info.mode) {
    case ADDRESS_INFO_MODE_UNDEFINED:
        chunk->address_info.mode = ADDRESS_INFO_MODE_SINGLE;
        err = bpf_probe_read(&chunk->address_info.sport, sizeof(chunk->address_info.sport), &fdinfo->ipv4_addr[2]);
        if (err != 0) {
            log_error(ctx, LOG_ERROR_READING_FD_ADDRESS, id, err, 0l);
            return 0;
        }

        err = bpf_probe_read(&chunk->address_info.saddr, sizeof(chunk->address_info.saddr), &fdinfo->ipv4_addr[4]);
        if (err != 0) {
            log_error(ctx, LOG_ERROR_READING_FD_ADDRESS, id, err, 0l);
            return 0;
        }
        break;
    default:
        bpf_probe_read(&chunk->address_info, sizeof(chunk->address_info), &info->address_info);
    }

    chunk->flags |= (fdinfo->flags & FLAGS_IS_CLIENT_BIT);

    return 1;
}

@@ -104,7 +119,7 @@ static __always_inline void output_ssl_chunk(struct pt_regs *ctx, struct ssl_inf
    chunk->len = count_bytes;
    chunk->fd = info->fd;

    if (!add_address_to_chunk(ctx, chunk, id, chunk->fd)) {
    if (!add_address_to_chunk(ctx, chunk, id, chunk->fd, info)) {
        // Without an address, we drop the chunk because there is not much to do with it in Go
        //
        return;

@@ -35,11 +35,12 @@ using `bpf_probe_read` calls in `go_crypto_tls_get_fd_from_tcp_conn` function.

SOURCES:

Tracing Go Functions with eBPF (before 1.17): https://www.grant.pizza/blog/tracing-go-functions-with-ebpf-part-2/
Tracing Go Functions with eBPF (<=1.16): https://www.grant.pizza/blog/tracing-go-functions-with-ebpf-part-2/
Challenges of BPF Tracing Go: https://blog.0x74696d.com/posts/challenges-of-bpf-tracing-go/
x86 calling conventions: https://en.wikipedia.org/wiki/X86_calling_conventions
Plan 9 from Bell Labs: https://en.wikipedia.org/wiki/Plan_9_from_Bell_Labs
The issue for calling convention change in Go: https://github.com/golang/go/issues/40724
Go ABI0 (<=1.16) specification: https://go.dev/doc/asm
Proposal of Register-based Go calling convention: https://go.googlesource.com/proposal/+/master/design/40724-register-calling.md
Go internal ABI (1.17) specification: https://go.googlesource.com/go/+/refs/heads/dev.regabi/src/cmd/compile/internal-abi.md
Go internal ABI (current) specification: https://go.googlesource.com/go/+/refs/heads/master/src/cmd/compile/abi-internal.md
@@ -55,10 +56,60 @@ Capstone Engine: https://www.capstone-engine.org/
#include "include/logger_messages.h"
#include "include/pids.h"
#include "include/common.h"
#include "include/go_abi_0.h"
#include "include/go_abi_internal.h"
#include "include/go_types.h"

static __always_inline __u32 go_crypto_tls_get_fd_from_tcp_conn(struct pt_regs *ctx) {

// TODO: cilium/ebpf does not support .kconfig Therefore; for now, we build object files per kernel version.
// Error: reference to .kconfig: not supported
// See: https://github.com/cilium/ebpf/issues/698
// extern int LINUX_KERNEL_VERSION __kconfig;

enum ABI {
    ABI0=0,
    ABIInternal=1,
};

#if defined(bpf_target_x86)
// get_goid_from_thread_local_storage function is x86 specific
static __always_inline __u32 get_goid_from_thread_local_storage(__u64 *goroutine_id) {
    int zero = 0;
    int one = 1;
    struct goid_offsets* offsets = bpf_map_lookup_elem(&goid_offsets_map, &zero);
    if (offsets == NULL) {
        return 0;
    }

    // Get the task that currently assigned to this thread.
    struct task_struct *task = (struct task_struct*) bpf_get_current_task();
    if (task == NULL) {
        return 0;
    }

    // Read task->thread
    struct thread_struct *thr;
    bpf_probe_read(&thr, sizeof(thr), &task->thread);

    // Read task->thread.fsbase
    u64 fsbase;
#ifdef KERNEL_BEFORE_4_6
    // TODO: if (LINUX_KERNEL_VERSION <= KERNEL_VERSION(4, 6, 0)) {
    fsbase = BPF_CORE_READ((struct thread_struct___v46 *)thr, fs);
#else
    fsbase = BPF_CORE_READ(thr, fsbase);
#endif

    // Get the Goroutine ID (goid) which is stored in thread-local storage.
    size_t g_addr;
    bpf_probe_read_user(&g_addr, sizeof(void *), (void*)(fsbase + offsets->g_addr_offset));
    bpf_probe_read_user(goroutine_id, sizeof(void *), (void*)(g_addr + offsets->goid_offset));

    return 1;
}
#endif
static __always_inline __u32 go_crypto_tls_get_fd_from_tcp_conn(struct pt_regs *ctx, enum ABI abi) {
    struct go_interface conn;
    long err;
    __u64 addr;
@@ -67,8 +118,15 @@ static __always_inline __u32 go_crypto_tls_get_fd_from_tcp_conn(struct pt_regs *
    if (err != 0) {
        return invalid_fd;
    }
#else
    addr = GO_ABI_INTERNAL_PT_REGS_R1(ctx);
#elif defined(bpf_target_x86)
    if (abi == ABI0) {
        err = bpf_probe_read(&addr, sizeof(addr), (void*)GO_ABI_INTERNAL_PT_REGS_SP(ctx)+0x8);
        if (err != 0) {
            return invalid_fd;
        }
    } else {
        addr = GO_ABI_INTERNAL_PT_REGS_R1(ctx);
    }
#endif

    err = bpf_probe_read(&conn, sizeof(conn), (void*)addr);
@@ -91,7 +149,7 @@ static __always_inline __u32 go_crypto_tls_get_fd_from_tcp_conn(struct pt_regs *
    return fd;
}

static __always_inline void go_crypto_tls_uprobe(struct pt_regs *ctx, struct bpf_map_def* go_context) {
static __always_inline void go_crypto_tls_uprobe(struct pt_regs *ctx, struct bpf_map_def* go_context, enum ABI abi) {
    __u64 pid_tgid = bpf_get_current_pid_tgid();
    __u64 pid = pid_tgid >> 32;
    if (!should_tap(pid)) {
@@ -107,14 +165,52 @@ static __always_inline void go_crypto_tls_uprobe(struct pt_regs *ctx, struct bpf
        log_error(ctx, LOG_ERROR_READING_BYTES_COUNT, pid_tgid, err, ORIGIN_SSL_UPROBE_CODE);
        return;
    }
#else
    info.buffer_len = GO_ABI_INTERNAL_PT_REGS_R2(ctx);
#elif defined(bpf_target_x86)
    if (abi == ABI0) {
        err = bpf_probe_read(&info.buffer_len, sizeof(__u32), (void*)GO_ABI_0_PT_REGS_SP(ctx)+0x18);
        if (err != 0) {
            log_error(ctx, LOG_ERROR_READING_BYTES_COUNT, pid_tgid, err, ORIGIN_SSL_UPROBE_CODE);
            return;
        }
    } else {
        info.buffer_len = GO_ABI_INTERNAL_PT_REGS_R2(ctx);
    }
#endif
    info.buffer = (void*)GO_ABI_INTERNAL_PT_REGS_R4(ctx);
    info.fd = go_crypto_tls_get_fd_from_tcp_conn(ctx);

    // GO_ABI_INTERNAL_PT_REGS_GP is Goroutine address
    __u64 pid_fp = pid << 32 | GO_ABI_INTERNAL_PT_REGS_GP(ctx);
#if defined(bpf_target_x86)
    if (abi == ABI0) {
        err = bpf_probe_read(&info.buffer, sizeof(__u32), (void*)GO_ABI_0_PT_REGS_SP(ctx)+0x11);
        if (err != 0) {
            log_error(ctx, LOG_ERROR_READING_FROM_SSL_BUFFER, pid_tgid, err, ORIGIN_SSL_UPROBE_CODE);
            return;
        }
        // We basically add 00 suffix to the hex address.
        info.buffer = (void*)((long)info.buffer << 8);
    } else {
#endif
    info.buffer = (void*)GO_ABI_INTERNAL_PT_REGS_R4(ctx);
#if defined(bpf_target_x86)
    }
#endif
    info.fd = go_crypto_tls_get_fd_from_tcp_conn(ctx, abi);

    __u64 goroutine_id;
    if (abi == ABI0) {
#if defined(bpf_target_arm64)
        // In case of ABI0 and arm64, it's stored in the Goroutine register
        goroutine_id = GO_ABI_0_PT_REGS_GP(ctx);
#elif defined(bpf_target_x86)
        // In case of ABI0 and amd64, it's stored in the thread-local storage
        int status = get_goid_from_thread_local_storage(&goroutine_id);
        if (!status) {
            return;
        }
#endif
    } else {
        // GO_ABI_INTERNAL_PT_REGS_GP is the Goroutine address in ABIInternal
        goroutine_id = GO_ABI_INTERNAL_PT_REGS_GP(ctx);
    }
    __u64 pid_fp = pid << 32 | goroutine_id;
    err = bpf_map_update_elem(go_context, &pid_fp, &info, BPF_ANY);

    if (err != 0) {
@@ -124,15 +220,30 @@ static __always_inline void go_crypto_tls_uprobe(struct pt_regs *ctx, struct bpf
    return;
}

static __always_inline void go_crypto_tls_ex_uprobe(struct pt_regs *ctx, struct bpf_map_def* go_context, __u32 flags) {
|
||||
static __always_inline void go_crypto_tls_ex_uprobe(struct pt_regs *ctx, struct bpf_map_def* go_context, __u32 flags, enum ABI abi) {
|
||||
__u64 pid_tgid = bpf_get_current_pid_tgid();
|
||||
__u64 pid = pid_tgid >> 32;
|
||||
if (!should_tap(pid)) {
|
||||
return;
|
||||
}
|
||||
|
||||
// GO_ABI_INTERNAL_PT_REGS_GP is Goroutine address
|
||||
__u64 pid_fp = pid << 32 | GO_ABI_INTERNAL_PT_REGS_GP(ctx);
|
||||
__u64 goroutine_id;
|
||||
if (abi == ABI0) {
|
||||
#if defined(bpf_target_arm64)
|
||||
// In case of ABI0 and arm64, it's stored in the Goroutine register
|
||||
goroutine_id = GO_ABI_0_PT_REGS_GP(ctx);
|
||||
#elif defined(bpf_target_x86)
|
||||
// In case of ABI0 and amd64, it's stored in the thread-local storage
|
||||
int status = get_goid_from_thread_local_storage(&goroutine_id);
|
||||
if (!status) {
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
// GO_ABI_INTERNAL_PT_REGS_GP is the Goroutine address in ABIInternal
|
||||
goroutine_id = GO_ABI_INTERNAL_PT_REGS_GP(ctx);
|
||||
}
|
||||
__u64 pid_fp = pid << 32 | goroutine_id;
|
||||
struct ssl_info *info_ptr = bpf_map_lookup_elem(go_context, &pid_fp);
|
||||
|
||||
if (info_ptr == NULL) {
|
||||
@@ -156,8 +267,17 @@ static __always_inline void go_crypto_tls_ex_uprobe(struct pt_regs *ctx, struct
|
||||
return;
|
||||
}
|
||||
info.buffer_len = GO_ABI_INTERNAL_PT_REGS_R7(ctx); // n in return n, nil
|
||||
#else
|
||||
info.buffer_len = GO_ABI_INTERNAL_PT_REGS_R1(ctx); // n in return n, nil
|
||||
#elif defined(bpf_target_x86)
|
||||
if (abi == ABI0) {
|
||||
// n in return n, nil
|
||||
err = bpf_probe_read(&info.buffer_len, sizeof(__u32), (void*)GO_ABI_0_PT_REGS_SP(ctx)+0x28);
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_BYTES_COUNT, pid_tgid, err, ORIGIN_SSL_UPROBE_CODE);
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
info.buffer_len = GO_ABI_INTERNAL_PT_REGS_R1(ctx); // n in return n, nil
|
||||
}
|
||||
#endif
|
||||
// This check ignores zero-length reads (those reads are the result of an error)
|
||||
if (info.buffer_len <= 0) {
|
||||
@@ -170,22 +290,50 @@ static __always_inline void go_crypto_tls_ex_uprobe(struct pt_regs *ctx, struct
|
||||
return;
|
||||
}
|
||||
|
||||
SEC("uprobe/go_crypto_tls_write")
|
||||
void BPF_KPROBE(go_crypto_tls_write) {
|
||||
go_crypto_tls_uprobe(ctx, &go_write_context);
|
||||
SEC("uprobe/go_crypto_tls_abi0_write")
|
||||
int BPF_KPROBE(go_crypto_tls_abi0_write) {
|
||||
go_crypto_tls_uprobe(ctx, &go_write_context, ABI0);
|
||||
return 1;
|
||||
}
|
||||
|
||||
SEC("uprobe/go_crypto_tls_write_ex")
|
||||
void BPF_KPROBE(go_crypto_tls_write_ex) {
|
||||
go_crypto_tls_ex_uprobe(ctx, &go_write_context, 0);
|
||||
SEC("uprobe/go_crypto_tls_abi0_write_ex")
|
||||
int BPF_KPROBE(go_crypto_tls_abi0_write_ex) {
|
||||
go_crypto_tls_ex_uprobe(ctx, &go_write_context, 0, ABI0);
|
||||
return 1;
|
||||
}
|
||||
|
||||
SEC("uprobe/go_crypto_tls_read")
|
||||
void BPF_KPROBE(go_crypto_tls_read) {
|
||||
go_crypto_tls_uprobe(ctx, &go_read_context);
|
||||
SEC("uprobe/go_crypto_tls_abi0_read")
|
||||
int BPF_KPROBE(go_crypto_tls_abi0_read) {
|
||||
go_crypto_tls_uprobe(ctx, &go_read_context, ABI0);
|
||||
return 1;
|
||||
}
|
||||
|
||||
SEC("uprobe/go_crypto_tls_read_ex")
|
||||
void BPF_KPROBE(go_crypto_tls_read_ex) {
|
||||
go_crypto_tls_ex_uprobe(ctx, &go_read_context, FLAGS_IS_READ_BIT);
|
||||
SEC("uprobe/go_crypto_tls_abi0_read_ex")
|
||||
int BPF_KPROBE(go_crypto_tls_abi0_read_ex) {
|
||||
go_crypto_tls_ex_uprobe(ctx, &go_read_context, FLAGS_IS_READ_BIT, ABI0);
|
||||
return 1;
|
||||
}
|
||||
|
||||
SEC("uprobe/go_crypto_tls_abi_internal_write")
|
||||
int BPF_KPROBE(go_crypto_tls_abi_internal_write) {
|
||||
go_crypto_tls_uprobe(ctx, &go_write_context, ABIInternal);
|
||||
return 1;
|
||||
}
|
||||
|
||||
SEC("uprobe/go_crypto_tls_abi_internal_write_ex")
|
||||
int BPF_KPROBE(go_crypto_tls_abi_internal_write_ex) {
|
||||
go_crypto_tls_ex_uprobe(ctx, &go_write_context, 0, ABIInternal);
|
||||
return 1;
|
||||
}
|
||||
|
||||
SEC("uprobe/go_crypto_tls_abi_internal_read")
|
||||
int BPF_KPROBE(go_crypto_tls_abi_internal_read) {
|
||||
go_crypto_tls_uprobe(ctx, &go_read_context, ABIInternal);
|
||||
return 1;
|
||||
}
|
||||
|
||||
SEC("uprobe/go_crypto_tls_abi_internal_read_ex")
|
||||
int BPF_KPROBE(go_crypto_tls_abi_internal_read_ex) {
|
||||
go_crypto_tls_ex_uprobe(ctx, &go_read_context, FLAGS_IS_READ_BIT, ABIInternal);
|
||||
return 1;
|
||||
}
|
||||
|
||||
@@ -7,9 +7,11 @@ Copyright (C) UP9 Inc.
|
||||
#ifndef __COMMON__
|
||||
#define __COMMON__
|
||||
|
||||
#define AF_INET 2 /* Internet IP Protocol */
|
||||
|
||||
const __s32 invalid_fd = -1;
|
||||
|
||||
static int add_address_to_chunk(struct pt_regs *ctx, struct tls_chunk* chunk, __u64 id, __u32 fd);
|
||||
static int add_address_to_chunk(struct pt_regs *ctx, struct tls_chunk* chunk, __u64 id, __u32 fd, struct ssl_info* info);
|
||||
static void send_chunk_part(struct pt_regs *ctx, __u8* buffer, __u64 id, struct tls_chunk* chunk, int start, int end);
|
||||
static void send_chunk(struct pt_regs *ctx, __u8* buffer, __u64 id, struct tls_chunk* chunk);
|
||||
static void output_ssl_chunk(struct pt_regs *ctx, struct ssl_info* info, int count_bytes, __u64 id, __u32 flags);
|
||||
|
||||
52
tap/tlstapper/bpf/include/go_abi_0.h
Normal file
52
tap/tlstapper/bpf/include/go_abi_0.h
Normal file
@@ -0,0 +1,52 @@
|
||||
/*
|
||||
Note: This file is licenced differently from the rest of the project
|
||||
SPDX-License-Identifier: GPL-2.0
|
||||
Copyright (C) UP9 Inc.
|
||||
*/
|
||||
|
||||
#ifndef __GO_ABI_0__
|
||||
#define __GO_ABI_0__
|
||||
|
||||
/*
|
||||
Go ABI0 (<=1.16) specification
|
||||
https://go.dev/doc/asm
|
||||
|
||||
Since ABI0 is a stack-based calling convention, we only need the stack pointer and,
if applicable, the Goroutine pointer
|
||||
*/
|
||||
|
||||
#include "target_arch.h"
|
||||
|
||||
#if defined(bpf_target_x86)
|
||||
|
||||
#ifdef __i386__
|
||||
|
||||
#define GO_ABI_0_PT_REGS_SP(x) ((x)->esp)
|
||||
|
||||
#else
|
||||
|
||||
#define GO_ABI_0_PT_REGS_SP(x) ((x)->sp)
|
||||
|
||||
#endif
|
||||
|
||||
#elif defined(bpf_target_arm)
|
||||
|
||||
#define GO_ABI_0_PT_REGS_SP(x) ((x)->uregs[13])
|
||||
#define GO_ABI_0_PT_REGS_GP(x) ((x)->uregs[10])
|
||||
|
||||
#elif defined(bpf_target_arm64)
|
||||
|
||||
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
|
||||
struct pt_regs;
|
||||
#define PT_REGS_ARM64 const volatile struct user_pt_regs
|
||||
#define GO_ABI_0_PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
|
||||
#define GO_ABI_0_PT_REGS_GP(x) (((PT_REGS_ARM64 *)(x))->regs[18])
|
||||
|
||||
#elif defined(bpf_target_powerpc)
|
||||
|
||||
#define GO_ABI_0_PT_REGS_SP(x) ((x)->sp)
|
||||
#define GO_ABI_0_PT_REGS_GP(x) ((x)->gpr[30])
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __GO_ABI_0__ */
|
||||
@@ -8,54 +8,11 @@ Copyright (C) UP9 Inc.
|
||||
#define __GO_ABI_INTERNAL__
|
||||
|
||||
/*
|
||||
Go internal ABI specification
|
||||
Go internal ABI (1.17/current) specification
|
||||
https://go.googlesource.com/go/+/refs/heads/master/src/cmd/compile/abi-internal.md
|
||||
*/
|
||||
|
||||
/* Scan the ARCH passed in from ARCH env variable */
|
||||
#if defined(__TARGET_ARCH_x86)
|
||||
#define bpf_target_x86
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_s390)
|
||||
#define bpf_target_s390
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_arm)
|
||||
#define bpf_target_arm
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_arm64)
|
||||
#define bpf_target_arm64
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_mips)
|
||||
#define bpf_target_mips
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_powerpc)
|
||||
#define bpf_target_powerpc
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_sparc)
|
||||
#define bpf_target_sparc
|
||||
#define bpf_target_defined
|
||||
#else
|
||||
#undef bpf_target_defined
|
||||
#endif
|
||||
|
||||
/* Fall back to what the compiler says */
|
||||
#ifndef bpf_target_defined
|
||||
#if defined(__x86_64__)
|
||||
#define bpf_target_x86
|
||||
#elif defined(__s390__)
|
||||
#define bpf_target_s390
|
||||
#elif defined(__arm__)
|
||||
#define bpf_target_arm
|
||||
#elif defined(__aarch64__)
|
||||
#define bpf_target_arm64
|
||||
#elif defined(__mips__)
|
||||
#define bpf_target_mips
|
||||
#elif defined(__powerpc__)
|
||||
#define bpf_target_powerpc
|
||||
#elif defined(__sparc__)
|
||||
#define bpf_target_sparc
|
||||
#endif
|
||||
#endif
|
||||
#include "target_arch.h"
|
||||
|
||||
#if defined(bpf_target_x86)
|
||||
|
||||
@@ -78,15 +35,15 @@ https://github.com/golang/go/blob/go1.17.6/src/cmd/compile/internal/ssa/gen/AMD6
|
||||
|
||||
#else
|
||||
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R1(x) ((x)->rax)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R2(x) ((x)->rcx)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R3(x) ((x)->rdx)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R4(x) ((x)->rbx)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R5(x) ((x)->rbp)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R6(x) ((x)->rsi)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R7(x) ((x)->rdi)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_SP(x) ((x)->rsp)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_FP(x) ((x)->rbp)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R1(x) ((x)->ax)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R2(x) ((x)->cx)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R3(x) ((x)->dx)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R4(x) ((x)->bx)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R5(x) ((x)->bp)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R6(x) ((x)->si)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_R7(x) ((x)->di)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_SP(x) ((x)->sp)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_FP(x) ((x)->bp)
|
||||
#define GO_ABI_INTERNAL_PT_REGS_GP(x) ((x)->r14)
|
||||
|
||||
#endif
|
||||
|
||||
@@ -8,9 +8,16 @@ Copyright (C) UP9 Inc.
|
||||
#define __HEADERS__
|
||||
|
||||
#include <stddef.h>
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/ptrace.h>
|
||||
|
||||
#include "target_arch.h"
|
||||
#include "vmlinux_x86.h"
|
||||
#include "vmlinux_arm64.h"
|
||||
|
||||
#include "legacy_kernel.h"
|
||||
|
||||
#include <bpf/bpf_endian.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
#include "bpf/bpf_tracing.h"
|
||||
#include <bpf/bpf_tracing.h>
|
||||
#include <bpf/bpf_core_read.h>
|
||||
|
||||
#endif /* __HEADERS__ */
|
||||
|
||||
50
tap/tlstapper/bpf/include/legacy_kernel.h
Normal file
50
tap/tlstapper/bpf/include/legacy_kernel.h
Normal file
@@ -0,0 +1,50 @@
|
||||
#ifndef __LEGACY_KERNEL_H__
|
||||
#define __LEGACY_KERNEL_H__
|
||||
|
||||
#if defined(bpf_target_x86)
|
||||
|
||||
struct thread_struct___v46 {
|
||||
struct desc_struct tls_array[3];
|
||||
unsigned long sp0;
|
||||
unsigned long sp;
|
||||
unsigned short es;
|
||||
unsigned short ds;
|
||||
unsigned short fsindex;
|
||||
unsigned short gsindex;
|
||||
unsigned long fs;
|
||||
unsigned long gs;
|
||||
struct perf_event ptrace_bps[4];
|
||||
unsigned long debugreg6;
|
||||
unsigned long ptrace_dr7;
|
||||
unsigned long cr2;
|
||||
unsigned long trap_nr;
|
||||
unsigned long error_code;
|
||||
unsigned long io_bitmap_ptr;
|
||||
unsigned long iopl;
|
||||
unsigned io_bitmap_max;
|
||||
long: 63;
|
||||
long: 64;
|
||||
long: 64;
|
||||
long: 64;
|
||||
long: 64;
|
||||
long: 64;
|
||||
struct fpu fpu;
|
||||
};
|
||||
|
||||
#elif defined(bpf_target_arm)
|
||||
|
||||
// Commented out since thread_struct is not used in ARM64 yet.
|
||||
|
||||
// struct thread_struct___v46 {
|
||||
// struct cpu_context cpu_context;
|
||||
// long: 64;
|
||||
// unsigned long tp_value;
|
||||
// struct fpsimd_state fpsimd_state;
|
||||
// unsigned long fault_address;
|
||||
// unsigned long fault_code;
|
||||
// struct debug_info debug;
|
||||
// };
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __LEGACY_KERNEL_H__ */
|
||||
@@ -26,6 +26,11 @@ Copyright (C) UP9 Inc.
|
||||
#define LOG_ERROR_PUTTING_CONNECT_INFO (14)
|
||||
#define LOG_ERROR_GETTING_CONNECT_INFO (15)
|
||||
#define LOG_ERROR_READING_CONNECT_INFO (16)
|
||||
#define LOG_ERROR_READING_SOCKET_FAMILY (17)
|
||||
#define LOG_ERROR_READING_SOCKET_DADDR (18)
|
||||
#define LOG_ERROR_READING_SOCKET_SADDR (19)
|
||||
#define LOG_ERROR_READING_SOCKET_DPORT (20)
|
||||
#define LOG_ERROR_READING_SOCKET_SPORT (21)
|
||||
|
||||
// Sometimes we have the same error happening from different locations.
// In order to distinguish between them in the log, we add an
|
||||
|
||||
@@ -24,6 +24,21 @@ Copyright (C) UP9 Inc.
|
||||
//
|
||||
// Be careful when editing, alignment and padding should be exactly the same in go/c.
|
||||
//
|
||||
|
||||
typedef enum {
|
||||
ADDRESS_INFO_MODE_UNDEFINED,
|
||||
ADDRESS_INFO_MODE_SINGLE,
|
||||
ADDRESS_INFO_MODE_PAIR,
|
||||
} address_info_mode;
|
||||
|
||||
struct address_info {
|
||||
address_info_mode mode;
|
||||
__be32 saddr;
|
||||
__be32 daddr;
|
||||
__be16 sport;
|
||||
__be16 dport;
|
||||
};
|
||||
|
||||
struct tls_chunk {
|
||||
__u32 pid;
|
||||
__u32 tgid;
|
||||
@@ -32,7 +47,7 @@ struct tls_chunk {
|
||||
__u32 recorded;
|
||||
__u32 fd;
|
||||
__u32 flags;
|
||||
__u8 address[16];
|
||||
struct address_info address_info;
|
||||
__u8 data[CHUNK_SIZE]; // Must be N^2
|
||||
};
|
||||
|
||||
@@ -41,6 +56,7 @@ struct ssl_info {
|
||||
__u32 buffer_len;
|
||||
__u32 fd;
|
||||
__u64 created_at_nano;
|
||||
struct address_info address_info;
|
||||
|
||||
// for ssl_write and ssl_read must be zero
|
||||
// for ssl_write_ex and ssl_read_ex save the *written/*readbytes pointer.
|
||||
@@ -53,6 +69,13 @@ struct fd_info {
|
||||
__u8 flags;
|
||||
};
|
||||
|
||||
struct goid_offsets {
|
||||
__u64 g_addr_offset;
|
||||
__u64 goid_offset;
|
||||
};
|
||||
|
||||
const struct goid_offsets *unused __attribute__((unused));
|
||||
|
||||
// Heap-like area for eBPF programs. The stack is limited to 512 bytes, so we must use maps for bigger (chunk) objects.
|
||||
//
|
||||
struct {
|
||||
@@ -91,6 +114,7 @@ BPF_LRU_HASH(openssl_write_context, __u64, struct ssl_info);
|
||||
BPF_LRU_HASH(openssl_read_context, __u64, struct ssl_info);
|
||||
|
||||
// Go specific
|
||||
BPF_HASH(goid_offsets_map, __u32, struct goid_offsets);
|
||||
BPF_LRU_HASH(go_write_context, __u64, struct ssl_info);
|
||||
BPF_LRU_HASH(go_read_context, __u64, struct ssl_info);
|
||||
|
||||
|
||||
55
tap/tlstapper/bpf/include/target_arch.h
Normal file
55
tap/tlstapper/bpf/include/target_arch.h
Normal file
@@ -0,0 +1,55 @@
|
||||
/*
|
||||
Note: This file is licenced differently from the rest of the project
|
||||
SPDX-License-Identifier: GPL-2.0
|
||||
Copyright (C) UP9 Inc.
|
||||
*/
|
||||
|
||||
#ifndef __TARGET_ARCH__
|
||||
#define __TARGET_ARCH__
|
||||
|
||||
/* Scan the ARCH passed in from ARCH env variable */
|
||||
#if defined(__TARGET_ARCH_x86)
|
||||
#define bpf_target_x86
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_s390)
|
||||
#define bpf_target_s390
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_arm)
|
||||
#define bpf_target_arm
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_arm64)
|
||||
#define bpf_target_arm64
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_mips)
|
||||
#define bpf_target_mips
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_powerpc)
|
||||
#define bpf_target_powerpc
|
||||
#define bpf_target_defined
|
||||
#elif defined(__TARGET_ARCH_sparc)
|
||||
#define bpf_target_sparc
|
||||
#define bpf_target_defined
|
||||
#else
|
||||
#undef bpf_target_defined
|
||||
#endif
|
||||
|
||||
/* Fall back to what the compiler says */
|
||||
#ifndef bpf_target_defined
|
||||
#if defined(__x86_64__)
|
||||
#define bpf_target_x86
|
||||
#elif defined(__s390__)
|
||||
#define bpf_target_s390
|
||||
#elif defined(__arm__)
|
||||
#define bpf_target_arm
|
||||
#elif defined(__aarch64__)
|
||||
#define bpf_target_arm64
|
||||
#elif defined(__mips__)
|
||||
#define bpf_target_mips
|
||||
#elif defined(__powerpc__)
|
||||
#define bpf_target_powerpc
|
||||
#elif defined(__sparc__)
|
||||
#define bpf_target_sparc
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#endif /* __TARGET_ARCH__ */
|
||||
157110
tap/tlstapper/bpf/include/vmlinux_arm64.h
Normal file
157110
tap/tlstapper/bpf/include/vmlinux_arm64.h
Normal file
File diff suppressed because it is too large
124048
tap/tlstapper/bpf/include/vmlinux_x86.h
Normal file
124048
tap/tlstapper/bpf/include/vmlinux_x86.h
Normal file
File diff suppressed because it is too large
@@ -42,19 +42,20 @@ static __always_inline int get_count_bytes(struct pt_regs *ctx, struct ssl_info*
|
||||
}
|
||||
|
||||
static __always_inline void ssl_uprobe(struct pt_regs *ctx, void* ssl, void* buffer, int num, struct bpf_map_def* map_fd, size_t *count_ptr) {
|
||||
long err;
|
||||
|
||||
__u64 id = bpf_get_current_pid_tgid();
|
||||
|
||||
if (!should_tap(id >> 32)) {
|
||||
return;
|
||||
}
|
||||
|
||||
struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &id);
|
||||
struct ssl_info info = lookup_ssl_info(ctx, &openssl_write_context, id);
|
||||
struct ssl_info info = lookup_ssl_info(ctx, map_fd, id);
|
||||
|
||||
info.count_ptr = count_ptr;
|
||||
info.buffer = buffer;
|
||||
|
||||
long err = bpf_map_update_elem(map_fd, &id, &info, BPF_ANY);
|
||||
err = bpf_map_update_elem(map_fd, &id, &info, BPF_ANY);
|
||||
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_PUTTING_SSL_CONTEXT, id, err, 0l);
|
||||
@@ -67,7 +68,7 @@ static __always_inline void ssl_uretprobe(struct pt_regs *ctx, struct bpf_map_de
|
||||
if (!should_tap(id >> 32)) {
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &id);
|
||||
|
||||
if (infoPtr == NULL) {
|
||||
@@ -100,10 +101,10 @@ static __always_inline void ssl_uretprobe(struct pt_regs *ctx, struct bpf_map_de
|
||||
return;
|
||||
}
|
||||
|
||||
int count_bytes = get_count_bytes(ctx, &info, id);
|
||||
if (count_bytes <= 0) {
|
||||
return;
|
||||
}
|
||||
int count_bytes = get_count_bytes(ctx, &info, id);
|
||||
if (count_bytes <= 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
output_ssl_chunk(ctx, &info, count_bytes, id, flags);
|
||||
}
|
||||
|
||||
79
tap/tlstapper/bpf/tcp_kprobes.c
Normal file
79
tap/tlstapper/bpf/tcp_kprobes.c
Normal file
@@ -0,0 +1,79 @@
|
||||
#include "include/headers.h"
|
||||
#include "include/maps.h"
|
||||
#include "include/log.h"
|
||||
#include "include/logger_messages.h"
|
||||
#include "include/pids.h"
|
||||
#include "include/common.h"
|
||||
|
||||
static __always_inline void tcp_kprobe(struct pt_regs *ctx, struct bpf_map_def *map_fd, _Bool is_send) {
|
||||
long err;
|
||||
|
||||
__u64 id = bpf_get_current_pid_tgid();
|
||||
__u32 pid = id >> 32;
|
||||
|
||||
if (!should_tap(id >> 32)) {
|
||||
return;
|
||||
}
|
||||
|
||||
struct ssl_info *info_ptr = bpf_map_lookup_elem(map_fd, &id);
|
||||
// Happens when the connection is not TLS
|
||||
if (info_ptr == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
struct sock *sk = (struct sock *) PT_REGS_PARM1(ctx);
|
||||
|
||||
short unsigned int family;
|
||||
err = bpf_probe_read(&family, sizeof(family), (void *)&sk->__sk_common.skc_family);
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_SOCKET_FAMILY, id, err, 0l);
|
||||
return;
|
||||
}
|
||||
if (family != AF_INET) {
|
||||
return;
|
||||
}
|
||||
|
||||
// daddr, saddr and dport are in network byte order (big endian)
|
||||
// sport is in host byte order
|
||||
__be32 saddr;
|
||||
__be32 daddr;
|
||||
__be16 dport;
|
||||
__u16 sport;
|
||||
|
||||
err = bpf_probe_read(&saddr, sizeof(saddr), (void *)&sk->__sk_common.skc_rcv_saddr);
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_SOCKET_SADDR, id, err, 0l);
|
||||
return;
|
||||
}
|
||||
err = bpf_probe_read(&daddr, sizeof(daddr), (void *)&sk->__sk_common.skc_daddr);
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_SOCKET_DADDR, id, err, 0l);
|
||||
return;
|
||||
}
|
||||
err = bpf_probe_read(&dport, sizeof(dport), (void *)&sk->__sk_common.skc_dport);
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_SOCKET_DPORT, id, err, 0l);
|
||||
return;
|
||||
}
|
||||
err = bpf_probe_read(&sport, sizeof(sport), (void *)&sk->__sk_common.skc_num);
|
||||
if (err != 0) {
|
||||
log_error(ctx, LOG_ERROR_READING_SOCKET_SPORT, id, err, 0l);
|
||||
return;
|
||||
}
|
||||
|
||||
info_ptr->address_info.mode = ADDRESS_INFO_MODE_PAIR;
|
||||
info_ptr->address_info.daddr = daddr;
|
||||
info_ptr->address_info.saddr = saddr;
|
||||
info_ptr->address_info.dport = dport;
|
||||
info_ptr->address_info.sport = bpf_htons(sport);
|
||||
}
|
||||
|
||||
SEC("kprobe/tcp_sendmsg")
|
||||
void BPF_KPROBE(tcp_sendmsg) {
|
||||
tcp_kprobe(ctx, &openssl_write_context, true);
|
||||
}
|
||||
|
||||
SEC("kprobe/tcp_recvmsg")
|
||||
void BPF_KPROBE(tcp_recvmsg) {
|
||||
tcp_kprobe(ctx, &openssl_read_context, false);
|
||||
}
|
||||
@@ -15,6 +15,7 @@ Copyright (C) UP9 Inc.
|
||||
//
|
||||
#include "common.c"
|
||||
#include "openssl_uprobes.c"
|
||||
#include "tcp_kprobes.c"
|
||||
#include "go_uprobes.c"
|
||||
#include "fd_tracepoints.c"
|
||||
#include "fd_to_address_tracepoints.c"
|
||||
|
||||
@@ -20,4 +20,9 @@ var bpfLogMessages = []string{
/*0014*/ "[%d] Unable to put connect info [err: %d]",
/*0015*/ "[%d] Unable to get connect info",
/*0016*/ "[%d] Unable to read connect info [err: %d]",
/*0017*/ "[%d] Unable to read socket family [err: %d]",
/*0018*/ "[%d] Unable to read socket daddr [err: %d]",
/*0019*/ "[%d] Unable to read socket saddr [err: %d]",
/*0020*/ "[%d] Unable to read socket dport [err: %d]",
/*0021*/ "[%d] Unable to read socket sport [err: %d]",
}
|
||||
|
||||
@@ -1,38 +1,33 @@
|
||||
package tlstapper
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"net"
|
||||
"unsafe"
|
||||
|
||||
"github.com/go-errors/errors"
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
const FlagsIsClientBit uint32 = 1 << 0
|
||||
const FlagsIsReadBit uint32 = 1 << 1
|
||||
const (
|
||||
addressInfoModeUndefined = iota
|
||||
addressInfoModeSingle
|
||||
addressInfoModePair
|
||||
)
|
||||
|
||||
func (c *tlsTapperTlsChunk) getAddress() (net.IP, uint16, error) {
|
||||
address := bytes.NewReader(c.Address[:])
|
||||
var family uint16
|
||||
var port uint16
|
||||
var ip32 uint32
|
||||
func (c *tlsTapperTlsChunk) getSrcAddress() (net.IP, uint16) {
|
||||
ip := intToIP(c.AddressInfo.Saddr)
|
||||
port := ntohs(c.AddressInfo.Sport)
|
||||
|
||||
if err := binary.Read(address, binary.BigEndian, &family); err != nil {
|
||||
return nil, 0, errors.Wrap(err, 0)
|
||||
}
|
||||
return ip, port
|
||||
}
|
||||
|
||||
if err := binary.Read(address, binary.BigEndian, &port); err != nil {
|
||||
return nil, 0, errors.Wrap(err, 0)
|
||||
}
|
||||
func (c *tlsTapperTlsChunk) getDstAddress() (net.IP, uint16) {
|
||||
ip := intToIP(c.AddressInfo.Daddr)
|
||||
port := ntohs(c.AddressInfo.Dport)
|
||||
|
||||
if err := binary.Read(address, binary.BigEndian, &ip32); err != nil {
|
||||
return nil, 0, errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
ip := net.IP{uint8(ip32 >> 24), uint8(ip32 >> 16), uint8(ip32 >> 8), uint8(ip32)}
|
||||
|
||||
return ip, port, nil
|
||||
return ip, port
|
||||
}
|
||||
|
||||
func (c *tlsTapperTlsChunk) isClient() bool {
|
||||
@@ -59,26 +54,54 @@ func (c *tlsTapperTlsChunk) isRequest() bool {
|
||||
return (c.isClient() && c.isWrite()) || (c.isServer() && c.isRead())
|
||||
}
|
||||
|
||||
func (c *tlsTapperTlsChunk) getAddressPair() (addressPair, error) {
|
||||
ip, port, err := c.getAddress()
|
||||
func (c *tlsTapperTlsChunk) getAddressPair() (addressPair, bool) {
|
||||
var (
|
||||
srcIp, dstIp net.IP
|
||||
srcPort, dstPort uint16
|
||||
full bool
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return addressPair{}, err
|
||||
switch c.AddressInfo.Mode {
|
||||
case addressInfoModeSingle:
|
||||
if c.isRequest() {
|
||||
srcIp, srcPort = api.UnknownIp, api.UnknownPort
|
||||
dstIp, dstPort = c.getSrcAddress()
|
||||
} else {
|
||||
srcIp, srcPort = c.getSrcAddress()
|
||||
dstIp, dstPort = api.UnknownIp, api.UnknownPort
|
||||
}
|
||||
full = false
|
||||
case addressInfoModePair:
|
||||
if c.isRequest() {
|
||||
srcIp, srcPort = c.getSrcAddress()
|
||||
dstIp, dstPort = c.getDstAddress()
|
||||
} else {
|
||||
srcIp, srcPort = c.getDstAddress()
|
||||
dstIp, dstPort = c.getSrcAddress()
|
||||
}
|
||||
full = true
|
||||
case addressInfoModeUndefined:
|
||||
srcIp, srcPort = api.UnknownIp, api.UnknownPort
|
||||
dstIp, dstPort = api.UnknownIp, api.UnknownPort
|
||||
full = false
|
||||
}
|
||||
|
||||
if c.isRequest() {
|
||||
return addressPair{
|
||||
srcIp: api.UnknownIp,
|
||||
srcPort: api.UnknownPort,
|
||||
dstIp: ip,
|
||||
dstPort: port,
|
||||
}, nil
|
||||
} else {
|
||||
return addressPair{
|
||||
srcIp: ip,
|
||||
srcPort: port,
|
||||
dstIp: api.UnknownIp,
|
||||
dstPort: api.UnknownPort,
|
||||
}, nil
|
||||
}
|
||||
return addressPair{
|
||||
srcIp: srcIp,
|
||||
srcPort: srcPort,
|
||||
dstIp: dstIp,
|
||||
dstPort: dstPort,
|
||||
}, full
|
||||
}

// intToIP converts IPv4 number to net.IP
func intToIP(ip32be uint32) net.IP {
	return net.IPv4(uint8(ip32be), uint8(ip32be>>8), uint8(ip32be>>16), uint8(ip32be>>24))
}

// ntohs converts big endian (network byte order) to little endian (assuming that's the host byte order)
func ntohs(i16be uint16) uint16 {
	b := make([]byte, 2)
	binary.BigEndian.PutUint16(b, i16be)
	return *(*uint16)(unsafe.Pointer(&b[0]))
}
|
||||
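For reference, a hedged usage sketch of the two helpers above. The helpers are copied here verbatim so the snippet is self-contained; the sample saddr/sport values are made up and assume a little-endian host, matching the tcp_kprobes.c comment that saddr/daddr/dport arrive in network byte order.

package main

import (
	"encoding/binary"
	"fmt"
	"net"
	"unsafe"
)

// Copied from the diff above: build net.IP from an IPv4 value stored in network byte order.
func intToIP(ip32be uint32) net.IP {
	return net.IPv4(uint8(ip32be), uint8(ip32be>>8), uint8(ip32be>>16), uint8(ip32be>>24))
}

// Copied from the diff above: reinterpret a big-endian u16 in host byte order.
func ntohs(i16be uint16) uint16 {
	b := make([]byte, 2)
	binary.BigEndian.PutUint16(b, i16be)
	return *(*uint16)(unsafe.Pointer(&b[0]))
}

func main() {
	// 1.2.3.4 in network byte order reads as 0x04030201 on a little-endian host,
	// and port 80 (0x0050) reads as 0x5000.
	saddr := uint32(0x04030201)
	sport := uint16(0x5000)
	fmt.Println(intToIP(saddr), ntohs(sport)) // prints: 1.2.3.4 80
}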
@@ -31,9 +31,32 @@ func (s *goHooks) installUprobes(bpfObjects *tlsTapperObjects, filePath string)
|
||||
func (s *goHooks) installHooks(bpfObjects *tlsTapperObjects, ex *link.Executable, offsets goOffsets) error {
|
||||
var err error
|
||||
|
||||
goCryptoTlsWrite := bpfObjects.GoCryptoTlsAbiInternalWrite
|
||||
goCryptoTlsWriteEx := bpfObjects.GoCryptoTlsAbiInternalWriteEx
|
||||
goCryptoTlsRead := bpfObjects.GoCryptoTlsAbiInternalRead
|
||||
goCryptoTlsReadEx := bpfObjects.GoCryptoTlsAbiInternalReadEx
|
||||
|
||||
if offsets.Abi == ABI0 {
|
||||
goCryptoTlsWrite = bpfObjects.GoCryptoTlsAbi0Write
|
||||
goCryptoTlsWriteEx = bpfObjects.GoCryptoTlsAbi0WriteEx
|
||||
goCryptoTlsRead = bpfObjects.GoCryptoTlsAbi0Read
|
||||
goCryptoTlsReadEx = bpfObjects.GoCryptoTlsAbi0ReadEx
|
||||
|
||||
// Pass goid and g struct offsets to an eBPF map so they can be retrieved in eBPF context
|
||||
if err := bpfObjects.tlsTapperMaps.GoidOffsetsMap.Put(
|
||||
uint32(0),
|
||||
tlsTapperGoidOffsets{
|
||||
G_addrOffset: offsets.GStructOffset,
|
||||
GoidOffset: offsets.GoidOffset,
|
||||
},
|
||||
); err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
}
|
||||
|
||||
// Symbol points to
|
||||
// [`crypto/tls.(*Conn).Write`](https://github.com/golang/go/blob/go1.17.6/src/crypto/tls/conn.go#L1099)
|
||||
s.goWriteProbe, err = ex.Uprobe(goWriteSymbol, bpfObjects.GoCryptoTlsWrite, &link.UprobeOptions{
|
||||
s.goWriteProbe, err = ex.Uprobe(goWriteSymbol, goCryptoTlsWrite, &link.UprobeOptions{
|
||||
Offset: offsets.GoWriteOffset.enter,
|
||||
})
|
||||
|
||||
@@ -42,7 +65,7 @@ func (s *goHooks) installHooks(bpfObjects *tlsTapperObjects, ex *link.Executable
|
||||
}
|
||||
|
||||
for _, offset := range offsets.GoWriteOffset.exits {
|
||||
probe, err := ex.Uprobe(goWriteSymbol, bpfObjects.GoCryptoTlsWriteEx, &link.UprobeOptions{
|
||||
probe, err := ex.Uprobe(goWriteSymbol, goCryptoTlsWriteEx, &link.UprobeOptions{
|
||||
Offset: offset,
|
||||
})
|
||||
|
||||
@@ -55,7 +78,7 @@ func (s *goHooks) installHooks(bpfObjects *tlsTapperObjects, ex *link.Executable
|
||||
|
||||
// Symbol points to
|
||||
// [`crypto/tls.(*Conn).Read`](https://github.com/golang/go/blob/go1.17.6/src/crypto/tls/conn.go#L1263)
|
||||
s.goReadProbe, err = ex.Uprobe(goReadSymbol, bpfObjects.GoCryptoTlsRead, &link.UprobeOptions{
|
||||
s.goReadProbe, err = ex.Uprobe(goReadSymbol, goCryptoTlsRead, &link.UprobeOptions{
|
||||
Offset: offsets.GoReadOffset.enter,
|
||||
})
|
||||
|
||||
@@ -64,7 +87,7 @@ func (s *goHooks) installHooks(bpfObjects *tlsTapperObjects, ex *link.Executable
|
||||
}
|
||||
|
||||
for _, offset := range offsets.GoReadOffset.exits {
|
||||
probe, err := ex.Uprobe(goReadSymbol, bpfObjects.GoCryptoTlsReadEx, &link.UprobeOptions{
|
||||
probe, err := ex.Uprobe(goReadSymbol, goCryptoTlsReadEx, &link.UprobeOptions{
|
||||
Offset: offset,
|
||||
})
|
||||
|
||||
|
||||
@@ -2,8 +2,10 @@ package tlstapper
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"debug/dwarf"
|
||||
"debug/elf"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
@@ -13,9 +15,22 @@ import (
|
||||
"github.com/up9inc/mizu/logger"
|
||||
)
|
||||
|
||||
type goAbi int
|
||||
|
||||
const (
|
||||
ABI0 goAbi = iota
|
||||
ABIInternal
|
||||
)
|
||||
|
||||
const PtrSize int = 8
|
||||
|
||||
type goOffsets struct {
|
||||
GoWriteOffset *goExtendedOffset
|
||||
GoReadOffset *goExtendedOffset
|
||||
GoVersion string
|
||||
Abi goAbi
|
||||
GoidOffset uint64
|
||||
GStructOffset uint64
|
||||
}
|
||||
|
||||
type goExtendedOffset struct {
|
||||
@@ -24,30 +39,33 @@ type goExtendedOffset struct {
|
||||
}
|
||||
|
||||
const (
|
||||
minimumSupportedGoVersion = "1.17.0"
|
||||
goVersionSymbol = "runtime.buildVersion.str"
|
||||
goWriteSymbol = "crypto/tls.(*Conn).Write"
|
||||
goReadSymbol = "crypto/tls.(*Conn).Read"
|
||||
minimumABIInternalGoVersion = "1.17.0"
|
||||
goVersionSymbol = "runtime.buildVersion.str" // symbol does not exist in Go (<=1.16)
|
||||
goWriteSymbol = "crypto/tls.(*Conn).Write"
|
||||
goReadSymbol = "crypto/tls.(*Conn).Read"
|
||||
)
|
||||
|
||||
func findGoOffsets(filePath string) (goOffsets, error) {
|
||||
offsets, err := getOffsets(filePath)
|
||||
offsets, goidOffset, gStructOffset, err := getOffsets(filePath)
|
||||
if err != nil {
|
||||
return goOffsets{}, err
|
||||
}
|
||||
|
||||
abi := ABI0
|
||||
var passed bool
|
||||
var goVersion string
|
||||
|
||||
goVersionOffset, err := getOffset(offsets, goVersionSymbol)
|
||||
if err != nil {
|
||||
return goOffsets{}, err
|
||||
if err == nil {
|
||||
// TODO: Replace this logic with https://pkg.go.dev/debug/buildinfo#ReadFile once we upgrade to 1.18
|
||||
passed, goVersion, err = checkGoVersion(filePath, goVersionOffset)
|
||||
if err != nil {
|
||||
return goOffsets{}, fmt.Errorf("Checking Go version: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
passed, goVersion, err := checkGoVersion(filePath, goVersionOffset)
|
||||
if err != nil {
|
||||
return goOffsets{}, fmt.Errorf("Checking Go version: %s", err)
|
||||
}
|
||||
|
||||
if !passed {
|
||||
return goOffsets{}, fmt.Errorf("Unsupported Go version: %s", goVersion)
|
||||
if passed {
|
||||
abi = ABIInternal
|
||||
}
|
||||
|
||||
writeOffset, err := getOffset(offsets, goWriteSymbol)
|
||||
@@ -63,10 +81,139 @@ func findGoOffsets(filePath string) (goOffsets, error) {
|
||||
return goOffsets{
|
||||
GoWriteOffset: writeOffset,
|
||||
GoReadOffset: readOffset,
|
||||
GoVersion: goVersion,
|
||||
Abi: abi,
|
||||
GoidOffset: goidOffset,
|
||||
GStructOffset: gStructOffset,
|
||||
}, nil
|
||||
}
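As a rough summary of the ABI selection above: binaries whose runtime.buildVersion.str satisfies the minimumABIInternalGoVersion constraint get ABIInternal, everything else (including binaries where the symbol is missing, i.e. Go <= 1.16) falls back to ABI0. The sketch below restates that rule; it assumes the Masterminds/semver package (the diff only shows the semver identifier) and that the version string has already been extracted.

package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

// abiForGoVersion mirrors the rule in findGoOffsets: Go >= 1.17.0 binaries are
// assumed to use the register-based ABIInternal, older ones the stack-based ABI0.
func abiForGoVersion(goVersion string) string {
	v, err := semver.NewVersion(goVersion)
	if err != nil {
		return "ABI0" // unparsable or missing version falls back to ABI0
	}
	constraint, err := semver.NewConstraint(">= 1.17.0")
	if err != nil || !constraint.Check(v) {
		return "ABI0"
	}
	return "ABIInternal"
}

func main() {
	for _, ver := range []string{"1.16.5", "1.17.6", "1.18.3"} {
		fmt.Println(ver, "->", abiForGoVersion(ver))
	}
}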
|
||||
|
||||
func getOffsets(filePath string) (offsets map[string]*goExtendedOffset, err error) {
|
||||
func getSymbol(exe *elf.File, name string) *elf.Symbol {
|
||||
symbols, err := exe.Symbols()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, symbol := range symbols {
|
||||
if symbol.Name == name {
|
||||
s := symbol
|
||||
return &s
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getGStructOffset(exe *elf.File) (gStructOffset uint64, err error) {
|
||||
// This is a bit arcane. Essentially:
|
||||
// - If the program is pure Go, it can do whatever it wants, and puts the G
|
||||
// pointer at %fs-8 on 64 bit.
|
||||
// - %gs is the index of private storage in GDT on 32 bit, and puts the G
|
||||
// pointer at -4(tls).
|
||||
// - Otherwise, Go asks the external linker to place the G pointer by
|
||||
// emitting runtime.tlsg, a TLS symbol, which is relocated to the chosen
|
||||
// offset in libc's TLS block.
|
||||
// - On ARM64 (but really, any architecture other than i386 and x86-64) the
// offset is calculated using runtime.tls_g and the formula is different.
|
||||
|
||||
var tls *elf.Prog
|
||||
for _, prog := range exe.Progs {
|
||||
if prog.Type == elf.PT_TLS {
|
||||
tls = prog
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
switch exe.Machine {
|
||||
case elf.EM_X86_64, elf.EM_386:
|
||||
tlsg := getSymbol(exe, "runtime.tlsg")
|
||||
if tlsg == nil || tls == nil {
|
||||
gStructOffset = ^uint64(PtrSize) + 1 //-ptrSize
|
||||
return
|
||||
}
|
||||
|
||||
// According to https://reviews.llvm.org/D61824, linkers must pad the actual
|
||||
// size of the TLS segment to ensure that (tlsoffset%align) == (vaddr%align).
|
||||
// This formula, copied from the lld code, matches that.
|
||||
// https://github.com/llvm-mirror/lld/blob/9aef969544981d76bea8e4d1961d3a6980980ef9/ELF/InputSection.cpp#L643
|
||||
memsz := tls.Memsz + (-tls.Vaddr-tls.Memsz)&(tls.Align-1)
|
||||
|
||||
// The TLS register points to the end of the TLS block, which is
|
||||
// tls.Memsz long. runtime.tlsg is an offset from the beginning of that block.
|
||||
gStructOffset = ^(memsz) + 1 + tlsg.Value // -tls.Memsz + tlsg.Value
|
||||
|
||||
case elf.EM_AARCH64:
|
||||
tlsg := getSymbol(exe, "runtime.tls_g")
|
||||
if tlsg == nil || tls == nil {
|
||||
gStructOffset = 2 * uint64(PtrSize)
|
||||
return
|
||||
}
|
||||
|
||||
gStructOffset = tlsg.Value + uint64(PtrSize*2) + ((tls.Vaddr - uint64(PtrSize*2)) & (tls.Align - 1))
|
||||
|
||||
default:
|
||||
// we should never get here
|
||||
err = fmt.Errorf("architecture not supported")
|
||||
}
|
||||
|
||||
return
|
||||
}
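To make the padding arithmetic above more concrete, here is a toy calculation with made-up PT_TLS segment values (not taken from any real binary). It only illustrates the lld padding formula and the negative offset that ends up in gStructOffset on x86-64.

package main

import "fmt"

func main() {
	// Hypothetical tls.Memsz, tls.Vaddr, tls.Align and runtime.tlsg values.
	var (
		memsz uint64 = 0x68
		vaddr uint64 = 0x1234
		align uint64 = 0x10
		tlsg  uint64 = 0x10
	)

	// Pad Memsz so that (tlsoffset % align) == (vaddr % align), as lld does.
	padded := memsz + (-vaddr-memsz)&(align-1)

	// The TLS register points at the end of the block, so the G pointer sits at
	// -padded + tlsg relative to it (expressed in two's complement on uint64).
	gStructOffset := ^padded + 1 + tlsg

	fmt.Printf("padded=%#x gStructOffset as int64=%d\n", padded, int64(gStructOffset))
}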
|
||||
|
||||
func getGoidOffset(elfFile *elf.File) (goidOffset uint64, gStructOffset uint64, err error) {
|
||||
var dwarfData *dwarf.Data
|
||||
dwarfData, err = elfFile.DWARF()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
entryReader := dwarfData.Reader()
|
||||
|
||||
var runtimeGOffset uint64
|
||||
var seenRuntimeG bool
|
||||
|
||||
for {
|
||||
// Read all entries in sequence
|
||||
var entry *dwarf.Entry
|
||||
entry, err = entryReader.Next()
|
||||
if err == io.EOF || entry == nil {
|
||||
// We've reached the end of DWARF entries
|
||||
break
|
||||
}
|
||||
|
||||
// Check if this entry is a struct
|
||||
if entry.Tag == dwarf.TagStructType {
|
||||
// Go through fields
|
||||
for _, field := range entry.Field {
|
||||
if field.Attr == dwarf.AttrName {
|
||||
val := field.Val.(string)
|
||||
if val == "runtime.g" {
|
||||
runtimeGOffset = uint64(entry.Offset)
|
||||
seenRuntimeG = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check if this entry is a struct member
|
||||
if seenRuntimeG && entry.Tag == dwarf.TagMember {
|
||||
// Go through fields
|
||||
for _, field := range entry.Field {
|
||||
if field.Attr == dwarf.AttrName {
|
||||
val := field.Val.(string)
|
||||
if val == "goid" {
|
||||
goidOffset = uint64(entry.Offset) - runtimeGOffset - 0x4b
|
||||
gStructOffset, err = getGStructOffset(elfFile)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err = fmt.Errorf("goid not found in DWARF")
|
||||
return
|
||||
}
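The lookup above derives the goid offset from relative DWARF entry offsets (hence the 0x4b adjustment). A hedged alternative, not what the tapper does, is to read the member's DW_AT_data_member_location attribute directly with debug/dwarf, as sketched below; the binary path is a placeholder, and the sketch assumes the Go toolchain emits the member location as a plain constant.

package main

import (
	"debug/dwarf"
	"debug/elf"
	"fmt"
	"log"
)

// goidFieldOffset walks the DWARF info of a Go ELF binary and returns the byte
// offset of the "goid" member inside runtime.g via DW_AT_data_member_location.
func goidFieldOffset(path string) (int64, error) {
	f, err := elf.Open(path)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	data, err := f.DWARF()
	if err != nil {
		return 0, err
	}

	r := data.Reader()
	inRuntimeG := false
	for {
		e, err := r.Next()
		if err != nil {
			return 0, err
		}
		if e == nil {
			break // end of DWARF data
		}
		name, _ := e.Val(dwarf.AttrName).(string)
		switch e.Tag {
		case dwarf.TagStructType:
			inRuntimeG = name == "runtime.g"
		case dwarf.TagMember:
			if inRuntimeG && name == "goid" {
				if off, ok := e.Val(dwarf.AttrDataMemberLoc).(int64); ok {
					return off, nil
				}
			}
		}
	}
	return 0, fmt.Errorf("goid not found in DWARF")
}

func main() {
	off, err := goidFieldOffset("/path/to/a/go/binary") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("runtime.g goid offset: %#x\n", off)
}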
|
||||
|
||||
func getOffsets(filePath string) (offsets map[string]*goExtendedOffset, goidOffset uint64, gStructOffset uint64, err error) {
|
||||
var engine gapstone.Engine
|
||||
switch runtime.GOARCH {
|
||||
case "amd64":
|
||||
@@ -104,13 +251,13 @@ func getOffsets(filePath string) (offsets map[string]*goExtendedOffset, err erro
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
var se *elf.File
|
||||
se, err = elf.NewFile(fd)
|
||||
var elfFile *elf.File
|
||||
elfFile, err = elf.NewFile(fd)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
textSection := se.Section(".text")
|
||||
textSection := elfFile.Section(".text")
|
||||
if textSection == nil {
|
||||
err = fmt.Errorf("No text section")
|
||||
return
|
||||
@@ -124,7 +271,7 @@ func getOffsets(filePath string) (offsets map[string]*goExtendedOffset, err erro
|
||||
}
|
||||
|
||||
var syms []elf.Symbol
|
||||
syms, err = se.Symbols()
|
||||
syms, err = elfFile.Symbols()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -132,7 +279,7 @@ func getOffsets(filePath string) (offsets map[string]*goExtendedOffset, err erro
|
||||
offset := sym.Value
|
||||
|
||||
var lastProg *elf.Prog
|
||||
for _, prog := range se.Progs {
|
||||
for _, prog := range elfFile.Progs {
|
||||
if prog.Vaddr <= sym.Value && sym.Value < (prog.Vaddr+prog.Memsz) {
|
||||
offset = sym.Value - prog.Vaddr + prog.Off
|
||||
lastProg = prog
|
||||
@@ -189,6 +336,8 @@ func getOffsets(filePath string) (offsets map[string]*goExtendedOffset, err erro
|
||||
offsets[sym.Name] = extendedOffset
|
||||
}
|
||||
|
||||
goidOffset, gStructOffset, err = getGoidOffset(elfFile)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -229,7 +378,7 @@ func checkGoVersion(filePath string, offset *goExtendedOffset) (bool, string, er
|
||||
return false, goVersionStr, err
|
||||
}
|
||||
|
||||
goVersionConstraint, err := semver.NewConstraint(fmt.Sprintf(">= %s", minimumSupportedGoVersion))
|
||||
goVersionConstraint, err := semver.NewConstraint(fmt.Sprintf(">= %s", minimumABIInternalGoVersion))
|
||||
if err != nil {
|
||||
return false, goVersionStr, err
|
||||
}
|
||||
|
||||
@@ -14,6 +14,8 @@ type sslHooks struct {
|
||||
sslWriteExRetProbe link.Link
|
||||
sslReadExProbe link.Link
|
||||
sslReadExRetProbe link.Link
|
||||
tcpSendmsg link.Link
|
||||
tcpRecvmsg link.Link
|
||||
}
|
||||
|
||||
func (s *sslHooks) installUprobes(bpfObjects *tlsTapperObjects, sslLibraryPath string) error {
|
||||
@@ -103,6 +105,16 @@ func (s *sslHooks) installSslHooks(bpfObjects *tlsTapperObjects, sslLibrary *lin
|
||||
}
|
||||
}
|
||||
|
||||
s.tcpSendmsg, err = link.Kprobe("tcp_sendmsg", bpfObjects.TcpSendmsg, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
s.tcpRecvmsg, err = link.Kprobe("tcp_recvmsg", bpfObjects.TcpRecvmsg, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -149,5 +161,17 @@ func (s *sslHooks) close() []error {
|
||||
}
|
||||
}
|
||||
|
||||
if s.tcpSendmsg != nil {
|
||||
if err := s.tcpSendmsg.Close(); err != nil {
|
||||
returnValue = append(returnValue, err)
|
||||
}
|
||||
}
|
||||
|
||||
if s.tcpRecvmsg != nil {
|
||||
if err := s.tcpRecvmsg.Close(); err != nil {
|
||||
returnValue = append(returnValue, err)
|
||||
}
|
||||
}
|
||||
|
||||
return returnValue
|
||||
}
|
||||
|
||||
@@ -17,37 +17,37 @@ type syscallHooks struct {
|
||||
func (s *syscallHooks) installSyscallHooks(bpfObjects *tlsTapperObjects) error {
|
||||
var err error
|
||||
|
||||
s.sysEnterRead, err = link.Tracepoint("syscalls", "sys_enter_read", bpfObjects.SysEnterRead)
|
||||
s.sysEnterRead, err = link.Tracepoint("syscalls", "sys_enter_read", bpfObjects.SysEnterRead, nil)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
s.sysEnterWrite, err = link.Tracepoint("syscalls", "sys_enter_write", bpfObjects.SysEnterWrite)
|
||||
s.sysEnterWrite, err = link.Tracepoint("syscalls", "sys_enter_write", bpfObjects.SysEnterWrite, nil)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
s.sysEnterAccept4, err = link.Tracepoint("syscalls", "sys_enter_accept4", bpfObjects.SysEnterAccept4)
|
||||
s.sysEnterAccept4, err = link.Tracepoint("syscalls", "sys_enter_accept4", bpfObjects.SysEnterAccept4, nil)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
s.sysExitAccept4, err = link.Tracepoint("syscalls", "sys_exit_accept4", bpfObjects.SysExitAccept4)
|
||||
s.sysExitAccept4, err = link.Tracepoint("syscalls", "sys_exit_accept4", bpfObjects.SysExitAccept4, nil)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
s.sysEnterConnect, err = link.Tracepoint("syscalls", "sys_enter_connect", bpfObjects.SysEnterConnect)
|
||||
s.sysEnterConnect, err = link.Tracepoint("syscalls", "sys_enter_connect", bpfObjects.SysEnterConnect, nil)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
s.sysExitConnect, err = link.Tracepoint("syscalls", "sys_exit_connect", bpfObjects.SysExitConnect)
|
||||
s.sysExitConnect, err = link.Tracepoint("syscalls", "sys_exit_connect", bpfObjects.SysExitConnect, nil)
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
|
||||
@@ -134,14 +134,9 @@ func (p *tlsPoller) pollChunksPerfBuffer(chunks chan<- *tlsTapperTlsChunk) {
|
||||
|
||||
func (p *tlsPoller) handleTlsChunk(chunk *tlsTapperTlsChunk, extension *api.Extension, emitter api.Emitter,
|
||||
options *api.TrafficFilteringOptions, streamsMap api.TcpStreamMap) error {
|
||||
address, err := p.getSockfdAddressPair(chunk)
|
||||
|
||||
address, err := p.getAddressPair(chunk)
|
||||
if err != nil {
|
||||
address, err = chunk.getAddressPair()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
key := buildTlsKey(address)
|
||||
@@ -161,6 +156,22 @@ func (p *tlsPoller) handleTlsChunk(chunk *tlsTapperTlsChunk, extension *api.Exte
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *tlsPoller) getAddressPair(chunk *tlsTapperTlsChunk) (addressPair, error) {
|
||||
addrPairFromChunk, full := chunk.getAddressPair()
|
||||
if full {
|
||||
return addrPairFromChunk, nil
|
||||
}
|
||||
|
||||
addrPairFromSockfd, err := p.getSockfdAddressPair(chunk)
|
||||
if err == nil {
|
||||
return addrPairFromSockfd, nil
|
||||
} else {
|
||||
logger.Log.Error("failed to get address from sock fd:", err)
|
||||
}
|
||||
|
||||
return addrPairFromChunk, err
|
||||
}
|
||||
|
||||
func (p *tlsPoller) startNewTlsReader(chunk *tlsTapperTlsChunk, address *addressPair, key string,
|
||||
emitter api.Emitter, extension *api.Extension, options *api.TrafficFilteringOptions,
|
||||
streamsMap api.TcpStreamMap) *tlsReader {
|
||||
|
||||
@@ -6,13 +6,18 @@ import (
|
||||
|
||||
"github.com/cilium/ebpf/rlimit"
|
||||
"github.com/go-errors/errors"
|
||||
"github.com/moby/moby/pkg/parsers/kernel"
|
||||
"github.com/up9inc/mizu/logger"
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
const GlobalTapPid = 0
|
||||
|
||||
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go@0d0727ef53e2f53b1731c73f4c61e0f58693083a -target $BPF_TARGET -cflags $BPF_CFLAGS -type tls_chunk tlsTapper bpf/tls_tapper.c
|
||||
// TODO: cilium/ebpf does not support .kconfig. Therefore, for now, we build object files per kernel version.
|
||||
|
||||
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go@v0.9.0 -target $BPF_TARGET -cflags $BPF_CFLAGS -type tls_chunk -type goid_offsets tlsTapper bpf/tls_tapper.c
|
||||
|
||||
//go:generate go run github.com/cilium/ebpf/cmd/bpf2go@v0.9.0 -target $BPF_TARGET -cflags "${BPF_CFLAGS} -DKERNEL_BEFORE_4_6" -type tls_chunk -type goid_offsets tlsTapper46 bpf/tls_tapper.c
|
||||
|
||||
type TlsTapper struct {
|
||||
bpfObjects tlsTapperObjects
|
||||
@@ -27,13 +32,30 @@ type TlsTapper struct {
|
||||
func (t *TlsTapper) Init(chunksBufferSize int, logBufferSize int, procfs string, extension *api.Extension) error {
|
||||
logger.Log.Infof("Initializing tls tapper (chunksSize: %d) (logSize: %d)", chunksBufferSize, logBufferSize)
|
||||
|
||||
if err := setupRLimit(); err != nil {
|
||||
var err error
|
||||
err = setupRLimit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var kernelVersion *kernel.VersionInfo
|
||||
kernelVersion, err = kernel.GetKernelVersion()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Log.Infof("Detected Linux kernel version: %s", kernelVersion)
|
||||
|
||||
t.bpfObjects = tlsTapperObjects{}
|
||||
if err := loadTlsTapperObjects(&t.bpfObjects, nil); err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
// TODO: cilium/ebpf does not support .kconfig. Therefore, for now, we load object files according to kernel version.
|
||||
if kernel.CompareKernelVersion(*kernelVersion, kernel.VersionInfo{Kernel: 4, Major: 6, Minor: 0}) < 1 {
|
||||
if err := loadTlsTapper46Objects(&t.bpfObjects, nil); err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
} else {
|
||||
if err := loadTlsTapperObjects(&t.bpfObjects, nil); err != nil {
|
||||
return errors.Wrap(err, 0)
|
||||
}
|
||||
}
|
||||
|
||||
t.syscallHooks = syscallHooks{}
|
||||
@@ -48,7 +70,6 @@ func (t *TlsTapper) Init(chunksBufferSize int, logBufferSize int, procfs string,
|
||||
return err
|
||||
}
|
||||
|
||||
var err error
|
||||
t.poller, err = newTlsPoller(t, extension, procfs)
|
||||
|
||||
if err != nil {
|
||||
|
||||
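A minimal, self-contained sketch of the kernel gate introduced above, using the moby/moby kernel parser that this file imports (Linux only). It merely prints which of the two generated object sets would be loaded; the loader calls themselves are left out.

package main

import (
	"fmt"
	"log"

	"github.com/moby/moby/pkg/parsers/kernel"
)

func main() {
	v, err := kernel.GetKernelVersion()
	if err != nil {
		log.Fatal(err)
	}
	// CompareKernelVersion returns -1, 0 or 1 for older, equal or newer.
	// "< 1" therefore matches kernels up to and including 4.6.0, which get the
	// KERNEL_BEFORE_4_6 build (tlsTapper46); newer kernels get the default build.
	if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 6, Minor: 0}) < 1 {
		fmt.Println("would load the tlsTapper46 object files")
	} else {
		fmt.Println("would load the default tlsTapper object files")
	}
}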
244
tap/tlstapper/tlstapper46_bpfel_arm64.go
Normal file
244
tap/tlstapper/tlstapper46_bpfel_arm64.go
Normal file
@@ -0,0 +1,244 @@
|
||||
// Code generated by bpf2go; DO NOT EDIT.
|
||||
//go:build arm64
|
||||
// +build arm64
|
||||
|
||||
package tlstapper
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
_ "embed"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/cilium/ebpf"
|
||||
)
|
||||
|
||||
type tlsTapper46GoidOffsets struct {
|
||||
G_addrOffset uint64
|
||||
GoidOffset uint64
|
||||
}
|
||||
|
||||
type tlsTapper46TlsChunk struct {
|
||||
Pid uint32
|
||||
Tgid uint32
|
||||
Len uint32
|
||||
Start uint32
|
||||
Recorded uint32
|
||||
Fd uint32
|
||||
Flags uint32
|
||||
AddressInfo struct {
|
||||
Mode int32
|
||||
Saddr uint32
|
||||
Daddr uint32
|
||||
Sport uint16
|
||||
Dport uint16
|
||||
}
|
||||
Data [4096]uint8
|
||||
}
|
||||
|
||||
// loadTlsTapper46 returns the embedded CollectionSpec for tlsTapper46.
|
||||
func loadTlsTapper46() (*ebpf.CollectionSpec, error) {
|
||||
reader := bytes.NewReader(_TlsTapper46Bytes)
|
||||
spec, err := ebpf.LoadCollectionSpecFromReader(reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't load tlsTapper46: %w", err)
|
||||
}
|
||||
|
||||
return spec, err
|
||||
}
|
||||
|
||||
// loadTlsTapper46Objects loads tlsTapper46 and converts it into a struct.
|
||||
//
|
||||
// The following types are suitable as obj argument:
|
||||
//
|
||||
// *tlsTapper46Objects
|
||||
// *tlsTapper46Programs
|
||||
// *tlsTapper46Maps
|
||||
//
|
||||
// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
|
||||
func loadTlsTapper46Objects(obj interface{}, opts *ebpf.CollectionOptions) error {
|
||||
spec, err := loadTlsTapper46()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return spec.LoadAndAssign(obj, opts)
|
||||
}
|
||||
|
||||
// tlsTapper46Specs contains maps and programs before they are loaded into the kernel.
|
||||
//
|
||||
// It can be passed ebpf.CollectionSpec.Assign.
|
||||
type tlsTapper46Specs struct {
|
||||
tlsTapper46ProgramSpecs
|
||||
tlsTapper46MapSpecs
|
||||
}
|
||||
|
||||
// tlsTapper46Specs contains programs before they are loaded into the kernel.
|
||||
//
|
||||
// It can be passed ebpf.CollectionSpec.Assign.
|
||||
type tlsTapper46ProgramSpecs struct {
|
||||
GoCryptoTlsAbi0Read *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_read"`
|
||||
GoCryptoTlsAbi0ReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_read_ex"`
|
||||
GoCryptoTlsAbi0Write *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_write"`
|
||||
GoCryptoTlsAbi0WriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_write_ex"`
|
||||
GoCryptoTlsAbiInternalRead *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_read"`
|
||||
GoCryptoTlsAbiInternalReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_read_ex"`
|
||||
GoCryptoTlsAbiInternalWrite *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_write"`
|
||||
GoCryptoTlsAbiInternalWriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_write_ex"`
|
||||
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
|
||||
TcpRecvmsg *ebpf.ProgramSpec `ebpf:"tcp_recvmsg"`
|
||||
TcpSendmsg *ebpf.ProgramSpec `ebpf:"tcp_sendmsg"`
|
||||
}
|
||||
|
||||
// tlsTapper46MapSpecs contains maps before they are loaded into the kernel.
|
||||
//
|
||||
// It can be passed ebpf.CollectionSpec.Assign.
|
||||
type tlsTapper46MapSpecs struct {
|
||||
AcceptSyscallContext *ebpf.MapSpec `ebpf:"accept_syscall_context"`
|
||||
ChunksBuffer *ebpf.MapSpec `ebpf:"chunks_buffer"`
|
||||
ConnectSyscallInfo *ebpf.MapSpec `ebpf:"connect_syscall_info"`
|
||||
FileDescriptorToIpv4 *ebpf.MapSpec `ebpf:"file_descriptor_to_ipv4"`
|
||||
GoReadContext *ebpf.MapSpec `ebpf:"go_read_context"`
|
||||
GoWriteContext *ebpf.MapSpec `ebpf:"go_write_context"`
|
||||
GoidOffsetsMap *ebpf.MapSpec `ebpf:"goid_offsets_map"`
|
||||
Heap *ebpf.MapSpec `ebpf:"heap"`
|
||||
LogBuffer *ebpf.MapSpec `ebpf:"log_buffer"`
|
||||
OpensslReadContext *ebpf.MapSpec `ebpf:"openssl_read_context"`
|
||||
OpensslWriteContext *ebpf.MapSpec `ebpf:"openssl_write_context"`
|
||||
PidsMap *ebpf.MapSpec `ebpf:"pids_map"`
|
||||
}
|
||||
|
||||
// tlsTapper46Objects contains all objects after they have been loaded into the kernel.
|
||||
//
|
||||
// It can be passed to loadTlsTapper46Objects or ebpf.CollectionSpec.LoadAndAssign.
|
||||
type tlsTapper46Objects struct {
|
||||
tlsTapper46Programs
|
||||
tlsTapper46Maps
|
||||
}
|
||||
|
||||
func (o *tlsTapper46Objects) Close() error {
|
||||
return _TlsTapper46Close(
|
||||
&o.tlsTapper46Programs,
|
||||
&o.tlsTapper46Maps,
|
||||
)
|
||||
}
|
||||
|
||||
// tlsTapper46Maps contains all maps after they have been loaded into the kernel.
|
||||
//
|
||||
// It can be passed to loadTlsTapper46Objects or ebpf.CollectionSpec.LoadAndAssign.
|
||||
type tlsTapper46Maps struct {
|
||||
AcceptSyscallContext *ebpf.Map `ebpf:"accept_syscall_context"`
|
||||
ChunksBuffer *ebpf.Map `ebpf:"chunks_buffer"`
|
||||
ConnectSyscallInfo *ebpf.Map `ebpf:"connect_syscall_info"`
|
||||
FileDescriptorToIpv4 *ebpf.Map `ebpf:"file_descriptor_to_ipv4"`
|
||||
GoReadContext *ebpf.Map `ebpf:"go_read_context"`
|
||||
GoWriteContext *ebpf.Map `ebpf:"go_write_context"`
|
||||
GoidOffsetsMap *ebpf.Map `ebpf:"goid_offsets_map"`
|
||||
Heap *ebpf.Map `ebpf:"heap"`
|
||||
LogBuffer *ebpf.Map `ebpf:"log_buffer"`
|
||||
OpensslReadContext *ebpf.Map `ebpf:"openssl_read_context"`
|
||||
OpensslWriteContext *ebpf.Map `ebpf:"openssl_write_context"`
|
||||
PidsMap *ebpf.Map `ebpf:"pids_map"`
|
||||
}
|
||||
|
||||
func (m *tlsTapper46Maps) Close() error {
|
||||
return _TlsTapper46Close(
|
||||
m.AcceptSyscallContext,
|
||||
m.ChunksBuffer,
|
||||
m.ConnectSyscallInfo,
|
||||
m.FileDescriptorToIpv4,
|
||||
m.GoReadContext,
|
||||
m.GoWriteContext,
|
||||
m.GoidOffsetsMap,
|
||||
m.Heap,
|
||||
m.LogBuffer,
|
||||
m.OpensslReadContext,
|
||||
m.OpensslWriteContext,
|
||||
m.PidsMap,
|
||||
)
|
||||
}
|
||||
|
||||
// tlsTapper46Programs contains all programs after they have been loaded into the kernel.
|
||||
//
|
||||
// It can be passed to loadTlsTapper46Objects or ebpf.CollectionSpec.LoadAndAssign.
|
||||
type tlsTapper46Programs struct {
|
||||
GoCryptoTlsAbi0Read *ebpf.Program `ebpf:"go_crypto_tls_abi0_read"`
|
||||
GoCryptoTlsAbi0ReadEx *ebpf.Program `ebpf:"go_crypto_tls_abi0_read_ex"`
|
||||
GoCryptoTlsAbi0Write *ebpf.Program `ebpf:"go_crypto_tls_abi0_write"`
|
||||
GoCryptoTlsAbi0WriteEx *ebpf.Program `ebpf:"go_crypto_tls_abi0_write_ex"`
|
||||
GoCryptoTlsAbiInternalRead *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_read"`
|
||||
GoCryptoTlsAbiInternalReadEx *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_read_ex"`
|
||||
GoCryptoTlsAbiInternalWrite *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_write"`
|
||||
GoCryptoTlsAbiInternalWriteEx *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_write_ex"`
|
||||
SslRead *ebpf.Program `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.Program `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
|
||||
TcpRecvmsg *ebpf.Program `ebpf:"tcp_recvmsg"`
|
||||
TcpSendmsg *ebpf.Program `ebpf:"tcp_sendmsg"`
|
||||
}
|
||||
|
||||
func (p *tlsTapper46Programs) Close() error {
|
||||
return _TlsTapper46Close(
|
||||
p.GoCryptoTlsAbi0Read,
|
||||
p.GoCryptoTlsAbi0ReadEx,
|
||||
p.GoCryptoTlsAbi0Write,
|
||||
p.GoCryptoTlsAbi0WriteEx,
|
||||
p.GoCryptoTlsAbiInternalRead,
|
||||
p.GoCryptoTlsAbiInternalReadEx,
|
||||
p.GoCryptoTlsAbiInternalWrite,
|
||||
p.GoCryptoTlsAbiInternalWriteEx,
|
||||
p.SslRead,
|
||||
p.SslReadEx,
|
||||
p.SslRetRead,
|
||||
p.SslRetReadEx,
|
||||
p.SslRetWrite,
|
||||
p.SslRetWriteEx,
|
||||
p.SslWrite,
|
||||
p.SslWriteEx,
|
||||
p.SysEnterAccept4,
|
||||
p.SysEnterConnect,
|
||||
p.SysEnterRead,
|
||||
p.SysEnterWrite,
|
||||
p.SysExitAccept4,
|
||||
p.SysExitConnect,
|
||||
p.TcpRecvmsg,
|
||||
p.TcpSendmsg,
|
||||
)
|
||||
}
|
||||
|
||||
func _TlsTapper46Close(closers ...io.Closer) error {
|
||||
for _, closer := range closers {
|
||||
if err := closer.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do not access this directly.
|
||||
//go:embed tlstapper46_bpfel_arm64.o
|
||||
var _TlsTapper46Bytes []byte
|
||||
BIN
tap/tlstapper/tlstapper46_bpfel_arm64.o
Normal file
BIN
tap/tlstapper/tlstapper46_bpfel_arm64.o
Normal file
Binary file not shown.
244
tap/tlstapper/tlstapper46_bpfel_x86.go
Normal file
244
tap/tlstapper/tlstapper46_bpfel_x86.go
Normal file
@@ -0,0 +1,244 @@
|
||||
// Code generated by bpf2go; DO NOT EDIT.
|
||||
//go:build 386 || amd64
|
||||
// +build 386 amd64
|
||||
|
||||
package tlstapper
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
_ "embed"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/cilium/ebpf"
|
||||
)
|
||||
|
||||
type tlsTapper46GoidOffsets struct {
|
||||
G_addrOffset uint64
|
||||
GoidOffset uint64
|
||||
}
|
||||
|
||||
type tlsTapper46TlsChunk struct {
|
||||
Pid uint32
|
||||
Tgid uint32
|
||||
Len uint32
|
||||
Start uint32
|
||||
Recorded uint32
|
||||
Fd uint32
|
||||
Flags uint32
|
||||
AddressInfo struct {
|
||||
Mode int32
|
||||
Saddr uint32
|
||||
Daddr uint32
|
||||
Sport uint16
|
||||
Dport uint16
|
||||
}
|
||||
Data [4096]uint8
|
||||
}
|
||||
|
||||
// loadTlsTapper46 returns the embedded CollectionSpec for tlsTapper46.
|
||||
func loadTlsTapper46() (*ebpf.CollectionSpec, error) {
|
||||
reader := bytes.NewReader(_TlsTapper46Bytes)
|
||||
spec, err := ebpf.LoadCollectionSpecFromReader(reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't load tlsTapper46: %w", err)
|
||||
}
|
||||
|
||||
return spec, err
|
||||
}
|
||||
|
||||
// loadTlsTapper46Objects loads tlsTapper46 and converts it into a struct.
|
||||
//
|
||||
// The following types are suitable as obj argument:
|
||||
//
|
||||
// *tlsTapper46Objects
|
||||
// *tlsTapper46Programs
|
||||
// *tlsTapper46Maps
|
||||
//
|
||||
// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
|
||||
func loadTlsTapper46Objects(obj interface{}, opts *ebpf.CollectionOptions) error {
|
||||
spec, err := loadTlsTapper46()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return spec.LoadAndAssign(obj, opts)
|
||||
}
|
||||
|
||||
// tlsTapper46Specs contains maps and programs before they are loaded into the kernel.
|
||||
//
|
||||
// It can be passed ebpf.CollectionSpec.Assign.
|
||||
type tlsTapper46Specs struct {
|
||||
tlsTapper46ProgramSpecs
|
||||
tlsTapper46MapSpecs
|
||||
}
|
||||
|
||||
// tlsTapper46Specs contains programs before they are loaded into the kernel.
|
||||
//
|
||||
// It can be passed ebpf.CollectionSpec.Assign.
|
||||
type tlsTapper46ProgramSpecs struct {
|
||||
GoCryptoTlsAbi0Read *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_read"`
|
||||
GoCryptoTlsAbi0ReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_read_ex"`
|
||||
GoCryptoTlsAbi0Write *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_write"`
|
||||
GoCryptoTlsAbi0WriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_write_ex"`
|
||||
GoCryptoTlsAbiInternalRead *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_read"`
|
||||
GoCryptoTlsAbiInternalReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_read_ex"`
|
||||
GoCryptoTlsAbiInternalWrite *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_write"`
|
||||
GoCryptoTlsAbiInternalWriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_write_ex"`
|
||||
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
|
||||
TcpRecvmsg *ebpf.ProgramSpec `ebpf:"tcp_recvmsg"`
|
||||
TcpSendmsg *ebpf.ProgramSpec `ebpf:"tcp_sendmsg"`
|
||||
}
|
||||
|
||||
// tlsTapper46MapSpecs contains maps before they are loaded into the kernel.
|
||||
//
|
||||
// It can be passed ebpf.CollectionSpec.Assign.
|
||||
type tlsTapper46MapSpecs struct {
|
||||
AcceptSyscallContext *ebpf.MapSpec `ebpf:"accept_syscall_context"`
|
||||
ChunksBuffer *ebpf.MapSpec `ebpf:"chunks_buffer"`
|
||||
ConnectSyscallInfo *ebpf.MapSpec `ebpf:"connect_syscall_info"`
|
||||
FileDescriptorToIpv4 *ebpf.MapSpec `ebpf:"file_descriptor_to_ipv4"`
|
||||
GoReadContext *ebpf.MapSpec `ebpf:"go_read_context"`
|
||||
GoWriteContext *ebpf.MapSpec `ebpf:"go_write_context"`
|
||||
GoidOffsetsMap *ebpf.MapSpec `ebpf:"goid_offsets_map"`
|
||||
Heap *ebpf.MapSpec `ebpf:"heap"`
|
||||
LogBuffer *ebpf.MapSpec `ebpf:"log_buffer"`
|
||||
OpensslReadContext *ebpf.MapSpec `ebpf:"openssl_read_context"`
|
||||
OpensslWriteContext *ebpf.MapSpec `ebpf:"openssl_write_context"`
|
||||
PidsMap *ebpf.MapSpec `ebpf:"pids_map"`
|
||||
}
|
||||
|
||||
// tlsTapper46Objects contains all objects after they have been loaded into the kernel.
|
||||
//
|
||||
// It can be passed to loadTlsTapper46Objects or ebpf.CollectionSpec.LoadAndAssign.
|
||||
type tlsTapper46Objects struct {
|
||||
tlsTapper46Programs
|
||||
tlsTapper46Maps
|
||||
}
|
||||
|
||||
func (o *tlsTapper46Objects) Close() error {
|
||||
return _TlsTapper46Close(
|
||||
&o.tlsTapper46Programs,
|
||||
&o.tlsTapper46Maps,
|
||||
)
|
||||
}
|
||||
|
||||
// tlsTapper46Maps contains all maps after they have been loaded into the kernel.
|
||||
//
|
||||
// It can be passed to loadTlsTapper46Objects or ebpf.CollectionSpec.LoadAndAssign.
|
||||
type tlsTapper46Maps struct {
|
||||
AcceptSyscallContext *ebpf.Map `ebpf:"accept_syscall_context"`
|
||||
ChunksBuffer *ebpf.Map `ebpf:"chunks_buffer"`
|
||||
ConnectSyscallInfo *ebpf.Map `ebpf:"connect_syscall_info"`
|
||||
FileDescriptorToIpv4 *ebpf.Map `ebpf:"file_descriptor_to_ipv4"`
|
||||
GoReadContext *ebpf.Map `ebpf:"go_read_context"`
|
||||
GoWriteContext *ebpf.Map `ebpf:"go_write_context"`
|
||||
GoidOffsetsMap *ebpf.Map `ebpf:"goid_offsets_map"`
|
||||
Heap *ebpf.Map `ebpf:"heap"`
|
||||
LogBuffer *ebpf.Map `ebpf:"log_buffer"`
|
||||
OpensslReadContext *ebpf.Map `ebpf:"openssl_read_context"`
|
||||
OpensslWriteContext *ebpf.Map `ebpf:"openssl_write_context"`
|
||||
PidsMap *ebpf.Map `ebpf:"pids_map"`
|
||||
}
|
||||
|
||||
func (m *tlsTapper46Maps) Close() error {
|
||||
return _TlsTapper46Close(
|
||||
m.AcceptSyscallContext,
|
||||
m.ChunksBuffer,
|
||||
m.ConnectSyscallInfo,
|
||||
m.FileDescriptorToIpv4,
|
||||
m.GoReadContext,
|
||||
m.GoWriteContext,
|
||||
m.GoidOffsetsMap,
|
||||
m.Heap,
|
||||
m.LogBuffer,
|
||||
m.OpensslReadContext,
|
||||
m.OpensslWriteContext,
|
||||
m.PidsMap,
|
||||
)
|
||||
}
|
||||
|
||||
// tlsTapper46Programs contains all programs after they have been loaded into the kernel.
|
||||
//
|
||||
// It can be passed to loadTlsTapper46Objects or ebpf.CollectionSpec.LoadAndAssign.
|
||||
type tlsTapper46Programs struct {
|
||||
GoCryptoTlsAbi0Read *ebpf.Program `ebpf:"go_crypto_tls_abi0_read"`
|
||||
GoCryptoTlsAbi0ReadEx *ebpf.Program `ebpf:"go_crypto_tls_abi0_read_ex"`
|
||||
GoCryptoTlsAbi0Write *ebpf.Program `ebpf:"go_crypto_tls_abi0_write"`
|
||||
GoCryptoTlsAbi0WriteEx *ebpf.Program `ebpf:"go_crypto_tls_abi0_write_ex"`
|
||||
GoCryptoTlsAbiInternalRead *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_read"`
|
||||
GoCryptoTlsAbiInternalReadEx *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_read_ex"`
|
||||
GoCryptoTlsAbiInternalWrite *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_write"`
|
||||
GoCryptoTlsAbiInternalWriteEx *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_write_ex"`
|
||||
SslRead *ebpf.Program `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.Program `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
|
||||
TcpRecvmsg *ebpf.Program `ebpf:"tcp_recvmsg"`
|
||||
TcpSendmsg *ebpf.Program `ebpf:"tcp_sendmsg"`
|
||||
}
|
||||
|
||||
func (p *tlsTapper46Programs) Close() error {
|
||||
return _TlsTapper46Close(
|
||||
p.GoCryptoTlsAbi0Read,
|
||||
p.GoCryptoTlsAbi0ReadEx,
|
||||
p.GoCryptoTlsAbi0Write,
|
||||
p.GoCryptoTlsAbi0WriteEx,
|
||||
p.GoCryptoTlsAbiInternalRead,
|
||||
p.GoCryptoTlsAbiInternalReadEx,
|
||||
p.GoCryptoTlsAbiInternalWrite,
|
||||
p.GoCryptoTlsAbiInternalWriteEx,
|
||||
p.SslRead,
|
||||
p.SslReadEx,
|
||||
p.SslRetRead,
|
||||
p.SslRetReadEx,
|
||||
p.SslRetWrite,
|
||||
p.SslRetWriteEx,
|
||||
p.SslWrite,
|
||||
p.SslWriteEx,
|
||||
p.SysEnterAccept4,
|
||||
p.SysEnterConnect,
|
||||
p.SysEnterRead,
|
||||
p.SysEnterWrite,
|
||||
p.SysExitAccept4,
|
||||
p.SysExitConnect,
|
||||
p.TcpRecvmsg,
|
||||
p.TcpSendmsg,
|
||||
)
|
||||
}
|
||||
|
||||
func _TlsTapper46Close(closers ...io.Closer) error {
|
||||
for _, closer := range closers {
|
||||
if err := closer.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Do not access this directly.
|
||||
//go:embed tlstapper46_bpfel_x86.o
|
||||
var _TlsTapper46Bytes []byte
|
||||
BIN  tap/tlstapper/tlstapper46_bpfel_x86.o  Normal file
Binary file not shown.
@@ -13,16 +13,27 @@ import (
|
||||
"github.com/cilium/ebpf"
|
||||
)
|
||||
|
||||
type tlsTapperGoidOffsets struct {
|
||||
G_addrOffset uint64
|
||||
GoidOffset uint64
|
||||
}
|
||||
|
||||
type tlsTapperTlsChunk struct {
|
||||
Pid uint32
|
||||
Tgid uint32
|
||||
Len uint32
|
||||
Start uint32
|
||||
Recorded uint32
|
||||
Fd uint32
|
||||
Flags uint32
|
||||
Address [16]uint8
|
||||
Data [4096]uint8
|
||||
Pid uint32
|
||||
Tgid uint32
|
||||
Len uint32
|
||||
Start uint32
|
||||
Recorded uint32
|
||||
Fd uint32
|
||||
Flags uint32
|
||||
AddressInfo struct {
|
||||
Mode int32
|
||||
Saddr uint32
|
||||
Daddr uint32
|
||||
Sport uint16
|
||||
Dport uint16
|
||||
}
|
||||
Data [4096]uint8
|
||||
}
|
||||
|
||||
// loadTlsTapper returns the embedded CollectionSpec for tlsTapper.
|
||||
@@ -66,24 +77,30 @@ type tlsTapperSpecs struct {
|
||||
//
|
||||
// It can be passed ebpf.CollectionSpec.Assign.
|
||||
type tlsTapperProgramSpecs struct {
|
||||
GoCryptoTlsRead *ebpf.ProgramSpec `ebpf:"go_crypto_tls_read"`
|
||||
GoCryptoTlsReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_read_ex"`
|
||||
GoCryptoTlsWrite *ebpf.ProgramSpec `ebpf:"go_crypto_tls_write"`
|
||||
GoCryptoTlsWriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_write_ex"`
|
||||
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
|
||||
GoCryptoTlsAbi0Read *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_read"`
|
||||
GoCryptoTlsAbi0ReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_read_ex"`
|
||||
GoCryptoTlsAbi0Write *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_write"`
|
||||
GoCryptoTlsAbi0WriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_write_ex"`
|
||||
GoCryptoTlsAbiInternalRead *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_read"`
|
||||
GoCryptoTlsAbiInternalReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_read_ex"`
|
||||
GoCryptoTlsAbiInternalWrite *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_write"`
|
||||
GoCryptoTlsAbiInternalWriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_write_ex"`
|
||||
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
|
||||
TcpRecvmsg *ebpf.ProgramSpec `ebpf:"tcp_recvmsg"`
|
||||
TcpSendmsg *ebpf.ProgramSpec `ebpf:"tcp_sendmsg"`
|
||||
}
|
||||
|
||||
// tlsTapperMapSpecs contains maps before they are loaded into the kernel.
|
||||
@@ -96,6 +113,7 @@ type tlsTapperMapSpecs struct {
|
||||
FileDescriptorToIpv4 *ebpf.MapSpec `ebpf:"file_descriptor_to_ipv4"`
|
||||
GoReadContext *ebpf.MapSpec `ebpf:"go_read_context"`
|
||||
GoWriteContext *ebpf.MapSpec `ebpf:"go_write_context"`
|
||||
GoidOffsetsMap *ebpf.MapSpec `ebpf:"goid_offsets_map"`
|
||||
Heap *ebpf.MapSpec `ebpf:"heap"`
|
||||
LogBuffer *ebpf.MapSpec `ebpf:"log_buffer"`
|
||||
OpensslReadContext *ebpf.MapSpec `ebpf:"openssl_read_context"`
|
||||
@@ -128,6 +146,7 @@ type tlsTapperMaps struct {
|
||||
FileDescriptorToIpv4 *ebpf.Map `ebpf:"file_descriptor_to_ipv4"`
|
||||
GoReadContext *ebpf.Map `ebpf:"go_read_context"`
|
||||
GoWriteContext *ebpf.Map `ebpf:"go_write_context"`
|
||||
GoidOffsetsMap *ebpf.Map `ebpf:"goid_offsets_map"`
|
||||
Heap *ebpf.Map `ebpf:"heap"`
|
||||
LogBuffer *ebpf.Map `ebpf:"log_buffer"`
|
||||
OpensslReadContext *ebpf.Map `ebpf:"openssl_read_context"`
|
||||
@@ -143,6 +162,7 @@ func (m *tlsTapperMaps) Close() error {
|
||||
m.FileDescriptorToIpv4,
|
||||
m.GoReadContext,
|
||||
m.GoWriteContext,
|
||||
m.GoidOffsetsMap,
|
||||
m.Heap,
|
||||
m.LogBuffer,
|
||||
m.OpensslReadContext,
|
||||
@@ -155,32 +175,42 @@ func (m *tlsTapperMaps) Close() error {
|
||||
//
|
||||
// It can be passed to loadTlsTapperObjects or ebpf.CollectionSpec.LoadAndAssign.
|
||||
type tlsTapperPrograms struct {
|
||||
GoCryptoTlsRead *ebpf.Program `ebpf:"go_crypto_tls_read"`
|
||||
GoCryptoTlsReadEx *ebpf.Program `ebpf:"go_crypto_tls_read_ex"`
|
||||
GoCryptoTlsWrite *ebpf.Program `ebpf:"go_crypto_tls_write"`
|
||||
GoCryptoTlsWriteEx *ebpf.Program `ebpf:"go_crypto_tls_write_ex"`
|
||||
SslRead *ebpf.Program `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.Program `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
|
||||
GoCryptoTlsAbi0Read *ebpf.Program `ebpf:"go_crypto_tls_abi0_read"`
|
||||
GoCryptoTlsAbi0ReadEx *ebpf.Program `ebpf:"go_crypto_tls_abi0_read_ex"`
|
||||
GoCryptoTlsAbi0Write *ebpf.Program `ebpf:"go_crypto_tls_abi0_write"`
|
||||
GoCryptoTlsAbi0WriteEx *ebpf.Program `ebpf:"go_crypto_tls_abi0_write_ex"`
|
||||
GoCryptoTlsAbiInternalRead *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_read"`
|
||||
GoCryptoTlsAbiInternalReadEx *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_read_ex"`
|
||||
GoCryptoTlsAbiInternalWrite *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_write"`
|
||||
GoCryptoTlsAbiInternalWriteEx *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_write_ex"`
|
||||
SslRead *ebpf.Program `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.Program `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
|
||||
TcpRecvmsg *ebpf.Program `ebpf:"tcp_recvmsg"`
|
||||
TcpSendmsg *ebpf.Program `ebpf:"tcp_sendmsg"`
|
||||
}
|
||||
|
||||
func (p *tlsTapperPrograms) Close() error {
|
||||
return _TlsTapperClose(
|
||||
p.GoCryptoTlsRead,
|
||||
p.GoCryptoTlsReadEx,
|
||||
p.GoCryptoTlsWrite,
|
||||
p.GoCryptoTlsWriteEx,
|
||||
p.GoCryptoTlsAbi0Read,
|
||||
p.GoCryptoTlsAbi0ReadEx,
|
||||
p.GoCryptoTlsAbi0Write,
|
||||
p.GoCryptoTlsAbi0WriteEx,
|
||||
p.GoCryptoTlsAbiInternalRead,
|
||||
p.GoCryptoTlsAbiInternalReadEx,
|
||||
p.GoCryptoTlsAbiInternalWrite,
|
||||
p.GoCryptoTlsAbiInternalWriteEx,
|
||||
p.SslRead,
|
||||
p.SslReadEx,
|
||||
p.SslRetRead,
|
||||
@@ -195,6 +225,8 @@ func (p *tlsTapperPrograms) Close() error {
|
||||
p.SysEnterWrite,
|
||||
p.SysExitAccept4,
|
||||
p.SysExitConnect,
|
||||
p.TcpRecvmsg,
|
||||
p.TcpSendmsg,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
Binary file not shown.
@@ -13,16 +13,27 @@ import (
|
||||
"github.com/cilium/ebpf"
|
||||
)
|
||||
|
||||
type tlsTapperGoidOffsets struct {
|
||||
G_addrOffset uint64
|
||||
GoidOffset uint64
|
||||
}
|
||||
|
||||
type tlsTapperTlsChunk struct {
|
||||
Pid uint32
|
||||
Tgid uint32
|
||||
Len uint32
|
||||
Start uint32
|
||||
Recorded uint32
|
||||
Fd uint32
|
||||
Flags uint32
|
||||
Address [16]uint8
|
||||
Data [4096]uint8
|
||||
Pid uint32
|
||||
Tgid uint32
|
||||
Len uint32
|
||||
Start uint32
|
||||
Recorded uint32
|
||||
Fd uint32
|
||||
Flags uint32
|
||||
AddressInfo struct {
|
||||
Mode int32
|
||||
Saddr uint32
|
||||
Daddr uint32
|
||||
Sport uint16
|
||||
Dport uint16
|
||||
}
|
||||
Data [4096]uint8
|
||||
}
|
||||
|
||||
// loadTlsTapper returns the embedded CollectionSpec for tlsTapper.
|
||||
@@ -66,24 +77,30 @@ type tlsTapperSpecs struct {
|
||||
//
|
||||
// It can be passed ebpf.CollectionSpec.Assign.
|
||||
type tlsTapperProgramSpecs struct {
|
||||
GoCryptoTlsRead *ebpf.ProgramSpec `ebpf:"go_crypto_tls_read"`
|
||||
GoCryptoTlsReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_read_ex"`
|
||||
GoCryptoTlsWrite *ebpf.ProgramSpec `ebpf:"go_crypto_tls_write"`
|
||||
GoCryptoTlsWriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_write_ex"`
|
||||
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
|
||||
GoCryptoTlsAbi0Read *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_read"`
|
||||
GoCryptoTlsAbi0ReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_read_ex"`
|
||||
GoCryptoTlsAbi0Write *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_write"`
|
||||
GoCryptoTlsAbi0WriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi0_write_ex"`
|
||||
GoCryptoTlsAbiInternalRead *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_read"`
|
||||
GoCryptoTlsAbiInternalReadEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_read_ex"`
|
||||
GoCryptoTlsAbiInternalWrite *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_write"`
|
||||
GoCryptoTlsAbiInternalWriteEx *ebpf.ProgramSpec `ebpf:"go_crypto_tls_abi_internal_write_ex"`
|
||||
SslRead *ebpf.ProgramSpec `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.ProgramSpec `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
|
||||
TcpRecvmsg *ebpf.ProgramSpec `ebpf:"tcp_recvmsg"`
|
||||
TcpSendmsg *ebpf.ProgramSpec `ebpf:"tcp_sendmsg"`
|
||||
}
|
||||
|
||||
// tlsTapperMapSpecs contains maps before they are loaded into the kernel.
|
||||
@@ -96,6 +113,7 @@ type tlsTapperMapSpecs struct {
|
||||
FileDescriptorToIpv4 *ebpf.MapSpec `ebpf:"file_descriptor_to_ipv4"`
|
||||
GoReadContext *ebpf.MapSpec `ebpf:"go_read_context"`
|
||||
GoWriteContext *ebpf.MapSpec `ebpf:"go_write_context"`
|
||||
GoidOffsetsMap *ebpf.MapSpec `ebpf:"goid_offsets_map"`
|
||||
Heap *ebpf.MapSpec `ebpf:"heap"`
|
||||
LogBuffer *ebpf.MapSpec `ebpf:"log_buffer"`
|
||||
OpensslReadContext *ebpf.MapSpec `ebpf:"openssl_read_context"`
|
||||
@@ -128,6 +146,7 @@ type tlsTapperMaps struct {
|
||||
FileDescriptorToIpv4 *ebpf.Map `ebpf:"file_descriptor_to_ipv4"`
|
||||
GoReadContext *ebpf.Map `ebpf:"go_read_context"`
|
||||
GoWriteContext *ebpf.Map `ebpf:"go_write_context"`
|
||||
GoidOffsetsMap *ebpf.Map `ebpf:"goid_offsets_map"`
|
||||
Heap *ebpf.Map `ebpf:"heap"`
|
||||
LogBuffer *ebpf.Map `ebpf:"log_buffer"`
|
||||
OpensslReadContext *ebpf.Map `ebpf:"openssl_read_context"`
|
||||
@@ -143,6 +162,7 @@ func (m *tlsTapperMaps) Close() error {
|
||||
m.FileDescriptorToIpv4,
|
||||
m.GoReadContext,
|
||||
m.GoWriteContext,
|
||||
m.GoidOffsetsMap,
|
||||
m.Heap,
|
||||
m.LogBuffer,
|
||||
m.OpensslReadContext,
|
||||
@@ -155,32 +175,42 @@ func (m *tlsTapperMaps) Close() error {
|
||||
//
|
||||
// It can be passed to loadTlsTapperObjects or ebpf.CollectionSpec.LoadAndAssign.
|
||||
type tlsTapperPrograms struct {
|
||||
GoCryptoTlsRead *ebpf.Program `ebpf:"go_crypto_tls_read"`
|
||||
GoCryptoTlsReadEx *ebpf.Program `ebpf:"go_crypto_tls_read_ex"`
|
||||
GoCryptoTlsWrite *ebpf.Program `ebpf:"go_crypto_tls_write"`
|
||||
GoCryptoTlsWriteEx *ebpf.Program `ebpf:"go_crypto_tls_write_ex"`
|
||||
SslRead *ebpf.Program `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.Program `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
|
||||
GoCryptoTlsAbi0Read *ebpf.Program `ebpf:"go_crypto_tls_abi0_read"`
|
||||
GoCryptoTlsAbi0ReadEx *ebpf.Program `ebpf:"go_crypto_tls_abi0_read_ex"`
|
||||
GoCryptoTlsAbi0Write *ebpf.Program `ebpf:"go_crypto_tls_abi0_write"`
|
||||
GoCryptoTlsAbi0WriteEx *ebpf.Program `ebpf:"go_crypto_tls_abi0_write_ex"`
|
||||
GoCryptoTlsAbiInternalRead *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_read"`
|
||||
GoCryptoTlsAbiInternalReadEx *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_read_ex"`
|
||||
GoCryptoTlsAbiInternalWrite *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_write"`
|
||||
GoCryptoTlsAbiInternalWriteEx *ebpf.Program `ebpf:"go_crypto_tls_abi_internal_write_ex"`
|
||||
SslRead *ebpf.Program `ebpf:"ssl_read"`
|
||||
SslReadEx *ebpf.Program `ebpf:"ssl_read_ex"`
|
||||
SslRetRead *ebpf.Program `ebpf:"ssl_ret_read"`
|
||||
SslRetReadEx *ebpf.Program `ebpf:"ssl_ret_read_ex"`
|
||||
SslRetWrite *ebpf.Program `ebpf:"ssl_ret_write"`
|
||||
SslRetWriteEx *ebpf.Program `ebpf:"ssl_ret_write_ex"`
|
||||
SslWrite *ebpf.Program `ebpf:"ssl_write"`
|
||||
SslWriteEx *ebpf.Program `ebpf:"ssl_write_ex"`
|
||||
SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
|
||||
SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
|
||||
SysEnterRead *ebpf.Program `ebpf:"sys_enter_read"`
|
||||
SysEnterWrite *ebpf.Program `ebpf:"sys_enter_write"`
|
||||
SysExitAccept4 *ebpf.Program `ebpf:"sys_exit_accept4"`
|
||||
SysExitConnect *ebpf.Program `ebpf:"sys_exit_connect"`
|
||||
TcpRecvmsg *ebpf.Program `ebpf:"tcp_recvmsg"`
|
||||
TcpSendmsg *ebpf.Program `ebpf:"tcp_sendmsg"`
|
||||
}
|
||||
|
||||
func (p *tlsTapperPrograms) Close() error {
|
||||
return _TlsTapperClose(
|
||||
p.GoCryptoTlsRead,
|
||||
p.GoCryptoTlsReadEx,
|
||||
p.GoCryptoTlsWrite,
|
||||
p.GoCryptoTlsWriteEx,
|
||||
p.GoCryptoTlsAbi0Read,
|
||||
p.GoCryptoTlsAbi0ReadEx,
|
||||
p.GoCryptoTlsAbi0Write,
|
||||
p.GoCryptoTlsAbi0WriteEx,
|
||||
p.GoCryptoTlsAbiInternalRead,
|
||||
p.GoCryptoTlsAbiInternalReadEx,
|
||||
p.GoCryptoTlsAbiInternalWrite,
|
||||
p.GoCryptoTlsAbiInternalWriteEx,
|
||||
p.SslRead,
|
||||
p.SslReadEx,
|
||||
p.SslRetRead,
|
||||
@@ -195,6 +225,8 @@ func (p *tlsTapperPrograms) Close() error {
|
||||
p.SysEnterWrite,
|
||||
p.SysExitAccept4,
|
||||
p.SysExitConnect,
|
||||
p.TcpRecvmsg,
|
||||
p.TcpSendmsg,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
Binary file not shown.
@@ -42,6 +42,7 @@
    "@mui/styles": "^5.8.0",
    "@types/lodash": "^4.14.182",
    "@uiw/react-textarea-code-editor": "^1.6.0",
    "ace-builds": "^1.6.0",
    "axios": "^0.27.2",
    "core-js": "^3.22.7",
    "highlight.js": "^11.5.1",
@@ -54,6 +55,7 @@
    "node-fetch": "^3.2.4",
    "numeral": "^2.0.6",
    "protobuf-decoder": "^0.1.2",
    "react-ace": "^9.0.0",
    "react-graph-vis": "^1.0.7",
    "react-lowlight": "^3.0.0",
    "react-router-dom": "^6.3.0",
@@ -63,6 +65,7 @@
    "recharts": "^2.1.10",
    "redoc": "^2.0.0-rc.71",
    "styled-components": "^5.3.5",
    "use-file-picker": "^1.4.2",
    "web-vitals": "^2.1.4",
    "xml-formatter": "^2.6.1"
  },

@@ -17,6 +17,6 @@
width: 100%;
width: -moz-available;
width: -webkit-fill-available;
width: strech;
width: stretch;
}
}
}
@@ -1,20 +1,20 @@
import React, {useCallback, useEffect, useMemo, useState} from "react";
import React, { useCallback, useEffect, useMemo, useState } from "react";
import styles from './EntriesList.module.sass';
import ScrollableFeedVirtualized from "react-scrollable-feed-virtualized";
import Moment from 'moment';
import {EntryItem} from "../EntryListItem/EntryListItem";
import { EntryItem } from "../EntryListItem/EntryListItem";
import down from "assets/downImg.svg";
import spinner from 'assets/spinner.svg';
import {RecoilState, useRecoilState, useRecoilValue, useSetRecoilState} from "recoil";
import { RecoilState, useRecoilState, useRecoilValue, useSetRecoilState } from "recoil";
import entriesAtom from "../../recoil/entries";
import queryAtom from "../../recoil/query";
import TrafficViewerApiAtom from "../../recoil/TrafficViewerApi";
import TrafficViewerApi from "../TrafficViewer/TrafficViewerApi";
import focusedEntryIdAtom from "../../recoil/focusedEntryId";
import {toast} from "react-toastify";
import {MAX_ENTRIES, TOAST_CONTAINER_ID} from "../../configs/Consts";
import { toast } from "react-toastify";
import { MAX_ENTRIES, TOAST_CONTAINER_ID } from "../../configs/Consts";
import tappingStatusAtom from "../../recoil/tappingStatus";
import leftOffTopAtom from "../../recoil/leftOffTop";
import Moment from "moment";

interface EntriesListProps {
    listEntryREF: any;
@@ -5,14 +5,15 @@ import makeStyles from '@mui/styles/makeStyles';
import Protocol from "../UI/Protocol/Protocol"
import Queryable from "../UI/Queryable/Queryable";
import { toast } from "react-toastify";
import { RecoilState, useRecoilValue } from "recoil";
import { RecoilState, useRecoilState, useRecoilValue } from "recoil";
import focusedEntryIdAtom from "../../recoil/focusedEntryId";
import TrafficViewerApi from "../TrafficViewer/TrafficViewerApi";
import TrafficViewerApiAtom from "../../recoil/TrafficViewerApi/atom";
import queryAtom from "../../recoil/query/atom";
import useWindowDimensions, { useRequestTextByWidth } from "../../hooks/WindowDimensionsHook";
import { TOAST_CONTAINER_ID } from "../../configs/Consts";
import spinner from "assets/spinner.svg";
import entryDataAtom from "../../recoil/entryData";
import { LoadingWrapper } from "../UI/withLoading/withLoading";

const useStyles = makeStyles(() => ({
    entryTitle: {
@@ -107,7 +108,7 @@ export const EntryDetailed = () => {
    const trafficViewerApi = useRecoilValue(TrafficViewerApiAtom as RecoilState<TrafficViewerApi>)
    const query = useRecoilValue(queryAtom);
    const [isLoading, setIsLoading] = useState(false);
    const [entryData, setEntryData] = useState(null);
    const [entryData, setEntryData] = useRecoilState(entryDataAtom)

    useEffect(() => {
        setEntryData(null);
@@ -134,22 +135,11 @@ export const EntryDetailed = () => {
        // eslint-disable-next-line
    }, [focusedEntryId]);

    return <React.Fragment>
        {isLoading && <div style={{textAlign: "center", width: "100%", marginTop: 50}}><img alt="spinner" src={spinner} style={{height: 60}}/></div>}
        {!isLoading && entryData && <EntryTitle
            protocol={entryData.protocol}
            data={entryData.data}
            elapsedTime={entryData.data.elapsedTime}
        />}
        {!isLoading && entryData && <EntrySummary entry={entryData.base} namespace={entryData.data.namespace} />}
        <React.Fragment>
            {!isLoading && entryData && <EntryViewer
                representation={entryData.representation}
                isRulesEnabled={entryData.isRulesEnabled}
                rulesMatched={entryData.rulesMatched}
                elapsedTime={entryData.data.elapsedTime}
                color={entryData.protocol.backgroundColor}
            />}
        </React.Fragment>
    </React.Fragment>
    return <LoadingWrapper isLoading={isLoading} loaderMargin={50} loaderHeight={60}>
        {entryData && <React.Fragment>
            <EntryTitle protocol={entryData.protocol} data={entryData.data} elapsedTime={entryData.data.elapsedTime} />
            <EntrySummary entry={entryData.base} namespace={entryData.data.namespace} />
            <EntryViewer representation={entryData.representation} color={entryData.protocol.backgroundColor} />
        </React.Fragment>}
    </LoadingWrapper>
};
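A minimal usage sketch of the LoadingWrapper pattern adopted above (not part of the diff; the wrapper component, its props and the relative import path are illustrative assumptions):

// Hypothetical sketch: any panel that fetches data before rendering can reuse
// LoadingWrapper the same way EntryDetailed does in the hunk above.
import React from "react";
import { LoadingWrapper } from "../UI/withLoading/withLoading"; // assumed relative path

const Panel: React.FC<{ data: object | null, isLoading: boolean }> = ({ data, isLoading }) => (
    <LoadingWrapper isLoading={isLoading} loaderMargin={50} loaderHeight={60}>
        {data ? <pre>{JSON.stringify(data, null, 2)}</pre> : null}
    </LoadingWrapper>
);

export default Panel;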
@@ -117,6 +117,52 @@ interface EntryBodySectionProps {
    selector?: string,
}

export const formatRequest = (bodyRef: any, contentType: string, decodeBase64: boolean = true, isBase64Encoding: boolean = false, isPretty: boolean = true): string => {
    const { body } = bodyRef
    if (!decodeBase64 || !body) return body;

    const chunk = body.slice(0, MAXIMUM_BYTES_TO_FORMAT);
    const bodyBuf = isBase64Encoding ? atob(chunk) : chunk;

    try {
        if (jsonLikeFormats.some(format => contentType?.indexOf(format) > -1)) {
            if (!isPretty) return bodyBuf;
            return Utils.isJson(bodyBuf) ? jsonBeautify(JSON.parse(bodyBuf), null, 2, 80) : bodyBuf
        } else if (xmlLikeFormats.some(format => contentType?.indexOf(format) > -1)) {
            if (!isPretty) return bodyBuf;
            return xmlBeautify(bodyBuf, {
                indentation: '  ',
                filter: (node) => node.type !== 'Comment',
                collapseContent: true,
                lineSeparator: '\n'
            });
        } else if (protobufFormats.some(format => contentType?.indexOf(format) > -1)) {
            // Replace all non printable characters (ASCII)
            const protobufDecoder = new ProtobufDecoder(bodyBuf, true);
            const protobufDecoded = protobufDecoder.decode().toSimple();
            if (!isPretty) return JSON.stringify(protobufDecoded);
            return jsonBeautify(protobufDecoded, null, 2, 80);
        }
    } catch (error) {
        console.error(error)
        bodyRef.body = bodyBuf
        throw error
    }

    return bodyBuf;
}

export const formatRequestWithOutError = (body: any, contentType: string, decodeBase64: boolean = true, isBase64Encoding: boolean = false, isPretty: boolean = true): string => {
    const bodyRef = { body }
    try {
        return formatRequest(bodyRef, contentType, decodeBase64, isBase64Encoding, isPretty)
    } catch (error) {
        console.warn(error)
    }

    return bodyRef.body
}
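A short usage sketch for the non-throwing helper added above (not part of the diff; the import path and sample payload are assumptions):

// Hypothetical sketch: pretty-printing a base64-encoded JSON body with
// formatRequestWithOutError as declared in the hunk above.
import { formatRequestWithOutError } from "./EntrySections"; // assumed path

const rawBody = btoa('{"name":"kubeshark","ok":true}'); // example payload
const pretty = formatRequestWithOutError(
    rawBody,             // body as stored on the entry
    "application/json",  // contentType selects the json/xml/protobuf branch
    true,                // decodeBase64
    true,                // isBase64Encoding: the body above is base64
    true                 // isPretty: indent instead of returning the raw buffer
);
console.log(pretty);     // indented JSON, or the raw body if formatting failed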
export const EntryBodySection: React.FC<EntryBodySectionProps> = ({
|
||||
title,
|
||||
color,
|
||||
@@ -139,42 +185,20 @@ export const EntryBodySection: React.FC<EntryBodySectionProps> = ({
|
||||
!isLineNumbersGreaterThenOne && setShowLineNumbers(false);
|
||||
}, [isLineNumbersGreaterThenOne, isPretty])
|
||||
|
||||
const formatTextBody = useCallback((body: any): string => {
|
||||
if (!decodeBase64) return body;
|
||||
|
||||
const chunk = body.slice(0, MAXIMUM_BYTES_TO_FORMAT);
|
||||
const bodyBuf = isBase64Encoding ? atob(chunk) : chunk;
|
||||
|
||||
const formatTextBody = useCallback((body) => {
|
||||
const bodyRef = { body }
|
||||
try {
|
||||
if (jsonLikeFormats.some(format => contentType?.indexOf(format) > -1)) {
|
||||
if (!isPretty) return bodyBuf;
|
||||
return jsonBeautify(JSON.parse(bodyBuf), null, 2, 80);
|
||||
} else if (xmlLikeFormats.some(format => contentType?.indexOf(format) > -1)) {
|
||||
if (!isPretty) return bodyBuf;
|
||||
return xmlBeautify(bodyBuf, {
|
||||
indentation: ' ',
|
||||
filter: (node) => node.type !== 'Comment',
|
||||
collapseContent: true,
|
||||
lineSeparator: '\n'
|
||||
});
|
||||
} else if (protobufFormats.some(format => contentType?.indexOf(format) > -1)) {
|
||||
// Replace all non printable characters (ASCII)
|
||||
const protobufDecoder = new ProtobufDecoder(bodyBuf, true);
|
||||
const protobufDecoded = protobufDecoder.decode().toSimple();
|
||||
if (!isPretty) return JSON.stringify(protobufDecoded);
|
||||
return jsonBeautify(protobufDecoded, null, 2, 80);
|
||||
}
|
||||
return formatRequest(bodyRef, contentType, decodeBase64, isBase64Encoding, isPretty)
|
||||
} catch (error) {
|
||||
if (String(error).includes("More than one message in")) {
|
||||
if (isDecodeGrpc)
|
||||
setIsDecodeGrpc(false);
|
||||
} else if (String(error).includes("Failed to parse")) {
|
||||
console.warn(error);
|
||||
} else {
|
||||
console.error(error);
|
||||
}
|
||||
}
|
||||
return bodyBuf;
|
||||
|
||||
return bodyRef.body
|
||||
}, [isPretty, contentType, isDecodeGrpc, decodeBase64, isBase64Encoding])
|
||||
|
||||
const formattedText = useMemo(() => formatTextBody(content), [formatTextBody, content]);
|
||||
@@ -257,110 +281,3 @@ export const EntryTableSection: React.FC<EntrySectionProps> = ({ title, color, a
|
||||
}
|
||||
</React.Fragment>
|
||||
}
|
||||
|
||||
interface EntryPolicySectionProps {
|
||||
title: string,
|
||||
color: string,
|
||||
latency?: number,
|
||||
arrayToIterate: any[],
|
||||
}
|
||||
|
||||
interface EntryPolicySectionCollapsibleTitleProps {
|
||||
label: string;
|
||||
matched: string;
|
||||
expanded: boolean;
|
||||
setExpanded: any;
|
||||
}
|
||||
|
||||
const EntryPolicySectionCollapsibleTitle: React.FC<EntryPolicySectionCollapsibleTitleProps> = ({ label, matched, expanded, setExpanded }) => {
|
||||
return <div className={styles.title}>
|
||||
<span
|
||||
className={`${styles.button}
|
||||
${expanded ? styles.expanded : ''}`}
|
||||
onClick={() => {
|
||||
setExpanded(!expanded)
|
||||
}}
|
||||
>
|
||||
{expanded ? '-' : '+'}
|
||||
</span>
|
||||
<span>
|
||||
<tr className={styles.dataLine}>
|
||||
<td className={`${styles.dataKey} ${styles.rulesTitleSuccess}`}>{label}</td>
|
||||
<td className={`${styles.dataKey} ${matched === 'Success' ? styles.rulesMatchedSuccess : styles.rulesMatchedFailure}`}>{matched}</td>
|
||||
</tr>
|
||||
</span>
|
||||
</div>
|
||||
}
|
||||
|
||||
interface EntryPolicySectionContainerProps {
|
||||
label: string;
|
||||
matched: string;
|
||||
children?: any;
|
||||
}
|
||||
|
||||
export const EntryPolicySectionContainer: React.FC<EntryPolicySectionContainerProps> = ({ label, matched, children }) => {
|
||||
const [expanded, setExpanded] = useState(false);
|
||||
return <CollapsibleContainer
|
||||
className={styles.collapsibleContainer}
|
||||
expanded={expanded}
|
||||
title={<EntryPolicySectionCollapsibleTitle label={label} matched={matched} expanded={expanded} setExpanded={setExpanded} />}
|
||||
>
|
||||
{children}
|
||||
</CollapsibleContainer>
|
||||
}
|
||||
|
||||
export const EntryTablePolicySection: React.FC<EntryPolicySectionProps> = ({ title, color, latency, arrayToIterate }) => {
|
||||
return <React.Fragment>
|
||||
{
|
||||
arrayToIterate && arrayToIterate.length > 0 ?
|
||||
<React.Fragment>
|
||||
<EntrySectionContainer title={title} color={color}>
|
||||
<table>
|
||||
<tbody>
|
||||
{arrayToIterate.map(({ rule, matched }, index) => {
|
||||
return (
|
||||
<EntryPolicySectionContainer key={index} label={rule.Name} matched={matched && (rule.Type === 'slo' ? rule.ResponseTime >= latency : true) ? "Success" : "Failure"}>
|
||||
{
|
||||
<React.Fragment>
|
||||
{
|
||||
rule.Key &&
|
||||
<tr className={styles.dataValue}><td><b>Key:</b></td> <td>{rule.Key}</td></tr>
|
||||
}
|
||||
{
|
||||
rule.ResponseTime !== 0 &&
|
||||
<tr className={styles.dataValue}><td><b>Response Time:</b></td> <td>{rule.ResponseTime}</td></tr>
|
||||
}
|
||||
{
|
||||
rule.Method &&
|
||||
<tr className={styles.dataValue}><td><b>Method:</b></td> <td>{rule.Method}</td></tr>
|
||||
}
|
||||
{
|
||||
rule.Path &&
|
||||
<tr className={styles.dataValue}><td><b>Path:</b></td> <td>{rule.Path}</td></tr>
|
||||
}
|
||||
{
|
||||
rule.Service &&
|
||||
<tr className={styles.dataValue}><td><b>Service:</b></td> <td>{rule.Service}</td></tr>
|
||||
}
|
||||
{
|
||||
rule.Type &&
|
||||
<tr className={styles.dataValue}><td><b>Type:</b></td> <td>{rule.Type}</td></tr>
|
||||
}
|
||||
{
|
||||
rule.Value &&
|
||||
<tr className={styles.dataValue}><td><b>Value:</b></td> <td>{rule.Value}</td></tr>
|
||||
}
|
||||
</React.Fragment>
|
||||
}
|
||||
</EntryPolicySectionContainer>
|
||||
)
|
||||
}
|
||||
)
|
||||
}
|
||||
</tbody>
|
||||
</table>
|
||||
</EntrySectionContainer>
|
||||
</React.Fragment> : <span className={styles.noRules}>No rules could be applied to this request.</span>
|
||||
}
|
||||
</React.Fragment>
|
||||
}
|
||||
|
||||
@@ -0,0 +1,79 @@
import React, { useState, useCallback, useEffect, useMemo } from "react"
import { useRecoilValue, useSetRecoilState } from "recoil"
import entryDataAtom from "../../../recoil/entryData"
import SectionsRepresentation from "./SectionsRepresentation";
import { ReactComponent as ReplayIcon } from './replay.svg';
import styles from './EntryViewer.module.sass';
import { Tabs } from "../../UI";
import replayRequestModalOpenAtom from "../../../recoil/replayRequestModalOpen";
import entryDetailedConfigAtom, { EntryDetailedConfig } from "../../../recoil/entryDetailedConfig";

const enabledProtocolsForReplay = ["http"]

export enum TabsEnum {
    Request = 0,
    Response = 1
}

export const AutoRepresentation: React.FC<any> = ({ representation, color, openedTab = TabsEnum.Request, isDisplayReplay = false }) => {
    const entryData = useRecoilValue(entryDataAtom)
    const { isReplayEnabled } = useRecoilValue<EntryDetailedConfig>(entryDetailedConfigAtom)
    const setIsOpenRequestModal = useSetRecoilState(replayRequestModalOpenAtom)
    const isReplayDisplayed = useCallback(() => {
        return enabledProtocolsForReplay.find(x => x === entryData.protocol.name) && isDisplayReplay && isReplayEnabled
    }, [entryData.protocol.name, isDisplayReplay, isReplayEnabled])

    const { request, response } = JSON.parse(representation);

    const TABS = useMemo(() => {
        const arr = [
            {
                tab: 'Request',
                badge: null
            }]

        if (response) {
            arr.push({
                tab: 'Response',
                badge: null
            });
        }

        return arr
    }, [response]);

    const [currentTab, setCurrentTab] = useState(TABS[0].tab);

    const getOpenedTabIndex = useCallback(() => {
        const currentIndex = TABS.findIndex(current => current.tab === currentTab)
        return currentIndex > -1 ? currentIndex : 0
    }, [TABS, currentTab])

    useEffect(() => {
        if (openedTab) {
            setCurrentTab(TABS[openedTab].tab)
        }

        // eslint-disable-next-line react-hooks/exhaustive-deps
    }, [])

    // Don't fail even if `representation` is an empty string
    if (!representation) {
        return <React.Fragment></React.Fragment>;
    }

    return <div className={styles.Entry}>
        {<div className={styles.body}>
            <div className={styles.bodyHeader}>
                <Tabs tabs={TABS} currentTab={currentTab} color={color} onChange={setCurrentTab} leftAligned />
                {isReplayDisplayed() && <span title="Replay Request"><ReplayIcon fill={color} stroke={color} style={{ marginLeft: "10px", cursor: "pointer", height: "22px" }} onClick={() => setIsOpenRequestModal(true)} /></span>}
            </div>
            {getOpenedTabIndex() === TabsEnum.Request && <React.Fragment>
                <SectionsRepresentation data={request} color={color} requestRepresentation={request} />
            </React.Fragment>}
            {response && getOpenedTabIndex() === TabsEnum.Response && <React.Fragment>
                <SectionsRepresentation data={response} color={color} />
            </React.Fragment>}
        </div>}
    </div>;
}
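A hypothetical usage sketch for the new AutoRepresentation component (not part of the diff); it assumes the caller is rendered under the same RecoilRoot as the traffic viewer so the entry-data atoms resolve:

// Hypothetical sketch: reusing AutoRepresentation outside EntryViewer, e.g. in a
// replay-request preview, opened directly on the Response tab.
import React from "react";
import { AutoRepresentation, TabsEnum } from "./AutoRepresentation";

const ReplayPreview: React.FC<{ representation: string, color: string }> = ({ representation, color }) => (
    <AutoRepresentation
        representation={representation}   // JSON string containing { request, response }
        color={color}
        openedTab={TabsEnum.Response}
        isDisplayReplay={false}           // hide the replay icon inside the preview
    />
);

export default ReplayPreview;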
@@ -52,8 +52,13 @@
|
||||
border-radius: 4px
|
||||
padding: 10px
|
||||
position: relative
|
||||
|
||||
.bodyHeader
|
||||
padding: 0 1rem
|
||||
display: flex
|
||||
align-items: center
|
||||
justify-content: space-between
|
||||
|
||||
.endpointURL
|
||||
font-size: .75rem
|
||||
display: block
|
||||
|
||||
@@ -1,107 +1,16 @@
|
||||
import React, {useState} from 'react';
|
||||
import styles from './EntryViewer.module.sass';
|
||||
import Tabs from "../../UI/Tabs/Tabs";
|
||||
import {EntryTableSection, EntryBodySection, EntryTablePolicySection} from "../EntrySections/EntrySections";
|
||||
|
||||
enum SectionTypes {
|
||||
SectionTable = "table",
|
||||
SectionBody = "body",
|
||||
}
|
||||
|
||||
const SectionsRepresentation: React.FC<any> = ({data, color}) => {
|
||||
const sections = []
|
||||
|
||||
if (data) {
|
||||
for (const [i, row] of data.entries()) {
|
||||
switch (row.type) {
|
||||
case SectionTypes.SectionTable:
|
||||
sections.push(
|
||||
<EntryTableSection key={i} title={row.title} color={color} arrayToIterate={JSON.parse(row.data)}/>
|
||||
)
|
||||
break;
|
||||
case SectionTypes.SectionBody:
|
||||
sections.push(
|
||||
<EntryBodySection key={i} title={row.title} color={color} content={row.data} encoding={row.encoding} contentType={row.mimeType} selector={row.selector}/>
|
||||
)
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return <React.Fragment>{sections}</React.Fragment>;
|
||||
}
|
||||
|
||||
const AutoRepresentation: React.FC<any> = ({representation, isRulesEnabled, rulesMatched, elapsedTime, color}) => {
|
||||
var TABS = [
|
||||
{
|
||||
tab: 'Request'
|
||||
}
|
||||
];
|
||||
const [currentTab, setCurrentTab] = useState(TABS[0].tab);
|
||||
|
||||
// Don't fail even if `representation` is an empty string
|
||||
if (!representation) {
|
||||
return <React.Fragment></React.Fragment>;
|
||||
}
|
||||
|
||||
const {request, response} = JSON.parse(representation);
|
||||
|
||||
let responseTabIndex = 0;
|
||||
let rulesTabIndex = 0;
|
||||
|
||||
if (response) {
|
||||
TABS.push(
|
||||
{
|
||||
tab: 'Response',
|
||||
}
|
||||
);
|
||||
responseTabIndex = TABS.length - 1;
|
||||
}
|
||||
|
||||
if (isRulesEnabled) {
|
||||
TABS.push(
|
||||
{
|
||||
tab: 'Rules',
|
||||
}
|
||||
);
|
||||
rulesTabIndex = TABS.length - 1;
|
||||
}
|
||||
|
||||
return <div className={styles.Entry}>
|
||||
{<div className={styles.body}>
|
||||
<div className={styles.bodyHeader}>
|
||||
<Tabs tabs={TABS} currentTab={currentTab} color={color} onChange={setCurrentTab} leftAligned/>
|
||||
</div>
|
||||
{currentTab === TABS[0].tab && <React.Fragment>
|
||||
<SectionsRepresentation data={request} color={color}/>
|
||||
</React.Fragment>}
|
||||
{response && currentTab === TABS[responseTabIndex].tab && <React.Fragment>
|
||||
<SectionsRepresentation data={response} color={color}/>
|
||||
</React.Fragment>}
|
||||
{isRulesEnabled && currentTab === TABS[rulesTabIndex].tab && <React.Fragment>
|
||||
<EntryTablePolicySection title={'Rule'} color={color} latency={elapsedTime} arrayToIterate={rulesMatched ? rulesMatched : []}/>
|
||||
</React.Fragment>}
|
||||
</div>}
|
||||
</div>;
|
||||
}
|
||||
import React from 'react';
|
||||
import { AutoRepresentation } from './AutoRepresentation';
|
||||
|
||||
interface Props {
|
||||
representation: any;
|
||||
isRulesEnabled: boolean;
|
||||
rulesMatched: any;
|
||||
color: string;
|
||||
elapsedTime: number;
|
||||
}
|
||||
|
||||
const EntryViewer: React.FC<Props> = ({representation, isRulesEnabled, rulesMatched, elapsedTime, color}) => {
|
||||
const EntryViewer: React.FC<Props> = ({representation, color}) => {
|
||||
return <AutoRepresentation
|
||||
representation={representation}
|
||||
isRulesEnabled={isRulesEnabled}
|
||||
rulesMatched={rulesMatched}
|
||||
elapsedTime={elapsedTime}
|
||||
color={color}
|
||||
isDisplayReplay={true}
|
||||
/>
|
||||
};
|
||||
|
||||
|
||||
@@ -0,0 +1,34 @@
|
||||
import React from "react";
|
||||
import { EntryTableSection, EntryBodySection } from "../EntrySections/EntrySections";
|
||||
|
||||
enum SectionTypes {
|
||||
SectionTable = "table",
|
||||
SectionBody = "body",
|
||||
}
|
||||
|
||||
const SectionsRepresentation: React.FC<any> = ({ data, color }) => {
|
||||
const sections = []
|
||||
|
||||
if (data) {
|
||||
for (const [i, row] of data.entries()) {
|
||||
switch (row.type) {
|
||||
case SectionTypes.SectionTable:
|
||||
sections.push(
|
||||
<EntryTableSection key={i} title={row.title} color={color} arrayToIterate={JSON.parse(row.data)} />
|
||||
)
|
||||
break;
|
||||
case SectionTypes.SectionBody:
|
||||
sections.push(
|
||||
<EntryBodySection key={i} title={row.title} color={color} content={row.data} encoding={row.encoding} contentType={row.mimeType} selector={row.selector} />
|
||||
)
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return <React.Fragment>{sections}</React.Fragment>;
|
||||
}
|
||||
|
||||
export default SectionsRepresentation
|
||||
@@ -0,0 +1 @@
<?xml version="1.0" ?><svg viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg"><title/><path d="M16,12a1,1,0,0,1-.49.86l-5,3A1,1,0,0,1,10,16a1,1,0,0,1-.49-.13A1,1,0,0,1,9,15V9a1,1,0,0,1,1.51-.86l5,3A1,1,0,0,1,16,12Z" fill="#464646"/><path d="M21.92,5.09a1,1,0,0,0-1.07.15L19.94,6A9.84,9.84,0,0,0,12,2a10,10,0,1,0,9.42,13.33,1,1,0,0,0-1.89-.66A8,8,0,1,1,12,4a7.87,7.87,0,0,1,6.42,3.32l-1.07.92A1,1,0,0,0,18,10h3.5a1,1,0,0,0,1-1V6A1,1,0,0,0,21.92,5.09Z" fill="#464646"/></svg>
After Width: | Height: | Size: 477 B
4  ui-common/src/components/EntryDetailed/assets/run.svg  Normal file
@@ -0,0 +1,4 @@
<svg width="30" height="30" viewBox="0 0 30 30" fill="none" xmlns="http://www.w3.org/2000/svg">
<circle cx="15" cy="15" r="13.5" stroke="#205CF5" stroke-width="3"/>
<path d="M20 15C20 15.3167 19.8392 15.6335 19.5175 15.8189L12.5051 19.8624C11.8427 20.2444 11 19.7858 11 19.0435V10.9565C11 10.2142 11.8427 9.75564 12.5051 10.1376L19.5175 14.1811C19.8392 14.3665 20 14.6833 20 15Z" fill="#205CF5"/>
</svg>
After Width: | Height: | Size: 404 B
@@ -20,31 +20,6 @@
|
||||
.rowSelected
|
||||
border: 1px $blue-color solid
|
||||
|
||||
.ruleSuccessRow
|
||||
background: #E8FFF1
|
||||
|
||||
.ruleSuccessRowSelected
|
||||
border: 1px #6FCF97 solid
|
||||
border-left: 5px #6FCF97 solid
|
||||
|
||||
.ruleFailureRow
|
||||
background: #FFE9EF
|
||||
|
||||
.ruleFailureRowSelected
|
||||
border: 1px $failure-color solid
|
||||
border-left: 5px $failure-color solid
|
||||
|
||||
.ruleNumberText
|
||||
font-size: 12px
|
||||
font-weight: 600
|
||||
white-space: nowrap
|
||||
|
||||
.ruleNumberTextFailure
|
||||
color: #DB2156
|
||||
|
||||
.ruleNumberTextSuccess
|
||||
color: #219653
|
||||
|
||||
.resolvedName
|
||||
text-overflow: ellipsis
|
||||
white-space: nowrap
|
||||
|
||||
@@ -37,13 +37,6 @@ interface Entry {
|
||||
dst: TCPInterface,
|
||||
isOutgoing?: boolean;
|
||||
latency: number;
|
||||
rules: Rules;
|
||||
}
|
||||
|
||||
interface Rules {
|
||||
status: boolean;
|
||||
latency: number;
|
||||
numberOfRules: number;
|
||||
}
|
||||
|
||||
interface EntryProps {
|
||||
@@ -67,7 +60,6 @@ export const EntryItem: React.FC<EntryProps> = ({entry, style, headingMode, name
|
||||
const isSelected = focusedEntryId === entry.id;
|
||||
|
||||
const classification = getClassification(entry.status)
|
||||
const numberOfRules = entry.rules.numberOfRules
|
||||
let ingoingIcon;
|
||||
let outgoingIcon;
|
||||
switch(classification) {
|
||||
@@ -87,35 +79,6 @@ export const EntryItem: React.FC<EntryProps> = ({entry, style, headingMode, name
|
||||
break;
|
||||
}
|
||||
}
|
||||
let additionalRulesProperties = "";
|
||||
let ruleSuccess = true;
|
||||
let rule = 'latency' in entry.rules
|
||||
if (rule) {
|
||||
if (entry.rules.latency !== -1) {
|
||||
if (entry.rules.latency >= entry.latency || !('latency' in entry)) {
|
||||
additionalRulesProperties = styles.ruleSuccessRow
|
||||
ruleSuccess = true
|
||||
} else {
|
||||
additionalRulesProperties = styles.ruleFailureRow
|
||||
ruleSuccess = false
|
||||
}
|
||||
if (isSelected) {
|
||||
additionalRulesProperties += ` ${entry.rules.latency >= entry.latency ? styles.ruleSuccessRowSelected : styles.ruleFailureRowSelected}`
|
||||
}
|
||||
} else {
|
||||
if (entry.rules.status) {
|
||||
additionalRulesProperties = styles.ruleSuccessRow
|
||||
ruleSuccess = true
|
||||
} else {
|
||||
additionalRulesProperties = styles.ruleFailureRow
|
||||
ruleSuccess = false
|
||||
}
|
||||
if (isSelected) {
|
||||
additionalRulesProperties += ` ${entry.rules.status ? styles.ruleSuccessRowSelected : styles.ruleFailureRowSelected}`
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
const isStatusCodeEnabled = ((entry.proto.name === "http" && "status" in entry) || entry.status !== 0);
|
||||
|
||||
@@ -123,7 +86,7 @@ export const EntryItem: React.FC<EntryProps> = ({entry, style, headingMode, name
|
||||
<div
|
||||
id={`entry-${entry.id}`}
|
||||
className={`${styles.row}
|
||||
${isSelected && !rule ? styles.rowSelected : additionalRulesProperties}`}
|
||||
${isSelected ? styles.rowSelected : ""}`}
|
||||
onClick={() => {
|
||||
if (!setFocusedEntryId) return;
|
||||
setFocusedEntryId(entry.id);
|
||||
@@ -187,13 +150,7 @@ export const EntryItem: React.FC<EntryProps> = ({entry, style, headingMode, name
|
||||
</Queryable>
|
||||
</div>
|
||||
</div>
|
||||
{
|
||||
rule ?
|
||||
<div className={`${styles.ruleNumberText} ${ruleSuccess ? styles.ruleNumberTextSuccess : styles.ruleNumberTextFailure} ${rule ? styles.separatorRight : ""}`}>
|
||||
{`Rules (${numberOfRules})`}
|
||||
</div>
|
||||
: ""
|
||||
}
|
||||
|
||||
<div className={styles.separatorRight}>
|
||||
{headingMode ? <Queryable
|
||||
query={`namespace == "${namespace}"`}
|
||||
|
||||
@@ -88,8 +88,17 @@
|
||||
.greenIndicatorContainer
|
||||
border: 2px #6fcf9770 solid
|
||||
|
||||
@keyframes biggerIndication
|
||||
0%
|
||||
transform: scale(2.0)
|
||||
100%
|
||||
transform: scale(0.7)
|
||||
|
||||
|
||||
|
||||
.greenIndicator
|
||||
background-color: #27AE60
|
||||
animation: biggerIndication 1.5s ease-out 0s alternate infinite none running
|
||||
|
||||
.orangeIndicatorContainer
|
||||
border: 2px #fabd5970 solid
|
||||
|
||||
@@ -20,6 +20,9 @@ import tappingStatusAtom from "../../recoil/tappingStatus/atom";
|
||||
import { TOAST_CONTAINER_ID } from "../../configs/Consts";
|
||||
import leftOffTopAtom from "../../recoil/leftOffTop";
|
||||
import { DEFAULT_LEFTOFF, DEFAULT_FETCH, DEFAULT_FETCH_TIMEOUT_MS } from '../../hooks/useWS';
|
||||
import ReplayRequestModalContainer from "../modals/ReplayRequestModal/ReplayRequestModal";
|
||||
import replayRequestModalOpenAtom from "../../recoil/replayRequestModalOpen";
|
||||
import entryDetailedConfigAtom, { EntryDetailedConfig } from "../../recoil/entryDetailedConfig";

const useLayoutStyles = makeStyles(() => ({
  details: {
@@ -49,18 +52,22 @@ interface TrafficViewerProps {
  webSocketUrl: string,
  shouldCloseWebSocket: boolean,
  setShouldCloseWebSocket: (flag: boolean) => void,
  isDemoBannerView: boolean
  isDemoBannerView: boolean,
  entryDetailedConfig: EntryDetailedConfig
}

export const TrafficViewer: React.FC<TrafficViewerProps> = ({
  trafficViewerApiProp,
  actionButtons, isShowStatusBar, webSocketUrl,
  shouldCloseWebSocket, setShouldCloseWebSocket, isDemoBannerView
}) => {
  trafficViewerApiProp,
  webSocketUrl,
  actionButtons,
  isShowStatusBar, isDemoBannerView,
  shouldCloseWebSocket, setShouldCloseWebSocket,
  entryDetailedConfig }) => {

  const classes = useLayoutStyles();
  const setEntries = useSetRecoilState(entriesAtom);
  const setFocusedEntryId = useSetRecoilState(focusedEntryIdAtom);
  const setEntryDetailedConfigAtom = useSetRecoilState(entryDetailedConfigAtom)
  const query = useRecoilValue(queryAtom);
  const setTrafficViewerApiState = useSetRecoilState(trafficViewerApiAtom as RecoilState<TrafficViewerApi>)
  const [tappingStatus, setTappingStatus] = useRecoilState(tappingStatusAtom);
@@ -69,6 +76,7 @@ export const TrafficViewer: React.FC<TrafficViewerProps> = ({
  const [wsReadyState, setWsReadyState] = useState(0);
  const setLeftOffTop = useSetRecoilState(leftOffTopAtom);
  const scrollableRef = useRef(null);
  const isOpenReplayModal = useRecoilValue(replayRequestModalOpenAtom)

  const ws = useRef(null);
@@ -87,6 +95,10 @@ export const TrafficViewer: React.FC<TrafficViewerProps> = ({
    }
  }, [shouldCloseWebSocket, setShouldCloseWebSocket, closeWebSocket])

  useEffect(() => {
    isOpenReplayModal && setShouldCloseWebSocket(true)
  }, [isOpenReplayModal, setShouldCloseWebSocket])

  const sendQueryWhenWsOpen = useCallback((leftOff: string, query: string, fetch: number, fetchTimeoutMs: number) => {
    setTimeout(() => {
      if (ws?.current?.readyState === WebSocket.OPEN) {
@@ -176,6 +188,10 @@ export const TrafficViewer: React.FC<TrafficViewerProps> = ({
    };
  }, []);

  useEffect(() => {
    setEntryDetailedConfigAtom(entryDetailedConfig)
  }, [entryDetailedConfig, setEntryDetailedConfigAtom])

  const getConnectionIndicator = () => {
    switch (wsReadyState) {
      case WebSocket.OPEN:
@@ -251,7 +267,7 @@ export const TrafficViewer: React.FC<TrafficViewerProps> = ({
        </div>
      </div>
      <div className={classes.details} id="rightSideContainer">
        <EntryDetailed/>
        <EntryDetailed />
      </div>
    </div>}
  </div>
@@ -259,25 +275,20 @@ export const TrafficViewer: React.FC<TrafficViewerProps> = ({
};

const MemorizedTrafficViewer = React.memo(TrafficViewer)
const TrafficViewerContainer: React.FC<TrafficViewerProps> = ({
  trafficViewerApiProp,
  actionButtons, isShowStatusBar = true,
  webSocketUrl, shouldCloseWebSocket, setShouldCloseWebSocket, isDemoBannerView
}) => {
const TrafficViewerContainer: React.FC<TrafficViewerProps> = (props) => {
  return <RecoilRoot>
    <MemorizedTrafficViewer actionButtons={actionButtons} isShowStatusBar={isShowStatusBar} webSocketUrl={webSocketUrl}
      shouldCloseWebSocket={shouldCloseWebSocket} setShouldCloseWebSocket={setShouldCloseWebSocket} trafficViewerApiProp={trafficViewerApiProp}
      isDemoBannerView={isDemoBannerView}/>
    <MemorizedTrafficViewer {...props} />
    <ToastContainer enableMultiContainer containerId={TOAST_CONTAINER_ID}
      position="bottom-right"
      autoClose={5000}
      hideProgressBar={false}
      newestOnTop={false}
      closeOnClick
      rtl={false}
      pauseOnFocusLoss
      draggable
      pauseOnHover/>
      position="bottom-right"
      autoClose={5000}
      hideProgressBar={false}
      newestOnTop={false}
      closeOnClick
      rtl={false}
      pauseOnFocusLoss
      draggable
      pauseOnHover />
    <ReplayRequestModalContainer />
  </RecoilRoot>
}

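The hunks above extend TrafficViewerProps with an entryDetailedConfig prop, mirror it into the entryDetailedConfigAtom Recoil atom, close the WebSocket while the replay-request modal is open, and simplify TrafficViewerContainer to forward all props with a spread. A minimal sketch of how a host application might wire the updated container; the import paths, the API stand-in, the WebSocket URL and the EntryDetailedConfig value are assumptions, only the prop names come from TrafficViewerProps:

// Illustrative host wiring -- not part of this diff.
import React, { useState } from "react";
import TrafficViewerContainer from "./TrafficViewer";                 // assumed path/export
import { EntryDetailedConfig } from "./recoil/entryDetailedConfig";   // assumed path

const api: any = {};                               // stand-in for a TrafficViewerApi implementation
const detailedConfig = {} as EntryDetailedConfig;  // concrete shape is not shown in this diff

const App: React.FC = () => {
  const [shouldCloseWebSocket, setShouldCloseWebSocket] = useState(false);
  return (
    <TrafficViewerContainer
      webSocketUrl="ws://localhost:8899/ws"        // assumed endpoint
      trafficViewerApiProp={api}
      actionButtons={<></>}
      isShowStatusBar={true}
      isDemoBannerView={false}
      shouldCloseWebSocket={shouldCloseWebSocket}
      setShouldCloseWebSocket={setShouldCloseWebSocket}
      entryDetailedConfig={detailedConfig}
    />
  );
};

export default App;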
@@ -2,7 +2,8 @@ type TrafficViewerApi = {
  validateQuery: (query: any) => any
  tapStatus: () => any
  fetchEntries: (leftOff: any, direction: number, query: any, limit: number, timeoutMs: number) => any
  getEntry: (entryId: any, query: string) => any
  getEntry: (entryId: any, query: string) => any,
  replayRequest: (request: { method: string, url: string, data: string, headers: {} }) => Promise<any>,
  webSocket: {
    close: () => void
  }

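The TrafficViewerApi type gains a replayRequest method that resolves with the replayed response. A hedged sketch of how a caller might use it; the module path, the default export and the api instance are assumptions, only the method signature comes from the type above:

// Hypothetical caller -- not part of this diff.
import TrafficViewerApi from "./TrafficViewerApi";   // assumed module path and default export

declare const api: TrafficViewerApi;                 // provided by the host application

async function replayHealthCheck(): Promise<void> {
  const response = await api.replayRequest({
    method: "GET",
    url: "http://localhost:8080/health",             // assumed target
    data: "",
    headers: { "x-replayed": "true" },
  });
  console.log("replay result:", response);
}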
49
ui-common/src/components/UI/CodeEditor/CodeEditor.tsx
Normal file
@@ -0,0 +1,49 @@
import React from "react";
import AceEditor from "react-ace";
import { config } from 'ace-builds';

import "ace-builds/src-noconflict/ext-searchbox";
import "ace-builds/src-noconflict/mode-python";
import "ace-builds/src-noconflict/mode-json";
import "ace-builds/src-noconflict/theme-github";
import "ace-builds/src-noconflict/mode-javascript";
import "ace-builds/src-noconflict/mode-xml";
import "ace-builds/src-noconflict/mode-html";

config.set(
  "basePath",
  "https://cdn.jsdelivr.net/npm/ace-builds@1.4.6/src-noconflict/"
);
config.setModuleUrl(
  "ace/mode/javascript_worker",
  "https://cdn.jsdelivr.net/npm/ace-builds@1.4.6/src-noconflict/worker-javascript.js"
);

export interface CodeEditorProps {
  code: string,
  onChange?: (code: string) => void,
  language?: string
}

const CodeEditor: React.FC<CodeEditorProps> = ({
  language,
  onChange,
  code
}) => {
  return (
    <AceEditor
      mode={language}
      theme="github"
      onChange={onChange}
      editorProps={{ $blockScrolling: true }}
      showPrintMargin={false}
      value={code}
      width="100%"
      height="100%"
      style={{ borderRadius: "inherit" }}
    />
  );
}

export default CodeEditor

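The new CodeEditor wraps react-ace with the GitHub theme and workers served from the jsDelivr CDN; the language prop should match one of the statically imported ace modes (json, javascript, xml, html, python). A minimal usage sketch, with the parent component as an illustrative stand-in:

// Illustrative parent component -- not part of this diff.
import React, { useState } from "react";
import CodeEditor from "./CodeEditor";   // the new component above

const RequestBodyEditor: React.FC = () => {
  const [body, setBody] = useState<string>("{\n  \"enabled\": true\n}");
  return (
    // The editor stretches to 100% width/height, so the parent supplies the size.
    <div style={{ height: 300 }}>
      <CodeEditor language="json" code={body} onChange={setBody} />
    </div>
  );
};

export default RequestBodyEditor;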
33
ui-common/src/components/UI/FilePicker/FilePicker.tsx
Normal file
@@ -0,0 +1,33 @@
import React from 'react';
import { useEffect } from 'react';
import { useFilePicker } from 'use-file-picker';
import { FileContent } from 'use-file-picker/dist/interfaces';

interface IFilePickerProps {
  onLoadingComplete: (file: FileContent) => void;
  elem: any
}

const FilePicker = ({ elem, onLoadingComplete }: IFilePickerProps) => {
  const [openFileSelector, { filesContent }] = useFilePicker({
    accept: ['.json'],
    limitFilesConfig: { max: 1 },
    maxFileSize: 1
  });

  const onFileSelectorClick = (e) => {
    e.preventDefault();
    e.stopPropagation();
    openFileSelector();
  }

  useEffect(() => {
    filesContent.length && onLoadingComplete(filesContent[0])
  }, [filesContent, onLoadingComplete]);

  return (<React.Fragment>
    {React.cloneElement(elem, { onClick: onFileSelectorClick })}
  </React.Fragment>)
}

export default FilePicker;

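The new FilePicker wraps use-file-picker: it clones the supplied element, attaches a click handler that opens the browser file dialog (restricted to a single, small .json file), and reports the loaded content through onLoadingComplete. A usage sketch; the button and the import handler are illustrative stand-ins:

// Illustrative consumer -- not part of this diff.
import React from "react";
import FilePicker from "./FilePicker";                          // the new component above
import { FileContent } from "use-file-picker/dist/interfaces";

const ImportEntryButton: React.FC = () => {
  const handleLoaded = (file: FileContent) => {
    // file.content holds the selected file's text
    console.log("imported", file.name, file.content.length, "characters");
  };

  return (
    <FilePicker
      elem={<button>Import JSON</button>}
      onLoadingComplete={handleLoaded}
    />
  );
};

export default ImportEntryButton;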
Some files were not shown because too many files have changed in this diff.