mirror of https://github.com/kubeshark/kubeshark.git (synced 2026-02-15 02:19:54 +00:00)
Compare commits
17 Commits
27fa0afb72
c98c99e488
2d838d7699
d5bb036939
87ef469e25
72df652f6b
c67675c138
e8d2b7eb3c
83722f1a02
bb3ae1ef70
5dfa94d76e
dfe63f2318
9cf64a43f5
bf2362d836
1c11523d9d
150a87413d
f7221a7355
Makefile | 3
@@ -103,6 +103,9 @@ test-shared: ## Run shared tests

 test-extensions: ## Run extensions tests
 	@echo "running http tests"; cd tap/extensions/http && $(MAKE) test
 	@echo "running redis tests"; cd tap/extensions/redis && $(MAKE) test
 	@echo "running kafka tests"; cd tap/extensions/kafka && $(MAKE) test
+	@echo "running amqp tests"; cd tap/extensions/amqp && $(MAKE) test

 acceptance-test: ## Run acceptance tests
 	@echo "running acceptance tests"; cd acceptanceTests && $(MAKE) test
@@ -14,7 +14,9 @@
     "tests/RegexMasking.js",
     "tests/IgnoredUserAgents.js",
     "tests/UiTest.js",
-    "tests/Redis.js"
+    "tests/Redis.js",
+    "tests/Rabbit.js",
+    "tests/serviceMapFunction.js"
   ],

   "env": {
@@ -25,6 +27,9 @@
     "minimumEntries": 25,
     "greenFilterColor": "rgb(210, 250, 210)",
     "redFilterColor": "rgb(250, 214, 220)",
-    "bodyJsonClass": ".hljs"
+    "bodyJsonClass": ".hljs",
+    "mizuWidth": 1920,
+    "normalMizuHeight": 1080,
+    "hugeMizuHeight": 3500
   }
 }
@@ -1,3 +1,11 @@
+export const valueTabs = {
+    response: 'RESPONSE',
+    request: 'REQUEST',
+    none: null
+}
+
+const maxEntriesInDom = 13;
+
 export function isValueExistsInElement(shouldInclude, content, domPathToContainer){
     it(`should ${shouldInclude ? '' : 'not'} include '${content}'`, function () {
         cy.get(domPathToContainer).then(htmlText => {
@@ -9,11 +17,11 @@ export function isValueExistsInElement(shouldInclude, content, domPathToContaine
 }

 export function resizeToHugeMizu() {
-    cy.viewport(1920, 3500);
+    cy.viewport(Cypress.env('mizuWidth'), Cypress.env('hugeMizuHeight'));
 }

 export function resizeToNormalMizu() {
-    cy.viewport(1920, 1080);
+    cy.viewport(Cypress.env('mizuWidth'), Cypress.env('normalMizuHeight'));
 }

 export function verifyMinimumEntries() {
@@ -53,3 +61,125 @@ export function checkThatAllEntriesShown() {
         cy.get('[title="Fetch old records"]').click();
     });
 }
+
+export function checkFilterByMethod(funcDict) {
+    const {protocol, method, summary, hugeMizu} = funcDict;
+    const summaryDict = getSummeryDict(summary);
+    const methodDict = getMethodDict(method);
+    const protocolDict = getProtocolDict(protocol.name, protocol.text);
+
+    it(`Testing the method: ${method}`, function () {
+        // applying filter
+        cy.get('.w-tc-editor-text').clear().type(`method == "${method}"`);
+        cy.get('[type="submit"]').click();
+        cy.get('.w-tc-editor').should('have.attr', 'style').and('include', Cypress.env('greenFilterColor'));
+
+        cy.get('#entries-length').then(number => {
+            // if the entries list isn't expanded it expands here
+            if (number.text() === '0' || number.text() === '1') // todo change when TRA-4262 is fixed
+                cy.get('[title="Fetch old records"]').click();
+
+            cy.get('#entries-length').should('not.have.text', '0').and('not.have.text', '1').then(() => {
+                cy.get(`#list [id]`).then(elements => {
+                    const listElmWithIdAttr = Object.values(elements);
+                    let doneCheckOnFirst = false;
+
+                    cy.get('#entries-length').invoke('text').then(len => {
+                        resizeIfNeeded(len);
+                        listElmWithIdAttr.forEach(entry => {
+                            if (entry?.id && entry.id.match(RegExp(/entry-(\d{2}|\d{1})$/gm))) {
+                                const entryNum = getEntryNumById(entry.id);
+
+                                leftTextCheck(entryNum, methodDict.pathLeft, methodDict.expectedText);
+                                leftTextCheck(entryNum, protocolDict.pathLeft, protocolDict.expectedTextLeft);
+                                if (summaryDict)
+                                    leftTextCheck(entryNum, summaryDict.pathLeft, summaryDict.expectedText);
+
+                                if (!doneCheckOnFirst) {
+                                    deepCheck(funcDict, protocolDict, methodDict, entry);
+                                    doneCheckOnFirst = true;
+                                }
+                            }
+                        });
+                        resizeIfNeeded(len);
+                    });
+                });
+            });
+        });
+    });
+}
+
+function resizeIfNeeded(entriesLen) {
+    if (entriesLen > maxEntriesInDom){
+        Cypress.config().viewportHeight === Cypress.env('normalMizuHeight') ?
+            resizeToHugeMizu() : resizeToNormalMizu()
+    }
+}
+
+function deepCheck(generalDict, protocolDict, methodDict, entry) {
+    const entryNum = getEntryNumById(entry.id);
+    const {summary, value} = generalDict;
+    const summaryDict = getSummeryDict(summary);
+
+    leftOnHoverCheck(entryNum, methodDict.pathLeft, methodDict.expectedOnHover);
+    leftOnHoverCheck(entryNum, protocolDict.pathLeft, protocolDict.expectedOnHover);
+    if (summaryDict)
+        leftOnHoverCheck(entryNum, summaryDict.pathLeft, summaryDict.expectedOnHover);
+
+    cy.get(`#${entry.id}`).click();
+
+    rightTextCheck(methodDict.pathRight, methodDict.expectedText);
+    rightTextCheck(protocolDict.pathRight, protocolDict.expectedTextRight);
+    if (summaryDict)
+        rightTextCheck(summaryDict.pathRight, summaryDict.expectedText);
+
+    rightOnHoverCheck(methodDict.pathRight, methodDict.expectedOnHover);
+    rightOnHoverCheck(protocolDict.pathRight, protocolDict.expectedOnHover);
+    if (summaryDict)
+        rightOnHoverCheck(summaryDict.pathRight, summaryDict.expectedOnHover);
+
+    if (value) {
+        if (value.tab === valueTabs.response)
+            cy.contains('Response').click();
+        cy.get(Cypress.env('bodyJsonClass')).then(text => {
+            expect(text.text()).to.match(value.regex)
+        });
+    }
+}
+
+function getSummeryDict(summary) {
+    if (summary) {
+        return {
+            pathLeft: '> :nth-child(2) > :nth-child(1) > :nth-child(2) > :nth-child(2)',
+            pathRight: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(2) > :nth-child(2)',
+            expectedText: summary,
+            expectedOnHover: `summary == "${summary}"`
+        };
+    }
+    else {
+        return null;
+    }
+}
+
+function getMethodDict(method) {
+    return {
+        pathLeft: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(2)',
+        pathRight: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(1) > :nth-child(2)',
+        expectedText: method,
+        expectedOnHover: `method == "${method}"`
+    };
+}
+
+function getProtocolDict(protocol, protocolText) {
+    return {
+        pathLeft: '> :nth-child(1) > :nth-child(1)',
+        pathRight: '> :nth-child(1) > :nth-child(1) > :nth-child(1) > :nth-child(1)',
+        expectedTextLeft: protocol.toUpperCase(),
+        expectedTextRight: protocolText,
+        expectedOnHover: protocol.toLowerCase()
+    };
+}
+
+function getEntryNumById (id) {
+    return parseInt(id.split('-')[1]);
+}
acceptanceTests/cypress/integration/tests/Rabbit.js | 50 (new file)
@@ -0,0 +1,50 @@
+import {checkFilterByMethod, valueTabs,} from "../testHelpers/TrafficHelper";
+
+it('opening mizu', function () {
+    cy.visit(Cypress.env('testUrl'));
+    cy.get('#total-entries').invoke('text').should('match', /^[4-7][0-9]$/m)
+});
+
+const rabbitProtocolDetails = {name: 'AMQP', text: 'Advanced Message Queuing Protocol 0-9-1'};
+
+checkFilterByMethod({
+    protocol: rabbitProtocolDetails,
+    method: 'exchange declare',
+    summary: 'exchange',
+    value: null
+});
+
+checkFilterByMethod({
+    protocol: rabbitProtocolDetails,
+    method: 'queue declare',
+    summary: 'queue',
+    value: null
+});
+
+checkFilterByMethod({
+    protocol: rabbitProtocolDetails,
+    method: 'queue bind',
+    summary: 'queue',
+    value: null
+});
+
+checkFilterByMethod({
+    protocol: rabbitProtocolDetails,
+    method: 'basic publish',
+    summary: 'exchange',
+    value: {tab: valueTabs.request, regex: /^message$/mg}
+});
+
+checkFilterByMethod({
+    protocol: rabbitProtocolDetails,
+    method: 'basic consume',
+    summary: 'queue',
+    value: null
+});
+
+checkFilterByMethod({
+    protocol: rabbitProtocolDetails,
+    method: 'basic deliver',
+    summary: 'exchange',
+    value: {tab: valueTabs.request, regex: /^message$/mg}
+});
@@ -1,155 +1,42 @@
-import {
-    leftOnHoverCheck,
-    leftTextCheck,
-    rightOnHoverCheck,
-    rightTextCheck,
-} from "../testHelpers/TrafficHelper";
-
-const valueTabs = {
-    response: 'RESPONSE',
-    request: 'REQUEST',
-    none: null
-}
+import {checkFilterByMethod, valueTabs,} from "../testHelpers/TrafficHelper";

 it('opening mizu', function () {
     cy.visit(Cypress.env('testUrl'));
 });

-checkRedisFilterByMethod({
+const redisProtocolDetails = {name: 'redis', text: 'Redis Serialization Protocol'};
+
+checkFilterByMethod({
+    protocol: redisProtocolDetails,
     method: 'PING',
-    shouldCheckSummary: false,
-    valueTab: valueTabs.none
-});
+    summary: null,
+    value: null
+})

-checkRedisFilterByMethod({
+checkFilterByMethod({
+    protocol: redisProtocolDetails,
     method: 'SET',
-    shouldCheckSummary: true,
-    valueTab: valueTabs.request,
-    valueRegex: /^\[value, keepttl]$/mg
-});
+    summary: 'key',
+    value: {tab: valueTabs.request, regex: /^\[value, keepttl]$/mg}
+})

-checkRedisFilterByMethod({
+checkFilterByMethod({
+    protocol: redisProtocolDetails,
     method: 'EXISTS',
-    shouldCheckSummary: true,
-    valueTab: valueTabs.response,
-    valueRegex: /^1$/mg
-});
+    summary: 'key',
+    value: {tab: valueTabs.response, regex: /^1$/mg}
+})

-checkRedisFilterByMethod({
+checkFilterByMethod({
+    protocol: redisProtocolDetails,
     method: 'GET',
-    shouldCheckSummary: true,
-    valueTab: valueTabs.response,
-    valueRegex: /^value$/mg
-});
+    summary: 'key',
+    value: {tab: valueTabs.response, regex: /^value$/mg}
+})

-checkRedisFilterByMethod({
+checkFilterByMethod({
+    protocol: redisProtocolDetails,
     method: 'DEL',
-    shouldCheckSummary: true,
-    valueTab: valueTabs.response,
-    valueRegex: /^1$|^0$/mg
-});
-
-function checkRedisFilterByMethod(funcDict) {
-    const {method, shouldCheckSummary} = funcDict
-    const summaryDict = getSummeryDict();
-    const methodDict = getMethodDict(method);
-    const protocolDict = getProtocolDict();
-
-    it(`Testing the method: ${method}`, function () {
-        // applying filter
-        cy.get('.w-tc-editor-text').clear().type(`method == "${method}"`);
-        cy.get('[type="submit"]').click();
-        cy.get('.w-tc-editor').should('have.attr', 'style').and('include', Cypress.env('greenFilterColor'));
-
-        cy.get('#entries-length').then(number => {
-            // if the entries list isn't expanded it expands here
-            if (number.text() === '0' || number.text() === '1') // todo change when TRA-4262 is fixed
-                cy.get('[title="Fetch old records"]').click();
-
-            cy.get('#entries-length').should('not.have.text', '0').and('not.have.text', '1').then(() => {
-                cy.get(`#list [id]`).then(elements => {
-                    const listElmWithIdAttr = Object.values(elements);
-                    let doneCheckOnFirst = false;
-
-                    listElmWithIdAttr.forEach(entry => {
-                        if (entry?.id && entry.id.match(RegExp(/entry-(\d{2}|\d{1})$/gm))) {
-                            const entryNum = getEntryNumById(entry.id);
-
-                            leftTextCheck(entryNum, methodDict.pathLeft, methodDict.expectedText);
-                            leftTextCheck(entryNum, protocolDict.pathLeft, protocolDict.expectedTextLeft);
-                            if (shouldCheckSummary)
-                                leftTextCheck(entryNum, summaryDict.pathLeft, summaryDict.expectedText);
-
-                            if (!doneCheckOnFirst) {
-                                deepCheck(funcDict, protocolDict, methodDict, summaryDict, entry);
-                                doneCheckOnFirst = true;
-                            }
-                        }
-                    });
-                });
-            });
-        });
-    });
-}
-
-function deepCheck(generalDict, protocolDict, methodDict, summaryDict, entry) {
-    const entryNum = getEntryNumById(entry.id);
-    const {shouldCheckSummary, valueTab, valueRegex} = generalDict;
-
-    leftOnHoverCheck(entryNum, methodDict.pathLeft, methodDict.expectedOnHover);
-    leftOnHoverCheck(entryNum, protocolDict.pathLeft, protocolDict.expectedOnHover);
-    if (shouldCheckSummary)
-        leftOnHoverCheck(entryNum, summaryDict.pathLeft, summaryDict.expectedOnHover);
-
-    cy.get(`#${entry.id}`).click();
-
-    rightTextCheck(methodDict.pathRight, methodDict.expectedText);
-    rightTextCheck(protocolDict.pathRight, protocolDict.expectedTextRight);
-    if (shouldCheckSummary)
-        rightTextCheck(summaryDict.pathRight, summaryDict.expectedText);
-
-    rightOnHoverCheck(methodDict.pathRight, methodDict.expectedOnHover);
-    rightOnHoverCheck(protocolDict.pathRight, protocolDict.expectedOnHover);
-    if (shouldCheckSummary)
-        rightOnHoverCheck(summaryDict.pathRight, summaryDict.expectedOnHover);
-
-    if (valueTab) {
-        if (valueTab === valueTabs.response)
-            cy.contains('Response').click();
-        cy.get(Cypress.env('bodyJsonClass')).then(text => {
-            expect(text.text()).to.match(valueRegex)
-        });
-    }
-}
-
-function getSummeryDict() {
-    return {
-        pathLeft: '> :nth-child(2) > :nth-child(1) > :nth-child(2) > :nth-child(2)',
-        pathRight: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(2) > :nth-child(2)',
-        expectedText: 'key',
-        expectedOnHover: `summary == "key"`
-    };
-}
-
-function getMethodDict(method) {
-    return {
-        pathLeft: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(2)',
-        pathRight: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(1) > :nth-child(2)',
-        expectedText: method,
-        expectedOnHover: `method == "${method}"`
-    };
-}
-
-function getProtocolDict() {
-    return {
-        pathLeft: '> :nth-child(1) > :nth-child(1)',
-        pathRight: '> :nth-child(1) > :nth-child(1) > :nth-child(1) > :nth-child(1)',
-        expectedTextLeft: 'REDIS',
-        expectedTextRight: 'Redis Serialization Protocol',
-        expectedOnHover: `redis`
-    };
-}
-
-function getEntryNumById (id) {
-    return parseInt(id.split('-')[1]);
-}
+    summary: 'key',
+    value: {tab: valueTabs.response, regex: /^1$|^0$/mg}
+})
@@ -4,8 +4,10 @@ import (
 	"context"
 	"fmt"
 	"github.com/go-redis/redis/v8"
+	amqp "github.com/rabbitmq/amqp091-go"
 	"os/exec"
 	"testing"
 	"time"
 )

 func TestRedis(t *testing.T) {
@@ -99,3 +101,128 @@ func TestRedis(t *testing.T) {

 	runCypressTests(t, "npx cypress run --spec \"cypress/integration/tests/Redis.js\"")
 }
+
+func TestAmqp(t *testing.T) {
+	if testing.Short() {
+		t.Skip("ignored acceptance test")
+	}
+
+	cliPath, cliPathErr := getCliPath()
+	if cliPathErr != nil {
+		t.Errorf("failed to get cli path, err: %v", cliPathErr)
+		return
+	}
+
+	tapCmdArgs := getDefaultTapCommandArgs()
+
+	tapNamespace := getDefaultTapNamespace()
+	tapCmdArgs = append(tapCmdArgs, tapNamespace...)
+
+	tapCmd := exec.Command(cliPath, tapCmdArgs...)
+	t.Logf("running command: %v", tapCmd.String())
+
+	t.Cleanup(func() {
+		if err := cleanupCommand(tapCmd); err != nil {
+			t.Logf("failed to cleanup tap command, err: %v", err)
+		}
+	})
+
+	if err := tapCmd.Start(); err != nil {
+		t.Errorf("failed to start tap command, err: %v", err)
+		return
+	}
+
+	apiServerUrl := getApiServerUrl(defaultApiServerPort)
+
+	if err := waitTapPodsReady(apiServerUrl); err != nil {
+		t.Errorf("failed to start tap pods on time, err: %v", err)
+		return
+	}
+
+	ctx := context.Background()
+
+	rabbitmqExternalIp, err := getServiceExternalIp(ctx, defaultNamespaceName, "rabbitmq")
+	if err != nil {
+		t.Errorf("failed to get RabbitMQ external ip, err: %v", err)
+		return
+	}
+
+	conn, err := amqp.Dial(fmt.Sprintf("amqp://guest:guest@%v:5672/", rabbitmqExternalIp))
+	if err != nil {
+		t.Errorf("failed to connect to RabbitMQ, err: %v", err)
+		return
+	}
+	defer conn.Close()
+
+	// Temporary fix for missing amqp entries
+	time.Sleep(5 * time.Second)
+
+	for i := 0; i < defaultEntriesCount/5; i++ {
+		ch, err := conn.Channel()
+		if err != nil {
+			t.Errorf("failed to open a channel, err: %v", err)
+			return
+		}
+
+		exchangeName := "exchange"
+		err = ch.ExchangeDeclare(exchangeName, "direct", true, false, false, false, nil)
+		if err != nil {
+			t.Errorf("failed to declare an exchange, err: %v", err)
+			return
+		}
+
+		q, err := ch.QueueDeclare("queue", true, false, false, false, nil)
+		if err != nil {
+			t.Errorf("failed to declare a queue, err: %v", err)
+			return
+		}
+
+		routingKey := "routing_key"
+		err = ch.QueueBind(q.Name, routingKey, exchangeName, false, nil)
+		if err != nil {
+			t.Errorf("failed to bind the queue, err: %v", err)
+			return
+		}
+
+		err = ch.Publish(exchangeName, routingKey, false, false,
+			amqp.Publishing{
+				DeliveryMode: amqp.Persistent,
+				ContentType:  "text/plain",
+				Body:         []byte("message"),
+			})
+		if err != nil {
+			t.Errorf("failed to publish a message, err: %v", err)
+			return
+		}
+
+		msgChan, err := ch.Consume(q.Name, "Consumer", true, false, false, false, nil)
+		if err != nil {
+			t.Errorf("failed to create a consumer, err: %v", err)
+			return
+		}
+
+		select {
+		case <-msgChan:
+			break
+		case <-time.After(3 * time.Second):
+			t.Errorf("failed to consume a message on time")
+			return
+		}
+
+		err = ch.ExchangeDelete(exchangeName, false, false)
+		if err != nil {
+			t.Errorf("failed to delete the exchange, err: %v", err)
+			return
+		}
+
+		_, err = ch.QueueDelete(q.Name, false, false, false)
+		if err != nil {
+			t.Errorf("failed to delete the queue, err: %v", err)
+			return
+		}
+
+		ch.Close()
+	}
+
+	runCypressTests(t, "npx cypress run --spec \"cypress/integration/tests/Rabbit.js\"")
+}
@@ -4,6 +4,7 @@ go 1.17

 require (
 	github.com/go-redis/redis/v8 v8.11.4
+	github.com/rabbitmq/amqp091-go v1.3.0
 	github.com/up9inc/mizu/shared v0.0.0
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
 	k8s.io/apimachinery v0.23.3
@@ -427,6 +427,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rabbitmq/amqp091-go v1.3.0 h1:A/QuHiNw7LMCJsxx9iZn5lrIz6OrhIn7Dfk5/1YatWM=
+github.com/rabbitmq/amqp091-go v1.3.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
@@ -39,6 +39,9 @@ kubectl create deployment httpbin --image=kennethreitz/httpbin -n mizu-tests2
 echo "Creating redis deployment"
 kubectl create deployment redis --image=redis -n mizu-tests
+
+echo "Creating rabbitmq deployment"
+kubectl create deployment rabbitmq --image=rabbitmq -n mizu-tests

 echo "Creating httpbin services"
 kubectl expose deployment httpbin --type=NodePort --port=80 -n mizu-tests
 kubectl expose deployment httpbin2 --type=NodePort --port=80 -n mizu-tests
@@ -48,6 +51,9 @@ kubectl expose deployment httpbin --type=NodePort --port=80 -n mizu-tests2
 echo "Creating redis service"
 kubectl expose deployment redis --type=LoadBalancer --port=6379 -n mizu-tests
+
+echo "Creating rabbitmq service"
+kubectl expose deployment rabbitmq --type=LoadBalancer --port=5672 -n mizu-tests

 echo "Starting proxy"
 kubectl proxy --port=8080 &
@@ -54,6 +54,7 @@ require (
 	github.com/bradleyfalzon/tlsx v0.0.0-20170624122154-28fd0e59bac4 // indirect
 	github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
 	github.com/chanced/dynamic v0.0.0-20211210164248-f8fadb1d735b // indirect
+	github.com/cilium/ebpf v0.8.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
 	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
@@ -137,6 +137,8 @@ github.com/chanced/openapi v0.0.7/go.mod h1:SxE2VMLPw+T7Vq8nwbVVhDF2PigvRF4n5Xyq
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.8.0 h1:2V6KSg3FRADVU2BMIRemZ0hV+9OM+aAHhZDjQyjJTAs=
+github.com/cilium/ebpf v0.8.0/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -210,8 +212,9 @@ github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYF
 github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
-github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY=
-github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
+github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
 github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
@@ -1158,6 +1161,7 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -110,20 +110,31 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even
 		logger.Log.Error(err)
 	}

+out:
 	for {
-		_, msg, err := ws.ReadMessage()
-		if err != nil {
-			if _, ok := err.(*websocket.CloseError); ok {
-				logger.Log.Debugf("Received websocket close message, socket id: %d", socketId)
-			} else {
-				logger.Log.Errorf("Error reading message, socket id: %d, error: %v", socketId, err)
-			}
-
-			break
-		}
+		// params[0]: query
+		// params[1]: enableFullEntries (empty: disable, non-empty: enable)
+		params := make([][]byte, 2)
+		for i := range params {
+			_, params[i], err = ws.ReadMessage()
+			if err != nil {
+				if _, ok := err.(*websocket.CloseError); ok {
+					logger.Log.Debugf("Received websocket close message, socket id: %d", socketId)
+				} else {
+					logger.Log.Errorf("Error reading message, socket id: %d, error: %v", socketId, err)
+				}
+
+				break out
+			}
+		}
+
+		enableFullEntries := false
+		if len(params[1]) > 0 {
+			enableFullEntries = true
+		}
+
 		if !isTapper && !isQuerySet {
-			query := string(msg)
+			query := string(params[0])
 			err = basenine.Validate(shared.BasenineHost, shared.BaseninePort, query)
 			if err != nil {
 				toastBytes, _ := models.CreateWebsocketToastMessage(&models.ToastMessage{
@@ -150,10 +161,15 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even
 			var entry *tapApi.Entry
 			err = json.Unmarshal(bytes, &entry)

-			base := tapApi.Summarize(entry)
+			var message []byte
+			if enableFullEntries {
+				message, _ = models.CreateFullEntryWebSocketMessage(entry)
+			} else {
+				base := tapApi.Summarize(entry)
+				message, _ = models.CreateBaseEntryWebSocketMessage(base)
+			}

-			baseEntryBytes, _ := models.CreateBaseEntryWebSocketMessage(base)
-			if err := SendToSocket(socketId, baseEntryBytes); err != nil {
+			if err := SendToSocket(socketId, message); err != nil {
 				logger.Log.Error(err)
 			}
 		}
@@ -185,7 +201,7 @@ func websocketHandler(w http.ResponseWriter, r *http.Request, eventHandlers Even

 			connection.Query(query, data, meta)
 		} else {
-			eventHandlers.WebSocketMessage(socketId, msg)
+			eventHandlers.WebSocketMessage(socketId, params[0])
 		}
 	}
 }
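With this change the entries websocket expects two consecutive messages per subscription instead of one: params[0] carries the query and params[1] carries the enableFullEntries flag, where an empty payload disables full entries and any non-empty payload enables them. A minimal client sketch of that handshake follows; the endpoint URL and the "enable" payload are illustrative assumptions, and the gorilla/websocket package is assumed since the handler above matches on *websocket.CloseError.

package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Hypothetical endpoint; the agent's actual route may differ.
	conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8899/ws", nil)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Message 1 (params[0]): the query used to filter entries.
	if err := conn.WriteMessage(websocket.TextMessage, []byte(`method == "GET"`)); err != nil {
		log.Fatalf("send query: %v", err)
	}
	// Message 2 (params[1]): any non-empty payload turns on full entries.
	if err := conn.WriteMessage(websocket.TextMessage, []byte("enable")); err != nil {
		log.Fatalf("send enableFullEntries flag: %v", err)
	}

	// With the flag set, the server streams fullEntry messages instead of
	// the summarized entry messages.
	for {
		_, msg, err := conn.ReadMessage()
		if err != nil {
			log.Fatalf("read: %v", err)
		}
		log.Printf("received: %s", msg)
	}
}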
@@ -57,7 +57,7 @@ func PostTapConfig(c *gin.Context) {

 	ctx, cancel := context.WithCancel(context.Background())

-	if _, err := startMizuTapperSyncer(ctx, kubernetesProvider, tappedNamespaces, *podRegex, []string{}, tapApi.TrafficFilteringOptions{}, false); err != nil {
+	if _, err := startMizuTapperSyncer(ctx, kubernetesProvider, tappedNamespaces, *podRegex, []string{}, tapApi.TrafficFilteringOptions{}, false, false); err != nil {
 		c.JSON(http.StatusInternalServerError, err)
 		cancel()
 		return
@@ -100,7 +100,7 @@ func GetTapConfig(c *gin.Context) {
 	c.JSON(http.StatusOK, tapConfigToReturn)
 }

-func startMizuTapperSyncer(ctx context.Context, provider *kubernetes.Provider, targetNamespaces []string, podFilterRegex regexp.Regexp, ignoredUserAgents []string, mizuApiFilteringOptions tapApi.TrafficFilteringOptions, serviceMesh bool) (*kubernetes.MizuTapperSyncer, error) {
+func startMizuTapperSyncer(ctx context.Context, provider *kubernetes.Provider, targetNamespaces []string, podFilterRegex regexp.Regexp, ignoredUserAgents []string, mizuApiFilteringOptions tapApi.TrafficFilteringOptions, serviceMesh bool, tls bool) (*kubernetes.MizuTapperSyncer, error) {
 	tapperSyncer, err := kubernetes.CreateAndStartMizuTapperSyncer(ctx, provider, kubernetes.TapperSyncerConfig{
 		TargetNamespaces: targetNamespaces,
 		PodFilterRegex:   podFilterRegex,
@@ -113,6 +113,7 @@ func startMizuTapperSyncer(ctx context.Context, provider *kubernetes.Provider, t
 		MizuApiFilteringOptions:  mizuApiFilteringOptions,
 		MizuServiceAccountExists: true, //assume service account exists since install mode will not function without it anyway
 		ServiceMesh:              serviceMesh,
+		Tls:                      tls,
 	}, time.Now())

 	if err != nil {
@@ -102,13 +102,13 @@ func (s *ServiceMapControllerSuite) TestGet() {
 	// response nodes
 	aNode := servicemap.ServiceMapNode{
 		Id:    1,
-		Name:  TCPEntryA.IP,
+		Name:  TCPEntryA.Name,
 		Entry: TCPEntryA,
 		Count: 1,
 	}
 	bNode := servicemap.ServiceMapNode{
 		Id:    2,
-		Name:  TCPEntryB.IP,
+		Name:  TCPEntryB.Name,
 		Entry: TCPEntryB,
 		Count: 1,
 	}
@@ -42,6 +42,11 @@ type WebSocketEntryMessage struct {
 	Data *tapApi.BaseEntry `json:"data,omitempty"`
 }

+type WebSocketFullEntryMessage struct {
+	*shared.WebSocketMessageMetadata
+	Data *tapApi.Entry `json:"data,omitempty"`
+}
+
 type WebSocketTappedEntryMessage struct {
 	*shared.WebSocketMessageMetadata
 	Data *tapApi.OutputChannelItem
@@ -88,6 +93,16 @@ func CreateBaseEntryWebSocketMessage(base *tapApi.BaseEntry) ([]byte, error) {
 	return json.Marshal(message)
 }

+func CreateFullEntryWebSocketMessage(entry *tapApi.Entry) ([]byte, error) {
+	message := &WebSocketFullEntryMessage{
+		WebSocketMessageMetadata: &shared.WebSocketMessageMetadata{
+			MessageType: shared.WebSocketMessageTypeFullEntry,
+		},
+		Data: entry,
+	}
+	return json.Marshal(message)
+}
+
 func CreateWebsocketTappedEntryMessage(base *tapApi.OutputChannelItem) ([]byte, error) {
 	message := &WebSocketTappedEntryMessage{
 		WebSocketMessageMetadata: &shared.WebSocketMessageMetadata{
@@ -172,20 +172,33 @@ func (s *serviceMap) NewTCPEntry(src *tapApi.TCP, dst *tapApi.TCP, p *tapApi.Pro
 		return
 	}

-	srcEntry := &entryData{
-		key:   key(src.IP),
-		entry: src,
-	}
-	if len(srcEntry.entry.Name) == 0 {
+	var srcEntry *entryData
+	var dstEntry *entryData
+
+	if len(src.Name) == 0 {
+		srcEntry = &entryData{
+			key:   key(src.IP),
+			entry: src,
+		}
 		srcEntry.entry.Name = UnresolvedNodeName
+	} else {
+		srcEntry = &entryData{
+			key:   key(src.Name),
+			entry: src,
+		}
 	}

-	dstEntry := &entryData{
-		key:   key(dst.IP),
-		entry: dst,
-	}
-	if len(dstEntry.entry.Name) == 0 {
+	if len(dst.Name) == 0 {
+		dstEntry = &entryData{
+			key:   key(dst.IP),
+			entry: dst,
+		}
 		dstEntry.entry.Name = UnresolvedNodeName
+	} else {
+		dstEntry = &entryData{
+			key:   key(dst.Name),
+			entry: dst,
+		}
 	}

 	s.addEdge(srcEntry, dstEntry, p)
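The effect of this hunk is that a service-map node is now keyed by the resolved service name when one exists, and only falls back to the IP for unresolved peers. A standalone sketch of that selection rule, using a hypothetical nodeKey helper and simplified types rather than the repository's actual API:

package servicemap

type key string

// UnresolvedNodeName marks peers whose service name could not be resolved.
const UnresolvedNodeName = "unresolved"

type tcpPeer struct {
	IP   string
	Name string
}

// nodeKey mirrors the rule in NewTCPEntry above: resolved peers are keyed by
// Name, so the pods of one service collapse into a single node regardless of
// IP churn; unresolved peers keep their IP as the key and get a marker name.
func nodeKey(p *tcpPeer) key {
	if len(p.Name) == 0 {
		p.Name = UnresolvedNodeName
		return key(p.IP)
	}
	return key(p.Name)
}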
@@ -268,9 +268,14 @@ func (s *ServiceMapEnabledSuite) TestServiceMap() {
 		assert.LessOrEqual(node.Id, expectedNodeCount)

 		// entry
-		// node.Name is the key of the node, key = entry.IP
-		assert.Equal(node.Name, node.Entry.IP)
+		// node.Name is the key of the node, key = entry.Name by default
+		// entry.Name is the name of the service and could be unresolved
+		// when entry.Name is unresolved, key = entry.IP
+		if node.Entry.Name == UnresolvedNodeName {
+			assert.Equal(node.Name, node.Entry.IP)
+		} else {
+			assert.Equal(node.Name, node.Entry.Name)
+		}
 		assert.Equal(Port, node.Entry.Port)
 		assert.Equal(entryName, node.Entry.Name)
@@ -320,16 +325,24 @@ func (s *ServiceMapEnabledSuite) TestServiceMap() {
 	cdEdge := -1
 	acEdge := -1
 	var validateEdge = func(edge ServiceMapEdge, sourceEntryName string, destEntryName string, protocolName string, protocolCount int) {
-		// source
+		// source node
 		assert.Contains(nodeIds, edge.Source.Id)
 		assert.LessOrEqual(edge.Source.Id, expectedNodeCount)
-		assert.Equal(edge.Source.Name, edge.Source.Entry.IP)
+		if edge.Source.Entry.Name == UnresolvedNodeName {
+			assert.Equal(edge.Source.Name, edge.Source.Entry.IP)
+		} else {
+			assert.Equal(edge.Source.Name, edge.Source.Entry.Name)
+		}
 		assert.Equal(sourceEntryName, edge.Source.Entry.Name)

-		// destination
+		// destination node
 		assert.Contains(nodeIds, edge.Destination.Id)
 		assert.LessOrEqual(edge.Destination.Id, expectedNodeCount)
-		assert.Equal(edge.Destination.Name, edge.Destination.Entry.IP)
+		if edge.Destination.Entry.Name == UnresolvedNodeName {
+			assert.Equal(edge.Destination.Name, edge.Destination.Entry.IP)
+		} else {
+			assert.Equal(edge.Destination.Name, edge.Destination.Entry.Name)
+		}
 		assert.Equal(destEntryName, edge.Destination.Entry.Name)

 		// protocol
cli/Makefile | 30
@@ -1,3 +1,9 @@
 SHELL=/bin/bash
+
+.PHONY: help
+.DEFAULT_GOAL := help
+.ONESHELL:

 SUFFIX=$(GOOS)_$(GOARCH)
 COMMIT_HASH=$(shell git rev-parse HEAD)
 GIT_BRANCH=$(shell git branch --show-current | tr '[:upper:]' '[:lower:]')
@@ -5,9 +11,6 @@ GIT_VERSION=$(shell git branch --show-current | tr '[:upper:]' '[:lower:]')
 BUILD_TIMESTAMP=$(shell date +%s)
 export VER?=0.0

-.PHONY: help
-.DEFAULT_GOAL := help
-
 help: ## This help.
 	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

@@ -16,15 +19,20 @@ install:

 build-debug: ## Build mizu CLI for debug
 	export GCLFAGS='-gcflags="all=-N -l"'
-	${MAKE} build
+	${MAKE} build-base

-build: ## Build mizu CLI binary (select platform via GOOS / GOARCH env variables).
-	go build ${GCLFAGS} -ldflags="-X 'github.com/up9inc/mizu/cli/mizu.GitCommitHash=$(COMMIT_HASH)' \
-				      -X 'github.com/up9inc/mizu/cli/mizu.Branch=$(GIT_BRANCH)' \
-				      -X 'github.com/up9inc/mizu/cli/mizu.BuildTimestamp=$(BUILD_TIMESTAMP)' \
-				      -X 'github.com/up9inc/mizu/cli/mizu.Platform=$(SUFFIX)' \
-				      -X 'github.com/up9inc/mizu/cli/mizu.Ver=$(VER)'" \
-				      -o bin/mizu_$(SUFFIX) mizu.go
+build:
+	export LDFLAGS_EXT='-s -w'
+	${MAKE} build-base
+
+build-base: ## Build mizu CLI binary (select platform via GOOS / GOARCH env variables).
+	go build ${GCLFAGS} -ldflags="${LDFLAGS_EXT} \
+				      -X 'github.com/up9inc/mizu/cli/mizu.GitCommitHash=$(COMMIT_HASH)' \
+				      -X 'github.com/up9inc/mizu/cli/mizu.Branch=$(GIT_BRANCH)' \
+				      -X 'github.com/up9inc/mizu/cli/mizu.BuildTimestamp=$(BUILD_TIMESTAMP)' \
+				      -X 'github.com/up9inc/mizu/cli/mizu.Platform=$(SUFFIX)' \
+				      -X 'github.com/up9inc/mizu/cli/mizu.Ver=$(VER)'" \
+				      -o bin/mizu_$(SUFFIX) mizu.go
 	(cd bin && shasum -a 256 mizu_${SUFFIX} > mizu_${SUFFIX}.sha256)

 build-all: ## Build for all supported platforms.
@@ -4,9 +4,11 @@ import (
 	"bytes"
 	"encoding/json"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"net/http"
 	"net/url"
+	"strings"
 	"time"

 	"github.com/up9inc/mizu/shared/kubernetes"
@@ -57,10 +59,8 @@ func (provider *Provider) TestConnection() error {

 func (provider *Provider) isReachable() (bool, error) {
 	echoUrl := fmt.Sprintf("%s/echo", provider.url)
-	if response, err := provider.client.Get(echoUrl); err != nil {
+	if _, err := provider.get(echoUrl); err != nil {
 		return false, err
-	} else if response.StatusCode != 200 {
-		return false, fmt.Errorf("invalid status code %v", response.StatusCode)
 	} else {
 		return true, nil
 	}
@@ -72,10 +72,8 @@ func (provider *Provider) ReportTapperStatus(tapperStatus shared.TapperStatus) e
 	if jsonValue, err := json.Marshal(tapperStatus); err != nil {
 		return fmt.Errorf("failed Marshal the tapper status %w", err)
 	} else {
-		if response, err := provider.client.Post(tapperStatusUrl, "application/json", bytes.NewBuffer(jsonValue)); err != nil {
+		if _, err := provider.post(tapperStatusUrl, "application/json", bytes.NewBuffer(jsonValue)); err != nil {
 			return fmt.Errorf("failed sending to API server the tapped pods %w", err)
-		} else if response.StatusCode != 200 {
-			return fmt.Errorf("failed sending to API server the tapper status, response status code %v", response.StatusCode)
 		} else {
 			logger.Log.Debugf("Reported to server API about tapper status: %v", tapperStatus)
 			return nil
@@ -91,10 +89,8 @@ func (provider *Provider) ReportTappedPods(pods []core.Pod) error {
 	if jsonValue, err := json.Marshal(podInfos); err != nil {
 		return fmt.Errorf("failed Marshal the tapped pods %w", err)
 	} else {
-		if response, err := provider.client.Post(tappedPodsUrl, "application/json", bytes.NewBuffer(jsonValue)); err != nil {
+		if _, err := provider.post(tappedPodsUrl, "application/json", bytes.NewBuffer(jsonValue)); err != nil {
 			return fmt.Errorf("failed sending to API server the tapped pods %w", err)
-		} else if response.StatusCode != 200 {
-			return fmt.Errorf("failed sending to API server the tapped pods, response status code %v", response.StatusCode)
 		} else {
 			logger.Log.Debugf("Reported to server API about %d taped pods successfully", len(podInfos))
 			return nil
@@ -105,11 +101,9 @@
 func (provider *Provider) GetGeneralStats() (map[string]interface{}, error) {
 	generalStatsUrl := fmt.Sprintf("%s/status/general", provider.url)

-	response, requestErr := provider.client.Get(generalStatsUrl)
+	response, requestErr := provider.get(generalStatsUrl)
 	if requestErr != nil {
 		return nil, fmt.Errorf("failed to get general stats for telemetry, err: %w", requestErr)
-	} else if response.StatusCode != 200 {
-		return nil, fmt.Errorf("failed to get general stats for telemetry, status code: %v", response.StatusCode)
 	}

 	defer response.Body.Close()
@@ -132,7 +126,7 @@ func (provider *Provider) GetVersion() (string, error) {
 		Method: http.MethodGet,
 		URL:    versionUrl,
 	}
-	statusResp, err := provider.client.Do(req)
+	statusResp, err := provider.do(req)
 	if err != nil {
 		return "", err
 	}
@@ -145,3 +139,40 @@

 	return versionResponse.Ver, nil
 }
+
+// When err is nil, resp always contains a non-nil resp.Body.
+// Caller should close resp.Body when done reading from it.
+func (provider *Provider) get(url string) (*http.Response, error) {
+	return provider.checkError(provider.client.Get(url))
+}
+
+// When err is nil, resp always contains a non-nil resp.Body.
+// Caller should close resp.Body when done reading from it.
+func (provider *Provider) post(url, contentType string, body io.Reader) (*http.Response, error) {
+	return provider.checkError(provider.client.Post(url, contentType, body))
+}
+
+// When err is nil, resp always contains a non-nil resp.Body.
+// Caller should close resp.Body when done reading from it.
+func (provider *Provider) do(req *http.Request) (*http.Response, error) {
+	return provider.checkError(provider.client.Do(req))
+}
+
+func (provider *Provider) checkError(response *http.Response, errInOperation error) (*http.Response, error) {
+	if errInOperation != nil {
+		return response, errInOperation
+	// Check only if status != 200 (and not status >= 300). Agent APIs return only 200 on success.
+	} else if response.StatusCode != http.StatusOK {
+		body, err := ioutil.ReadAll(response.Body)
+		response.Body.Close()
+		response.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
+		if err != nil {
+			return response, err
+		}
+
+		errorMsg := strings.ReplaceAll(string(body), "\n", ";")
+		return response, fmt.Errorf("got response with status code: %d, body: %s", response.StatusCode, errorMsg)
+	}
+
+	return response, nil
+}
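The rewind in checkError is what lets callers such as GetGeneralStats still read the body after the helper has consumed it for the error message. A minimal standalone sketch of that read-and-rewind pattern, not the provider code itself:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// readAndRewind drains resp.Body for inspection, then replaces it with a
// fresh reader over the same bytes so the body stays readable downstream.
func readAndRewind(resp *http.Response) ([]byte, error) {
	body, err := io.ReadAll(resp.Body)
	resp.Body.Close()
	resp.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
	return body, err
}

func main() {
	resp := &http.Response{
		StatusCode: http.StatusBadRequest,
		Body:       io.NopCloser(strings.NewReader("first line\nsecond line")),
	}

	body, _ := readAndRewind(resp)
	fmt.Println(strings.ReplaceAll(string(body), "\n", ";")) // "first line;second line"

	again, _ := io.ReadAll(resp.Body) // still readable after the rewind
	fmt.Println(string(again))
}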
@@ -1,10 +1,9 @@
 package cmd

 import (
-	"fmt"
 	"github.com/spf13/cobra"
-	"github.com/up9inc/mizu/cli/config"
 	"github.com/up9inc/mizu/cli/telemetry"
+	"github.com/up9inc/mizu/shared/logger"
 )

@@ -12,13 +11,13 @@ var installCmd = &cobra.Command{
 	Short: "Installs mizu components",
 	RunE: func(cmd *cobra.Command, args []string) error {
 		go telemetry.ReportRun("install", nil)
-		runMizuInstall()
-		return nil
-	},
-	PreRunE: func(cmd *cobra.Command, args []string) error {
-		if config.Config.IsNsRestrictedMode() {
-			return fmt.Errorf("install is not supported in restricted namespace mode")
-		}
+		logger.Log.Infof("This command has been deprecated, please use helm as described below.\n\n")
+
+		logger.Log.Infof("To install stable build of Mizu on your cluster using helm, run the following command:")
+		logger.Log.Infof("	helm install mizu https://static.up9.com/mizu/helm --namespace=mizu --create-namespace\n\n")
+
+		logger.Log.Infof("To install development build of Mizu on your cluster using helm, run the following command:")
+		logger.Log.Infof("	helm install mizu https://static.up9.com/mizu/helm-develop --namespace=mizu --create-namespace")
+
 		return nil
 	},
@@ -27,4 +26,3 @@ var installCmd = &cobra.Command{

 func init() {
 	rootCmd.AddCommand(installCmd)
 }
@@ -1,81 +0,0 @@
-package cmd
-
-import (
-	"context"
-	"errors"
-	"fmt"
-
-	"github.com/creasty/defaults"
-	"github.com/up9inc/mizu/cli/config"
-	"github.com/up9inc/mizu/cli/errormessage"
-	"github.com/up9inc/mizu/cli/resources"
-	"github.com/up9inc/mizu/cli/uiUtils"
-	"github.com/up9inc/mizu/shared"
-	"github.com/up9inc/mizu/shared/logger"
-	k8serrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func runMizuInstall() {
-	kubernetesProvider, err := getKubernetesProviderForCli()
-	if err != nil {
-		return
-	}
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel() // cancel will be called when this function exits
-
-	var serializedValidationRules string
-	var serializedContract string
-
-	var defaultMaxEntriesDBSizeBytes int64 = 200 * 1000 * 1000
-
-	defaultResources := shared.Resources{}
-	if err := defaults.Set(&defaultResources); err != nil {
-		logger.Log.Debug(err)
-	}
-
-	mizuAgentConfig := getInstallMizuAgentConfig(defaultMaxEntriesDBSizeBytes, defaultResources)
-	serializedMizuConfig, err := getSerializedMizuAgentConfig(mizuAgentConfig)
-	if err != nil {
-		logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error serializing mizu config: %v", errormessage.FormatError(err)))
-		return
-	}
-
-	if err = resources.CreateInstallMizuResources(ctx, kubernetesProvider, serializedValidationRules,
-		serializedContract, serializedMizuConfig, config.Config.IsNsRestrictedMode(),
-		config.Config.MizuResourcesNamespace, config.Config.AgentImage,
-		config.Config.KratosImage, config.Config.KetoImage,
-		nil, defaultMaxEntriesDBSizeBytes, defaultResources, config.Config.ImagePullPolicy(),
-		config.Config.LogLevel(), false); err != nil {
-		var statusError *k8serrors.StatusError
-		if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
-			logger.Log.Info("Mizu is already running in this namespace, run `mizu clean` to remove the currently running Mizu instance")
-		} else {
-			defer resources.CleanUpMizuResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.MizuResourcesNamespace)
-			logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error creating resources: %v", errormessage.FormatError(err)))
-		}
-
-		return
-	}
-
-	logger.Log.Infof(uiUtils.Magenta, "Installation completed, run `mizu view` to connect to the mizu daemon instance")
-}
-
-func getInstallMizuAgentConfig(maxDBSizeBytes int64, tapperResources shared.Resources) *shared.MizuAgentConfig {
-	mizuAgentConfig := shared.MizuAgentConfig{
-		MaxDBSizeBytes:         maxDBSizeBytes,
-		AgentImage:             config.Config.AgentImage,
-		PullPolicy:             config.Config.ImagePullPolicyStr,
-		LogLevel:               config.Config.LogLevel(),
-		TapperResources:        tapperResources,
-		MizuResourcesNamespace: config.Config.MizuResourcesNamespace,
-		AgentDatabasePath:      shared.DataDirPath,
-		StandaloneMode:         true,
-		ServiceMap:             config.Config.ServiceMap,
-		OAS:                    config.Config.OAS,
-		Elastic:                config.Config.Elastic,
-	}
-
-	return &mizuAgentConfig
-}
@@ -120,4 +120,5 @@ func init() {
 	tapCmd.Flags().String(configStructs.EnforcePolicyFile, defaultTapConfig.EnforcePolicyFile, "Yaml file path with policy rules")
 	tapCmd.Flags().String(configStructs.ContractFile, defaultTapConfig.ContractFile, "OAS/Swagger file to validate to monitor the contracts")
 	tapCmd.Flags().Bool(configStructs.ServiceMeshName, defaultTapConfig.ServiceMesh, "Record decrypted traffic if the cluster is configured with a service mesh and with mtls")
+	tapCmd.Flags().Bool(configStructs.TlsName, defaultTapConfig.Tls, "Record tls traffic")
 }
@@ -162,6 +162,7 @@ func getTapMizuAgentConfig() *shared.MizuAgentConfig {
 		AgentDatabasePath:      shared.DataDirPath,
 		ServiceMap:             config.Config.ServiceMap,
 		OAS:                    config.Config.OAS,
+		Telemetry:              config.Config.Telemetry,
 		Elastic:                config.Config.Elastic,
 	}

@@ -200,6 +201,7 @@ func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider
 		MizuApiFilteringOptions:  mizuApiFilteringOptions,
 		MizuServiceAccountExists: state.mizuServiceAccountExists,
 		ServiceMesh:              config.Config.Tap.ServiceMesh,
+		Tls:                      config.Config.Tap.Tls,
 	}, startTime)

 	if err != nil {
@@ -3,13 +3,13 @@ package cmd
 import (
 	"context"
 	"fmt"
-	"github.com/up9inc/mizu/cli/utils"
 	"net/http"

+	"github.com/up9inc/mizu/cli/utils"
+
 	"github.com/up9inc/mizu/cli/apiserver"
 	"github.com/up9inc/mizu/cli/config"
 	"github.com/up9inc/mizu/cli/mizu/fsUtils"
 	"github.com/up9inc/mizu/cli/mizu/version"
 	"github.com/up9inc/mizu/cli/uiUtils"
 	"github.com/up9inc/mizu/shared/kubernetes"
 	"github.com/up9inc/mizu/shared/logger"
@@ -62,14 +62,5 @@ func runMizuView() {
 		uiUtils.OpenBrowser(url)
 	}

-	if isCompatible, err := version.CheckVersionCompatibility(apiServerProvider); err != nil {
-		logger.Log.Errorf("Failed to check versions compatibility %v", err)
-		cancel()
-		return
-	} else if !isCompatible {
-		cancel()
-		return
-	}
-
 	utils.WaitForFinish(ctx, cancel)
 }
@@ -38,7 +38,7 @@ type ConfigStruct struct {
 	ConfigFilePath string               `yaml:"config-path,omitempty" readonly:""`
 	HeadlessMode   bool                 `yaml:"headless" default:"false"`
 	LogLevelStr    string               `yaml:"log-level,omitempty" default:"INFO" readonly:""`
-	ServiceMap     bool                 `yaml:"service-map" default:"false"`
+	ServiceMap     bool                 `yaml:"service-map" default:"true"`
 	OAS            bool                 `yaml:"oas,omitempty" default:"false" readonly:""`
 	Elastic        shared.ElasticConfig `yaml:"elastic"`
 }
@@ -23,6 +23,7 @@ const (
 	EnforcePolicyFile = "traffic-validation-file"
 	ContractFile      = "contract"
 	ServiceMeshName   = "service-mesh"
+	TlsName           = "tls"
 )

 type TapConfig struct {
@@ -45,6 +46,7 @@ type TapConfig struct {
 	ApiServerResources shared.Resources `yaml:"api-server-resources"`
 	TapperResources    shared.Resources `yaml:"tapper-resources"`
 	ServiceMesh        bool             `yaml:"service-mesh" default:"false"`
+	Tls                bool             `yaml:"tls" default:"false"`
 }

 func (config *TapConfig) PodRegex() *regexp.Regexp {
@@ -46,6 +46,7 @@ type TapperSyncerConfig struct {
 	MizuApiFilteringOptions  api.TrafficFilteringOptions
 	MizuServiceAccountExists bool
 	ServiceMesh              bool
+	Tls                      bool
 }

 func CreateAndStartMizuTapperSyncer(ctx context.Context, kubernetesProvider *Provider, config TapperSyncerConfig, startTime time.Time) (*MizuTapperSyncer, error) {
@@ -324,6 +325,7 @@ func (tapperSyncer *MizuTapperSyncer) updateMizuTappers() error {
 		tapperSyncer.config.MizuApiFilteringOptions,
 		tapperSyncer.config.LogLevel,
 		tapperSyncer.config.ServiceMesh,
+		tapperSyncer.config.Tls,
 	); err != nil {
 		return err
 	}
@@ -51,6 +51,8 @@ const (
 	fieldManagerName = "mizu-manager"
 	procfsVolumeName = "proc"
 	procfsMountPath  = "/hostproc"
+	sysfsVolumeName  = "sys"
+	sysfsMountPath   = "/sys"
 )

 func NewProvider(kubeConfigPath string) (*Provider, error) {
@@ -795,7 +797,7 @@ func (provider *Provider) CreateConfigMap(ctx context.Context, namespace string,
 	return nil
 }

-func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, apiServerPodIp string, nodeToTappedPodMap map[string][]core.Pod, serviceAccountName string, resources shared.Resources, imagePullPolicy core.PullPolicy, mizuApiFilteringOptions api.TrafficFilteringOptions, logLevel logging.Level, serviceMesh bool) error {
+func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, apiServerPodIp string, nodeToTappedPodMap map[string][]core.Pod, serviceAccountName string, resources shared.Resources, imagePullPolicy core.PullPolicy, mizuApiFilteringOptions api.TrafficFilteringOptions, logLevel logging.Level, serviceMesh bool, tls bool) error {
 	logger.Log.Debugf("Applying %d tapper daemon sets, ns: %s, daemonSetName: %s, podImage: %s, tapperPodName: %s", len(nodeToTappedPodMap), namespace, daemonSetName, podImage, tapperPodName)

 	if len(nodeToTappedPodMap) == 0 {
@@ -821,7 +823,15 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
 	}

 	if serviceMesh {
-		mizuCmd = append(mizuCmd, "--procfs", procfsMountPath, "--servicemesh")
+		mizuCmd = append(mizuCmd, "--servicemesh")
+	}
+
+	if tls {
+		mizuCmd = append(mizuCmd, "--tls")
+	}
+
+	if serviceMesh || tls {
+		mizuCmd = append(mizuCmd, "--procfs", procfsMountPath)
 	}

 	agentContainer := applyconfcore.Container()
@@ -829,12 +839,21 @@
 	agentContainer.WithImage(podImage)
 	agentContainer.WithImagePullPolicy(imagePullPolicy)

-	caps := applyconfcore.Capabilities().WithDrop("ALL").WithAdd("NET_RAW").WithAdd("NET_ADMIN")
+	caps := applyconfcore.Capabilities().WithDrop("ALL")

-	if serviceMesh {
-		caps = caps.WithAdd("SYS_ADMIN") // for reading /proc/PID/net/ns
-		caps = caps.WithAdd("SYS_PTRACE") // for setting netns to other process
-		caps = caps.WithAdd("DAC_OVERRIDE") // for reading /proc/PID/environ
+	caps = caps.WithAdd("NET_RAW").WithAdd("NET_ADMIN") // to listen to traffic using libpcap
+
+	if serviceMesh || tls {
+		caps = caps.WithAdd("SYS_ADMIN") // to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8)
+		caps = caps.WithAdd("SYS_PTRACE") // to set netns to other process + to open libssl.so of other process
+
+		if serviceMesh {
+			caps = caps.WithAdd("DAC_OVERRIDE") // to read /proc/PID/environ
+		}
+
+		if tls {
+			caps = caps.WithAdd("SYS_RESOURCE") // to change rlimits for eBPF
+		}
 	}

 	agentContainer.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))
@@ -910,8 +929,15 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
 	//
 	procfsVolume := applyconfcore.Volume()
 	procfsVolume.WithName(procfsVolumeName).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath("/proc"))
-	volumeMount := applyconfcore.VolumeMount().WithName(procfsVolumeName).WithMountPath(procfsMountPath).WithReadOnly(true)
-	agentContainer.WithVolumeMounts(volumeMount)
+	procfsVolumeMount := applyconfcore.VolumeMount().WithName(procfsVolumeName).WithMountPath(procfsMountPath).WithReadOnly(true)
+	agentContainer.WithVolumeMounts(procfsVolumeMount)
+
+	// We need access to /sys in order to install certain eBPF tracepoints
+	//
+	sysfsVolume := applyconfcore.Volume()
+	sysfsVolume.WithName(sysfsVolumeName).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath("/sys"))
+	sysfsVolumeMount := applyconfcore.VolumeMount().WithName(sysfsVolumeName).WithMountPath(sysfsMountPath).WithReadOnly(true)
+	agentContainer.WithVolumeMounts(sysfsVolumeMount)

 	volumeName := ConfigMapName
 	configMapVolume := applyconfcore.VolumeApplyConfiguration{
@@ -941,7 +967,7 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
 	podSpec.WithContainers(agentContainer)
 	podSpec.WithAffinity(affinity)
 	podSpec.WithTolerations(noExecuteToleration, noScheduleToleration)
-	podSpec.WithVolumes(&configMapVolume, procfsVolume)
+	podSpec.WithVolumes(&configMapVolume, procfsVolume, sysfsVolume)

 	podTemplate := applyconfcore.PodTemplateSpec()
 	podTemplate.WithLabels(map[string]string{
@@ -30,11 +30,24 @@ func getMinimizedPod(fullPod core.Pod) core.Pod {
 			Name: fullPod.Name,
 		},
 		Status: v1.PodStatus{
-			PodIP: fullPod.Status.PodIP,
+			PodIP:             fullPod.Status.PodIP,
+			ContainerStatuses: getMinimizedContainerStatuses(fullPod),
 		},
 	}
 }

+func getMinimizedContainerStatuses(fullPod core.Pod) []v1.ContainerStatus {
+	result := make([]v1.ContainerStatus, len(fullPod.Status.ContainerStatuses))
+
+	for i, container := range fullPod.Status.ContainerStatuses {
+		result[i] = v1.ContainerStatus{
+			ContainerID: container.ContainerID,
+		}
+	}
+
+	return result
+}
+
 func excludeMizuPods(pods []core.Pod) []core.Pod {
 	mizuPrefixRegex := regexp.MustCompile("^" + MizuResourcesPrefix)
@@ -15,6 +15,7 @@ type WebSocketMessageType string

const (
	WebSocketMessageTypeEntry         WebSocketMessageType = "entry"
	WebSocketMessageTypeFullEntry     WebSocketMessageType = "fullEntry"
	WebSocketMessageTypeTappedEntry   WebSocketMessageType = "tappedEntry"
	WebSocketMessageTypeUpdateStatus  WebSocketMessageType = "status"
	WebSocketMessageTypeAnalyzeStatus WebSocketMessageType = "analyzeStatus"
@@ -43,6 +44,7 @@ type MizuAgentConfig struct {
	StandaloneMode bool          `json:"standaloneMode"`
	ServiceMap     bool          `json:"serviceMap"`
	OAS            bool          `json:"oas"`
	Telemetry      bool          `json:"telemetry"`
	Elastic        ElasticConfig `json:"elastic"`
}
@@ -107,6 +107,7 @@ type Dissector interface {

type RequestResponseMatcher interface {
	GetMap() *sync.Map
	SetMaxTry(value int)
}

type Emitting struct {
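The SetMaxTry method added to the interface above can be a no-op for protocols that don't need retry bookkeeping, as the HTTP matcher later in this diff shows. A minimal sketch of a matcher satisfying the widened interface; the struct and field names here are illustrative, not the repository's:

package main

import (
	"fmt"
	"sync"
)

// RequestResponseMatcher mirrors the widened interface from the diff.
type RequestResponseMatcher interface {
	GetMap() *sync.Map
	SetMaxTry(value int)
}

// simpleMatcher is a hypothetical implementation: it keeps unmatched
// messages in a sync.Map and ignores SetMaxTry, like protocols that
// pair request/response purely by stream identity.
type simpleMatcher struct {
	openMessagesMap *sync.Map
}

func (m *simpleMatcher) GetMap() *sync.Map   { return m.openMessagesMap }
func (m *simpleMatcher) SetMaxTry(value int) {} // no-op: no retry bookkeeping needed

func main() {
	var matcher RequestResponseMatcher = &simpleMatcher{openMessagesMap: &sync.Map{}}
	matcher.SetMaxTry(3)
	matcher.GetMap().Store("ident", "pending-request")
	v, _ := matcher.GetMap().Load("ident")
	fmt.Println(v)
}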
16
tap/extensions/amqp/Makefile
Normal file
@@ -0,0 +1,16 @@
skipbin := $$(find bin -mindepth 1 -maxdepth 1)
skipexpect := $$(find expect -mindepth 1 -maxdepth 1)

test: test-pull-bin test-pull-expect
	@MIZU_TEST=1 go test -v ./... -coverpkg=./... -race -coverprofile=coverage.out -covermode=atomic

test-update: test-pull-bin
	@MIZU_TEST=1 TEST_UPDATE=1 go test -v ./... -coverpkg=./... -coverprofile=coverage.out -covermode=atomic

test-pull-bin:
	@mkdir -p bin
	@[ "${skipbin}" ] && echo "Skipping downloading BINs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp gs://static.up9.io/mizu/test-pcap/bin/amqp/\*.bin bin

test-pull-expect:
	@mkdir -p expect
	@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect/amqp/\* expect
@@ -2,8 +2,16 @@ module github.com/up9inc/mizu/tap/extensions/amqp

go 1.17

require github.com/up9inc/mizu/tap/api v0.0.0
require (
	github.com/stretchr/testify v1.7.0
	github.com/up9inc/mizu/tap/api v0.0.0
)

require github.com/google/martian v2.1.0+incompatible // indirect
require (
	github.com/davecgh/go-spew v1.1.0 // indirect
	github.com/google/martian v2.1.0+incompatible // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
)

replace github.com/up9inc/mizu/tap/api v0.0.0 => ../../api
@@ -1,2 +1,13 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -3,6 +3,7 @@ package amqp
import (
	"encoding/json"
	"fmt"
	"sort"
	"strconv"
	"time"

@@ -282,6 +283,9 @@ func representBasicPublish(event map[string]interface{}) []interface{} {
		Selector: fmt.Sprintf(`request.properties.headers["%s"]`, name),
	})
}
sort.Slice(headers, func(i, j int) bool {
	return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
	Type: api.TABLE,
@@ -366,6 +370,9 @@ func representBasicDeliver(event map[string]interface{}) []interface{} {
		Selector: fmt.Sprintf(`request.properties.headers["%s"]`, name),
	})
}
sort.Slice(headers, func(i, j int) bool {
	return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
	Type: api.TABLE,
@@ -438,6 +445,9 @@ func representQueueDeclare(event map[string]interface{}) []interface{} {
		Selector: fmt.Sprintf(`request.arguments["%s"]`, name),
	})
}
sort.Slice(headers, func(i, j int) bool {
	return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
	Type: api.TABLE,
@@ -504,6 +514,9 @@ func representExchangeDeclare(event map[string]interface{}) []interface{} {
		Selector: fmt.Sprintf(`request.arguments["%s"]`, name),
	})
}
sort.Slice(headers, func(i, j int) bool {
	return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
	Type: api.TABLE,
@@ -565,6 +578,9 @@ func representConnectionStart(event map[string]interface{}) []interface{} {
		Selector: fmt.Sprintf(`request.serverProperties["%s"]`, name),
	})
}
sort.Slice(headers, func(i, j int) bool {
	return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
	Type: api.TABLE,
@@ -656,6 +672,9 @@ func representQueueBind(event map[string]interface{}) []interface{} {
		Selector: fmt.Sprintf(`request.arguments["%s"]`, name),
	})
}
sort.Slice(headers, func(i, j int) bool {
	return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
	Type: api.TABLE,
@@ -717,6 +736,9 @@ func representBasicConsume(event map[string]interface{}) []interface{} {
		Selector: fmt.Sprintf(`request.arguments["%s"]`, name),
	})
}
sort.Slice(headers, func(i, j int) bool {
	return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
	Type: api.TABLE,
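Each hunk above inserts the same fix: sort the header rows by name before marshaling, so the JSON representation is deterministic despite Go's randomized map iteration order. A minimal standalone sketch of the pattern (the row struct here is illustrative, not the repository's api.TableData):

package main

import (
	"encoding/json"
	"fmt"
	"sort"
)

type row struct {
	Name     string `json:"name"`
	Value    string `json:"value"`
	Selector string `json:"selector"`
}

func main() {
	// Map iteration order is randomized in Go, so rows built from a map
	// arrive in arbitrary order...
	headers := map[string]string{"b": "2", "a": "1", "c": "3"}
	var rows []row
	for name, value := range headers {
		rows = append(rows, row{Name: name, Value: value,
			Selector: fmt.Sprintf(`request.properties.headers[%q]`, name)})
	}
	// ...sorting by name makes the marshaled output stable, which is what
	// golden-file tests compare against.
	sort.Slice(rows, func(i, j int) bool { return rows[i].Name < rows[j].Name })
	marshaled, _ := json.Marshal(rows)
	fmt.Println(string(marshaled))
}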
@@ -8,6 +8,7 @@ import (
	"io"
	"log"
	"strconv"
	"time"

	"github.com/up9inc/mizu/tap/api"
)
@@ -85,7 +86,7 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
	frame, err := r.ReadFrame()
	if err == io.EOF {
		// We must read until we see an EOF... very important!
		return errors.New("AMQP EOF")
		return nil
	}

	switch f := frame.(type) {
@@ -96,6 +97,12 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
	// start content state
	header = f
	remaining = int(header.Size)

	// Workaround for `Time.MarshalJSON: year outside of range [0,9999]` error
	if header.Properties.Timestamp.Year() > 9999 {
		header.Properties.Timestamp = time.Time{}.UTC()
	}

	switch lastMethodFrameMessage.(type) {
	case *BasicPublish:
		eventBasicPublish.Properties = header.Properties
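The first change above makes a clean EOF terminate dissection successfully instead of surfacing it as an error. A sketch of the same read-loop convention, assuming a generic frame reader (readFrame below is a stand-in, not the repository's API):

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// readFrame is a stand-in: it reads one newline-delimited "frame".
func readFrame(r *bufio.Reader) (string, error) {
	return r.ReadString('\n')
}

// dissect consumes frames until EOF; a clean EOF is a normal end of
// stream, so it returns nil rather than an error.
func dissect(r *bufio.Reader) error {
	for {
		frame, err := readFrame(r)
		if err == io.EOF {
			return nil // stream ended cleanly
		}
		if err != nil {
			return err // a real failure
		}
		fmt.Printf("frame: %s", frame)
	}
}

func main() {
	err := dissect(bufio.NewReader(strings.NewReader("a\nb\n")))
	fmt.Println("err:", err)
}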
289
tap/extensions/amqp/main_test.go
Normal file
@@ -0,0 +1,289 @@
package amqp

import (
	"bufio"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"path"
	"path/filepath"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/up9inc/mizu/tap/api"
)

const (
	binDir          = "bin"
	patternBin      = "*_req.bin"
	patternDissect  = "*.json"
	msgDissecting   = "Dissecting:"
	msgAnalyzing    = "Analyzing:"
	msgRepresenting = "Representing:"
	respSuffix      = "_res.bin"
	expectDir       = "expect"
	dissectDir      = "dissect"
	analyzeDir      = "analyze"
	representDir    = "represent"
	testUpdate      = "TEST_UPDATE"
)

func TestRegister(t *testing.T) {
	dissector := NewDissector()
	extension := &api.Extension{}
	dissector.Register(extension)
	assert.Equal(t, "amqp", extension.Protocol.Name)
}

func TestMacros(t *testing.T) {
	expectedMacros := map[string]string{
		"amqp": `proto.name == "amqp"`,
	}
	dissector := NewDissector()
	macros := dissector.Macros()
	assert.Equal(t, expectedMacros, macros)
}

func TestPing(t *testing.T) {
	dissector := NewDissector()
	dissector.Ping()
}

func TestDissect(t *testing.T) {
	_, testUpdateEnabled := os.LookupEnv(testUpdate)

	expectDirDissect := path.Join(expectDir, dissectDir)

	if testUpdateEnabled {
		os.RemoveAll(expectDirDissect)
		err := os.MkdirAll(expectDirDissect, 0775)
		assert.Nil(t, err)
	}

	dissector := NewDissector()
	paths, err := filepath.Glob(path.Join(binDir, patternBin))
	if err != nil {
		log.Fatal(err)
	}

	options := &api.TrafficFilteringOptions{
		IgnoredUserAgents: []string{},
	}

	for _, _path := range paths {
		basePath := _path[:len(_path)-8]

		// Channel to verify the output
		itemChannel := make(chan *api.OutputChannelItem)
		var emitter api.Emitter = &api.Emitting{
			AppStats:      &api.AppStats{},
			OutputChannel: itemChannel,
		}

		var items []*api.OutputChannelItem
		stop := make(chan bool)

		go func() {
			for {
				select {
				case <-stop:
					return
				case item := <-itemChannel:
					items = append(items, item)
				}
			}
		}()

		// Stream level
		counterPair := &api.CounterPair{
			Request:  0,
			Response: 0,
		}
		superIdentifier := &api.SuperIdentifier{}

		// Request
		pathClient := _path
		fmt.Printf("%s %s\n", msgDissecting, pathClient)
		fileClient, err := os.Open(pathClient)
		assert.Nil(t, err)

		bufferClient := bufio.NewReader(fileClient)
		tcpIDClient := &api.TcpID{
			SrcIP:   "1",
			DstIP:   "2",
			SrcPort: "1",
			DstPort: "2",
		}
		reqResMatcher := dissector.NewResponseRequestMatcher()
		err = dissector.Dissect(bufferClient, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			panic(err)
		}

		// Response
		pathServer := basePath + respSuffix
		fmt.Printf("%s %s\n", msgDissecting, pathServer)
		fileServer, err := os.Open(pathServer)
		assert.Nil(t, err)

		bufferServer := bufio.NewReader(fileServer)
		tcpIDServer := &api.TcpID{
			SrcIP:   "2",
			DstIP:   "1",
			SrcPort: "2",
			DstPort: "1",
		}
		err = dissector.Dissect(bufferServer, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			panic(err)
		}

		fileClient.Close()
		fileServer.Close()

		pathExpect := path.Join(expectDirDissect, fmt.Sprintf("%s.json", basePath[4:]))

		time.Sleep(10 * time.Millisecond)

		stop <- true

		marshaled, err := json.Marshal(items)
		assert.Nil(t, err)

		if testUpdateEnabled {
			if len(items) > 0 {
				err = os.WriteFile(pathExpect, marshaled, 0644)
				assert.Nil(t, err)
			}
		} else {
			if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
				assert.Len(t, items, 0)
			} else {
				expectedBytes, err := ioutil.ReadFile(pathExpect)
				assert.Nil(t, err)

				assert.JSONEq(t, string(expectedBytes), string(marshaled))
			}
		}
	}
}

func TestAnalyze(t *testing.T) {
	_, testUpdateEnabled := os.LookupEnv(testUpdate)

	expectDirDissect := path.Join(expectDir, dissectDir)
	expectDirAnalyze := path.Join(expectDir, analyzeDir)

	if testUpdateEnabled {
		os.RemoveAll(expectDirAnalyze)
		err := os.MkdirAll(expectDirAnalyze, 0775)
		assert.Nil(t, err)
	}

	dissector := NewDissector()
	paths, err := filepath.Glob(path.Join(expectDirDissect, patternDissect))
	if err != nil {
		log.Fatal(err)
	}

	for _, _path := range paths {
		fmt.Printf("%s %s\n", msgAnalyzing, _path)

		bytes, err := ioutil.ReadFile(_path)
		assert.Nil(t, err)

		var items []*api.OutputChannelItem
		err = json.Unmarshal(bytes, &items)
		assert.Nil(t, err)

		var entries []*api.Entry
		for _, item := range items {
			entry := dissector.Analyze(item, "", "", "")
			entries = append(entries, entry)
		}

		pathExpect := path.Join(expectDirAnalyze, filepath.Base(_path))

		marshaled, err := json.Marshal(entries)
		assert.Nil(t, err)

		if testUpdateEnabled {
			if len(entries) > 0 {
				err = os.WriteFile(pathExpect, marshaled, 0644)
				assert.Nil(t, err)
			}
		} else {
			if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
				assert.Len(t, items, 0)
			} else {
				expectedBytes, err := ioutil.ReadFile(pathExpect)
				assert.Nil(t, err)

				assert.JSONEq(t, string(expectedBytes), string(marshaled))
			}
		}
	}
}

func TestRepresent(t *testing.T) {
	_, testUpdateEnabled := os.LookupEnv(testUpdate)

	expectDirAnalyze := path.Join(expectDir, analyzeDir)
	expectDirRepresent := path.Join(expectDir, representDir)

	if testUpdateEnabled {
		os.RemoveAll(expectDirRepresent)
		err := os.MkdirAll(expectDirRepresent, 0775)
		assert.Nil(t, err)
	}

	dissector := NewDissector()
	paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternDissect))
	if err != nil {
		log.Fatal(err)
	}

	for _, _path := range paths {
		fmt.Printf("%s %s\n", msgRepresenting, _path)

		bytes, err := ioutil.ReadFile(_path)
		assert.Nil(t, err)

		var entries []*api.Entry
		err = json.Unmarshal(bytes, &entries)
		assert.Nil(t, err)

		var objects []string
		for _, entry := range entries {
			object, _, err := dissector.Represent(entry.Request, entry.Response)
			assert.Nil(t, err)
			objects = append(objects, string(object))
		}

		pathExpect := path.Join(expectDirRepresent, filepath.Base(_path))

		marshaled, err := json.Marshal(objects)
		assert.Nil(t, err)

		if testUpdateEnabled {
			if len(objects) > 0 {
				err = os.WriteFile(pathExpect, marshaled, 0644)
				assert.Nil(t, err)
			}
		} else {
			if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
				assert.Len(t, objects, 0)
			} else {
				expectedBytes, err := ioutil.ReadFile(pathExpect)
				assert.Nil(t, err)

				assert.JSONEq(t, string(expectedBytes), string(marshaled))
			}
		}
	}
}
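The three tests above share one golden-file convention: with TEST_UPDATE set, the expected JSON under expect/ is regenerated; without it, output is compared against the stored files with assert.JSONEq. A condensed sketch of that pattern (the helper name and layout are illustrative):

package golden

import (
	"encoding/json"
	"os"
	"path"
	"testing"

	"github.com/stretchr/testify/assert"
)

// checkGolden either updates or verifies a golden JSON file, mirroring
// the TEST_UPDATE switch used by the extension tests.
func checkGolden(t *testing.T, name string, got interface{}) {
	marshaled, err := json.Marshal(got)
	assert.Nil(t, err)

	pathExpect := path.Join("expect", name+".json")
	if _, update := os.LookupEnv("TEST_UPDATE"); update {
		// Regenerate the expectation instead of comparing against it.
		assert.Nil(t, os.WriteFile(pathExpect, marshaled, 0644))
		return
	}

	expected, err := os.ReadFile(pathExpect)
	assert.Nil(t, err)
	assert.JSONEq(t, string(expected), string(marshaled))
}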
@@ -140,7 +140,7 @@ func readTimestamp(r io.Reader) (v time.Time, err error) {
	if err = binary.Read(r, binary.BigEndian, &sec); err != nil {
		return
	}
	return time.Unix(sec, 0), nil
	return time.Unix(sec, 0).UTC(), nil
}

/*
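The one-line change above normalizes decoded AMQP timestamps to UTC, so the JSON output no longer depends on the host's local time zone. A self-contained sketch of decoding a big-endian POSIX timestamp the same way:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"time"
)

// readTimestamp decodes a 64-bit big-endian POSIX time (the AMQP wire
// format for timestamps) and pins the result to UTC.
func readTimestamp(r io.Reader) (time.Time, error) {
	var sec int64
	if err := binary.Read(r, binary.BigEndian, &sec); err != nil {
		return time.Time{}, err
	}
	return time.Unix(sec, 0).UTC(), nil
}

func main() {
	buf := new(bytes.Buffer)
	binary.Write(buf, binary.BigEndian, int64(1640995200)) // 2022-01-01T00:00:00Z
	t, _ := readTimestamp(buf)
	fmt.Println(t) // prints the same instant on every machine
}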
File diff suppressed because it is too large
@@ -223,41 +223,8 @@ type Decimal struct {
//
type Table map[string]interface{}

func validateField(f interface{}) error {
	switch fv := f.(type) {
	case nil, bool, byte, int, int16, int32, int64, float32, float64, string, []byte, Decimal, time.Time:
		return nil

	case []interface{}:
		for _, v := range fv {
			if err := validateField(v); err != nil {
				return fmt.Errorf("in array %s", err)
			}
		}
		return nil

	case Table:
		for k, v := range fv {
			if err := validateField(v); err != nil {
				return fmt.Errorf("table field %q %s", k, err)
			}
		}
		return nil
	}

	return fmt.Errorf("value %T not supported", f)
}

// Validate returns an error if any Go types in the table are incompatible with AMQP types.
func (t Table) Validate() error {
	return validateField(t)
}

type Message interface {
	id() (uint16, uint16)
	wait() bool
	read(io.Reader) error
	write(io.Writer) error
}

/*
@@ -286,8 +253,6 @@ system calls to read a frame.

*/
type frame interface {
	write(io.Writer) error
	channel() uint16
}

type AmqpReader struct {
@@ -323,8 +288,6 @@ type MethodFrame struct {
	Method    Message
}

func (f *MethodFrame) channel() uint16 { return f.ChannelId }

/*
Heartbeating is a technique designed to undo one of TCP/IP's features, namely
its ability to recover from a broken physical connection by closing only after
@@ -338,8 +301,6 @@ type HeartbeatFrame struct {
	ChannelId uint16
}

func (f *HeartbeatFrame) channel() uint16 { return f.ChannelId }

/*
Certain methods (such as Basic.Publish, Basic.Deliver, etc.) are formally
defined as carrying content. When a peer sends such a method frame, it always
@@ -367,8 +328,6 @@ type HeaderFrame struct {
	Properties Properties
}

func (f *HeaderFrame) channel() uint16 { return f.ChannelId }

/*
Content is the application data we carry from client-to-client via the AMQP
server. Content is, roughly speaking, a set of properties plus a binary data
@@ -388,5 +347,3 @@ type BodyFrame struct {
	ChannelId uint16
	Body      []byte
}

func (f *BodyFrame) channel() uint16 { return f.ChannelId }
@@ -1,403 +0,0 @@
// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Source code and contact info at http://github.com/streadway/amqp

package amqp

import (
	"bytes"
	"encoding/binary"
	"errors"
	"io"
	"math"
	"time"
)

func (f *MethodFrame) write(w io.Writer) (err error) {
	var payload bytes.Buffer

	if f.Method == nil {
		return errors.New("malformed frame: missing method")
	}

	class, method := f.Method.id()

	if err = binary.Write(&payload, binary.BigEndian, class); err != nil {
		return
	}

	if err = binary.Write(&payload, binary.BigEndian, method); err != nil {
		return
	}

	if err = f.Method.write(&payload); err != nil {
		return
	}

	return writeFrame(w, frameMethod, f.ChannelId, payload.Bytes())
}

// Heartbeat
//
// Payload is empty
func (f *HeartbeatFrame) write(w io.Writer) (err error) {
	return writeFrame(w, frameHeartbeat, f.ChannelId, []byte{})
}

// CONTENT HEADER
// 0          2        4           12               14
// +----------+--------+-----------+----------------+------------- - -
// | class-id | weight | body size | property flags | property list...
// +----------+--------+-----------+----------------+------------- - -
//    short     short    long long       short        remainder...
//
func (f *HeaderFrame) write(w io.Writer) (err error) {
	var payload bytes.Buffer
	var zeroTime time.Time

	if err = binary.Write(&payload, binary.BigEndian, f.ClassId); err != nil {
		return
	}

	if err = binary.Write(&payload, binary.BigEndian, f.weight); err != nil {
		return
	}

	if err = binary.Write(&payload, binary.BigEndian, f.Size); err != nil {
		return
	}

	// First pass will build the mask to be serialized, second pass will serialize
	// each of the fields that appear in the mask.

	var mask uint16

	if len(f.Properties.ContentType) > 0 {
		mask = mask | flagContentType
	}
	if len(f.Properties.ContentEncoding) > 0 {
		mask = mask | flagContentEncoding
	}
	if f.Properties.Headers != nil && len(f.Properties.Headers) > 0 {
		mask = mask | flagHeaders
	}
	if f.Properties.DeliveryMode > 0 {
		mask = mask | flagDeliveryMode
	}
	if f.Properties.Priority > 0 {
		mask = mask | flagPriority
	}
	if len(f.Properties.CorrelationId) > 0 {
		mask = mask | flagCorrelationId
	}
	if len(f.Properties.ReplyTo) > 0 {
		mask = mask | flagReplyTo
	}
	if len(f.Properties.Expiration) > 0 {
		mask = mask | flagExpiration
	}
	if len(f.Properties.MessageId) > 0 {
		mask = mask | flagMessageId
	}
	if f.Properties.Timestamp != zeroTime {
		mask = mask | flagTimestamp
	}
	if len(f.Properties.Type) > 0 {
		mask = mask | flagType
	}
	if len(f.Properties.UserId) > 0 {
		mask = mask | flagUserId
	}
	if len(f.Properties.AppId) > 0 {
		mask = mask | flagAppId
	}

	if err = binary.Write(&payload, binary.BigEndian, mask); err != nil {
		return
	}

	if hasProperty(mask, flagContentType) {
		if err = writeShortstr(&payload, f.Properties.ContentType); err != nil {
			return
		}
	}
	if hasProperty(mask, flagContentEncoding) {
		if err = writeShortstr(&payload, f.Properties.ContentEncoding); err != nil {
			return
		}
	}
	if hasProperty(mask, flagHeaders) {
		if err = writeTable(&payload, f.Properties.Headers); err != nil {
			return
		}
	}
	if hasProperty(mask, flagDeliveryMode) {
		if err = binary.Write(&payload, binary.BigEndian, f.Properties.DeliveryMode); err != nil {
			return
		}
	}
	if hasProperty(mask, flagPriority) {
		if err = binary.Write(&payload, binary.BigEndian, f.Properties.Priority); err != nil {
			return
		}
	}
	if hasProperty(mask, flagCorrelationId) {
		if err = writeShortstr(&payload, f.Properties.CorrelationId); err != nil {
			return
		}
	}
	if hasProperty(mask, flagReplyTo) {
		if err = writeShortstr(&payload, f.Properties.ReplyTo); err != nil {
			return
		}
	}
	if hasProperty(mask, flagExpiration) {
		if err = writeShortstr(&payload, f.Properties.Expiration); err != nil {
			return
		}
	}
	if hasProperty(mask, flagMessageId) {
		if err = writeShortstr(&payload, f.Properties.MessageId); err != nil {
			return
		}
	}
	if hasProperty(mask, flagTimestamp) {
		if err = binary.Write(&payload, binary.BigEndian, uint64(f.Properties.Timestamp.Unix())); err != nil {
			return
		}
	}
	if hasProperty(mask, flagType) {
		if err = writeShortstr(&payload, f.Properties.Type); err != nil {
			return
		}
	}
	if hasProperty(mask, flagUserId) {
		if err = writeShortstr(&payload, f.Properties.UserId); err != nil {
			return
		}
	}
	if hasProperty(mask, flagAppId) {
		if err = writeShortstr(&payload, f.Properties.AppId); err != nil {
			return
		}
	}

	return writeFrame(w, frameHeader, f.ChannelId, payload.Bytes())
}

// Body
//
// Payload is one byterange from the full body whose size is declared in the
// Header frame
func (f *BodyFrame) write(w io.Writer) (err error) {
	return writeFrame(w, frameBody, f.ChannelId, f.Body)
}

func writeFrame(w io.Writer, typ uint8, channel uint16, payload []byte) (err error) {
	end := []byte{frameEnd}
	size := uint(len(payload))

	_, err = w.Write([]byte{
		byte(typ),
		byte((channel & 0xff00) >> 8),
		byte((channel & 0x00ff) >> 0),
		byte((size & 0xff000000) >> 24),
		byte((size & 0x00ff0000) >> 16),
		byte((size & 0x0000ff00) >> 8),
		byte((size & 0x000000ff) >> 0),
	})

	if err != nil {
		return
	}

	if _, err = w.Write(payload); err != nil {
		return
	}

	if _, err = w.Write(end); err != nil {
		return
	}

	return
}

func writeShortstr(w io.Writer, s string) (err error) {
	b := []byte(s)

	var length = uint8(len(b))

	if err = binary.Write(w, binary.BigEndian, length); err != nil {
		return
	}

	if _, err = w.Write(b[:length]); err != nil {
		return
	}

	return
}

func writeLongstr(w io.Writer, s string) (err error) {
	b := []byte(s)

	var length = uint32(len(b))

	if err = binary.Write(w, binary.BigEndian, length); err != nil {
		return
	}

	if _, err = w.Write(b[:length]); err != nil {
		return
	}

	return
}

/*
'A': []interface{}
'D': Decimal
'F': Table
'I': int32
'S': string
'T': time.Time
'V': nil
'b': byte
'd': float64
'f': float32
'l': int64
's': int16
't': bool
'x': []byte
*/
func writeField(w io.Writer, value interface{}) (err error) {
	var buf [9]byte
	var enc []byte

	switch v := value.(type) {
	case bool:
		buf[0] = 't'
		if v {
			buf[1] = byte(1)
		} else {
			buf[1] = byte(0)
		}
		enc = buf[:2]

	case byte:
		buf[0] = 'b'
		buf[1] = byte(v)
		enc = buf[:2]

	case int16:
		buf[0] = 's'
		binary.BigEndian.PutUint16(buf[1:3], uint16(v))
		enc = buf[:3]

	case int:
		buf[0] = 'I'
		binary.BigEndian.PutUint32(buf[1:5], uint32(v))
		enc = buf[:5]

	case int32:
		buf[0] = 'I'
		binary.BigEndian.PutUint32(buf[1:5], uint32(v))
		enc = buf[:5]

	case int64:
		buf[0] = 'l'
		binary.BigEndian.PutUint64(buf[1:9], uint64(v))
		enc = buf[:9]

	case float32:
		buf[0] = 'f'
		binary.BigEndian.PutUint32(buf[1:5], math.Float32bits(v))
		enc = buf[:5]

	case float64:
		buf[0] = 'd'
		binary.BigEndian.PutUint64(buf[1:9], math.Float64bits(v))
		enc = buf[:9]

	case Decimal:
		buf[0] = 'D'
		buf[1] = byte(v.Scale)
		binary.BigEndian.PutUint32(buf[2:6], uint32(v.Value))
		enc = buf[:6]

	case string:
		buf[0] = 'S'
		binary.BigEndian.PutUint32(buf[1:5], uint32(len(v)))
		enc = append(buf[:5], []byte(v)...)

	case []interface{}: // field-array
		buf[0] = 'A'

		sec := new(bytes.Buffer)
		for _, val := range v {
			if err = writeField(sec, val); err != nil {
				return
			}
		}

		binary.BigEndian.PutUint32(buf[1:5], uint32(sec.Len()))
		if _, err = w.Write(buf[:5]); err != nil {
			return
		}

		if _, err = w.Write(sec.Bytes()); err != nil {
			return
		}

		return

	case time.Time:
		buf[0] = 'T'
		binary.BigEndian.PutUint64(buf[1:9], uint64(v.Unix()))
		enc = buf[:9]

	case Table:
		if _, err = w.Write([]byte{'F'}); err != nil {
			return
		}
		return writeTable(w, v)

	case []byte:
		buf[0] = 'x'
		binary.BigEndian.PutUint32(buf[1:5], uint32(len(v)))
		if _, err = w.Write(buf[0:5]); err != nil {
			return
		}
		if _, err = w.Write(v); err != nil {
			return
		}
		return

	case nil:
		buf[0] = 'V'
		enc = buf[:1]

	default:
		return ErrFieldType
	}

	_, err = w.Write(enc)

	return
}

func writeTable(w io.Writer, table Table) (err error) {
	var buf bytes.Buffer

	for key, val := range table {
		if err = writeShortstr(&buf, key); err != nil {
			return
		}
		if err = writeField(&buf, val); err != nil {
			return
		}
	}

	return writeLongstr(w, buf.String())
}
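writeFrame above encodes the general AMQP 0-9-1 frame layout: a one-byte type, a two-byte channel id, a four-byte payload size, the payload, and a frame-end octet. A small sketch assembling one frame into a buffer (the frame-end octet 0xCE and the heartbeat frame type 8 come from the AMQP 0-9-1 spec; the function here is a standalone re-statement, not the repository's code):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const frameEnd = 0xCE // frame-end octet from the AMQP 0-9-1 spec

// writeFrame lays out: type (1) | channel (2, big-endian) |
// size (4, big-endian) | payload | frame-end (1).
func writeFrame(buf *bytes.Buffer, typ uint8, channel uint16, payload []byte) {
	buf.WriteByte(typ)
	binary.Write(buf, binary.BigEndian, channel)
	binary.Write(buf, binary.BigEndian, uint32(len(payload)))
	buf.Write(payload)
	buf.WriteByte(frameEnd)
}

func main() {
	var buf bytes.Buffer
	writeFrame(&buf, 8, 0, nil)      // heartbeat: type 8, channel 0, empty payload
	fmt.Printf("% x\n", buf.Bytes()) // 08 00 00 00 00 00 00 ce
}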
@@ -21,6 +21,9 @@ func (matcher *requestResponseMatcher) GetMap() *sync.Map {
	return matcher.openMessagesMap
}

func (matcher *requestResponseMatcher) SetMaxTry(value int) {
}

func (matcher *requestResponseMatcher) registerRequest(ident string, request *http.Request, captureTime time.Time, protoMinor int) *api.OutputChannelItem {
	requestHTTPMessage := api.GenericMessage{
		IsRequest: true,
16
tap/extensions/kafka/Makefile
Normal file
@@ -0,0 +1,16 @@
skipbin := $$(find bin -mindepth 1 -maxdepth 1)
skipexpect := $$(find expect -mindepth 1 -maxdepth 1)

test: test-pull-bin test-pull-expect
	@MIZU_TEST=1 go test -v ./... -coverpkg=./... -race -coverprofile=coverage.out -covermode=atomic

test-update: test-pull-bin
	@MIZU_TEST=1 TEST_UPDATE=1 go test -v ./... -coverpkg=./... -coverprofile=coverage.out -covermode=atomic

test-pull-bin:
	@mkdir -p bin
	@[ "${skipbin}" ] && echo "Skipping downloading BINs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp gs://static.up9.io/mizu/test-pcap/bin/kafka/\*.bin bin

test-pull-expect:
	@mkdir -p expect
	@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect/kafka/\* expect
@@ -1,584 +0,0 @@
package kafka

import (
	"bytes"
	"fmt"
	"io"
	"sync"
	"sync/atomic"
)

// Bytes is an interface implemented by types that represent immutable
// sequences of bytes.
//
// Bytes values are used to abstract the location where record keys and
// values are read from (e.g. in-memory buffers, network sockets, files).
//
// The Close method should be called to release resources held by the object
// when the program is done with it.
//
// Bytes values are generally not safe to use concurrently from multiple
// goroutines.
type Bytes interface {
	io.ReadCloser
	// Returns the number of bytes remaining to be read from the payload.
	Len() int
}

// NewBytes constructs a Bytes value from b.
//
// The returned value references b, it does not make a copy of the backing
// array.
//
// If b is nil, nil is returned to represent a null BYTES value in the kafka
// protocol.
func NewBytes(b []byte) Bytes {
	if b == nil {
		return nil
	}
	r := new(bytesReader)
	r.Reset(b)
	return r
}

// ReadAll is similar to ioutil.ReadAll, but it takes advantage of knowing the
// length of b to minimize the memory footprint.
//
// The function returns a nil slice if b is nil.
// func ReadAll(b Bytes) ([]byte, error) {
// 	if b == nil {
// 		return nil, nil
// 	}
// 	s := make([]byte, b.Len())
// 	_, err := io.ReadFull(b, s)
// 	return s, err
// }

type bytesReader struct{ bytes.Reader }

func (*bytesReader) Close() error { return nil }

type refCount uintptr

func (rc *refCount) ref() { atomic.AddUintptr((*uintptr)(rc), 1) }

func (rc *refCount) unref(onZero func()) {
	if atomic.AddUintptr((*uintptr)(rc), ^uintptr(0)) == 0 {
		onZero()
	}
}

const (
	// Size of the memory buffer for a single page. We use a fairly
	// large size here (64 KiB) because batches exchanged with kafka
	// tend to be multiple kilobytes in size, sometimes hundreds.
	// Using large pages amortizes the overhead of the page metadata
	// and algorithms to manage the pages.
	pageSize = 65536
)

type page struct {
	refc   refCount
	offset int64
	length int
	buffer *[pageSize]byte
}

func newPage(offset int64) *page {
	p, _ := pagePool.Get().(*page)
	if p != nil {
		p.offset = offset
		p.length = 0
		p.ref()
	} else {
		p = &page{
			refc:   1,
			offset: offset,
			buffer: &[pageSize]byte{},
		}
	}
	return p
}

func (p *page) ref() { p.refc.ref() }

func (p *page) unref() { p.refc.unref(func() { pagePool.Put(p) }) }

func (p *page) slice(begin, end int64) []byte {
	i, j := begin-p.offset, end-p.offset

	if i < 0 {
		i = 0
	} else if i > pageSize {
		i = pageSize
	}

	if j < 0 {
		j = 0
	} else if j > pageSize {
		j = pageSize
	}

	if i < j {
		return p.buffer[i:j]
	}

	return nil
}

func (p *page) Cap() int { return pageSize }

func (p *page) Len() int { return p.length }

func (p *page) Size() int64 { return int64(p.length) }

func (p *page) Truncate(n int) {
	if n < p.length {
		p.length = n
	}
}

func (p *page) ReadAt(b []byte, off int64) (int, error) {
	if off -= p.offset; off < 0 || off > pageSize {
		panic("offset out of range")
	}
	if off > int64(p.length) {
		return 0, nil
	}
	return copy(b, p.buffer[off:p.length]), nil
}

func (p *page) ReadFrom(r io.Reader) (int64, error) {
	n, err := io.ReadFull(r, p.buffer[p.length:])
	if err == io.EOF || err == io.ErrUnexpectedEOF {
		err = nil
	}
	p.length += n
	return int64(n), err
}

func (p *page) WriteAt(b []byte, off int64) (int, error) {
	if off -= p.offset; off < 0 || off > pageSize {
		panic("offset out of range")
	}
	n := copy(p.buffer[off:], b)
	if end := int(off) + n; end > p.length {
		p.length = end
	}
	return n, nil
}

func (p *page) Write(b []byte) (int, error) {
	return p.WriteAt(b, p.offset+int64(p.length))
}

var (
	_ io.ReaderAt   = (*page)(nil)
	_ io.ReaderFrom = (*page)(nil)
	_ io.Writer     = (*page)(nil)
	_ io.WriterAt   = (*page)(nil)
)

type pageBuffer struct {
	refc   refCount
	pages  contiguousPages
	length int
	cursor int
}

func newPageBuffer() *pageBuffer {
	b, _ := pageBufferPool.Get().(*pageBuffer)
	if b != nil {
		b.cursor = 0
		b.refc.ref()
	} else {
		b = &pageBuffer{
			refc:  1,
			pages: make(contiguousPages, 0, 16),
		}
	}
	return b
}

func (pb *pageBuffer) unref() {
	pb.refc.unref(func() {
		pb.pages.unref()
		pb.pages.clear()
		pb.pages = pb.pages[:0]
		pb.length = 0
		pageBufferPool.Put(pb)
	})
}

func (pb *pageBuffer) newPage() *page {
	return newPage(int64(pb.length))
}

func (pb *pageBuffer) Close() error {
	return nil
}

func (pb *pageBuffer) Len() int {
	return pb.length - pb.cursor
}

func (pb *pageBuffer) Size() int64 {
	return int64(pb.length)
}

func (pb *pageBuffer) Discard(n int) (int, error) {
	remain := pb.length - pb.cursor
	if remain < n {
		n = remain
	}
	pb.cursor += n
	return n, nil
}

func (pb *pageBuffer) Truncate(n int) {
	if n < pb.length {
		pb.length = n

		if n < pb.cursor {
			pb.cursor = n
		}

		for i := range pb.pages {
			if p := pb.pages[i]; p.length <= n {
				n -= p.length
			} else {
				if n > 0 {
					pb.pages[i].Truncate(n)
					i++
				}
				pb.pages[i:].unref()
				pb.pages[i:].clear()
				pb.pages = pb.pages[:i]
				break
			}
		}
	}
}

func (pb *pageBuffer) Seek(offset int64, whence int) (int64, error) {
	c, err := seek(int64(pb.cursor), int64(pb.length), offset, whence)
	if err != nil {
		return -1, err
	}
	pb.cursor = int(c)
	return c, nil
}

func (pb *pageBuffer) ReadByte() (byte, error) {
	b := [1]byte{}
	_, err := pb.Read(b[:])
	return b[0], err
}

func (pb *pageBuffer) Read(b []byte) (int, error) {
	if pb.cursor >= pb.length {
		return 0, io.EOF
	}
	n, err := pb.ReadAt(b, int64(pb.cursor))
	pb.cursor += n
	return n, err
}

func (pb *pageBuffer) ReadAt(b []byte, off int64) (int, error) {
	return pb.pages.ReadAt(b, off)
}

func (pb *pageBuffer) ReadFrom(r io.Reader) (int64, error) {
	if len(pb.pages) == 0 {
		pb.pages = append(pb.pages, pb.newPage())
	}

	rn := int64(0)

	for {
		tail := pb.pages[len(pb.pages)-1]
		free := tail.Cap() - tail.Len()

		if free == 0 {
			tail = pb.newPage()
			free = pageSize
			pb.pages = append(pb.pages, tail)
		}

		n, err := tail.ReadFrom(r)
		pb.length += int(n)
		rn += n
		if n < int64(free) {
			return rn, err
		}
	}
}

func (pb *pageBuffer) WriteString(s string) (int, error) {
	return pb.Write([]byte(s))
}

func (pb *pageBuffer) Write(b []byte) (int, error) {
	wn := len(b)
	if wn == 0 {
		return 0, nil
	}

	if len(pb.pages) == 0 {
		pb.pages = append(pb.pages, pb.newPage())
	}

	for len(b) != 0 {
		tail := pb.pages[len(pb.pages)-1]
		free := tail.Cap() - tail.Len()

		if len(b) <= free {
			_, _ = tail.Write(b)
			pb.length += len(b)
			break
		}

		_, _ = tail.Write(b[:free])
		b = b[free:]

		pb.length += free
		pb.pages = append(pb.pages, pb.newPage())
	}

	return wn, nil
}

func (pb *pageBuffer) WriteAt(b []byte, off int64) (int, error) {
	n, err := pb.pages.WriteAt(b, off)
	if err != nil {
		return n, err
	}
	if n < len(b) {
		_, _ = pb.Write(b[n:])
	}
	return len(b), nil
}

func (pb *pageBuffer) WriteTo(w io.Writer) (int64, error) {
	var wn int
	var err error
	pb.pages.scan(int64(pb.cursor), int64(pb.length), func(b []byte) bool {
		var n int
		n, err = w.Write(b)
		wn += n
		return err == nil
	})
	pb.cursor += wn
	return int64(wn), err
}

var (
	_ io.ReaderAt     = (*pageBuffer)(nil)
	_ io.ReaderFrom   = (*pageBuffer)(nil)
	_ io.StringWriter = (*pageBuffer)(nil)
	_ io.Writer       = (*pageBuffer)(nil)
	_ io.WriterAt     = (*pageBuffer)(nil)
	_ io.WriterTo     = (*pageBuffer)(nil)

	pagePool       sync.Pool
	pageBufferPool sync.Pool
)

type contiguousPages []*page

func (pages contiguousPages) unref() {
	for _, p := range pages {
		p.unref()
	}
}

func (pages contiguousPages) clear() {
	for i := range pages {
		pages[i] = nil
	}
}

func (pages contiguousPages) ReadAt(b []byte, off int64) (int, error) {
	rn := 0

	for _, p := range pages.slice(off, off+int64(len(b))) {
		n, _ := p.ReadAt(b, off)
		b = b[n:]
		rn += n
		off += int64(n)
	}

	return rn, nil
}

func (pages contiguousPages) WriteAt(b []byte, off int64) (int, error) {
	wn := 0

	for _, p := range pages.slice(off, off+int64(len(b))) {
		n, _ := p.WriteAt(b, off)
		b = b[n:]
		wn += n
		off += int64(n)
	}

	return wn, nil
}

func (pages contiguousPages) slice(begin, end int64) contiguousPages {
	i := pages.indexOf(begin)
	j := pages.indexOf(end)
	if j < len(pages) {
		j++
	}
	return pages[i:j]
}

func (pages contiguousPages) indexOf(offset int64) int {
	if len(pages) == 0 {
		return 0
	}
	return int((offset - pages[0].offset) / pageSize)
}

func (pages contiguousPages) scan(begin, end int64, f func([]byte) bool) {
	for _, p := range pages.slice(begin, end) {
		if !f(p.slice(begin, end)) {
			break
		}
	}
}

var (
	_ io.ReaderAt = contiguousPages{}
	_ io.WriterAt = contiguousPages{}
)

type pageRef struct {
	pages  contiguousPages
	offset int64
	cursor int64
	length uint32
	once   uint32
}

func (ref *pageRef) unref() {
	if atomic.CompareAndSwapUint32(&ref.once, 0, 1) {
		ref.pages.unref()
		ref.pages.clear()
		ref.pages = nil
		ref.offset = 0
		ref.cursor = 0
		ref.length = 0
	}
}

func (ref *pageRef) Len() int { return int(ref.Size() - ref.cursor) }

func (ref *pageRef) Size() int64 { return int64(ref.length) }

func (ref *pageRef) Close() error { ref.unref(); return nil }

func (ref *pageRef) String() string {
	return fmt.Sprintf("[offset=%d cursor=%d length=%d]", ref.offset, ref.cursor, ref.length)
}

func (ref *pageRef) Seek(offset int64, whence int) (int64, error) {
	c, err := seek(ref.cursor, int64(ref.length), offset, whence)
	if err != nil {
		return -1, err
	}
	ref.cursor = c
	return c, nil
}

func (ref *pageRef) ReadByte() (byte, error) {
	var c byte
	var ok bool
	ref.scan(ref.cursor, func(b []byte) bool {
		c, ok = b[0], true
		return false
	})
	if ok {
		ref.cursor++
	} else {
		return 0, io.EOF
	}
	return c, nil
}

func (ref *pageRef) Read(b []byte) (int, error) {
	if ref.cursor >= int64(ref.length) {
		return 0, io.EOF
	}
	n, err := ref.ReadAt(b, ref.cursor)
	ref.cursor += int64(n)
	return n, err
}

func (ref *pageRef) ReadAt(b []byte, off int64) (int, error) {
	limit := ref.offset + int64(ref.length)
	off += ref.offset

	if off >= limit {
		return 0, io.EOF
	}

	if off+int64(len(b)) > limit {
		b = b[:limit-off]
	}

	if len(b) == 0 {
		return 0, nil
	}

	n, err := ref.pages.ReadAt(b, off)
	if n == 0 && err == nil {
		err = io.EOF
	}
	return n, err
}

func (ref *pageRef) WriteTo(w io.Writer) (wn int64, err error) {
	ref.scan(ref.cursor, func(b []byte) bool {
		var n int
		n, err = w.Write(b)
		wn += int64(n)
		return err == nil
	})
	ref.cursor += wn
	return
}

func (ref *pageRef) scan(off int64, f func([]byte) bool) {
	begin := ref.offset + off
	end := ref.offset + int64(ref.length)
	ref.pages.scan(begin, end, f)
}

var (
	_ io.Closer   = (*pageRef)(nil)
	_ io.Seeker   = (*pageRef)(nil)
	_ io.Reader   = (*pageRef)(nil)
	_ io.ReaderAt = (*pageRef)(nil)
	_ io.WriterTo = (*pageRef)(nil)
)

func seek(cursor, limit, offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart:
		// absolute offset
	case io.SeekCurrent:
		offset = cursor + offset
	case io.SeekEnd:
		offset = limit - offset
	default:
		return -1, fmt.Errorf("seek: invalid whence value: %d", whence)
	}
	if offset < 0 {
		offset = 0
	}
	if offset > limit {
		offset = limit
	}
	return offset, nil
}
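The seek helper at the end of the deleted file clamps the whence-adjusted offset into [0, limit] instead of returning an error for out-of-range positions. A tiny worked example under that contract (the function below restates the deleted helper for illustration):

package main

import (
	"fmt"
	"io"
)

// seek mirrors the clamping semantics of the deleted helper: resolve the
// offset relative to whence, then clamp it into [0, limit].
func seek(cursor, limit, offset int64, whence int) (int64, error) {
	switch whence {
	case io.SeekStart: // absolute offset
	case io.SeekCurrent:
		offset = cursor + offset
	case io.SeekEnd:
		offset = limit - offset
	default:
		return -1, fmt.Errorf("seek: invalid whence value: %d", whence)
	}
	if offset < 0 {
		offset = 0
	}
	if offset > limit {
		offset = limit
	}
	return offset, nil
}

func main() {
	pos, _ := seek(10, 100, -50, io.SeekCurrent)
	fmt.Println(pos) // 0: 10-50 is clamped to the start instead of failing
	pos, _ = seek(10, 100, 500, io.SeekStart)
	fmt.Println(pos) // 100: past-the-end seeks are clamped to the limit
}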
@@ -1,143 +0,0 @@
package kafka

import (
	"fmt"
	"sort"
	"strings"
	"text/tabwriter"
)

type Cluster struct {
	ClusterID  string
	Controller int32
	Brokers    map[int32]Broker
	Topics     map[string]Topic
}

func (c Cluster) BrokerIDs() []int32 {
	brokerIDs := make([]int32, 0, len(c.Brokers))
	for id := range c.Brokers {
		brokerIDs = append(brokerIDs, id)
	}
	sort.Slice(brokerIDs, func(i, j int) bool {
		return brokerIDs[i] < brokerIDs[j]
	})
	return brokerIDs
}

func (c Cluster) TopicNames() []string {
	topicNames := make([]string, 0, len(c.Topics))
	for name := range c.Topics {
		topicNames = append(topicNames, name)
	}
	sort.Strings(topicNames)
	return topicNames
}

func (c Cluster) IsZero() bool {
	return c.ClusterID == "" && c.Controller == 0 && len(c.Brokers) == 0 && len(c.Topics) == 0
}

func (c Cluster) Format(w fmt.State, _ rune) {
	tw := new(tabwriter.Writer)
	fmt.Fprintf(w, "CLUSTER: %q\n\n", c.ClusterID)

	tw.Init(w, 0, 8, 2, ' ', 0)
	fmt.Fprint(tw, "  BROKER\tHOST\tPORT\tRACK\tCONTROLLER\n")

	for _, id := range c.BrokerIDs() {
		broker := c.Brokers[id]
		fmt.Fprintf(tw, "  %d\t%s\t%d\t%s\t%t\n", broker.ID, broker.Host, broker.Port, broker.Rack, broker.ID == c.Controller)
	}

	tw.Flush()
	fmt.Fprintln(w)

	tw.Init(w, 0, 8, 2, ' ', 0)
	fmt.Fprint(tw, "  TOPIC\tPARTITIONS\tBROKERS\n")
	topicNames := c.TopicNames()
	brokers := make(map[int32]struct{}, len(c.Brokers))
	brokerIDs := make([]int32, 0, len(c.Brokers))

	for _, name := range topicNames {
		topic := c.Topics[name]

		for _, p := range topic.Partitions {
			for _, id := range p.Replicas {
				brokers[id] = struct{}{}
			}
		}

		for id := range brokers {
			brokerIDs = append(brokerIDs, id)
		}

		fmt.Fprintf(tw, "  %s\t%d\t%s\n", topic.Name, len(topic.Partitions), formatBrokerIDs(brokerIDs, -1))

		for id := range brokers {
			delete(brokers, id)
		}

		brokerIDs = brokerIDs[:0]
	}

	tw.Flush()
	fmt.Fprintln(w)

	if w.Flag('+') {
		for _, name := range topicNames {
			fmt.Fprintf(w, "  TOPIC: %q\n\n", name)

			tw.Init(w, 0, 8, 2, ' ', 0)
			fmt.Fprint(tw, "    PARTITION\tREPLICAS\tISR\tOFFLINE\n")

			for _, p := range c.Topics[name].Partitions {
				fmt.Fprintf(tw, "    %d\t%s\t%s\t%s\n", p.ID,
					formatBrokerIDs(p.Replicas, -1),
					formatBrokerIDs(p.ISR, p.Leader),
					formatBrokerIDs(p.Offline, -1),
				)
			}

			tw.Flush()
			fmt.Fprintln(w)
		}
	}
}

func formatBrokerIDs(brokerIDs []int32, leader int32) string {
	if len(brokerIDs) == 0 {
		return ""
	}

	if len(brokerIDs) == 1 {
		return itoa(brokerIDs[0])
	}

	sort.Slice(brokerIDs, func(i, j int) bool {
		id1 := brokerIDs[i]
		id2 := brokerIDs[j]

		if id1 == leader {
			return true
		}

		if id2 == leader {
			return false
		}

		return id1 < id2
	})

	brokerNames := make([]string, len(brokerIDs))

	for i, id := range brokerIDs {
		brokerNames[i] = itoa(id)
	}

	return strings.Join(brokerNames, ",")
}

var (
	_ fmt.Formatter = Cluster{}
)
@@ -1,7 +1,6 @@
package kafka

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
@@ -9,8 +8,6 @@ import (
	"io/ioutil"
	"reflect"
	"strings"
	"sync"
	"sync/atomic"
)

type discarder interface {
@@ -26,15 +23,6 @@ type decoder struct {
	crc32  uint32
}

func (d *decoder) Reset(r io.Reader, n int) {
	d.reader = r
	d.remain = n
	d.buffer = [8]byte{}
	d.err = nil
	d.table = nil
	d.crc32 = 0
}

func (d *decoder) Read(b []byte) (int, error) {
	if d.err != nil {
		return 0, d.err
@@ -483,52 +471,3 @@ func decodeReadInt32(b []byte) int32 {
func decodeReadInt64(b []byte) int64 {
	return int64(binary.BigEndian.Uint64(b))
}

func Unmarshal(data []byte, version int16, value interface{}) error {
	typ := elemTypeOf(value)
	cache, _ := unmarshalers.Load().(map[versionedType]decodeFunc)
	key := versionedType{typ: typ, version: version}
	decode := cache[key]

	if decode == nil {
		decode = decodeFuncOf(reflect.TypeOf(value).Elem(), version, false, structTag{
			MinVersion: -1,
			MaxVersion: -1,
			TagID:      -2,
			Compact:    true,
			Nullable:   true,
		})

		newCache := make(map[versionedType]decodeFunc, len(cache)+1)
		newCache[key] = decode

		for typ, fun := range cache {
			newCache[typ] = fun
		}

		unmarshalers.Store(newCache)
	}

	d, _ := decoders.Get().(*decoder)
	if d == nil {
		d = &decoder{reader: bytes.NewReader(nil)}
	}

	d.remain = len(data)
	r, _ := d.reader.(*bytes.Reader)
	r.Reset(data)

	defer func() {
		r.Reset(nil)
		d.Reset(r, 0)
		decoders.Put(d)
	}()

	decode(d, valueOf(value))
	return dontExpectEOF(d.err)
}

var (
	decoders     sync.Pool    // *decoder
	unmarshalers atomic.Value // map[versionedType]decodeFunc
)
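The deleted Unmarshal caches the reflection-derived decode function per (type, version) key in an atomic.Value, rebuilding the whole map on a miss so readers never take a lock. A minimal sketch of that copy-on-write cache idiom (string payloads stand in for decodeFunc; all names here are illustrative):

package main

import (
	"fmt"
	"sync/atomic"
)

type key struct {
	name    string
	version int16
}

var cache atomic.Value // map[key]string, treated as immutable once stored

// lookup returns the cached entry, building a fresh copy of the map on a
// miss so concurrent readers never observe a map being mutated.
func lookup(k key, build func() string) string {
	m, _ := cache.Load().(map[key]string)
	if v, ok := m[k]; ok {
		return v
	}
	v := build()
	next := make(map[key]string, len(m)+1)
	for kk, vv := range m {
		next[kk] = vv
	}
	next[k] = v
	cache.Store(next) // replace the whole map atomically
	return v
}

func main() {
	k := key{name: "fetchRequest", version: 11}
	fmt.Println(lookup(k, func() string { return "decoder for fetchRequest v11" }))
	fmt.Println(lookup(k, func() string { return "never rebuilt" }))
}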
@@ -1,16 +0,0 @@
package kafka

import "bufio"

func discardN(r *bufio.Reader, sz int, n int) (int, error) {
	var err error
	if n <= sz {
		n, err = r.Discard(n)
	} else {
		n, err = r.Discard(sz)
		if err == nil {
			err = errShortRead
		}
	}
	return sz - n, err
}
@@ -1,537 +0,0 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"reflect"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type encoder struct {
|
||||
writer io.Writer
|
||||
err error
|
||||
table *crc32.Table
|
||||
crc32 uint32
|
||||
buffer [32]byte
|
||||
}
|
||||
|
||||
type encoderChecksum struct {
|
||||
reader io.Reader
|
||||
encoder *encoder
|
||||
}
|
||||
|
||||
func (e *encoderChecksum) Read(b []byte) (int, error) {
|
||||
n, err := e.reader.Read(b)
|
||||
if n > 0 {
|
||||
e.encoder.update(b[:n])
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (e *encoder) Reset(w io.Writer) {
|
||||
e.writer = w
|
||||
e.err = nil
|
||||
e.table = nil
|
||||
e.crc32 = 0
|
||||
e.buffer = [32]byte{}
|
||||
}
|
||||
|
||||
func (e *encoder) ReadFrom(r io.Reader) (int64, error) {
|
||||
if e.table != nil {
|
||||
r = &encoderChecksum{
|
||||
reader: r,
|
||||
encoder: e,
|
||||
}
|
||||
}
|
||||
return io.Copy(e.writer, r)
|
||||
}
|
||||
|
||||
func (e *encoder) Write(b []byte) (int, error) {
|
||||
if e.err != nil {
|
||||
return 0, e.err
|
||||
}
|
||||
n, err := e.writer.Write(b)
|
||||
if n > 0 {
|
||||
e.update(b[:n])
|
||||
}
|
||||
if err != nil {
|
||||
e.err = err
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (e *encoder) WriteByte(b byte) error {
|
||||
e.buffer[0] = b
|
||||
_, err := e.Write(e.buffer[:1])
|
||||
return err
|
||||
}
|
||||
|
||||
func (e *encoder) WriteString(s string) (int, error) {
|
||||
// This implementation is an optimization to avoid the heap allocation that
|
||||
// would occur when converting the string to a []byte to call crc32.Update.
|
||||
//
|
||||
// Strings are rarely long in the kafka protocol, so the use of a 32 byte
|
||||
// buffer is a good comprise between keeping the encoder value small and
|
||||
// limiting the number of calls to Write.
|
||||
//
|
||||
// We introduced this optimization because memory profiles on the benchmarks
|
||||
// showed that most heap allocations were caused by this code path.
|
||||
n := 0
|
||||
|
||||
for len(s) != 0 {
|
||||
c := copy(e.buffer[:], s)
|
||||
w, err := e.Write(e.buffer[:c])
|
||||
n += w
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
s = s[c:]
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}

func (e *encoder) update(b []byte) {
    if e.table != nil {
        e.crc32 = crc32.Update(e.crc32, e.table, b)
    }
}

func (e *encoder) encodeBool(v value) {
    b := int8(0)
    if v.bool() {
        b = 1
    }
    e.writeInt8(b)
}

func (e *encoder) encodeInt8(v value) {
    e.writeInt8(v.int8())
}

func (e *encoder) encodeInt16(v value) {
    e.writeInt16(v.int16())
}

func (e *encoder) encodeInt32(v value) {
    e.writeInt32(v.int32())
}

func (e *encoder) encodeInt64(v value) {
    e.writeInt64(v.int64())
}

func (e *encoder) encodeString(v value) {
    e.writeString(v.string())
}

func (e *encoder) encodeCompactString(v value) {
    e.writeCompactString(v.string())
}

func (e *encoder) encodeNullString(v value) {
    e.writeNullString(v.string())
}

func (e *encoder) encodeCompactNullString(v value) {
    e.writeCompactNullString(v.string())
}

func (e *encoder) encodeBytes(v value) {
    e.writeBytes(v.bytes())
}

func (e *encoder) encodeCompactBytes(v value) {
    e.writeCompactBytes(v.bytes())
}

func (e *encoder) encodeNullBytes(v value) {
    e.writeNullBytes(v.bytes())
}

func (e *encoder) encodeCompactNullBytes(v value) {
    e.writeCompactNullBytes(v.bytes())
}

func (e *encoder) encodeArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
    a := v.array(elemType)
    n := a.length()
    e.writeInt32(int32(n))

    for i := 0; i < n; i++ {
        encodeElem(e, a.index(i))
    }
}

func (e *encoder) encodeCompactArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
    a := v.array(elemType)
    n := a.length()
    e.writeUnsignedVarInt(uint64(n + 1))

    for i := 0; i < n; i++ {
        encodeElem(e, a.index(i))
    }
}

func (e *encoder) encodeNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
    a := v.array(elemType)
    if a.isNil() {
        e.writeInt32(-1)
        return
    }

    n := a.length()
    e.writeInt32(int32(n))

    for i := 0; i < n; i++ {
        encodeElem(e, a.index(i))
    }
}

func (e *encoder) encodeCompactNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
    a := v.array(elemType)
    if a.isNil() {
        e.writeUnsignedVarInt(0)
        return
    }

    n := a.length()
    e.writeUnsignedVarInt(uint64(n + 1))
    for i := 0; i < n; i++ {
        encodeElem(e, a.index(i))
    }
}

func (e *encoder) writeInt8(i int8) {
    writeInt8(e.buffer[:1], i)
    _, _ = e.Write(e.buffer[:1])
}

func (e *encoder) writeInt16(i int16) {
    writeInt16(e.buffer[:2], i)
    _, _ = e.Write(e.buffer[:2])
}

func (e *encoder) writeInt32(i int32) {
    writeInt32(e.buffer[:4], i)
    _, _ = e.Write(e.buffer[:4])
}

func (e *encoder) writeInt64(i int64) {
    writeInt64(e.buffer[:8], i)
    _, _ = e.Write(e.buffer[:8])
}

func (e *encoder) writeString(s string) {
    e.writeInt16(int16(len(s)))
    _, _ = e.WriteString(s)
}

func (e *encoder) writeCompactString(s string) {
    e.writeUnsignedVarInt(uint64(len(s)) + 1)
    _, _ = e.WriteString(s)
}

func (e *encoder) writeNullString(s string) {
    if s == "" {
        e.writeInt16(-1)
    } else {
        e.writeInt16(int16(len(s)))
        _, _ = e.WriteString(s)
    }
}

func (e *encoder) writeCompactNullString(s string) {
    if s == "" {
        e.writeUnsignedVarInt(0)
    } else {
        e.writeUnsignedVarInt(uint64(len(s)) + 1)
        _, _ = e.WriteString(s)
    }
}

func (e *encoder) writeBytes(b []byte) {
    e.writeInt32(int32(len(b)))
    _, _ = e.Write(b)
}

func (e *encoder) writeCompactBytes(b []byte) {
    e.writeUnsignedVarInt(uint64(len(b)) + 1)
    _, _ = e.Write(b)
}

func (e *encoder) writeNullBytes(b []byte) {
    if b == nil {
        e.writeInt32(-1)
    } else {
        e.writeInt32(int32(len(b)))
        _, _ = e.Write(b)
    }
}

func (e *encoder) writeCompactNullBytes(b []byte) {
    if b == nil {
        e.writeUnsignedVarInt(0)
    } else {
        e.writeUnsignedVarInt(uint64(len(b)) + 1)
        _, _ = e.Write(b)
    }
}
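
// Illustrative note, not part of the original source: the compact ("flexible")
// encodings above store length+1 as an unsigned varint so that varint 0 can
// represent null, varint 1 an empty value, and varint N+1 a value of N bytes.
// The hypothetical helper below restates that convention:
func exampleCompactLength(b []byte) uint64 {
    if b == nil {
        return 0 // null is encoded as varint 0
    }
    return uint64(len(b)) + 1 // a length of N is encoded as varint N+1
}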

func (e *encoder) writeUnsignedVarInt(i uint64) {
    b := e.buffer[:]
    n := 0

    for i >= 0x80 && n < len(b) {
        b[n] = byte(i) | 0x80
        i >>= 7
        n++
    }

    if n < len(b) {
        b[n] = byte(i)
        n++
    }

    _, _ = e.Write(b[:n])
}
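
// Worked example, not part of the original source: the varint above emits 7
// bits per byte, least-significant group first, setting the high bit on every
// byte except the last. Encoding 300 (binary 1_0010_1100) therefore yields
// {0xAC, 0x02}: 0xAC carries the low 7 bits plus a continuation flag, 0x02
// carries the remaining 2 bits. A buffer-free version of the same loop:
func exampleUnsignedVarInt(i uint64) []byte {
    var out []byte
    for i >= 0x80 {
        out = append(out, byte(i)|0x80)
        i >>= 7
    }
    return append(out, byte(i)) // exampleUnsignedVarInt(300) == []byte{0xAC, 0x02}
}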

type encodeFunc func(*encoder, value)

var (
    _ io.ReaderFrom   = (*encoder)(nil)
    _ io.Writer       = (*encoder)(nil)
    _ io.ByteWriter   = (*encoder)(nil)
    _ io.StringWriter = (*encoder)(nil)

    writerTo = reflect.TypeOf((*io.WriterTo)(nil)).Elem()
)

func encodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc {
    if reflect.PtrTo(typ).Implements(writerTo) {
        return writerEncodeFuncOf(typ)
    }
    switch typ.Kind() {
    case reflect.Bool:
        return (*encoder).encodeBool
    case reflect.Int8:
        return (*encoder).encodeInt8
    case reflect.Int16:
        return (*encoder).encodeInt16
    case reflect.Int32:
        return (*encoder).encodeInt32
    case reflect.Int64:
        return (*encoder).encodeInt64
    case reflect.String:
        return stringEncodeFuncOf(flexible, tag)
    case reflect.Struct:
        return structEncodeFuncOf(typ, version, flexible)
    case reflect.Slice:
        if typ.Elem().Kind() == reflect.Uint8 { // []byte
            return bytesEncodeFuncOf(flexible, tag)
        }
        return arrayEncodeFuncOf(typ, version, flexible, tag)
    default:
        panic("unsupported type: " + typ.String())
    }
}

func stringEncodeFuncOf(flexible bool, tag structTag) encodeFunc {
    switch {
    case flexible && tag.Nullable:
        // In flexible messages, all strings are compact
        return (*encoder).encodeCompactNullString
    case flexible:
        // In flexible messages, all strings are compact
        return (*encoder).encodeCompactString
    case tag.Nullable:
        return (*encoder).encodeNullString
    default:
        return (*encoder).encodeString
    }
}

func bytesEncodeFuncOf(flexible bool, tag structTag) encodeFunc {
    switch {
    case flexible && tag.Nullable:
        // In flexible messages, all arrays are compact
        return (*encoder).encodeCompactNullBytes
    case flexible:
        // In flexible messages, all arrays are compact
        return (*encoder).encodeCompactBytes
    case tag.Nullable:
        return (*encoder).encodeNullBytes
    default:
        return (*encoder).encodeBytes
    }
}

func structEncodeFuncOf(typ reflect.Type, version int16, flexible bool) encodeFunc {
    type field struct {
        encode encodeFunc
        index  index
        tagID  int
    }

    var fields []field
    var taggedFields []field

    forEachStructField(typ, func(typ reflect.Type, index index, tag string) {
        if typ.Size() != 0 { // skip struct{}
            forEachStructTag(tag, func(tag structTag) bool {
                if tag.MinVersion <= version && version <= tag.MaxVersion {
                    f := field{
                        encode: encodeFuncOf(typ, version, flexible, tag),
                        index:  index,
                        tagID:  tag.TagID,
                    }

                    if tag.TagID < -1 {
                        // Normal required field
                        fields = append(fields, f)
                    } else {
                        // Optional tagged field (flexible messages only)
                        taggedFields = append(taggedFields, f)
                    }
                    return false
                }
                return true
            })
        }
    })

    return func(e *encoder, v value) {
        for i := range fields {
            f := &fields[i]
            f.encode(e, v.fieldByIndex(f.index))
        }

        if flexible {
            // See https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields
            // for details of tag buffers in "flexible" messages.
            e.writeUnsignedVarInt(uint64(len(taggedFields)))

            for i := range taggedFields {
                f := &taggedFields[i]
                e.writeUnsignedVarInt(uint64(f.tagID))

                buf := &bytes.Buffer{}
                se := &encoder{writer: buf}
                f.encode(se, v.fieldByIndex(f.index))
                e.writeUnsignedVarInt(uint64(buf.Len()))
                _, _ = e.Write(buf.Bytes())
            }
        }
    }
}
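
// Illustrative sketch, not part of the original source: per KIP-482 the tag
// section written above is laid out as varint(count), then for each field
// varint(tagID), varint(size), payload bytes. The hypothetical helper below
// builds the same layout with encoding/binary's PutUvarint, which uses the
// identical varint format. Note that a real encoder must emit tags in
// ascending tagID order; this sketch iterates a map and ignores ordering.
func exampleTagBuffer(tags map[uint64][]byte) []byte {
    var out []byte
    var tmp [binary.MaxVarintLen64]byte
    out = append(out, tmp[:binary.PutUvarint(tmp[:], uint64(len(tags)))]...)
    for id, payload := range tags {
        out = append(out, tmp[:binary.PutUvarint(tmp[:], id)]...)
        out = append(out, tmp[:binary.PutUvarint(tmp[:], uint64(len(payload)))]...)
        out = append(out, payload...)
    }
    return out
}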

func arrayEncodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc {
    elemType := typ.Elem()
    elemFunc := encodeFuncOf(elemType, version, flexible, tag)
    switch {
    case flexible && tag.Nullable:
        // In flexible messages, all arrays are compact
        return func(e *encoder, v value) { e.encodeCompactNullArray(v, elemType, elemFunc) }
    case flexible:
        // In flexible messages, all arrays are compact
        return func(e *encoder, v value) { e.encodeCompactArray(v, elemType, elemFunc) }
    case tag.Nullable:
        return func(e *encoder, v value) { e.encodeNullArray(v, elemType, elemFunc) }
    default:
        return func(e *encoder, v value) { e.encodeArray(v, elemType, elemFunc) }
    }
}

func writerEncodeFuncOf(typ reflect.Type) encodeFunc {
    typ = reflect.PtrTo(typ)
    return func(e *encoder, v value) {
        // Optimization to write directly into the buffer when the encoder
        // does not need to compute a crc32 checksum.
        w := io.Writer(e)
        if e.table == nil {
            w = e.writer
        }
        _, err := v.iface(typ).(io.WriterTo).WriteTo(w)
        if err != nil {
            e.err = err
        }
    }
}

func writeInt8(b []byte, i int8) {
    b[0] = byte(i)
}

func writeInt16(b []byte, i int16) {
    binary.BigEndian.PutUint16(b, uint16(i))
}

func writeInt32(b []byte, i int32) {
    binary.BigEndian.PutUint32(b, uint32(i))
}

func writeInt64(b []byte, i int64) {
    binary.BigEndian.PutUint64(b, uint64(i))
}

func Marshal(version int16, value interface{}) ([]byte, error) {
    typ := typeOf(value)
    cache, _ := marshalers.Load().(map[versionedType]encodeFunc)
    key := versionedType{typ: typ, version: version}
    encode := cache[key]

    if encode == nil {
        encode = encodeFuncOf(reflect.TypeOf(value), version, false, structTag{
            MinVersion: -1,
            MaxVersion: -1,
            TagID:      -2,
            Compact:    true,
            Nullable:   true,
        })

        newCache := make(map[versionedType]encodeFunc, len(cache)+1)
        newCache[key] = encode

        for typ, fun := range cache {
            newCache[typ] = fun
        }

        marshalers.Store(newCache)
    }

    e, _ := encoders.Get().(*encoder)
    if e == nil {
        e = &encoder{writer: new(bytes.Buffer)}
    }

    b, _ := e.writer.(*bytes.Buffer)
    defer func() {
        b.Reset()
        e.Reset(b)
        encoders.Put(e)
    }()

    encode(e, nonAddressableValueOf(value))

    if e.err != nil {
        return nil, e.err
    }

    buf := b.Bytes()
    out := make([]byte, len(buf))
    copy(out, buf)
    return out, nil
}
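
// Usage sketch, not part of the original source. The struct and its field
// tags below are hypothetical; the exact `kafka:"..."` tag grammar is defined
// by forEachStructTag elsewhere in this package and is assumed here rather
// than verified. Marshal caches the generated encodeFunc per (type, version)
// pair, so repeated calls for the same type skip the reflection walk.
//
//  type exampleHeader struct {
//      ID   int32  `kafka:"min=v0,max=v9"`
//      Name string `kafka:"min=v0,max=v9"`
//  }
//
//  b, err := Marshal(0, exampleHeader{ID: 1, Name: "x"})
//  // b holds a big-endian int32 followed by an int16-length-prefixed string.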

type versionedType struct {
    typ     _type
    version int16
}

var (
    encoders   sync.Pool    // *encoder
    marshalers atomic.Value // map[versionedType]encodeFunc
)

@@ -1,91 +0,0 @@
package kafka

import (
    "fmt"
)

// Error represents client-side protocol errors.
type Error string

func (e Error) Error() string { return string(e) }

func Errorf(msg string, args ...interface{}) Error {
    return Error(fmt.Sprintf(msg, args...))
}

const (
    // ErrNoTopic is returned when a request needs to be sent to a specific
    // topic, but the client did not find it in the cluster metadata.
    ErrNoTopic Error = "topic not found"

    // ErrNoPartition is returned when a request needs to be sent to a specific
    // partition, but the client did not find it in the cluster metadata.
    ErrNoPartition Error = "topic partition not found"

    // ErrNoLeader is returned when a request needs to be sent to a partition
    // leader, but the client could not determine what the leader was at this
    // time.
    ErrNoLeader Error = "topic partition has no leader"

    // ErrNoRecord is returned when attempting to write a message containing an
    // empty record set (which kafka forbids).
    //
    // We handle this case client-side because kafka will close the connection
    // that it received an empty produce request on, causing all concurrent
    // requests to be aborted.
    ErrNoRecord Error = "record set contains no records"

    // ErrNoReset is returned by ResetRecordReader when the record reader does
    // not support being reset.
    ErrNoReset Error = "record sequence does not support reset"
)

type TopicError struct {
    Topic string
    Err   error
}

func NewTopicError(topic string, err error) *TopicError {
    return &TopicError{Topic: topic, Err: err}
}

func NewErrNoTopic(topic string) *TopicError {
    return NewTopicError(topic, ErrNoTopic)
}

func (e *TopicError) Error() string {
    return fmt.Sprintf("%v (topic=%q)", e.Err, e.Topic)
}

func (e *TopicError) Unwrap() error {
    return e.Err
}

type TopicPartitionError struct {
    Topic     string
    Partition int32
    Err       error
}

func NewTopicPartitionError(topic string, partition int32, err error) *TopicPartitionError {
    return &TopicPartitionError{
        Topic:     topic,
        Partition: partition,
        Err:       err,
    }
}

func NewErrNoPartition(topic string, partition int32) *TopicPartitionError {
    return NewTopicPartitionError(topic, partition, ErrNoPartition)
}

func NewErrNoLeader(topic string, partition int32) *TopicPartitionError {
    return NewTopicPartitionError(topic, partition, ErrNoLeader)
}

func (e *TopicPartitionError) Error() string {
    return fmt.Sprintf("%v (topic=%q partition=%d)", e.Err, e.Topic, e.Partition)
}

func (e *TopicPartitionError) Unwrap() error {
    return e.Err
}
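
// Usage sketch, not part of the original source: because both wrapper types
// implement Unwrap, errors.Is can match the sentinel through the wrapper
// (assumes the standard "errors" package).
//
//  err := NewErrNoLeader("payments", 3)
//  errors.Is(err, ErrNoLeader) // true
//  err.Error()                 // `topic partition has no leader (topic="payments" partition=3)`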

@@ -6,14 +6,18 @@ require (
    github.com/fatih/camelcase v1.0.0
    github.com/ohler55/ojg v1.12.12
    github.com/segmentio/kafka-go v0.4.27
    github.com/stretchr/testify v1.6.1
    github.com/up9inc/mizu/tap/api v0.0.0
)

require (
    github.com/davecgh/go-spew v1.1.0 // indirect
    github.com/golang/snappy v0.0.4 // indirect
    github.com/google/martian v2.1.0+incompatible // indirect
    github.com/klauspost/compress v1.14.2 // indirect
    github.com/pierrec/lz4 v2.6.1+incompatible // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
)

replace github.com/up9inc/mizu/tap/api v0.0.0 => ../../api

@@ -1,3 +1,4 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
@@ -25,10 +26,12 @@ github.com/ohler55/ojg v1.12.12/go.mod h1:LBbIVRAgoFbYBXQhRhuEpaJIqq+goSO63/FQ+n
github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/segmentio/kafka-go v0.4.27 h1:sIhEozeL/TLN2mZ5dkG462vcGEWYKS+u31sXPjKhAM4=
github.com/segmentio/kafka-go v0.4.27/go.mod h1:XzMcoMjSzDGHcIwpWUI7GB43iKZ2fTVmryPSGLf/MPg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
@@ -40,5 +43,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

@@ -4,6 +4,7 @@ import (
    "encoding/json"
    "fmt"
    "reflect"
    "sort"
    "strconv"
    "strings"

@@ -897,6 +898,10 @@ func representMapAsTable(mapData map[string]interface{}, selectorPrefix string,
        })
    }

    sort.Slice(table, func(i, j int) bool {
        return table[i].Name < table[j].Name
    })

    obj, _ := json.Marshal(table)
    representation = string(obj)
    return

290
tap/extensions/kafka/main_test.go
Normal file
@@ -0,0 +1,290 @@
package kafka

import (
    "bufio"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "os"
    "path"
    "path/filepath"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/up9inc/mizu/tap/api"
)

const (
    binDir          = "bin"
    patternBin      = "*_req.bin"
    patternDissect  = "*.json"
    msgDissecting   = "Dissecting:"
    msgAnalyzing    = "Analyzing:"
    msgRepresenting = "Representing:"
    respSuffix      = "_res.bin"
    expectDir       = "expect"
    dissectDir      = "dissect"
    analyzeDir      = "analyze"
    representDir    = "represent"
    testUpdate      = "TEST_UPDATE"
)

func TestRegister(t *testing.T) {
    dissector := NewDissector()
    extension := &api.Extension{}
    dissector.Register(extension)
    assert.Equal(t, "kafka", extension.Protocol.Name)
}

func TestMacros(t *testing.T) {
    expectedMacros := map[string]string{
        "kafka": `proto.name == "kafka"`,
    }
    dissector := NewDissector()
    macros := dissector.Macros()
    assert.Equal(t, expectedMacros, macros)
}

func TestPing(t *testing.T) {
    dissector := NewDissector()
    dissector.Ping()
}

func TestDissect(t *testing.T) {
    _, testUpdateEnabled := os.LookupEnv(testUpdate)

    expectDirDissect := path.Join(expectDir, dissectDir)

    if testUpdateEnabled {
        os.RemoveAll(expectDirDissect)
        err := os.MkdirAll(expectDirDissect, 0775)
        assert.Nil(t, err)
    }

    dissector := NewDissector()
    paths, err := filepath.Glob(path.Join(binDir, patternBin))
    if err != nil {
        log.Fatal(err)
    }

    options := &api.TrafficFilteringOptions{
        IgnoredUserAgents: []string{},
    }

    for _, _path := range paths {
        basePath := _path[:len(_path)-8]

        // Channel to verify the output
        itemChannel := make(chan *api.OutputChannelItem)
        var emitter api.Emitter = &api.Emitting{
            AppStats:      &api.AppStats{},
            OutputChannel: itemChannel,
        }

        var items []*api.OutputChannelItem
        stop := make(chan bool)

        go func() {
            for {
                select {
                case <-stop:
                    return
                case item := <-itemChannel:
                    items = append(items, item)
                }
            }
        }()

        // Stream level
        counterPair := &api.CounterPair{
            Request:  0,
            Response: 0,
        }
        superIdentifier := &api.SuperIdentifier{}

        // Request
        pathClient := _path
        fmt.Printf("%s %s\n", msgDissecting, pathClient)
        fileClient, err := os.Open(pathClient)
        assert.Nil(t, err)

        bufferClient := bufio.NewReader(fileClient)
        tcpIDClient := &api.TcpID{
            SrcIP:   "1",
            DstIP:   "2",
            SrcPort: "1",
            DstPort: "2",
        }
        reqResMatcher := dissector.NewResponseRequestMatcher()
        reqResMatcher.SetMaxTry(10)
        err = dissector.Dissect(bufferClient, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
        if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
            log.Println(err)
        }

        // Response
        pathServer := basePath + respSuffix
        fmt.Printf("%s %s\n", msgDissecting, pathServer)
        fileServer, err := os.Open(pathServer)
        assert.Nil(t, err)

        bufferServer := bufio.NewReader(fileServer)
        tcpIDServer := &api.TcpID{
            SrcIP:   "2",
            DstIP:   "1",
            SrcPort: "2",
            DstPort: "1",
        }
        err = dissector.Dissect(bufferServer, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
        if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
            log.Println(err)
        }

        fileClient.Close()
        fileServer.Close()

        pathExpect := path.Join(expectDirDissect, fmt.Sprintf("%s.json", basePath[4:]))

        time.Sleep(10 * time.Millisecond)

        stop <- true

        marshaled, err := json.Marshal(items)
        assert.Nil(t, err)

        if testUpdateEnabled {
            if len(items) > 0 {
                err = os.WriteFile(pathExpect, marshaled, 0644)
                assert.Nil(t, err)
            }
        } else {
            if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
                assert.Len(t, items, 0)
            } else {
                expectedBytes, err := ioutil.ReadFile(pathExpect)
                assert.Nil(t, err)

                assert.JSONEq(t, string(expectedBytes), string(marshaled))
            }
        }
    }
}

func TestAnalyze(t *testing.T) {
    _, testUpdateEnabled := os.LookupEnv(testUpdate)

    expectDirDissect := path.Join(expectDir, dissectDir)
    expectDirAnalyze := path.Join(expectDir, analyzeDir)

    if testUpdateEnabled {
        os.RemoveAll(expectDirAnalyze)
        err := os.MkdirAll(expectDirAnalyze, 0775)
        assert.Nil(t, err)
    }

    dissector := NewDissector()
    paths, err := filepath.Glob(path.Join(expectDirDissect, patternDissect))
    if err != nil {
        log.Fatal(err)
    }

    for _, _path := range paths {
        fmt.Printf("%s %s\n", msgAnalyzing, _path)

        bytes, err := ioutil.ReadFile(_path)
        assert.Nil(t, err)

        var items []*api.OutputChannelItem
        err = json.Unmarshal(bytes, &items)
        assert.Nil(t, err)

        var entries []*api.Entry
        for _, item := range items {
            entry := dissector.Analyze(item, "", "", "")
            entries = append(entries, entry)
        }

        pathExpect := path.Join(expectDirAnalyze, filepath.Base(_path))

        marshaled, err := json.Marshal(entries)
        assert.Nil(t, err)

        if testUpdateEnabled {
            if len(entries) > 0 {
                err = os.WriteFile(pathExpect, marshaled, 0644)
                assert.Nil(t, err)
            }
        } else {
            if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
                assert.Len(t, items, 0)
            } else {
                expectedBytes, err := ioutil.ReadFile(pathExpect)
                assert.Nil(t, err)

                assert.JSONEq(t, string(expectedBytes), string(marshaled))
            }
        }
    }
}

func TestRepresent(t *testing.T) {
    _, testUpdateEnabled := os.LookupEnv(testUpdate)

    expectDirAnalyze := path.Join(expectDir, analyzeDir)
    expectDirRepresent := path.Join(expectDir, representDir)

    if testUpdateEnabled {
        os.RemoveAll(expectDirRepresent)
        err := os.MkdirAll(expectDirRepresent, 0775)
        assert.Nil(t, err)
    }

    dissector := NewDissector()
    paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternDissect))
    if err != nil {
        log.Fatal(err)
    }

    for _, _path := range paths {
        fmt.Printf("%s %s\n", msgRepresenting, _path)

        bytes, err := ioutil.ReadFile(_path)
        assert.Nil(t, err)

        var entries []*api.Entry
        err = json.Unmarshal(bytes, &entries)
        assert.Nil(t, err)

        var objects []string
        for _, entry := range entries {
            object, _, err := dissector.Represent(entry.Request, entry.Response)
            assert.Nil(t, err)
            objects = append(objects, string(object))
        }

        pathExpect := path.Join(expectDirRepresent, filepath.Base(_path))

        marshaled, err := json.Marshal(objects)
        assert.Nil(t, err)

        if testUpdateEnabled {
            if len(objects) > 0 {
                err = os.WriteFile(pathExpect, marshaled, 0644)
                assert.Nil(t, err)
            }
        } else {
            if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
                assert.Len(t, objects, 0)
            } else {
                expectedBytes, err := ioutil.ReadFile(pathExpect)
                assert.Nil(t, err)

                assert.JSONEq(t, string(expectedBytes), string(marshaled))
            }
        }
    }
}

@@ -7,8 +7,6 @@ import (
    "github.com/up9inc/mizu/tap/api"
)

const maxTry int = 3000

type RequestResponsePair struct {
    Request  Request
    Response Response
@@ -17,16 +15,21 @@ type RequestResponsePair struct {
// Key is {client_addr}_{client_port}_{dest_addr}_{dest_port}_{correlation_id}
type requestResponseMatcher struct {
    openMessagesMap *sync.Map
    maxTry          int
}

func createResponseRequestMatcher() api.RequestResponseMatcher {
    return &requestResponseMatcher{openMessagesMap: &sync.Map{}}
    return &requestResponseMatcher{openMessagesMap: &sync.Map{}, maxTry: 3000}
}

func (matcher *requestResponseMatcher) GetMap() *sync.Map {
    return matcher.openMessagesMap
}

func (matcher *requestResponseMatcher) SetMaxTry(value int) {
    matcher.maxTry = value
}

func (matcher *requestResponseMatcher) registerRequest(key string, request *Request) *RequestResponsePair {
    if response, found := matcher.openMessagesMap.LoadAndDelete(key); found {
        // Check for a situation that only occurs when a Kafka broker is initiating
@@ -44,7 +47,7 @@ func (matcher *requestResponseMatcher) registerResponse(key string, response *Re
    try := 0
    for {
        try++
        if try > maxTry {
        if try > matcher.maxTry {
            return nil
        }
        if request, found := matcher.openMessagesMap.LoadAndDelete(key); found {

@@ -3,7 +3,6 @@ package kafka
import (
    "fmt"
    "io"
    "net"
    "reflect"
    "strconv"
    "strings"
@@ -27,29 +26,20 @@ func (k ApiKey) String() string {
    return strconv.Itoa(int(k))
}

func (k ApiKey) MinVersion() int16 { return k.apiType().minVersion() }

func (k ApiKey) MaxVersion() int16 { return k.apiType().maxVersion() }

func (k ApiKey) SelectVersion(minVersion, maxVersion int16) int16 {
    min := k.MinVersion()
    max := k.MaxVersion()
    switch {
    case min > maxVersion:
        return min
    case max < maxVersion:
        return max
    default:
        return maxVersion
    }
}
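
// Worked example, not part of the original source: SelectVersion clamps the
// negotiated version into the range this client implements. For a key whose
// local range is v2..v7:
//
//  k.SelectVersion(0, 9) == 7 // broker supports more than we do: clamp to our max
//  k.SelectVersion(0, 1) == 2 // broker is older than our min: still send our min
//  k.SelectVersion(0, 5) == 5 // ranges overlap: use the broker's maximum
//
// Note that the minVersion argument is not consulted by the implementation above.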

func (k ApiKey) apiType() apiType {
    if i := int(k); i >= 0 && i < len(apiTypes) {
        return apiTypes[i]
    }
    return apiType{}
}
const (
    // v0 = 0
    v1  = 1
    v2  = 2
    v3  = 3
    v4  = 4
    v5  = 5
    v6  = 6
    v7  = 7
    v8  = 8
    v9  = 9
    v10 = 10
    v11 = 11
)

const (
    Produce ApiKey = 0
@@ -164,48 +154,6 @@ type messageType struct {
    flexible bool
    gotype   reflect.Type
    decode   decodeFunc
    encode   encodeFunc
}

type apiType struct {
    requests  []messageType
    responses []messageType
}

func (t apiType) minVersion() int16 {
    if len(t.requests) == 0 {
        return 0
    }
    return t.requests[0].version
}

func (t apiType) maxVersion() int16 {
    if len(t.requests) == 0 {
        return 0
    }
    return t.requests[len(t.requests)-1].version
}

var apiTypes [numApis]apiType

// Register is automatically called when sub-packages are imported to install a
// new pair of request/response message types.
func Register(req, res Message) {
    k1 := req.ApiKey()
    k2 := res.ApiKey()

    if k1 != k2 {
        panic(fmt.Sprintf("[%T/%T]: request and response API keys mismatch: %d != %d", req, res, k1, k2))
    }

    apiTypes[k1] = apiType{
        requests:  typesOf(req),
        responses: typesOf(res),
    }
}

func typesOf(v interface{}) []messageType {
    return makeTypes(reflect.TypeOf(v).Elem())
}

func makeTypes(t reflect.Type) []messageType {
@@ -241,7 +189,6 @@ func makeTypes(t reflect.Type) []messageType {
        gotype:   t,
        flexible: flexible,
        decode:   decodeFuncOf(t, v, flexible, structTag{}),
        encode:   encodeFuncOf(t, v, flexible, structTag{}),
    })
}

@@ -378,31 +325,6 @@ type Broker struct {
    Rack string
}

func (b Broker) String() string {
    return net.JoinHostPort(b.Host, itoa(b.Port))
}

func (b Broker) Format(w fmt.State, v rune) {
    switch v {
    case 'd':
        _, _ = io.WriteString(w, itoa(b.ID))
    case 's':
        _, _ = io.WriteString(w, b.String())
    case 'v':
        _, _ = io.WriteString(w, itoa(b.ID))
        _, _ = io.WriteString(w, " ")
        _, _ = io.WriteString(w, b.String())
        if b.Rack != "" {
            _, _ = io.WriteString(w, " ")
            _, _ = io.WriteString(w, b.Rack)
        }
    }
}

func itoa(i int32) string {
    return strconv.Itoa(int(i))
}

type Topic struct {
    Name  string
    Error int16
@@ -418,14 +340,6 @@ type Partition struct {
    Offline []int32
}

// BrokerMessage is an extension of the Message interface implemented by some
// request types to customize the broker assignment logic.
type BrokerMessage interface {
    // Given a representation of the kafka cluster state as argument, returns
    // the broker that the message should be routed to.
    Broker(Cluster) (Broker, error)
}

// GroupMessage is an extension of the Message interface implemented by some
// request types to inform the program that they should be routed to a group
// coordinator.
@@ -443,16 +357,6 @@ type PreparedMessage interface {
    Prepare(apiVersion int16)
}

// Splitter is an interface implemented by messages that can be split into
// multiple requests and have their results merged back by a Merger.
type Splitter interface {
    // For a given cluster layout, returns the list of messages constructed
    // from the receiver for each request that should be sent to the cluster.
    // The second return value is a Merger which can be used to merge back the
    // results of each request into a single message (or an error).
    Split(Cluster) ([]Message, Merger, error)
}

// Merger is an interface implemented by messages which can merge multiple
// results into one response.
type Merger interface {
@@ -461,16 +365,3 @@ type Merger interface {
    // values, other types should trigger a panic.
    Merge(messages []Message, results []interface{}) (Message, error)
}

// Result converts r to a Message or an error, or panics if r cannot be
// converted to either of these types.
func Result(r interface{}) (Message, error) {
    switch v := r.(type) {
    case Message:
        return v, nil
    case error:
        return nil, v
    default:
        panic(fmt.Errorf("BUG: result must be a message or an error but not %T", v))
    }
}

@@ -1,182 +0,0 @@
package kafka

import (
    "encoding/binary"
    "fmt"
    "strconv"
)

type ApiVersion struct {
    ApiKey     int16
    MinVersion int16
    MaxVersion int16
}

func (v ApiVersion) Format(w fmt.State, r rune) {
    switch r {
    case 's':
        fmt.Fprint(w, apiKey(v.ApiKey))
    case 'd':
        switch {
        case w.Flag('-'):
            fmt.Fprint(w, v.MinVersion)
        case w.Flag('+'):
            fmt.Fprint(w, v.MaxVersion)
        default:
            fmt.Fprint(w, v.ApiKey)
        }
    case 'v':
        switch {
        case w.Flag('-'):
            fmt.Fprintf(w, "v%d", v.MinVersion)
        case w.Flag('+'):
            fmt.Fprintf(w, "v%d", v.MaxVersion)
        case w.Flag('#'):
            fmt.Fprintf(w, "kafka.ApiVersion{ApiKey:%d MinVersion:%d MaxVersion:%d}", v.ApiKey, v.MinVersion, v.MaxVersion)
        default:
            fmt.Fprintf(w, "%s[v%d:v%d]", apiKey(v.ApiKey), v.MinVersion, v.MaxVersion)
        }
    }
}

type apiKey int16

const (
    produce                     apiKey = 0
    fetch                       apiKey = 1
    listOffsets                 apiKey = 2
    metadata                    apiKey = 3
    leaderAndIsr                apiKey = 4
    stopReplica                 apiKey = 5
    updateMetadata              apiKey = 6
    controlledShutdown          apiKey = 7
    offsetCommit                apiKey = 8
    offsetFetch                 apiKey = 9
    findCoordinator             apiKey = 10
    joinGroup                   apiKey = 11
    heartbeat                   apiKey = 12
    leaveGroup                  apiKey = 13
    syncGroup                   apiKey = 14
    describeGroups              apiKey = 15
    listGroups                  apiKey = 16
    saslHandshake               apiKey = 17
    apiVersions                 apiKey = 18
    createTopics                apiKey = 19
    deleteTopics                apiKey = 20
    deleteRecords               apiKey = 21
    initProducerId              apiKey = 22
    offsetForLeaderEpoch        apiKey = 23
    addPartitionsToTxn          apiKey = 24
    addOffsetsToTxn             apiKey = 25
    endTxn                      apiKey = 26
    writeTxnMarkers             apiKey = 27
    txnOffsetCommit             apiKey = 28
    describeAcls                apiKey = 29
    createAcls                  apiKey = 30
    deleteAcls                  apiKey = 31
    describeConfigs             apiKey = 32
    alterConfigs                apiKey = 33
    alterReplicaLogDirs         apiKey = 34
    describeLogDirs             apiKey = 35
    saslAuthenticate            apiKey = 36
    createPartitions            apiKey = 37
    createDelegationToken       apiKey = 38
    renewDelegationToken        apiKey = 39
    expireDelegationToken       apiKey = 40
    describeDelegationToken     apiKey = 41
    deleteGroups                apiKey = 42
    electLeaders                apiKey = 43
    incrementalAlterConfigs     apiKey = 44
    alterPartitionReassignments apiKey = 45
    listPartitionReassignments  apiKey = 46
    offsetDelete                apiKey = 47
)

func (k apiKey) String() string {
    if i := int(k); i >= 0 && i < len(apiKeyStrings) {
        return apiKeyStrings[i]
    }
    return strconv.Itoa(int(k))
}

const (
    // v0 = 0
    v1  = 1
    v2  = 2
    v3  = 3
    v4  = 4
    v5  = 5
    v6  = 6
    v7  = 7
    v8  = 8
    v9  = 9
    v10 = 10
    v11 = 11
)

var apiKeyStrings = [...]string{
    produce:                     "Produce",
    fetch:                       "Fetch",
    listOffsets:                 "ListOffsets",
    metadata:                    "Metadata",
    leaderAndIsr:                "LeaderAndIsr",
    stopReplica:                 "StopReplica",
    updateMetadata:              "UpdateMetadata",
    controlledShutdown:          "ControlledShutdown",
    offsetCommit:                "OffsetCommit",
    offsetFetch:                 "OffsetFetch",
    findCoordinator:             "FindCoordinator",
    joinGroup:                   "JoinGroup",
    heartbeat:                   "Heartbeat",
    leaveGroup:                  "LeaveGroup",
    syncGroup:                   "SyncGroup",
    describeGroups:              "DescribeGroups",
    listGroups:                  "ListGroups",
    saslHandshake:               "SaslHandshake",
    apiVersions:                 "ApiVersions",
    createTopics:                "CreateTopics",
    deleteTopics:                "DeleteTopics",
    deleteRecords:               "DeleteRecords",
    initProducerId:              "InitProducerId",
    offsetForLeaderEpoch:        "OffsetForLeaderEpoch",
    addPartitionsToTxn:          "AddPartitionsToTxn",
    addOffsetsToTxn:             "AddOffsetsToTxn",
    endTxn:                      "EndTxn",
    writeTxnMarkers:             "WriteTxnMarkers",
    txnOffsetCommit:             "TxnOffsetCommit",
    describeAcls:                "DescribeAcls",
    createAcls:                  "CreateAcls",
    deleteAcls:                  "DeleteAcls",
    describeConfigs:             "DescribeConfigs",
    alterConfigs:                "AlterConfigs",
    alterReplicaLogDirs:         "AlterReplicaLogDirs",
    describeLogDirs:             "DescribeLogDirs",
    saslAuthenticate:            "SaslAuthenticate",
    createPartitions:            "CreatePartitions",
    createDelegationToken:       "CreateDelegationToken",
    renewDelegationToken:        "RenewDelegationToken",
    expireDelegationToken:       "ExpireDelegationToken",
    describeDelegationToken:     "DescribeDelegationToken",
    deleteGroups:                "DeleteGroups",
    electLeaders:                "ElectLeaders",
    incrementalAlterConfigs:     "IncrementalAlterConfigs",
    alterPartitionReassignments: "AlterPartitionReassignments",
    listPartitionReassignments:  "ListPartitionReassignments",
    offsetDelete:                "OffsetDelete",
}

func makeInt8(b []byte) int8 {
    return int8(b[0])
}

func makeInt16(b []byte) int16 {
    return int16(binary.BigEndian.Uint16(b))
}

func makeInt32(b []byte) int32 {
    return int32(binary.BigEndian.Uint32(b))
}

func makeInt64(b []byte) int64 {
    return int64(binary.BigEndian.Uint64(b))
}

@@ -1,159 +0,0 @@
package kafka

import (
    "bufio"
    "errors"
    "fmt"
    "io"
)

type readable interface {
    readFrom(*bufio.Reader, int) (int, error)
}

var errShortRead = errors.New("not enough bytes available to load the response")

func peekRead(r *bufio.Reader, sz int, n int, f func([]byte)) (int, error) {
    if n > sz {
        return sz, errShortRead
    }
    b, err := r.Peek(n)
    if err != nil {
        return sz, err
    }
    f(b)
    return discardN(r, sz, n)
}

func readInt8(r *bufio.Reader, sz int, v *int8) (int, error) {
    return peekRead(r, sz, 1, func(b []byte) { *v = makeInt8(b) })
}

func readInt16(r *bufio.Reader, sz int, v *int16) (int, error) {
    return peekRead(r, sz, 2, func(b []byte) { *v = makeInt16(b) })
}

func readInt32(r *bufio.Reader, sz int, v *int32) (int, error) {
    return peekRead(r, sz, 4, func(b []byte) { *v = makeInt32(b) })
}

func readInt64(r *bufio.Reader, sz int, v *int64) (int, error) {
    return peekRead(r, sz, 8, func(b []byte) { *v = makeInt64(b) })
}

func readString(r *bufio.Reader, sz int, v *string) (int, error) {
    return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) {
        *v, remain, err = readNewString(r, sz, n)
        return
    })
}

func readStringWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) {
    var err error
    var len int16

    if sz, err = readInt16(r, sz, &len); err != nil {
        return sz, err
    }

    n := int(len)
    if n > sz {
        return sz, errShortRead
    }

    return cb(r, sz, n)
}

func readNewString(r *bufio.Reader, sz int, n int) (string, int, error) {
    b, sz, err := readNewBytes(r, sz, n)
    return string(b), sz, err
}

func readBytes(r *bufio.Reader, sz int, v *[]byte) (int, error) {
    return readBytesWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) {
        *v, remain, err = readNewBytes(r, sz, n)
        return
    })
}

func readBytesWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) {
    var err error
    var n int

    if sz, err = readArrayLen(r, sz, &n); err != nil {
        return sz, err
    }

    if n > sz {
        return sz, errShortRead
    }

    return cb(r, sz, n)
}

func readNewBytes(r *bufio.Reader, sz int, n int) ([]byte, int, error) {
    var err error
    var b []byte
    var shortRead bool

    if n > 0 {
        if sz < n {
            n = sz
            shortRead = true
        }

        b = make([]byte, n)
        n, err = io.ReadFull(r, b)
        b = b[:n]
        sz -= n

        if err == nil && shortRead {
            err = errShortRead
        }
    }

    return b, sz, err
}

func readArrayLen(r *bufio.Reader, sz int, n *int) (int, error) {
    var err error
    var len int32
    if sz, err = readInt32(r, sz, &len); err != nil {
        return sz, err
    }
    *n = int(len)
    return sz, nil
}

func ReadAll(r *bufio.Reader, sz int, ptrs ...interface{}) (int, error) {
    var err error

    for _, ptr := range ptrs {
        if sz, err = readPtr(r, sz, ptr); err != nil {
            break
        }
    }

    return sz, err
}

func readPtr(r *bufio.Reader, sz int, ptr interface{}) (int, error) {
    switch v := ptr.(type) {
    case *int8:
        return readInt8(r, sz, v)
    case *int16:
        return readInt16(r, sz, v)
    case *int32:
        return readInt32(r, sz, v)
    case *int64:
        return readInt64(r, sz, v)
    case *string:
        return readString(r, sz, v)
    case *[]byte:
        return readBytes(r, sz, v)
    case readable:
        return v.readFrom(r, sz)
    default:
        panic(fmt.Sprintf("unsupported type: %T", v))
    }
}
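
// Usage sketch, not part of the original source: ReadAll dispatches on the
// pointer type of each argument, so a fixed-size header can be pulled out of
// a *bufio.Reader in one call. sz is the number of bytes known to remain in
// the frame, and the returned value is what is left after the reads.
//
//  var apiKey, apiVersion int16
//  var correlationID int32
//  remain, err := ReadAll(r, size, &apiKey, &apiVersion, &correlationID)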

@@ -1,279 +0,0 @@
package kafka

import (
    "encoding/binary"
    "io"
    "time"

    "github.com/segmentio/kafka-go/compress"
)

// Attributes is a bitset representing special attributes set on records.
type Attributes int16

const (
    Gzip          Attributes = Attributes(compress.Gzip)   // 1
    Snappy        Attributes = Attributes(compress.Snappy) // 2
    Lz4           Attributes = Attributes(compress.Lz4)    // 3
    Zstd          Attributes = Attributes(compress.Zstd)   // 4
    Transactional Attributes = 1 << 4
    Control       Attributes = 1 << 5
)

func (a Attributes) Compression() compress.Compression {
    return compress.Compression(a & 7)
}

func (a Attributes) Transactional() bool {
    return (a & Transactional) != 0
}

func (a Attributes) Control() bool {
    return (a & Control) != 0
}

func (a Attributes) String() string {
    s := a.Compression().String()
    if a.Transactional() {
        s += "+transactional"
    }
    if a.Control() {
        s += "+control"
    }
    return s
}
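
// Worked example, not part of the original source: the low 3 bits select the
// compression codec and the remaining bits are flags, so values compose with
// bitwise OR.
//
//  a := Gzip | Transactional // 0b0001_0001
//  a.Compression()           // compress.Gzip
//  a.Transactional()         // true
//  a.String()                // "gzip+transactional"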

// Header represents a single entry in a list of record headers.
type Header struct {
    Key   string
    Value []byte
}

// Record is an interface representing a single kafka record.
//
// Record values are not safe to use concurrently from multiple goroutines.
type Record struct {
    // The offset at which the record exists in a topic partition. This value
    // is ignored in produce requests.
    Offset int64

    // Returns the time of the record. This value may be omitted in produce
    // requests to let kafka set the time when it saves the record.
    Time time.Time

    // Returns a byte sequence containing the key of this record. The returned
    // sequence may be nil to indicate that the record has no key. If the record
    // is part of a RecordSet, the content of the key must remain valid at least
    // until the record set is closed (or until the key is closed).
    Key Bytes

    // Returns a byte sequence containing the value of this record. The returned
    // sequence may be nil to indicate that the record has no value. If the
    // record is part of a RecordSet, the content of the value must remain valid
    // at least until the record set is closed (or until the value is closed).
    Value Bytes

    // Returns the list of headers associated with this record. The returned
    // slice may be reused across calls; the program should use it as an
    // immutable value.
    Headers []Header
}

// RecordSet represents a sequence of records in Produce requests and Fetch
// responses. All v0, v1, and v2 formats are supported.
type RecordSet struct {
    // The message version that this record set will be represented as, valid
    // values are 1 or 2.
    //
    // When reading, this is the value of the highest version used in the
    // batches that compose the record set.
    //
    // When writing, this value dictates the format that the records will be
    // encoded in.
    Version int8

    // Attributes set on the record set.
    //
    // When reading, the attributes are the combination of all attributes in
    // the batches that compose the record set.
    //
    // When writing, the attributes apply to the whole sequence of records in
    // the set.
    Attributes Attributes

    // A reader exposing the sequence of records.
    //
    // When reading a RecordSet from an io.Reader, the Records field will be a
    // *RecordStream. If the program needs to access the details of each batch
    // that compose the stream, it may use type assertions to access the
    // underlying types of each batch.
    Records RecordReader
}

// ReadFrom reads the representation of a record set from r into rs, returning
// the number of bytes consumed from r, and a non-nil error if the record set
// could not be read.
func (rs *RecordSet) ReadFrom(r io.Reader) (int64, error) {
    // d, _ := r.(*decoder)
    // if d == nil {
    //     d = &decoder{
    //         reader: r,
    //         remain: 4,
    //     }
    // }

    // *rs = RecordSet{}
    // limit := d.remain
    // size := d.readInt32()

    // if d.err != nil {
    //     return int64(limit - d.remain), d.err
    // }

    // if size <= 0 {
    //     return 4, nil
    // }

    // stream := &RecordStream{
    //     Records: make([]RecordReader, 0, 4),
    // }

    // var err error
    // d.remain = int(size)

    // for d.remain > 0 && err == nil {
    //     var version byte

    //     if d.remain < (magicByteOffset + 1) {
    //         if len(stream.Records) != 0 {
    //             break
    //         }
    //         return 4, fmt.Errorf("impossible record set shorter than %d bytes", magicByteOffset+1)
    //     }

    //     switch r := d.reader.(type) {
    //     case bufferedReader:
    //         b, err := r.Peek(magicByteOffset + 1)
    //         if err != nil {
    //             n, _ := r.Discard(len(b))
    //             return 4 + int64(n), dontExpectEOF(err)
    //         }
    //         version = b[magicByteOffset]
    //     case bytesBuffer:
    //         version = r.Bytes()[magicByteOffset]
    //     default:
    //         b := make([]byte, magicByteOffset+1)
    //         if n, err := io.ReadFull(d.reader, b); err != nil {
    //             return 4 + int64(n), dontExpectEOF(err)
    //         }
    //         version = b[magicByteOffset]
    //         // Reconstruct the prefix that we had to read to determine the version
    //         // of the record set from the magic byte.
    //         //
    //         // Technically this may recursively stack readers when consuming all
    //         // items of the batch, which could hurt performance. In practice this
    //         // path should not be taken though, since the decoder would read from a
    //         // *bufio.Reader which implements the bufferedReader interface.
    //         d.reader = io.MultiReader(bytes.NewReader(b), d.reader)
    //     }

    //     var tmp RecordSet
    //     switch version {
    //     case 0, 1:
    //         err = tmp.readFromVersion1(d)
    //     case 2:
    //         err = tmp.readFromVersion2(d)
    //     default:
    //         err = fmt.Errorf("unsupported message version %d for message of size %d", version, size)
    //     }

    //     if tmp.Version > rs.Version {
    //         rs.Version = tmp.Version
    //     }

    //     rs.Attributes |= tmp.Attributes

    //     if tmp.Records != nil {
    //         stream.Records = append(stream.Records, tmp.Records)
    //     }
    // }

    // if len(stream.Records) != 0 {
    //     rs.Records = stream
    //     // Ignore errors if we've successfully read records, so the
    //     // program can keep making progress.
    //     err = nil
    // }

    // d.discardAll()
    // rn := 4 + (int(size) - d.remain)
    // d.remain = limit - rn
    // return int64(rn), err
    return 0, nil
}

// WriteTo writes the representation of rs into w. The value of rs.Version
// dictates which format that the record set will be represented as.
//
// The error will be ErrNoRecord if rs contained no records.
//
// Note: since this package is only compatible with kafka 0.10 and above, the
// method never produces messages in version 0. If rs.Version is zero, the
// method defaults to producing messages in version 1.
func (rs *RecordSet) WriteTo(w io.Writer) (int64, error) {
    // if rs.Records == nil {
    //     return 0, ErrNoRecord
    // }

    // // This optimization avoids rendering the record set in an intermediary
    // // buffer when the writer is already a pageBuffer, which is a common case
    // // due to the way WriteRequest and WriteResponse are implemented.
    // buffer, _ := w.(*pageBuffer)
    // bufferOffset := int64(0)

    // if buffer != nil {
    //     bufferOffset = buffer.Size()
    // } else {
    //     buffer = newPageBuffer()
    //     defer buffer.unref()
    // }

    // size := packUint32(0)
    // buffer.Write(size[:]) // size placeholder

    // var err error
    // switch rs.Version {
    // case 0, 1:
    //     err = rs.writeToVersion1(buffer, bufferOffset+4)
    // case 2:
    //     err = rs.writeToVersion2(buffer, bufferOffset+4)
    // default:
    //     err = fmt.Errorf("unsupported record set version %d", rs.Version)
    // }
    // if err != nil {
    //     return 0, err
    // }

    // n := buffer.Size() - bufferOffset
    // if n == 0 {
    //     size = packUint32(^uint32(0))
    // } else {
    //     size = packUint32(uint32(n) - 4)
    // }
    // buffer.WriteAt(size[:], bufferOffset)

    // // This condition indicates that the output writer received by `WriteTo` was
    // // not a *pageBuffer, in which case we need to flush the buffered records
    // // data into it.
    // if buffer != w {
    //     return buffer.WriteTo(w)
    // }

    // return n, nil
    return 0, nil
}

func packUint32(u uint32) (b [4]byte) {
    binary.BigEndian.PutUint32(b[:], u)
    return
}

@@ -1,43 +0,0 @@
package kafka

import (
    "github.com/segmentio/kafka-go/protocol"
)

// Header is a key/value pair type representing headers set on records.
// type Header = protocol.Header

// Bytes is an interface representing a sequence of bytes. This abstraction
// makes it possible for programs to inject data into produce requests without
// having to load it into an intermediary buffer, or read record keys and values
// from a fetch response directly from internal buffers.
//
// Bytes are not safe to use concurrently from multiple goroutines.
// type Bytes = protocol.Bytes

// NewBytes constructs a Bytes value from a byte slice.
//
// If b is nil, nil is returned.
// func NewBytes(b []byte) Bytes { return protocol.NewBytes(b) }

// ReadAll reads b into a byte slice.
// func ReadAll(b Bytes) ([]byte, error) { return protocol.ReadAll(b) }

// Record is an interface representing a single kafka record.
//
// Record values are not safe to use concurrently from multiple goroutines.
// type Record = protocol.Record

// RecordReader is an interface representing a sequence of records. Record sets
// are used in both produce and fetch requests to represent the sequence of
// records that are sent to or received from kafka brokers.
//
// RecordReader values are not safe to use concurrently from multiple goroutines.
type RecordReader = protocol.RecordReader

// NewRecordReader constructs a RecordReader which exposes the sequence of
// records passed as arguments.
func NewRecordReader(records ...Record) RecordReader {
    // return protocol.NewRecordReader(records...)
    return nil
}
@@ -8,46 +8,14 @@ import (

type index []int

type _type struct{ typ reflect.Type }

func typeOf(x interface{}) _type {
    return makeType(reflect.TypeOf(x))
}

func elemTypeOf(x interface{}) _type {
    return makeType(reflect.TypeOf(x).Elem())
}

func makeType(t reflect.Type) _type {
    return _type{typ: t}
}

type value struct {
    val reflect.Value
}

func nonAddressableValueOf(x interface{}) value {
    return value{val: reflect.ValueOf(x)}
}

func valueOf(x interface{}) value {
    return value{val: reflect.ValueOf(x).Elem()}
}

func (v value) bool() bool { return v.val.Bool() }

func (v value) int8() int8 { return int8(v.int64()) }

func (v value) int16() int16 { return int16(v.int64()) }

func (v value) int32() int32 { return int32(v.int64()) }

func (v value) int64() int64 { return v.val.Int() }

func (v value) string() string { return v.val.String() }

func (v value) bytes() []byte { return v.val.Bytes() }

func (v value) iface(t reflect.Type) interface{} { return v.val.Addr().Interface() }

func (v value) array(t reflect.Type) array { return array{val: v.val} } //nolint
@@ -88,10 +56,6 @@ func makeArray(t reflect.Type, n int) array {

func (a array) index(i int) value { return value{val: a.val.Index(i)} }

func (a array) length() int { return a.val.Len() }

func (a array) isNil() bool { return a.val.IsNil() }

func indexOf(s reflect.StructField) index { return index(s.Index) }

func bytesToString(b []byte) string { return string(b) }
|
||||
|
||||
@@ -28,6 +28,9 @@ func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, su
|
||||
}
|
||||
|
||||
if size < 8 {
|
||||
if size == 0 {
|
||||
return 0, 0, io.EOF
|
||||
}
|
||||
return 0, 0, fmt.Errorf("A Kafka request header cannot be smaller than 8 bytes")
|
||||
}
|
||||
|
||||
@@ -42,7 +45,7 @@ func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, su
	correlationID := d.readInt32()
	clientID := d.readString()

	if i := int(apiKey); i < 0 || i >= len(apiTypes) {
	if i := int(apiKey); i < 0 || i >= numApis {
		err = fmt.Errorf("unsupported api key: %d", i)
		return apiKey, apiVersion, err
	}
@@ -52,12 +55,6 @@ func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, su
		return apiKey, apiVersion, err
	}

	t := &apiTypes[apiKey]
	if t == nil {
		err = fmt.Errorf("unsupported api: %s", apiNames[apiKey])
		return apiKey, apiVersion, err
	}

	var payload interface{}

	switch apiKey {
@@ -227,61 +224,3 @@ func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, su

	return apiKey, apiVersion, nil
}

func WriteRequest(w io.Writer, apiVersion int16, correlationID int32, clientID string, msg Message) error {
	apiKey := msg.ApiKey()

	if i := int(apiKey); i < 0 || i >= len(apiTypes) {
		return fmt.Errorf("unsupported api key: %d", i)
	}

	t := &apiTypes[apiKey]
	if t == nil {
		return fmt.Errorf("unsupported api: %s", apiNames[apiKey])
	}

	minVersion := t.minVersion()
	maxVersion := t.maxVersion()

	if apiVersion < minVersion || apiVersion > maxVersion {
		return fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion)
	}

	r := &t.requests[apiVersion-minVersion]
	v := valueOf(msg)
	b := newPageBuffer()
	defer b.unref()

	e := &encoder{writer: b}
	e.writeInt32(0) // placeholder for the request size
	e.writeInt16(int16(apiKey))
	e.writeInt16(apiVersion)
	e.writeInt32(correlationID)

	if r.flexible {
		// Flexible messages use a nullable string for the client ID, then extra space for a
		// tag buffer, which begins with a size value. Since we're not writing any fields into the
		// latter, we can just write zero for now.
		//
		// See
		// https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields
		// for details.
		e.writeNullString(clientID)
		e.writeUnsignedVarInt(0)
	} else {
		// Technically, recent versions of kafka interpret this field as a nullable
		// string, however kafka 0.10 expected a non-nullable string and fails with
		// a NullPointerException when it receives a null client id.
		e.writeString(clientID)
	}
	r.encode(e, v)
	err := e.err

	if err == nil {
		size := packUint32(uint32(b.Size()) - 4)
		_, _ = b.WriteAt(size[:], 0)
		_, err = b.WriteTo(w)
	}

	return err
}
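The flexible-message branch above closes the header with an empty KIP-482 tagged-field buffer, written as a single unsigned varint zero. A minimal, self-contained sketch of that varint encoding (illustrative only, using the standard library rather than the package's encoder):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// KIP-482 tagged-field sections start with the field count as an
	// unsigned varint; an empty tag buffer is therefore the single byte 0x00.
	buf := make([]byte, binary.MaxVarintLen32)
	n := binary.PutUvarint(buf, 0)
	fmt.Printf("% x\n", buf[:n]) // 00
}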
@@ -25,6 +25,9 @@ func ReadResponse(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, s
	}

	if size < 4 {
		if size == 0 {
			return io.EOF
		}
		return fmt.Errorf("A Kafka response header cannot be smaller than 4 bytes")
	}

@@ -53,7 +56,7 @@ func ReadResponse(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, s
	)
	reqResPair := reqResMatcher.registerResponse(key, response)
	if reqResPair == nil {
		return fmt.Errorf("Couldn't match a Kafka response to a Kafka request in 3 seconds!")
		return fmt.Errorf("Couldn't match a Kafka response to a Kafka request in %d milliseconds!", reqResMatcher.maxTry)
	}
	apiKey := reqResPair.Request.ApiKey
	apiVersion := reqResPair.Request.ApiVersion
@@ -284,57 +287,12 @@ func ReadResponse(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, s
	}
	emitter.Emit(item)

	if i := int(apiKey); i < 0 || i >= len(apiTypes) {
	if i := int(apiKey); i < 0 || i >= numApis {
		err = fmt.Errorf("unsupported api key: %d", i)
		return err
	}

	t := &apiTypes[apiKey]
	if t == nil {
		err = fmt.Errorf("unsupported api: %s", apiNames[apiKey])
		return err
	}

	d.discardAll()

	return nil
}

func WriteResponse(w io.Writer, apiVersion int16, correlationID int32, msg Message) error {
	apiKey := msg.ApiKey()

	if i := int(apiKey); i < 0 || i >= len(apiTypes) {
		return fmt.Errorf("unsupported api key: %d", i)
	}

	t := &apiTypes[apiKey]
	if t == nil {
		return fmt.Errorf("unsupported api: %s", apiNames[apiKey])
	}

	minVersion := t.minVersion()
	maxVersion := t.maxVersion()

	if apiVersion < minVersion || apiVersion > maxVersion {
		return fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion)
	}

	r := &t.responses[apiVersion-minVersion]
	v := valueOf(msg)
	b := newPageBuffer()
	defer b.unref()

	e := &encoder{writer: b}
	e.writeInt32(0) // placeholder for the response size
	e.writeInt32(correlationID)
	r.encode(e, v)
	err := e.err

	if err == nil {
		size := packUint32(uint32(b.Size()) - 4)
		_, _ = b.WriteAt(size[:], 0)
		_, err = b.WriteTo(w)
	}

	return err
}
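Both WriteRequest and WriteResponse use the same framing trick: reserve four bytes, encode the body, then back-patch the big-endian size (which excludes the size field itself). A minimal, self-contained sketch of the pattern (illustrative only, not the package's pageBuffer):

package main

import (
	"encoding/binary"
	"fmt"
)

func frame(body []byte) []byte {
	// Reserve a 4-byte placeholder, append the body, then back-patch the size.
	out := make([]byte, 4, 4+len(body))
	out = append(out, body...)
	binary.BigEndian.PutUint32(out[:4], uint32(len(out)-4))
	return out
}

func main() {
	fmt.Printf("% x\n", frame([]byte{0xca, 0xfe})) // 00 00 00 02 ca fe
}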
@@ -12,19 +12,6 @@ const (
	RequireAll RequiredAcks = -1
)

func (acks RequiredAcks) String() string {
	switch acks {
	case RequireNone:
		return "none"
	case RequireOne:
		return "one"
	case RequireAll:
		return "all"
	default:
		return "unknown"
	}
}

type UUID struct {
	TimeLow int32 `json:"timeLow"`
	TimeMid int16 `json:"timeMid"`
16
tap/extensions/redis/Makefile
Normal file
@@ -0,0 +1,16 @@
skipbin := $$(find bin -mindepth 1 -maxdepth 1)
skipexpect := $$(find expect -mindepth 1 -maxdepth 1)

test: test-pull-bin test-pull-expect
	@MIZU_TEST=1 go test -v ./... -coverpkg=./... -race -coverprofile=coverage.out -covermode=atomic

test-update: test-pull-bin
	@MIZU_TEST=1 TEST_UPDATE=1 go test -v ./... -coverpkg=./... -coverprofile=coverage.out -covermode=atomic

test-pull-bin:
	@mkdir -p bin
	@[ "${skipbin}" ] && echo "Skipping downloading BINs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp gs://static.up9.io/mizu/test-pcap/bin/redis/\*.bin bin

test-pull-expect:
	@mkdir -p expect
	@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect/redis/\* expect
@@ -2,8 +2,16 @@ module github.com/up9inc/mizu/tap/extensions/redis

go 1.17

require github.com/up9inc/mizu/tap/api v0.0.0
require (
	github.com/stretchr/testify v1.7.0
	github.com/up9inc/mizu/tap/api v0.0.0
)

require github.com/google/martian v2.1.0+incompatible // indirect
require (
	github.com/davecgh/go-spew v1.1.0 // indirect
	github.com/google/martian v2.1.0+incompatible // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
)

replace github.com/up9inc/mizu/tap/api v0.0.0 => ../../api
@@ -1,2 +1,13 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
290
tap/extensions/redis/main_test.go
Normal file
@@ -0,0 +1,290 @@
package redis

import (
	"bufio"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"path"
	"path/filepath"
	"reflect"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/up9inc/mizu/tap/api"
)

const (
	binDir          = "bin"
	patternBin      = "*_req.bin"
	patternDissect  = "*.json"
	msgDissecting   = "Dissecting:"
	msgAnalyzing    = "Analyzing:"
	msgRepresenting = "Representing:"
	respSuffix      = "_res.bin"
	expectDir       = "expect"
	dissectDir      = "dissect"
	analyzeDir      = "analyze"
	representDir    = "represent"
	testUpdate      = "TEST_UPDATE"
)

func TestRegister(t *testing.T) {
	dissector := NewDissector()
	extension := &api.Extension{}
	dissector.Register(extension)
	assert.Equal(t, "redis", extension.Protocol.Name)
}

func TestMacros(t *testing.T) {
	expectedMacros := map[string]string{
		"redis": `proto.name == "redis"`,
	}
	dissector := NewDissector()
	macros := dissector.Macros()
	assert.Equal(t, expectedMacros, macros)
}

func TestPing(t *testing.T) {
	dissector := NewDissector()
	dissector.Ping()
}

func TestDissect(t *testing.T) {
	_, testUpdateEnabled := os.LookupEnv(testUpdate)

	expectDirDissect := path.Join(expectDir, dissectDir)

	if testUpdateEnabled {
		os.RemoveAll(expectDirDissect)
		err := os.MkdirAll(expectDirDissect, 0775)
		assert.Nil(t, err)
	}

	dissector := NewDissector()
	paths, err := filepath.Glob(path.Join(binDir, patternBin))
	if err != nil {
		log.Fatal(err)
	}

	options := &api.TrafficFilteringOptions{
		IgnoredUserAgents: []string{},
	}

	for _, _path := range paths {
		basePath := _path[:len(_path)-8]

		// Channel to verify the output
		itemChannel := make(chan *api.OutputChannelItem)
		var emitter api.Emitter = &api.Emitting{
			AppStats:      &api.AppStats{},
			OutputChannel: itemChannel,
		}

		var items []*api.OutputChannelItem
		stop := make(chan bool)

		go func() {
			for {
				select {
				case <-stop:
					return
				case item := <-itemChannel:
					items = append(items, item)
				}
			}
		}()

		// Stream level
		counterPair := &api.CounterPair{
			Request:  0,
			Response: 0,
		}
		superIdentifier := &api.SuperIdentifier{}

		// Request
		pathClient := _path
		fmt.Printf("%s %s\n", msgDissecting, pathClient)
		fileClient, err := os.Open(pathClient)
		assert.Nil(t, err)

		bufferClient := bufio.NewReader(fileClient)
		tcpIDClient := &api.TcpID{
			SrcIP:   "1",
			DstIP:   "2",
			SrcPort: "1",
			DstPort: "2",
		}
		reqResMatcher := dissector.NewResponseRequestMatcher()
		err = dissector.Dissect(bufferClient, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
		if err != nil && reflect.TypeOf(err) != reflect.TypeOf(&ConnectError{}) && err != io.EOF && err != io.ErrUnexpectedEOF {
			log.Println(err)
		}

		// Response
		pathServer := basePath + respSuffix
		fmt.Printf("%s %s\n", msgDissecting, pathServer)
		fileServer, err := os.Open(pathServer)
		assert.Nil(t, err)

		bufferServer := bufio.NewReader(fileServer)
		tcpIDServer := &api.TcpID{
			SrcIP:   "2",
			DstIP:   "1",
			SrcPort: "2",
			DstPort: "1",
		}
		err = dissector.Dissect(bufferServer, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
		if err != nil && reflect.TypeOf(err) != reflect.TypeOf(&ConnectError{}) && err != io.EOF && err != io.ErrUnexpectedEOF {
			log.Println(err)
		}

		fileClient.Close()
		fileServer.Close()

		pathExpect := path.Join(expectDirDissect, fmt.Sprintf("%s.json", basePath[4:]))

		time.Sleep(10 * time.Millisecond)

		stop <- true

		marshaled, err := json.Marshal(items)
		assert.Nil(t, err)

		if testUpdateEnabled {
			if len(items) > 0 {
				err = os.WriteFile(pathExpect, marshaled, 0644)
				assert.Nil(t, err)
			}
		} else {
			if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
				assert.Len(t, items, 0)
			} else {
				expectedBytes, err := ioutil.ReadFile(pathExpect)
				assert.Nil(t, err)

				assert.JSONEq(t, string(expectedBytes), string(marshaled))
			}
		}
	}
}

func TestAnalyze(t *testing.T) {
	_, testUpdateEnabled := os.LookupEnv(testUpdate)

	expectDirDissect := path.Join(expectDir, dissectDir)
	expectDirAnalyze := path.Join(expectDir, analyzeDir)

	if testUpdateEnabled {
		os.RemoveAll(expectDirAnalyze)
		err := os.MkdirAll(expectDirAnalyze, 0775)
		assert.Nil(t, err)
	}

	dissector := NewDissector()
	paths, err := filepath.Glob(path.Join(expectDirDissect, patternDissect))
	if err != nil {
		log.Fatal(err)
	}

	for _, _path := range paths {
		fmt.Printf("%s %s\n", msgAnalyzing, _path)

		bytes, err := ioutil.ReadFile(_path)
		assert.Nil(t, err)

		var items []*api.OutputChannelItem
		err = json.Unmarshal(bytes, &items)
		assert.Nil(t, err)

		var entries []*api.Entry
		for _, item := range items {
			entry := dissector.Analyze(item, "", "", "")
			entries = append(entries, entry)
		}

		pathExpect := path.Join(expectDirAnalyze, filepath.Base(_path))

		marshaled, err := json.Marshal(entries)
		assert.Nil(t, err)

		if testUpdateEnabled {
			if len(entries) > 0 {
				err = os.WriteFile(pathExpect, marshaled, 0644)
				assert.Nil(t, err)
			}
		} else {
			if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
				assert.Len(t, items, 0)
			} else {
				expectedBytes, err := ioutil.ReadFile(pathExpect)
				assert.Nil(t, err)

				assert.JSONEq(t, string(expectedBytes), string(marshaled))
			}
		}
	}
}

func TestRepresent(t *testing.T) {
	_, testUpdateEnabled := os.LookupEnv(testUpdate)

	expectDirAnalyze := path.Join(expectDir, analyzeDir)
	expectDirRepresent := path.Join(expectDir, representDir)

	if testUpdateEnabled {
		os.RemoveAll(expectDirRepresent)
		err := os.MkdirAll(expectDirRepresent, 0775)
		assert.Nil(t, err)
	}

	dissector := NewDissector()
	paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternDissect))
	if err != nil {
		log.Fatal(err)
	}

	for _, _path := range paths {
		fmt.Printf("%s %s\n", msgRepresenting, _path)

		bytes, err := ioutil.ReadFile(_path)
		assert.Nil(t, err)

		var entries []*api.Entry
		err = json.Unmarshal(bytes, &entries)
		assert.Nil(t, err)

		var objects []string
		for _, entry := range entries {
			object, _, err := dissector.Represent(entry.Request, entry.Response)
			assert.Nil(t, err)
			objects = append(objects, string(object))
		}

		pathExpect := path.Join(expectDirRepresent, filepath.Base(_path))

		marshaled, err := json.Marshal(objects)
		assert.Nil(t, err)

		if testUpdateEnabled {
			if len(objects) > 0 {
				err = os.WriteFile(pathExpect, marshaled, 0644)
				assert.Nil(t, err)
			}
		} else {
			if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
				assert.Len(t, objects, 0)
			} else {
				expectedBytes, err := ioutil.ReadFile(pathExpect)
				assert.Nil(t, err)

				assert.JSONEq(t, string(expectedBytes), string(marshaled))
			}
		}
	}
}
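TestDissect above collects emitted items on a channel through a goroutine that exits on a stop signal. A minimal, self-contained sketch of that collector pattern (illustrative, using a plain string channel instead of *api.OutputChannelItem, and with an extra done channel so the slice is read only after the collector has exited):

package main

import "fmt"

func main() {
	items := make(chan string)
	stop := make(chan bool)
	done := make(chan struct{})
	var collected []string

	go func() {
		defer close(done)
		for {
			select {
			case <-stop:
				return
			case item := <-items:
				collected = append(collected, item)
			}
		}
	}()

	items <- "entry-1"
	items <- "entry-2"
	stop <- true
	<-done // wait for the collector before reading its slice

	fmt.Println(collected) // [entry-1 entry-2]
}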
@@ -19,6 +19,8 @@ func createResponseRequestMatcher() api.RequestResponseMatcher {
func (matcher *requestResponseMatcher) GetMap() *sync.Map {
	return matcher.openMessagesMap
}
func (matcher *requestResponseMatcher) SetMaxTry(value int) {
}

func (matcher *requestResponseMatcher) registerRequest(ident string, request *RedisPacket, captureTime time.Time) *api.OutputChannelItem {
	requestRedisMessage := api.GenericMessage{
@@ -4,6 +4,8 @@ go 1.17

require (
	github.com/bradleyfalzon/tlsx v0.0.0-20170624122154-28fd0e59bac4
	github.com/cilium/ebpf v0.8.0
	github.com/go-errors/errors v1.4.2
	github.com/google/gopacket v1.1.19
	github.com/up9inc/mizu/shared v0.0.0
	github.com/up9inc/mizu/tap/api v0.0.0
10
tap/go.sum
@@ -102,6 +102,8 @@ github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.8.0 h1:2V6KSg3FRADVU2BMIRemZ0hV+9OM+aAHhZDjQyjJTAs=
github.com/cilium/ebpf v0.8.0/go.mod h1:f5zLIM0FSNuAkSyLAN7X+Hy6yznlF1mNiWUMfxMtrgk=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -161,6 +163,8 @@ github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYF
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss=
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
@@ -169,6 +173,7 @@ github.com/fvbommel/sortorder v1.0.2/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui72
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -366,6 +371,8 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -481,6 +488,8 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -788,6 +797,7 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -21,6 +21,7 @@ import (
	"github.com/up9inc/mizu/tap/api"
	"github.com/up9inc/mizu/tap/diagnose"
	"github.com/up9inc/mizu/tap/source"
	"github.com/up9inc/mizu/tap/tlstapper"
	v1 "k8s.io/api/core/v1"
)

@@ -53,6 +54,7 @@ var promisc = flag.Bool("promisc", true, "Set promiscuous mode")
var staleTimeoutSeconds = flag.Int("staletimout", 120, "Max time in seconds to keep connections which don't transmit data")
var pids = flag.String("pids", "", "A comma separated list of PIDs to capture their network namespaces")
var servicemesh = flag.Bool("servicemesh", false, "Record decrypted traffic if the cluster is configured with a service mesh and with mtls")
var tls = flag.Bool("tls", false, "Enable TLS tapper")

var memprofile = flag.String("memprofile", "", "Write memory profile")

@@ -95,6 +97,15 @@ func StartPassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem,
		tapTargets = opts.FilterAuthorities
	}

	if *tls {
		for _, e := range extensions {
			if e.Protocol.Name == "http" {
				startTlsTapper(e, outputItems, options)
				break
			}
		}
	}

	if GetMemoryProfilingEnabled() {
		diagnose.StartMemoryProfiler(os.Getenv(MemoryProfilingDumpPath), os.Getenv(MemoryProfilingTimeIntervalSeconds))
	}
@@ -232,3 +243,35 @@ func startPassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem)
	diagnose.TapErrors.PrintSummary()
	logger.Log.Infof("AppStats: %v", diagnose.AppStats)
}

func startTlsTapper(extension *api.Extension, outputItems chan *api.OutputChannelItem, options *api.TrafficFilteringOptions) {
	tls := tlstapper.TlsTapper{}
	tlsPerfBufferSize := os.Getpagesize() * 100

	if err := tls.Init(tlsPerfBufferSize); err != nil {
		tlstapper.LogError(err)
		return
	}

	// A quick way to instrument libssl.so without PID filtering - used for debugging and troubleshooting
	//
	if os.Getenv("MIZU_GLOBAL_SSL_LIBRARY") != "" {
		if err := tls.GlobalTap(os.Getenv("MIZU_GLOBAL_SSL_LIBRARY")); err != nil {
			tlstapper.LogError(err)
			return
		}
	}

	if err := tlstapper.UpdateTapTargets(&tls, &tapTargets, *procfs); err != nil {
		tlstapper.LogError(err)
		return
	}

	var emitter api.Emitter = &api.Emitting{
		AppStats:      &diagnose.AppStats,
		OutputChannel: outputItems,
	}

	poller := tlstapper.NewTlsPoller(&tls, extension)
	go poller.Poll(extension, emitter, options)
}
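For quick troubleshooting, the global-tap branch above can be driven from the environment. A hypothetical invocation (the tapper binary name and the libssl path are assumptions and vary per setup/distro):

MIZU_GLOBAL_SSL_LIBRARY=/usr/lib/x86_64-linux-gnu/libssl.so.1.1 ./tapper -tls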
5
tap/tlstapper/bpf-builder/Dockerfile
Normal file
@@ -0,0 +1,5 @@
FROM alpine:3

RUN apk --no-cache update && apk --no-cache add clang llvm libbpf-dev go linux-headers

WORKDIR /mizu
16
tap/tlstapper/bpf-builder/README.md
Normal file
@@ -0,0 +1,16 @@
# Bpf builder

Currently we push the eBPF `*.o` files to source control; the motivation is to avoid requiring everyone to compile them on their own machine.

This directory helps those who do want to build the `.o` files, and it also serves as documentation for the process of compiling the eBPF code.

## How to run ebpf-builder

From your shell, go to this directory and run `./build.sh`

Once the Docker build finishes successfully, make sure to commit the four relevant files:
> tlstapper_bpfeb.go
> tlstapper_bpfel.go
> tlstapper_bpfeb.o
> tlstapper_bpfel.o
17
tap/tlstapper/bpf-builder/build.sh
Executable file
@@ -0,0 +1,17 @@
#!/bin/bash

MIZU_HOME=$(realpath ../../../)

docker build -t mizu-ebpf-builder . || exit 1

docker run --rm \
	--name mizu-ebpf-builder \
	-v $MIZU_HOME:/mizu \
	-it mizu-ebpf-builder \
	sh -c "
		go generate tap/tlstapper/tls_tapper.go
		chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfeb.go
		chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfeb.o
		chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfel.go
		chown $(id -u):$(id -g) tap/tlstapper/tlstapper_bpfel.o
	" || exit 1
215
tap/tlstapper/bpf/fd_to_address_tracepoints.c
Normal file
@@ -0,0 +1,215 @@
/*
Note: This file is licensed differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/

#include "include/headers.h"
#include "include/util.h"
#include "include/maps.h"
#include "include/pids.h"

struct accept_info {
	__u64* sockaddr;
	__u32* addrlen;
};

BPF_HASH(accept_syscall_context, __u64, struct accept_info);

struct sys_enter_accept4_ctx {
	__u64 __unused_syscall_header;
	__u32 __unused_syscall_nr;

	__u64 fd;
	__u64* sockaddr;
	__u32* addrlen;
};

SEC("tracepoint/syscalls/sys_enter_accept4")
void sys_enter_accept4(struct sys_enter_accept4_ctx *ctx) {
	__u64 id = bpf_get_current_pid_tgid();

	if (!should_tap(id >> 32)) {
		return;
	}

	struct accept_info info = {};

	info.sockaddr = ctx->sockaddr;
	info.addrlen = ctx->addrlen;

	long err = bpf_map_update_elem(&accept_syscall_context, &id, &info, BPF_ANY);

	if (err != 0) {
		char msg[] = "Error putting accept info (id: %ld) (err: %ld)";
		bpf_trace_printk(msg, sizeof(msg), id, err);
		return;
	}
}

struct sys_exit_accept4_ctx {
	__u64 __unused_syscall_header;
	__u32 __unused_syscall_nr;

	__u64 ret;
};

SEC("tracepoint/syscalls/sys_exit_accept4")
void sys_exit_accept4(struct sys_exit_accept4_ctx *ctx) {
	__u64 id = bpf_get_current_pid_tgid();

	if (!should_tap(id >> 32)) {
		return;
	}

	if (ctx->ret < 0) {
		bpf_map_delete_elem(&accept_syscall_context, &id);
		return;
	}

	struct accept_info *infoPtr = bpf_map_lookup_elem(&accept_syscall_context, &id);

	if (infoPtr == 0) {
		return;
	}

	struct accept_info info;
	long err = bpf_probe_read(&info, sizeof(struct accept_info), infoPtr);

	bpf_map_delete_elem(&accept_syscall_context, &id);

	if (err != 0) {
		char msg[] = "Error reading accept info from accept syscall (id: %ld) (err: %ld)";
		bpf_trace_printk(msg, sizeof(msg), id, err);
		return;
	}

	__u32 addrlen;
	bpf_probe_read(&addrlen, sizeof(__u32), info.addrlen);

	if (addrlen != 16) {
		// Currently only ipv4 is supported linux-src/include/linux/inet.h
		return;
	}

	struct fd_info fdinfo = {
		.flags = 0
	};

	bpf_probe_read(fdinfo.ipv4_addr, sizeof(fdinfo.ipv4_addr), info.sockaddr);

	__u32 pid = id >> 32;
	__u32 fd = (__u32) ctx->ret;

	__u64 key = (__u64) pid << 32 | fd;
	err = bpf_map_update_elem(&file_descriptor_to_ipv4, &key, &fdinfo, BPF_ANY);

	if (err != 0) {
		char msg[] = "Error putting fd to address mapping from accept (key: %ld) (err: %ld)";
		bpf_trace_printk(msg, sizeof(msg), key, err);
		return;
	}
}

struct connect_info {
	__u64 fd;
	__u64* sockaddr;
	__u32 addrlen;
};

BPF_HASH(connect_syscall_info, __u64, struct connect_info);

struct sys_enter_connect_ctx {
	__u64 __unused_syscall_header;
	__u32 __unused_syscall_nr;

	__u64 fd;
	__u64* sockaddr;
	__u32 addrlen;
};

SEC("tracepoint/syscalls/sys_enter_connect")
void sys_enter_connect(struct sys_enter_connect_ctx *ctx) {
	__u64 id = bpf_get_current_pid_tgid();

	if (!should_tap(id >> 32)) {
		return;
	}

	struct connect_info info = {};

	info.sockaddr = ctx->sockaddr;
	info.addrlen = ctx->addrlen;
	info.fd = ctx->fd;

	long err = bpf_map_update_elem(&connect_syscall_info, &id, &info, BPF_ANY);

	if (err != 0) {
		char msg[] = "Error putting connect info (id: %ld) (err: %ld)";
		bpf_trace_printk(msg, sizeof(msg), id, err);
		return;
	}
}

struct sys_exit_connect_ctx {
	__u64 __unused_syscall_header;
	__u32 __unused_syscall_nr;

	__u64 ret;
};

SEC("tracepoint/syscalls/sys_exit_connect")
void sys_exit_connect(struct sys_exit_connect_ctx *ctx) {
	__u64 id = bpf_get_current_pid_tgid();

	if (!should_tap(id >> 32)) {
		return;
	}

	// Commented because of async connect which sets errno to EINPROGRESS
	//
	// if (ctx->ret != 0) {
	// 	bpf_map_delete_elem(&accept_syscall_context, &id);
	// 	return;
	// }

	struct connect_info *infoPtr = bpf_map_lookup_elem(&connect_syscall_info, &id);

	if (infoPtr == 0) {
		return;
	}

	struct connect_info info;
	long err = bpf_probe_read(&info, sizeof(struct connect_info), infoPtr);

	bpf_map_delete_elem(&connect_syscall_info, &id);

	if (err != 0) {
		char msg[] = "Error reading connect info from connect syscall (id: %ld) (err: %ld)";
		bpf_trace_printk(msg, sizeof(msg), id, err);
		return;
	}

	if (info.addrlen != 16) {
		// Currently only ipv4 is supported linux-src/include/linux/inet.h
		return;
	}

	struct fd_info fdinfo = {
		.flags = FLAGS_IS_CLIENT_BIT
	};

	bpf_probe_read(fdinfo.ipv4_addr, sizeof(fdinfo.ipv4_addr), info.sockaddr);

	__u32 pid = id >> 32;
	__u32 fd = (__u32) info.fd;

	__u64 key = (__u64) pid << 32 | fd;
	err = bpf_map_update_elem(&file_descriptor_to_ipv4, &key, &fdinfo, BPF_ANY);

	if (err != 0) {
		char msg[] = "Error putting fd to address mapping from connect (key: %ld) (err: %ld)";
		bpf_trace_printk(msg, sizeof(msg), key, err);
		return;
	}
}
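Both handlers above key file_descriptor_to_ipv4 by packing the PID into the high 32 bits and the file descriptor into the low 32 bits. A small, self-contained Go sketch of that packing (illustrative only):

package main

import "fmt"

func packKey(pid, fd uint32) uint64 { return uint64(pid)<<32 | uint64(fd) }

func unpackKey(key uint64) (pid, fd uint32) { return uint32(key >> 32), uint32(key) }

func main() {
	key := packKey(1234, 7)
	pid, fd := unpackKey(key)
	fmt.Printf("key=0x%016x pid=%d fd=%d\n", key, pid, fd) // key=0x000004d200000007 pid=1234 fd=7
}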
96
tap/tlstapper/bpf/fd_tracepoints.c
Normal file
@@ -0,0 +1,96 @@
/*
Note: This file is licensed differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/

#include "include/headers.h"
#include "include/util.h"
#include "include/maps.h"
#include "include/pids.h"

struct sys_enter_read_ctx {
	__u64 __unused_syscall_header;
	__u32 __unused_syscall_nr;

	__u64 fd;
	__u64* buf;
	__u64 count;
};

SEC("tracepoint/syscalls/sys_enter_read")
void sys_enter_read(struct sys_enter_read_ctx *ctx) {
	__u64 id = bpf_get_current_pid_tgid();

	if (!should_tap(id >> 32)) {
		return;
	}

	struct ssl_info *infoPtr = bpf_map_lookup_elem(&ssl_read_context, &id);

	if (infoPtr == 0) {
		return;
	}

	struct ssl_info info;
	long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);

	if (err != 0) {
		char msg[] = "Error reading read info from read syscall (id: %ld) (err: %ld)";
		bpf_trace_printk(msg, sizeof(msg), id, err);
		return;
	}

	info.fd = ctx->fd;

	err = bpf_map_update_elem(&ssl_read_context, &id, &info, BPF_ANY);

	if (err != 0) {
		char msg[] = "Error putting file descriptor from read syscall (id: %ld) (err: %ld)";
		bpf_trace_printk(msg, sizeof(msg), id, err);
		return;
	}
}

struct sys_enter_write_ctx {
	__u64 __unused_syscall_header;
	__u32 __unused_syscall_nr;

	__u64 fd;
	__u64* buf;
	__u64 count;
};

SEC("tracepoint/syscalls/sys_enter_write")
void sys_enter_write(struct sys_enter_write_ctx *ctx) {
	__u64 id = bpf_get_current_pid_tgid();

	if (!should_tap(id >> 32)) {
		return;
	}

	struct ssl_info *infoPtr = bpf_map_lookup_elem(&ssl_write_context, &id);

	if (infoPtr == 0) {
		return;
	}

	struct ssl_info info;
	long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);

	if (err != 0) {
		char msg[] = "Error reading write context from write syscall (id: %ld) (err: %ld)";
		bpf_trace_printk(msg, sizeof(msg), id, err);
		return;
	}

	info.fd = ctx->fd;

	err = bpf_map_update_elem(&ssl_write_context, &id, &info, BPF_ANY);

	if (err != 0) {
		char msg[] = "Error putting file descriptor from write syscall (id: %ld) (err: %ld)";
		bpf_trace_printk(msg, sizeof(msg), id, err);
		return;
	}
}
16
tap/tlstapper/bpf/include/headers.h
Normal file
@@ -0,0 +1,16 @@
/*
Note: This file is licensed differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/

#ifndef __HEADERS__
#define __HEADERS__

#include <stddef.h>
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>
#include "bpf/bpf_tracing.h"

#endif /* __HEADERS__ */
63
tap/tlstapper/bpf/include/maps.h
Normal file
@@ -0,0 +1,63 @@
/*
Note: This file is licensed differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/

#ifndef __MAPS__
#define __MAPS__

#define FLAGS_IS_CLIENT_BIT (1 << 0)
#define FLAGS_IS_READ_BIT (1 << 1)

// The same struct can be found in chunk.go
//
// Be careful when editing, alignment and padding should be exactly the same in go/c.
//
struct tlsChunk {
	__u32 pid;
	__u32 tgid;
	__u32 len;
	__u32 recorded;
	__u32 fd;
	__u32 flags;
	__u8 address[16];
	__u8 data[4096]; // Must be a power of 2
};

struct ssl_info {
	void* buffer;
	__u32 fd;

	// for ssl_write and ssl_read this must be zero
	// for ssl_write_ex and ssl_read_ex save the *written/*readbytes pointer.
	//
	size_t *count_ptr;
};

struct fd_info {
	__u8 ipv4_addr[16]; // struct sockaddr (linux-src/include/linux/socket.h)
	__u8 flags;
};

#define BPF_MAP(_name, _type, _key_type, _value_type, _max_entries) \
	struct bpf_map_def SEC("maps") _name = { \
		.type = _type, \
		.key_size = sizeof(_key_type), \
		.value_size = sizeof(_value_type), \
		.max_entries = _max_entries, \
	};

#define BPF_HASH(_name, _key_type, _value_type) \
	BPF_MAP(_name, BPF_MAP_TYPE_HASH, _key_type, _value_type, 4096)

#define BPF_PERF_OUTPUT(_name) \
	BPF_MAP(_name, BPF_MAP_TYPE_PERF_EVENT_ARRAY, int, __u32, 1024)

BPF_HASH(pids_map, __u32, __u32);
BPF_HASH(ssl_write_context, __u64, struct ssl_info);
BPF_HASH(ssl_read_context, __u64, struct ssl_info);
BPF_HASH(file_descriptor_to_ipv4, __u64, struct fd_info);
BPF_PERF_OUTPUT(chunks_buffer);

#endif /* __MAPS__ */
23
tap/tlstapper/bpf/include/pids.h
Normal file
@@ -0,0 +1,23 @@
/*
Note: This file is licensed differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/

#ifndef __PIDS__
#define __PIDS__

int should_tap(__u32 pid) {
	__u32* shouldTap = bpf_map_lookup_elem(&pids_map, &pid);

	if (shouldTap != NULL && *shouldTap == 1) {
		return 1;
	}

	__u32 globalPid = 0;
	__u32* shouldTapGlobally = bpf_map_lookup_elem(&pids_map, &globalPid);

	return shouldTapGlobally != NULL && *shouldTapGlobally == 1;
}

#endif /* __PIDS__ */
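should_tap above checks pids_map twice: once for the PID itself and once for the reserved key 0, which acts as a global-tap switch. A hedged sketch of how the user-space side might mark a PID with cilium/ebpf (the map handle is an assumption; the real code presumably does this inside UpdateTapTargets):

package tlstapper

import "github.com/cilium/ebpf"

// markPid flags a PID for tapping; passing pid 0 would enable the
// global-tap fallback checked by should_tap() in pids.h.
// Assumption: pidsMap is the loaded *ebpf.Map handle for pids_map.
func markPid(pidsMap *ebpf.Map, pid uint32) error {
	var enabled uint32 = 1
	return pidsMap.Put(pid, enabled)
}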
12
tap/tlstapper/bpf/include/util.h
Normal file
@@ -0,0 +1,12 @@
/*
Note: This file is licensed differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/

#ifndef __UTIL__
#define __UTIL__

#define MIN(a,b) (((a)<(b))?(a):(b))

#endif /* __UTIL__ */
198
tap/tlstapper/bpf/openssl_uprobes.c
Normal file
@@ -0,0 +1,198 @@
/*
Note: This file is licensed differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/

#include "include/headers.h"
#include "include/util.h"
#include "include/maps.h"
#include "include/pids.h"

// Heap-like area for eBPF programs - stack size limited to 512 bytes, we must use maps for bigger (chunk) objects.
//
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct tlsChunk);
} heap SEC(".maps");

static __always_inline int ssl_uprobe(void* ssl, void* buffer, int num, struct bpf_map_def* map_fd, size_t *count_ptr) {
	__u64 id = bpf_get_current_pid_tgid();

	if (!should_tap(id >> 32)) {
		return 0;
	}

	struct ssl_info info = {};

	info.fd = -1;
	info.count_ptr = count_ptr;
	info.buffer = buffer;

	long err = bpf_map_update_elem(map_fd, &id, &info, BPF_ANY);

	if (err != 0) {
		char msg[] = "Error putting ssl context (id: %ld) (err: %ld)";
		bpf_trace_printk(msg, sizeof(msg), id, err);
		return 0;
	}

	return 0;
}

static __always_inline int ssl_uretprobe(struct pt_regs *ctx, struct bpf_map_def* map_fd, __u32 flags) {
	__u64 id = bpf_get_current_pid_tgid();

	if (!should_tap(id >> 32)) {
		return 0;
	}

	struct ssl_info *infoPtr = bpf_map_lookup_elem(map_fd, &id);

	if (infoPtr == 0) {
		char msg[] = "Error getting ssl context info (id: %ld)";
		bpf_trace_printk(msg, sizeof(msg), id);
		return 0;
	}

	struct ssl_info info;
	long err = bpf_probe_read(&info, sizeof(struct ssl_info), infoPtr);

	bpf_map_delete_elem(map_fd, &id);

	if (err != 0) {
		char msg[] = "Error reading ssl context (id: %ld) (err: %ld)";
		bpf_trace_printk(msg, sizeof(msg), id, err);
		return 0;
	}

	if (info.fd == -1) {
		char msg[] = "File descriptor is missing from ssl info (id: %ld)";
		bpf_trace_printk(msg, sizeof(msg), id);
		return 0;
	}

	int countBytes = PT_REGS_RC(ctx);

	if (info.count_ptr != 0) {
		// ssl_read_ex and ssl_write_ex return 1 for success
		//
		if (countBytes != 1) {
			return 0;
		}

		size_t tempCount;
		long err = bpf_probe_read(&tempCount, sizeof(size_t), (void*) info.count_ptr);

		if (err != 0) {
			char msg[] = "Error reading bytes count of _ex (id: %ld) (err: %ld)";
			bpf_trace_printk(msg, sizeof(msg), id, err);
			return 0;
		}

		countBytes = tempCount;
	}

	if (countBytes <= 0) {
		return 0;
	}

	struct tlsChunk* c;
	int zero = 0;

	// If another thread running on the same CPU gets to this point at the same time as us,
	// the data will be corrupted - protection may be added in the future
	//
	c = bpf_map_lookup_elem(&heap, &zero);

	if (!c) {
		char msg[] = "Unable to allocate chunk (id: %ld)";
		bpf_trace_printk(msg, sizeof(msg), id);
		return 0;
	}

	size_t recorded = MIN(countBytes, sizeof(c->data));

	c->flags = flags;
	c->pid = id >> 32;
	c->tgid = id;
	c->len = countBytes;
	c->recorded = recorded;
	c->fd = info.fd;

	// This ugly trick is to keep the eBPF verifier happy
	//
	if (recorded == sizeof(c->data)) {
		err = bpf_probe_read(c->data, sizeof(c->data), info.buffer);
	} else {
		recorded &= sizeof(c->data) - 1; // Buffer size must be a power of 2
		err = bpf_probe_read(c->data, recorded, info.buffer);
	}

	if (err != 0) {
		char msg[] = "Error reading from ssl buffer %ld - %ld";
		bpf_trace_printk(msg, sizeof(msg), id, err);
		return 0;
	}

	__u32 pid = id >> 32;
	__u32 fd = info.fd;
	__u64 key = (__u64) pid << 32 | fd;

	struct fd_info *fdinfo = bpf_map_lookup_elem(&file_descriptor_to_ipv4, &key);

	if (fdinfo != 0) {
		err = bpf_probe_read(c->address, sizeof(c->address), fdinfo->ipv4_addr);
		c->flags |= (fdinfo->flags & FLAGS_IS_CLIENT_BIT);

		if (err != 0) {
			char msg[] = "Error reading from fd address %ld - %ld";
			bpf_trace_printk(msg, sizeof(msg), id, err);
		}
	}

	bpf_perf_event_output(ctx, &chunks_buffer, BPF_F_CURRENT_CPU, c, sizeof(struct tlsChunk));
	return 0;
}

SEC("uprobe/ssl_write")
int BPF_KPROBE(ssl_write, void* ssl, void* buffer, int num) {
	return ssl_uprobe(ssl, buffer, num, &ssl_write_context, 0);
}

SEC("uretprobe/ssl_write")
int BPF_KPROBE(ssl_ret_write) {
	return ssl_uretprobe(ctx, &ssl_write_context, 0);
}

SEC("uprobe/ssl_read")
int BPF_KPROBE(ssl_read, void* ssl, void* buffer, int num) {
	return ssl_uprobe(ssl, buffer, num, &ssl_read_context, 0);
}

SEC("uretprobe/ssl_read")
int BPF_KPROBE(ssl_ret_read) {
	return ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT);
}

SEC("uprobe/ssl_write_ex")
int BPF_KPROBE(ssl_write_ex, void* ssl, void* buffer, size_t num, size_t *written) {
	return ssl_uprobe(ssl, buffer, num, &ssl_write_context, written);
}

SEC("uretprobe/ssl_write_ex")
int BPF_KPROBE(ssl_ret_write_ex) {
	return ssl_uretprobe(ctx, &ssl_write_context, 0);
}

SEC("uprobe/ssl_read_ex")
int BPF_KPROBE(ssl_read_ex, void* ssl, void* buffer, size_t num, size_t *readbytes) {
	return ssl_uprobe(ssl, buffer, num, &ssl_read_context, readbytes);
}

SEC("uretprobe/ssl_read_ex")
int BPF_KPROBE(ssl_ret_read_ex) {
	return ssl_uretprobe(ctx, &ssl_read_context, FLAGS_IS_READ_BIT);
}
18
tap/tlstapper/bpf/tls_tapper.c
Normal file
@@ -0,0 +1,18 @@
/*
Note: This file is licensed differently from the rest of the project
SPDX-License-Identifier: GPL-2.0
Copyright (C) UP9 Inc.
*/

#include "include/headers.h"
#include "include/util.h"
#include "include/maps.h"
#include "include/pids.h"

// To avoid multiple .o files
//
#include "openssl_uprobes.c"
#include "fd_tracepoints.c"
#include "fd_to_address_tracepoints.c"

char _license[] SEC("license") = "GPL";
70
tap/tlstapper/chunk.go
Normal file
@@ -0,0 +1,70 @@
package tlstapper

import (
	"bytes"
	"encoding/binary"
	"net"

	"github.com/go-errors/errors"
)

const FLAGS_IS_CLIENT_BIT int32 = (1 << 0)
const FLAGS_IS_READ_BIT int32 = (1 << 1)

// The same struct can be found in maps.h
//
// Be careful when editing, alignment and padding should be exactly the same in go/c.
//
type tlsChunk struct {
	Pid      int32
	Tgid     int32
	Len      int32
	Recorded int32
	Fd       int32
	Flags    int32
	Address  [16]byte
	Data     [4096]byte
}

func (c *tlsChunk) getAddress() (net.IP, uint16, error) {
	address := bytes.NewReader(c.Address[:])
	var family uint16
	var port uint16
	var ip32 uint32

	if err := binary.Read(address, binary.BigEndian, &family); err != nil {
		return nil, 0, errors.Wrap(err, 0)
	}

	if err := binary.Read(address, binary.BigEndian, &port); err != nil {
		return nil, 0, errors.Wrap(err, 0)
	}

	if err := binary.Read(address, binary.BigEndian, &ip32); err != nil {
		return nil, 0, errors.Wrap(err, 0)
	}

	ip := net.IP{uint8(ip32 >> 24), uint8(ip32 >> 16), uint8(ip32 >> 8), uint8(ip32)}

	return ip, port, nil
}

func (c *tlsChunk) isClient() bool {
	return c.Flags&FLAGS_IS_CLIENT_BIT != 0
}

func (c *tlsChunk) isServer() bool {
	return !c.isClient()
}

func (c *tlsChunk) isRead() bool {
	return c.Flags&FLAGS_IS_READ_BIT != 0
}

func (c *tlsChunk) isWrite() bool {
	return !c.isRead()
}

func (c *tlsChunk) getRecordedData() []byte {
	return c.Data[:c.Recorded]
}
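The Go struct above has to mirror the C layout in maps.h bit for bit, because the poller presumably decodes raw perf-event bytes straight into it. A minimal, self-contained sketch of that decode step over a trimmed copy of the header fields (the little-endian choice matches common x86 targets; the sample bytes are fabricated):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// chunkHeader is a trimmed, illustrative copy of tlsChunk's fixed-size header.
type chunkHeader struct {
	Pid, Tgid, Len, Recorded, Fd, Flags int32
}

func main() {
	// Fabricated sample: six little-endian int32 fields.
	raw := make([]byte, 24)
	binary.LittleEndian.PutUint32(raw[0:], 1234) // Pid

	var h chunkHeader
	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &h); err != nil {
		panic(err)
	}
	fmt.Println(h.Pid) // 1234
}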
63
tap/tlstapper/ssllib_finder.go
Normal file
@@ -0,0 +1,63 @@
package tlstapper

import (
	"bufio"
	"fmt"
	"os"
	"strings"

	"github.com/go-errors/errors"
	"github.com/up9inc/mizu/shared/logger"
)

func findSsllib(procfs string, pid uint32) (string, error) {
	binary, err := os.Readlink(fmt.Sprintf("%s/%d/exe", procfs, pid))

	if err != nil {
		return "", errors.Wrap(err, 0)
	}

	logger.Log.Debugf("Binary file for %v = %v", pid, binary)

	if strings.HasSuffix(binary, "/node") {
		return findLibraryByPid(procfs, pid, binary)
	} else {
		return findLibraryByPid(procfs, pid, "libssl.so")
	}
}

func findLibraryByPid(procfs string, pid uint32, libraryName string) (string, error) {
	file, err := os.Open(fmt.Sprintf("%v/%v/maps", procfs, pid))

	if err != nil {
		return "", err
	}

	defer file.Close()
	scanner := bufio.NewScanner(file)
	scanner.Split(bufio.ScanLines)

	for scanner.Scan() {
		parts := strings.Fields(scanner.Text())

		if len(parts) <= 5 {
			continue
		}

		filepath := parts[5]

		if !strings.Contains(filepath, libraryName) {
			continue
		}

		fullpath := fmt.Sprintf("%v/%v/root/%v", procfs, pid, filepath)

		if _, err := os.Stat(fullpath); os.IsNotExist(err) {
			continue
		}

		return fullpath, nil
	}

	return "", errors.Errorf("%s not found for PID %d", libraryName, pid)
}
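findLibraryByPid scans /proc/&lt;pid&gt;/maps, whose lines look roughly like the fabricated example below; the sixth whitespace-separated field is the mapped file path matched in the loop, and the len(parts) <= 5 check skips anonymous mappings that have no path at all:

7f0c6e400000-7f0c6e5a0000 r-xp 00000000 fd:01 1234567  /usr/lib/x86_64-linux-gnu/libssl.so.1.1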
153
tap/tlstapper/ssllib_hooks.go
Normal file
@@ -0,0 +1,153 @@
package tlstapper

import (
	"github.com/cilium/ebpf/link"
	"github.com/go-errors/errors"
)

type sslHooks struct {
	sslWriteProbe      link.Link
	sslWriteRetProbe   link.Link
	sslReadProbe       link.Link
	sslReadRetProbe    link.Link
	sslWriteExProbe    link.Link
	sslWriteExRetProbe link.Link
	sslReadExProbe     link.Link
	sslReadExRetProbe  link.Link
}

func (s *sslHooks) installUprobes(bpfObjects *tlsTapperObjects, sslLibraryPath string) error {
	sslLibrary, err := link.OpenExecutable(sslLibraryPath)

	if err != nil {
		return errors.Wrap(err, 0)
	}

	sslOffsets, err := getSslOffsets(sslLibraryPath)

	if err != nil {
		return errors.Wrap(err, 0)
	}

	return s.installSslHooks(bpfObjects, sslLibrary, sslOffsets)
}

func (s *sslHooks) installSslHooks(bpfObjects *tlsTapperObjects, sslLibrary *link.Executable, offsets sslOffsets) error {
	var err error

	s.sslWriteProbe, err = sslLibrary.Uprobe("SSL_write", bpfObjects.SslWrite, &link.UprobeOptions{
		Offset: offsets.SslWriteOffset,
	})

	if err != nil {
		return errors.Wrap(err, 0)
	}

	s.sslWriteRetProbe, err = sslLibrary.Uretprobe("SSL_write", bpfObjects.SslRetWrite, &link.UprobeOptions{
		Offset: offsets.SslWriteOffset,
	})

	if err != nil {
		return errors.Wrap(err, 0)
	}

	s.sslReadProbe, err = sslLibrary.Uprobe("SSL_read", bpfObjects.SslRead, &link.UprobeOptions{
		Offset: offsets.SslReadOffset,
	})

	if err != nil {
		return errors.Wrap(err, 0)
	}

	s.sslReadRetProbe, err = sslLibrary.Uretprobe("SSL_read", bpfObjects.SslRetRead, &link.UprobeOptions{
		Offset: offsets.SslReadOffset,
	})

	if err != nil {
		return errors.Wrap(err, 0)
	}

	if offsets.SslWriteExOffset != 0 {
		s.sslWriteExProbe, err = sslLibrary.Uprobe("SSL_write_ex", bpfObjects.SslWriteEx, &link.UprobeOptions{
			Offset: offsets.SslWriteExOffset,
		})

		if err != nil {
			return errors.Wrap(err, 0)
		}

		s.sslWriteExRetProbe, err = sslLibrary.Uretprobe("SSL_write_ex", bpfObjects.SslRetWriteEx, &link.UprobeOptions{
			Offset: offsets.SslWriteExOffset,
		})

		if err != nil {
			return errors.Wrap(err, 0)
		}
	}

	if offsets.SslReadExOffset != 0 {
		s.sslReadExProbe, err = sslLibrary.Uprobe("SSL_read_ex", bpfObjects.SslReadEx, &link.UprobeOptions{
			Offset: offsets.SslReadExOffset,
		})

		if err != nil {
			return errors.Wrap(err, 0)
		}

		s.sslReadExRetProbe, err = sslLibrary.Uretprobe("SSL_read_ex", bpfObjects.SslRetReadEx, &link.UprobeOptions{
			Offset: offsets.SslReadExOffset,
		})

		if err != nil {
			return errors.Wrap(err, 0)
		}
	}

	return nil
}

func (s *sslHooks) close() []error {
	errors := make([]error, 0)

	if err := s.sslWriteProbe.Close(); err != nil {
		errors = append(errors, err)
	}

	if err := s.sslWriteRetProbe.Close(); err != nil {
		errors = append(errors, err)
	}

	if err := s.sslReadProbe.Close(); err != nil {
		errors = append(errors, err)
	}

	if err := s.sslReadRetProbe.Close(); err != nil {
		errors = append(errors, err)
	}

	if s.sslWriteExProbe != nil {
		if err := s.sslWriteExProbe.Close(); err != nil {
			errors = append(errors, err)
		}
	}

	if s.sslWriteExRetProbe != nil {
		if err := s.sslWriteExRetProbe.Close(); err != nil {
			errors = append(errors, err)
		}
	}

	if s.sslReadExProbe != nil {
		if err := s.sslReadExProbe.Close(); err != nil {
			errors = append(errors, err)
		}
	}

	if s.sslReadExRetProbe != nil {
		if err := s.sslReadExRetProbe.Close(); err != nil {
			errors = append(errors, err)
		}
	}

	return errors
}
113
tap/tlstapper/ssllib_offsets.go
Normal file
@@ -0,0 +1,113 @@
package tlstapper
|
||||
|
||||
import (
|
||||
"debug/elf"
|
||||
"fmt"
|
||||
|
||||
"github.com/go-errors/errors"
|
||||
"github.com/up9inc/mizu/shared/logger"
|
||||
)
|
||||
|
||||
type sslOffsets struct {
|
||||
SslWriteOffset uint64
|
||||
SslReadOffset uint64
|
||||
SslWriteExOffset uint64
|
||||
SslReadExOffset uint64
|
||||
}
|
||||
|
||||
func getSslOffsets(sslLibraryPath string) (sslOffsets, error) {
|
||||
sslElf, err := elf.Open(sslLibraryPath)
|
||||
|
||||
if err != nil {
|
||||
return sslOffsets{}, errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
defer sslElf.Close()
|
||||
|
||||
base, err := findBaseAddress(sslElf, sslLibraryPath)
|
||||
|
||||
if err != nil {
|
||||
return sslOffsets{}, errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
offsets, err := findSslOffsets(sslElf, base)
|
||||
|
||||
if err != nil {
|
||||
return sslOffsets{}, errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
logger.Log.Debugf("Found TLS offsets (base: 0x%X) (write: 0x%X) (read: 0x%X)", base, offsets.SslWriteOffset, offsets.SslReadOffset)
|
||||
return offsets, nil
|
||||
}
|
||||
|
||||
func findBaseAddress(sslElf *elf.File, sslLibraryPath string) (uint64, error) {
|
||||
for _, prog := range sslElf.Progs {
|
||||
if prog.Type == elf.PT_LOAD {
|
||||
return prog.Paddr, nil
|
||||
}
|
||||
}
|
||||
|
||||
return 0, errors.New(fmt.Sprintf("Program header not found in %v", sslLibraryPath))
|
||||
}
|
||||
|
||||
func findSslOffsets(sslElf *elf.File, base uint64) (sslOffsets, error) {
|
||||
symbolsMap := make(map[string]elf.Symbol)
|
||||
|
||||
if err := buildSymbolsMap(sslElf.Symbols, symbolsMap); err != nil {
|
||||
return sslOffsets{}, errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
if err := buildSymbolsMap(sslElf.DynamicSymbols, symbolsMap); err != nil {
|
||||
return sslOffsets{}, errors.Wrap(err, 0)
|
||||
}
|
||||
|
||||
var sslWriteSymbol, sslReadSymbol, sslWriteExSymbol, sslReadExSymbol elf.Symbol
|
||||
var ok bool
|
||||
|
||||
if sslWriteSymbol, ok = symbolsMap["SSL_write"]; !ok {
|
||||
return sslOffsets{}, errors.New("SSL_write symbol not found")
|
||||
}
|
||||
|
||||
if sslReadSymbol, ok = symbolsMap["SSL_read"]; !ok {
|
||||
return sslOffsets{}, errors.New("SSL_read symbol not found")
|
||||
}
|
||||
|
||||
var sslWriteExOffset, sslReadExOffset uint64
|
||||
|
||||
if sslWriteExSymbol, ok = symbolsMap["SSL_write_ex"]; !ok {
|
||||
sslWriteExOffset = 0 // libssl.so.1.0 doesn't have the _ex functions
|
||||
} else {
|
||||
sslWriteExOffset = sslWriteExSymbol.Value - base
|
||||
}
|
||||
|
||||
if sslReadExSymbol, ok = symbolsMap["SSL_read_ex"]; !ok {
|
||||
sslReadExOffset = 0 // libssl.so.1.0 doesn't have the _ex functions
|
||||
} else {
|
||||
sslReadExOffset = sslReadExSymbol.Value - base
|
||||
}
|
||||
|
||||
return sslOffsets{
|
||||
SslWriteOffset: sslWriteSymbol.Value - base,
|
||||
SslReadOffset: sslReadSymbol.Value - base,
|
||||
SslWriteExOffset: sslWriteExOffset,
|
||||
SslReadExOffset: sslReadExOffset,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func buildSymbolsMap(sectionGetter func() ([]elf.Symbol, error), symbols map[string]elf.Symbol) error {
|
||||
syms, err := sectionGetter()
|
||||
|
||||
if err != nil && !errors.Is(err, elf.ErrNoSymbols) {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, sym := range syms {
|
||||
if elf.ST_TYPE(sym.Info) != elf.STT_FUNC {
|
||||
continue
|
||||
}
|
||||
|
||||
symbols[sym.Name] = sym
|
||||
}
|
||||
|
||||
return nil
|
||||
}
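
For readers who want to see these offsets outside of Mizu, here is a minimal standalone sketch (not part of this commit) that resolves the same SSL_read/SSL_write uprobe offsets using only the standard library's debug/elf package. The libssl path is an assumption and varies by distribution.

package main

import (
	"debug/elf"
	"fmt"
	"log"
)

func main() {
	// Assumed path; adjust for your distribution and OpenSSL version.
	path := "/usr/lib/x86_64-linux-gnu/libssl.so.1.1"

	f, err := elf.Open(path)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Base address: the physical address of the first PT_LOAD segment,
	// matching findBaseAddress above.
	var base uint64
	for _, prog := range f.Progs {
		if prog.Type == elf.PT_LOAD {
			base = prog.Paddr
			break
		}
	}

	// Merge the regular and dynamic symbol tables, as findSslOffsets does.
	syms, _ := f.Symbols()
	dyns, _ := f.DynamicSymbols()

	for _, sym := range append(syms, dyns...) {
		if elf.ST_TYPE(sym.Info) != elf.STT_FUNC {
			continue
		}
		switch sym.Name {
		case "SSL_read", "SSL_write", "SSL_read_ex", "SSL_write_ex":
			fmt.Printf("%-12s offset 0x%X\n", sym.Name, sym.Value-base)
		}
	}
}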

tap/tlstapper/syscall_hooks.go (new file, 87 lines)
@@ -0,0 +1,87 @@
package tlstapper

import (
	"github.com/cilium/ebpf/link"
	"github.com/go-errors/errors"
)

type syscallHooks struct {
	sysEnterRead    link.Link
	sysEnterWrite   link.Link
	sysEnterAccept4 link.Link
	sysExitAccept4  link.Link
	sysEnterConnect link.Link
	sysExitConnect  link.Link
}

func (s *syscallHooks) installSyscallHooks(bpfObjects *tlsTapperObjects) error {
	var err error

	s.sysEnterRead, err = link.Tracepoint("syscalls", "sys_enter_read", bpfObjects.SysEnterRead)

	if err != nil {
		return errors.Wrap(err, 0)
	}

	s.sysEnterWrite, err = link.Tracepoint("syscalls", "sys_enter_write", bpfObjects.SysEnterWrite)

	if err != nil {
		return errors.Wrap(err, 0)
	}

	s.sysEnterAccept4, err = link.Tracepoint("syscalls", "sys_enter_accept4", bpfObjects.SysEnterAccept4)

	if err != nil {
		return errors.Wrap(err, 0)
	}

	s.sysExitAccept4, err = link.Tracepoint("syscalls", "sys_exit_accept4", bpfObjects.SysExitAccept4)

	if err != nil {
		return errors.Wrap(err, 0)
	}

	s.sysEnterConnect, err = link.Tracepoint("syscalls", "sys_enter_connect", bpfObjects.SysEnterConnect)

	if err != nil {
		return errors.Wrap(err, 0)
	}

	s.sysExitConnect, err = link.Tracepoint("syscalls", "sys_exit_connect", bpfObjects.SysExitConnect)

	if err != nil {
		return errors.Wrap(err, 0)
	}

	return nil
}

func (s *syscallHooks) close() []error {
	errors := make([]error, 0)

	if err := s.sysEnterRead.Close(); err != nil {
		errors = append(errors, err)
	}

	if err := s.sysEnterWrite.Close(); err != nil {
		errors = append(errors, err)
	}

	if err := s.sysEnterAccept4.Close(); err != nil {
		errors = append(errors, err)
	}

	if err := s.sysExitAccept4.Close(); err != nil {
		errors = append(errors, err)
	}

	if err := s.sysEnterConnect.Close(); err != nil {
		errors = append(errors, err)
	}

	if err := s.sysExitConnect.Close(); err != nil {
		errors = append(errors, err)
	}

	return errors
}
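
The close() methods in this package all follow the same close-everything-and-collect-errors pattern: close every resource, keep going on failure, and hand back all errors instead of stopping at the first one. A tiny standalone sketch (not from this commit) of that idea, using only io.Closer from the standard library:

package main

import (
	"fmt"
	"io"
	"strings"
)

// closeAll mirrors the shape of the close() methods above.
func closeAll(closers ...io.Closer) []error {
	errs := make([]error, 0)
	for _, c := range closers {
		if err := c.Close(); err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}

func main() {
	// io.NopCloser wraps a reader with a no-op Close, enough for a demo.
	a := io.NopCloser(strings.NewReader("a"))
	b := io.NopCloser(strings.NewReader("b"))
	fmt.Println(len(closeAll(a, b))) // 0
}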

tap/tlstapper/tls_poller.go (new file, 162 lines)
@@ -0,0 +1,162 @@
package tlstapper

import (
	"bufio"
	"fmt"
	"net"

	"encoding/hex"
	"os"
	"strconv"
	"strings"

	"github.com/up9inc/mizu/shared/logger"
	"github.com/up9inc/mizu/tap/api"
)

const UNKNOWN_PORT uint16 = 80
const UNKNOWN_HOST string = "127.0.0.1"

type tlsPoller struct {
	tls           *TlsTapper
	readers       map[string]*tlsReader
	closedReaders chan string
	reqResMatcher api.RequestResponseMatcher
}

func NewTlsPoller(tls *TlsTapper, extension *api.Extension) *tlsPoller {
	return &tlsPoller{
		tls:           tls,
		readers:       make(map[string]*tlsReader),
		closedReaders: make(chan string, 100),
		reqResMatcher: extension.Dissector.NewResponseRequestMatcher(),
	}
}

func (p *tlsPoller) Poll(extension *api.Extension,
	emitter api.Emitter, options *api.TrafficFilteringOptions) {

	chunks := make(chan *tlsChunk)

	go p.tls.pollPerf(chunks)

	for {
		select {
		case chunk, ok := <-chunks:
			if !ok {
				return
			}

			if err := p.handleTlsChunk(chunk, extension, emitter, options); err != nil {
				LogError(err)
			}
		case key := <-p.closedReaders:
			delete(p.readers, key)
		}
	}
}

func (p *tlsPoller) handleTlsChunk(chunk *tlsChunk, extension *api.Extension,
	emitter api.Emitter, options *api.TrafficFilteringOptions) error {
	ip, port, err := chunk.getAddress()

	if err != nil {
		return err
	}

	key := buildTlsKey(chunk, ip, port)
	reader, exists := p.readers[key]

	if !exists {
		reader = p.startNewTlsReader(chunk, ip, port, key, extension, emitter, options)
		p.readers[key] = reader
	}

	reader.chunks <- chunk

	if os.Getenv("MIZU_VERBOSE_TLS_TAPPER") == "true" {
		logTls(chunk, ip, port)
	}

	return nil
}

func (p *tlsPoller) startNewTlsReader(chunk *tlsChunk, ip net.IP, port uint16, key string, extension *api.Extension,
	emitter api.Emitter, options *api.TrafficFilteringOptions) *tlsReader {

	reader := &tlsReader{
		key:    key,
		chunks: make(chan *tlsChunk, 1),
		doneHandler: func(r *tlsReader) {
			p.closeReader(key, r)
		},
	}

	isRequest := (chunk.isClient() && chunk.isWrite()) || (chunk.isServer() && chunk.isRead())
	tcpid := buildTcpId(isRequest, ip, port)

	go dissect(extension, reader, isRequest, &tcpid, emitter, options, p.reqResMatcher)
	return reader
}

func dissect(extension *api.Extension, reader *tlsReader, isRequest bool, tcpid *api.TcpID,
	emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher api.RequestResponseMatcher) {
	b := bufio.NewReader(reader)

	err := extension.Dissector.Dissect(b, isRequest, tcpid, &api.CounterPair{},
		&api.SuperTimer{}, &api.SuperIdentifier{}, emitter, options, reqResMatcher)

	if err != nil {
		logger.Log.Warningf("Error dissecting TLS %v - %v", tcpid, err)
	}
}

func (p *tlsPoller) closeReader(key string, r *tlsReader) {
	close(r.chunks)
	p.closedReaders <- key
}

func buildTlsKey(chunk *tlsChunk, ip net.IP, port uint16) string {
	return fmt.Sprintf("%v:%v-%v:%v", chunk.isClient(), chunk.isRead(), ip, port)
}

func buildTcpId(isRequest bool, ip net.IP, port uint16) api.TcpID {
	if isRequest {
		return api.TcpID{
			SrcIP:   UNKNOWN_HOST,
			DstIP:   ip.String(),
			SrcPort: strconv.Itoa(int(UNKNOWN_PORT)),
			DstPort: strconv.FormatInt(int64(port), 10),
			Ident:   "",
		}
	} else {
		return api.TcpID{
			SrcIP:   ip.String(),
			DstIP:   UNKNOWN_HOST,
			SrcPort: strconv.FormatInt(int64(port), 10),
			DstPort: strconv.Itoa(int(UNKNOWN_PORT)),
			Ident:   "",
		}
	}
}

func logTls(chunk *tlsChunk, ip net.IP, port uint16) {
	var flagsStr string

	if chunk.isClient() {
		flagsStr = "C"
	} else {
		flagsStr = "S"
	}

	if chunk.isRead() {
		flagsStr += "R"
	} else {
		flagsStr += "W"
	}

	str := strings.ReplaceAll(strings.ReplaceAll(string(chunk.Data[0:chunk.Recorded]), "\n", " "), "\r", "")

	logger.Log.Infof("PID: %v (tid: %v) (fd: %v) (client: %v) (addr: %v:%v) (recorded %v out of %v) - %v - %v",
		chunk.Pid, chunk.Tgid, chunk.Fd, flagsStr, ip, port, chunk.Recorded, chunk.Len, str, hex.EncodeToString(chunk.Data[0:chunk.Recorded]))
}
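
A quick standalone sketch (not from this commit) of the stream keys produced by buildTlsKey above: the client flag, the read flag, and the peer address together identify one direction of one TLS stream, so chunks from the same socket and direction land in the same reader. The flags are passed in directly here instead of coming from a tlsChunk.

package main

import (
	"fmt"
	"net"
)

// Same formula as buildTlsKey above, with the chunk flags as plain bools.
func buildTlsKey(isClient bool, isRead bool, ip net.IP, port uint16) string {
	return fmt.Sprintf("%v:%v-%v:%v", isClient, isRead, ip, port)
}

func main() {
	ip := net.ParseIP("10.0.0.7")
	fmt.Println(buildTlsKey(true, false, ip, 443)) // true:false-10.0.0.7:443
	fmt.Println(buildTlsKey(false, true, ip, 443)) // false:true-10.0.0.7:443
}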

tap/tlstapper/tls_process_discoverer.go (new file, 143 lines)
@@ -0,0 +1,143 @@
package tlstapper

import (
	"fmt"
	"io/ioutil"
	"net/url"
	"path"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"

	"github.com/go-errors/errors"
	"github.com/up9inc/mizu/shared/logger"
	v1 "k8s.io/api/core/v1"
)

var numberRegex = regexp.MustCompile("[0-9]+")

func UpdateTapTargets(tls *TlsTapper, pods *[]v1.Pod, procfs string) error {
	containerIds := buildContainerIdsMap(pods)
	containerPids, err := findContainerPids(procfs, containerIds)

	if err != nil {
		return err
	}

	for _, pid := range containerPids {
		if err := tls.AddPid(procfs, pid); err != nil {
			LogError(err)
		}
	}

	return nil
}

func findContainerPids(procfs string, containerIds map[string]bool) ([]uint32, error) {
	result := make([]uint32, 0)

	pids, err := ioutil.ReadDir(procfs)

	if err != nil {
		return result, err
	}

	logger.Log.Infof("Starting tls auto discoverer %v %v - scanning %v potential pids",
		procfs, containerIds, len(pids))

	for _, pid := range pids {
		if !pid.IsDir() {
			continue
		}

		if !numberRegex.MatchString(pid.Name()) {
			continue
		}

		cgroup, err := getProcessCgroup(procfs, pid.Name())

		if err != nil {
			continue
		}

		if _, ok := containerIds[cgroup]; !ok {
			continue
		}

		pidNumber, err := strconv.Atoi(pid.Name())

		if err != nil {
			continue
		}

		result = append(result, uint32(pidNumber))
	}

	return result, nil
}

func buildContainerIdsMap(pods *[]v1.Pod) map[string]bool {
	result := make(map[string]bool)

	for _, pod := range *pods {
		for _, container := range pod.Status.ContainerStatuses {
			url, err := url.Parse(container.ContainerID)

			if err != nil {
				logger.Log.Warningf("Expecting URL like container ID %v", container.ContainerID)
				continue
			}

			result[url.Host] = true
		}
	}

	return result
}

func getProcessCgroup(procfs string, pid string) (string, error) {
	filePath := fmt.Sprintf("%s/%s/cgroup", procfs, pid)

	bytes, err := ioutil.ReadFile(filePath)

	if err != nil {
		logger.Log.Warningf("Error reading cgroup file %s - %v", filePath, err)
		return "", err
	}

	lines := strings.Split(string(bytes), "\n")
	cgrouppath := extractCgroup(lines)

	if cgrouppath == "" {
		return "", errors.Errorf("Cgroup path not found for %s, %s", pid, lines)
	}

	return normalizeCgroup(cgrouppath), nil
}

func extractCgroup(lines []string) string {
	if len(lines) == 1 {
		parts := strings.Split(lines[0], ":")
		return parts[len(parts)-1]
	} else {
		for _, line := range lines {
			if strings.Contains(line, ":pids:") {
				parts := strings.Split(line, ":")
				return parts[len(parts)-1]
			}
		}
	}

	return ""
}

func normalizeCgroup(cgrouppath string) string {
	basename := strings.TrimSpace(path.Base(cgrouppath))

	if strings.Contains(basename, ".") {
		return strings.TrimSuffix(basename, filepath.Ext(basename))
	} else {
		return basename
	}
}
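
To see the cgroup parsing above in isolation, here is a standalone sketch (not from this commit) that runs the same extractCgroup/normalizeCgroup logic against a made-up cgroup v1 style /proc/<pid>/cgroup payload; the docker scope name below is an illustrative value, not a real container ID.

package main

import (
	"fmt"
	"path"
	"path/filepath"
	"strings"
)

// Same logic as extractCgroup above: a single-line file is treated as
// cgroup v2, otherwise the ":pids:" controller line is picked.
func extractCgroup(lines []string) string {
	if len(lines) == 1 {
		parts := strings.Split(lines[0], ":")
		return parts[len(parts)-1]
	}
	for _, line := range lines {
		if strings.Contains(line, ":pids:") {
			parts := strings.Split(line, ":")
			return parts[len(parts)-1]
		}
	}
	return ""
}

// Same logic as normalizeCgroup above: keep the basename and drop a
// ".scope" style extension if present.
func normalizeCgroup(cgrouppath string) string {
	basename := strings.TrimSpace(path.Base(cgrouppath))
	if strings.Contains(basename, ".") {
		return strings.TrimSuffix(basename, filepath.Ext(basename))
	}
	return basename
}

func main() {
	sample := "12:pids:/docker/0123456789abcdef.scope\n11:cpu:/docker/0123456789abcdef.scope"
	cg := extractCgroup(strings.Split(sample, "\n"))
	fmt.Println(normalizeCgroup(cg)) // 0123456789abcdef
}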

tap/tlstapper/tls_reader.go (new file, 41 lines)
@@ -0,0 +1,41 @@
package tlstapper

import (
	"io"
	"time"
)

type tlsReader struct {
	key         string
	chunks      chan *tlsChunk
	data        []byte
	doneHandler func(r *tlsReader)
}

func (r *tlsReader) Read(p []byte) (int, error) {
	var chunk *tlsChunk

	for len(r.data) == 0 {
		var ok bool
		select {
		case chunk, ok = <-r.chunks:
			if !ok {
				return 0, io.EOF
			}

			r.data = chunk.getRecordedData()
		case <-time.After(time.Second * 3):
			r.doneHandler(r)
			return 0, io.EOF
		}

		if len(r.data) > 0 {
			break
		}
	}

	l := copy(p, r.data)
	r.data = r.data[l:]

	return l, nil
}
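
The Read method above turns a channel of eBPF chunks into an io.Reader that the dissectors can consume through bufio. A simplified standalone rendition (not from this commit), with plain byte slices in place of tlsChunk, shows the same blocking, timeout, and EOF behavior:

package main

import (
	"bufio"
	"fmt"
	"io"
	"time"
)

// chanReader is a stripped-down tlsReader: bytes arrive on a channel,
// Read blocks with a timeout, and a closed channel means EOF.
type chanReader struct {
	chunks chan []byte
	data   []byte
}

func (r *chanReader) Read(p []byte) (int, error) {
	for len(r.data) == 0 {
		select {
		case chunk, ok := <-r.chunks:
			if !ok {
				return 0, io.EOF
			}
			r.data = chunk
		case <-time.After(3 * time.Second): // same timeout as the original
			return 0, io.EOF
		}
	}
	n := copy(p, r.data)
	r.data = r.data[n:]
	return n, nil
}

func main() {
	r := &chanReader{chunks: make(chan []byte, 2)}
	r.chunks <- []byte("hello ")
	r.chunks <- []byte("tls\n")
	close(r.chunks)

	line, _ := bufio.NewReader(r).ReadString('\n')
	fmt.Print(line) // hello tls
}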

tap/tlstapper/tls_tapper.go (new file, 177 lines)
@@ -0,0 +1,177 @@
package tlstapper

import (
	"bytes"
	"encoding/binary"

	"github.com/cilium/ebpf/perf"
	"github.com/cilium/ebpf/rlimit"
	"github.com/go-errors/errors"
	"github.com/up9inc/mizu/shared/logger"
)

//go:generate go run github.com/cilium/ebpf/cmd/bpf2go tlsTapper bpf/tls_tapper.c -- -O2 -g -D__TARGET_ARCH_x86

type TlsTapper struct {
	bpfObjects      tlsTapperObjects
	syscallHooks    syscallHooks
	sslHooksStructs []sslHooks
	reader          *perf.Reader
}

func (t *TlsTapper) Init(bufferSize int) error {
	logger.Log.Infof("Initializing tls tapper (bufferSize: %v)", bufferSize)

	if err := setupRLimit(); err != nil {
		return err
	}

	t.bpfObjects = tlsTapperObjects{}

	if err := loadTlsTapperObjects(&t.bpfObjects, nil); err != nil {
		return errors.Wrap(err, 0)
	}

	t.syscallHooks = syscallHooks{}

	if err := t.syscallHooks.installSyscallHooks(&t.bpfObjects); err != nil {
		return err
	}

	t.sslHooksStructs = make([]sslHooks, 0)

	return t.initChunksReader(bufferSize)
}

func (t *TlsTapper) pollPerf(chunks chan<- *tlsChunk) {
	logger.Log.Infof("Start polling for tls events")

	for {
		record, err := t.reader.Read()

		if err != nil {
			close(chunks)

			if errors.Is(err, perf.ErrClosed) {
				return
			}

			LogError(errors.Errorf("Error reading chunks from tls perf, aborting TLS! %v", err))
			return
		}

		if record.LostSamples != 0 {
			logger.Log.Infof("Buffer is full, dropped %d chunks", record.LostSamples)
			continue
		}

		buffer := bytes.NewReader(record.RawSample)

		var chunk tlsChunk

		if err := binary.Read(buffer, binary.LittleEndian, &chunk); err != nil {
			LogError(errors.Errorf("Error parsing chunk %v", err))
			continue
		}

		chunks <- &chunk
	}
}

func (t *TlsTapper) GlobalTap(sslLibrary string) error {
	return t.tapPid(0, sslLibrary)
}

func (t *TlsTapper) AddPid(procfs string, pid uint32) error {
	sslLibrary, err := findSsllib(procfs, pid)

	if err != nil {
		logger.Log.Infof("PID skipped no libssl.so found (pid: %d) %v", pid, err)
		return nil // hide the error on purpose, it's OK for a process to not use libssl.so
	}

	return t.tapPid(pid, sslLibrary)
}

func (t *TlsTapper) RemovePid(pid uint32) error {
	logger.Log.Infof("Removing PID (pid: %v)", pid)

	pids := t.bpfObjects.tlsTapperMaps.PidsMap

	if err := pids.Delete(pid); err != nil {
		return errors.Wrap(err, 0)
	}

	return nil
}

func (t *TlsTapper) Close() []error {
	errors := make([]error, 0)

	if err := t.bpfObjects.Close(); err != nil {
		errors = append(errors, err)
	}

	errors = append(errors, t.syscallHooks.close()...)

	for _, sslHooks := range t.sslHooksStructs {
		errors = append(errors, sslHooks.close()...)
	}

	if err := t.reader.Close(); err != nil {
		errors = append(errors, err)
	}

	return errors
}

func setupRLimit() error {
	err := rlimit.RemoveMemlock()

	if err != nil {
		return errors.Wrap(err, 0)
	}

	return nil
}

func (t *TlsTapper) initChunksReader(bufferSize int) error {
	var err error

	t.reader, err = perf.NewReader(t.bpfObjects.ChunksBuffer, bufferSize)

	if err != nil {
		return errors.Wrap(err, 0)
	}

	return nil
}

func (t *TlsTapper) tapPid(pid uint32, sslLibrary string) error {
	logger.Log.Infof("Tapping TLS (pid: %v) (sslLibrary: %v)", pid, sslLibrary)

	newSsl := sslHooks{}

	if err := newSsl.installUprobes(&t.bpfObjects, sslLibrary); err != nil {
		return err
	}

	t.sslHooksStructs = append(t.sslHooksStructs, newSsl)

	pids := t.bpfObjects.tlsTapperMaps.PidsMap

	if err := pids.Put(pid, uint32(1)); err != nil {
		return errors.Wrap(err, 0)
	}

	return nil
}

func LogError(err error) {
	var e *errors.Error
	if errors.As(err, &e) {
		logger.Log.Errorf("Error: %v", e.ErrorStack())
	} else {
		logger.Log.Errorf("Error: %v", err)
	}
}
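
pollPerf decodes each perf record by reading a fixed-layout struct out of the raw little-endian sample with binary.Read. A standalone sketch (not from this commit) of that decoding step; the demoChunk layout is illustrative only and is not the real tlsChunk layout.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"log"
)

// demoChunk is an assumed, simplified record layout for the demo.
type demoChunk struct {
	Pid      uint32
	Fd       uint32
	Recorded uint32
	Data     [8]byte
}

func main() {
	// Build a raw sample the way a kernel-side producer would lay it out.
	raw := new(bytes.Buffer)
	if err := binary.Write(raw, binary.LittleEndian, demoChunk{
		Pid: 1234, Fd: 7, Recorded: 5,
		Data: [8]byte{'h', 'e', 'l', 'l', 'o'},
	}); err != nil {
		log.Fatal(err)
	}

	// Decode it back, as pollPerf does for each perf record.
	var chunk demoChunk
	if err := binary.Read(bytes.NewReader(raw.Bytes()), binary.LittleEndian, &chunk); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("pid=%d fd=%d data=%q\n", chunk.Pid, chunk.Fd, chunk.Data[:chunk.Recorded])
}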

tap/tlstapper/tlstapper_bpfeb.go (new file, 179 lines)
@@ -0,0 +1,179 @@
// Code generated by bpf2go; DO NOT EDIT.
//go:build arm64be || armbe || mips || mips64 || mips64p32 || ppc64 || s390 || s390x || sparc || sparc64
// +build arm64be armbe mips mips64 mips64p32 ppc64 s390 s390x sparc sparc64

package tlstapper

import (
	"bytes"
	_ "embed"
	"fmt"
	"io"

	"github.com/cilium/ebpf"
)

// loadTlsTapper returns the embedded CollectionSpec for tlsTapper.
func loadTlsTapper() (*ebpf.CollectionSpec, error) {
	reader := bytes.NewReader(_TlsTapperBytes)
	spec, err := ebpf.LoadCollectionSpecFromReader(reader)
	if err != nil {
		return nil, fmt.Errorf("can't load tlsTapper: %w", err)
	}

	return spec, err
}

// loadTlsTapperObjects loads tlsTapper and converts it into a struct.
//
// The following types are suitable as obj argument:
//
//	*tlsTapperObjects
//	*tlsTapperPrograms
//	*tlsTapperMaps
//
// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
func loadTlsTapperObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
	spec, err := loadTlsTapper()
	if err != nil {
		return err
	}

	return spec.LoadAndAssign(obj, opts)
}

// tlsTapperSpecs contains maps and programs before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type tlsTapperSpecs struct {
	tlsTapperProgramSpecs
	tlsTapperMapSpecs
}

// tlsTapperSpecs contains programs before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type tlsTapperProgramSpecs struct {
	SslRead         *ebpf.ProgramSpec `ebpf:"ssl_read"`
	SslReadEx       *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
	SslRetRead      *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
	SslRetReadEx    *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
	SslRetWrite     *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
	SslRetWriteEx   *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
	SslWrite        *ebpf.ProgramSpec `ebpf:"ssl_write"`
	SslWriteEx      *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
	SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
	SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
	SysEnterRead    *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
	SysEnterWrite   *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
	SysExitAccept4  *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
	SysExitConnect  *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
}

// tlsTapperMapSpecs contains maps before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type tlsTapperMapSpecs struct {
	AcceptSyscallContext *ebpf.MapSpec `ebpf:"accept_syscall_context"`
	ChunksBuffer         *ebpf.MapSpec `ebpf:"chunks_buffer"`
	ConnectSyscallInfo   *ebpf.MapSpec `ebpf:"connect_syscall_info"`
	FileDescriptorToIpv4 *ebpf.MapSpec `ebpf:"file_descriptor_to_ipv4"`
	Heap                 *ebpf.MapSpec `ebpf:"heap"`
	PidsMap              *ebpf.MapSpec `ebpf:"pids_map"`
	SslReadContext       *ebpf.MapSpec `ebpf:"ssl_read_context"`
	SslWriteContext      *ebpf.MapSpec `ebpf:"ssl_write_context"`
}

// tlsTapperObjects contains all objects after they have been loaded into the kernel.
//
// It can be passed to loadTlsTapperObjects or ebpf.CollectionSpec.LoadAndAssign.
type tlsTapperObjects struct {
	tlsTapperPrograms
	tlsTapperMaps
}

func (o *tlsTapperObjects) Close() error {
	return _TlsTapperClose(
		&o.tlsTapperPrograms,
		&o.tlsTapperMaps,
	)
}

// tlsTapperMaps contains all maps after they have been loaded into the kernel.
//
// It can be passed to loadTlsTapperObjects or ebpf.CollectionSpec.LoadAndAssign.
type tlsTapperMaps struct {
	AcceptSyscallContext *ebpf.Map `ebpf:"accept_syscall_context"`
	ChunksBuffer         *ebpf.Map `ebpf:"chunks_buffer"`
	ConnectSyscallInfo   *ebpf.Map `ebpf:"connect_syscall_info"`
	FileDescriptorToIpv4 *ebpf.Map `ebpf:"file_descriptor_to_ipv4"`
	Heap                 *ebpf.Map `ebpf:"heap"`
	PidsMap              *ebpf.Map `ebpf:"pids_map"`
	SslReadContext       *ebpf.Map `ebpf:"ssl_read_context"`
	SslWriteContext      *ebpf.Map `ebpf:"ssl_write_context"`
}

func (m *tlsTapperMaps) Close() error {
	return _TlsTapperClose(
		m.AcceptSyscallContext,
		m.ChunksBuffer,
		m.ConnectSyscallInfo,
		m.FileDescriptorToIpv4,
		m.Heap,
		m.PidsMap,
		m.SslReadContext,
		m.SslWriteContext,
	)
}

// tlsTapperPrograms contains all programs after they have been loaded into the kernel.
//
// It can be passed to loadTlsTapperObjects or ebpf.CollectionSpec.LoadAndAssign.
type tlsTapperPrograms struct {
	SslRead         *ebpf.Program `ebpf:"ssl_read"`
	SslReadEx       *ebpf.Program `ebpf:"ssl_read_ex"`
	SslRetRead      *ebpf.Program `ebpf:"ssl_ret_read"`
	SslRetReadEx    *ebpf.Program `ebpf:"ssl_ret_read_ex"`
	SslRetWrite     *ebpf.Program `ebpf:"ssl_ret_write"`
	SslRetWriteEx   *ebpf.Program `ebpf:"ssl_ret_write_ex"`
	SslWrite        *ebpf.Program `ebpf:"ssl_write"`
	SslWriteEx      *ebpf.Program `ebpf:"ssl_write_ex"`
	SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
	SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
	SysEnterRead    *ebpf.Program `ebpf:"sys_enter_read"`
	SysEnterWrite   *ebpf.Program `ebpf:"sys_enter_write"`
	SysExitAccept4  *ebpf.Program `ebpf:"sys_exit_accept4"`
	SysExitConnect  *ebpf.Program `ebpf:"sys_exit_connect"`
}

func (p *tlsTapperPrograms) Close() error {
	return _TlsTapperClose(
		p.SslRead,
		p.SslReadEx,
		p.SslRetRead,
		p.SslRetReadEx,
		p.SslRetWrite,
		p.SslRetWriteEx,
		p.SslWrite,
		p.SslWriteEx,
		p.SysEnterAccept4,
		p.SysEnterConnect,
		p.SysEnterRead,
		p.SysEnterWrite,
		p.SysExitAccept4,
		p.SysExitConnect,
	)
}

func _TlsTapperClose(closers ...io.Closer) error {
	for _, closer := range closers {
		if err := closer.Close(); err != nil {
			return err
		}
	}
	return nil
}

// Do not access this directly.
//go:embed tlstapper_bpfeb.o
var _TlsTapperBytes []byte

tap/tlstapper/tlstapper_bpfeb.o (new binary file)
Binary file not shown.

tap/tlstapper/tlstapper_bpfel.go (new file, 179 lines)
@@ -0,0 +1,179 @@
// Code generated by bpf2go; DO NOT EDIT.
//go:build 386 || amd64 || amd64p32 || arm || arm64 || mips64le || mips64p32le || mipsle || ppc64le || riscv64
// +build 386 amd64 amd64p32 arm arm64 mips64le mips64p32le mipsle ppc64le riscv64

package tlstapper

import (
	"bytes"
	_ "embed"
	"fmt"
	"io"

	"github.com/cilium/ebpf"
)

// loadTlsTapper returns the embedded CollectionSpec for tlsTapper.
func loadTlsTapper() (*ebpf.CollectionSpec, error) {
	reader := bytes.NewReader(_TlsTapperBytes)
	spec, err := ebpf.LoadCollectionSpecFromReader(reader)
	if err != nil {
		return nil, fmt.Errorf("can't load tlsTapper: %w", err)
	}

	return spec, err
}

// loadTlsTapperObjects loads tlsTapper and converts it into a struct.
//
// The following types are suitable as obj argument:
//
//	*tlsTapperObjects
//	*tlsTapperPrograms
//	*tlsTapperMaps
//
// See ebpf.CollectionSpec.LoadAndAssign documentation for details.
func loadTlsTapperObjects(obj interface{}, opts *ebpf.CollectionOptions) error {
	spec, err := loadTlsTapper()
	if err != nil {
		return err
	}

	return spec.LoadAndAssign(obj, opts)
}

// tlsTapperSpecs contains maps and programs before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type tlsTapperSpecs struct {
	tlsTapperProgramSpecs
	tlsTapperMapSpecs
}

// tlsTapperSpecs contains programs before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type tlsTapperProgramSpecs struct {
	SslRead         *ebpf.ProgramSpec `ebpf:"ssl_read"`
	SslReadEx       *ebpf.ProgramSpec `ebpf:"ssl_read_ex"`
	SslRetRead      *ebpf.ProgramSpec `ebpf:"ssl_ret_read"`
	SslRetReadEx    *ebpf.ProgramSpec `ebpf:"ssl_ret_read_ex"`
	SslRetWrite     *ebpf.ProgramSpec `ebpf:"ssl_ret_write"`
	SslRetWriteEx   *ebpf.ProgramSpec `ebpf:"ssl_ret_write_ex"`
	SslWrite        *ebpf.ProgramSpec `ebpf:"ssl_write"`
	SslWriteEx      *ebpf.ProgramSpec `ebpf:"ssl_write_ex"`
	SysEnterAccept4 *ebpf.ProgramSpec `ebpf:"sys_enter_accept4"`
	SysEnterConnect *ebpf.ProgramSpec `ebpf:"sys_enter_connect"`
	SysEnterRead    *ebpf.ProgramSpec `ebpf:"sys_enter_read"`
	SysEnterWrite   *ebpf.ProgramSpec `ebpf:"sys_enter_write"`
	SysExitAccept4  *ebpf.ProgramSpec `ebpf:"sys_exit_accept4"`
	SysExitConnect  *ebpf.ProgramSpec `ebpf:"sys_exit_connect"`
}

// tlsTapperMapSpecs contains maps before they are loaded into the kernel.
//
// It can be passed ebpf.CollectionSpec.Assign.
type tlsTapperMapSpecs struct {
	AcceptSyscallContext *ebpf.MapSpec `ebpf:"accept_syscall_context"`
	ChunksBuffer         *ebpf.MapSpec `ebpf:"chunks_buffer"`
	ConnectSyscallInfo   *ebpf.MapSpec `ebpf:"connect_syscall_info"`
	FileDescriptorToIpv4 *ebpf.MapSpec `ebpf:"file_descriptor_to_ipv4"`
	Heap                 *ebpf.MapSpec `ebpf:"heap"`
	PidsMap              *ebpf.MapSpec `ebpf:"pids_map"`
	SslReadContext       *ebpf.MapSpec `ebpf:"ssl_read_context"`
	SslWriteContext      *ebpf.MapSpec `ebpf:"ssl_write_context"`
}

// tlsTapperObjects contains all objects after they have been loaded into the kernel.
//
// It can be passed to loadTlsTapperObjects or ebpf.CollectionSpec.LoadAndAssign.
type tlsTapperObjects struct {
	tlsTapperPrograms
	tlsTapperMaps
}

func (o *tlsTapperObjects) Close() error {
	return _TlsTapperClose(
		&o.tlsTapperPrograms,
		&o.tlsTapperMaps,
	)
}

// tlsTapperMaps contains all maps after they have been loaded into the kernel.
//
// It can be passed to loadTlsTapperObjects or ebpf.CollectionSpec.LoadAndAssign.
type tlsTapperMaps struct {
	AcceptSyscallContext *ebpf.Map `ebpf:"accept_syscall_context"`
	ChunksBuffer         *ebpf.Map `ebpf:"chunks_buffer"`
	ConnectSyscallInfo   *ebpf.Map `ebpf:"connect_syscall_info"`
	FileDescriptorToIpv4 *ebpf.Map `ebpf:"file_descriptor_to_ipv4"`
	Heap                 *ebpf.Map `ebpf:"heap"`
	PidsMap              *ebpf.Map `ebpf:"pids_map"`
	SslReadContext       *ebpf.Map `ebpf:"ssl_read_context"`
	SslWriteContext      *ebpf.Map `ebpf:"ssl_write_context"`
}

func (m *tlsTapperMaps) Close() error {
	return _TlsTapperClose(
		m.AcceptSyscallContext,
		m.ChunksBuffer,
		m.ConnectSyscallInfo,
		m.FileDescriptorToIpv4,
		m.Heap,
		m.PidsMap,
		m.SslReadContext,
		m.SslWriteContext,
	)
}

// tlsTapperPrograms contains all programs after they have been loaded into the kernel.
//
// It can be passed to loadTlsTapperObjects or ebpf.CollectionSpec.LoadAndAssign.
type tlsTapperPrograms struct {
	SslRead         *ebpf.Program `ebpf:"ssl_read"`
	SslReadEx       *ebpf.Program `ebpf:"ssl_read_ex"`
	SslRetRead      *ebpf.Program `ebpf:"ssl_ret_read"`
	SslRetReadEx    *ebpf.Program `ebpf:"ssl_ret_read_ex"`
	SslRetWrite     *ebpf.Program `ebpf:"ssl_ret_write"`
	SslRetWriteEx   *ebpf.Program `ebpf:"ssl_ret_write_ex"`
	SslWrite        *ebpf.Program `ebpf:"ssl_write"`
	SslWriteEx      *ebpf.Program `ebpf:"ssl_write_ex"`
	SysEnterAccept4 *ebpf.Program `ebpf:"sys_enter_accept4"`
	SysEnterConnect *ebpf.Program `ebpf:"sys_enter_connect"`
	SysEnterRead    *ebpf.Program `ebpf:"sys_enter_read"`
	SysEnterWrite   *ebpf.Program `ebpf:"sys_enter_write"`
	SysExitAccept4  *ebpf.Program `ebpf:"sys_exit_accept4"`
	SysExitConnect  *ebpf.Program `ebpf:"sys_exit_connect"`
}

func (p *tlsTapperPrograms) Close() error {
	return _TlsTapperClose(
		p.SslRead,
		p.SslReadEx,
		p.SslRetRead,
		p.SslRetReadEx,
		p.SslRetWrite,
		p.SslRetWriteEx,
		p.SslWrite,
		p.SslWriteEx,
		p.SysEnterAccept4,
		p.SysEnterConnect,
		p.SysEnterRead,
		p.SysEnterWrite,
		p.SysExitAccept4,
		p.SysExitConnect,
	)
}

func _TlsTapperClose(closers ...io.Closer) error {
	for _, closer := range closers {
		if err := closer.Close(); err != nil {
			return err
		}
	}
	return nil
}

// Do not access this directly.
//go:embed tlstapper_bpfel.o
var _TlsTapperBytes []byte

tap/tlstapper/tlstapper_bpfel.o (new binary file)
Binary file not shown.

@@ -122,6 +122,7 @@ export const TrafficPage: React.FC<TrafficPageProps> = ({setAnalyzeStatus}) => {
         ws.current.onopen = () => {
             setWsConnection(WsConnectionStatus.Connected);
             ws.current.send(query);
+            ws.current.send("");
         }
         ws.current.onclose = () => {
             setWsConnection(WsConnectionStatus.Closed);

@@ -10,8 +10,7 @@ import debounce from 'lodash/debounce';
 import ServiceMapOptions from './ServiceMapOptions'
 import { useCommonStyles } from "../../helpers/commonStyle";
 import refresh from "../assets/refresh.svg";
-import reset from "../assets/reset.svg";
-import close from "../assets/close.svg"
+import close from "../assets/close.svg";

 interface GraphData {
     nodes: Node[];
@@ -154,19 +153,6 @@ export const ServiceMapModal: React.FC<ServiceMapModalProps> = ({ isOpen, onOpen
         return () => setGraphData({ nodes: [], edges: [] })
     }, [getServiceMapData])

-    const resetServiceMap = debounce(async () => {
-        try {
-            const serviceMapResetResponse = await api.serviceMapReset();
-            if (serviceMapResetResponse["status"] === "enabled") {
-                refreshServiceMap()
-            }
-
-        } catch (ex) {
-            toast.error("An error occurred while resetting Mizu Service Map, see console for mode details");
-            console.error(ex);
-        }
-    }, 500);
-
     const refreshServiceMap = debounce(() => {
         getServiceMapData();
     }, 500);
@@ -192,16 +178,6 @@ export const ServiceMapModal: React.FC<ServiceMapModalProps> = ({ isOpen, onOpen
             {!isLoading && <div style={{ height: "100%", width: "100%" }}>
                 <div style={{ display: "flex", justifyContent: "space-between" }}>
                     <div>
-                        <Button
-                            startIcon={<img src={reset} className="custom" alt="reset" style={{ marginRight:"8%"}}></img>}
-                            size="large"
-                            variant="contained"
-                            className={commonClasses.outlinedButton + " " + commonClasses.imagedButton}
-                            style={{ marginRight: 25, paddingTop: "3px", paddingBottom: "1px"}}
-                            onClick={resetServiceMap}
-                        >
-                            Reset
-                        </Button>
                         <Button
                             startIcon={<img src={refresh} className="custom" alt="refresh" style={{ marginRight:"8%"}}></img>}
                             size="medium"

@@ -1,4 +0,0 @@
-<svg width="22" height="22" viewBox="0 0 22 22" fill="none" xmlns="http://www.w3.org/2000/svg">
-<path d="M11 14C12.114 14 13 13.1127 13 12C13 10.8873 12.114 10 11 10C9.886 10 9 10.8873 9 12C9 13.1127 9.886 14 11 14Z" fill="#205CF5"/>
-<path d="M19.0823 10.2539C18.8662 9.1982 18.4442 8.19548 17.8402 7.30312C17.2467 6.42521 16.4906 5.66909 15.6127 5.07562C14.7202 4.47184 13.7175 4.04978 12.6619 3.83354C12.1074 3.72109 11.5429 3.6658 10.9771 3.66854V1.83337L7.33333 4.58337L10.9771 7.33337V5.50187C11.4208 5.50004 11.8644 5.54221 12.2925 5.63021C13.1129 5.79833 13.8923 6.12632 14.586 6.59546C15.2702 7.05676 15.859 7.64561 16.3203 8.32979C17.0366 9.38875 17.4185 10.6383 17.4167 11.9167C17.4165 12.7746 17.2451 13.6238 16.9125 14.4146C16.7507 14.7955 16.5531 15.1602 16.3222 15.5036C16.0904 15.845 15.827 16.1639 15.5357 16.456C14.6483 17.3417 13.5219 17.9492 12.2943 18.2041C11.4407 18.3765 10.5612 18.3765 9.7075 18.2041C8.88668 18.0358 8.10704 17.7075 7.41308 17.238C6.72969 16.7771 6.14148 16.1888 5.68058 15.5055C4.96518 14.4454 4.58306 13.1956 4.58333 11.9167H2.75C2.75098 13.5609 3.24215 15.1675 4.16075 16.5312C4.75461 17.4077 5.50996 18.163 6.38642 18.7569C7.74824 19.6786 9.3556 20.1697 11 20.1667C11.5585 20.1667 12.1156 20.1105 12.6628 19.999C13.7177 19.7811 14.7197 19.3592 15.6127 18.7569C16.0511 18.4615 16.4597 18.1241 16.8328 17.7495C17.2063 17.3749 17.5439 16.9661 17.8411 16.5285C18.762 15.167 19.2528 13.5604 19.25 11.9167C19.25 11.3582 19.1938 10.8011 19.0823 10.2539Z" fill="#205CF5"/>
-</svg>