Mirror of https://github.com/kubeshark/kubeshark.git, synced 2026-02-14 18:09:51 +00:00

Compare commits

17 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 72df652f6b |  |
|  | c67675c138 |  |
|  | e8d2b7eb3c |  |
|  | 83722f1a02 |  |
|  | bb3ae1ef70 |  |
|  | 5dfa94d76e |  |
|  | dfe63f2318 |  |
|  | 9cf64a43f5 |  |
|  | bf2362d836 |  |
|  | 1c11523d9d |  |
|  | 150a87413d |  |
|  | f7221a7355 |  |
|  | 4197ec198c |  |
|  | 5484b7c491 |  |
|  | 041223b558 |  |
|  | 71c04d20ef |  |
|  | 8852bac77b |  |
Makefile (3)

@@ -103,6 +103,9 @@ test-shared: ## Run shared tests

test-extensions: ## Run extensions tests
  @echo "running http tests"; cd tap/extensions/http && $(MAKE) test
  @echo "running redis tests"; cd tap/extensions/redis && $(MAKE) test
  @echo "running kafka tests"; cd tap/extensions/kafka && $(MAKE) test
  @echo "running amqp tests"; cd tap/extensions/amqp && $(MAKE) test

acceptance-test: ## Run acceptance tests
  @echo "running acceptance tests"; cd acceptanceTests && $(MAKE) test
@@ -14,7 +14,8 @@
|
||||
"tests/RegexMasking.js",
|
||||
"tests/IgnoredUserAgents.js",
|
||||
"tests/UiTest.js",
|
||||
"tests/Redis.js"
|
||||
"tests/Redis.js",
|
||||
"tests/Rabbit.js"
|
||||
],
|
||||
|
||||
"env": {
|
||||
|
||||
@@ -1,3 +1,9 @@

export const valueTabs = {
  response: 'RESPONSE',
  request: 'REQUEST',
  none: null
}

export function isValueExistsInElement(shouldInclude, content, domPathToContainer){
  it(`should ${shouldInclude ? '' : 'not'} include '${content}'`, function () {
    cy.get(domPathToContainer).then(htmlText => {

@@ -53,3 +59,114 @@ export function checkThatAllEntriesShown() {

    cy.get('[title="Fetch old records"]').click();
  });
}

export function checkFilterByMethod(funcDict) {
  const {protocol, method, summary} = funcDict;
  const summaryDict = getSummeryDict(summary);
  const methodDict = getMethodDict(method);
  const protocolDict = getProtocolDict(protocol.name, protocol.text);

  it(`Testing the method: ${method}`, function () {
    // applying filter
    cy.get('.w-tc-editor-text').clear().type(`method == "${method}"`);
    cy.get('[type="submit"]').click();
    cy.get('.w-tc-editor').should('have.attr', 'style').and('include', Cypress.env('greenFilterColor'));

    cy.get('#entries-length').then(number => {
      // if the entries list isn't expanded it expands here
      if (number.text() === '0' || number.text() === '1') // todo change when TRA-4262 is fixed
        cy.get('[title="Fetch old records"]').click();

      cy.get('#entries-length').should('not.have.text', '0').and('not.have.text', '1').then(() => {
        cy.get(`#list [id]`).then(elements => {
          const listElmWithIdAttr = Object.values(elements);
          let doneCheckOnFirst = false;

          listElmWithIdAttr.forEach(entry => {
            if (entry?.id && entry.id.match(RegExp(/entry-(\d{2}|\d{1})$/gm))) {
              const entryNum = getEntryNumById(entry.id);

              leftTextCheck(entryNum, methodDict.pathLeft, methodDict.expectedText);
              leftTextCheck(entryNum, protocolDict.pathLeft, protocolDict.expectedTextLeft);
              if (summaryDict)
                leftTextCheck(entryNum, summaryDict.pathLeft, summaryDict.expectedText);

              if (!doneCheckOnFirst) {
                deepCheck(funcDict, protocolDict, methodDict, entry);
                doneCheckOnFirst = true;
              }
            }
          });
        });
      });
    });
  });
}

function deepCheck(generalDict, protocolDict, methodDict, entry) {
  const entryNum = getEntryNumById(entry.id);
  const {summary, value} = generalDict;
  const summaryDict = getSummeryDict(summary);

  leftOnHoverCheck(entryNum, methodDict.pathLeft, methodDict.expectedOnHover);
  leftOnHoverCheck(entryNum, protocolDict.pathLeft, protocolDict.expectedOnHover);
  if (summaryDict)
    leftOnHoverCheck(entryNum, summaryDict.pathLeft, summaryDict.expectedOnHover);

  cy.get(`#${entry.id}`).click();

  rightTextCheck(methodDict.pathRight, methodDict.expectedText);
  rightTextCheck(protocolDict.pathRight, protocolDict.expectedTextRight);
  if (summaryDict)
    rightTextCheck(summaryDict.pathRight, summaryDict.expectedText);

  rightOnHoverCheck(methodDict.pathRight, methodDict.expectedOnHover);
  rightOnHoverCheck(protocolDict.pathRight, protocolDict.expectedOnHover);
  if (summaryDict)
    rightOnHoverCheck(summaryDict.pathRight, summaryDict.expectedOnHover);

  if (value) {
    if (value.tab === valueTabs.response)
      cy.contains('Response').click();
    cy.get(Cypress.env('bodyJsonClass')).then(text => {
      expect(text.text()).to.match(value.regex)
    });
  }
}

function getSummeryDict(summary) {
  if (summary) {
    return {
      pathLeft: '> :nth-child(2) > :nth-child(1) > :nth-child(2) > :nth-child(2)',
      pathRight: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(2) > :nth-child(2)',
      expectedText: summary,
      expectedOnHover: `summary == "${summary}"`
    };
  }
  else {
    return null;
  }
}

function getMethodDict(method) {
  return {
    pathLeft: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(2)',
    pathRight: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(1) > :nth-child(2)',
    expectedText: method,
    expectedOnHover: `method == "${method}"`
  };
}

function getProtocolDict(protocol, protocolText) {
  return {
    pathLeft: '> :nth-child(1) > :nth-child(1)',
    pathRight: '> :nth-child(1) > :nth-child(1) > :nth-child(1) > :nth-child(1)',
    expectedTextLeft: protocol.toUpperCase(),
    expectedTextRight: protocolText,
    expectedOnHover: protocol.toLowerCase()
  };
}

function getEntryNumById (id) {
  return parseInt(id.split('-')[1]);
}
acceptanceTests/cypress/integration/tests/Rabbit.js (new file, 50 lines)

@@ -0,0 +1,50 @@

import {checkFilterByMethod, valueTabs,} from "../testHelpers/TrafficHelper";

it('opening mizu', function () {
  cy.visit(Cypress.env('testUrl'));
});

const rabbitProtocolDetails = {name: 'AMQP', text: 'Advanced Message Queuing Protocol 0-9-1'};

checkFilterByMethod({
  protocol: rabbitProtocolDetails,
  method: 'exchange declare',
  summary: 'exchange',
  value: null
});

checkFilterByMethod({
  protocol: rabbitProtocolDetails,
  method: 'queue declare',
  summary: 'queue',
  value: null
});

checkFilterByMethod({
  protocol: rabbitProtocolDetails,
  method: 'queue bind',
  summary: 'queue',
  value: null
});

checkFilterByMethod({
  protocol: rabbitProtocolDetails,
  method: 'basic publish',
  summary: 'exchange',
  value: {tab: valueTabs.request, regex: /^message$/mg}
});

checkFilterByMethod({
  protocol: rabbitProtocolDetails,
  method: 'basic consume',
  summary: 'queue',
  value: null
});

checkFilterByMethod({
  protocol: rabbitProtocolDetails,
  method: 'basic deliver',
  summary: 'exchange',
  value: {tab: valueTabs.request, regex: /^message$/mg}
});
@@ -1,155 +1,42 @@

import {
  leftOnHoverCheck,
  leftTextCheck,
  rightOnHoverCheck,
  rightTextCheck,
} from "../testHelpers/TrafficHelper";

const valueTabs = {
  response: 'RESPONSE',
  request: 'REQUEST',
  none: null
}
import {checkFilterByMethod, valueTabs,} from "../testHelpers/TrafficHelper";

it('opening mizu', function () {
  cy.visit(Cypress.env('testUrl'));
});

checkRedisFilterByMethod({
const redisProtocolDetails = {name: 'redis', text: 'Redis Serialization Protocol'};

checkFilterByMethod({
  protocol: redisProtocolDetails,
  method: 'PING',
  shouldCheckSummary: false,
  valueTab: valueTabs.none
});
  summary: null,
  value: null
})

checkRedisFilterByMethod({
checkFilterByMethod({
  protocol: redisProtocolDetails,
  method: 'SET',
  shouldCheckSummary: true,
  valueTab: valueTabs.request,
  valueRegex: /^\[value, keepttl]$/mg
});
  summary: 'key',
  value: {tab: valueTabs.request, regex: /^\[value, keepttl]$/mg}
})

checkRedisFilterByMethod({
checkFilterByMethod({
  protocol: redisProtocolDetails,
  method: 'EXISTS',
  shouldCheckSummary: true,
  valueTab: valueTabs.response,
  valueRegex: /^1$/mg
});
  summary: 'key',
  value: {tab: valueTabs.response, regex: /^1$/mg}
})

checkRedisFilterByMethod({
checkFilterByMethod({
  protocol: redisProtocolDetails,
  method: 'GET',
  shouldCheckSummary: true,
  valueTab: valueTabs.response,
  valueRegex: /^value$/mg
});
  summary: 'key',
  value: {tab: valueTabs.response, regex: /^value$/mg}
})

checkRedisFilterByMethod({
checkFilterByMethod({
  protocol: redisProtocolDetails,
  method: 'DEL',
  shouldCheckSummary: true,
  valueTab: valueTabs.response,
  valueRegex: /^1$|^0$/mg
});

function checkRedisFilterByMethod(funcDict) {
  const {method, shouldCheckSummary} = funcDict
  const summaryDict = getSummeryDict();
  const methodDict = getMethodDict(method);
  const protocolDict = getProtocolDict();

  it(`Testing the method: ${method}`, function () {
    // applying filter
    cy.get('.w-tc-editor-text').clear().type(`method == "${method}"`);
    cy.get('[type="submit"]').click();
    cy.get('.w-tc-editor').should('have.attr', 'style').and('include', Cypress.env('greenFilterColor'));

    cy.get('#entries-length').then(number => {
      // if the entries list isn't expanded it expands here
      if (number.text() === '0' || number.text() === '1') // todo change when TRA-4262 is fixed
        cy.get('[title="Fetch old records"]').click();

      cy.get('#entries-length').should('not.have.text', '0').and('not.have.text', '1').then(() => {
        cy.get(`#list [id]`).then(elements => {
          const listElmWithIdAttr = Object.values(elements);
          let doneCheckOnFirst = false;

          listElmWithIdAttr.forEach(entry => {
            if (entry?.id && entry.id.match(RegExp(/entry-(\d{2}|\d{1})$/gm))) {
              const entryNum = getEntryNumById(entry.id);

              leftTextCheck(entryNum, methodDict.pathLeft, methodDict.expectedText);
              leftTextCheck(entryNum, protocolDict.pathLeft, protocolDict.expectedTextLeft);
              if (shouldCheckSummary)
                leftTextCheck(entryNum, summaryDict.pathLeft, summaryDict.expectedText);

              if (!doneCheckOnFirst) {
                deepCheck(funcDict, protocolDict, methodDict, summaryDict, entry);
                doneCheckOnFirst = true;
              }
            }
          });
        });
      });
    });
  });
}

function deepCheck(generalDict, protocolDict, methodDict, summaryDict, entry) {
  const entryNum = getEntryNumById(entry.id);
  const {shouldCheckSummary, valueTab, valueRegex} = generalDict;

  leftOnHoverCheck(entryNum, methodDict.pathLeft, methodDict.expectedOnHover);
  leftOnHoverCheck(entryNum, protocolDict.pathLeft, protocolDict.expectedOnHover);
  if (shouldCheckSummary)
    leftOnHoverCheck(entryNum, summaryDict.pathLeft, summaryDict.expectedOnHover);

  cy.get(`#${entry.id}`).click();

  rightTextCheck(methodDict.pathRight, methodDict.expectedText);
  rightTextCheck(protocolDict.pathRight, protocolDict.expectedTextRight);
  if (shouldCheckSummary)
    rightTextCheck(summaryDict.pathRight, summaryDict.expectedText);

  rightOnHoverCheck(methodDict.pathRight, methodDict.expectedOnHover);
  rightOnHoverCheck(protocolDict.pathRight, protocolDict.expectedOnHover);
  if (shouldCheckSummary)
    rightOnHoverCheck(summaryDict.pathRight, summaryDict.expectedOnHover);

  if (valueTab) {
    if (valueTab === valueTabs.response)
      cy.contains('Response').click();
    cy.get(Cypress.env('bodyJsonClass')).then(text => {
      expect(text.text()).to.match(valueRegex)
    });
  }
}

function getSummeryDict() {
  return {
    pathLeft: '> :nth-child(2) > :nth-child(1) > :nth-child(2) > :nth-child(2)',
    pathRight: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(2) > :nth-child(2)',
    expectedText: 'key',
    expectedOnHover: `summary == "key"`
  };
}

function getMethodDict(method) {
  return {
    pathLeft: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(2)',
    pathRight: '> :nth-child(2) > :nth-child(1) > :nth-child(1) > :nth-child(1) > :nth-child(2)',
    expectedText: method,
    expectedOnHover: `method == "${method}"`
  };
}

function getProtocolDict() {
  return {
    pathLeft: '> :nth-child(1) > :nth-child(1)',
    pathRight: '> :nth-child(1) > :nth-child(1) > :nth-child(1) > :nth-child(1)',
    expectedTextLeft: 'REDIS',
    expectedTextRight: 'Redis Serialization Protocol',
    expectedOnHover: `redis`
  };
}

function getEntryNumById (id) {
  return parseInt(id.split('-')[1]);
}
  summary: 'key',
  value: {tab: valueTabs.response, regex: /^1$|^0$/mg}
})
@@ -4,8 +4,10 @@ import (

  "context"
  "fmt"
  "github.com/go-redis/redis/v8"
  amqp "github.com/rabbitmq/amqp091-go"
  "os/exec"
  "testing"
  "time"
)

func TestRedis(t *testing.T) {

@@ -99,3 +101,128 @@ func TestRedis(t *testing.T) {

  runCypressTests(t, "npx cypress run --spec \"cypress/integration/tests/Redis.js\"")
}

func TestAmqp(t *testing.T) {
  if testing.Short() {
    t.Skip("ignored acceptance test")
  }

  cliPath, cliPathErr := getCliPath()
  if cliPathErr != nil {
    t.Errorf("failed to get cli path, err: %v", cliPathErr)
    return
  }

  tapCmdArgs := getDefaultTapCommandArgs()

  tapNamespace := getDefaultTapNamespace()
  tapCmdArgs = append(tapCmdArgs, tapNamespace...)

  tapCmd := exec.Command(cliPath, tapCmdArgs...)
  t.Logf("running command: %v", tapCmd.String())

  t.Cleanup(func() {
    if err := cleanupCommand(tapCmd); err != nil {
      t.Logf("failed to cleanup tap command, err: %v", err)
    }
  })

  if err := tapCmd.Start(); err != nil {
    t.Errorf("failed to start tap command, err: %v", err)
    return
  }

  apiServerUrl := getApiServerUrl(defaultApiServerPort)

  if err := waitTapPodsReady(apiServerUrl); err != nil {
    t.Errorf("failed to start tap pods on time, err: %v", err)
    return
  }

  ctx := context.Background()

  rabbitmqExternalIp, err := getServiceExternalIp(ctx, defaultNamespaceName, "rabbitmq")
  if err != nil {
    t.Errorf("failed to get RabbitMQ external ip, err: %v", err)
    return
  }

  conn, err := amqp.Dial(fmt.Sprintf("amqp://guest:guest@%v:5672/", rabbitmqExternalIp))
  if err != nil {
    t.Errorf("failed to connect to RabbitMQ, err: %v", err)
    return
  }
  defer conn.Close()

  // Temporary fix for missing amqp entries
  time.Sleep(5 * time.Second)

  for i := 0; i < defaultEntriesCount/5; i++ {
    ch, err := conn.Channel()
    if err != nil {
      t.Errorf("failed to open a channel, err: %v", err)
      return
    }

    exchangeName := "exchange"
    err = ch.ExchangeDeclare(exchangeName, "direct", true, false, false, false, nil)
    if err != nil {
      t.Errorf("failed to declare an exchange, err: %v", err)
      return
    }

    q, err := ch.QueueDeclare("queue", true, false, false, false, nil)
    if err != nil {
      t.Errorf("failed to declare a queue, err: %v", err)
      return
    }

    routingKey := "routing_key"
    err = ch.QueueBind(q.Name, routingKey, exchangeName, false, nil)
    if err != nil {
      t.Errorf("failed to bind the queue, err: %v", err)
      return
    }

    err = ch.Publish(exchangeName, routingKey, false, false,
      amqp.Publishing{
        DeliveryMode: amqp.Persistent,
        ContentType: "text/plain",
        Body: []byte("message"),
      })
    if err != nil {
      t.Errorf("failed to publish a message, err: %v", err)
      return
    }

    msgChan, err := ch.Consume(q.Name, "Consumer", true, false, false, false, nil)
    if err != nil {
      t.Errorf("failed to create a consumer, err: %v", err)
      return
    }

    select {
    case <-msgChan:
      break
    case <-time.After(3 * time.Second):
      t.Errorf("failed to consume a message on time")
      return
    }

    err = ch.ExchangeDelete(exchangeName, false, false)
    if err != nil {
      t.Errorf("failed to delete the exchange, err: %v", err)
      return
    }

    _, err = ch.QueueDelete(q.Name, false, false, false)
    if err != nil {
      t.Errorf("failed to delete the queue, err: %v", err)
      return
    }

    ch.Close()
  }

  runCypressTests(t, "npx cypress run --spec \"cypress/integration/tests/Rabbit.js\"")
}
@@ -4,6 +4,7 @@ go 1.17

require (
  github.com/go-redis/redis/v8 v8.11.4
  github.com/rabbitmq/amqp091-go v1.3.0
  github.com/up9inc/mizu/shared v0.0.0
  gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
  k8s.io/apimachinery v0.23.3
@@ -427,6 +427,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT

github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rabbitmq/amqp091-go v1.3.0 h1:A/QuHiNw7LMCJsxx9iZn5lrIz6OrhIn7Dfk5/1YatWM=
github.com/rabbitmq/amqp091-go v1.3.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
@@ -39,6 +39,9 @@ kubectl create deployment httpbin --image=kennethreitz/httpbin -n mizu-tests2

echo "Creating redis deployment"
kubectl create deployment redis --image=redis -n mizu-tests

echo "Creating rabbitmq deployment"
kubectl create deployment rabbitmq --image=rabbitmq -n mizu-tests

echo "Creating httpbin services"
kubectl expose deployment httpbin --type=NodePort --port=80 -n mizu-tests
kubectl expose deployment httpbin2 --type=NodePort --port=80 -n mizu-tests

@@ -48,6 +51,9 @@ kubectl expose deployment httpbin --type=NodePort --port=80 -n mizu-tests2

echo "Creating redis service"
kubectl expose deployment redis --type=LoadBalancer --port=6379 -n mizu-tests

echo "Creating rabbitmq service"
kubectl expose deployment rabbitmq --type=LoadBalancer --port=5672 -n mizu-tests

echo "Starting proxy"
kubectl proxy --port=8080 &
@@ -118,8 +118,8 @@ func startReadingChannel(outputItems <-chan *tapApi.OutputChannelItem, extension

  for item := range outputItems {
    extension := extensionsMap[item.Protocol.Name]
    resolvedSource, resolvedDestionation := resolveIP(item.ConnectionInfo)
    mizuEntry := extension.Dissector.Analyze(item, resolvedSource, resolvedDestionation)
    resolvedSource, resolvedDestionation, namespace := resolveIP(item.ConnectionInfo)
    mizuEntry := extension.Dissector.Analyze(item, resolvedSource, resolvedDestionation, namespace)
    if extension.Protocol.Name == "http" {
      if !disableOASValidation {
        var httpPair tapApi.HTTPRequestResponsePair

@@ -158,26 +158,32 @@ func startReadingChannel(outputItems <-chan *tapApi.OutputChannelItem, extension
  }
}

func resolveIP(connectionInfo *tapApi.ConnectionInfo) (resolvedSource string, resolvedDestination string) {
func resolveIP(connectionInfo *tapApi.ConnectionInfo) (resolvedSource string, resolvedDestination string, namespace string) {
  if k8sResolver != nil {
    unresolvedSource := connectionInfo.ClientIP
    resolvedSource = k8sResolver.Resolve(unresolvedSource)
    if resolvedSource == "" {
    resolvedSourceObject := k8sResolver.Resolve(unresolvedSource)
    if resolvedSourceObject == nil {
      logger.Log.Debugf("Cannot find resolved name to source: %s", unresolvedSource)
      if os.Getenv("SKIP_NOT_RESOLVED_SOURCE") == "1" {
        return
      }
    } else {
      resolvedSource = resolvedSourceObject.FullAddress
    }

    unresolvedDestination := fmt.Sprintf("%s:%s", connectionInfo.ServerIP, connectionInfo.ServerPort)
    resolvedDestination = k8sResolver.Resolve(unresolvedDestination)
    if resolvedDestination == "" {
    resolvedDestinationObject := k8sResolver.Resolve(unresolvedDestination)
    if resolvedDestinationObject == nil {
      logger.Log.Debugf("Cannot find resolved name to dest: %s", unresolvedDestination)
      if os.Getenv("SKIP_NOT_RESOLVED_DEST") == "1" {
        return
      }
    } else {
      resolvedDestination = resolvedDestinationObject.FullAddress
      namespace = resolvedDestinationObject.Namespace
    }
  }
  return resolvedSource, resolvedDestination
  return resolvedSource, resolvedDestination, namespace
}

func CheckIsServiceIP(address string) bool {
@@ -104,9 +104,9 @@ func (h *RoutesEventHandlers) WebSocketMessage(_ int, message []byte) {
}

func handleTLSLink(outboundLinkMessage models.WebsocketOutboundLinkMessage) {
  resolvedName := k8sResolver.Resolve(outboundLinkMessage.Data.DstIP)
  if resolvedName != "" {
    outboundLinkMessage.Data.DstIP = resolvedName
  resolvedNameObject := k8sResolver.Resolve(outboundLinkMessage.Data.DstIP)
  if resolvedNameObject != nil {
    outboundLinkMessage.Data.DstIP = resolvedNameObject.FullAddress
  } else if outboundLinkMessage.Data.SuggestedResolvedName != "" {
    outboundLinkMessage.Data.DstIP = outboundLinkMessage.Data.SuggestedResolvedName
  }
@@ -102,13 +102,13 @@ func (s *ServiceMapControllerSuite) TestGet() {

  // response nodes
  aNode := servicemap.ServiceMapNode{
    Id: 1,
    Name: TCPEntryA.IP,
    Name: TCPEntryA.Name,
    Entry: TCPEntryA,
    Count: 1,
  }
  bNode := servicemap.ServiceMapNode{
    Id: 2,
    Name: TCPEntryB.IP,
    Name: TCPEntryB.Name,
    Entry: TCPEntryB,
    Count: 1,
  }
@@ -7,7 +7,7 @@ func CORSMiddleware() gin.HandlerFunc {

  c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
  c.Writer.Header().Set("Access-Control-Allow-Credentials", "true")
  c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With, x-session-token")
  c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT")
  c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT, DELETE")

  if c.Request.Method == "OPTIONS" {
    c.AbortWithStatus(204)
@@ -30,6 +30,11 @@ type Resolver struct {
  namespace string
}

type ResolvedObjectInfo struct {
  FullAddress string
  Namespace string
}

func (resolver *Resolver) Start(ctx context.Context) {
  if !resolver.isStarted {
    resolver.isStarted = true

@@ -40,12 +45,12 @@ func (resolver *Resolver) Start(ctx context.Context) {
  }
}

func (resolver *Resolver) Resolve(name string) string {
func (resolver *Resolver) Resolve(name string) *ResolvedObjectInfo {
  resolvedName, isFound := resolver.nameMap.Get(name)
  if !isFound {
    return ""
    return nil
  }
  return resolvedName.(string)
  return resolvedName.(*ResolvedObjectInfo)
}

func (resolver *Resolver) GetMap() cmap.ConcurrentMap {

@@ -71,7 +76,7 @@ func (resolver *Resolver) watchPods(ctx context.Context) error {
  }
  if event.Type == watch.Deleted {
    pod := event.Object.(*corev1.Pod)
    resolver.saveResolvedName(pod.Status.PodIP, "", event.Type)
    resolver.saveResolvedName(pod.Status.PodIP, "", pod.Namespace, event.Type)
  }
  case <-ctx.Done():
    watcher.Stop()

@@ -106,10 +111,10 @@ func (resolver *Resolver) watchEndpoints(ctx context.Context) error {
  }
  if subset.Addresses != nil {
    for _, address := range subset.Addresses {
      resolver.saveResolvedName(address.IP, serviceHostname, event.Type)
      resolver.saveResolvedName(address.IP, serviceHostname, endpoint.Namespace, event.Type)
      for _, port := range ports {
        ipWithPort := fmt.Sprintf("%s:%d", address.IP, port)
        resolver.saveResolvedName(ipWithPort, serviceHostname, event.Type)
        resolver.saveResolvedName(ipWithPort, serviceHostname, endpoint.Namespace, event.Type)
      }
    }
  }

@@ -139,19 +144,19 @@ func (resolver *Resolver) watchServices(ctx context.Context) error {
  service := event.Object.(*corev1.Service)
  serviceHostname := fmt.Sprintf("%s.%s", service.Name, service.Namespace)
  if service.Spec.ClusterIP != "" && service.Spec.ClusterIP != kubClientNullString {
    resolver.saveResolvedName(service.Spec.ClusterIP, serviceHostname, event.Type)
    resolver.saveResolvedName(service.Spec.ClusterIP, serviceHostname, service.Namespace, event.Type)
    if service.Spec.Ports != nil {
      for _, port := range service.Spec.Ports {
        if port.Port > 0 {
          resolver.saveResolvedName(fmt.Sprintf("%s:%d", service.Spec.ClusterIP, port.Port), serviceHostname, event.Type)
          resolver.saveResolvedName(fmt.Sprintf("%s:%d", service.Spec.ClusterIP, port.Port), serviceHostname, service.Namespace, event.Type)
        }
      }
    }
    resolver.saveServiceIP(service.Spec.ClusterIP, serviceHostname, event.Type)
    resolver.saveServiceIP(service.Spec.ClusterIP, serviceHostname, service.Namespace, event.Type)
  }
  if service.Status.LoadBalancer.Ingress != nil {
    for _, ingress := range service.Status.LoadBalancer.Ingress {
      resolver.saveResolvedName(ingress.IP, serviceHostname, event.Type)
      resolver.saveResolvedName(ingress.IP, serviceHostname, service.Namespace, event.Type)
    }
  }
  case <-ctx.Done():

@@ -161,21 +166,22 @@ func (resolver *Resolver) watchServices(ctx context.Context) error {
  }
}

func (resolver *Resolver) saveResolvedName(key string, resolved string, eventType watch.EventType) {
func (resolver *Resolver) saveResolvedName(key string, resolved string, namespace string, eventType watch.EventType) {
  if eventType == watch.Deleted {
    resolver.nameMap.Remove(key)
    logger.Log.Infof("setting %s=nil", key)
  } else {
    resolver.nameMap.Set(key, resolved)

    resolver.nameMap.Set(key, &ResolvedObjectInfo{FullAddress: resolved, Namespace: namespace})
    logger.Log.Infof("setting %s=%s", key, resolved)
  }
}

func (resolver *Resolver) saveServiceIP(key string, resolved string, eventType watch.EventType) {
func (resolver *Resolver) saveServiceIP(key string, resolved string, namespace string, eventType watch.EventType) {
  if eventType == watch.Deleted {
    resolver.serviceMap.Remove(key)
  } else {
    resolver.serviceMap.Set(key, resolved)
    resolver.nameMap.Set(key, &ResolvedObjectInfo{FullAddress: resolved, Namespace: namespace})
  }
}
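Resolver.Resolve now returns a *ResolvedObjectInfo (or nil on a miss) instead of a bare string, so callers switch from empty-string checks to nil checks, as the resolveIP and handleTLSLink hunks above do. A minimal sketch of the new call-site pattern; lookupDestination is a hypothetical helper that is not part of this diff and is assumed to live in the same package as Resolver:

```go
// Hypothetical helper illustrating the new Resolve contract:
// nil means "no mapping yet"; a non-nil result carries the address and its namespace.
func lookupDestination(resolver *Resolver, ip string, port string) (address string, namespace string) {
	key := fmt.Sprintf("%s:%s", ip, port) // same ip:port key format resolveIP uses
	info := resolver.Resolve(key)
	if info == nil {
		// Unresolved: fall back to the raw ip:port, mirroring resolveIP's behavior.
		return key, ""
	}
	return info.FullAddress, info.Namespace
}
```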
@@ -172,20 +172,33 @@ func (s *serviceMap) NewTCPEntry(src *tapApi.TCP, dst *tapApi.TCP, p *tapApi.Pro
    return
  }

  srcEntry := &entryData{
    key: key(src.IP),
    entry: src,
  }
  if len(srcEntry.entry.Name) == 0 {
  var srcEntry *entryData
  var dstEntry *entryData

  if len(src.Name) == 0 {
    srcEntry = &entryData{
      key: key(src.IP),
      entry: src,
    }
    srcEntry.entry.Name = UnresolvedNodeName
  } else {
    srcEntry = &entryData{
      key: key(src.Name),
      entry: src,
    }
  }

  dstEntry := &entryData{
    key: key(dst.IP),
    entry: dst,
  }
  if len(dstEntry.entry.Name) == 0 {
  if len(dst.Name) == 0 {
    dstEntry = &entryData{
      key: key(dst.IP),
      entry: dst,
    }
    dstEntry.entry.Name = UnresolvedNodeName
  } else {
    dstEntry = &entryData{
      key: key(dst.Name),
      entry: dst,
    }
  }

  s.addEdge(srcEntry, dstEntry, p)
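The rewritten NewTCPEntry keys a service-map node by the resolved service name when one exists and only falls back to the IP for unresolved peers, which also get UnresolvedNodeName. A hedged sketch of that key rule, extracted into a hypothetical helper for clarity (not part of this diff):

```go
// Hypothetical helper capturing the node-key rule NewTCPEntry now implements.
func nodeKey(entry *tapApi.TCP) key {
	if len(entry.Name) == 0 {
		return key(entry.IP) // unresolved peer: key by IP
	}
	return key(entry.Name) // resolved peer: key by service name
}
```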
@@ -268,9 +268,14 @@ func (s *ServiceMapEnabledSuite) TestServiceMap() {

  assert.LessOrEqual(node.Id, expectedNodeCount)

  // entry
  // node.Name is the key of the node, key = entry.IP
  // node.Name is the key of the node, key = entry.Name by default
  // entry.Name is the name of the service and could be unresolved
  assert.Equal(node.Name, node.Entry.IP)
  // when entry.Name is unresolved, key = entry.IP
  if node.Entry.Name == UnresolvedNodeName {
    assert.Equal(node.Name, node.Entry.IP)
  } else {
    assert.Equal(node.Name, node.Entry.Name)
  }
  assert.Equal(Port, node.Entry.Port)
  assert.Equal(entryName, node.Entry.Name)

@@ -320,16 +325,24 @@ func (s *ServiceMapEnabledSuite) TestServiceMap() {

  cdEdge := -1
  acEdge := -1
  var validateEdge = func(edge ServiceMapEdge, sourceEntryName string, destEntryName string, protocolName string, protocolCount int) {
    // source
    // source node
    assert.Contains(nodeIds, edge.Source.Id)
    assert.LessOrEqual(edge.Source.Id, expectedNodeCount)
    assert.Equal(edge.Source.Name, edge.Source.Entry.IP)
    if edge.Source.Entry.Name == UnresolvedNodeName {
      assert.Equal(edge.Source.Name, edge.Source.Entry.IP)
    } else {
      assert.Equal(edge.Source.Name, edge.Source.Entry.Name)
    }
    assert.Equal(sourceEntryName, edge.Source.Entry.Name)

    // destination
    // destination node
    assert.Contains(nodeIds, edge.Destination.Id)
    assert.LessOrEqual(edge.Destination.Id, expectedNodeCount)
    assert.Equal(edge.Destination.Name, edge.Destination.Entry.IP)
    if edge.Destination.Entry.Name == UnresolvedNodeName {
      assert.Equal(edge.Destination.Name, edge.Destination.Entry.IP)
    } else {
      assert.Equal(edge.Destination.Name, edge.Destination.Entry.Name)
    }
    assert.Equal(destEntryName, edge.Destination.Entry.Name)

    // protocol
@@ -4,9 +4,11 @@ import (

  "bytes"
  "encoding/json"
  "fmt"
  "io"
  "io/ioutil"
  "net/http"
  "net/url"
  "strings"
  "time"

  "github.com/up9inc/mizu/shared/kubernetes"

@@ -57,10 +59,8 @@ func (provider *Provider) TestConnection() error {

func (provider *Provider) isReachable() (bool, error) {
  echoUrl := fmt.Sprintf("%s/echo", provider.url)
  if response, err := provider.client.Get(echoUrl); err != nil {
  if _, err := provider.get(echoUrl); err != nil {
    return false, err
  } else if response.StatusCode != 200 {
    return false, fmt.Errorf("invalid status code %v", response.StatusCode)
  } else {
    return true, nil
  }

@@ -72,10 +72,8 @@ func (provider *Provider) ReportTapperStatus(tapperStatus shared.TapperStatus) e

  if jsonValue, err := json.Marshal(tapperStatus); err != nil {
    return fmt.Errorf("failed Marshal the tapper status %w", err)
  } else {
    if response, err := provider.client.Post(tapperStatusUrl, "application/json", bytes.NewBuffer(jsonValue)); err != nil {
    if _, err := provider.post(tapperStatusUrl, "application/json", bytes.NewBuffer(jsonValue)); err != nil {
      return fmt.Errorf("failed sending to API server the tapped pods %w", err)
    } else if response.StatusCode != 200 {
      return fmt.Errorf("failed sending to API server the tapper status, response status code %v", response.StatusCode)
    } else {
      logger.Log.Debugf("Reported to server API about tapper status: %v", tapperStatus)
      return nil

@@ -91,10 +89,8 @@ func (provider *Provider) ReportTappedPods(pods []core.Pod) error {

  if jsonValue, err := json.Marshal(podInfos); err != nil {
    return fmt.Errorf("failed Marshal the tapped pods %w", err)
  } else {
    if response, err := provider.client.Post(tappedPodsUrl, "application/json", bytes.NewBuffer(jsonValue)); err != nil {
    if _, err := provider.post(tappedPodsUrl, "application/json", bytes.NewBuffer(jsonValue)); err != nil {
      return fmt.Errorf("failed sending to API server the tapped pods %w", err)
    } else if response.StatusCode != 200 {
      return fmt.Errorf("failed sending to API server the tapped pods, response status code %v", response.StatusCode)
    } else {
      logger.Log.Debugf("Reported to server API about %d taped pods successfully", len(podInfos))
      return nil

@@ -105,11 +101,9 @@ func (provider *Provider) ReportTappedPods(pods []core.Pod) error {

func (provider *Provider) GetGeneralStats() (map[string]interface{}, error) {
  generalStatsUrl := fmt.Sprintf("%s/status/general", provider.url)

  response, requestErr := provider.client.Get(generalStatsUrl)
  response, requestErr := provider.get(generalStatsUrl)
  if requestErr != nil {
    return nil, fmt.Errorf("failed to get general stats for telemetry, err: %w", requestErr)
  } else if response.StatusCode != 200 {
    return nil, fmt.Errorf("failed to get general stats for telemetry, status code: %v", response.StatusCode)
  }

  defer response.Body.Close()

@@ -132,7 +126,7 @@ func (provider *Provider) GetVersion() (string, error) {

  Method: http.MethodGet,
  URL: versionUrl,
}
statusResp, err := provider.client.Do(req)
statusResp, err := provider.do(req)
if err != nil {
  return "", err
}

@@ -145,3 +139,40 @@ func (provider *Provider) GetVersion() (string, error) {

  return versionResponse.Ver, nil
}

// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func (provider *Provider) get(url string) (*http.Response, error) {
  return provider.checkError(provider.client.Get(url))
}

// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func (provider *Provider) post(url, contentType string, body io.Reader) (*http.Response, error) {
  return provider.checkError(provider.client.Post(url, contentType, body))
}

// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func (provider *Provider) do(req *http.Request) (*http.Response, error) {
  return provider.checkError(provider.client.Do(req))
}

func (provider *Provider) checkError(response *http.Response, errInOperation error) (*http.Response, error) {
  if (errInOperation != nil) {
    return response, errInOperation
  // Check only if status != 200 (and not status >= 300). Agent APIs return only 200 on success.
  } else if response.StatusCode != http.StatusOK {
    body, err := ioutil.ReadAll(response.Body)
    response.Body.Close()
    response.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
    if err != nil {
      return response, err
    }

    errorMsg := strings.ReplaceAll((string(body)), "\n", ";")
    return response, fmt.Errorf("got response with status code: %d, body: %s", response.StatusCode, errorMsg)
  }

  return response, nil
}
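The new get, post, and do wrappers funnel every response through checkError, which turns any non-200 status into an error while rewinding the body so it can still be inspected; the response always comes back with a non-nil body that the caller must close. A hedged usage sketch (getEcho is a hypothetical caller, not part of this diff, assumed to sit next to Provider in the same package):

```go
// Hypothetical caller of the new provider.get helper.
func (provider *Provider) getEcho() (string, error) {
	echoUrl := fmt.Sprintf("%s/echo", provider.url)
	response, err := provider.get(echoUrl) // a non-200 status comes back as err, body already rewound
	if err != nil {
		return "", err
	}
	defer response.Body.Close() // per the helper's contract, the caller closes the body
	body, readErr := ioutil.ReadAll(response.Body)
	if readErr != nil {
		return "", readErr
	}
	return string(body), nil
}
```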
@@ -1,10 +1,9 @@

package cmd

import (
  "fmt"
  "github.com/spf13/cobra"
  "github.com/up9inc/mizu/cli/config"
  "github.com/up9inc/mizu/cli/telemetry"
  "github.com/up9inc/mizu/shared/logger"
)

var installCmd = &cobra.Command{

@@ -12,13 +11,13 @@ var installCmd = &cobra.Command{

  Short: "Installs mizu components",
  RunE: func(cmd *cobra.Command, args []string) error {
    go telemetry.ReportRun("install", nil)
    runMizuInstall()
    return nil
  },
  PreRunE: func(cmd *cobra.Command, args []string) error {
    if config.Config.IsNsRestrictedMode() {
      return fmt.Errorf("install is not supported in restricted namespace mode")
    }
    logger.Log.Infof("This command has been deprecated, please use helm as described below.\n\n")

    logger.Log.Infof("To install stable build of Mizu on your cluster using helm, run the following command:")
    logger.Log.Infof(" helm install mizu https://static.up9.com/mizu/helm --namespace=mizu --create-namespace\n\n")

    logger.Log.Infof("To install development build of Mizu on your cluster using helm, run the following command:")
    logger.Log.Infof(" helm install mizu https://static.up9.com/mizu/helm-develop --namespace=mizu --create-namespace")

    return nil
  },

@@ -27,4 +26,3 @@ var installCmd = &cobra.Command{

func init() {
  rootCmd.AddCommand(installCmd)
}
@@ -1,81 +0,0 @@

package cmd

import (
  "context"
  "errors"
  "fmt"

  "github.com/creasty/defaults"
  "github.com/up9inc/mizu/cli/config"
  "github.com/up9inc/mizu/cli/errormessage"
  "github.com/up9inc/mizu/cli/resources"
  "github.com/up9inc/mizu/cli/uiUtils"
  "github.com/up9inc/mizu/shared"
  "github.com/up9inc/mizu/shared/logger"
  k8serrors "k8s.io/apimachinery/pkg/api/errors"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func runMizuInstall() {
  kubernetesProvider, err := getKubernetesProviderForCli()
  if err != nil {
    return
  }

  ctx, cancel := context.WithCancel(context.Background())
  defer cancel() // cancel will be called when this function exits

  var serializedValidationRules string
  var serializedContract string

  var defaultMaxEntriesDBSizeBytes int64 = 200 * 1000 * 1000

  defaultResources := shared.Resources{}
  if err := defaults.Set(&defaultResources); err != nil {
    logger.Log.Debug(err)
  }

  mizuAgentConfig := getInstallMizuAgentConfig(defaultMaxEntriesDBSizeBytes, defaultResources)
  serializedMizuConfig, err := getSerializedMizuAgentConfig(mizuAgentConfig)
  if err != nil {
    logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error serializing mizu config: %v", errormessage.FormatError(err)))
    return
  }

  if err = resources.CreateInstallMizuResources(ctx, kubernetesProvider, serializedValidationRules,
    serializedContract, serializedMizuConfig, config.Config.IsNsRestrictedMode(),
    config.Config.MizuResourcesNamespace, config.Config.AgentImage,
    config.Config.KratosImage, config.Config.KetoImage,
    nil, defaultMaxEntriesDBSizeBytes, defaultResources, config.Config.ImagePullPolicy(),
    config.Config.LogLevel(), false); err != nil {
    var statusError *k8serrors.StatusError
    if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
      logger.Log.Info("Mizu is already running in this namespace, run `mizu clean` to remove the currently running Mizu instance")
    } else {
      defer resources.CleanUpMizuResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.MizuResourcesNamespace)
      logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error creating resources: %v", errormessage.FormatError(err)))
    }

    return
  }

  logger.Log.Infof(uiUtils.Magenta, "Installation completed, run `mizu view` to connect to the mizu daemon instance")
}

func getInstallMizuAgentConfig(maxDBSizeBytes int64, tapperResources shared.Resources) *shared.MizuAgentConfig {
  mizuAgentConfig := shared.MizuAgentConfig{
    MaxDBSizeBytes: maxDBSizeBytes,
    AgentImage: config.Config.AgentImage,
    PullPolicy: config.Config.ImagePullPolicyStr,
    LogLevel: config.Config.LogLevel(),
    TapperResources: tapperResources,
    MizuResourcesNamespace: config.Config.MizuResourcesNamespace,
    AgentDatabasePath: shared.DataDirPath,
    StandaloneMode: true,
    ServiceMap: config.Config.ServiceMap,
    OAS: config.Config.OAS,
    Elastic: config.Config.Elastic,
  }

  return &mizuAgentConfig
}
@@ -162,6 +162,7 @@ func getTapMizuAgentConfig() *shared.MizuAgentConfig {

  AgentDatabasePath: shared.DataDirPath,
  ServiceMap: config.Config.ServiceMap,
  OAS: config.Config.OAS,
  Telemetry: config.Config.Telemetry,
  Elastic: config.Config.Elastic,
}
@@ -3,13 +3,13 @@ package cmd

import (
  "context"
  "fmt"
  "github.com/up9inc/mizu/cli/utils"
  "net/http"

  "github.com/up9inc/mizu/cli/utils"

  "github.com/up9inc/mizu/cli/apiserver"
  "github.com/up9inc/mizu/cli/config"
  "github.com/up9inc/mizu/cli/mizu/fsUtils"
  "github.com/up9inc/mizu/cli/mizu/version"
  "github.com/up9inc/mizu/cli/uiUtils"
  "github.com/up9inc/mizu/shared/kubernetes"
  "github.com/up9inc/mizu/shared/logger"

@@ -62,14 +62,5 @@ func runMizuView() {

  uiUtils.OpenBrowser(url)
}

if isCompatible, err := version.CheckVersionCompatibility(apiServerProvider); err != nil {
  logger.Log.Errorf("Failed to check versions compatibility %v", err)
  cancel()
  return
} else if !isCompatible {
  cancel()
  return
}

utils.WaitForFinish(ctx, cancel)
}
@@ -38,7 +38,7 @@ type ConfigStruct struct {

  ConfigFilePath string `yaml:"config-path,omitempty" readonly:""`
  HeadlessMode bool `yaml:"headless" default:"false"`
  LogLevelStr string `yaml:"log-level,omitempty" default:"INFO" readonly:""`
  ServiceMap bool `yaml:"service-map" default:"false"`
  ServiceMap bool `yaml:"service-map" default:"true"`
  OAS bool `yaml:"oas,omitempty" default:"false" readonly:""`
  Elastic shared.ElasticConfig `yaml:"elastic"`
}
@@ -76,6 +76,8 @@ func NewProvider(kubeConfigPath string) (*Provider, error) {

    "you can set alternative kube config file path by adding the kube-config-path field to the mizu config file, err: %w", kubeConfigPath, err)
  }

  logger.Log.Debugf("K8s client config, host: %s, api path: %s, user agent: %s", restClientConfig.Host, restClientConfig.APIPath, restClientConfig.UserAgent)

  return &Provider{
    clientSet: clientSet,
    kubernetesConfig: kubernetesConfig,

@@ -952,6 +954,11 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac

  labelSelector := applyconfmeta.LabelSelector()
  labelSelector.WithMatchLabels(map[string]string{"app": tapperPodName})

  applyOptions := metav1.ApplyOptions{
    Force: true,
    FieldManager: fieldManagerName,
  }

  daemonSet := applyconfapp.DaemonSet(daemonSetName, namespace)
  daemonSet.
    WithLabels(map[string]string{

@@ -960,7 +967,7 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac

    }).
    WithSpec(applyconfapp.DaemonSetSpec().WithSelector(labelSelector).WithTemplate(podTemplate))

  _, err = provider.clientSet.AppsV1().DaemonSets(namespace).Apply(ctx, daemonSet, metav1.ApplyOptions{FieldManager: fieldManagerName})
  _, err = provider.clientSet.AppsV1().DaemonSets(namespace).Apply(ctx, daemonSet, applyOptions)
  return err
}

@@ -128,9 +128,14 @@ func getHttpDialer(kubernetesProvider *Provider, namespace string, podName strin

    return nil, err
  }

  path := fmt.Sprintf("/api/v1/namespaces/%s/pods/%s/portforward", namespace, podName)
  hostIP := strings.TrimLeft(kubernetesProvider.clientConfig.Host, "htps:/") // no need specify "t" twice
  serverURL := url.URL{Scheme: "https", Path: path, Host: hostIP}
  clientConfigHostUrl, err := url.Parse(kubernetesProvider.clientConfig.Host)
  if err != nil {
    return nil, fmt.Errorf("Failed parsing client config host URL %s, error %w", kubernetesProvider.clientConfig.Host, err)
  }
  path := fmt.Sprintf("%s/api/v1/namespaces/%s/pods/%s/portforward", clientConfigHostUrl.Path, namespace, podName)

  serverURL := url.URL{Scheme: "https", Path: path, Host: clientConfigHostUrl.Host}
  logger.Log.Debugf("Http dialer url %v", serverURL)

  return spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, http.MethodPost, &serverURL), nil
}
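In the getHttpDialer hunk above, the old strings.TrimLeft call treated "htps:/" as a set of characters rather than a prefix, so it could eat leading letters of the hostname, and it also kept any base path glued to the host; parsing the host with url.Parse keeps the host and the base path separate. A small standalone illustration of the difference (the example URL is made up, not from this diff):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	host := "https://proxy.example.com/k8s/clusters/c-abc"

	// Old approach: TrimLeft strips any leading run of the characters h, t, p, s, : and /,
	// so the leading "p" of the hostname is eaten and the path stays glued to the host.
	fmt.Println(strings.TrimLeft(host, "htps:/")) // roxy.example.com/k8s/clusters/c-abc

	// New approach: parse the URL and keep the host and the base path separate.
	u, err := url.Parse(host)
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Host, u.Path) // proxy.example.com /k8s/clusters/c-abc
}
```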
@@ -43,6 +43,7 @@ type MizuAgentConfig struct {

  StandaloneMode bool `json:"standaloneMode"`
  ServiceMap bool `json:"serviceMap"`
  OAS bool `json:"oas"`
  Telemetry bool `json:"telemetry"`
  Elastic ElasticConfig `json:"elastic"`
}
@@ -39,10 +39,9 @@ type TCP struct {
}

type Extension struct {
  Protocol *Protocol
  Path string
  Dissector Dissector
  MatcherMap *sync.Map
  Protocol *Protocol
  Path string
  Dissector Dissector
}

type ConnectionInfo struct {

@@ -62,7 +61,6 @@ type TcpID struct {
}

type CounterPair struct {
  StreamId int64
  Request uint
  Response uint
  sync.Mutex

@@ -100,10 +98,16 @@ type SuperIdentifier struct {

type Dissector interface {
  Register(*Extension)
  Ping()
  Dissect(b *bufio.Reader, isClient bool, tcpID *TcpID, counterPair *CounterPair, superTimer *SuperTimer, superIdentifier *SuperIdentifier, emitter Emitter, options *TrafficFilteringOptions) error
  Analyze(item *OutputChannelItem, resolvedSource string, resolvedDestination string) *Entry
  Dissect(b *bufio.Reader, isClient bool, tcpID *TcpID, counterPair *CounterPair, superTimer *SuperTimer, superIdentifier *SuperIdentifier, emitter Emitter, options *TrafficFilteringOptions, reqResMatcher RequestResponseMatcher) error
  Analyze(item *OutputChannelItem, resolvedSource string, resolvedDestination string, namespace string) *Entry
  Represent(request map[string]interface{}, response map[string]interface{}) (object []byte, bodySize int64, err error)
  Macros() map[string]string
  NewResponseRequestMatcher() RequestResponseMatcher
}

type RequestResponseMatcher interface {
  GetMap() *sync.Map
  SetMaxTry(value int)
}

type Emitting struct {

@@ -125,6 +129,7 @@ type Entry struct {

  Protocol Protocol `json:"proto"`
  Source *TCP `json:"src"`
  Destination *TCP `json:"dst"`
  Namespace string `json:"namespace,omitempty"`
  Outgoing bool `json:"outgoing"`
  Timestamp int64 `json:"timestamp"`
  StartTime time.Time `json:"startTime"`
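The Dissector interface now takes a per-stream RequestResponseMatcher in Dissect, a namespace in Analyze, and must expose NewResponseRequestMatcher; protocols that do not pair requests and responses, like the AMQP extension further down, simply return nil from it. A hedged sketch of what a matcher satisfying the new RequestResponseMatcher interface could look like (simpleMatcher is illustrative only, not from this diff, and assumes an extension package importing tap/api):

```go
// Illustrative matcher backed by a sync.Map, satisfying the interface above.
type simpleMatcher struct {
	openMessages *sync.Map // keyed per in-flight request, exposed to the Cleaner via GetMap()
	maxTry       int
}

func (m *simpleMatcher) GetMap() *sync.Map   { return m.openMessages }
func (m *simpleMatcher) SetMaxTry(value int) { m.maxTry = value }

// A dissector that pairs traffic would hand one of these out per stream:
func newSimpleMatcher() api.RequestResponseMatcher {
	return &simpleMatcher{openMessages: &sync.Map{}}
}
```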
@@ -22,6 +22,7 @@ type Cleaner struct {

  connectionTimeout time.Duration
  stats CleanerStats
  statsMutex sync.Mutex
  streamsMap *tcpStreamMap
}

func (cl *Cleaner) clean() {

@@ -32,10 +33,15 @@ func (cl *Cleaner) clean() {

  flushed, closed := cl.assembler.FlushCloseOlderThan(startCleanTime.Add(-cl.connectionTimeout))
  cl.assemblerMutex.Unlock()

  for _, extension := range extensions {
    deleted := deleteOlderThan(extension.MatcherMap, startCleanTime.Add(-cl.connectionTimeout))
  cl.streamsMap.streams.Range(func(k, v interface{}) bool {
    reqResMatcher := v.(*tcpStreamWrapper).reqResMatcher
    if reqResMatcher == nil {
      return true
    }
    deleted := deleteOlderThan(reqResMatcher.GetMap(), startCleanTime.Add(-cl.connectionTimeout))
    cl.stats.deleted += deleted
  }
    return true
  })

  cl.statsMutex.Lock()
  logger.Log.Debugf("Assembler Stats after cleaning %s", cl.assembler.Dump())
tap/extensions/amqp/Makefile (new file, 16 lines)

@@ -0,0 +1,16 @@

skipbin := $$(find bin -mindepth 1 -maxdepth 1)
skipexpect := $$(find expect -mindepth 1 -maxdepth 1)

test: test-pull-bin test-pull-expect
  @MIZU_TEST=1 go test -v ./... -coverpkg=./... -race -coverprofile=coverage.out -covermode=atomic

test-update: test-pull-bin
  @MIZU_TEST=1 TEST_UPDATE=1 go test -v ./... -coverpkg=./... -coverprofile=coverage.out -covermode=atomic

test-pull-bin:
  @mkdir -p bin
  @[ "${skipbin}" ] && echo "Skipping downloading BINs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp gs://static.up9.io/mizu/test-pcap/bin/amqp/\*.bin bin

test-pull-expect:
  @mkdir -p expect
  @[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect/amqp/\* expect
@@ -2,8 +2,16 @@ module github.com/up9inc/mizu/tap/extensions/amqp

go 1.17

require github.com/up9inc/mizu/tap/api v0.0.0
require (
  github.com/stretchr/testify v1.7.0
  github.com/up9inc/mizu/tap/api v0.0.0
)

require github.com/google/martian v2.1.0+incompatible // indirect
require (
  github.com/davecgh/go-spew v1.1.0 // indirect
  github.com/google/martian v2.1.0+incompatible // indirect
  github.com/pmezard/go-difflib v1.0.0 // indirect
  gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
)

replace github.com/up9inc/mizu/tap/api v0.0.0 => ../../api
@@ -1,2 +1,13 @@

github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -3,6 +3,7 @@ package amqp

import (
  "encoding/json"
  "fmt"
  "sort"
  "strconv"
  "time"

@@ -282,6 +283,9 @@ func representBasicPublish(event map[string]interface{}) []interface{} {

    Selector: fmt.Sprintf(`request.properties.headers["%s"]`, name),
  })
}
sort.Slice(headers, func(i, j int) bool {
  return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
  Type: api.TABLE,

@@ -366,6 +370,9 @@ func representBasicDeliver(event map[string]interface{}) []interface{} {

    Selector: fmt.Sprintf(`request.properties.headers["%s"]`, name),
  })
}
sort.Slice(headers, func(i, j int) bool {
  return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
  Type: api.TABLE,

@@ -438,6 +445,9 @@ func representQueueDeclare(event map[string]interface{}) []interface{} {

    Selector: fmt.Sprintf(`request.arguments["%s"]`, name),
  })
}
sort.Slice(headers, func(i, j int) bool {
  return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
  Type: api.TABLE,

@@ -504,6 +514,9 @@ func representExchangeDeclare(event map[string]interface{}) []interface{} {

    Selector: fmt.Sprintf(`request.arguments["%s"]`, name),
  })
}
sort.Slice(headers, func(i, j int) bool {
  return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
  Type: api.TABLE,

@@ -565,6 +578,9 @@ func representConnectionStart(event map[string]interface{}) []interface{} {

    Selector: fmt.Sprintf(`request.serverProperties["%s"]`, name),
  })
}
sort.Slice(headers, func(i, j int) bool {
  return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
  Type: api.TABLE,

@@ -656,6 +672,9 @@ func representQueueBind(event map[string]interface{}) []interface{} {

    Selector: fmt.Sprintf(`request.arguments["%s"]`, name),
  })
}
sort.Slice(headers, func(i, j int) bool {
  return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
  Type: api.TABLE,

@@ -717,6 +736,9 @@ func representBasicConsume(event map[string]interface{}) []interface{} {

    Selector: fmt.Sprintf(`request.arguments["%s"]`, name),
  })
}
sort.Slice(headers, func(i, j int) bool {
  return headers[i].Name < headers[j].Name
})
headersMarshaled, _ := json.Marshal(headers)
rep = append(rep, api.SectionData{
  Type: api.TABLE,
@@ -8,6 +8,7 @@ import (
|
||||
"io"
|
||||
"log"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
@@ -42,7 +43,7 @@ func (d dissecting) Ping() {
|
||||
|
||||
const amqpRequest string = "amqp_request"
|
||||
|
||||
func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions) error {
|
||||
func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
|
||||
r := AmqpReader{b}
|
||||
|
||||
var remaining int
|
||||
@@ -85,7 +86,7 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
|
||||
frame, err := r.ReadFrame()
|
||||
if err == io.EOF {
|
||||
// We must read until we see an EOF... very important!
|
||||
return errors.New("AMQP EOF")
|
||||
return nil
|
||||
}
|
||||
|
||||
switch f := frame.(type) {
|
||||
@@ -96,6 +97,12 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
|
||||
// start content state
|
||||
header = f
|
||||
remaining = int(header.Size)
|
||||
// Workaround for `Time.MarshalJSON: year outside of range [0,9999]` error
if header.Properties.Timestamp.Year() > 9999 {
header.Properties.Timestamp = time.Time{}.UTC()
}
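The workaround above exists because encoding/json refuses to serialize a time.Time whose year falls outside [0,9999]. A minimal standalone sketch of the failure and the fallback; the out-of-range value below is hypothetical, not taken from real traffic:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// A corrupt AMQP header field can decode into a timestamp far beyond year 9999.
	bad := time.Unix(1<<40, 0).UTC() // hypothetical out-of-range example

	if _, err := json.Marshal(bad); err != nil {
		fmt.Println(err) // wraps "Time.MarshalJSON: year outside of range [0,9999]"
	}

	// Same fallback as the dissector: reset to the zero time in UTC so marshaling succeeds.
	if bad.Year() > 9999 {
		bad = time.Time{}.UTC()
	}

	out, _ := json.Marshal(bad)
	fmt.Println(string(out)) // "0001-01-01T00:00:00Z"
}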
|
||||
|
||||
switch lastMethodFrameMessage.(type) {
|
||||
case *BasicPublish:
|
||||
eventBasicPublish.Properties = header.Properties
|
||||
@@ -212,7 +219,7 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
|
||||
}
|
||||
}
|
||||
|
||||
func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string, resolvedDestination string) *api.Entry {
|
||||
func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string, resolvedDestination string, namespace string) *api.Entry {
|
||||
request := item.Pair.Request.Payload.(map[string]interface{})
|
||||
reqDetails := request["details"].(map[string]interface{})
|
||||
|
||||
@@ -254,6 +261,7 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
|
||||
IP: item.ConnectionInfo.ServerIP,
|
||||
Port: item.ConnectionInfo.ServerPort,
|
||||
},
|
||||
Namespace: namespace,
|
||||
Outgoing: item.ConnectionInfo.IsOutgoing,
|
||||
Request: reqDetails,
|
||||
Method: request["method"].(string),
|
||||
@@ -300,6 +308,10 @@ func (d dissecting) Macros() map[string]string {
|
||||
}
|
||||
}
|
||||
|
||||
func (d dissecting) NewResponseRequestMatcher() api.RequestResponseMatcher {
|
||||
return nil
|
||||
}
|
||||
|
||||
var Dissector dissecting
|
||||
|
||||
func NewDissector() api.Dissector {
|
||||
|
||||
289 tap/extensions/amqp/main_test.go Normal file
@@ -0,0 +1,289 @@
|
||||
package amqp
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
const (
|
||||
binDir = "bin"
|
||||
patternBin = "*_req.bin"
|
||||
patternDissect = "*.json"
|
||||
msgDissecting = "Dissecting:"
|
||||
msgAnalyzing = "Analyzing:"
|
||||
msgRepresenting = "Representing:"
|
||||
respSuffix = "_res.bin"
|
||||
expectDir = "expect"
|
||||
dissectDir = "dissect"
|
||||
analyzeDir = "analyze"
|
||||
representDir = "represent"
|
||||
testUpdate = "TEST_UPDATE"
|
||||
)
|
||||
|
||||
func TestRegister(t *testing.T) {
|
||||
dissector := NewDissector()
|
||||
extension := &api.Extension{}
|
||||
dissector.Register(extension)
|
||||
assert.Equal(t, "amqp", extension.Protocol.Name)
|
||||
}
|
||||
|
||||
func TestMacros(t *testing.T) {
|
||||
expectedMacros := map[string]string{
|
||||
"amqp": `proto.name == "amqp"`,
|
||||
}
|
||||
dissector := NewDissector()
|
||||
macros := dissector.Macros()
|
||||
assert.Equal(t, expectedMacros, macros)
|
||||
}
|
||||
|
||||
func TestPing(t *testing.T) {
|
||||
dissector := NewDissector()
|
||||
dissector.Ping()
|
||||
}
|
||||
|
||||
func TestDissect(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
expectDirDissect := path.Join(expectDir, dissectDir)
|
||||
|
||||
if testUpdateEnabled {
|
||||
os.RemoveAll(expectDirDissect)
|
||||
err := os.MkdirAll(expectDirDissect, 0775)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(binDir, patternBin))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
options := &api.TrafficFilteringOptions{
|
||||
IgnoredUserAgents: []string{},
|
||||
}
|
||||
|
||||
for _, _path := range paths {
|
||||
basePath := _path[:len(_path)-8]
|
||||
|
||||
// Channel to verify the output
|
||||
itemChannel := make(chan *api.OutputChannelItem)
|
||||
var emitter api.Emitter = &api.Emitting{
|
||||
AppStats: &api.AppStats{},
|
||||
OutputChannel: itemChannel,
|
||||
}
|
||||
|
||||
var items []*api.OutputChannelItem
|
||||
stop := make(chan bool)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
return
|
||||
case item := <-itemChannel:
|
||||
items = append(items, item)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Stream level
|
||||
counterPair := &api.CounterPair{
|
||||
Request: 0,
|
||||
Response: 0,
|
||||
}
|
||||
superIdentifier := &api.SuperIdentifier{}
|
||||
|
||||
// Request
|
||||
pathClient := _path
|
||||
fmt.Printf("%s %s\n", msgDissecting, pathClient)
|
||||
fileClient, err := os.Open(pathClient)
|
||||
assert.Nil(t, err)
|
||||
|
||||
bufferClient := bufio.NewReader(fileClient)
|
||||
tcpIDClient := &api.TcpID{
|
||||
SrcIP: "1",
|
||||
DstIP: "2",
|
||||
SrcPort: "1",
|
||||
DstPort: "2",
|
||||
}
|
||||
reqResMatcher := dissector.NewResponseRequestMatcher()
|
||||
err = dissector.Dissect(bufferClient, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Response
|
||||
pathServer := basePath + respSuffix
|
||||
fmt.Printf("%s %s\n", msgDissecting, pathServer)
|
||||
fileServer, err := os.Open(pathServer)
|
||||
assert.Nil(t, err)
|
||||
|
||||
bufferServer := bufio.NewReader(fileServer)
|
||||
tcpIDServer := &api.TcpID{
|
||||
SrcIP: "2",
|
||||
DstIP: "1",
|
||||
SrcPort: "2",
|
||||
DstPort: "1",
|
||||
}
|
||||
err = dissector.Dissect(bufferServer, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fileClient.Close()
|
||||
fileServer.Close()
|
||||
|
||||
pathExpect := path.Join(expectDirDissect, fmt.Sprintf("%s.json", basePath[4:]))
|
||||
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
stop <- true
|
||||
|
||||
marshaled, err := json.Marshal(items)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if testUpdateEnabled {
|
||||
if len(items) > 0 {
|
||||
err = os.WriteFile(pathExpect, marshaled, 0644)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
|
||||
assert.Len(t, items, 0)
|
||||
} else {
|
||||
expectedBytes, err := ioutil.ReadFile(pathExpect)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.JSONEq(t, string(expectedBytes), string(marshaled))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAnalyze(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
expectDirDissect := path.Join(expectDir, dissectDir)
|
||||
expectDirAnalyze := path.Join(expectDir, analyzeDir)
|
||||
|
||||
if testUpdateEnabled {
|
||||
os.RemoveAll(expectDirAnalyze)
|
||||
err := os.MkdirAll(expectDirAnalyze, 0775)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirDissect, patternDissect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, _path := range paths {
|
||||
fmt.Printf("%s %s\n", msgAnalyzing, _path)
|
||||
|
||||
bytes, err := ioutil.ReadFile(_path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var items []*api.OutputChannelItem
|
||||
err = json.Unmarshal(bytes, &items)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var entries []*api.Entry
|
||||
for _, item := range items {
|
||||
entry := dissector.Analyze(item, "", "", "")
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
|
||||
pathExpect := path.Join(expectDirAnalyze, filepath.Base(_path))
|
||||
|
||||
marshaled, err := json.Marshal(entries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if testUpdateEnabled {
|
||||
if len(entries) > 0 {
|
||||
err = os.WriteFile(pathExpect, marshaled, 0644)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
|
||||
assert.Len(t, items, 0)
|
||||
} else {
|
||||
expectedBytes, err := ioutil.ReadFile(pathExpect)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.JSONEq(t, string(expectedBytes), string(marshaled))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepresent(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
expectDirAnalyze := path.Join(expectDir, analyzeDir)
|
||||
expectDirRepresent := path.Join(expectDir, representDir)
|
||||
|
||||
if testUpdateEnabled {
|
||||
os.RemoveAll(expectDirRepresent)
|
||||
err := os.MkdirAll(expectDirRepresent, 0775)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternDissect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, _path := range paths {
|
||||
fmt.Printf("%s %s\n", msgRepresenting, _path)
|
||||
|
||||
bytes, err := ioutil.ReadFile(_path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var entries []*api.Entry
|
||||
err = json.Unmarshal(bytes, &entries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var objects []string
|
||||
for _, entry := range entries {
|
||||
object, _, err := dissector.Represent(entry.Request, entry.Response)
|
||||
assert.Nil(t, err)
|
||||
objects = append(objects, string(object))
|
||||
}
|
||||
|
||||
pathExpect := path.Join(expectDirRepresent, filepath.Base(_path))
|
||||
|
||||
marshaled, err := json.Marshal(objects)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if testUpdateEnabled {
|
||||
if len(objects) > 0 {
|
||||
err = os.WriteFile(pathExpect, marshaled, 0644)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
|
||||
assert.Len(t, objects, 0)
|
||||
} else {
|
||||
expectedBytes, err := ioutil.ReadFile(pathExpect)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.JSONEq(t, string(expectedBytes), string(marshaled))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -140,7 +140,7 @@ func readTimestamp(r io.Reader) (v time.Time, err error) {
if err = binary.Read(r, binary.BigEndian, &sec); err != nil {
return
}
return time.Unix(sec, 0), nil
return time.Unix(sec, 0).UTC(), nil
}
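The .UTC() added above matters for reproducibility: time.Unix returns a value in the machine's local timezone, so without the conversion the decoded timestamps (and any JSON golden files produced from them) would differ between environments. A small sketch using only the standard library:

package main

import (
	"fmt"
	"time"
)

func main() {
	sec := int64(1609459200) // arbitrary example: 2021-01-01T00:00:00Z

	local := time.Unix(sec, 0)     // formatted in the local timezone
	utc := time.Unix(sec, 0).UTC() // formatted identically everywhere

	fmt.Println(local.Format(time.RFC3339)) // e.g. 2020-12-31T19:00:00-05:00, varies by TZ
	fmt.Println(utc.Format(time.RFC3339))   // 2021-01-01T00:00:00Z
}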
|
||||
|
||||
/*
|
||||
|
||||
File diff suppressed because it is too large
@@ -223,41 +223,8 @@ type Decimal struct {
|
||||
//
|
||||
type Table map[string]interface{}
|
||||
|
||||
func validateField(f interface{}) error {
|
||||
switch fv := f.(type) {
|
||||
case nil, bool, byte, int, int16, int32, int64, float32, float64, string, []byte, Decimal, time.Time:
|
||||
return nil
|
||||
|
||||
case []interface{}:
|
||||
for _, v := range fv {
|
||||
if err := validateField(v); err != nil {
|
||||
return fmt.Errorf("in array %s", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
case Table:
|
||||
for k, v := range fv {
|
||||
if err := validateField(v); err != nil {
|
||||
return fmt.Errorf("table field %q %s", k, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("value %T not supported", f)
|
||||
}
|
||||
|
||||
// Validate returns an error if any Go types in the table are incompatible with AMQP types.
|
||||
func (t Table) Validate() error {
|
||||
return validateField(t)
|
||||
}
|
||||
|
||||
type Message interface {
|
||||
id() (uint16, uint16)
|
||||
wait() bool
|
||||
read(io.Reader) error
|
||||
write(io.Writer) error
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -286,8 +253,6 @@ system calls to read a frame.
|
||||
|
||||
*/
|
||||
type frame interface {
|
||||
write(io.Writer) error
|
||||
channel() uint16
|
||||
}
|
||||
|
||||
type AmqpReader struct {
|
||||
@@ -323,8 +288,6 @@ type MethodFrame struct {
|
||||
Method Message
|
||||
}
|
||||
|
||||
func (f *MethodFrame) channel() uint16 { return f.ChannelId }
|
||||
|
||||
/*
|
||||
Heartbeating is a technique designed to undo one of TCP/IP's features, namely
|
||||
its ability to recover from a broken physical connection by closing only after
|
||||
@@ -338,8 +301,6 @@ type HeartbeatFrame struct {
|
||||
ChannelId uint16
|
||||
}
|
||||
|
||||
func (f *HeartbeatFrame) channel() uint16 { return f.ChannelId }
|
||||
|
||||
/*
|
||||
Certain methods (such as Basic.Publish, Basic.Deliver, etc.) are formally
|
||||
defined as carrying content. When a peer sends such a method frame, it always
|
||||
@@ -367,8 +328,6 @@ type HeaderFrame struct {
|
||||
Properties Properties
|
||||
}
|
||||
|
||||
func (f *HeaderFrame) channel() uint16 { return f.ChannelId }
|
||||
|
||||
/*
|
||||
Content is the application data we carry from client-to-client via the AMQP
|
||||
server. Content is, roughly speaking, a set of properties plus a binary data
|
||||
@@ -388,5 +347,3 @@ type BodyFrame struct {
|
||||
ChannelId uint16
|
||||
Body []byte
|
||||
}
|
||||
|
||||
func (f *BodyFrame) channel() uint16 { return f.ChannelId }
|
||||
|
||||
@@ -1,403 +0,0 @@
|
||||
// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// Source code and contact info at http://github.com/streadway/amqp
|
||||
|
||||
package amqp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"math"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (f *MethodFrame) write(w io.Writer) (err error) {
|
||||
var payload bytes.Buffer
|
||||
|
||||
if f.Method == nil {
|
||||
return errors.New("malformed frame: missing method")
|
||||
}
|
||||
|
||||
class, method := f.Method.id()
|
||||
|
||||
if err = binary.Write(&payload, binary.BigEndian, class); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = binary.Write(&payload, binary.BigEndian, method); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = f.Method.write(&payload); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return writeFrame(w, frameMethod, f.ChannelId, payload.Bytes())
|
||||
}
|
||||
|
||||
// Heartbeat
|
||||
//
|
||||
// Payload is empty
|
||||
func (f *HeartbeatFrame) write(w io.Writer) (err error) {
|
||||
return writeFrame(w, frameHeartbeat, f.ChannelId, []byte{})
|
||||
}
|
||||
|
||||
// CONTENT HEADER
|
||||
// 0 2 4 12 14
|
||||
// +----------+--------+-----------+----------------+------------- - -
|
||||
// | class-id | weight | body size | property flags | property list...
|
||||
// +----------+--------+-----------+----------------+------------- - -
|
||||
// short short long long short remainder...
|
||||
//
|
||||
func (f *HeaderFrame) write(w io.Writer) (err error) {
|
||||
var payload bytes.Buffer
|
||||
var zeroTime time.Time
|
||||
|
||||
if err = binary.Write(&payload, binary.BigEndian, f.ClassId); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = binary.Write(&payload, binary.BigEndian, f.weight); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = binary.Write(&payload, binary.BigEndian, f.Size); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// First pass will build the mask to be serialized, second pass will serialize
|
||||
// each of the fields that appear in the mask.
|
||||
|
||||
var mask uint16
|
||||
|
||||
if len(f.Properties.ContentType) > 0 {
|
||||
mask = mask | flagContentType
|
||||
}
|
||||
if len(f.Properties.ContentEncoding) > 0 {
|
||||
mask = mask | flagContentEncoding
|
||||
}
|
||||
if f.Properties.Headers != nil && len(f.Properties.Headers) > 0 {
|
||||
mask = mask | flagHeaders
|
||||
}
|
||||
if f.Properties.DeliveryMode > 0 {
|
||||
mask = mask | flagDeliveryMode
|
||||
}
|
||||
if f.Properties.Priority > 0 {
|
||||
mask = mask | flagPriority
|
||||
}
|
||||
if len(f.Properties.CorrelationId) > 0 {
|
||||
mask = mask | flagCorrelationId
|
||||
}
|
||||
if len(f.Properties.ReplyTo) > 0 {
|
||||
mask = mask | flagReplyTo
|
||||
}
|
||||
if len(f.Properties.Expiration) > 0 {
|
||||
mask = mask | flagExpiration
|
||||
}
|
||||
if len(f.Properties.MessageId) > 0 {
|
||||
mask = mask | flagMessageId
|
||||
}
|
||||
if f.Properties.Timestamp != zeroTime {
|
||||
mask = mask | flagTimestamp
|
||||
}
|
||||
if len(f.Properties.Type) > 0 {
|
||||
mask = mask | flagType
|
||||
}
|
||||
if len(f.Properties.UserId) > 0 {
|
||||
mask = mask | flagUserId
|
||||
}
|
||||
if len(f.Properties.AppId) > 0 {
|
||||
mask = mask | flagAppId
|
||||
}
|
||||
|
||||
if err = binary.Write(&payload, binary.BigEndian, mask); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if hasProperty(mask, flagContentType) {
|
||||
if err = writeShortstr(&payload, f.Properties.ContentType); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if hasProperty(mask, flagContentEncoding) {
|
||||
if err = writeShortstr(&payload, f.Properties.ContentEncoding); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if hasProperty(mask, flagHeaders) {
|
||||
if err = writeTable(&payload, f.Properties.Headers); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if hasProperty(mask, flagDeliveryMode) {
|
||||
if err = binary.Write(&payload, binary.BigEndian, f.Properties.DeliveryMode); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if hasProperty(mask, flagPriority) {
|
||||
if err = binary.Write(&payload, binary.BigEndian, f.Properties.Priority); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if hasProperty(mask, flagCorrelationId) {
|
||||
if err = writeShortstr(&payload, f.Properties.CorrelationId); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if hasProperty(mask, flagReplyTo) {
|
||||
if err = writeShortstr(&payload, f.Properties.ReplyTo); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if hasProperty(mask, flagExpiration) {
|
||||
if err = writeShortstr(&payload, f.Properties.Expiration); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if hasProperty(mask, flagMessageId) {
|
||||
if err = writeShortstr(&payload, f.Properties.MessageId); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if hasProperty(mask, flagTimestamp) {
|
||||
if err = binary.Write(&payload, binary.BigEndian, uint64(f.Properties.Timestamp.Unix())); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if hasProperty(mask, flagType) {
|
||||
if err = writeShortstr(&payload, f.Properties.Type); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if hasProperty(mask, flagUserId) {
|
||||
if err = writeShortstr(&payload, f.Properties.UserId); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if hasProperty(mask, flagAppId) {
|
||||
if err = writeShortstr(&payload, f.Properties.AppId); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return writeFrame(w, frameHeader, f.ChannelId, payload.Bytes())
|
||||
}
|
||||
|
||||
// Body
|
||||
//
|
||||
// Payload is one byte range from the full body whose size is declared in the
|
||||
// Header frame
|
||||
func (f *BodyFrame) write(w io.Writer) (err error) {
|
||||
return writeFrame(w, frameBody, f.ChannelId, f.Body)
|
||||
}
|
||||
|
||||
func writeFrame(w io.Writer, typ uint8, channel uint16, payload []byte) (err error) {
|
||||
end := []byte{frameEnd}
|
||||
size := uint(len(payload))
|
||||
|
||||
_, err = w.Write([]byte{
|
||||
byte(typ),
|
||||
byte((channel & 0xff00) >> 8),
|
||||
byte((channel & 0x00ff) >> 0),
|
||||
byte((size & 0xff000000) >> 24),
|
||||
byte((size & 0x00ff0000) >> 16),
|
||||
byte((size & 0x0000ff00) >> 8),
|
||||
byte((size & 0x000000ff) >> 0),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = w.Write(payload); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = w.Write(end); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func writeShortstr(w io.Writer, s string) (err error) {
|
||||
b := []byte(s)
|
||||
|
||||
var length = uint8(len(b))
|
||||
|
||||
if err = binary.Write(w, binary.BigEndian, length); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = w.Write(b[:length]); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func writeLongstr(w io.Writer, s string) (err error) {
|
||||
b := []byte(s)
|
||||
|
||||
var length = uint32(len(b))
|
||||
|
||||
if err = binary.Write(w, binary.BigEndian, length); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = w.Write(b[:length]); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
'A': []interface{}
|
||||
'D': Decimal
|
||||
'F': Table
|
||||
'I': int32
|
||||
'S': string
|
||||
'T': time.Time
|
||||
'V': nil
|
||||
'b': byte
|
||||
'd': float64
|
||||
'f': float32
|
||||
'l': int64
|
||||
's': int16
|
||||
't': bool
|
||||
'x': []byte
|
||||
*/
|
||||
func writeField(w io.Writer, value interface{}) (err error) {
|
||||
var buf [9]byte
|
||||
var enc []byte
|
||||
|
||||
switch v := value.(type) {
|
||||
case bool:
|
||||
buf[0] = 't'
|
||||
if v {
|
||||
buf[1] = byte(1)
|
||||
} else {
|
||||
buf[1] = byte(0)
|
||||
}
|
||||
enc = buf[:2]
|
||||
|
||||
case byte:
|
||||
buf[0] = 'b'
|
||||
buf[1] = byte(v)
|
||||
enc = buf[:2]
|
||||
|
||||
case int16:
|
||||
buf[0] = 's'
|
||||
binary.BigEndian.PutUint16(buf[1:3], uint16(v))
|
||||
enc = buf[:3]
|
||||
|
||||
case int:
|
||||
buf[0] = 'I'
|
||||
binary.BigEndian.PutUint32(buf[1:5], uint32(v))
|
||||
enc = buf[:5]
|
||||
|
||||
case int32:
|
||||
buf[0] = 'I'
|
||||
binary.BigEndian.PutUint32(buf[1:5], uint32(v))
|
||||
enc = buf[:5]
|
||||
|
||||
case int64:
|
||||
buf[0] = 'l'
|
||||
binary.BigEndian.PutUint64(buf[1:9], uint64(v))
|
||||
enc = buf[:9]
|
||||
|
||||
case float32:
|
||||
buf[0] = 'f'
|
||||
binary.BigEndian.PutUint32(buf[1:5], math.Float32bits(v))
|
||||
enc = buf[:5]
|
||||
|
||||
case float64:
|
||||
buf[0] = 'd'
|
||||
binary.BigEndian.PutUint64(buf[1:9], math.Float64bits(v))
|
||||
enc = buf[:9]
|
||||
|
||||
case Decimal:
|
||||
buf[0] = 'D'
|
||||
buf[1] = byte(v.Scale)
|
||||
binary.BigEndian.PutUint32(buf[2:6], uint32(v.Value))
|
||||
enc = buf[:6]
|
||||
|
||||
case string:
|
||||
buf[0] = 'S'
|
||||
binary.BigEndian.PutUint32(buf[1:5], uint32(len(v)))
|
||||
enc = append(buf[:5], []byte(v)...)
|
||||
|
||||
case []interface{}: // field-array
|
||||
buf[0] = 'A'
|
||||
|
||||
sec := new(bytes.Buffer)
|
||||
for _, val := range v {
|
||||
if err = writeField(sec, val); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
binary.BigEndian.PutUint32(buf[1:5], uint32(sec.Len()))
|
||||
if _, err = w.Write(buf[:5]); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = w.Write(sec.Bytes()); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
|
||||
case time.Time:
|
||||
buf[0] = 'T'
|
||||
binary.BigEndian.PutUint64(buf[1:9], uint64(v.Unix()))
|
||||
enc = buf[:9]
|
||||
|
||||
case Table:
|
||||
if _, err = w.Write([]byte{'F'}); err != nil {
|
||||
return
|
||||
}
|
||||
return writeTable(w, v)
|
||||
|
||||
case []byte:
|
||||
buf[0] = 'x'
|
||||
binary.BigEndian.PutUint32(buf[1:5], uint32(len(v)))
|
||||
if _, err = w.Write(buf[0:5]); err != nil {
|
||||
return
|
||||
}
|
||||
if _, err = w.Write(v); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
|
||||
case nil:
|
||||
buf[0] = 'V'
|
||||
enc = buf[:1]
|
||||
|
||||
default:
|
||||
return ErrFieldType
|
||||
}
|
||||
|
||||
_, err = w.Write(enc)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func writeTable(w io.Writer, table Table) (err error) {
|
||||
var buf bytes.Buffer
|
||||
|
||||
for key, val := range table {
|
||||
if err = writeShortstr(&buf, key); err != nil {
|
||||
return
|
||||
}
|
||||
if err = writeField(&buf, val); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return writeLongstr(w, buf.String())
|
||||
}
|
||||
@@ -13,4 +13,4 @@ test-pull-bin:
|
||||
|
||||
test-pull-expect:
|
||||
@mkdir -p expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect/http/\* expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect2/http/\* expect
|
||||
|
||||
@@ -47,7 +47,7 @@ func replaceForwardedFor(item *api.OutputChannelItem) {
|
||||
item.ConnectionInfo.ClientPort = ""
|
||||
}
|
||||
|
||||
func handleHTTP2Stream(http2Assembler *Http2Assembler, tcpID *api.TcpID, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions) error {
|
||||
func handleHTTP2Stream(http2Assembler *Http2Assembler, tcpID *api.TcpID, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) error {
|
||||
streamID, messageHTTP1, isGrpc, err := http2Assembler.readMessage()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -58,7 +58,7 @@ func handleHTTP2Stream(http2Assembler *Http2Assembler, tcpID *api.TcpID, superTi
|
||||
switch messageHTTP1 := messageHTTP1.(type) {
|
||||
case http.Request:
|
||||
ident := fmt.Sprintf(
|
||||
"%s->%s %s->%s %d %s",
|
||||
"%s_%s_%s_%s_%d_%s",
|
||||
tcpID.SrcIP,
|
||||
tcpID.DstIP,
|
||||
tcpID.SrcPort,
|
||||
@@ -78,7 +78,7 @@ func handleHTTP2Stream(http2Assembler *Http2Assembler, tcpID *api.TcpID, superTi
|
||||
}
|
||||
case http.Response:
|
||||
ident := fmt.Sprintf(
|
||||
"%s->%s %s->%s %d %s",
|
||||
"%s_%s_%s_%s_%d_%s",
|
||||
tcpID.DstIP,
|
||||
tcpID.SrcIP,
|
||||
tcpID.DstPort,
|
||||
@@ -110,7 +110,7 @@ func handleHTTP2Stream(http2Assembler *Http2Assembler, tcpID *api.TcpID, superTi
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleHTTP1ClientStream(b *bufio.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions) (switchingProtocolsHTTP2 bool, req *http.Request, err error) {
|
||||
func handleHTTP1ClientStream(b *bufio.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, req *http.Request, err error) {
|
||||
req, err = http.ReadRequest(b)
|
||||
if err != nil {
|
||||
return
|
||||
@@ -130,8 +130,7 @@ func handleHTTP1ClientStream(b *bufio.Reader, tcpID *api.TcpID, counterPair *api
|
||||
req.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
|
||||
|
||||
ident := fmt.Sprintf(
|
||||
"%d_%s:%s_%s:%s_%d_%s",
|
||||
counterPair.StreamId,
|
||||
"%s_%s_%s_%s_%d_%s",
|
||||
tcpID.SrcIP,
|
||||
tcpID.DstIP,
|
||||
tcpID.SrcPort,
|
||||
@@ -153,7 +152,7 @@ func handleHTTP1ClientStream(b *bufio.Reader, tcpID *api.TcpID, counterPair *api
|
||||
return
|
||||
}
|
||||
|
||||
func handleHTTP1ServerStream(b *bufio.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions) (switchingProtocolsHTTP2 bool, err error) {
|
||||
func handleHTTP1ServerStream(b *bufio.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, options *api.TrafficFilteringOptions, reqResMatcher *requestResponseMatcher) (switchingProtocolsHTTP2 bool, err error) {
|
||||
var res *http.Response
|
||||
res, err = http.ReadResponse(b, nil)
|
||||
if err != nil {
|
||||
@@ -174,8 +173,7 @@ func handleHTTP1ServerStream(b *bufio.Reader, tcpID *api.TcpID, counterPair *api
|
||||
res.Body = io.NopCloser(bytes.NewBuffer(body)) // rewind
|
||||
|
||||
ident := fmt.Sprintf(
|
||||
"%d_%s:%s_%s:%s_%d_%s",
|
||||
counterPair.StreamId,
|
||||
"%s_%s_%s_%s_%d_%s",
|
||||
tcpID.DstIP,
|
||||
tcpID.SrcIP,
|
||||
tcpID.DstPort,
|
||||
|
||||
@@ -84,14 +84,15 @@ type dissecting string
|
||||
|
||||
func (d dissecting) Register(extension *api.Extension) {
|
||||
extension.Protocol = &http11protocol
|
||||
extension.MatcherMap = reqResMatcher.openMessagesMap
|
||||
}
|
||||
|
||||
func (d dissecting) Ping() {
|
||||
log.Printf("pong %s", http11protocol.Name)
|
||||
}
|
||||
|
||||
func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions) error {
|
||||
func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
|
||||
reqResMatcher := _reqResMatcher.(*requestResponseMatcher)
|
||||
|
||||
var err error
|
||||
isHTTP2, _ := checkIsHTTP2Connection(b, isClient)
|
||||
|
||||
@@ -124,7 +125,7 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
|
||||
}
|
||||
|
||||
if isHTTP2 {
|
||||
err = handleHTTP2Stream(http2Assembler, tcpID, superTimer, emitter, options)
|
||||
err = handleHTTP2Stream(http2Assembler, tcpID, superTimer, emitter, options, reqResMatcher)
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
@@ -133,7 +134,7 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
|
||||
superIdentifier.Protocol = &http11protocol
|
||||
} else if isClient {
|
||||
var req *http.Request
|
||||
switchingProtocolsHTTP2, req, err = handleHTTP1ClientStream(b, tcpID, counterPair, superTimer, emitter, options)
|
||||
switchingProtocolsHTTP2, req, err = handleHTTP1ClientStream(b, tcpID, counterPair, superTimer, emitter, options, reqResMatcher)
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
@@ -144,7 +145,7 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
|
||||
// In case of an HTTP2 upgrade, duplicate the HTTP1 request into HTTP2 with stream ID 1
|
||||
if switchingProtocolsHTTP2 {
|
||||
ident := fmt.Sprintf(
|
||||
"%s->%s %s->%s 1 %s",
|
||||
"%s_%s_%s_%s_1_%s",
|
||||
tcpID.SrcIP,
|
||||
tcpID.DstIP,
|
||||
tcpID.SrcPort,
|
||||
@@ -164,7 +165,7 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
|
||||
}
|
||||
}
|
||||
} else {
|
||||
switchingProtocolsHTTP2, err = handleHTTP1ServerStream(b, tcpID, counterPair, superTimer, emitter, options)
|
||||
switchingProtocolsHTTP2, err = handleHTTP1ServerStream(b, tcpID, counterPair, superTimer, emitter, options, reqResMatcher)
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
@@ -181,7 +182,7 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string, resolvedDestination string) *api.Entry {
|
||||
func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string, resolvedDestination string, namespace string) *api.Entry {
|
||||
var host, authority, path string
|
||||
|
||||
request := item.Pair.Request.Payload.(map[string]interface{})
|
||||
@@ -279,6 +280,7 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
|
||||
IP: item.ConnectionInfo.ServerIP,
|
||||
Port: item.ConnectionInfo.ServerPort,
|
||||
},
|
||||
Namespace: namespace,
|
||||
Outgoing: item.ConnectionInfo.IsOutgoing,
|
||||
Request: reqDetails,
|
||||
Response: resDetails,
|
||||
@@ -472,6 +474,10 @@ func (d dissecting) Macros() map[string]string {
|
||||
}
|
||||
}
|
||||
|
||||
func (d dissecting) NewResponseRequestMatcher() api.RequestResponseMatcher {
|
||||
return createResponseRequestMatcher()
|
||||
}
|
||||
|
||||
var Dissector dissecting
|
||||
|
||||
func NewDissector() api.Dissector {
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -39,7 +38,6 @@ func TestRegister(t *testing.T) {
|
||||
extension := &api.Extension{}
|
||||
dissector.Register(extension)
|
||||
assert.Equal(t, "http", extension.Protocol.Name)
|
||||
assert.NotNil(t, extension.MatcherMap)
|
||||
}
|
||||
|
||||
func TestMacros(t *testing.T) {
|
||||
@@ -123,7 +121,8 @@ func TestDissect(t *testing.T) {
|
||||
SrcPort: "1",
|
||||
DstPort: "2",
|
||||
}
|
||||
err = dissector.Dissect(bufferClient, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options)
|
||||
reqResMatcher := dissector.NewResponseRequestMatcher()
|
||||
err = dissector.Dissect(bufferClient, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
panic(err)
|
||||
}
|
||||
@@ -141,7 +140,7 @@ func TestDissect(t *testing.T) {
|
||||
SrcPort: "2",
|
||||
DstPort: "1",
|
||||
}
|
||||
err = dissector.Dissect(bufferServer, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options)
|
||||
err = dissector.Dissect(bufferServer, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
panic(err)
|
||||
}
|
||||
@@ -155,14 +154,6 @@ func TestDissect(t *testing.T) {
|
||||
|
||||
stop <- true
|
||||
|
||||
sort.Slice(items, func(i, j int) bool {
|
||||
iMarshaled, err := json.Marshal(items[i])
|
||||
assert.Nil(t, err)
|
||||
jMarshaled, err := json.Marshal(items[j])
|
||||
assert.Nil(t, err)
|
||||
return len(iMarshaled) < len(jMarshaled)
|
||||
})
|
||||
|
||||
marshaled, err := json.Marshal(items)
|
||||
assert.Nil(t, err)
|
||||
|
||||
@@ -214,7 +205,7 @@ func TestAnalyze(t *testing.T) {
|
||||
|
||||
var entries []*api.Entry
|
||||
for _, item := range items {
|
||||
entry := dissector.Analyze(item, "", "")
|
||||
entry := dissector.Analyze(item, "", "", "")
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
|
||||
|
||||
@@ -8,16 +8,20 @@ import (
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
var reqResMatcher = createResponseRequestMatcher() // global
// Key is {client_addr}:{client_port}->{dest_addr}:{dest_port}_{incremental_counter}
// Key is {client_addr}_{client_port}_{dest_addr}_{dest_port}_{incremental_counter}_{proto_ident}
type requestResponseMatcher struct {
openMessagesMap *sync.Map
}
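An illustrative sketch of a key in the new format described by the comment above; the addresses, ports, counter, and protocol identifier are made-up example values, and the real dissectors build their idents with similar fmt.Sprintf calls shown elsewhere in this diff:

package main

import "fmt"

func main() {
	// {client_addr}_{client_port}_{dest_addr}_{dest_port}_{incremental_counter}_{proto_ident}
	clientAddr, clientPort := "10.0.0.5", "51234"
	destAddr, destPort := "10.0.0.9", "8080"
	counter := 3          // per-stream incremental request counter (example)
	protoIdent := "HTTP1" // example protocol identifier

	ident := fmt.Sprintf("%s_%s_%s_%s_%d_%s",
		clientAddr, clientPort, destAddr, destPort, counter, protoIdent)

	fmt.Println(ident) // 10.0.0.5_51234_10.0.0.9_8080_3_HTTP1
}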
|
||||
|
||||
func createResponseRequestMatcher() requestResponseMatcher {
|
||||
newMatcher := &requestResponseMatcher{openMessagesMap: &sync.Map{}}
|
||||
return *newMatcher
|
||||
func createResponseRequestMatcher() api.RequestResponseMatcher {
|
||||
return &requestResponseMatcher{openMessagesMap: &sync.Map{}}
|
||||
}
|
||||
|
||||
func (matcher *requestResponseMatcher) GetMap() *sync.Map {
|
||||
return matcher.openMessagesMap
|
||||
}
|
||||
|
||||
func (matcher *requestResponseMatcher) SetMaxTry(value int) {
|
||||
}
|
||||
|
||||
func (matcher *requestResponseMatcher) registerRequest(ident string, request *http.Request, captureTime time.Time, protoMinor int) *api.OutputChannelItem {
|
||||
|
||||
16 tap/extensions/kafka/Makefile Normal file
@@ -0,0 +1,16 @@
|
||||
skipbin := $$(find bin -mindepth 1 -maxdepth 1)
|
||||
skipexpect := $$(find expect -mindepth 1 -maxdepth 1)
|
||||
|
||||
test: test-pull-bin test-pull-expect
|
||||
@MIZU_TEST=1 go test -v ./... -coverpkg=./... -race -coverprofile=coverage.out -covermode=atomic
|
||||
|
||||
test-update: test-pull-bin
|
||||
@MIZU_TEST=1 TEST_UPDATE=1 go test -v ./... -coverpkg=./... -coverprofile=coverage.out -covermode=atomic
|
||||
|
||||
test-pull-bin:
|
||||
@mkdir -p bin
|
||||
@[ "${skipbin}" ] && echo "Skipping downloading BINs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp gs://static.up9.io/mizu/test-pcap/bin/kafka/\*.bin bin
|
||||
|
||||
test-pull-expect:
|
||||
@mkdir -p expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect/kafka/\* expect
|
||||
@@ -1,584 +0,0 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Bytes is an interface implemented by types that represent immutable
|
||||
// sequences of bytes.
|
||||
//
|
||||
// Bytes values are used to abstract the location where record keys and
|
||||
// values are read from (e.g. in-memory buffers, network sockets, files).
|
||||
//
|
||||
// The Close method should be called to release resources held by the object
|
||||
// when the program is done with it.
|
||||
//
|
||||
// Bytes values are generally not safe to use concurrently from multiple
|
||||
// goroutines.
|
||||
type Bytes interface {
|
||||
io.ReadCloser
|
||||
// Returns the number of bytes remaining to be read from the payload.
|
||||
Len() int
|
||||
}
|
||||
|
||||
// NewBytes constructs a Bytes value from b.
|
||||
//
|
||||
// The returned value references b, it does not make a copy of the backing
|
||||
// array.
|
||||
//
|
||||
// If b is nil, nil is returned to represent a null BYTES value in the kafka
|
||||
// protocol.
|
||||
func NewBytes(b []byte) Bytes {
|
||||
if b == nil {
|
||||
return nil
|
||||
}
|
||||
r := new(bytesReader)
|
||||
r.Reset(b)
|
||||
return r
|
||||
}
|
||||
|
||||
// ReadAll is similar to ioutil.ReadAll, but it takes advantage of knowing the
|
||||
// length of b to minimize the memory footprint.
|
||||
//
|
||||
// The function returns a nil slice if b is nil.
|
||||
// func ReadAll(b Bytes) ([]byte, error) {
|
||||
// if b == nil {
|
||||
// return nil, nil
|
||||
// }
|
||||
// s := make([]byte, b.Len())
|
||||
// _, err := io.ReadFull(b, s)
|
||||
// return s, err
|
||||
// }
|
||||
|
||||
type bytesReader struct{ bytes.Reader }
|
||||
|
||||
func (*bytesReader) Close() error { return nil }
|
||||
|
||||
type refCount uintptr
|
||||
|
||||
func (rc *refCount) ref() { atomic.AddUintptr((*uintptr)(rc), 1) }
|
||||
|
||||
func (rc *refCount) unref(onZero func()) {
|
||||
if atomic.AddUintptr((*uintptr)(rc), ^uintptr(0)) == 0 {
|
||||
onZero()
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// Size of the memory buffer for a single page. We use a fairly
|
||||
// large size here (64 KiB) because batches exchanged with kafka
|
||||
// tend to be multiple kilobytes in size, sometimes hundreds.
|
||||
// Using large pages amortizes the overhead of the page metadata
|
||||
// and algorithms to manage the pages.
|
||||
pageSize = 65536
|
||||
)
|
||||
|
||||
type page struct {
|
||||
refc refCount
|
||||
offset int64
|
||||
length int
|
||||
buffer *[pageSize]byte
|
||||
}
|
||||
|
||||
func newPage(offset int64) *page {
|
||||
p, _ := pagePool.Get().(*page)
|
||||
if p != nil {
|
||||
p.offset = offset
|
||||
p.length = 0
|
||||
p.ref()
|
||||
} else {
|
||||
p = &page{
|
||||
refc: 1,
|
||||
offset: offset,
|
||||
buffer: &[pageSize]byte{},
|
||||
}
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *page) ref() { p.refc.ref() }
|
||||
|
||||
func (p *page) unref() { p.refc.unref(func() { pagePool.Put(p) }) }
|
||||
|
||||
func (p *page) slice(begin, end int64) []byte {
|
||||
i, j := begin-p.offset, end-p.offset
|
||||
|
||||
if i < 0 {
|
||||
i = 0
|
||||
} else if i > pageSize {
|
||||
i = pageSize
|
||||
}
|
||||
|
||||
if j < 0 {
|
||||
j = 0
|
||||
} else if j > pageSize {
|
||||
j = pageSize
|
||||
}
|
||||
|
||||
if i < j {
|
||||
return p.buffer[i:j]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *page) Cap() int { return pageSize }
|
||||
|
||||
func (p *page) Len() int { return p.length }
|
||||
|
||||
func (p *page) Size() int64 { return int64(p.length) }
|
||||
|
||||
func (p *page) Truncate(n int) {
|
||||
if n < p.length {
|
||||
p.length = n
|
||||
}
|
||||
}
|
||||
|
||||
func (p *page) ReadAt(b []byte, off int64) (int, error) {
|
||||
if off -= p.offset; off < 0 || off > pageSize {
|
||||
panic("offset out of range")
|
||||
}
|
||||
if off > int64(p.length) {
|
||||
return 0, nil
|
||||
}
|
||||
return copy(b, p.buffer[off:p.length]), nil
|
||||
}
|
||||
|
||||
func (p *page) ReadFrom(r io.Reader) (int64, error) {
|
||||
n, err := io.ReadFull(r, p.buffer[p.length:])
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
err = nil
|
||||
}
|
||||
p.length += n
|
||||
return int64(n), err
|
||||
}
|
||||
|
||||
func (p *page) WriteAt(b []byte, off int64) (int, error) {
|
||||
if off -= p.offset; off < 0 || off > pageSize {
|
||||
panic("offset out of range")
|
||||
}
|
||||
n := copy(p.buffer[off:], b)
|
||||
if end := int(off) + n; end > p.length {
|
||||
p.length = end
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (p *page) Write(b []byte) (int, error) {
|
||||
return p.WriteAt(b, p.offset+int64(p.length))
|
||||
}
|
||||
|
||||
var (
|
||||
_ io.ReaderAt = (*page)(nil)
|
||||
_ io.ReaderFrom = (*page)(nil)
|
||||
_ io.Writer = (*page)(nil)
|
||||
_ io.WriterAt = (*page)(nil)
|
||||
)
|
||||
|
||||
type pageBuffer struct {
|
||||
refc refCount
|
||||
pages contiguousPages
|
||||
length int
|
||||
cursor int
|
||||
}
|
||||
|
||||
func newPageBuffer() *pageBuffer {
|
||||
b, _ := pageBufferPool.Get().(*pageBuffer)
|
||||
if b != nil {
|
||||
b.cursor = 0
|
||||
b.refc.ref()
|
||||
} else {
|
||||
b = &pageBuffer{
|
||||
refc: 1,
|
||||
pages: make(contiguousPages, 0, 16),
|
||||
}
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) unref() {
|
||||
pb.refc.unref(func() {
|
||||
pb.pages.unref()
|
||||
pb.pages.clear()
|
||||
pb.pages = pb.pages[:0]
|
||||
pb.length = 0
|
||||
pageBufferPool.Put(pb)
|
||||
})
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) newPage() *page {
|
||||
return newPage(int64(pb.length))
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) Len() int {
|
||||
return pb.length - pb.cursor
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) Size() int64 {
|
||||
return int64(pb.length)
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) Discard(n int) (int, error) {
|
||||
remain := pb.length - pb.cursor
|
||||
if remain < n {
|
||||
n = remain
|
||||
}
|
||||
pb.cursor += n
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) Truncate(n int) {
|
||||
if n < pb.length {
|
||||
pb.length = n
|
||||
|
||||
if n < pb.cursor {
|
||||
pb.cursor = n
|
||||
}
|
||||
|
||||
for i := range pb.pages {
|
||||
if p := pb.pages[i]; p.length <= n {
|
||||
n -= p.length
|
||||
} else {
|
||||
if n > 0 {
|
||||
pb.pages[i].Truncate(n)
|
||||
i++
|
||||
}
|
||||
pb.pages[i:].unref()
|
||||
pb.pages[i:].clear()
|
||||
pb.pages = pb.pages[:i]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) Seek(offset int64, whence int) (int64, error) {
|
||||
c, err := seek(int64(pb.cursor), int64(pb.length), offset, whence)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
pb.cursor = int(c)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) ReadByte() (byte, error) {
|
||||
b := [1]byte{}
|
||||
_, err := pb.Read(b[:])
|
||||
return b[0], err
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) Read(b []byte) (int, error) {
|
||||
if pb.cursor >= pb.length {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n, err := pb.ReadAt(b, int64(pb.cursor))
|
||||
pb.cursor += n
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) ReadAt(b []byte, off int64) (int, error) {
|
||||
return pb.pages.ReadAt(b, off)
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) ReadFrom(r io.Reader) (int64, error) {
|
||||
if len(pb.pages) == 0 {
|
||||
pb.pages = append(pb.pages, pb.newPage())
|
||||
}
|
||||
|
||||
rn := int64(0)
|
||||
|
||||
for {
|
||||
tail := pb.pages[len(pb.pages)-1]
|
||||
free := tail.Cap() - tail.Len()
|
||||
|
||||
if free == 0 {
|
||||
tail = pb.newPage()
|
||||
free = pageSize
|
||||
pb.pages = append(pb.pages, tail)
|
||||
}
|
||||
|
||||
n, err := tail.ReadFrom(r)
|
||||
pb.length += int(n)
|
||||
rn += n
|
||||
if n < int64(free) {
|
||||
return rn, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) WriteString(s string) (int, error) {
|
||||
return pb.Write([]byte(s))
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) Write(b []byte) (int, error) {
|
||||
wn := len(b)
|
||||
if wn == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if len(pb.pages) == 0 {
|
||||
pb.pages = append(pb.pages, pb.newPage())
|
||||
}
|
||||
|
||||
for len(b) != 0 {
|
||||
tail := pb.pages[len(pb.pages)-1]
|
||||
free := tail.Cap() - tail.Len()
|
||||
|
||||
if len(b) <= free {
|
||||
_, _ = tail.Write(b)
|
||||
pb.length += len(b)
|
||||
break
|
||||
}
|
||||
|
||||
_, _ = tail.Write(b[:free])
|
||||
b = b[free:]
|
||||
|
||||
pb.length += free
|
||||
pb.pages = append(pb.pages, pb.newPage())
|
||||
}
|
||||
|
||||
return wn, nil
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) WriteAt(b []byte, off int64) (int, error) {
|
||||
n, err := pb.pages.WriteAt(b, off)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
if n < len(b) {
|
||||
_, _ = pb.Write(b[n:])
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (pb *pageBuffer) WriteTo(w io.Writer) (int64, error) {
|
||||
var wn int
|
||||
var err error
|
||||
pb.pages.scan(int64(pb.cursor), int64(pb.length), func(b []byte) bool {
|
||||
var n int
|
||||
n, err = w.Write(b)
|
||||
wn += n
|
||||
return err == nil
|
||||
})
|
||||
pb.cursor += wn
|
||||
return int64(wn), err
|
||||
}
|
||||
|
||||
var (
|
||||
_ io.ReaderAt = (*pageBuffer)(nil)
|
||||
_ io.ReaderFrom = (*pageBuffer)(nil)
|
||||
_ io.StringWriter = (*pageBuffer)(nil)
|
||||
_ io.Writer = (*pageBuffer)(nil)
|
||||
_ io.WriterAt = (*pageBuffer)(nil)
|
||||
_ io.WriterTo = (*pageBuffer)(nil)
|
||||
|
||||
pagePool sync.Pool
|
||||
pageBufferPool sync.Pool
|
||||
)
|
||||
|
||||
type contiguousPages []*page
|
||||
|
||||
func (pages contiguousPages) unref() {
|
||||
for _, p := range pages {
|
||||
p.unref()
|
||||
}
|
||||
}
|
||||
|
||||
func (pages contiguousPages) clear() {
|
||||
for i := range pages {
|
||||
pages[i] = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (pages contiguousPages) ReadAt(b []byte, off int64) (int, error) {
|
||||
rn := 0
|
||||
|
||||
for _, p := range pages.slice(off, off+int64(len(b))) {
|
||||
n, _ := p.ReadAt(b, off)
|
||||
b = b[n:]
|
||||
rn += n
|
||||
off += int64(n)
|
||||
}
|
||||
|
||||
return rn, nil
|
||||
}
|
||||
|
||||
func (pages contiguousPages) WriteAt(b []byte, off int64) (int, error) {
|
||||
wn := 0
|
||||
|
||||
for _, p := range pages.slice(off, off+int64(len(b))) {
|
||||
n, _ := p.WriteAt(b, off)
|
||||
b = b[n:]
|
||||
wn += n
|
||||
off += int64(n)
|
||||
}
|
||||
|
||||
return wn, nil
|
||||
}
|
||||
|
||||
func (pages contiguousPages) slice(begin, end int64) contiguousPages {
|
||||
i := pages.indexOf(begin)
|
||||
j := pages.indexOf(end)
|
||||
if j < len(pages) {
|
||||
j++
|
||||
}
|
||||
return pages[i:j]
|
||||
}
|
||||
|
||||
func (pages contiguousPages) indexOf(offset int64) int {
|
||||
if len(pages) == 0 {
|
||||
return 0
|
||||
}
|
||||
return int((offset - pages[0].offset) / pageSize)
|
||||
}
|
||||
|
||||
func (pages contiguousPages) scan(begin, end int64, f func([]byte) bool) {
|
||||
for _, p := range pages.slice(begin, end) {
|
||||
if !f(p.slice(begin, end)) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
_ io.ReaderAt = contiguousPages{}
|
||||
_ io.WriterAt = contiguousPages{}
|
||||
)
|
||||
|
||||
type pageRef struct {
|
||||
pages contiguousPages
|
||||
offset int64
|
||||
cursor int64
|
||||
length uint32
|
||||
once uint32
|
||||
}
|
||||
|
||||
func (ref *pageRef) unref() {
|
||||
if atomic.CompareAndSwapUint32(&ref.once, 0, 1) {
|
||||
ref.pages.unref()
|
||||
ref.pages.clear()
|
||||
ref.pages = nil
|
||||
ref.offset = 0
|
||||
ref.cursor = 0
|
||||
ref.length = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (ref *pageRef) Len() int { return int(ref.Size() - ref.cursor) }
|
||||
|
||||
func (ref *pageRef) Size() int64 { return int64(ref.length) }
|
||||
|
||||
func (ref *pageRef) Close() error { ref.unref(); return nil }
|
||||
|
||||
func (ref *pageRef) String() string {
|
||||
return fmt.Sprintf("[offset=%d cursor=%d length=%d]", ref.offset, ref.cursor, ref.length)
|
||||
}
|
||||
|
||||
func (ref *pageRef) Seek(offset int64, whence int) (int64, error) {
|
||||
c, err := seek(ref.cursor, int64(ref.length), offset, whence)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
ref.cursor = c
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (ref *pageRef) ReadByte() (byte, error) {
|
||||
var c byte
|
||||
var ok bool
|
||||
ref.scan(ref.cursor, func(b []byte) bool {
|
||||
c, ok = b[0], true
|
||||
return false
|
||||
})
|
||||
if ok {
|
||||
ref.cursor++
|
||||
} else {
|
||||
return 0, io.EOF
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (ref *pageRef) Read(b []byte) (int, error) {
|
||||
if ref.cursor >= int64(ref.length) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n, err := ref.ReadAt(b, ref.cursor)
|
||||
ref.cursor += int64(n)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (ref *pageRef) ReadAt(b []byte, off int64) (int, error) {
|
||||
limit := ref.offset + int64(ref.length)
|
||||
off += ref.offset
|
||||
|
||||
if off >= limit {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
if off+int64(len(b)) > limit {
|
||||
b = b[:limit-off]
|
||||
}
|
||||
|
||||
if len(b) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
n, err := ref.pages.ReadAt(b, off)
|
||||
if n == 0 && err == nil {
|
||||
err = io.EOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (ref *pageRef) WriteTo(w io.Writer) (wn int64, err error) {
|
||||
ref.scan(ref.cursor, func(b []byte) bool {
|
||||
var n int
|
||||
n, err = w.Write(b)
|
||||
wn += int64(n)
|
||||
return err == nil
|
||||
})
|
||||
ref.cursor += wn
|
||||
return
|
||||
}
|
||||
|
||||
func (ref *pageRef) scan(off int64, f func([]byte) bool) {
|
||||
begin := ref.offset + off
|
||||
end := ref.offset + int64(ref.length)
|
||||
ref.pages.scan(begin, end, f)
|
||||
}
|
||||
|
||||
var (
|
||||
_ io.Closer = (*pageRef)(nil)
|
||||
_ io.Seeker = (*pageRef)(nil)
|
||||
_ io.Reader = (*pageRef)(nil)
|
||||
_ io.ReaderAt = (*pageRef)(nil)
|
||||
_ io.WriterTo = (*pageRef)(nil)
|
||||
)
|
||||
|
||||
func seek(cursor, limit, offset int64, whence int) (int64, error) {
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
// absolute offset
|
||||
case io.SeekCurrent:
|
||||
offset = cursor + offset
|
||||
case io.SeekEnd:
|
||||
offset = limit - offset
|
||||
default:
|
||||
return -1, fmt.Errorf("seek: invalid whence value: %d", whence)
|
||||
}
|
||||
if offset < 0 {
|
||||
offset = 0
|
||||
}
|
||||
if offset > limit {
|
||||
offset = limit
|
||||
}
|
||||
return offset, nil
|
||||
}
|
||||
@@ -1,143 +0,0 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
)
|
||||
|
||||
type Cluster struct {
|
||||
ClusterID string
|
||||
Controller int32
|
||||
Brokers map[int32]Broker
|
||||
Topics map[string]Topic
|
||||
}
|
||||
|
||||
func (c Cluster) BrokerIDs() []int32 {
|
||||
brokerIDs := make([]int32, 0, len(c.Brokers))
|
||||
for id := range c.Brokers {
|
||||
brokerIDs = append(brokerIDs, id)
|
||||
}
|
||||
sort.Slice(brokerIDs, func(i, j int) bool {
|
||||
return brokerIDs[i] < brokerIDs[j]
|
||||
})
|
||||
return brokerIDs
|
||||
}
|
||||
|
||||
func (c Cluster) TopicNames() []string {
|
||||
topicNames := make([]string, 0, len(c.Topics))
|
||||
for name := range c.Topics {
|
||||
topicNames = append(topicNames, name)
|
||||
}
|
||||
sort.Strings(topicNames)
|
||||
return topicNames
|
||||
}
|
||||
|
||||
func (c Cluster) IsZero() bool {
|
||||
return c.ClusterID == "" && c.Controller == 0 && len(c.Brokers) == 0 && len(c.Topics) == 0
|
||||
}
|
||||
|
||||
func (c Cluster) Format(w fmt.State, _ rune) {
|
||||
tw := new(tabwriter.Writer)
|
||||
fmt.Fprintf(w, "CLUSTER: %q\n\n", c.ClusterID)
|
||||
|
||||
tw.Init(w, 0, 8, 2, ' ', 0)
|
||||
fmt.Fprint(tw, " BROKER\tHOST\tPORT\tRACK\tCONTROLLER\n")
|
||||
|
||||
for _, id := range c.BrokerIDs() {
|
||||
broker := c.Brokers[id]
|
||||
fmt.Fprintf(tw, " %d\t%s\t%d\t%s\t%t\n", broker.ID, broker.Host, broker.Port, broker.Rack, broker.ID == c.Controller)
|
||||
}
|
||||
|
||||
tw.Flush()
|
||||
fmt.Fprintln(w)
|
||||
|
||||
tw.Init(w, 0, 8, 2, ' ', 0)
|
||||
fmt.Fprint(tw, " TOPIC\tPARTITIONS\tBROKERS\n")
|
||||
topicNames := c.TopicNames()
|
||||
brokers := make(map[int32]struct{}, len(c.Brokers))
|
||||
brokerIDs := make([]int32, 0, len(c.Brokers))
|
||||
|
||||
for _, name := range topicNames {
|
||||
topic := c.Topics[name]
|
||||
|
||||
for _, p := range topic.Partitions {
|
||||
for _, id := range p.Replicas {
|
||||
brokers[id] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
for id := range brokers {
|
||||
brokerIDs = append(brokerIDs, id)
|
||||
}
|
||||
|
||||
fmt.Fprintf(tw, " %s\t%d\t%s\n", topic.Name, len(topic.Partitions), formatBrokerIDs(brokerIDs, -1))
|
||||
|
||||
for id := range brokers {
|
||||
delete(brokers, id)
|
||||
}
|
||||
|
||||
brokerIDs = brokerIDs[:0]
|
||||
}
|
||||
|
||||
tw.Flush()
|
||||
fmt.Fprintln(w)
|
||||
|
||||
if w.Flag('+') {
|
||||
for _, name := range topicNames {
|
||||
fmt.Fprintf(w, " TOPIC: %q\n\n", name)
|
||||
|
||||
tw.Init(w, 0, 8, 2, ' ', 0)
|
||||
fmt.Fprint(tw, " PARTITION\tREPLICAS\tISR\tOFFLINE\n")
|
||||
|
||||
for _, p := range c.Topics[name].Partitions {
|
||||
fmt.Fprintf(tw, " %d\t%s\t%s\t%s\n", p.ID,
|
||||
formatBrokerIDs(p.Replicas, -1),
|
||||
formatBrokerIDs(p.ISR, p.Leader),
|
||||
formatBrokerIDs(p.Offline, -1),
|
||||
)
|
||||
}
|
||||
|
||||
tw.Flush()
|
||||
fmt.Fprintln(w)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func formatBrokerIDs(brokerIDs []int32, leader int32) string {
|
||||
if len(brokerIDs) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
if len(brokerIDs) == 1 {
|
||||
return itoa(brokerIDs[0])
|
||||
}
|
||||
|
||||
sort.Slice(brokerIDs, func(i, j int) bool {
|
||||
id1 := brokerIDs[i]
|
||||
id2 := brokerIDs[j]
|
||||
|
||||
if id1 == leader {
|
||||
return true
|
||||
}
|
||||
|
||||
if id2 == leader {
|
||||
return false
|
||||
}
|
||||
|
||||
return id1 < id2
|
||||
})
|
||||
|
||||
brokerNames := make([]string, len(brokerIDs))
|
||||
|
||||
for i, id := range brokerIDs {
|
||||
brokerNames[i] = itoa(id)
|
||||
}
|
||||
|
||||
return strings.Join(brokerNames, ",")
|
||||
}
|
||||
|
||||
var (
|
||||
_ fmt.Formatter = Cluster{}
|
||||
)
|
||||
@@ -1,7 +1,6 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
@@ -9,8 +8,6 @@ import (
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type discarder interface {
|
||||
@@ -26,15 +23,6 @@ type decoder struct {
|
||||
crc32 uint32
|
||||
}
|
||||
|
||||
func (d *decoder) Reset(r io.Reader, n int) {
|
||||
d.reader = r
|
||||
d.remain = n
|
||||
d.buffer = [8]byte{}
|
||||
d.err = nil
|
||||
d.table = nil
|
||||
d.crc32 = 0
|
||||
}
|
||||
|
||||
func (d *decoder) Read(b []byte) (int, error) {
|
||||
if d.err != nil {
|
||||
return 0, d.err
|
||||
@@ -483,52 +471,3 @@ func decodeReadInt32(b []byte) int32 {
|
||||
func decodeReadInt64(b []byte) int64 {
|
||||
return int64(binary.BigEndian.Uint64(b))
|
||||
}
|
||||
|
||||
func Unmarshal(data []byte, version int16, value interface{}) error {
|
||||
typ := elemTypeOf(value)
|
||||
cache, _ := unmarshalers.Load().(map[versionedType]decodeFunc)
|
||||
key := versionedType{typ: typ, version: version}
|
||||
decode := cache[key]
|
||||
|
||||
if decode == nil {
|
||||
decode = decodeFuncOf(reflect.TypeOf(value).Elem(), version, false, structTag{
|
||||
MinVersion: -1,
|
||||
MaxVersion: -1,
|
||||
TagID: -2,
|
||||
Compact: true,
|
||||
Nullable: true,
|
||||
})
|
||||
|
||||
newCache := make(map[versionedType]decodeFunc, len(cache)+1)
|
||||
newCache[key] = decode
|
||||
|
||||
for typ, fun := range cache {
|
||||
newCache[typ] = fun
|
||||
}
|
||||
|
||||
unmarshalers.Store(newCache)
|
||||
}
|
||||
|
||||
d, _ := decoders.Get().(*decoder)
|
||||
if d == nil {
|
||||
d = &decoder{reader: bytes.NewReader(nil)}
|
||||
}
|
||||
|
||||
d.remain = len(data)
|
||||
r, _ := d.reader.(*bytes.Reader)
|
||||
r.Reset(data)
|
||||
|
||||
defer func() {
|
||||
r.Reset(nil)
|
||||
d.Reset(r, 0)
|
||||
decoders.Put(d)
|
||||
}()
|
||||
|
||||
decode(d, valueOf(value))
|
||||
return dontExpectEOF(d.err)
|
||||
}
|
||||
|
||||
var (
|
||||
decoders sync.Pool // *decoder
|
||||
unmarshalers atomic.Value // map[versionedType]decodeFunc
|
||||
)
|
||||
|
||||
@@ -1,16 +0,0 @@
|
||||
package kafka
|
||||
|
||||
import "bufio"
|
||||
|
||||
func discardN(r *bufio.Reader, sz int, n int) (int, error) {
|
||||
var err error
|
||||
if n <= sz {
|
||||
n, err = r.Discard(n)
|
||||
} else {
|
||||
n, err = r.Discard(sz)
|
||||
if err == nil {
|
||||
err = errShortRead
|
||||
}
|
||||
}
|
||||
return sz - n, err
|
||||
}
|
||||
@@ -1,537 +0,0 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"reflect"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type encoder struct {
|
||||
writer io.Writer
|
||||
err error
|
||||
table *crc32.Table
|
||||
crc32 uint32
|
||||
buffer [32]byte
|
||||
}
|
||||
|
||||
type encoderChecksum struct {
|
||||
reader io.Reader
|
||||
encoder *encoder
|
||||
}
|
||||
|
||||
func (e *encoderChecksum) Read(b []byte) (int, error) {
|
||||
n, err := e.reader.Read(b)
|
||||
if n > 0 {
|
||||
e.encoder.update(b[:n])
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (e *encoder) Reset(w io.Writer) {
|
||||
e.writer = w
|
||||
e.err = nil
|
||||
e.table = nil
|
||||
e.crc32 = 0
|
||||
e.buffer = [32]byte{}
|
||||
}
|
||||
|
||||
func (e *encoder) ReadFrom(r io.Reader) (int64, error) {
|
||||
if e.table != nil {
|
||||
r = &encoderChecksum{
|
||||
reader: r,
|
||||
encoder: e,
|
||||
}
|
||||
}
|
||||
return io.Copy(e.writer, r)
|
||||
}
|
||||
|
||||
func (e *encoder) Write(b []byte) (int, error) {
|
||||
if e.err != nil {
|
||||
return 0, e.err
|
||||
}
|
||||
n, err := e.writer.Write(b)
|
||||
if n > 0 {
|
||||
e.update(b[:n])
|
||||
}
|
||||
if err != nil {
|
||||
e.err = err
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (e *encoder) WriteByte(b byte) error {
|
||||
e.buffer[0] = b
|
||||
_, err := e.Write(e.buffer[:1])
|
||||
return err
|
||||
}
|
||||
|
||||
func (e *encoder) WriteString(s string) (int, error) {
|
||||
// This implementation is an optimization to avoid the heap allocation that
|
||||
// would occur when converting the string to a []byte to call crc32.Update.
|
||||
//
|
||||
// Strings are rarely long in the kafka protocol, so the use of a 32 byte
|
||||
// buffer is a good compromise between keeping the encoder value small and
|
||||
// limiting the number of calls to Write.
|
||||
//
|
||||
// We introduced this optimization because memory profiles on the benchmarks
|
||||
// showed that most heap allocations were caused by this code path.
|
||||
n := 0
|
||||
|
||||
for len(s) != 0 {
|
||||
c := copy(e.buffer[:], s)
|
||||
w, err := e.Write(e.buffer[:c])
|
||||
n += w
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
s = s[c:]
|
||||
}
|
||||
|
||||
return n, nil
|
||||
}
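// Editor's note: a minimal sketch (not part of the original change) of the
// chunked-copy trick used by WriteString above. The string is copied through a
// small fixed-size array, so no []byte conversion (and no heap allocation) is
// needed before handing the bytes to Write:
//
//	var buf [32]byte
//	for len(s) != 0 {
//		c := copy(buf[:], s) // copies at most 32 bytes per iteration
//		if _, err := w.Write(buf[:c]); err != nil {
//			return err
//		}
//		s = s[c:]
//	}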
|
||||
|
||||
func (e *encoder) update(b []byte) {
|
||||
if e.table != nil {
|
||||
e.crc32 = crc32.Update(e.crc32, e.table, b)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) encodeBool(v value) {
|
||||
b := int8(0)
|
||||
if v.bool() {
|
||||
b = 1
|
||||
}
|
||||
e.writeInt8(b)
|
||||
}
|
||||
|
||||
func (e *encoder) encodeInt8(v value) {
|
||||
e.writeInt8(v.int8())
|
||||
}
|
||||
|
||||
func (e *encoder) encodeInt16(v value) {
|
||||
e.writeInt16(v.int16())
|
||||
}
|
||||
|
||||
func (e *encoder) encodeInt32(v value) {
|
||||
e.writeInt32(v.int32())
|
||||
}
|
||||
|
||||
func (e *encoder) encodeInt64(v value) {
|
||||
e.writeInt64(v.int64())
|
||||
}
|
||||
|
||||
func (e *encoder) encodeString(v value) {
|
||||
e.writeString(v.string())
|
||||
}
|
||||
|
||||
func (e *encoder) encodeCompactString(v value) {
|
||||
e.writeCompactString(v.string())
|
||||
}
|
||||
|
||||
func (e *encoder) encodeNullString(v value) {
|
||||
e.writeNullString(v.string())
|
||||
}
|
||||
|
||||
func (e *encoder) encodeCompactNullString(v value) {
|
||||
e.writeCompactNullString(v.string())
|
||||
}
|
||||
|
||||
func (e *encoder) encodeBytes(v value) {
|
||||
e.writeBytes(v.bytes())
|
||||
}
|
||||
|
||||
func (e *encoder) encodeCompactBytes(v value) {
|
||||
e.writeCompactBytes(v.bytes())
|
||||
}
|
||||
|
||||
func (e *encoder) encodeNullBytes(v value) {
|
||||
e.writeNullBytes(v.bytes())
|
||||
}
|
||||
|
||||
func (e *encoder) encodeCompactNullBytes(v value) {
|
||||
e.writeCompactNullBytes(v.bytes())
|
||||
}
|
||||
|
||||
func (e *encoder) encodeArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
|
||||
a := v.array(elemType)
|
||||
n := a.length()
|
||||
e.writeInt32(int32(n))
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
encodeElem(e, a.index(i))
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) encodeCompactArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
|
||||
a := v.array(elemType)
|
||||
n := a.length()
|
||||
e.writeUnsignedVarInt(uint64(n + 1))
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
encodeElem(e, a.index(i))
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) encodeNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
|
||||
a := v.array(elemType)
|
||||
if a.isNil() {
|
||||
e.writeInt32(-1)
|
||||
return
|
||||
}
|
||||
|
||||
n := a.length()
|
||||
e.writeInt32(int32(n))
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
encodeElem(e, a.index(i))
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) encodeCompactNullArray(v value, elemType reflect.Type, encodeElem encodeFunc) {
|
||||
a := v.array(elemType)
|
||||
if a.isNil() {
|
||||
e.writeUnsignedVarInt(0)
|
||||
return
|
||||
}
|
||||
|
||||
n := a.length()
|
||||
e.writeUnsignedVarInt(uint64(n + 1))
|
||||
for i := 0; i < n; i++ {
|
||||
encodeElem(e, a.index(i))
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) writeInt8(i int8) {
|
||||
writeInt8(e.buffer[:1], i)
|
||||
_, _ = e.Write(e.buffer[:1])
|
||||
}
|
||||
|
||||
func (e *encoder) writeInt16(i int16) {
|
||||
writeInt16(e.buffer[:2], i)
|
||||
_, _ = e.Write(e.buffer[:2])
|
||||
}
|
||||
|
||||
func (e *encoder) writeInt32(i int32) {
|
||||
writeInt32(e.buffer[:4], i)
|
||||
_, _ = e.Write(e.buffer[:4])
|
||||
}
|
||||
|
||||
func (e *encoder) writeInt64(i int64) {
|
||||
writeInt64(e.buffer[:8], i)
|
||||
_, _ = e.Write(e.buffer[:8])
|
||||
}
|
||||
|
||||
func (e *encoder) writeString(s string) {
|
||||
e.writeInt16(int16(len(s)))
|
||||
_, _ = e.WriteString(s)
|
||||
}
|
||||
|
||||
func (e *encoder) writeCompactString(s string) {
|
||||
e.writeUnsignedVarInt(uint64(len(s)) + 1)
|
||||
_, _ = e.WriteString(s)
|
||||
}
|
||||
|
||||
func (e *encoder) writeNullString(s string) {
|
||||
if s == "" {
|
||||
e.writeInt16(-1)
|
||||
} else {
|
||||
e.writeInt16(int16(len(s)))
|
||||
_, _ = e.WriteString(s)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) writeCompactNullString(s string) {
|
||||
if s == "" {
|
||||
e.writeUnsignedVarInt(0)
|
||||
} else {
|
||||
e.writeUnsignedVarInt(uint64(len(s)) + 1)
|
||||
_, _ = e.WriteString(s)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) writeBytes(b []byte) {
|
||||
e.writeInt32(int32(len(b)))
|
||||
_, _ = e.Write(b)
|
||||
}
|
||||
|
||||
func (e *encoder) writeCompactBytes(b []byte) {
|
||||
e.writeUnsignedVarInt(uint64(len(b)) + 1)
|
||||
_, _ = e.Write(b)
|
||||
}
|
||||
|
||||
func (e *encoder) writeNullBytes(b []byte) {
|
||||
if b == nil {
|
||||
e.writeInt32(-1)
|
||||
} else {
|
||||
e.writeInt32(int32(len(b)))
|
||||
_, _ = e.Write(b)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) writeCompactNullBytes(b []byte) {
|
||||
if b == nil {
|
||||
e.writeUnsignedVarInt(0)
|
||||
} else {
|
||||
e.writeUnsignedVarInt(uint64(len(b)) + 1)
|
||||
_, _ = e.Write(b)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *encoder) writeUnsignedVarInt(i uint64) {
|
||||
b := e.buffer[:]
|
||||
n := 0
|
||||
|
||||
for i >= 0x80 && n < len(b) {
|
||||
b[n] = byte(i) | 0x80
|
||||
i >>= 7
|
||||
n++
|
||||
}
|
||||
|
||||
if n < len(b) {
|
||||
b[n] = byte(i)
|
||||
n++
|
||||
}
|
||||
|
||||
_, _ = e.Write(b[:n])
|
||||
}
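// Editor's note (illustrative, not part of the original change): the function
// above emits the usual unsigned varint encoding, 7 bits per byte, least
// significant group first, with the high bit set on every byte except the
// last. For example, 300 (0b1_0010_1100) encodes as two bytes:
// 0xAC (0x2C | 0x80) followed by 0x02.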
|
||||
|
||||
type encodeFunc func(*encoder, value)
|
||||
|
||||
var (
|
||||
_ io.ReaderFrom = (*encoder)(nil)
|
||||
_ io.Writer = (*encoder)(nil)
|
||||
_ io.ByteWriter = (*encoder)(nil)
|
||||
_ io.StringWriter = (*encoder)(nil)
|
||||
|
||||
writerTo = reflect.TypeOf((*io.WriterTo)(nil)).Elem()
|
||||
)
|
||||
|
||||
func encodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc {
|
||||
if reflect.PtrTo(typ).Implements(writerTo) {
|
||||
return writerEncodeFuncOf(typ)
|
||||
}
|
||||
switch typ.Kind() {
|
||||
case reflect.Bool:
|
||||
return (*encoder).encodeBool
|
||||
case reflect.Int8:
|
||||
return (*encoder).encodeInt8
|
||||
case reflect.Int16:
|
||||
return (*encoder).encodeInt16
|
||||
case reflect.Int32:
|
||||
return (*encoder).encodeInt32
|
||||
case reflect.Int64:
|
||||
return (*encoder).encodeInt64
|
||||
case reflect.String:
|
||||
return stringEncodeFuncOf(flexible, tag)
|
||||
case reflect.Struct:
|
||||
return structEncodeFuncOf(typ, version, flexible)
|
||||
case reflect.Slice:
|
||||
if typ.Elem().Kind() == reflect.Uint8 { // []byte
|
||||
return bytesEncodeFuncOf(flexible, tag)
|
||||
}
|
||||
return arrayEncodeFuncOf(typ, version, flexible, tag)
|
||||
default:
|
||||
panic("unsupported type: " + typ.String())
|
||||
}
|
||||
}
|
||||
|
||||
func stringEncodeFuncOf(flexible bool, tag structTag) encodeFunc {
|
||||
switch {
|
||||
case flexible && tag.Nullable:
|
||||
// In flexible messages, all strings are compact
|
||||
return (*encoder).encodeCompactNullString
|
||||
case flexible:
|
||||
// In flexible messages, all strings are compact
|
||||
return (*encoder).encodeCompactString
|
||||
case tag.Nullable:
|
||||
return (*encoder).encodeNullString
|
||||
default:
|
||||
return (*encoder).encodeString
|
||||
}
|
||||
}
|
||||
|
||||
func bytesEncodeFuncOf(flexible bool, tag structTag) encodeFunc {
|
||||
switch {
|
||||
case flexible && tag.Nullable:
|
||||
// In flexible messages, all arrays are compact
|
||||
return (*encoder).encodeCompactNullBytes
|
||||
case flexible:
|
||||
// In flexible messages, all arrays are compact
|
||||
return (*encoder).encodeCompactBytes
|
||||
case tag.Nullable:
|
||||
return (*encoder).encodeNullBytes
|
||||
default:
|
||||
return (*encoder).encodeBytes
|
||||
}
|
||||
}
|
||||
|
||||
func structEncodeFuncOf(typ reflect.Type, version int16, flexible bool) encodeFunc {
|
||||
type field struct {
|
||||
encode encodeFunc
|
||||
index index
|
||||
tagID int
|
||||
}
|
||||
|
||||
var fields []field
|
||||
var taggedFields []field
|
||||
|
||||
forEachStructField(typ, func(typ reflect.Type, index index, tag string) {
|
||||
if typ.Size() != 0 { // skip struct{}
|
||||
forEachStructTag(tag, func(tag structTag) bool {
|
||||
if tag.MinVersion <= version && version <= tag.MaxVersion {
|
||||
f := field{
|
||||
encode: encodeFuncOf(typ, version, flexible, tag),
|
||||
index: index,
|
||||
tagID: tag.TagID,
|
||||
}
|
||||
|
||||
if tag.TagID < -1 {
|
||||
// Normal required field
|
||||
fields = append(fields, f)
|
||||
} else {
|
||||
// Optional tagged field (flexible messages only)
|
||||
taggedFields = append(taggedFields, f)
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
})
|
||||
|
||||
return func(e *encoder, v value) {
|
||||
for i := range fields {
|
||||
f := &fields[i]
|
||||
f.encode(e, v.fieldByIndex(f.index))
|
||||
}
|
||||
|
||||
if flexible {
|
||||
// See https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields
|
||||
// for details of tag buffers in "flexible" messages.
|
||||
e.writeUnsignedVarInt(uint64(len(taggedFields)))
|
||||
|
||||
for i := range taggedFields {
|
||||
f := &taggedFields[i]
|
||||
e.writeUnsignedVarInt(uint64(f.tagID))
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
se := &encoder{writer: buf}
|
||||
f.encode(se, v.fieldByIndex(f.index))
|
||||
e.writeUnsignedVarInt(uint64(buf.Len()))
|
||||
_, _ = e.Write(buf.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
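// Editor's note: an illustrative summary (not part of the original change) of
// the tag buffer emitted above for "flexible" messages, per KIP-482. After the
// regular fields the encoder writes:
//
//	uvarint(number of tagged fields)
//	for each tagged field:
//		uvarint(tag id)
//		uvarint(length of the encoded field in bytes)
//		the encoded field bytes
//
// which is why each tagged field is first rendered into a temporary
// bytes.Buffer, so its length is known before its payload is written.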
|
||||
|
||||
func arrayEncodeFuncOf(typ reflect.Type, version int16, flexible bool, tag structTag) encodeFunc {
|
||||
elemType := typ.Elem()
|
||||
elemFunc := encodeFuncOf(elemType, version, flexible, tag)
|
||||
switch {
|
||||
case flexible && tag.Nullable:
|
||||
// In flexible messages, all arrays are compact
|
||||
return func(e *encoder, v value) { e.encodeCompactNullArray(v, elemType, elemFunc) }
|
||||
case flexible:
|
||||
// In flexible messages, all arrays are compact
|
||||
return func(e *encoder, v value) { e.encodeCompactArray(v, elemType, elemFunc) }
|
||||
case tag.Nullable:
|
||||
return func(e *encoder, v value) { e.encodeNullArray(v, elemType, elemFunc) }
|
||||
default:
|
||||
return func(e *encoder, v value) { e.encodeArray(v, elemType, elemFunc) }
|
||||
}
|
||||
}
|
||||
|
||||
func writerEncodeFuncOf(typ reflect.Type) encodeFunc {
|
||||
typ = reflect.PtrTo(typ)
|
||||
return func(e *encoder, v value) {
|
||||
// Optimization to write directly into the buffer when the encoder
|
||||
// does not need to compute a crc32 checksum.
|
||||
w := io.Writer(e)
|
||||
if e.table == nil {
|
||||
w = e.writer
|
||||
}
|
||||
_, err := v.iface(typ).(io.WriterTo).WriteTo(w)
|
||||
if err != nil {
|
||||
e.err = err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func writeInt8(b []byte, i int8) {
|
||||
b[0] = byte(i)
|
||||
}
|
||||
|
||||
func writeInt16(b []byte, i int16) {
|
||||
binary.BigEndian.PutUint16(b, uint16(i))
|
||||
}
|
||||
|
||||
func writeInt32(b []byte, i int32) {
|
||||
binary.BigEndian.PutUint32(b, uint32(i))
|
||||
}
|
||||
|
||||
func writeInt64(b []byte, i int64) {
|
||||
binary.BigEndian.PutUint64(b, uint64(i))
|
||||
}
|
||||
|
||||
func Marshal(version int16, value interface{}) ([]byte, error) {
|
||||
typ := typeOf(value)
|
||||
cache, _ := marshalers.Load().(map[versionedType]encodeFunc)
|
||||
key := versionedType{typ: typ, version: version}
|
||||
encode := cache[key]
|
||||
|
||||
if encode == nil {
|
||||
encode = encodeFuncOf(reflect.TypeOf(value), version, false, structTag{
|
||||
MinVersion: -1,
|
||||
MaxVersion: -1,
|
||||
TagID: -2,
|
||||
Compact: true,
|
||||
Nullable: true,
|
||||
})
|
||||
|
||||
newCache := make(map[versionedType]encodeFunc, len(cache)+1)
|
||||
newCache[key] = encode
|
||||
|
||||
for typ, fun := range cache {
|
||||
newCache[typ] = fun
|
||||
}
|
||||
|
||||
marshalers.Store(newCache)
|
||||
}
|
||||
|
||||
e, _ := encoders.Get().(*encoder)
|
||||
if e == nil {
|
||||
e = &encoder{writer: new(bytes.Buffer)}
|
||||
}
|
||||
|
||||
b, _ := e.writer.(*bytes.Buffer)
|
||||
defer func() {
|
||||
b.Reset()
|
||||
e.Reset(b)
|
||||
encoders.Put(e)
|
||||
}()
|
||||
|
||||
encode(e, nonAddressableValueOf(value))
|
||||
|
||||
if e.err != nil {
|
||||
return nil, e.err
|
||||
}
|
||||
|
||||
buf := b.Bytes()
|
||||
out := make([]byte, len(buf))
|
||||
copy(out, buf)
|
||||
return out, nil
|
||||
}
|
||||
|
||||
type versionedType struct {
|
||||
typ _type
|
||||
version int16
|
||||
}
|
||||
|
||||
var (
|
||||
encoders sync.Pool // *encoder
|
||||
marshalers atomic.Value // map[versionedType]encodeFunc
|
||||
)
|
||||
@@ -1,91 +0,0 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Error represents client-side protocol errors.
|
||||
type Error string
|
||||
|
||||
func (e Error) Error() string { return string(e) }
|
||||
|
||||
func Errorf(msg string, args ...interface{}) Error {
|
||||
return Error(fmt.Sprintf(msg, args...))
|
||||
}
|
||||
|
||||
const (
|
||||
// ErrNoTopic is returned when a request needs to be sent to a specific
// topic, but the client did not find it in the cluster metadata.
|
||||
ErrNoTopic Error = "topic not found"
|
||||
|
||||
// ErrNoPartition is returned when a request needs to be sent to a specific
|
||||
// partition, but the client did not find it in the cluster metadata.
|
||||
ErrNoPartition Error = "topic partition not found"
|
||||
|
||||
// ErrNoLeader is returned when a request needs to be sent to a partition
|
||||
// leader, but the client could not determine what the leader was at this
|
||||
// time.
|
||||
ErrNoLeader Error = "topic partition has no leader"
|
||||
|
||||
// ErrNoRecord is returned when attempting to write a message containing an
|
||||
// empty record set (which kafka forbids).
|
||||
//
|
||||
// We handle this case client-side because kafka will close the connection
|
||||
// that it received an empty produce request on, causing all concurrent
|
||||
// requests to be aborted.
|
||||
ErrNoRecord Error = "record set contains no records"
|
||||
|
||||
// ErrNoReset is returned by ResetRecordReader when the record reader does
|
||||
// not support being reset.
|
||||
ErrNoReset Error = "record sequence does not support reset"
|
||||
)
|
||||
|
||||
type TopicError struct {
|
||||
Topic string
|
||||
Err error
|
||||
}
|
||||
|
||||
func NewTopicError(topic string, err error) *TopicError {
|
||||
return &TopicError{Topic: topic, Err: err}
|
||||
}
|
||||
|
||||
func NewErrNoTopic(topic string) *TopicError {
|
||||
return NewTopicError(topic, ErrNoTopic)
|
||||
}
|
||||
|
||||
func (e *TopicError) Error() string {
|
||||
return fmt.Sprintf("%v (topic=%q)", e.Err, e.Topic)
|
||||
}
|
||||
|
||||
func (e *TopicError) Unwrap() error {
|
||||
return e.Err
|
||||
}
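// Editor's note: a small usage sketch (assumed, not part of the original
// change) showing how these wrappers compose with the standard errors package;
// TopicError.Unwrap lets errors.Is see the underlying sentinel value:
//
//	err := NewErrNoTopic("orders") // "orders" is a hypothetical topic name
//	errors.Is(err, ErrNoTopic)     // true
//	err.Error()                    // `topic not found (topic="orders")`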
|
||||
|
||||
type TopicPartitionError struct {
|
||||
Topic string
|
||||
Partition int32
|
||||
Err error
|
||||
}
|
||||
|
||||
func NewTopicPartitionError(topic string, partition int32, err error) *TopicPartitionError {
|
||||
return &TopicPartitionError{
|
||||
Topic: topic,
|
||||
Partition: partition,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
func NewErrNoPartition(topic string, partition int32) *TopicPartitionError {
|
||||
return NewTopicPartitionError(topic, partition, ErrNoPartition)
|
||||
}
|
||||
|
||||
func NewErrNoLeader(topic string, partition int32) *TopicPartitionError {
|
||||
return NewTopicPartitionError(topic, partition, ErrNoLeader)
|
||||
}
|
||||
|
||||
func (e *TopicPartitionError) Error() string {
|
||||
return fmt.Sprintf("%v (topic=%q partition=%d)", e.Err, e.Topic, e.Partition)
|
||||
}
|
||||
|
||||
func (e *TopicPartitionError) Unwrap() error {
|
||||
return e.Err
|
||||
}
|
||||
@@ -6,14 +6,18 @@ require (
|
||||
github.com/fatih/camelcase v1.0.0
|
||||
github.com/ohler55/ojg v1.12.12
|
||||
github.com/segmentio/kafka-go v0.4.27
|
||||
github.com/stretchr/testify v1.6.1
|
||||
github.com/up9inc/mizu/tap/api v0.0.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.0 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/martian v2.1.0+incompatible // indirect
|
||||
github.com/klauspost/compress v1.14.2 // indirect
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
|
||||
)
|
||||
|
||||
replace github.com/up9inc/mizu/tap/api v0.0.0 => ../../api
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
@@ -25,10 +26,12 @@ github.com/ohler55/ojg v1.12.12/go.mod h1:LBbIVRAgoFbYBXQhRhuEpaJIqq+goSO63/FQ+n
|
||||
github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
|
||||
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/segmentio/kafka-go v0.4.27 h1:sIhEozeL/TLN2mZ5dkG462vcGEWYKS+u31sXPjKhAM4=
|
||||
github.com/segmentio/kafka-go v0.4.27/go.mod h1:XzMcoMjSzDGHcIwpWUI7GB43iKZ2fTVmryPSGLf/MPg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
@@ -40,5 +43,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@@ -897,6 +898,10 @@ func representMapAsTable(mapData map[string]interface{}, selectorPrefix string,
|
||||
})
|
||||
}
|
||||
|
||||
sort.Slice(table, func(i, j int) bool {
|
||||
return table[i].Name < table[j].Name
|
||||
})
|
||||
|
||||
obj, _ := json.Marshal(table)
|
||||
representation = string(obj)
|
||||
return
|
||||
|
||||
@@ -33,27 +33,27 @@ type dissecting string
|
||||
|
||||
func (d dissecting) Register(extension *api.Extension) {
|
||||
extension.Protocol = &_protocol
|
||||
extension.MatcherMap = reqResMatcher.openMessagesMap
|
||||
}
|
||||
|
||||
func (d dissecting) Ping() {
|
||||
log.Printf("pong %s", _protocol.Name)
|
||||
}
|
||||
|
||||
func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions) error {
|
||||
func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
|
||||
reqResMatcher := _reqResMatcher.(*requestResponseMatcher)
|
||||
for {
|
||||
if superIdentifier.Protocol != nil && superIdentifier.Protocol != &_protocol {
|
||||
return errors.New("Identified by another protocol")
|
||||
}
|
||||
|
||||
if isClient {
|
||||
_, _, err := ReadRequest(b, tcpID, counterPair, superTimer)
|
||||
_, _, err := ReadRequest(b, tcpID, counterPair, superTimer, reqResMatcher)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
superIdentifier.Protocol = &_protocol
|
||||
} else {
|
||||
err := ReadResponse(b, tcpID, counterPair, superTimer, emitter)
|
||||
err := ReadResponse(b, tcpID, counterPair, superTimer, emitter, reqResMatcher)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -62,7 +62,7 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
|
||||
}
|
||||
}
|
||||
|
||||
func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string, resolvedDestination string) *api.Entry {
|
||||
func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string, resolvedDestination string, namespace string) *api.Entry {
|
||||
request := item.Pair.Request.Payload.(map[string]interface{})
|
||||
reqDetails := request["details"].(map[string]interface{})
|
||||
apiKey := ApiKey(reqDetails["apiKey"].(float64))
|
||||
@@ -158,6 +158,7 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
|
||||
IP: item.ConnectionInfo.ServerIP,
|
||||
Port: item.ConnectionInfo.ServerPort,
|
||||
},
|
||||
Namespace: namespace,
|
||||
Outgoing: item.ConnectionInfo.IsOutgoing,
|
||||
Request: reqDetails,
|
||||
Response: item.Pair.Response.Payload.(map[string]interface{})["details"].(map[string]interface{}),
|
||||
@@ -215,6 +216,10 @@ func (d dissecting) Macros() map[string]string {
|
||||
}
|
||||
}
|
||||
|
||||
func (d dissecting) NewResponseRequestMatcher() api.RequestResponseMatcher {
|
||||
return createResponseRequestMatcher()
|
||||
}
|
||||
|
||||
var Dissector dissecting
|
||||
|
||||
func NewDissector() api.Dissector {
|
||||
|
||||
290
tap/extensions/kafka/main_test.go
Normal file
@@ -0,0 +1,290 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
const (
|
||||
binDir = "bin"
|
||||
patternBin = "*_req.bin"
|
||||
patternDissect = "*.json"
|
||||
msgDissecting = "Dissecting:"
|
||||
msgAnalyzing = "Analyzing:"
|
||||
msgRepresenting = "Representing:"
|
||||
respSuffix = "_res.bin"
|
||||
expectDir = "expect"
|
||||
dissectDir = "dissect"
|
||||
analyzeDir = "analyze"
|
||||
representDir = "represent"
|
||||
testUpdate = "TEST_UPDATE"
|
||||
)
|
||||
|
||||
func TestRegister(t *testing.T) {
|
||||
dissector := NewDissector()
|
||||
extension := &api.Extension{}
|
||||
dissector.Register(extension)
|
||||
assert.Equal(t, "kafka", extension.Protocol.Name)
|
||||
}
|
||||
|
||||
func TestMacros(t *testing.T) {
|
||||
expectedMacros := map[string]string{
|
||||
"kafka": `proto.name == "kafka"`,
|
||||
}
|
||||
dissector := NewDissector()
|
||||
macros := dissector.Macros()
|
||||
assert.Equal(t, expectedMacros, macros)
|
||||
}
|
||||
|
||||
func TestPing(t *testing.T) {
|
||||
dissector := NewDissector()
|
||||
dissector.Ping()
|
||||
}
|
||||
|
||||
func TestDissect(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
expectDirDissect := path.Join(expectDir, dissectDir)
|
||||
|
||||
if testUpdateEnabled {
|
||||
os.RemoveAll(expectDirDissect)
|
||||
err := os.MkdirAll(expectDirDissect, 0775)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(binDir, patternBin))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
options := &api.TrafficFilteringOptions{
|
||||
IgnoredUserAgents: []string{},
|
||||
}
|
||||
|
||||
for _, _path := range paths {
|
||||
basePath := _path[:len(_path)-8]
|
||||
|
||||
// Channel to verify the output
|
||||
itemChannel := make(chan *api.OutputChannelItem)
|
||||
var emitter api.Emitter = &api.Emitting{
|
||||
AppStats: &api.AppStats{},
|
||||
OutputChannel: itemChannel,
|
||||
}
|
||||
|
||||
var items []*api.OutputChannelItem
|
||||
stop := make(chan bool)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
return
|
||||
case item := <-itemChannel:
|
||||
items = append(items, item)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Stream level
|
||||
counterPair := &api.CounterPair{
|
||||
Request: 0,
|
||||
Response: 0,
|
||||
}
|
||||
superIdentifier := &api.SuperIdentifier{}
|
||||
|
||||
// Request
|
||||
pathClient := _path
|
||||
fmt.Printf("%s %s\n", msgDissecting, pathClient)
|
||||
fileClient, err := os.Open(pathClient)
|
||||
assert.Nil(t, err)
|
||||
|
||||
bufferClient := bufio.NewReader(fileClient)
|
||||
tcpIDClient := &api.TcpID{
|
||||
SrcIP: "1",
|
||||
DstIP: "2",
|
||||
SrcPort: "1",
|
||||
DstPort: "2",
|
||||
}
|
||||
reqResMatcher := dissector.NewResponseRequestMatcher()
|
||||
reqResMatcher.SetMaxTry(10)
|
||||
err = dissector.Dissect(bufferClient, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
// Response
|
||||
pathServer := basePath + respSuffix
|
||||
fmt.Printf("%s %s\n", msgDissecting, pathServer)
|
||||
fileServer, err := os.Open(pathServer)
|
||||
assert.Nil(t, err)
|
||||
|
||||
bufferServer := bufio.NewReader(fileServer)
|
||||
tcpIDServer := &api.TcpID{
|
||||
SrcIP: "2",
|
||||
DstIP: "1",
|
||||
SrcPort: "2",
|
||||
DstPort: "1",
|
||||
}
|
||||
err = dissector.Dissect(bufferServer, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
fileClient.Close()
|
||||
fileServer.Close()
|
||||
|
||||
pathExpect := path.Join(expectDirDissect, fmt.Sprintf("%s.json", basePath[4:]))
|
||||
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
stop <- true
|
||||
|
||||
marshaled, err := json.Marshal(items)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if testUpdateEnabled {
|
||||
if len(items) > 0 {
|
||||
err = os.WriteFile(pathExpect, marshaled, 0644)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
|
||||
assert.Len(t, items, 0)
|
||||
} else {
|
||||
expectedBytes, err := ioutil.ReadFile(pathExpect)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.JSONEq(t, string(expectedBytes), string(marshaled))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
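// Editor's note: this test follows a golden-file pattern. When the TEST_UPDATE
// environment variable is set, the expected JSON files under expect/ are
// regenerated from the current dissector output; otherwise the output is
// compared against the committed files. A typical (assumed) invocation to
// refresh the golden files would be:
//
//	TEST_UPDATE=1 go test ./tap/extensions/kafka/...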
|
||||
|
||||
func TestAnalyze(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
expectDirDissect := path.Join(expectDir, dissectDir)
|
||||
expectDirAnalyze := path.Join(expectDir, analyzeDir)
|
||||
|
||||
if testUpdateEnabled {
|
||||
os.RemoveAll(expectDirAnalyze)
|
||||
err := os.MkdirAll(expectDirAnalyze, 0775)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirDissect, patternDissect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, _path := range paths {
|
||||
fmt.Printf("%s %s\n", msgAnalyzing, _path)
|
||||
|
||||
bytes, err := ioutil.ReadFile(_path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var items []*api.OutputChannelItem
|
||||
err = json.Unmarshal(bytes, &items)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var entries []*api.Entry
|
||||
for _, item := range items {
|
||||
entry := dissector.Analyze(item, "", "", "")
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
|
||||
pathExpect := path.Join(expectDirAnalyze, filepath.Base(_path))
|
||||
|
||||
marshaled, err := json.Marshal(entries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if testUpdateEnabled {
|
||||
if len(entries) > 0 {
|
||||
err = os.WriteFile(pathExpect, marshaled, 0644)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
|
||||
assert.Len(t, items, 0)
|
||||
} else {
|
||||
expectedBytes, err := ioutil.ReadFile(pathExpect)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.JSONEq(t, string(expectedBytes), string(marshaled))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepresent(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
expectDirAnalyze := path.Join(expectDir, analyzeDir)
|
||||
expectDirRepresent := path.Join(expectDir, representDir)
|
||||
|
||||
if testUpdateEnabled {
|
||||
os.RemoveAll(expectDirRepresent)
|
||||
err := os.MkdirAll(expectDirRepresent, 0775)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternDissect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, _path := range paths {
|
||||
fmt.Printf("%s %s\n", msgRepresenting, _path)
|
||||
|
||||
bytes, err := ioutil.ReadFile(_path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var entries []*api.Entry
|
||||
err = json.Unmarshal(bytes, &entries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var objects []string
|
||||
for _, entry := range entries {
|
||||
object, _, err := dissector.Represent(entry.Request, entry.Response)
|
||||
assert.Nil(t, err)
|
||||
objects = append(objects, string(object))
|
||||
}
|
||||
|
||||
pathExpect := path.Join(expectDirRepresent, filepath.Base(_path))
|
||||
|
||||
marshaled, err := json.Marshal(objects)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if testUpdateEnabled {
|
||||
if len(objects) > 0 {
|
||||
err = os.WriteFile(pathExpect, marshaled, 0644)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
|
||||
assert.Len(t, objects, 0)
|
||||
} else {
|
||||
expectedBytes, err := ioutil.ReadFile(pathExpect)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.JSONEq(t, string(expectedBytes), string(marshaled))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3,24 +3,31 @@ package kafka
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var reqResMatcher = CreateResponseRequestMatcher() // global
|
||||
const maxTry int = 3000
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
type RequestResponsePair struct {
|
||||
Request Request
|
||||
Response Response
|
||||
}
|
||||
|
||||
// Key is {client_addr}:{client_port}->{dest_addr}:{dest_port}::{correlation_id}
|
||||
// Key is {client_addr}_{client_port}_{dest_addr}_{dest_port}_{correlation_id}
|
||||
type requestResponseMatcher struct {
|
||||
openMessagesMap *sync.Map
|
||||
maxTry int
|
||||
}
|
||||
|
||||
func CreateResponseRequestMatcher() requestResponseMatcher {
|
||||
newMatcher := &requestResponseMatcher{openMessagesMap: &sync.Map{}}
|
||||
return *newMatcher
|
||||
func createResponseRequestMatcher() api.RequestResponseMatcher {
|
||||
return &requestResponseMatcher{openMessagesMap: &sync.Map{}, maxTry: 3000}
|
||||
}
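// Editor's note: a hypothetical illustration (not taken from this diff) of the
// key format described in the comment above; the helper that actually builds
// the key is not shown in this hunk:
//
//	key := fmt.Sprintf("%s_%s_%s_%s_%d",
//		tcpID.SrcIP, tcpID.SrcPort, tcpID.DstIP, tcpID.DstPort, correlationID)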
|
||||
|
||||
func (matcher *requestResponseMatcher) GetMap() *sync.Map {
|
||||
return matcher.openMessagesMap
|
||||
}
|
||||
|
||||
func (matcher *requestResponseMatcher) SetMaxTry(value int) {
|
||||
matcher.maxTry = value
|
||||
}
|
||||
|
||||
func (matcher *requestResponseMatcher) registerRequest(key string, request *Request) *RequestResponsePair {
|
||||
@@ -40,7 +47,7 @@ func (matcher *requestResponseMatcher) registerResponse(key string, response *Re
|
||||
try := 0
|
||||
for {
|
||||
try++
|
||||
if try > maxTry {
|
||||
if try > matcher.maxTry {
|
||||
return nil
|
||||
}
|
||||
if request, found := matcher.openMessagesMap.LoadAndDelete(key); found {
|
||||
|
||||
@@ -3,7 +3,6 @@ package kafka
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -27,29 +26,20 @@ func (k ApiKey) String() string {
|
||||
return strconv.Itoa(int(k))
|
||||
}
|
||||
|
||||
func (k ApiKey) MinVersion() int16 { return k.apiType().minVersion() }
|
||||
|
||||
func (k ApiKey) MaxVersion() int16 { return k.apiType().maxVersion() }
|
||||
|
||||
func (k ApiKey) SelectVersion(minVersion, maxVersion int16) int16 {
|
||||
min := k.MinVersion()
|
||||
max := k.MaxVersion()
|
||||
switch {
|
||||
case min > maxVersion:
|
||||
return min
|
||||
case max < maxVersion:
|
||||
return max
|
||||
default:
|
||||
return maxVersion
|
||||
}
|
||||
}
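// Editor's note: a worked example (illustrative only) of the clamping above.
// If this API key is registered for versions 2..7, SelectVersion(_, 9) returns
// 7 (capped at the supported maximum) and SelectVersion(_, 1) returns 2
// (raised to the supported minimum); any requested maxVersion inside the
// supported range is returned as-is. Note that the minVersion argument is not
// consulted by this implementation.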
|
||||
|
||||
func (k ApiKey) apiType() apiType {
|
||||
if i := int(k); i >= 0 && i < len(apiTypes) {
|
||||
return apiTypes[i]
|
||||
}
|
||||
return apiType{}
|
||||
}
|
||||
const (
|
||||
// v0 = 0
|
||||
v1 = 1
|
||||
v2 = 2
|
||||
v3 = 3
|
||||
v4 = 4
|
||||
v5 = 5
|
||||
v6 = 6
|
||||
v7 = 7
|
||||
v8 = 8
|
||||
v9 = 9
|
||||
v10 = 10
|
||||
v11 = 11
|
||||
)
|
||||
|
||||
const (
|
||||
Produce ApiKey = 0
|
||||
@@ -164,48 +154,6 @@ type messageType struct {
|
||||
flexible bool
|
||||
gotype reflect.Type
|
||||
decode decodeFunc
|
||||
encode encodeFunc
|
||||
}
|
||||
|
||||
type apiType struct {
|
||||
requests []messageType
|
||||
responses []messageType
|
||||
}
|
||||
|
||||
func (t apiType) minVersion() int16 {
|
||||
if len(t.requests) == 0 {
|
||||
return 0
|
||||
}
|
||||
return t.requests[0].version
|
||||
}
|
||||
|
||||
func (t apiType) maxVersion() int16 {
|
||||
if len(t.requests) == 0 {
|
||||
return 0
|
||||
}
|
||||
return t.requests[len(t.requests)-1].version
|
||||
}
|
||||
|
||||
var apiTypes [numApis]apiType
|
||||
|
||||
// Register is automatically called when sub-packages are imported to install a
|
||||
// new pair of request/response message types.
|
||||
func Register(req, res Message) {
|
||||
k1 := req.ApiKey()
|
||||
k2 := res.ApiKey()
|
||||
|
||||
if k1 != k2 {
|
||||
panic(fmt.Sprintf("[%T/%T]: request and response API keys mismatch: %d != %d", req, res, k1, k2))
|
||||
}
|
||||
|
||||
apiTypes[k1] = apiType{
|
||||
requests: typesOf(req),
|
||||
responses: typesOf(res),
|
||||
}
|
||||
}
|
||||
|
||||
func typesOf(v interface{}) []messageType {
|
||||
return makeTypes(reflect.TypeOf(v).Elem())
|
||||
}
|
||||
|
||||
func makeTypes(t reflect.Type) []messageType {
|
||||
@@ -241,7 +189,6 @@ func makeTypes(t reflect.Type) []messageType {
|
||||
gotype: t,
|
||||
flexible: flexible,
|
||||
decode: decodeFuncOf(t, v, flexible, structTag{}),
|
||||
encode: encodeFuncOf(t, v, flexible, structTag{}),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -378,31 +325,6 @@ type Broker struct {
|
||||
Rack string
|
||||
}
|
||||
|
||||
func (b Broker) String() string {
|
||||
return net.JoinHostPort(b.Host, itoa(b.Port))
|
||||
}
|
||||
|
||||
func (b Broker) Format(w fmt.State, v rune) {
|
||||
switch v {
|
||||
case 'd':
|
||||
_, _ = io.WriteString(w, itoa(b.ID))
|
||||
case 's':
|
||||
_, _ = io.WriteString(w, b.String())
|
||||
case 'v':
|
||||
_, _ = io.WriteString(w, itoa(b.ID))
|
||||
_, _ = io.WriteString(w, " ")
|
||||
_, _ = io.WriteString(w, b.String())
|
||||
if b.Rack != "" {
|
||||
_, _ = io.WriteString(w, " ")
|
||||
_, _ = io.WriteString(w, b.Rack)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func itoa(i int32) string {
|
||||
return strconv.Itoa(int(i))
|
||||
}
|
||||
|
||||
type Topic struct {
|
||||
Name string
|
||||
Error int16
|
||||
@@ -418,14 +340,6 @@ type Partition struct {
|
||||
Offline []int32
|
||||
}
|
||||
|
||||
// BrokerMessage is an extension of the Message interface implemented by some
|
||||
// request types to customize the broker assignment logic.
|
||||
type BrokerMessage interface {
|
||||
// Given a representation of the kafka cluster state as argument, returns
|
||||
// the broker that the message should be routed to.
|
||||
Broker(Cluster) (Broker, error)
|
||||
}
|
||||
|
||||
// GroupMessage is an extension of the Message interface implemented by some
|
||||
// request types to inform the program that they should be routed to a group
|
||||
// coordinator.
|
||||
@@ -443,16 +357,6 @@ type PreparedMessage interface {
|
||||
Prepare(apiVersion int16)
|
||||
}
|
||||
|
||||
// Splitter is an interface implemented by messages that can be split into
|
||||
// multiple requests and have their results merged back by a Merger.
|
||||
type Splitter interface {
|
||||
// For a given cluster layout, returns the list of messages constructed
|
||||
// from the receiver for each request that should be sent to the cluster.
|
||||
// The second return value is a Merger which can be used to merge back the
|
||||
// results of each request into a single message (or an error).
|
||||
Split(Cluster) ([]Message, Merger, error)
|
||||
}
|
||||
|
||||
// Merger is an interface implemented by messages which can merge multiple
|
||||
// results into one response.
|
||||
type Merger interface {
|
||||
@@ -461,16 +365,3 @@ type Merger interface {
|
||||
// values, other types should trigger a panic.
|
||||
Merge(messages []Message, results []interface{}) (Message, error)
|
||||
}
|
||||
|
||||
// Result converts r to a Message or an error, or panics if r could not be
|
||||
// converted to these types.
|
||||
func Result(r interface{}) (Message, error) {
|
||||
switch v := r.(type) {
|
||||
case Message:
|
||||
return v, nil
|
||||
case error:
|
||||
return nil, v
|
||||
default:
|
||||
panic(fmt.Errorf("BUG: result must be a message or an error but not %T", v))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,182 +0,0 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type ApiVersion struct {
|
||||
ApiKey int16
|
||||
MinVersion int16
|
||||
MaxVersion int16
|
||||
}
|
||||
|
||||
func (v ApiVersion) Format(w fmt.State, r rune) {
|
||||
switch r {
|
||||
case 's':
|
||||
fmt.Fprint(w, apiKey(v.ApiKey))
|
||||
case 'd':
|
||||
switch {
|
||||
case w.Flag('-'):
|
||||
fmt.Fprint(w, v.MinVersion)
|
||||
case w.Flag('+'):
|
||||
fmt.Fprint(w, v.MaxVersion)
|
||||
default:
|
||||
fmt.Fprint(w, v.ApiKey)
|
||||
}
|
||||
case 'v':
|
||||
switch {
|
||||
case w.Flag('-'):
|
||||
fmt.Fprintf(w, "v%d", v.MinVersion)
|
||||
case w.Flag('+'):
|
||||
fmt.Fprintf(w, "v%d", v.MaxVersion)
|
||||
case w.Flag('#'):
|
||||
fmt.Fprintf(w, "kafka.ApiVersion{ApiKey:%d MinVersion:%d MaxVersion:%d}", v.ApiKey, v.MinVersion, v.MaxVersion)
|
||||
default:
|
||||
fmt.Fprintf(w, "%s[v%d:v%d]", apiKey(v.ApiKey), v.MinVersion, v.MaxVersion)
|
||||
}
|
||||
}
|
||||
}
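// Editor's note: an illustrative use (not part of the original change) of the
// custom format verbs handled above:
//
//	v := ApiVersion{ApiKey: 3, MinVersion: 0, MaxVersion: 8}
//	fmt.Sprintf("%s", v)  // "Metadata"
//	fmt.Sprintf("%-v", v) // "v0"
//	fmt.Sprintf("%+v", v) // "v8"
//	fmt.Sprintf("%v", v)  // "Metadata[v0:v8]"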
|
||||
|
||||
type apiKey int16
|
||||
|
||||
const (
|
||||
produce apiKey = 0
|
||||
fetch apiKey = 1
|
||||
listOffsets apiKey = 2
|
||||
metadata apiKey = 3
|
||||
leaderAndIsr apiKey = 4
|
||||
stopReplica apiKey = 5
|
||||
updateMetadata apiKey = 6
|
||||
controlledShutdown apiKey = 7
|
||||
offsetCommit apiKey = 8
|
||||
offsetFetch apiKey = 9
|
||||
findCoordinator apiKey = 10
|
||||
joinGroup apiKey = 11
|
||||
heartbeat apiKey = 12
|
||||
leaveGroup apiKey = 13
|
||||
syncGroup apiKey = 14
|
||||
describeGroups apiKey = 15
|
||||
listGroups apiKey = 16
|
||||
saslHandshake apiKey = 17
|
||||
apiVersions apiKey = 18
|
||||
createTopics apiKey = 19
|
||||
deleteTopics apiKey = 20
|
||||
deleteRecords apiKey = 21
|
||||
initProducerId apiKey = 22
|
||||
offsetForLeaderEpoch apiKey = 23
|
||||
addPartitionsToTxn apiKey = 24
|
||||
addOffsetsToTxn apiKey = 25
|
||||
endTxn apiKey = 26
|
||||
writeTxnMarkers apiKey = 27
|
||||
txnOffsetCommit apiKey = 28
|
||||
describeAcls apiKey = 29
|
||||
createAcls apiKey = 30
|
||||
deleteAcls apiKey = 31
|
||||
describeConfigs apiKey = 32
|
||||
alterConfigs apiKey = 33
|
||||
alterReplicaLogDirs apiKey = 34
|
||||
describeLogDirs apiKey = 35
|
||||
saslAuthenticate apiKey = 36
|
||||
createPartitions apiKey = 37
|
||||
createDelegationToken apiKey = 38
|
||||
renewDelegationToken apiKey = 39
|
||||
expireDelegationToken apiKey = 40
|
||||
describeDelegationToken apiKey = 41
|
||||
deleteGroups apiKey = 42
|
||||
electLeaders apiKey = 43
|
||||
incrementalAlterConfigs apiKey = 44
|
||||
alterPartitionReassignments apiKey = 45
|
||||
listPartitionReassignments apiKey = 46
|
||||
offsetDelete apiKey = 47
|
||||
)
|
||||
|
||||
func (k apiKey) String() string {
|
||||
if i := int(k); i >= 0 && i < len(apiKeyStrings) {
|
||||
return apiKeyStrings[i]
|
||||
}
|
||||
return strconv.Itoa(int(k))
|
||||
}
|
||||
|
||||
const (
|
||||
// v0 = 0
|
||||
v1 = 1
|
||||
v2 = 2
|
||||
v3 = 3
|
||||
v4 = 4
|
||||
v5 = 5
|
||||
v6 = 6
|
||||
v7 = 7
|
||||
v8 = 8
|
||||
v9 = 9
|
||||
v10 = 10
|
||||
v11 = 11
|
||||
)
|
||||
|
||||
var apiKeyStrings = [...]string{
|
||||
produce: "Produce",
|
||||
fetch: "Fetch",
|
||||
listOffsets: "ListOffsets",
|
||||
metadata: "Metadata",
|
||||
leaderAndIsr: "LeaderAndIsr",
|
||||
stopReplica: "StopReplica",
|
||||
updateMetadata: "UpdateMetadata",
|
||||
controlledShutdown: "ControlledShutdown",
|
||||
offsetCommit: "OffsetCommit",
|
||||
offsetFetch: "OffsetFetch",
|
||||
findCoordinator: "FindCoordinator",
|
||||
joinGroup: "JoinGroup",
|
||||
heartbeat: "Heartbeat",
|
||||
leaveGroup: "LeaveGroup",
|
||||
syncGroup: "SyncGroup",
|
||||
describeGroups: "DescribeGroups",
|
||||
listGroups: "ListGroups",
|
||||
saslHandshake: "SaslHandshake",
|
||||
apiVersions: "ApiVersions",
|
||||
createTopics: "CreateTopics",
|
||||
deleteTopics: "DeleteTopics",
|
||||
deleteRecords: "DeleteRecords",
|
||||
initProducerId: "InitProducerId",
|
||||
offsetForLeaderEpoch: "OffsetForLeaderEpoch",
|
||||
addPartitionsToTxn: "AddPartitionsToTxn",
|
||||
addOffsetsToTxn: "AddOffsetsToTxn",
|
||||
endTxn: "EndTxn",
|
||||
writeTxnMarkers: "WriteTxnMarkers",
|
||||
txnOffsetCommit: "TxnOffsetCommit",
|
||||
describeAcls: "DescribeAcls",
|
||||
createAcls: "CreateAcls",
|
||||
deleteAcls: "DeleteAcls",
|
||||
describeConfigs: "DescribeConfigs",
|
||||
alterConfigs: "AlterConfigs",
|
||||
alterReplicaLogDirs: "AlterReplicaLogDirs",
|
||||
describeLogDirs: "DescribeLogDirs",
|
||||
saslAuthenticate: "SaslAuthenticate",
|
||||
createPartitions: "CreatePartitions",
|
||||
createDelegationToken: "CreateDelegationToken",
|
||||
renewDelegationToken: "RenewDelegationToken",
|
||||
expireDelegationToken: "ExpireDelegationToken",
|
||||
describeDelegationToken: "DescribeDelegationToken",
|
||||
deleteGroups: "DeleteGroups",
|
||||
electLeaders: "ElectLeaders",
|
||||
incrementalAlterConfigs: "IncrementalAlfterConfigs",
|
||||
alterPartitionReassignments: "AlterPartitionReassignments",
|
||||
listPartitionReassignments: "ListPartitionReassignments",
|
||||
offsetDelete: "OffsetDelete",
|
||||
}
|
||||
|
||||
func makeInt8(b []byte) int8 {
|
||||
return int8(b[0])
|
||||
}
|
||||
|
||||
func makeInt16(b []byte) int16 {
|
||||
return int16(binary.BigEndian.Uint16(b))
|
||||
}
|
||||
|
||||
func makeInt32(b []byte) int32 {
|
||||
return int32(binary.BigEndian.Uint32(b))
|
||||
}
|
||||
|
||||
func makeInt64(b []byte) int64 {
|
||||
return int64(binary.BigEndian.Uint64(b))
|
||||
}
|
||||
@@ -1,159 +0,0 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
type readable interface {
|
||||
readFrom(*bufio.Reader, int) (int, error)
|
||||
}
|
||||
|
||||
var errShortRead = errors.New("not enough bytes available to load the response")
|
||||
|
||||
func peekRead(r *bufio.Reader, sz int, n int, f func([]byte)) (int, error) {
|
||||
if n > sz {
|
||||
return sz, errShortRead
|
||||
}
|
||||
b, err := r.Peek(n)
|
||||
if err != nil {
|
||||
return sz, err
|
||||
}
|
||||
f(b)
|
||||
return discardN(r, sz, n)
|
||||
}
|
||||
|
||||
func readInt8(r *bufio.Reader, sz int, v *int8) (int, error) {
|
||||
return peekRead(r, sz, 1, func(b []byte) { *v = makeInt8(b) })
|
||||
}
|
||||
|
||||
func readInt16(r *bufio.Reader, sz int, v *int16) (int, error) {
|
||||
return peekRead(r, sz, 2, func(b []byte) { *v = makeInt16(b) })
|
||||
}
|
||||
|
||||
func readInt32(r *bufio.Reader, sz int, v *int32) (int, error) {
|
||||
return peekRead(r, sz, 4, func(b []byte) { *v = makeInt32(b) })
|
||||
}
|
||||
|
||||
func readInt64(r *bufio.Reader, sz int, v *int64) (int, error) {
|
||||
return peekRead(r, sz, 8, func(b []byte) { *v = makeInt64(b) })
|
||||
}
|
||||
|
||||
func readString(r *bufio.Reader, sz int, v *string) (int, error) {
|
||||
return readStringWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) {
|
||||
*v, remain, err = readNewString(r, sz, n)
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
func readStringWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) {
|
||||
var err error
|
||||
var len int16
|
||||
|
||||
if sz, err = readInt16(r, sz, &len); err != nil {
|
||||
return sz, err
|
||||
}
|
||||
|
||||
n := int(len)
|
||||
if n > sz {
|
||||
return sz, errShortRead
|
||||
}
|
||||
|
||||
return cb(r, sz, n)
|
||||
}
|
||||
|
||||
func readNewString(r *bufio.Reader, sz int, n int) (string, int, error) {
|
||||
b, sz, err := readNewBytes(r, sz, n)
|
||||
return string(b), sz, err
|
||||
}
|
||||
|
||||
func readBytes(r *bufio.Reader, sz int, v *[]byte) (int, error) {
|
||||
return readBytesWith(r, sz, func(r *bufio.Reader, sz int, n int) (remain int, err error) {
|
||||
*v, remain, err = readNewBytes(r, sz, n)
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
func readBytesWith(r *bufio.Reader, sz int, cb func(*bufio.Reader, int, int) (int, error)) (int, error) {
|
||||
var err error
|
||||
var n int
|
||||
|
||||
if sz, err = readArrayLen(r, sz, &n); err != nil {
|
||||
return sz, err
|
||||
}
|
||||
|
||||
if n > sz {
|
||||
return sz, errShortRead
|
||||
}
|
||||
|
||||
return cb(r, sz, n)
|
||||
}
|
||||
|
||||
func readNewBytes(r *bufio.Reader, sz int, n int) ([]byte, int, error) {
|
||||
var err error
|
||||
var b []byte
|
||||
var shortRead bool
|
||||
|
||||
if n > 0 {
|
||||
if sz < n {
|
||||
n = sz
|
||||
shortRead = true
|
||||
}
|
||||
|
||||
b = make([]byte, n)
|
||||
n, err = io.ReadFull(r, b)
|
||||
b = b[:n]
|
||||
sz -= n
|
||||
|
||||
if err == nil && shortRead {
|
||||
err = errShortRead
|
||||
}
|
||||
}
|
||||
|
||||
return b, sz, err
|
||||
}
|
||||
|
||||
func readArrayLen(r *bufio.Reader, sz int, n *int) (int, error) {
|
||||
var err error
|
||||
var len int32
|
||||
if sz, err = readInt32(r, sz, &len); err != nil {
|
||||
return sz, err
|
||||
}
|
||||
*n = int(len)
|
||||
return sz, nil
|
||||
}
|
||||
|
||||
func ReadAll(r *bufio.Reader, sz int, ptrs ...interface{}) (int, error) {
|
||||
var err error
|
||||
|
||||
for _, ptr := range ptrs {
|
||||
if sz, err = readPtr(r, sz, ptr); err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return sz, err
|
||||
}
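// Editor's note: a small usage sketch (assumed, not part of the original
// change). ReadAll dispatches on the pointer type via readPtr, so a caller can
// decode several consecutive fields in one call; the field names here are
// hypothetical:
//
//	var correlationID int32
//	var clientID string
//	remain, err := ReadAll(r, size, &correlationID, &clientID)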
|
||||
|
||||
func readPtr(r *bufio.Reader, sz int, ptr interface{}) (int, error) {
|
||||
switch v := ptr.(type) {
|
||||
case *int8:
|
||||
return readInt8(r, sz, v)
|
||||
case *int16:
|
||||
return readInt16(r, sz, v)
|
||||
case *int32:
|
||||
return readInt32(r, sz, v)
|
||||
case *int64:
|
||||
return readInt64(r, sz, v)
|
||||
case *string:
|
||||
return readString(r, sz, v)
|
||||
case *[]byte:
|
||||
return readBytes(r, sz, v)
|
||||
case readable:
|
||||
return v.readFrom(r, sz)
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported type: %T", v))
|
||||
}
|
||||
}
|
||||
@@ -1,279 +0,0 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/segmentio/kafka-go/compress"
|
||||
)
|
||||
|
||||
// Attributes is a bitset representing special attributes set on records.
|
||||
type Attributes int16
|
||||
|
||||
const (
|
||||
Gzip Attributes = Attributes(compress.Gzip) // 1
|
||||
Snappy Attributes = Attributes(compress.Snappy) // 2
|
||||
Lz4 Attributes = Attributes(compress.Lz4) // 3
|
||||
Zstd Attributes = Attributes(compress.Zstd) // 4
|
||||
Transactional Attributes = 1 << 4
|
||||
Control Attributes = 1 << 5
|
||||
)
|
||||
|
||||
func (a Attributes) Compression() compress.Compression {
|
||||
return compress.Compression(a & 7)
|
||||
}
|
||||
|
||||
func (a Attributes) Transactional() bool {
|
||||
return (a & Transactional) != 0
|
||||
}
|
||||
|
||||
func (a Attributes) Control() bool {
|
||||
return (a & Control) != 0
|
||||
}
|
||||
|
||||
func (a Attributes) String() string {
|
||||
s := a.Compression().String()
|
||||
if a.Transactional() {
|
||||
s += "+transactional"
|
||||
}
|
||||
if a.Control() {
|
||||
s += "+control"
|
||||
}
|
||||
return s
|
||||
}
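// Editor's note: an illustrative check (not part of the original change) of
// the bit layout above: the low three bits select the compression codec,
// 1<<4 marks transactional batches and 1<<5 marks control batches:
//
//	a := Gzip | Transactional
//	a.Compression()   // compress.Gzip
//	a.Transactional() // true
//	a.Control()       // false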
|
||||
|
||||
// Header represents a single entry in a list of record headers.
|
||||
type Header struct {
|
||||
Key string
|
||||
Value []byte
|
||||
}
|
||||
|
||||
// Record is an interface representing a single kafka record.
|
||||
//
|
||||
// Record values are not safe to use concurrently from multiple goroutines.
|
||||
type Record struct {
|
||||
// The offset at which the record exists in a topic partition. This value
|
||||
// is ignored in produce requests.
|
||||
Offset int64
|
||||
|
||||
// Returns the time of the record. This value may be omitted in produce
|
||||
// requests to let kafka set the time when it saves the record.
|
||||
Time time.Time
|
||||
|
||||
// Returns a byte sequence containing the key of this record. The returned
|
||||
// sequence may be nil to indicate that the record has no key. If the record
|
||||
// is part of a RecordSet, the content of the key must remain valid at least
|
||||
// until the record set is closed (or until the key is closed).
|
||||
Key Bytes
|
||||
|
||||
// Returns a byte sequence containing the value of this record. The returned
|
||||
// sequence may be nil to indicate that the record has no value. If the
|
||||
// record is part of a RecordSet, the content of the value must remain valid
|
||||
// at least until the record set is closed (or until the value is closed).
|
||||
Value Bytes
|
||||
|
||||
// Returns the list of headers associated with this record. The returned
|
||||
// slice may be reused across calls, the program should use it as an
|
||||
// immutable value.
|
||||
Headers []Header
|
||||
}
|
||||
|
||||
// RecordSet represents a sequence of records in Produce requests and Fetch
|
||||
// responses. All v0, v1, and v2 formats are supported.
|
||||
type RecordSet struct {
|
||||
// The message version that this record set will be represented as, valid
|
||||
// values are 1 or 2.
|
||||
//
|
||||
// When reading, this is the value of the highest version used in the
|
||||
// batches that compose the record set.
|
||||
//
|
||||
// When writing, this value dictates the format that the records will be
|
||||
// encoded in.
|
||||
Version int8
|
||||
|
||||
// Attributes set on the record set.
|
||||
//
|
||||
// When reading, the attributes are the combination of all attributes in
|
||||
// the batches that compose the record set.
|
||||
//
|
||||
// When writing, the attributes apply to the whole sequence of records in
|
||||
// the set.
|
||||
Attributes Attributes
|
||||
|
||||
// A reader exposing the sequence of records.
|
||||
//
|
||||
// When reading a RecordSet from an io.Reader, the Records field will be a
|
||||
// *RecordStream. If the program needs to access the details of each batch
|
||||
// that compose the stream, it may use type assertions to access the
|
||||
// underlying types of each batch.
|
||||
Records RecordReader
|
||||
}
|
||||
|
||||
// ReadFrom reads the representation of a record set from r into rs, returning
|
||||
// the number of bytes consumed from r, and a non-nil error if the record set
|
||||
// could not be read.
|
||||
func (rs *RecordSet) ReadFrom(r io.Reader) (int64, error) {
|
||||
// d, _ := r.(*decoder)
|
||||
// if d == nil {
|
||||
// d = &decoder{
|
||||
// reader: r,
|
||||
// remain: 4,
|
||||
// }
|
||||
// }
|
||||
|
||||
// *rs = RecordSet{}
|
||||
// limit := d.remain
|
||||
// size := d.readInt32()
|
||||
|
||||
// if d.err != nil {
|
||||
// return int64(limit - d.remain), d.err
|
||||
// }
|
||||
|
||||
// if size <= 0 {
|
||||
// return 4, nil
|
||||
// }
|
||||
|
||||
// stream := &RecordStream{
|
||||
// Records: make([]RecordReader, 0, 4),
|
||||
// }
|
||||
|
||||
// var err error
|
||||
// d.remain = int(size)
|
||||
|
||||
// for d.remain > 0 && err == nil {
|
||||
// var version byte
|
||||
|
||||
// if d.remain < (magicByteOffset + 1) {
|
||||
// if len(stream.Records) != 0 {
|
||||
// break
|
||||
// }
|
||||
// return 4, fmt.Errorf("impossible record set shorter than %d bytes", magicByteOffset+1)
|
||||
// }
|
||||
|
||||
// switch r := d.reader.(type) {
|
||||
// case bufferedReader:
|
||||
// b, err := r.Peek(magicByteOffset + 1)
|
||||
// if err != nil {
|
||||
// n, _ := r.Discard(len(b))
|
||||
// return 4 + int64(n), dontExpectEOF(err)
|
||||
// }
|
||||
// version = b[magicByteOffset]
|
||||
// case bytesBuffer:
|
||||
// version = r.Bytes()[magicByteOffset]
|
||||
// default:
|
||||
// b := make([]byte, magicByteOffset+1)
|
||||
// if n, err := io.ReadFull(d.reader, b); err != nil {
|
||||
// return 4 + int64(n), dontExpectEOF(err)
|
||||
// }
|
||||
// version = b[magicByteOffset]
|
||||
// // Reconstruct the prefix that we had to read to determine the version
|
||||
// // of the record set from the magic byte.
|
||||
// //
|
||||
// // Technically this may recursively stack readers when consuming all
|
||||
// // items of the batch, which could hurt performance. In practice this
|
||||
// // path should not be taken though, since the decoder would read from a
|
||||
// // *bufio.Reader which implements the bufferedReader interface.
|
||||
// d.reader = io.MultiReader(bytes.NewReader(b), d.reader)
|
||||
// }
|
||||
|
||||
// var tmp RecordSet
|
||||
// switch version {
|
||||
// case 0, 1:
|
||||
// err = tmp.readFromVersion1(d)
|
||||
// case 2:
|
||||
// err = tmp.readFromVersion2(d)
|
||||
// default:
|
||||
// err = fmt.Errorf("unsupported message version %d for message of size %d", version, size)
|
||||
// }
|
||||
|
||||
// if tmp.Version > rs.Version {
|
||||
// rs.Version = tmp.Version
|
||||
// }
|
||||
|
||||
// rs.Attributes |= tmp.Attributes
|
||||
|
||||
// if tmp.Records != nil {
|
||||
// stream.Records = append(stream.Records, tmp.Records)
|
||||
// }
|
||||
// }
|
||||
|
||||
// if len(stream.Records) != 0 {
|
||||
// rs.Records = stream
|
||||
// // Ignore errors if we've successfully read records, so the
|
||||
// // program can keep making progress.
|
||||
// err = nil
|
||||
// }
|
||||
|
||||
// d.discardAll()
|
||||
// rn := 4 + (int(size) - d.remain)
|
||||
// d.remain = limit - rn
|
||||
// return int64(rn), err
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// WriteTo writes the representation of rs into w. The value of rs.Version
|
||||
// dictates which format the record set will be represented as.
|
||||
//
|
||||
// The error will be ErrNoRecord if rs contained no records.
|
||||
//
|
||||
// Note: since this package is only compatible with kafka 0.10 and above, the
|
||||
// method never produces messages in version 0. If rs.Version is zero, the
|
||||
// method defaults to producing messages in version 1.
|
||||
func (rs *RecordSet) WriteTo(w io.Writer) (int64, error) {
|
||||
// if rs.Records == nil {
|
||||
// return 0, ErrNoRecord
|
||||
// }
|
||||
|
||||
// // This optimization avoids rendering the record set in an intermediary
|
||||
// // buffer when the writer is already a pageBuffer, which is a common case
|
||||
// // due to the way WriteRequest and WriteResponse are implemented.
|
||||
// buffer, _ := w.(*pageBuffer)
|
||||
// bufferOffset := int64(0)
|
||||
|
||||
// if buffer != nil {
|
||||
// bufferOffset = buffer.Size()
|
||||
// } else {
|
||||
// buffer = newPageBuffer()
|
||||
// defer buffer.unref()
|
||||
// }
|
||||
|
||||
// size := packUint32(0)
|
||||
// buffer.Write(size[:]) // size placeholder
|
||||
|
||||
// var err error
|
||||
// switch rs.Version {
|
||||
// case 0, 1:
|
||||
// err = rs.writeToVersion1(buffer, bufferOffset+4)
|
||||
// case 2:
|
||||
// err = rs.writeToVersion2(buffer, bufferOffset+4)
|
||||
// default:
|
||||
// err = fmt.Errorf("unsupported record set version %d", rs.Version)
|
||||
// }
|
||||
// if err != nil {
|
||||
// return 0, err
|
||||
// }
|
||||
|
||||
// n := buffer.Size() - bufferOffset
|
||||
// if n == 0 {
|
||||
// size = packUint32(^uint32(0))
|
||||
// } else {
|
||||
// size = packUint32(uint32(n) - 4)
|
||||
// }
|
||||
// buffer.WriteAt(size[:], bufferOffset)
|
||||
|
||||
// // This condition indicates that the output writer received by `WriteTo` was
|
||||
// // not a *pageBuffer, in which case we need to flush the buffered records
|
||||
// // data into it.
|
||||
// if buffer != w {
|
||||
// return buffer.WriteTo(w)
|
||||
// }
|
||||
|
||||
// return n, nil
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
func packUint32(u uint32) (b [4]byte) {
|
||||
binary.BigEndian.PutUint32(b[:], u)
|
||||
return
|
||||
}
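To make the size-placeholder pattern concrete, here is a small sketch (not part of the diff) that length-prefixes an already-encoded payload with packUint32, mirroring the backfill step used in WriteTo above and in WriteRequest/WriteResponse further down; the helper name is illustrative:
// frame prefixes an encoded message with its big-endian length, illustrating
// the placeholder-then-backfill technique used above.
func frame(payload []byte) []byte {
	out := make([]byte, 4, 4+len(payload)) // reserve the 4-byte size placeholder
	out = append(out, payload...)
	size := packUint32(uint32(len(payload)))
	copy(out[:4], size[:]) // backfill the placeholder with the body length
	return out
}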
|
||||
@@ -1,43 +0,0 @@
|
||||
package kafka
|
||||
|
||||
import (
|
||||
"github.com/segmentio/kafka-go/protocol"
|
||||
)
|
||||
|
||||
// Header is a key/value pair type representing headers set on records.
|
||||
// type Header = protocol.Header
|
||||
|
||||
// Bytes is an interface representing a sequence of bytes. This abstraction
|
||||
// makes it possible for programs to inject data into produce requests without
|
||||
// having to load it into an intermediary buffer, or read record keys and values
|
||||
// from a fetch response directly from internal buffers.
|
||||
//
|
||||
// Bytes are not safe to use concurrently from multiple goroutines.
|
||||
// type Bytes = protocol.Bytes
|
||||
|
||||
// NewBytes constructs a Bytes value from a byte slice.
|
||||
//
|
||||
// If b is nil, nil is returned.
|
||||
// func NewBytes(b []byte) Bytes { return protocol.NewBytes(b) }
|
||||
|
||||
// ReadAll reads b into a byte slice.
|
||||
// func ReadAll(b Bytes) ([]byte, error) { return protocol.ReadAll(b) }
|
||||
|
||||
// Record is an interface representing a single kafka record.
|
||||
//
|
||||
// Record values are not safe to use concurrently from multiple goroutines.
|
||||
// type Record = protocol.Record
|
||||
|
||||
// RecordReader is an interface representing a sequence of records. Record sets
|
||||
// are used in both produce and fetch requests to represent the sequence of
|
||||
// records that are sent to or received from kafka brokers.
|
||||
//
|
||||
// RecordReader values are not safe to use concurrently from multiple goroutines.
|
||||
type RecordReader = protocol.RecordReader
|
||||
|
||||
// NewRecordReader constructs a RecordReader which exposes the sequence of records
|
||||
// passed as arguments.
|
||||
func NewRecordReader(records ...Record) RecordReader {
|
||||
// return protocol.NewRecordReader(records...)
|
||||
return nil
|
||||
}
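A hypothetical sketch of how this constructor would be used once the commented-out protocol call is restored; NewBytes is the helper aliased (and currently commented out) above, so treat both it and the Record literal as assumptions:
// Hypothetical usage, not part of this change.
func exampleRecordSet() RecordSet {
	records := NewRecordReader(
		Record{Value: NewBytes([]byte("payload"))}, // NewBytes assumed restored
	)
	return RecordSet{Version: 2, Records: records}
}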
|
||||
@@ -8,46 +8,14 @@ import (
|
||||
|
||||
type index []int
|
||||
|
||||
type _type struct{ typ reflect.Type }
|
||||
|
||||
func typeOf(x interface{}) _type {
|
||||
return makeType(reflect.TypeOf(x))
|
||||
}
|
||||
|
||||
func elemTypeOf(x interface{}) _type {
|
||||
return makeType(reflect.TypeOf(x).Elem())
|
||||
}
|
||||
|
||||
func makeType(t reflect.Type) _type {
|
||||
return _type{typ: t}
|
||||
}
|
||||
|
||||
type value struct {
|
||||
val reflect.Value
|
||||
}
|
||||
|
||||
func nonAddressableValueOf(x interface{}) value {
|
||||
return value{val: reflect.ValueOf(x)}
|
||||
}
|
||||
|
||||
func valueOf(x interface{}) value {
|
||||
return value{val: reflect.ValueOf(x).Elem()}
|
||||
}
|
||||
|
||||
func (v value) bool() bool { return v.val.Bool() }
|
||||
|
||||
func (v value) int8() int8 { return int8(v.int64()) }
|
||||
|
||||
func (v value) int16() int16 { return int16(v.int64()) }
|
||||
|
||||
func (v value) int32() int32 { return int32(v.int64()) }
|
||||
|
||||
func (v value) int64() int64 { return v.val.Int() }
|
||||
|
||||
func (v value) string() string { return v.val.String() }
|
||||
|
||||
func (v value) bytes() []byte { return v.val.Bytes() }
|
||||
|
||||
func (v value) iface(t reflect.Type) interface{} { return v.val.Addr().Interface() }
|
||||
|
||||
func (v value) array(t reflect.Type) array { return array{val: v.val} } //nolint
|
||||
@@ -88,10 +56,6 @@ func makeArray(t reflect.Type, n int) array {
|
||||
|
||||
func (a array) index(i int) value { return value{val: a.val.Index(i)} }
|
||||
|
||||
func (a array) length() int { return a.val.Len() }
|
||||
|
||||
func (a array) isNil() bool { return a.val.IsNil() }
|
||||
|
||||
func indexOf(s reflect.StructField) index { return index(s.Index) }
|
||||
|
||||
func bytesToString(b []byte) string { return string(b) }
|
||||
|
||||
@@ -19,7 +19,7 @@ type Request struct {
|
||||
CaptureTime time.Time `json:"captureTime"`
|
||||
}
|
||||
|
||||
func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer) (apiKey ApiKey, apiVersion int16, err error) {
|
||||
func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, reqResMatcher *requestResponseMatcher) (apiKey ApiKey, apiVersion int16, err error) {
|
||||
d := &decoder{reader: r, remain: 4}
|
||||
size := d.readInt32()
|
||||
|
||||
@@ -28,6 +28,9 @@ func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, su
|
||||
}
|
||||
|
||||
if size < 8 {
|
||||
if size == 0 {
|
||||
return 0, 0, io.EOF
|
||||
}
|
||||
return 0, 0, fmt.Errorf("A Kafka request header cannot be smaller than 8 bytes")
|
||||
}
|
||||
|
||||
@@ -42,7 +45,7 @@ func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, su
|
||||
correlationID := d.readInt32()
|
||||
clientID := d.readString()
|
||||
|
||||
if i := int(apiKey); i < 0 || i >= len(apiTypes) {
|
||||
if i := int(apiKey); i < 0 || i >= numApis {
|
||||
err = fmt.Errorf("unsupported api key: %d", i)
|
||||
return apiKey, apiVersion, err
|
||||
}
|
||||
@@ -52,12 +55,6 @@ func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, su
|
||||
return apiKey, apiVersion, err
|
||||
}
|
||||
|
||||
t := &apiTypes[apiKey]
|
||||
if t == nil {
|
||||
err = fmt.Errorf("unsupported api: %s", apiNames[apiKey])
|
||||
return apiKey, apiVersion, err
|
||||
}
|
||||
|
||||
var payload interface{}
|
||||
|
||||
switch apiKey {
|
||||
@@ -214,8 +211,7 @@ func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, su
|
||||
}
|
||||
|
||||
key := fmt.Sprintf(
|
||||
"%d_%s:%s_%s:%s_%d",
|
||||
counterPair.StreamId,
|
||||
"%s_%s_%s_%s_%d",
|
||||
tcpID.SrcIP,
|
||||
tcpID.SrcPort,
|
||||
tcpID.DstIP,
|
||||
@@ -228,61 +224,3 @@ func ReadRequest(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, su
|
||||
|
||||
return apiKey, apiVersion, nil
|
||||
}
|
||||
|
||||
func WriteRequest(w io.Writer, apiVersion int16, correlationID int32, clientID string, msg Message) error {
|
||||
apiKey := msg.ApiKey()
|
||||
|
||||
if i := int(apiKey); i < 0 || i >= len(apiTypes) {
|
||||
return fmt.Errorf("unsupported api key: %d", i)
|
||||
}
|
||||
|
||||
t := &apiTypes[apiKey]
|
||||
if t == nil {
|
||||
return fmt.Errorf("unsupported api: %s", apiNames[apiKey])
|
||||
}
|
||||
|
||||
minVersion := t.minVersion()
|
||||
maxVersion := t.maxVersion()
|
||||
|
||||
if apiVersion < minVersion || apiVersion > maxVersion {
|
||||
return fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion)
|
||||
}
|
||||
|
||||
r := &t.requests[apiVersion-minVersion]
|
||||
v := valueOf(msg)
|
||||
b := newPageBuffer()
|
||||
defer b.unref()
|
||||
|
||||
e := &encoder{writer: b}
|
||||
e.writeInt32(0) // placeholder for the request size
|
||||
e.writeInt16(int16(apiKey))
|
||||
e.writeInt16(apiVersion)
|
||||
e.writeInt32(correlationID)
|
||||
|
||||
if r.flexible {
|
||||
// Flexible messages use a nullable string for the client ID, then extra space for a
|
||||
// tag buffer, which begins with a size value. Since we're not writing any fields into the
|
||||
// latter, we can just write zero for now.
|
||||
//
|
||||
// See
|
||||
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-482%3A+The+Kafka+Protocol+should+Support+Optional+Tagged+Fields
|
||||
// for details.
|
||||
e.writeNullString(clientID)
|
||||
e.writeUnsignedVarInt(0)
|
||||
} else {
|
||||
// Technically, recent versions of kafka interpret this field as a nullable
|
||||
// string, however kafka 0.10 expected a non-nullable string and fails with
|
||||
// a NullPointerException when it receives a null client id.
|
||||
e.writeString(clientID)
|
||||
}
|
||||
r.encode(e, v)
|
||||
err := e.err
|
||||
|
||||
if err == nil {
|
||||
size := packUint32(uint32(b.Size()) - 4)
|
||||
_, _ = b.WriteAt(size[:], 0)
|
||||
_, err = b.WriteTo(w)
|
||||
}
|
||||
|
||||
return err
|
||||
}
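For reference, a hypothetical sketch of the non-flexible header layout WriteRequest emits after the size placeholder: api key, api version, correlation id, then the client ID as a length-prefixed (non-nullable) string. The helper name is illustrative only:
// Hypothetical illustration of the v1-style request header encoded above.
func encodeRequestHeader(apiKey, apiVersion int16, correlationID int32, clientID string) []byte {
	buf := make([]byte, 10+len(clientID))
	binary.BigEndian.PutUint16(buf[0:2], uint16(apiKey))
	binary.BigEndian.PutUint16(buf[2:4], uint16(apiVersion))
	binary.BigEndian.PutUint32(buf[4:8], uint32(correlationID))
	binary.BigEndian.PutUint16(buf[8:10], uint16(len(clientID))) // int16 length + bytes; nullable only in flexible versions
	copy(buf[10:], clientID)
	return buf // the int32 size prefix is written first and backfilled afterwards
}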
|
||||
|
||||
@@ -16,7 +16,7 @@ type Response struct {
|
||||
CaptureTime time.Time `json:"captureTime"`
|
||||
}
|
||||
|
||||
func ReadResponse(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter) (err error) {
|
||||
func ReadResponse(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, reqResMatcher *requestResponseMatcher) (err error) {
|
||||
d := &decoder{reader: r, remain: 4}
|
||||
size := d.readInt32()
|
||||
|
||||
@@ -25,6 +25,9 @@ func ReadResponse(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, s
|
||||
}
|
||||
|
||||
if size < 4 {
|
||||
if size == 0 {
|
||||
return io.EOF
|
||||
}
|
||||
return fmt.Errorf("A Kafka response header cannot be smaller than 8 bytes")
|
||||
}
|
||||
|
||||
@@ -44,8 +47,7 @@ func ReadResponse(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, s
|
||||
}
|
||||
|
||||
key := fmt.Sprintf(
|
||||
"%d_%s:%s_%s:%s_%d",
|
||||
counterPair.StreamId,
|
||||
"%s_%s_%s_%s_%d",
|
||||
tcpID.DstIP,
|
||||
tcpID.DstPort,
|
||||
tcpID.SrcIP,
|
||||
@@ -54,7 +56,7 @@ func ReadResponse(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, s
|
||||
)
|
||||
reqResPair := reqResMatcher.registerResponse(key, response)
|
||||
if reqResPair == nil {
|
||||
return fmt.Errorf("Couldn't match a Kafka response to a Kafka request in 3 seconds!")
|
||||
return fmt.Errorf("Couldn't match a Kafka response to a Kafka request in %d milliseconds!", reqResMatcher.maxTry)
|
||||
}
|
||||
apiKey := reqResPair.Request.ApiKey
|
||||
apiVersion := reqResPair.Request.ApiVersion
|
||||
@@ -285,57 +287,12 @@ func ReadResponse(r io.Reader, tcpID *api.TcpID, counterPair *api.CounterPair, s
|
||||
}
|
||||
emitter.Emit(item)
|
||||
|
||||
if i := int(apiKey); i < 0 || i >= len(apiTypes) {
|
||||
if i := int(apiKey); i < 0 || i >= numApis {
|
||||
err = fmt.Errorf("unsupported api key: %d", i)
|
||||
return err
|
||||
}
|
||||
|
||||
t := &apiTypes[apiKey]
|
||||
if t == nil {
|
||||
err = fmt.Errorf("unsupported api: %s", apiNames[apiKey])
|
||||
return err
|
||||
}
|
||||
|
||||
d.discardAll()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func WriteResponse(w io.Writer, apiVersion int16, correlationID int32, msg Message) error {
|
||||
apiKey := msg.ApiKey()
|
||||
|
||||
if i := int(apiKey); i < 0 || i >= len(apiTypes) {
|
||||
return fmt.Errorf("unsupported api key: %d", i)
|
||||
}
|
||||
|
||||
t := &apiTypes[apiKey]
|
||||
if t == nil {
|
||||
return fmt.Errorf("unsupported api: %s", apiNames[apiKey])
|
||||
}
|
||||
|
||||
minVersion := t.minVersion()
|
||||
maxVersion := t.maxVersion()
|
||||
|
||||
if apiVersion < minVersion || apiVersion > maxVersion {
|
||||
return fmt.Errorf("unsupported %s version: v%d not in range v%d-v%d", apiKey, apiVersion, minVersion, maxVersion)
|
||||
}
|
||||
|
||||
r := &t.responses[apiVersion-minVersion]
|
||||
v := valueOf(msg)
|
||||
b := newPageBuffer()
|
||||
defer b.unref()
|
||||
|
||||
e := &encoder{writer: b}
|
||||
e.writeInt32(0) // placeholder for the response size
|
||||
e.writeInt32(correlationID)
|
||||
r.encode(e, v)
|
||||
err := e.err
|
||||
|
||||
if err == nil {
|
||||
size := packUint32(uint32(b.Size()) - 4)
|
||||
_, _ = b.WriteAt(size[:], 0)
|
||||
_, err = b.WriteTo(w)
|
||||
}
|
||||
|
||||
return err
|
||||
}
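And the inverse view, a hypothetical sketch of the frame WriteResponse produces: a big-endian size, the correlation id, then the encoded body (size counts every byte after the size field itself). The helper name is illustrative:
// Hypothetical helper, not part of this change: peel the header off a raw response frame.
func splitResponseFrame(frame []byte) (size uint32, correlationID int32, body []byte) {
	size = binary.BigEndian.Uint32(frame[0:4])
	correlationID = int32(binary.BigEndian.Uint32(frame[4:8]))
	body = frame[8:]
	return
}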
|
||||
|
||||
@@ -12,19 +12,6 @@ const (
|
||||
RequireAll RequiredAcks = -1
|
||||
)
|
||||
|
||||
func (acks RequiredAcks) String() string {
|
||||
switch acks {
|
||||
case RequireNone:
|
||||
return "none"
|
||||
case RequireOne:
|
||||
return "one"
|
||||
case RequireAll:
|
||||
return "all"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
type UUID struct {
|
||||
TimeLow int32 `json:"timeLow"`
|
||||
TimeMid int16 `json:"timeMid"`
|
||||
|
||||
16
tap/extensions/redis/Makefile
Normal file
@@ -0,0 +1,16 @@
|
||||
skipbin := $$(find bin -mindepth 1 -maxdepth 1)
|
||||
skipexpect := $$(find expect -mindepth 1 -maxdepth 1)
|
||||
|
||||
test: test-pull-bin test-pull-expect
|
||||
@MIZU_TEST=1 go test -v ./... -coverpkg=./... -race -coverprofile=coverage.out -covermode=atomic
|
||||
|
||||
test-update: test-pull-bin
|
||||
@MIZU_TEST=1 TEST_UPDATE=1 go test -v ./... -coverpkg=./... -coverprofile=coverage.out -covermode=atomic
|
||||
|
||||
test-pull-bin:
|
||||
@mkdir -p bin
|
||||
@[ "${skipbin}" ] && echo "Skipping downloading BINs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp gs://static.up9.io/mizu/test-pcap/bin/redis/\*.bin bin
|
||||
|
||||
test-pull-expect:
|
||||
@mkdir -p expect
|
||||
@[ "${skipexpect}" ] && echo "Skipping downloading expected JSONs" || gsutil -o 'GSUtil:parallel_process_count=5' -o 'GSUtil:parallel_thread_count=5' -m cp -r gs://static.up9.io/mizu/test-pcap/expect/redis/\* expect
|
||||
@@ -2,8 +2,16 @@ module github.com/up9inc/mizu/tap/extensions/redis
|
||||
|
||||
go 1.17
|
||||
|
||||
require github.com/up9inc/mizu/tap/api v0.0.0
|
||||
require (
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/up9inc/mizu/tap/api v0.0.0
|
||||
)
|
||||
|
||||
require github.com/google/martian v2.1.0+incompatible // indirect
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.0 // indirect
|
||||
github.com/google/martian v2.1.0+incompatible // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect
|
||||
)
|
||||
|
||||
replace github.com/up9inc/mizu/tap/api v0.0.0 => ../../api
|
||||
|
||||
@@ -1,2 +1,13 @@
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
@@ -6,15 +6,14 @@ import (
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
func handleClientStream(tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, request *RedisPacket) error {
|
||||
func handleClientStream(tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, request *RedisPacket, reqResMatcher *requestResponseMatcher) error {
|
||||
counterPair.Lock()
|
||||
counterPair.Request++
|
||||
requestCounter := counterPair.Request
|
||||
counterPair.Unlock()
|
||||
|
||||
ident := fmt.Sprintf(
|
||||
"%d_%s:%s_%s:%s_%d",
|
||||
counterPair.StreamId,
|
||||
"%s_%s_%s_%s_%d",
|
||||
tcpID.SrcIP,
|
||||
tcpID.DstIP,
|
||||
tcpID.SrcPort,
|
||||
@@ -36,15 +35,14 @@ func handleClientStream(tcpID *api.TcpID, counterPair *api.CounterPair, superTim
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleServerStream(tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, response *RedisPacket) error {
|
||||
func handleServerStream(tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, emitter api.Emitter, response *RedisPacket, reqResMatcher *requestResponseMatcher) error {
|
||||
counterPair.Lock()
|
||||
counterPair.Response++
|
||||
responseCounter := counterPair.Response
|
||||
counterPair.Unlock()
|
||||
|
||||
ident := fmt.Sprintf(
|
||||
"%d_%s:%s_%s:%s_%d",
|
||||
counterPair.StreamId,
|
||||
"%s_%s_%s_%s_%d",
|
||||
tcpID.DstIP,
|
||||
tcpID.SrcIP,
|
||||
tcpID.DstPort,
|
||||
|
||||
@@ -32,14 +32,14 @@ type dissecting string
|
||||
|
||||
func (d dissecting) Register(extension *api.Extension) {
|
||||
extension.Protocol = &protocol
|
||||
extension.MatcherMap = reqResMatcher.openMessagesMap
|
||||
}
|
||||
|
||||
func (d dissecting) Ping() {
|
||||
log.Printf("pong %s", protocol.Name)
|
||||
}
|
||||
|
||||
func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions) error {
|
||||
func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, counterPair *api.CounterPair, superTimer *api.SuperTimer, superIdentifier *api.SuperIdentifier, emitter api.Emitter, options *api.TrafficFilteringOptions, _reqResMatcher api.RequestResponseMatcher) error {
|
||||
reqResMatcher := _reqResMatcher.(*requestResponseMatcher)
|
||||
is := &RedisInputStream{
|
||||
Reader: b,
|
||||
Buf: make([]byte, 8192),
|
||||
@@ -52,9 +52,9 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
|
||||
}
|
||||
|
||||
if isClient {
|
||||
err = handleClientStream(tcpID, counterPair, superTimer, emitter, redisPacket)
|
||||
err = handleClientStream(tcpID, counterPair, superTimer, emitter, redisPacket, reqResMatcher)
|
||||
} else {
|
||||
err = handleServerStream(tcpID, counterPair, superTimer, emitter, redisPacket)
|
||||
err = handleServerStream(tcpID, counterPair, superTimer, emitter, redisPacket, reqResMatcher)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
@@ -63,7 +63,7 @@ func (d dissecting) Dissect(b *bufio.Reader, isClient bool, tcpID *api.TcpID, co
|
||||
}
|
||||
}
|
||||
|
||||
func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string, resolvedDestination string) *api.Entry {
|
||||
func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string, resolvedDestination string, namespace string) *api.Entry {
|
||||
request := item.Pair.Request.Payload.(map[string]interface{})
|
||||
response := item.Pair.Response.Payload.(map[string]interface{})
|
||||
reqDetails := request["details"].(map[string]interface{})
|
||||
@@ -96,6 +96,7 @@ func (d dissecting) Analyze(item *api.OutputChannelItem, resolvedSource string,
|
||||
IP: item.ConnectionInfo.ServerIP,
|
||||
Port: item.ConnectionInfo.ServerPort,
|
||||
},
|
||||
Namespace: namespace,
|
||||
Outgoing: item.ConnectionInfo.IsOutgoing,
|
||||
Request: reqDetails,
|
||||
Response: resDetails,
|
||||
@@ -127,6 +128,10 @@ func (d dissecting) Macros() map[string]string {
|
||||
}
|
||||
}
|
||||
|
||||
func (d dissecting) NewResponseRequestMatcher() api.RequestResponseMatcher {
|
||||
return createResponseRequestMatcher()
|
||||
}
|
||||
|
||||
var Dissector dissecting
|
||||
|
||||
func NewDissector() api.Dissector {
|
||||
|
||||
290
tap/extensions/redis/main_test.go
Normal file
@@ -0,0 +1,290 @@
|
||||
package redis
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
const (
|
||||
binDir = "bin"
|
||||
patternBin = "*_req.bin"
|
||||
patternDissect = "*.json"
|
||||
msgDissecting = "Dissecting:"
|
||||
msgAnalyzing = "Analyzing:"
|
||||
msgRepresenting = "Representing:"
|
||||
respSuffix = "_res.bin"
|
||||
expectDir = "expect"
|
||||
dissectDir = "dissect"
|
||||
analyzeDir = "analyze"
|
||||
representDir = "represent"
|
||||
testUpdate = "TEST_UPDATE"
|
||||
)
|
||||
|
||||
func TestRegister(t *testing.T) {
|
||||
dissector := NewDissector()
|
||||
extension := &api.Extension{}
|
||||
dissector.Register(extension)
|
||||
assert.Equal(t, "redis", extension.Protocol.Name)
|
||||
}
|
||||
|
||||
func TestMacros(t *testing.T) {
|
||||
expectedMacros := map[string]string{
|
||||
"redis": `proto.name == "redis"`,
|
||||
}
|
||||
dissector := NewDissector()
|
||||
macros := dissector.Macros()
|
||||
assert.Equal(t, expectedMacros, macros)
|
||||
}
|
||||
|
||||
func TestPing(t *testing.T) {
|
||||
dissector := NewDissector()
|
||||
dissector.Ping()
|
||||
}
|
||||
|
||||
func TestDissect(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
expectDirDissect := path.Join(expectDir, dissectDir)
|
||||
|
||||
if testUpdateEnabled {
|
||||
os.RemoveAll(expectDirDissect)
|
||||
err := os.MkdirAll(expectDirDissect, 0775)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(binDir, patternBin))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
options := &api.TrafficFilteringOptions{
|
||||
IgnoredUserAgents: []string{},
|
||||
}
|
||||
|
||||
for _, _path := range paths {
|
||||
basePath := _path[:len(_path)-8]
|
||||
|
||||
// Channel to verify the output
|
||||
itemChannel := make(chan *api.OutputChannelItem)
|
||||
var emitter api.Emitter = &api.Emitting{
|
||||
AppStats: &api.AppStats{},
|
||||
OutputChannel: itemChannel,
|
||||
}
|
||||
|
||||
var items []*api.OutputChannelItem
|
||||
stop := make(chan bool)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
return
|
||||
case item := <-itemChannel:
|
||||
items = append(items, item)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Stream level
|
||||
counterPair := &api.CounterPair{
|
||||
Request: 0,
|
||||
Response: 0,
|
||||
}
|
||||
superIdentifier := &api.SuperIdentifier{}
|
||||
|
||||
// Request
|
||||
pathClient := _path
|
||||
fmt.Printf("%s %s\n", msgDissecting, pathClient)
|
||||
fileClient, err := os.Open(pathClient)
|
||||
assert.Nil(t, err)
|
||||
|
||||
bufferClient := bufio.NewReader(fileClient)
|
||||
tcpIDClient := &api.TcpID{
|
||||
SrcIP: "1",
|
||||
DstIP: "2",
|
||||
SrcPort: "1",
|
||||
DstPort: "2",
|
||||
}
|
||||
reqResMatcher := dissector.NewResponseRequestMatcher()
|
||||
err = dissector.Dissect(bufferClient, true, tcpIDClient, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
if err != nil && reflect.TypeOf(err) != reflect.TypeOf(&ConnectError{}) && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
// Response
|
||||
pathServer := basePath + respSuffix
|
||||
fmt.Printf("%s %s\n", msgDissecting, pathServer)
|
||||
fileServer, err := os.Open(pathServer)
|
||||
assert.Nil(t, err)
|
||||
|
||||
bufferServer := bufio.NewReader(fileServer)
|
||||
tcpIDServer := &api.TcpID{
|
||||
SrcIP: "2",
|
||||
DstIP: "1",
|
||||
SrcPort: "2",
|
||||
DstPort: "1",
|
||||
}
|
||||
err = dissector.Dissect(bufferServer, false, tcpIDServer, counterPair, &api.SuperTimer{}, superIdentifier, emitter, options, reqResMatcher)
|
||||
if err != nil && reflect.TypeOf(err) != reflect.TypeOf(&ConnectError{}) && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
log.Println(err)
|
||||
}
|
||||
|
||||
fileClient.Close()
|
||||
fileServer.Close()
|
||||
|
||||
pathExpect := path.Join(expectDirDissect, fmt.Sprintf("%s.json", basePath[4:]))
|
||||
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
stop <- true
|
||||
|
||||
marshaled, err := json.Marshal(items)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if testUpdateEnabled {
|
||||
if len(items) > 0 {
|
||||
err = os.WriteFile(pathExpect, marshaled, 0644)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
|
||||
assert.Len(t, items, 0)
|
||||
} else {
|
||||
expectedBytes, err := ioutil.ReadFile(pathExpect)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.JSONEq(t, string(expectedBytes), string(marshaled))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAnalyze(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
expectDirDissect := path.Join(expectDir, dissectDir)
|
||||
expectDirAnalyze := path.Join(expectDir, analyzeDir)
|
||||
|
||||
if testUpdateEnabled {
|
||||
os.RemoveAll(expectDirAnalyze)
|
||||
err := os.MkdirAll(expectDirAnalyze, 0775)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirDissect, patternDissect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, _path := range paths {
|
||||
fmt.Printf("%s %s\n", msgAnalyzing, _path)
|
||||
|
||||
bytes, err := ioutil.ReadFile(_path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var items []*api.OutputChannelItem
|
||||
err = json.Unmarshal(bytes, &items)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var entries []*api.Entry
|
||||
for _, item := range items {
|
||||
entry := dissector.Analyze(item, "", "", "")
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
|
||||
pathExpect := path.Join(expectDirAnalyze, filepath.Base(_path))
|
||||
|
||||
marshaled, err := json.Marshal(entries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if testUpdateEnabled {
|
||||
if len(entries) > 0 {
|
||||
err = os.WriteFile(pathExpect, marshaled, 0644)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
|
||||
assert.Len(t, items, 0)
|
||||
} else {
|
||||
expectedBytes, err := ioutil.ReadFile(pathExpect)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.JSONEq(t, string(expectedBytes), string(marshaled))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRepresent(t *testing.T) {
|
||||
_, testUpdateEnabled := os.LookupEnv(testUpdate)
|
||||
|
||||
expectDirAnalyze := path.Join(expectDir, analyzeDir)
|
||||
expectDirRepresent := path.Join(expectDir, representDir)
|
||||
|
||||
if testUpdateEnabled {
|
||||
os.RemoveAll(expectDirRepresent)
|
||||
err := os.MkdirAll(expectDirRepresent, 0775)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
dissector := NewDissector()
|
||||
paths, err := filepath.Glob(path.Join(expectDirAnalyze, patternDissect))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, _path := range paths {
|
||||
fmt.Printf("%s %s\n", msgRepresenting, _path)
|
||||
|
||||
bytes, err := ioutil.ReadFile(_path)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var entries []*api.Entry
|
||||
err = json.Unmarshal(bytes, &entries)
|
||||
assert.Nil(t, err)
|
||||
|
||||
var objects []string
|
||||
for _, entry := range entries {
|
||||
object, _, err := dissector.Represent(entry.Request, entry.Response)
|
||||
assert.Nil(t, err)
|
||||
objects = append(objects, string(object))
|
||||
}
|
||||
|
||||
pathExpect := path.Join(expectDirRepresent, filepath.Base(_path))
|
||||
|
||||
marshaled, err := json.Marshal(objects)
|
||||
assert.Nil(t, err)
|
||||
|
||||
if testUpdateEnabled {
|
||||
if len(objects) > 0 {
|
||||
err = os.WriteFile(pathExpect, marshaled, 0644)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
} else {
|
||||
if _, err := os.Stat(pathExpect); errors.Is(err, os.ErrNotExist) {
|
||||
assert.Len(t, objects, 0)
|
||||
} else {
|
||||
expectedBytes, err := ioutil.ReadFile(pathExpect)
|
||||
assert.Nil(t, err)
|
||||
|
||||
assert.JSONEq(t, string(expectedBytes), string(marshaled))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -7,16 +7,19 @@ import (
|
||||
"github.com/up9inc/mizu/tap/api"
|
||||
)
|
||||
|
||||
var reqResMatcher = createResponseRequestMatcher() // global
|
||||
|
||||
// Key is `{stream_id}_{src_ip}:{dst_ip}_{src_ip}:{src_port}_{incremental_counter}`
|
||||
// Key is `{src_ip}_{dst_ip}_{src_port}_{dst_port}_{incremental_counter}`
|
||||
type requestResponseMatcher struct {
|
||||
openMessagesMap *sync.Map
|
||||
}
|
||||
|
||||
func createResponseRequestMatcher() requestResponseMatcher {
|
||||
newMatcher := &requestResponseMatcher{openMessagesMap: &sync.Map{}}
|
||||
return *newMatcher
|
||||
func createResponseRequestMatcher() api.RequestResponseMatcher {
|
||||
return &requestResponseMatcher{openMessagesMap: &sync.Map{}}
|
||||
}
|
||||
|
||||
func (matcher *requestResponseMatcher) GetMap() *sync.Map {
|
||||
return matcher.openMessagesMap
|
||||
}
|
||||
func (matcher *requestResponseMatcher) SetMaxTry(value int) {
|
||||
}
|
||||
|
||||
func (matcher *requestResponseMatcher) registerRequest(ident string, request *RedisPacket, captureTime time.Time) *api.OutputChannelItem {
|
||||
|
||||
@@ -210,6 +210,7 @@ func startPassiveTapper(opts *TapOpts, outputItems chan *api.OutputChannelItem)
|
||||
assemblerMutex: &assembler.assemblerMutex,
|
||||
cleanPeriod: cleanPeriod,
|
||||
connectionTimeout: staleConnectionTimeout,
|
||||
streamsMap: streamsMap,
|
||||
}
|
||||
cleaner.start()
|
||||
|
||||
|
||||
@@ -47,6 +47,7 @@ type tcpReader struct {
|
||||
extension *api.Extension
|
||||
emitter api.Emitter
|
||||
counterPair *api.CounterPair
|
||||
reqResMatcher api.RequestResponseMatcher
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
@@ -94,7 +95,7 @@ func (h *tcpReader) Close() {
|
||||
func (h *tcpReader) run(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
b := bufio.NewReader(h)
|
||||
err := h.extension.Dissector.Dissect(b, h.isClient, h.tcpID, h.counterPair, h.superTimer, h.parent.superIdentifier, h.emitter, filteringOptions)
|
||||
err := h.extension.Dissector.Dissect(b, h.isClient, h.tcpID, h.counterPair, h.superTimer, h.parent.superIdentifier, h.emitter, filteringOptions, h.reqResMatcher)
|
||||
if err != nil {
|
||||
_, err = io.Copy(ioutil.Discard, b)
|
||||
if err != nil {
|
||||
|
||||
@@ -29,8 +29,9 @@ type tcpStreamFactory struct {
|
||||
}
|
||||
|
||||
type tcpStreamWrapper struct {
|
||||
stream *tcpStream
|
||||
createdAt time.Time
|
||||
stream *tcpStream
|
||||
reqResMatcher api.RequestResponseMatcher
|
||||
createdAt time.Time
|
||||
}
|
||||
|
||||
func NewTcpStreamFactory(emitter api.Emitter, streamsMap *tcpStreamMap, opts *TapOpts) *tcpStreamFactory {
|
||||
@@ -81,8 +82,8 @@ func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.T
|
||||
if stream.isTapTarget {
|
||||
stream.id = factory.streamsMap.nextId()
|
||||
for i, extension := range extensions {
|
||||
reqResMatcher := extension.Dissector.NewResponseRequestMatcher()
|
||||
counterPair := &api.CounterPair{
|
||||
StreamId: stream.id,
|
||||
Request: 0,
|
||||
Response: 0,
|
||||
}
|
||||
@@ -103,6 +104,7 @@ func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.T
|
||||
extension: extension,
|
||||
emitter: factory.Emitter,
|
||||
counterPair: counterPair,
|
||||
reqResMatcher: reqResMatcher,
|
||||
})
|
||||
stream.servers = append(stream.servers, tcpReader{
|
||||
msgQueue: make(chan tcpReaderDataMsg),
|
||||
@@ -121,11 +123,13 @@ func (factory *tcpStreamFactory) New(net, transport gopacket.Flow, tcp *layers.T
|
||||
extension: extension,
|
||||
emitter: factory.Emitter,
|
||||
counterPair: counterPair,
|
||||
reqResMatcher: reqResMatcher,
|
||||
})
|
||||
|
||||
factory.streamsMap.Store(stream.id, &tcpStreamWrapper{
|
||||
stream: stream,
|
||||
createdAt: time.Now(),
|
||||
stream: stream,
|
||||
reqResMatcher: reqResMatcher,
|
||||
createdAt: time.Now(),
|
||||
})
|
||||
|
||||
factory.wg.Add(2)
|
||||
|
||||
@@ -10,8 +10,7 @@ import debounce from 'lodash/debounce';
|
||||
import ServiceMapOptions from './ServiceMapOptions'
|
||||
import { useCommonStyles } from "../../helpers/commonStyle";
|
||||
import refresh from "../assets/refresh.svg";
|
||||
import reset from "../assets/reset.svg";
|
||||
import close from "../assets/close.svg"
|
||||
import close from "../assets/close.svg";
|
||||
|
||||
interface GraphData {
|
||||
nodes: Node[];
|
||||
@@ -154,19 +153,6 @@ export const ServiceMapModal: React.FC<ServiceMapModalProps> = ({ isOpen, onOpen
|
||||
return () => setGraphData({ nodes: [], edges: [] })
|
||||
}, [getServiceMapData])
|
||||
|
||||
const resetServiceMap = debounce(async () => {
|
||||
try {
|
||||
const serviceMapResetResponse = await api.serviceMapReset();
|
||||
if (serviceMapResetResponse["status"] === "enabled") {
|
||||
refreshServiceMap()
|
||||
}
|
||||
|
||||
} catch (ex) {
|
||||
toast.error("An error occurred while resetting Mizu Service Map, see console for mode details");
|
||||
console.error(ex);
|
||||
}
|
||||
}, 500);
|
||||
|
||||
const refreshServiceMap = debounce(() => {
|
||||
getServiceMapData();
|
||||
}, 500);
|
||||
@@ -192,16 +178,6 @@ export const ServiceMapModal: React.FC<ServiceMapModalProps> = ({ isOpen, onOpen
|
||||
{!isLoading && <div style={{ height: "100%", width: "100%" }}>
|
||||
<div style={{ display: "flex", justifyContent: "space-between" }}>
|
||||
<div>
|
||||
<Button
|
||||
startIcon={<img src={reset} className="custom" alt="reset" style={{ marginRight:"8%"}}></img>}
|
||||
size="large"
|
||||
variant="contained"
|
||||
className={commonClasses.outlinedButton + " " + commonClasses.imagedButton}
|
||||
style={{ marginRight: 25, paddingTop: "3px", paddingBottom: "1px"}}
|
||||
onClick={resetServiceMap}
|
||||
>
|
||||
Reset
|
||||
</Button>
|
||||
<Button
|
||||
startIcon={<img src={refresh} className="custom" alt="refresh" style={{ marginRight:"8%"}}></img>}
|
||||
size="medium"
|
||||
|
||||
@@ -1,4 +0,0 @@
|
||||
<svg width="22" height="22" viewBox="0 0 22 22" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M11 14C12.114 14 13 13.1127 13 12C13 10.8873 12.114 10 11 10C9.886 10 9 10.8873 9 12C9 13.1127 9.886 14 11 14Z" fill="#205CF5"/>
|
||||
<path d="M19.0823 10.2539C18.8662 9.1982 18.4442 8.19548 17.8402 7.30312C17.2467 6.42521 16.4906 5.66909 15.6127 5.07562C14.7202 4.47184 13.7175 4.04978 12.6619 3.83354C12.1074 3.72109 11.5429 3.6658 10.9771 3.66854V1.83337L7.33333 4.58337L10.9771 7.33337V5.50187C11.4208 5.50004 11.8644 5.54221 12.2925 5.63021C13.1129 5.79833 13.8923 6.12632 14.586 6.59546C15.2702 7.05676 15.859 7.64561 16.3203 8.32979C17.0366 9.38875 17.4185 10.6383 17.4167 11.9167C17.4165 12.7746 17.2451 13.6238 16.9125 14.4146C16.7507 14.7955 16.5531 15.1602 16.3222 15.5036C16.0904 15.845 15.827 16.1639 15.5357 16.456C14.6483 17.3417 13.5219 17.9492 12.2943 18.2041C11.4407 18.3765 10.5612 18.3765 9.7075 18.2041C8.88668 18.0358 8.10704 17.7075 7.41308 17.238C6.72969 16.7771 6.14148 16.1888 5.68058 15.5055C4.96518 14.4454 4.58306 13.1956 4.58333 11.9167H2.75C2.75098 13.5609 3.24215 15.1675 4.16075 16.5312C4.75461 17.4077 5.50996 18.163 6.38642 18.7569C7.74824 19.6786 9.3556 20.1697 11 20.1667C11.5585 20.1667 12.1156 20.1105 12.6628 19.999C13.7177 19.7811 14.7197 19.3592 15.6127 18.7569C16.0511 18.4615 16.4597 18.1241 16.8328 17.7495C17.2063 17.3749 17.5439 16.9661 17.8411 16.5285C18.762 15.167 19.2528 13.5604 19.25 11.9167C19.25 11.3582 19.1938 10.8011 19.0823 10.2539Z" fill="#205CF5"/>
|
||||
</svg>
|
||||
|