feat(ci): Separate action for loadtests

Author: TheiLLeniumStudios
Date:   2026-01-08 22:52:07 +01:00
parent 922cac120a
commit 958c6c2be7
5 changed files with 677 additions and 150 deletions

.github/actions/loadtest/action.yml (new file)

@@ -0,0 +1,256 @@
name: 'Reloader Load Test'
description: 'Run Reloader load tests with A/B comparison support'

inputs:
  old-ref:
    description: 'Git ref for "old" version (optional, enables A/B comparison)'
    required: false
    default: ''
  new-ref:
    description: 'Git ref for "new" version (defaults to current checkout)'
    required: false
    default: ''
  old-image:
    description: 'Pre-built container image for "old" version (alternative to old-ref)'
    required: false
    default: ''
  new-image:
    description: 'Pre-built container image for "new" version (alternative to new-ref)'
    required: false
    default: ''
  scenarios:
    description: 'Scenarios to run: S1,S4,S6 or all'
    required: false
    default: 'S1,S4,S6'
  test-type:
    description: 'Test type label for summary: quick or full'
    required: false
    default: 'quick'
  duration:
    description: 'Test duration in seconds'
    required: false
    default: '60'
  kind-cluster:
    description: 'Name of existing Kind cluster (if empty, creates new one)'
    required: false
    default: ''
  post-comment:
    description: 'Post results as PR comment'
    required: false
    default: 'false'
  pr-number:
    description: 'PR number for commenting (required if post-comment is true)'
    required: false
    default: ''
  github-token:
    description: 'GitHub token for posting comments'
    required: false
    default: ${{ github.token }}
  comment-header:
    description: 'Optional header text for the comment'
    required: false
    default: ''

outputs:
  status:
    description: 'Overall test status: pass or fail'
    value: ${{ steps.run.outputs.status }}
  summary:
    description: 'Markdown summary of results'
    value: ${{ steps.summary.outputs.summary }}
  pass-count:
    description: 'Number of passed scenarios'
    value: ${{ steps.summary.outputs.pass_count }}
  fail-count:
    description: 'Number of failed scenarios'
    value: ${{ steps.summary.outputs.fail_count }}

runs:
  using: 'composite'
  steps:
    - name: Determine images to use
      id: images
      shell: bash
      run: |
        # Determine old image
        if [ -n "${{ inputs.old-image }}" ]; then
          echo "old=${{ inputs.old-image }}" >> $GITHUB_OUTPUT
        elif [ -n "${{ inputs.old-ref }}" ]; then
          echo "old=localhost/reloader:old" >> $GITHUB_OUTPUT
          echo "build_old=true" >> $GITHUB_OUTPUT
        else
          echo "old=" >> $GITHUB_OUTPUT
        fi
        # Determine new image
        if [ -n "${{ inputs.new-image }}" ]; then
          echo "new=${{ inputs.new-image }}" >> $GITHUB_OUTPUT
        elif [ -n "${{ inputs.new-ref }}" ]; then
          echo "new=localhost/reloader:new" >> $GITHUB_OUTPUT
          echo "build_new=true" >> $GITHUB_OUTPUT
        else
          # Default: build from current checkout
          echo "new=localhost/reloader:new" >> $GITHUB_OUTPUT
          echo "build_new_current=true" >> $GITHUB_OUTPUT
        fi

    - name: Build old image from ref
      if: steps.images.outputs.build_old == 'true'
      shell: bash
      run: |
        CURRENT_SHA=$(git rev-parse HEAD)
        git checkout ${{ inputs.old-ref }}
        docker build -t localhost/reloader:old .
        echo "Built old image from ref: ${{ inputs.old-ref }}"
        git checkout $CURRENT_SHA

    - name: Build new image from ref
      if: steps.images.outputs.build_new == 'true'
      shell: bash
      run: |
        CURRENT_SHA=$(git rev-parse HEAD)
        git checkout ${{ inputs.new-ref }}
        docker build -t localhost/reloader:new .
        echo "Built new image from ref: ${{ inputs.new-ref }}"
        git checkout $CURRENT_SHA

    - name: Build new image from current checkout
      if: steps.images.outputs.build_new_current == 'true'
      shell: bash
      run: |
        docker build -t localhost/reloader:new .
        echo "Built new image from current checkout"

    - name: Build loadtest binary
      shell: bash
      run: |
        cd ${{ github.workspace }}/test/loadtest
        go build -o loadtest ./cmd/loadtest

    - name: Determine cluster name
      id: cluster
      shell: bash
      run: |
        if [ -n "${{ inputs.kind-cluster }}" ]; then
          echo "name=${{ inputs.kind-cluster }}" >> $GITHUB_OUTPUT
          echo "skip=true" >> $GITHUB_OUTPUT
        else
          echo "name=reloader-loadtest" >> $GITHUB_OUTPUT
          echo "skip=false" >> $GITHUB_OUTPUT
        fi

    - name: Load images into Kind
      shell: bash
      run: |
        CLUSTER="${{ steps.cluster.outputs.name }}"
        if [ -n "${{ steps.images.outputs.old }}" ]; then
          echo "Loading old image: ${{ steps.images.outputs.old }}"
          kind load docker-image "${{ steps.images.outputs.old }}" --name "$CLUSTER" || true
        fi
        echo "Loading new image: ${{ steps.images.outputs.new }}"
        kind load docker-image "${{ steps.images.outputs.new }}" --name "$CLUSTER" || true

    - name: Run load tests
      id: run
      shell: bash
      run: |
        cd ${{ github.workspace }}/test/loadtest
        ARGS="--new-image=${{ steps.images.outputs.new }}"
        ARGS="$ARGS --scenario=${{ inputs.scenarios }}"
        ARGS="$ARGS --duration=${{ inputs.duration }}"
        ARGS="$ARGS --cluster-name=${{ steps.cluster.outputs.name }}"
        if [ -n "${{ steps.images.outputs.old }}" ]; then
          ARGS="$ARGS --old-image=${{ steps.images.outputs.old }}"
        fi
        if [ "${{ steps.cluster.outputs.skip }}" = "true" ]; then
          ARGS="$ARGS --skip-cluster"
        fi
        echo "Running: ./loadtest run $ARGS"
        if ./loadtest run $ARGS; then
          echo "status=pass" >> $GITHUB_OUTPUT
        else
          echo "status=fail" >> $GITHUB_OUTPUT
        fi

    - name: Generate summary
      id: summary
      shell: bash
      run: |
        cd ${{ github.workspace }}/test/loadtest
        # Generate markdown summary
        ./loadtest summary \
          --results-dir=./results \
          --test-type=${{ inputs.test-type }} \
          --format=markdown > summary.md 2>/dev/null || true
        # Output to GitHub Step Summary
        cat summary.md >> $GITHUB_STEP_SUMMARY
        # Store summary for output (using heredoc for multiline)
        {
          echo 'summary<<EOF'
          cat summary.md
          echo 'EOF'
        } >> $GITHUB_OUTPUT
        # Get pass/fail counts from JSON
        COUNTS=$(./loadtest summary --format=json 2>/dev/null | head -20 || echo '{}')
        echo "pass_count=$(echo "$COUNTS" | grep -o '"pass_count": [0-9]*' | grep -o '[0-9]*' || echo 0)" >> $GITHUB_OUTPUT
        echo "fail_count=$(echo "$COUNTS" | grep -o '"fail_count": [0-9]*' | grep -o '[0-9]*' || echo 0)" >> $GITHUB_OUTPUT

    - name: Post PR comment
      if: inputs.post-comment == 'true' && inputs.pr-number != ''
      uses: actions/github-script@v7
      with:
        github-token: ${{ inputs.github-token }}
        script: |
          const fs = require('fs');
          const summaryPath = '${{ github.workspace }}/test/loadtest/summary.md';
          let summary = 'No results available';
          try {
            summary = fs.readFileSync(summaryPath, 'utf8');
          } catch (e) {
            console.log('Could not read summary file:', e.message);
          }
          const header = '${{ inputs.comment-header }}';
          const status = '${{ steps.run.outputs.status }}';
          const statusEmoji = status === 'pass' ? ':white_check_mark:' : ':x:';
          const body = [
            header ? header : `## ${statusEmoji} Load Test Results (${{ inputs.test-type }})`,
            '',
            summary,
            '',
            '---',
            `**Artifacts:** [Download](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})`,
          ].join('\n');
          await github.rest.issues.createComment({
            owner: context.repo.owner,
            repo: context.repo.repo,
            issue_number: ${{ inputs.pr-number }},
            body: body
          });

    - name: Upload results
      uses: actions/upload-artifact@v4
      if: always()
      with:
        name: loadtest-${{ inputs.test-type }}-results
        path: |
          ${{ github.workspace }}/test/loadtest/results/
        retention-days: 30

    - name: Cleanup Kind cluster (only if we created it)
      if: always() && steps.cluster.outputs.skip == 'false'
      shell: bash
      run: |
        kind delete cluster --name ${{ steps.cluster.outputs.name }} || true
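
For reference, a minimal sketch of how a workflow job could invoke this composite action with its defaults (new image built from the current checkout, no A/B comparison). The job name and gating step are hypothetical, and the runner is assumed to already have Docker, Go, kind, and kubectl installed:

```yaml
jobs:
  quick-loadtest:                    # hypothetical job name
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      # Tool setup (Go, kind, kubectl) omitted; the action expects them.
      - name: Run quick load tests
        id: loadtest
        uses: ./.github/actions/loadtest
        with:
          scenarios: 'S1,S4,S6'      # the action's default subset
          duration: '30'             # shorter run for illustration
      # The action reports pass/fail via an output rather than failing
      # the step, so gate the job on it explicitly if desired.
      - name: Fail job on load test failure
        if: steps.loadtest.outputs.status == 'fail'
        run: exit 1
```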


@@ -1,4 +1,4 @@
-name: Load Test
+name: Load Test (Full)
 on:
   issue_comment:
@@ -10,6 +10,7 @@ permissions:
   issues: write

 jobs:
+  # Full load test suite triggered by /loadtest command
   loadtest:
     # Only run on PR comments with /loadtest command
     if: |
@@ -45,6 +46,12 @@ jobs:
             core.setOutput('base_sha', pr.data.base.sha);
             console.log(`PR #${context.issue.number}: ${pr.data.head.ref} -> ${pr.data.base.ref}`);
+      - name: Checkout PR branch
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ steps.pr.outputs.head_sha }}
+          fetch-depth: 0 # Full history for building from base ref
       - name: Set up Go
         uses: actions/setup-go@v5
         with:
@@ -66,151 +73,23 @@ jobs:
           chmod +x kubectl
           sudo mv kubectl /usr/local/bin/kubectl
-      # Build OLD image from base branch (e.g., main)
-      - name: Checkout base branch (old)
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ steps.pr.outputs.base_ref }}
-          path: old
-      - name: Build old image
-        run: |
-          cd old
-          docker build -t localhost/reloader:old -f Dockerfile .
-          echo "Built old image from ${{ steps.pr.outputs.base_ref }} (${{ steps.pr.outputs.base_sha }})"
-      # Build NEW image from PR branch
-      - name: Checkout PR branch (new)
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ steps.pr.outputs.head_ref }}
-          path: new
-      - name: Build new image
-        run: |
-          cd new
-          docker build -t localhost/reloader:new -f Dockerfile .
-          echo "Built new image from ${{ steps.pr.outputs.head_ref }} (${{ steps.pr.outputs.head_sha }})"
-      # Build and run loadtest from PR branch
-      - name: Build loadtest tool
-        run: |
-          cd new/test/loadtest
-          go build -o loadtest ./cmd/loadtest
-      - name: Run A/B comparison load test
-        id: loadtest
-        run: |
-          cd new/test/loadtest
-          ./loadtest run \
-            --old-image=localhost/reloader:old \
-            --new-image=localhost/reloader:new \
-            --scenario=all \
-            --duration=60 2>&1 | tee loadtest-output.txt
-          echo "exitcode=${PIPESTATUS[0]}" >> $GITHUB_OUTPUT
-      - name: Upload results
-        uses: actions/upload-artifact@v4
-        if: always()
-        with:
-          name: loadtest-results
-          path: |
-            new/test/loadtest/results/
-            new/test/loadtest/loadtest-output.txt
-          retention-days: 30
-      - name: Post results comment
-        uses: actions/github-script@v7
-        if: always()
-        with:
-          script: |
-            const fs = require('fs');
-            let results = '';
-            const resultsDir = 'new/test/loadtest/results';
-            // Collect summary of all scenarios
-            let passCount = 0;
-            let failCount = 0;
-            const summaries = [];
-            if (fs.existsSync(resultsDir)) {
-              const scenarios = fs.readdirSync(resultsDir).sort();
-              for (const scenario of scenarios) {
-                const reportPath = `${resultsDir}/${scenario}/report.txt`;
-                if (fs.existsSync(reportPath)) {
-                  const report = fs.readFileSync(reportPath, 'utf8');
-                  // Extract status from report
-                  const statusMatch = report.match(/Status:\s+(PASS|FAIL)/);
-                  const status = statusMatch ? statusMatch[1] : 'UNKNOWN';
-                  if (status === 'PASS') passCount++;
-                  else failCount++;
-                  // Extract key metrics for summary
-                  const actionMatch = report.match(/action_total\s+[\d.]+\s+[\d.]+\s+[\d.]+/);
-                  const errorsMatch = report.match(/errors_total\s+[\d.]+\s+[\d.]+/);
-                  summaries.push(`| ${scenario} | ${status === 'PASS' ? '✅' : '❌'} ${status} |`);
-                  results += `\n<details>\n<summary>${status === 'PASS' ? '✅' : '❌'} ${scenario}</summary>\n\n\`\`\`\n${report}\n\`\`\`\n</details>\n`;
-                }
-              }
-            }
-            if (!results) {
-              // Read raw output if no reports
-              if (fs.existsSync('new/test/loadtest/loadtest-output.txt')) {
-                const output = fs.readFileSync('new/test/loadtest/loadtest-output.txt', 'utf8');
-                const maxLen = 60000;
-                results = output.length > maxLen
-                  ? output.substring(output.length - maxLen)
-                  : output;
-                results = `\`\`\`\n${results}\n\`\`\``;
-              } else {
-                results = 'No results available';
-              }
-            }
-            const overallStatus = failCount === 0 ? '✅ ALL PASSED' : `❌ ${failCount} FAILED`;
-            const body = [
-              `## Load Test Results ${overallStatus}`,
-              '',
-              `**Comparing:** \`${{ steps.pr.outputs.base_ref }}\` (old) vs \`${{ steps.pr.outputs.head_ref }}\` (new)`,
-              `**Old commit:** ${{ steps.pr.outputs.base_sha }}`,
-              `**New commit:** ${{ steps.pr.outputs.head_sha }}`,
-              `**Triggered by:** @${{ github.event.comment.user.login }}`,
-              '',
-              '### Summary',
-              '',
-              '| Scenario | Status |',
-              '|----------|--------|',
-              summaries.join('\n'),
-              '',
-              `**Total:** ${passCount} passed, ${failCount} failed`,
-              '',
-              '### Detailed Results',
-              '',
-              results,
-              '',
-              '<details>',
-              '<summary>📦 Download full results</summary>',
-              '',
-              `Artifacts are available in the [workflow run](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}).`,
-              '</details>',
-            ].join('\n');
-            await github.rest.issues.createComment({
-              owner: context.repo.owner,
-              repo: context.repo.repo,
-              issue_number: context.issue.number,
-              body: body
-            });
+      - name: Run full A/B comparison load test
+        id: loadtest
+        uses: ./.github/actions/loadtest
+        with:
+          old-ref: ${{ steps.pr.outputs.base_sha }}
+          new-ref: ${{ steps.pr.outputs.head_sha }}
+          scenarios: 'all'
+          test-type: 'full'
+          post-comment: 'true'
+          pr-number: ${{ github.event.issue.number }}
+          comment-header: |
+            ## Load Test Results (Full A/B Comparison)
+            **Comparing:** `${{ steps.pr.outputs.base_ref }}` → `${{ steps.pr.outputs.head_ref }}`
+            **Triggered by:** @${{ github.event.comment.user.login }}
       - name: Add success reaction
-        if: success()
+        if: steps.loadtest.outputs.status == 'pass'
         uses: actions/github-script@v7
         with:
           script: |
@@ -222,7 +101,7 @@ jobs:
         });
       - name: Add failure reaction
-        if: failure()
+        if: steps.loadtest.outputs.status == 'fail'
         uses: actions/github-script@v7
         with:
           script: |


@@ -35,6 +35,7 @@ jobs:
     permissions:
       contents: read
+      pull-requests: write
     runs-on: ubuntu-latest
     name: Build
@@ -109,6 +110,17 @@ jobs:
       - name: Test
        run: make test
+      - name: Run quick A/B load tests
+        uses: ./.github/actions/loadtest
+        with:
+          old-ref: ${{ github.event.pull_request.base.sha }}
+          # new-ref defaults to current checkout (PR branch)
+          scenarios: 'S1,S4,S6'
+          test-type: 'quick'
+          kind-cluster: 'kind' # Use the existing cluster created above
+          post-comment: 'true'
+          pr-number: ${{ github.event.pull_request.number }}
       - name: Generate Tags
         id: generate_tag
         run: |

Makefile

@@ -169,3 +169,43 @@ yq-install:
 	@curl -sL $(YQ_DOWNLOAD_URL) -o $(YQ_BIN)
 	@chmod +x $(YQ_BIN)
 	@echo "yq $(YQ_VERSION) installed at $(YQ_BIN)"
+
+# =============================================================================
+# Load Testing
+# =============================================================================
+
+LOADTEST_BIN = test/loadtest/loadtest
+LOADTEST_OLD_IMAGE ?= localhost/reloader:old
+LOADTEST_NEW_IMAGE ?= localhost/reloader:new
+LOADTEST_DURATION ?= 60
+LOADTEST_SCENARIOS ?= all
+
+.PHONY: loadtest-build loadtest-quick loadtest-full loadtest loadtest-clean
+
+loadtest-build: ## Build loadtest binary
+	cd test/loadtest && $(GOCMD) build -o loadtest ./cmd/loadtest
+
+loadtest-quick: loadtest-build ## Run quick load tests (S1, S4, S6)
+	cd test/loadtest && ./loadtest run \
+		--old-image=$(LOADTEST_OLD_IMAGE) \
+		--new-image=$(LOADTEST_NEW_IMAGE) \
+		--scenario=S1,S4,S6 \
+		--duration=$(LOADTEST_DURATION)
+
+loadtest-full: loadtest-build ## Run full load test suite
+	cd test/loadtest && ./loadtest run \
+		--old-image=$(LOADTEST_OLD_IMAGE) \
+		--new-image=$(LOADTEST_NEW_IMAGE) \
+		--scenario=all \
+		--duration=$(LOADTEST_DURATION)
+
+loadtest: loadtest-build ## Run load tests with configurable scenarios (default: all)
+	cd test/loadtest && ./loadtest run \
+		--old-image=$(LOADTEST_OLD_IMAGE) \
+		--new-image=$(LOADTEST_NEW_IMAGE) \
+		--scenario=$(LOADTEST_SCENARIOS) \
+		--duration=$(LOADTEST_DURATION)
+
+loadtest-clean: ## Clean loadtest binary and results
+	rm -f $(LOADTEST_BIN)
+	rm -rf test/loadtest/results
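
As a local usage sketch (the docker build commands are illustrative; any tags work as long as they match `LOADTEST_OLD_IMAGE` / `LOADTEST_NEW_IMAGE`):

```bash
# Build the two images to compare; tags must match the Makefile defaults
# (localhost/reloader:old / localhost/reloader:new) unless overridden.
docker build -t localhost/reloader:old .
docker build -t localhost/reloader:new .

# Run a scenario subset with a shorter duration via variable overrides.
make loadtest LOADTEST_SCENARIOS=S1,S4 LOADTEST_DURATION=30

# Remove the loadtest binary and the results directory.
make loadtest-clean
```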


@@ -30,6 +30,15 @@ const (
 	testNamespace = "reloader-test"
 )
+
+// OutputFormat defines the output format for reports.
+type OutputFormat string
+
+const (
+	OutputFormatText     OutputFormat = "text"
+	OutputFormatJSON     OutputFormat = "json"
+	OutputFormatMarkdown OutputFormat = "markdown"
+)

 // workerContext holds all resources for a single worker (cluster + prometheus).
 type workerContext struct {
 	id int
@@ -47,6 +56,7 @@ type Config struct {
 	Scenario     string
 	Duration     int
 	SkipCluster  bool
+	ClusterName  string // Custom cluster name (default: reloader-loadtest)
 	ResultsDir   string
 	ManifestsDir string
 	Parallelism  int // Number of parallel clusters (1 = sequential)
@@ -64,6 +74,8 @@ func main() {
 		runCommand(os.Args[2:])
 	case "report":
 		reportCommand(os.Args[2:])
+	case "summary":
+		summaryCommand(os.Args[2:])
 	case "help", "--help", "-h":
 		printUsage()
 	default:
@@ -78,7 +90,8 @@ func printUsage() {
 Usage:
   loadtest run [options]      Run A/B comparison tests
-  loadtest report [options]   Generate comparison report
+  loadtest report [options]   Generate comparison report for a scenario
+  loadtest summary [options]  Generate summary across all scenarios (for CI)
   loadtest help               Show this help

 Run Options:
@@ -87,13 +100,21 @@ Run Options:
   --scenario=ID        Test scenario: S1-S13 or "all" (default: all)
   --duration=SECONDS   Test duration in seconds (default: 60)
   --parallelism=N      Run N scenarios in parallel on N clusters (default: 1)
-  --skip-cluster       Skip kind cluster creation (use existing, only for parallelism=1)
+  --skip-cluster       Skip kind cluster creation (use existing)
+  --cluster-name=NAME  Kind cluster name (default: reloader-loadtest)
   --results-dir=DIR    Directory for results (default: ./results)

 Report Options:
   --scenario=ID        Scenario to report on (required)
   --results-dir=DIR    Directory containing results (default: ./results)
   --output=FILE        Output file (default: stdout)
+  --format=FORMAT      Output format: text, json, markdown (default: text)
+
+Summary Options:
+  --results-dir=DIR    Directory containing results (default: ./results)
+  --output=FILE        Output file (default: stdout)
+  --format=FORMAT      Output format: text, json, markdown (default: markdown)
+  --test-type=TYPE     Test type label: quick, full (default: full)

 Examples:
   # Compare two images
@@ -111,8 +132,14 @@ Examples:
   # Run all 13 scenarios in parallel (one cluster per scenario)
   loadtest run --new-image=localhost/reloader:test --parallelism=13

-  # Generate report
+  # Generate report for a scenario
   loadtest report --scenario=S2 --results-dir=./results
+
+  # Generate JSON report
+  loadtest report --scenario=S2 --format=json
+
+  # Generate markdown summary for CI
+  loadtest summary --results-dir=./results --format=markdown
 `)
 }
@@ -122,6 +149,7 @@ func parseArgs(args []string) Config {
 		Duration:    60,
 		ResultsDir:  "./results",
 		Parallelism: 1,
+		ClusterName: clusterName, // default
 	}

 	// Find manifests dir relative to executable or current dir
@@ -151,6 +179,8 @@ func parseArgs(args []string) Config {
 			}
 		case arg == "--skip-cluster":
 			cfg.SkipCluster = true
+		case strings.HasPrefix(arg, "--cluster-name="):
+			cfg.ClusterName = strings.TrimPrefix(arg, "--cluster-name=")
 		case strings.HasPrefix(arg, "--results-dir="):
 			cfg.ResultsDir = strings.TrimPrefix(arg, "--results-dir=")
 		case strings.HasPrefix(arg, "--manifests-dir="):
@@ -234,15 +264,15 @@ func runCommand(args []string) {
 func runSequential(ctx context.Context, cfg Config, scenariosToRun []string, runtime string, runOld, runNew, runBoth bool) {
 	// Create cluster manager
 	clusterMgr := cluster.NewManager(cluster.Config{
-		Name:             clusterName,
+		Name:             cfg.ClusterName,
 		ContainerRuntime: runtime,
 	})

 	// Create/verify cluster
 	if cfg.SkipCluster {
-		log.Println("Skipping cluster creation (using existing)")
+		log.Printf("Skipping cluster creation (using existing cluster: %s)", cfg.ClusterName)
 		if !clusterMgr.Exists() {
-			log.Fatalf("Cluster %s does not exist. Remove --skip-cluster to create it.", clusterName)
+			log.Fatalf("Cluster %s does not exist. Remove --skip-cluster to create it.", cfg.ClusterName)
 		}
 	} else {
 		log.Println("Creating kind cluster...")
@@ -781,6 +811,7 @@ func cleanupReloader(ctx context.Context, version string, kubeContext string) {
 func reportCommand(args []string) {
 	var scenarioID, resultsDir, outputFile string
+	format := OutputFormatText
 	resultsDir = "./results"

 	for _, arg := range args {
@@ -791,6 +822,8 @@ func reportCommand(args []string) {
 			resultsDir = strings.TrimPrefix(arg, "--results-dir=")
 		case strings.HasPrefix(arg, "--output="):
 			outputFile = strings.TrimPrefix(arg, "--output=")
+		case strings.HasPrefix(arg, "--format="):
+			format = OutputFormat(strings.TrimPrefix(arg, "--format="))
 		}
 	}
@@ -803,7 +836,15 @@ func reportCommand(args []string) {
 		log.Fatalf("Failed to generate report: %v", err)
 	}

-	output := renderScenarioReport(report)
+	var output string
+	switch format {
+	case OutputFormatJSON:
+		output = renderScenarioReportJSON(report)
+	case OutputFormatMarkdown:
+		output = renderScenarioReportMarkdown(report)
+	default:
+		output = renderScenarioReport(report)
+	}

 	if outputFile != "" {
 		if err := os.WriteFile(outputFile, []byte(output), 0644); err != nil {
@@ -1584,3 +1625,302 @@ func renderScenarioReport(report *ScenarioReport) string {
 	return sb.String()
 }
+
+// renderScenarioReportJSON renders a scenario report as JSON.
+func renderScenarioReportJSON(report *ScenarioReport) string {
+	data, err := json.MarshalIndent(report, "", "  ")
+	if err != nil {
+		return fmt.Sprintf(`{"error": "%s"}`, err.Error())
+	}
+	return string(data)
+}
+
+// renderScenarioReportMarkdown renders a scenario report as concise markdown.
+func renderScenarioReportMarkdown(report *ScenarioReport) string {
+	var sb strings.Builder
+
+	// Status emoji
+	emoji := "✅"
+	if report.OverallStatus != "PASS" {
+		emoji = "❌"
+	}
+	sb.WriteString(fmt.Sprintf("## %s %s: %s\n\n", emoji, report.Scenario, report.OverallStatus))
+
+	if report.TestDescription != "" {
+		sb.WriteString(fmt.Sprintf("> %s\n\n", report.TestDescription))
+	}
+
+	// Key metrics table
+	sb.WriteString("| Metric | Value | Expected | Status |\n")
+	sb.WriteString("|--------|------:|:--------:|:------:|\n")
+
+	// Show only key metrics
+	keyMetrics := []string{"action_total", "reload_executed_total", "errors_total", "reconcile_total"}
+	for _, name := range keyMetrics {
+		for _, c := range report.Comparisons {
+			if c.Name == name {
+				value := fmt.Sprintf("%.0f", c.NewValue)
+				expected := "-"
+				if c.Expected > 0 {
+					expected = fmt.Sprintf("%.0f", c.Expected)
+				}
+				status := "✅"
+				if c.Status == "fail" {
+					status = "❌"
+				} else if c.Status == "info" {
+					status = ""
+				}
+				sb.WriteString(fmt.Sprintf("| %s | %s | %s | %s |\n", c.DisplayName, value, expected, status))
+				break
+			}
+		}
+	}
+
+	return sb.String()
+}
+
+// ============================================================================
+// SUMMARY COMMAND
+// ============================================================================
+
+// SummaryReport aggregates results from multiple scenarios.
+type SummaryReport struct {
+	Timestamp  time.Time         `json:"timestamp"`
+	TestType   string            `json:"test_type"`
+	PassCount  int               `json:"pass_count"`
+	FailCount  int               `json:"fail_count"`
+	TotalCount int               `json:"total_count"`
+	Scenarios  []ScenarioSummary `json:"scenarios"`
+}
+
+// ScenarioSummary provides a brief summary of a single scenario.
+type ScenarioSummary struct {
+	ID          string  `json:"id"`
+	Status      string  `json:"status"`
+	Description string  `json:"description"`
+	ActionTotal float64 `json:"action_total"`
+	ActionExp   float64 `json:"action_expected"`
+	ErrorsTotal float64 `json:"errors_total"`
+}
+
+func summaryCommand(args []string) {
+	var resultsDir, outputFile, testType string
+	format := OutputFormatMarkdown // Default to markdown for CI
+	resultsDir = "./results"
+	testType = "full"
+
+	for _, arg := range args {
+		switch {
+		case strings.HasPrefix(arg, "--results-dir="):
+			resultsDir = strings.TrimPrefix(arg, "--results-dir=")
+		case strings.HasPrefix(arg, "--output="):
+			outputFile = strings.TrimPrefix(arg, "--output=")
+		case strings.HasPrefix(arg, "--format="):
+			format = OutputFormat(strings.TrimPrefix(arg, "--format="))
+		case strings.HasPrefix(arg, "--test-type="):
+			testType = strings.TrimPrefix(arg, "--test-type=")
+		}
+	}
+
+	summary, err := generateSummaryReport(resultsDir, testType)
+	if err != nil {
+		log.Fatalf("Failed to generate summary: %v", err)
+	}
+
+	var output string
+	switch format {
+	case OutputFormatJSON:
+		output = renderSummaryJSON(summary)
+	case OutputFormatText:
+		output = renderSummaryText(summary)
+	default:
+		output = renderSummaryMarkdown(summary)
+	}
+
+	if outputFile != "" {
+		if err := os.WriteFile(outputFile, []byte(output), 0644); err != nil {
+			log.Fatalf("Failed to write output file: %v", err)
+		}
+		log.Printf("Summary written to %s", outputFile)
+	} else {
+		fmt.Print(output)
+	}
+
+	// Exit with non-zero status if any tests failed
+	if summary.FailCount > 0 {
+		os.Exit(1)
+	}
+}
+
+func generateSummaryReport(resultsDir, testType string) (*SummaryReport, error) {
+	summary := &SummaryReport{
+		Timestamp: time.Now(),
+		TestType:  testType,
+	}
+
+	// Find all scenario directories
+	entries, err := os.ReadDir(resultsDir)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read results directory: %w", err)
+	}
+
+	for _, entry := range entries {
+		if !entry.IsDir() || !strings.HasPrefix(entry.Name(), "S") {
+			continue
+		}
+		scenarioID := entry.Name()
+
+		report, err := generateScenarioReport(scenarioID, resultsDir)
+		if err != nil {
+			log.Printf("Warning: failed to load scenario %s: %v", scenarioID, err)
+			continue
+		}
+
+		scenarioSummary := ScenarioSummary{
+			ID:          scenarioID,
+			Status:      report.OverallStatus,
+			Description: report.TestDescription,
+		}
+
+		// Extract key metrics
+		for _, c := range report.Comparisons {
+			switch c.Name {
+			case "action_total":
+				scenarioSummary.ActionTotal = c.NewValue
+				scenarioSummary.ActionExp = c.Expected
+			case "errors_total":
+				scenarioSummary.ErrorsTotal = c.NewValue
+			}
+		}
+
+		summary.Scenarios = append(summary.Scenarios, scenarioSummary)
+		summary.TotalCount++
+		if report.OverallStatus == "PASS" {
+			summary.PassCount++
+		} else {
+			summary.FailCount++
+		}
+	}
+
+	// Sort scenarios by ID
+	sort.Slice(summary.Scenarios, func(i, j int) bool {
+		return naturalSort(summary.Scenarios[i].ID, summary.Scenarios[j].ID)
+	})
+
+	return summary, nil
+}
+
+// naturalSort compares two scenario IDs (S1, S2, ..., S10, S11)
+func naturalSort(a, b string) bool {
+	var aNum, bNum int
+	fmt.Sscanf(a, "S%d", &aNum)
+	fmt.Sscanf(b, "S%d", &bNum)
+	return aNum < bNum
+}
+
+func renderSummaryJSON(summary *SummaryReport) string {
+	data, err := json.MarshalIndent(summary, "", "  ")
+	if err != nil {
+		return fmt.Sprintf(`{"error": "%s"}`, err.Error())
+	}
+	return string(data)
+}
+
+func renderSummaryText(summary *SummaryReport) string {
+	var sb strings.Builder
+	sb.WriteString("================================================================================\n")
+	sb.WriteString("                              LOAD TEST SUMMARY\n")
+	sb.WriteString("================================================================================\n\n")
+
+	passRate := 0
+	if summary.TotalCount > 0 {
+		passRate = summary.PassCount * 100 / summary.TotalCount
+	}
+	fmt.Fprintf(&sb, "Test Type: %s\n", summary.TestType)
+	fmt.Fprintf(&sb, "Results:   %d/%d passed (%d%%)\n\n", summary.PassCount, summary.TotalCount, passRate)
+
+	fmt.Fprintf(&sb, "%-6s %-8s %-45s %10s %8s\n", "ID", "Status", "Description", "Actions", "Errors")
+	fmt.Fprintf(&sb, "%-6s %-8s %-45s %10s %8s\n", "------", "--------", strings.Repeat("-", 45), "----------", "--------")
+	for _, s := range summary.Scenarios {
+		desc := s.Description
+		if len(desc) > 45 {
+			desc = desc[:42] + "..."
+		}
+		actions := fmt.Sprintf("%.0f", s.ActionTotal)
+		if s.ActionExp > 0 {
+			actions = fmt.Sprintf("%.0f/%.0f", s.ActionTotal, s.ActionExp)
+		}
+		fmt.Fprintf(&sb, "%-6s %-8s %-45s %10s %8.0f\n", s.ID, s.Status, desc, actions, s.ErrorsTotal)
+	}
+
+	sb.WriteString("\n================================================================================\n")
+	return sb.String()
+}
+
+func renderSummaryMarkdown(summary *SummaryReport) string {
+	var sb strings.Builder
+
+	// Overall status
+	emoji := "✅"
+	title := "ALL TESTS PASSED"
+	if summary.FailCount > 0 {
+		emoji = "❌"
+		title = fmt.Sprintf("%d TEST(S) FAILED", summary.FailCount)
+	} else if summary.TotalCount == 0 {
+		emoji = "⚠️"
+		title = "NO RESULTS"
+	}
+	sb.WriteString(fmt.Sprintf("## %s Load Test Results: %s\n\n", emoji, title))
+
+	// Test type note
+	if summary.TestType == "quick" {
+		sb.WriteString("> 🚀 **Quick Test** (S1, S4, S6) — Use `/loadtest` for full suite\n\n")
+	}
+
+	// Pass rate
+	passRate := 0
+	if summary.TotalCount > 0 {
+		passRate = summary.PassCount * 100 / summary.TotalCount
+	}
+	sb.WriteString(fmt.Sprintf("**%d/%d passed** (%d%%)\n\n", summary.PassCount, summary.TotalCount, passRate))
+
+	// Results table
+	sb.WriteString("| | Scenario | Description | Actions | Errors |\n")
+	sb.WriteString("|:-:|:--------:|-------------|:-------:|:------:|\n")
+	for _, s := range summary.Scenarios {
+		icon := "✅"
+		if s.Status != "PASS" {
+			icon = "❌"
+		}
+		// Truncate description
+		desc := s.Description
+		if len(desc) > 45 {
+			desc = desc[:42] + "..."
+		}
+		// Format actions
+		actions := fmt.Sprintf("%.0f", s.ActionTotal)
+		if s.ActionExp > 0 {
+			actions = fmt.Sprintf("%.0f/%.0f", s.ActionTotal, s.ActionExp)
+		}
+		// Format errors
+		errors := fmt.Sprintf("%.0f", s.ErrorsTotal)
+		if s.ErrorsTotal > 0 {
+			errors = fmt.Sprintf("⚠️ %.0f", s.ErrorsTotal)
+		}
+		sb.WriteString(fmt.Sprintf("| %s | **%s** | %s | %s | %s |\n", icon, s.ID, desc, actions, errors))
+	}
+
+	sb.WriteString("\n📦 **[Download detailed results](../artifacts)**\n")
+	return sb.String()
+}
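
Since `summaryCommand` calls `os.Exit(1)` whenever any scenario failed, the summary step can double as a CI gate. A sketch of that pattern, assuming the binary was built in `test/loadtest` as in the action above:

```bash
cd test/loadtest
# --output writes the markdown report; the exit code reflects pass/fail.
if ./loadtest summary --results-dir=./results --format=markdown --output=summary.md; then
  echo "All scenarios passed"
else
  echo "One or more scenarios failed" >&2
fi
```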