Mirror of https://github.com/stakater/Reloader.git (synced 2026-02-14 18:09:50 +00:00)

Compare commits: 220 commits
Commits (SHA1 only; the author and date columns were empty in this mirror view):

9aa957c839 20e2680539 f51b62dee9 f776e2dfd0 003fbbfa1f 8834ab097d 3ee87d3725 5c3593fb1c 8537502bbd fd5f03adfb
32899e1983 6c15e5db24 4a95a813cd 3dd2741102 16ff7f6ac9 1be910749b 1945a740d0 07f7365d63 ad6013adbf a132ed8dea
2674f405ce e56323d582 c4f3255c78 2442eddd81 76287e0420 322c4bc130 958c6c2be7 922cac120a b945e5e828 1652c62775
193f64c0ec f7210204d4 eb3bc2447e 27f49ecc0f 8373b1e76c a419b07e02 fdd2474b3f 4c0883b4cf 157cf0f2e4 6fd7c8254a
703319e732 b0ca635e49 8b64c9b9cd e1db875efe 5b63610f4f 512278d740 9a3edf13d2 eb38bf7470 4b90335362 7e9d571e1e
0f1d02e975 85f1c13de9 9c8c511ae5 109971d8b7 c9cab4f6e0 eb96bab4e0 779c3b0895 a5d1012570 ebac11f904 0505dfb7c2
232bbdc68c 8545fe8d8d 32ac22dc33 3afe895045 64a18ff207 b71fb19882 27fb47ff52 04dec609f0 1725f17b0b 4f8b22e954
cdaed4a8af d409b79a11 c3546066fa 7080ec27cc 6f1ecffb25 765053f21e fd9b7e2c1f bfb720e9e9 e9114d3455 10b3e077b5
9607da6d8a 32046ebfe0 5d4b9f5a32 620959a03b 008c45e9ac fa201d9762 174b57cdad 4476fad274 16b26be5c2 7c429714ae
64c3d8487b 405069e691 4694b7570e 3a9ca713bb 3de9c688f2 90b9713b7f 9139f838cf 59738b2d6d 91bdb47dad 2835e5952f
cadf4489e8 32f83fabc9 09f2a63b00 c860dcc402 5f2cf19213 8980e1fd80 644e5d51d3 65dc259b7b 3cf845b596 9af46a363c
999141df8c e99bb34451 196373a688 c3022c1255 c988b77933 e3e7cef752 f7d4fca874 956b3934da 39352e4f4d a43dcc7b85
0078e3f814 acaa00e256 dffed992d6 eff894e919 6d640e2ca1 11c99a7c13 03c3f5947b 1084574bd0 3103e5ac4d a77c10a2c6
bd767a7ef1 3a1cc8f348 dd0807e951 b8edc25177 f9d658d3b4 816ad6d430 19a76258d0 aa481d9568 177d2756a8 9b2af6f9b7
7c4899a7eb 54d44858f8 6304a9e5ab 1e6a6ec2d9 42cd7e71a2 1107fee109 9e33dac9ef 517fd33fb1 1e46d44c7c 49409dce54
9039956c32 82eb8d8b87 7af0728990 d9b36a56b5 dcf4b0d0f6 b8b7cdb610 d2580930e4 e0150145ec 52ac7e307d e311fe2fff
dec3410b7f 1e1f094516 a9566fa672 51ee0a19bb 19918e6fa8 95cea97d34 d22a0f25de 2d2c35fcf4 cf600f7761 01f62cf823
85bd7a075c 3999edb7a3 f69773c588 8b257a3f0c 93936d12c1 2d810f9824 99c45b3ca3 81315adc9b ad0407517d cafa14f0e7
5089955691 379c428283 404c3700f7 df812c555c e8cb97a6d1 58ad781c0c be09beac29 516f9e8bc5 e69ea41ece c2107cccbb
7ea10b48ec a6abbd278c 70e58394d1 c807e6deaf 52815a4ee1 4679d21e26 172de75f01 4eaaf2da79 144cc910af 6faffdc0cf
c5481c6e7b a47d927422 70e0598833 85bea39568 97e74ad11b 9c77e27b2c ad8e6f78a0 93ba31821e 5139d65f9c 023425d4e1
8c2f2e574c b6d538dca8 e7e095cb4b 570649e56b 69b0d93f31 717291f173 75f9a23de3 3c39406ca9 6d1d017aa4 f6887b4d8a
.github/actions/loadtest/action.yml (vendored, new file, 267 lines)
@@ -0,0 +1,267 @@
name: 'Reloader Load Test'
description: 'Run Reloader load tests with A/B comparison support'

inputs:
  old-ref:
    description: 'Git ref for "old" version (optional, enables A/B comparison)'
    required: false
    default: ''
  new-ref:
    description: 'Git ref for "new" version (defaults to current checkout)'
    required: false
    default: ''
  old-image:
    description: 'Pre-built container image for "old" version (alternative to old-ref)'
    required: false
    default: ''
  new-image:
    description: 'Pre-built container image for "new" version (alternative to new-ref)'
    required: false
    default: ''
  scenarios:
    description: 'Scenarios to run: S1,S4,S6 or all'
    required: false
    default: 'S1,S4,S6'
  test-type:
    description: 'Test type label for summary: quick or full'
    required: false
    default: 'quick'
  duration:
    description: 'Test duration in seconds'
    required: false
    default: '60'
  kind-cluster:
    description: 'Name of existing Kind cluster (if empty, creates new one)'
    required: false
    default: ''
  post-comment:
    description: 'Post results as PR comment'
    required: false
    default: 'false'
  pr-number:
    description: 'PR number for commenting (required if post-comment is true)'
    required: false
    default: ''
  github-token:
    description: 'GitHub token for posting comments'
    required: false
    default: ${{ github.token }}
  comment-header:
    description: 'Optional header text for the comment'
    required: false
    default: ''

outputs:
  status:
    description: 'Overall test status: pass or fail'
    value: ${{ steps.run.outputs.status }}
  summary:
    description: 'Markdown summary of results'
    value: ${{ steps.summary.outputs.summary }}
  pass-count:
    description: 'Number of passed scenarios'
    value: ${{ steps.summary.outputs.pass_count }}
  fail-count:
    description: 'Number of failed scenarios'
    value: ${{ steps.summary.outputs.fail_count }}

runs:
  using: 'composite'
  steps:
    - name: Determine images to use
      id: images
      shell: bash
      run: |
        # Determine old image
        if [ -n "${{ inputs.old-image }}" ]; then
          echo "old=${{ inputs.old-image }}" >> $GITHUB_OUTPUT
        elif [ -n "${{ inputs.old-ref }}" ]; then
          echo "old=localhost/reloader:old" >> $GITHUB_OUTPUT
          echo "build_old=true" >> $GITHUB_OUTPUT
        else
          echo "old=" >> $GITHUB_OUTPUT
        fi

        # Determine new image
        if [ -n "${{ inputs.new-image }}" ]; then
          echo "new=${{ inputs.new-image }}" >> $GITHUB_OUTPUT
        elif [ -n "${{ inputs.new-ref }}" ]; then
          echo "new=localhost/reloader:new" >> $GITHUB_OUTPUT
          echo "build_new=true" >> $GITHUB_OUTPUT
        else
          # Default: build from current checkout
          echo "new=localhost/reloader:new" >> $GITHUB_OUTPUT
          echo "build_new_current=true" >> $GITHUB_OUTPUT
        fi

    - name: Build old image from ref
      if: steps.images.outputs.build_old == 'true'
      shell: bash
      run: |
        CURRENT_SHA=$(git rev-parse HEAD)
        git checkout ${{ inputs.old-ref }}
        docker build -t localhost/reloader:old .
        echo "Built old image from ref: ${{ inputs.old-ref }}"
        git checkout $CURRENT_SHA

    - name: Build new image from ref
      if: steps.images.outputs.build_new == 'true'
      shell: bash
      run: |
        CURRENT_SHA=$(git rev-parse HEAD)
        git checkout ${{ inputs.new-ref }}
        docker build -t localhost/reloader:new .
        echo "Built new image from ref: ${{ inputs.new-ref }}"
        git checkout $CURRENT_SHA

    - name: Build new image from current checkout
      if: steps.images.outputs.build_new_current == 'true'
      shell: bash
      run: |
        docker build -t localhost/reloader:new .
        echo "Built new image from current checkout"

    - name: Build loadtest binary
      shell: bash
      run: |
        cd ${{ github.workspace }}/test/loadtest
        go build -o loadtest ./cmd/loadtest

    - name: Determine cluster name
      id: cluster
      shell: bash
      run: |
        if [ -n "${{ inputs.kind-cluster }}" ]; then
          echo "name=${{ inputs.kind-cluster }}" >> $GITHUB_OUTPUT
          echo "skip=true" >> $GITHUB_OUTPUT
        else
          echo "name=reloader-loadtest" >> $GITHUB_OUTPUT
          echo "skip=false" >> $GITHUB_OUTPUT
        fi

    - name: Load images into Kind
      shell: bash
      run: |
        CLUSTER="${{ steps.cluster.outputs.name }}"

        if [ -n "${{ steps.images.outputs.old }}" ]; then
          echo "Loading old image: ${{ steps.images.outputs.old }}"
          kind load docker-image "${{ steps.images.outputs.old }}" --name "$CLUSTER" || true
        fi

        echo "Loading new image: ${{ steps.images.outputs.new }}"
        kind load docker-image "${{ steps.images.outputs.new }}" --name "$CLUSTER" || true

    - name: Run load tests
      id: run
      shell: bash
      run: |
        cd ${{ github.workspace }}/test/loadtest

        ARGS="--new-image=${{ steps.images.outputs.new }}"
        ARGS="$ARGS --scenario=${{ inputs.scenarios }}"
        ARGS="$ARGS --duration=${{ inputs.duration }}"
        ARGS="$ARGS --cluster-name=${{ steps.cluster.outputs.name }}"
        ARGS="$ARGS --skip-image-load"

        if [ -n "${{ steps.images.outputs.old }}" ]; then
          ARGS="$ARGS --old-image=${{ steps.images.outputs.old }}"
        fi

        if [ "${{ steps.cluster.outputs.skip }}" = "true" ]; then
          ARGS="$ARGS --skip-cluster"
        fi

        echo "Running: ./loadtest run $ARGS"
        if ./loadtest run $ARGS; then
          echo "status=pass" >> $GITHUB_OUTPUT
        else
          echo "status=fail" >> $GITHUB_OUTPUT
        fi

    - name: Generate summary
      id: summary
      shell: bash
      run: |
        cd ${{ github.workspace }}/test/loadtest

        # Generate markdown summary
        ./loadtest summary \
          --results-dir=./results \
          --test-type=${{ inputs.test-type }} \
          --format=markdown > summary.md 2>/dev/null || true

        # Output to GitHub Step Summary
        cat summary.md >> $GITHUB_STEP_SUMMARY

        # Store summary for output (using heredoc for multiline)
        {
          echo 'summary<<EOF'
          cat summary.md
          echo 'EOF'
        } >> $GITHUB_OUTPUT

        # Get pass/fail counts from JSON
        COUNTS=$(./loadtest summary --format=json 2>/dev/null | head -20 || echo '{}')
        echo "pass_count=$(echo "$COUNTS" | grep -o '"pass_count": [0-9]*' | grep -o '[0-9]*' || echo 0)" >> $GITHUB_OUTPUT
        echo "fail_count=$(echo "$COUNTS" | grep -o '"fail_count": [0-9]*' | grep -o '[0-9]*' || echo 0)" >> $GITHUB_OUTPUT

    - name: Post PR comment
      if: inputs.post-comment == 'true' && inputs.pr-number != ''
      continue-on-error: true
      uses: actions/github-script@v7
      with:
        github-token: ${{ inputs.github-token }}
        script: |
          const fs = require('fs');
          const summaryPath = '${{ github.workspace }}/test/loadtest/summary.md';
          let summary = 'No results available';
          try {
            summary = fs.readFileSync(summaryPath, 'utf8');
          } catch (e) {
            console.log('Could not read summary file:', e.message);
          }

          const header = '${{ inputs.comment-header }}';
          const status = '${{ steps.run.outputs.status }}';
          const statusEmoji = status === 'pass' ? ':white_check_mark:' : ':x:';

          const body = [
            header ? header : `## ${statusEmoji} Load Test Results (${{ inputs.test-type }})`,
            '',
            summary,
            '',
            '---',
            `**Artifacts:** [Download](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})`,
          ].join('\n');

          try {
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: ${{ inputs.pr-number }},
              body: body
            });
            console.log('Comment posted successfully');
          } catch (error) {
            if (error.status === 403) {
              console.log('Could not post comment (fork PR with restricted permissions). Use /loadtest command to run with comment posting.');
            } else {
              throw error;
            }
          }

    - name: Upload results
      uses: actions/upload-artifact@v4
      if: always()
      with:
        name: loadtest-${{ inputs.test-type }}-results
        path: |
          ${{ github.workspace }}/test/loadtest/results/
        retention-days: 30

    - name: Cleanup Kind cluster (only if we created it)
      if: always() && steps.cluster.outputs.skip == 'false'
      shell: bash
      run: |
        kind delete cluster --name ${{ steps.cluster.outputs.name }} || true
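For orientation, a minimal caller of this composite action could look like the sketch below; the workflows later in this diff (`pull_request.yaml`, `loadtest.yml`) use exactly this shape. Job and step names here are illustrative, and the inputs mirror the defaults declared above.

```yaml
# Hypothetical workflow job sketch, not part of this change.
jobs:
  quick-loadtest:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - name: Run Reloader load tests
        id: lt
        uses: ./.github/actions/loadtest
        with:
          scenarios: 'S1,S4,S6'   # or 'all'
          test-type: 'quick'
          duration: '60'
      - name: Fail the job if any scenario failed
        if: steps.lt.outputs.status == 'fail'
        run: exit 1
```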
.github/md_config.json (vendored, 3 changed lines)
@@ -3,5 +3,6 @@
     {
       "pattern": "^(?!http).+"
     }
-  ]
+  ],
+  "retryOn429": true
 }
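For context, after this change the whole file plausibly reads as follows. This is a sketch: the `ignorePatterns` key is an assumption inferred from the markdown-link-check config format, since the hunk only starts at line 3.

```json
{
  "ignorePatterns": [
    {
      "pattern": "^(?!http).+"
    }
  ],
  "retryOn429": true
}
```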
.github/workflows/init-branch-release.yaml (vendored, 4 changed lines)
@@ -23,7 +23,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4.2.2
+        uses: actions/checkout@v5.0.0
         with:
           fetch-depth: 0
           token: ${{ secrets.GITHUB_TOKEN }}
@@ -57,7 +57,7 @@ jobs:
           git diff

       - name: Create pull request
-        uses: peter-evans/create-pull-request@v7.0.6
+        uses: peter-evans/create-pull-request@v7.0.8
         with:
           commit-message: "Bump version to ${{ inputs.TARGET_VERSION }}"
           title: "Bump version to ${{ inputs.TARGET_VERSION }} on ${{ inputs.TARGET_BRANCH }} branch"
.github/workflows/loadtest.yml (vendored, new file, 112 lines)
@@ -0,0 +1,112 @@
name: Load Test (Full)

on:
  issue_comment:
    types: [created]

permissions:
  contents: read
  pull-requests: write
  issues: write

jobs:
  loadtest:
    # Only run on PR comments with /loadtest command
    if: |
      github.event.issue.pull_request &&
      contains(github.event.comment.body, '/loadtest')
    runs-on: ubuntu-latest

    steps:
      - name: Add reaction to comment
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.reactions.createForIssueComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              comment_id: context.payload.comment.id,
              content: 'rocket'
            });

      - name: Get PR details
        id: pr
        uses: actions/github-script@v7
        with:
          script: |
            const pr = await github.rest.pulls.get({
              owner: context.repo.owner,
              repo: context.repo.repo,
              pull_number: context.issue.number
            });
            core.setOutput('head_ref', pr.data.head.ref);
            core.setOutput('head_sha', pr.data.head.sha);
            core.setOutput('base_ref', pr.data.base.ref);
            core.setOutput('base_sha', pr.data.base.sha);
            console.log(`PR #${context.issue.number}: ${pr.data.head.ref} -> ${pr.data.base.ref}`);

      - name: Checkout PR branch
        uses: actions/checkout@v4
        with:
          ref: ${{ steps.pr.outputs.head_sha }}
          fetch-depth: 0 # Full history for building from base ref

      - name: Set up Go
        uses: actions/setup-go@v5
        with:
          go-version: '1.26'
          cache: false

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Install kind
        run: |
          curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-amd64
          chmod +x ./kind
          sudo mv ./kind /usr/local/bin/kind

      - name: Install kubectl
        run: |
          curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
          chmod +x kubectl
          sudo mv kubectl /usr/local/bin/kubectl

      - name: Run full A/B comparison load test
        id: loadtest
        uses: ./.github/actions/loadtest
        with:
          old-ref: ${{ steps.pr.outputs.base_sha }}
          new-ref: ${{ steps.pr.outputs.head_sha }}
          scenarios: 'all'
          test-type: 'full'
          post-comment: 'true'
          pr-number: ${{ github.event.issue.number }}
          comment-header: |
            ## Load Test Results (Full A/B Comparison)
            **Comparing:** `${{ steps.pr.outputs.base_ref }}` → `${{ steps.pr.outputs.head_ref }}`
            **Triggered by:** @${{ github.event.comment.user.login }}

      - name: Add success reaction
        if: steps.loadtest.outputs.status == 'pass'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.reactions.createForIssueComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              comment_id: context.payload.comment.id,
              content: '+1'
            });

      - name: Add failure reaction
        if: steps.loadtest.outputs.status == 'fail'
        uses: actions/github-script@v7
        with:
          script: |
            await github.rest.reactions.createForIssueComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              comment_id: context.payload.comment.id,
              content: '-1'
            });
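Because this workflow fires on `issue_comment` events, a maintainer triggers it simply by commenting `/loadtest` on a pull request, for example from the GitHub CLI (PR number illustrative):

```bash
# Post the trigger comment on PR #123 (number illustrative)
gh pr comment 123 --repo stakater/Reloader --body "/loadtest"
```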
.github/workflows/pull_request-helm.yaml (vendored, 4 changed lines)
@@ -26,7 +26,7 @@ jobs:
     steps:

       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           ref: ${{github.event.pull_request.head.sha}}
           fetch-depth: 0
@@ -55,7 +55,7 @@ jobs:
     steps:

       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           ref: ${{github.event.pull_request.head.sha}}
           fetch-depth: 0
.github/workflows/pull_request.yaml (vendored, 44 changed lines)
@@ -22,10 +22,11 @@ env:
   KUBERNETES_VERSION: "1.30.0"
   KIND_VERSION: "0.23.0"
   REGISTRY: ghcr.io
+  RELOADER_EDITION: oss

 jobs:
   qa:
-    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.131
+    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.163
     with:
       MD_CONFIG: .github/md_config.json
       DOC_SRC: README.md
@@ -35,12 +36,14 @@

     permissions:
       contents: read
+      pull-requests: write
+      issues: write

     runs-on: ubuntu-latest
     name: Build
     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           ref: ${{github.event.pull_request.head.sha}}
           fetch-depth: 0
@@ -57,12 +60,17 @@
           charts: deployments/kubernetes/chart/reloader

       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
         with:
           go-version-file: 'go.mod'
           check-latest: true
           cache: true

+      - name: Create timestamp
+        id: prep
+        run: echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
+
       # Get highest tag and remove any suffixes with '-'
       - name: Get Highest tag
         id: highest_tag
@@ -75,11 +83,7 @@
           make install

       - name: Run golangci-lint
-        uses: golangci/golangci-lint-action@v6
-        with:
-          version: latest
-          only-new-issues: false
-          args: --timeout 10m
+        run: make lint

       - name: Helm Lint
         run: |
@@ -104,9 +108,21 @@
           kind create cluster
           kubectl cluster-info

       - name: Test
         run: make test

+      - name: Run quick A/B load tests
+        uses: ./.github/actions/loadtest
+        with:
+          old-ref: ${{ github.event.pull_request.base.sha }}
+          # new-ref defaults to current checkout (PR branch)
+          scenarios: 'S1,S4,S6'
+          test-type: 'quick'
+          kind-cluster: 'kind' # Use the existing cluster created above
+          post-comment: 'true'
+          pr-number: ${{ github.event.pull_request.number }}
+
       - name: Generate Tags
         id: generate_tag
         run: |
@@ -135,7 +151,13 @@
           file: ${{ env.DOCKER_FILE_PATH }}
           pull: true
           push: false
-          build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
+          build-args: |
+            VERSION=merge-${{ steps.generate_tag.outputs.GIT_TAG }}
+            COMMIT=${{github.event.pull_request.head.sha}}
+            BUILD_DATE=${{ steps.prep.outputs.created }}
+            EDITION=${{ env.RELOADER_EDITION }}
+            BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
+
           cache-to: type=inline
           platforms: linux/amd64,linux/arm,linux/arm64
           tags: |
@@ -153,6 +175,10 @@
           pull: true
           push: false
           build-args: |
+            VERSION=merge-${{ steps.generate_tag.outputs.GIT_UBI_TAG }}
+            COMMIT=${{github.event.pull_request.head.sha}}
+            BUILD_DATE=${{ steps.prep.outputs.created }}
+            EDITION=${{ env.RELOADER_EDITION }}
             BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
             BUILDER_IMAGE=${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.highest_tag.outputs.tag }}
           cache-to: type=inline
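The `kind-cluster: 'kind'` input above works because `kind create cluster` without a `--name` flag creates a cluster literally named `kind`; the composite action then sets `skip=true`, reuses that cluster, and only loads images into it:

```bash
kind create cluster    # default cluster name is "kind"
kind get clusters      # prints: kind
```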
.github/workflows/pull_request_docs.yaml (vendored, 5 changed lines)
@@ -16,16 +16,17 @@ on:
 jobs:
   qa:
-    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.131
+    uses: stakater/.github/.github/workflows/pull_request_doc_qa.yaml@v0.0.163
     with:
       MD_CONFIG: .github/md_config.json
       DOC_SRC: docs
+      MD_LINT_CONFIG: .markdownlint.yaml
   build:
-    uses: stakater/.github/.github/workflows/pull_request_container_build.yaml@v0.0.131
+    uses: stakater/.github/.github/workflows/pull_request_container_build.yaml@v0.0.163
     with:
       DOCKER_FILE_PATH: Dockerfile-docs
       CONTAINER_REGISTRY_URL: ghcr.io/stakater
       PUSH_IMAGE: false
     secrets:
       CONTAINER_REGISTRY_USERNAME: ${{ github.actor }}
       CONTAINER_REGISTRY_PASSWORD: ${{ secrets.GHCR_TOKEN }}
.github/workflows/push-helm-chart.yaml (vendored, 8 changed lines)
@@ -15,7 +15,7 @@ on:
 env:
   HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
-  REGISTRY: ghcr.io
+  REGISTRY: ghcr.io # container registry

 jobs:
   verify-and-push-helm-chart:
@@ -31,7 +31,7 @@

     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           token: ${{ secrets.PUBLISH_TOKEN }}
           fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
@@ -73,7 +73,7 @@
           exit 1

       - name: Install Cosign
-        uses: sigstore/cosign-installer@v3.8.2
+        uses: sigstore/cosign-installer@v4.0.0

       - name: Login to GHCR Registry
         uses: docker/login-action@v3
@@ -106,7 +106,7 @@
           commit_email: stakater@gmail.com

       - name: Push new chart tag
-        uses: anothrNick/github-tag-action@1.71.0
+        uses: anothrNick/github-tag-action@1.75.0
         env:
           GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
           WITH_V: false
.github/workflows/push-pr-image.yaml (vendored, 10 changed lines)
@@ -30,13 +30,13 @@ jobs:
     if: ${{ github.event.label.name == 'build-and-push-pr-image' }}
     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           ref: ${{github.event.pull_request.head.sha}}
           fetch-depth: 0

       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
         with:
           go-version-file: 'go.mod'
           check-latest: true
@@ -47,11 +47,7 @@
           make install

       - name: Run golangci-lint
-        uses: golangci/golangci-lint-action@v6
-        with:
-          version: latest
-          only-new-issues: false
-          args: --timeout 10m
+        run: make lint

       - name: Generate Tags
         id: generate_tag
.github/workflows/push.yaml (vendored, 31 changed lines)
@@ -15,6 +15,7 @@ env:
   KIND_VERSION: "0.23.0"
   HELM_REGISTRY_URL: "https://stakater.github.io/stakater-charts"
   REGISTRY: ghcr.io
+  RELOADER_EDITION: oss

 jobs:
   build:
@@ -29,7 +30,7 @@

     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           token: ${{ secrets.PUBLISH_TOKEN }}
           fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
@@ -42,7 +43,7 @@
           version: v3.11.3

       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
         with:
           go-version-file: 'go.mod'
           check-latest: true
@@ -53,11 +54,7 @@
           make install

       - name: Run golangci-lint
-        uses: golangci/golangci-lint-action@v6
-        with:
-          version: latest
-          only-new-issues: false
-          args: --timeout 10m
+        run: make lint

       - name: Install kubectl
         run: |
@@ -91,6 +88,10 @@
         with:
           username: ${{ secrets.STAKATER_DOCKERHUB_USERNAME }}
           password: ${{ secrets.STAKATER_DOCKERHUB_PASSWORD }}

+      - name: Create timestamp
+        id: prep
+        run: echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
+
       - name: Generate image repository path for Docker registry
         run: |
@@ -103,7 +104,12 @@
           file: ${{ env.DOCKER_FILE_PATH }}
           pull: true
           push: true
-          build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
+          build-args: |
+            VERSION=merge-${{ github.event.number }}
+            COMMIT=${{ github.sha }}
+            BUILD_DATE=${{ steps.prep.outputs.created }}
+            EDITION=${{ env.RELOADER_EDITION }}
+            BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
           cache-to: type=inline
           platforms: linux/amd64,linux/arm,linux/arm64
           tags: |
@@ -148,7 +154,12 @@
           file: ${{ env.DOCKER_FILE_PATH }}
           pull: true
           push: true
-          build-args: BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
+          build-args: |
+            VERSION=merge-${{ github.event.number }}
+            COMMIT=${{ github.sha }}
+            BUILD_DATE=${{ steps.prep.outputs.created }}
+            EDITION=${{ env.RELOADER_EDITION }}
+            BUILD_PARAMETERS=${{ env.BUILD_PARAMETERS }}
           cache-to: type=inline
           platforms: linux/amd64,linux/arm,linux/arm64
           tags: |
@@ -207,7 +218,7 @@
           org.opencontainers.image.revision=${{ github.sha }}

       - name: Push Latest Tag
-        uses: anothrNick/github-tag-action@1.71.0
+        uses: anothrNick/github-tag-action@1.75.0
         env:
           GITHUB_TOKEN: ${{ secrets.PUBLISH_TOKEN }}
           WITH_V: false
.github/workflows/release-helm-chart.yaml (vendored, 2 changed lines)
@@ -15,7 +15,7 @@ jobs:
     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0
.github/workflows/release.yaml (vendored, 25 changed lines)
@@ -11,6 +11,7 @@ env:
   KUBERNETES_VERSION: "1.30.0"
   KIND_VERSION: "0.23.0"
   REGISTRY: ghcr.io
+  RELOADER_EDITION: oss

 jobs:
   release:
@@ -24,7 +25,7 @@

     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           token: ${{ secrets.PUBLISH_TOKEN }}
           fetch-depth: 0 # otherwise, you will fail to push refs to dest repo
@@ -37,7 +38,7 @@
           version: v3.11.3

       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
         with:
           go-version-file: 'go.mod'
           check-latest: true
@@ -48,11 +49,7 @@
           make install

       - name: Run golangci-lint
-        uses: golangci/golangci-lint-action@v6
-        with:
-          version: latest
-          only-new-issues: false
-          args: --timeout 10m
+        run: make lint

       - name: Install kubectl
         run: |
@@ -79,6 +76,10 @@
         id: generate_tag
         run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_OUTPUT

+      - name: Create timestamp
+        id: prep
+        run: echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT
+
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3

@@ -106,6 +107,11 @@
           platforms: linux/amd64,linux/arm,linux/arm64
           tags: |
             ${{ env.DOCKER_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }}
+          build-args: |
+            VERSION=${{ steps.generate_tag.outputs.RELEASE_VERSION }}
+            COMMIT=${{ github.sha }}
+            BUILD_DATE=${{ steps.prep.outputs.created }}
+            EDITION=${{ env.RELOADER_EDITION }}
           labels: |
             org.opencontainers.image.source=${{ github.event.repository.clone_url }}
             org.opencontainers.image.created=${{ steps.prep.outputs.created }}
@@ -152,6 +158,11 @@
           platforms: linux/amd64,linux/arm,linux/arm64
           tags: |
             ${{ env.GHCR_IMAGE_REPOSITORY }}:${{ steps.generate_tag.outputs.RELEASE_VERSION }},${{ env.GHCR_IMAGE_REPOSITORY }}:latest
+          build-args: |
+            VERSION=${{ steps.generate_tag.outputs.RELEASE_VERSION }}
+            COMMIT=${{ github.sha }}
+            BUILD_DATE=${{ steps.prep.outputs.created }}
+            EDITION=${{ env.RELOADER_EDITION }}
           labels: |
             org.opencontainers.image.source=${{ github.event.repository.clone_url }}
             org.opencontainers.image.created=${{ steps.prep.outputs.created }}
.gitignore (vendored, 7 changed lines)
@@ -11,9 +11,14 @@ vendor
 dist
 Reloader
 !**/chart/reloader
+!**/internal/reloader
 *.tgz
 styles/
 site/
 /mkdocs.yml
 yq
 bin
+test/loadtest/results
+test/loadtest/loadtest
+# Temporary NFS files
+.nfs*
@@ -1,7 +1,7 @@
 StylesPath = styles
 MinAlertLevel = warning

-Packages = https://github.com/stakater/vale-package/releases/download/v0.0.77/Stakater.zip
+Packages = https://github.com/stakater/vale-package/releases/download/v0.0.87/Stakater.zip
 Vocab = Stakater

 # Only check MarkDown files
@@ -1,3 +1,3 @@
 # Code of Conduct

-Reloader follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
+Reloader follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
Dockerfile (13 changed lines)
@@ -2,13 +2,18 @@ ARG BUILDER_IMAGE
 ARG BASE_IMAGE

 # Build the manager binary
-FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.24.4} AS builder
+FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE:-golang:1.26} AS builder

 ARG TARGETOS
 ARG TARGETARCH
 ARG GOPROXY
 ARG GOPRIVATE
+
+ARG COMMIT
+ARG VERSION
+ARG BUILD_DATE
+ARG EDITION=oss

 WORKDIR /workspace

 # Copy the Go Modules manifests
@@ -30,7 +35,11 @@ RUN CGO_ENABLED=0 \
     GOPROXY=${GOPROXY} \
     GOPRIVATE=${GOPRIVATE} \
     GO111MODULE=on \
-    go build -mod=mod -a -o manager main.go
+    go build -ldflags="-s -w -X github.com/stakater/Reloader/pkg/common.Version=${VERSION} \
+    -X github.com/stakater/Reloader/pkg/common.Commit=${COMMIT} \
+    -X github.com/stakater/Reloader/pkg/common.BuildDate=${BUILD_DATE} \
+    -X github.com/stakater/Reloader/pkg/common.Edition=${EDITION}" \
+    -installsuffix 'static' -mod=mod -a -o manager ./

 # Use distroless as minimal base image to package the manager binary
 # Refer to https://github.com/GoogleContainerTools/distroless for more details
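The `-X` link flags above only take effect if `pkg/common` declares matching package-level string variables. A minimal sketch of what that package plausibly contains; the variable names come straight from the ldflags, while the default values are assumptions:

```go
// Package common holds build metadata injected at link time via
// -ldflags "-X github.com/stakater/Reloader/pkg/common.<Name>=<value>".
package common

// Placeholder defaults (assumed); the real values are stamped by the
// Dockerfile's go build invocation shown above.
var (
	Version   = "dev"
	Commit    = "unknown"
	BuildDate = "unknown"
	Edition   = "oss"
)
```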
@@ -1,4 +1,4 @@
-FROM python:3.13-alpine as builder
+FROM python:3.14-alpine as builder

 # set workdir
 RUN mkdir -p $HOME/application
@@ -17,7 +17,7 @@ RUN python theme_common/scripts/combine_mkdocs_config_yaml.py theme_common/mkdoc
 # build the docs
 RUN mkdocs build

-FROM nginxinc/nginx-unprivileged:1.27-alpine as deploy
+FROM nginxinc/nginx-unprivileged:1.29-alpine as deploy
 COPY --from=builder $HOME/application/site/ /usr/share/nginx/html/reloader/
 COPY docs-nginx.conf /etc/nginx/conf.d/default.conf
@@ -3,7 +3,7 @@ ARG BASE_IMAGE

 FROM --platform=${BUILDPLATFORM} ${BUILDER_IMAGE} AS SRC

-FROM ${BASE_IMAGE:-registry.access.redhat.com/ubi9/ubi:latest} AS ubi
+FROM ${BASE_IMAGE:-registry.access.redhat.com/ubi9/ubi:9.7} AS ubi
 ARG TARGETARCH

@@ -20,7 +20,21 @@ RUN mkdir /image && \
 COPY ubi-build-files-${TARGETARCH}.txt /tmp
 # Copy all the required files from the base UBI image into the image directory
 # As the go binary is not statically compiled this includes everything needed for CGO to work, cacerts, tzdata and RH release files
-RUN tar cf /tmp/files.tar -T /tmp/ubi-build-files-${TARGETARCH}.txt && tar xf /tmp/files.tar -C /image/
+# Filter existing files and exclude temporary entitlement files that may be removed during build
+RUN while IFS= read -r file; do \
+      [ -z "$file" ] && continue; \
+      if [ -e "$file" ] || [ -L "$file" ]; then \
+        echo "$file"; \
+      fi; \
+    done < /tmp/ubi-build-files-${TARGETARCH}.txt > /tmp/existing-files.txt && \
+    if [ -s /tmp/existing-files.txt ]; then \
+      tar -chf /tmp/files.tar --exclude='etc/pki/entitlement-host*' -T /tmp/existing-files.txt 2>&1 | grep -vE "(File removed before we read it|Cannot stat)" || true; \
+      if [ -f /tmp/files.tar ]; then \
+        tar xf /tmp/files.tar -C /image/ 2>/dev/null || true; \
+        rm -f /tmp/files.tar; \
+      fi; \
+    fi && \
+    rm -f /tmp/existing-files.txt

 # Generate a rpm database which contains all the packages that you said were needed in ubi-build-files-*.txt
 RUN rpm --root /image --initdb \
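To exercise these UBI stages locally, a build invocation along these lines should work. The UBI Dockerfile's filename is not captured in this diff, so `Dockerfile.ubi` is a guess, and the `BUILDER_IMAGE` value mirrors what `pull_request.yaml` passes:

```bash
# Illustrative only: the -f filename and image tags are assumptions.
docker build \
  -f Dockerfile.ubi \
  --build-arg BUILDER_IMAGE=ghcr.io/stakater/reloader:latest \
  --build-arg BASE_IMAGE=registry.access.redhat.com/ubi9/ubi:9.7 \
  -t localhost/reloader:ubi .
```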
Makefile (47 changed lines)
@@ -41,7 +41,7 @@ YQ ?= $(LOCALBIN)/yq
 KUSTOMIZE_VERSION ?= v5.3.0
 CONTROLLER_TOOLS_VERSION ?= v0.14.0
 ENVTEST_VERSION ?= release-0.17
-GOLANGCI_LINT_VERSION ?= v1.57.2
+GOLANGCI_LINT_VERSION ?= v2.6.1

 YQ_VERSION ?= v4.27.5
 YQ_DOWNLOAD_URL = "https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(OS)_$(ARCH)"
@@ -75,7 +75,7 @@ $(ENVTEST): $(LOCALBIN)
 .PHONY: golangci-lint
 golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
 $(GOLANGCI_LINT): $(LOCALBIN)
-	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,${GOLANGCI_LINT_VERSION})
+	$(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,${GOLANGCI_LINT_VERSION})

 # go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
 # $1 - target path with name of binary (ideally with version)
@@ -102,6 +102,9 @@ run:
 build:
 	"$(GOCMD)" build ${GOFLAGS} ${LDFLAGS} -o "${BINARY}"

+lint: golangci-lint ## Run golangci-lint on the codebase
+	$(GOLANGCI_LINT) run ./...
+
 build-image:
 	docker buildx build \
 		--platform ${OS}/${ARCH} \
@@ -166,3 +169,43 @@ yq-install:
 	@curl -sL $(YQ_DOWNLOAD_URL) -o $(YQ_BIN)
 	@chmod +x $(YQ_BIN)
 	@echo "yq $(YQ_VERSION) installed at $(YQ_BIN)"
+
+# =============================================================================
+# Load Testing
+# =============================================================================
+
+LOADTEST_BIN = test/loadtest/loadtest
+LOADTEST_OLD_IMAGE ?= localhost/reloader:old
+LOADTEST_NEW_IMAGE ?= localhost/reloader:new
+LOADTEST_DURATION ?= 60
+LOADTEST_SCENARIOS ?= all
+
+.PHONY: loadtest-build loadtest-quick loadtest-full loadtest loadtest-clean
+
+loadtest-build: ## Build loadtest binary
+	cd test/loadtest && $(GOCMD) build -o loadtest ./cmd/loadtest
+
+loadtest-quick: loadtest-build ## Run quick load tests (S1, S4, S6)
+	cd test/loadtest && ./loadtest run \
+		--old-image=$(LOADTEST_OLD_IMAGE) \
+		--new-image=$(LOADTEST_NEW_IMAGE) \
+		--scenario=S1,S4,S6 \
+		--duration=$(LOADTEST_DURATION)
+
+loadtest-full: loadtest-build ## Run full load test suite
+	cd test/loadtest && ./loadtest run \
+		--old-image=$(LOADTEST_OLD_IMAGE) \
+		--new-image=$(LOADTEST_NEW_IMAGE) \
+		--scenario=all \
+		--duration=$(LOADTEST_DURATION)
+
+loadtest: loadtest-build ## Run load tests with configurable scenarios (default: all)
+	cd test/loadtest && ./loadtest run \
+		--old-image=$(LOADTEST_OLD_IMAGE) \
+		--new-image=$(LOADTEST_NEW_IMAGE) \
+		--scenario=$(LOADTEST_SCENARIOS) \
+		--duration=$(LOADTEST_DURATION)
+
+loadtest-clean: ## Clean loadtest binary and results
+	rm -f $(LOADTEST_BIN)
+	rm -rf test/loadtest/results
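The new targets are parameterized through the `LOADTEST_*` variables, so a typical invocation overrides them on the command line:

```bash
# Quick suite (S1, S4, S6) against two locally built images
make loadtest-quick \
  LOADTEST_OLD_IMAGE=localhost/reloader:old \
  LOADTEST_NEW_IMAGE=localhost/reloader:new \
  LOADTEST_DURATION=120

# Full suite with defaults, then clean up
make loadtest-full
make loadtest-clean
```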
README.md (160 changed lines)
@@ -2,6 +2,7 @@
   <img src="assets/web/reloader.jpg" alt="Reloader" width="40%"/>
 </p>

+[](https://github.com/sponsors/stakater?utm_source=github&utm_medium=readme&utm_campaign=reloader)
 [](https://goreportcard.com/report/github.com/stakater/reloader)
 [](https://godoc.org/github.com/stakater/reloader)
 [](https://github.com/stakater/reloader/releases/latest)
@@ -12,7 +13,7 @@

 ## 🔁 What is Reloader?

-Reloader is a Kubernetes controller that automatically triggers rollouts of workloads (like Deployments, StatefulSets, and more) whenever referenced `Secrets` or `ConfigMaps` are updated.
+Reloader is a Kubernetes controller that automatically triggers rollouts of workloads (like Deployments, StatefulSets, and more) whenever referenced `Secrets`, `ConfigMaps` or **optionally CSI-mounted secrets** are updated.

 In a traditional Kubernetes setup, updating a `Secret` or `ConfigMap` does not automatically restart or redeploy your workloads. This can lead to stale configurations running in production, especially when dealing with dynamic values like credentials, feature flags, or environment configs.
@@ -153,9 +154,26 @@ This pattern allows fine-grained reload control — workloads only restart if th
 1. ✅ You want to reload a workload only if it references a ConfigMap or Secret that has been explicitly tagged with `reloader.stakater.com/match: "true"`.
 1. ✅ Use this when you want full control over which shared or system-wide resources trigger reloads. Great in multi-tenant clusters or shared configs.

-### 4. ⚙️ Workload-Specific Rollout Strategy
+### ⛔ Resource-Level Ignore Annotation

-By default, Reloader uses the **rollout** strategy — it updates the pod template to trigger a new rollout. This works well in most cases, but it can cause problems if you're using GitOps tools like ArgoCD, which detect this as configuration drift.
+When you need to prevent specific ConfigMaps or Secrets from triggering any reloads, use the ignore annotation on the resource itself:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap # or Secret
+metadata:
+  name: my-config
+  annotations:
+    reloader.stakater.com/ignore: "true"
+```
+
+This instructs Reloader to skip all reload logic for that resource across all workloads.
+
+### 4. ⚙️ Workload-Specific Rollout Strategy (Argo Rollouts Only)
+
+Note: This is only applicable when using [Argo Rollouts](https://argoproj.github.io/argo-rollouts/). It is ignored for standard Kubernetes `Deployments`, `StatefulSets`, or `DaemonSets`. To use this feature, Argo Rollouts support must be enabled in Reloader (for example via --is-argo-rollouts=true).
+
+By default, Reloader triggers the Argo Rollout controller to perform a standard rollout by updating the pod template. This works well in most cases, however, because this modifies the workload spec, GitOps tools like ArgoCD will detect this as "Configuration Drift" and mark your application as OutOfSync.

 To avoid that, you can switch to the **restart** strategy, which simply restarts the pod without changing the pod template.
@@ -176,6 +194,8 @@ metadata:
 1. You want a quick restart without changing the workload spec
 1. Your platform restricts metadata changes

+This setting affects Argo Rollouts behavior, not Argo CD sync settings.
+
 ### 5. ❗ Annotation Behavior Rules & Compatibility

 - `reloader.stakater.com/auto` and `reloader.stakater.com/search` **cannot be used together** — the `auto` annotation takes precedence.
@@ -189,20 +209,95 @@

 Reloader can optionally **send alerts** whenever it triggers a rolling upgrade for a workload (e.g., `Deployment`, `StatefulSet`, etc.).

-These alerts are sent to a configured **webhook endpoint**, which can be a generic receiver or services like Slack or Microsoft Teams.
+These alerts are sent to a configured **webhook endpoint**, which can be a generic receiver or services like Slack, Microsoft Teams or Google Chat.

 To enable this feature, update the `reloader.env.secret` section in your `values.yaml` (when installing via Helm):

 ```yaml
 reloader:
-  env:
-    secret:
-      ALERT_ON_RELOAD: "true" # Enable alerting (default: false)
-      ALERT_SINK: "slack" # Options: slack, teams, webhook (default: webhook)
-      ALERT_WEBHOOK_URL: "<your-webhook-url>" # Required if ALERT_ON_RELOAD is true
-      ALERT_ADDITIONAL_INFO: "Triggered by Reloader in staging environment"
+  deployment:
+    env:
+      secret:
+        ALERT_ON_RELOAD: "true" # Enable alerting (default: false)
+        ALERT_SINK: "slack" # Options: slack, teams, gchat or webhook (default: webhook)
+        ALERT_WEBHOOK_URL: "<your-webhook-url>" # Required if ALERT_ON_RELOAD is true
+        ALERT_ADDITIONAL_INFO: "Triggered by Reloader in staging environment"
 ```

+### 7. ⏸️ Pause Deployments
+
+This feature allows you to pause rollouts for a deployment for a specified duration, helping to prevent multiple restarts when several ConfigMaps or Secrets are updated in quick succession.
+
+| Annotation | Applies To | Description |
+|---------------------------------------------------------|--------------|-----------------------------------------------------------------------------|
+| `deployment.reloader.stakater.com/pause-period: "5m"` | Deployment | Pauses reloads for the specified period (e.g., `5m`, `1h`) |
+
+#### How it works
+
+1. Add the `deployment.reloader.stakater.com/pause-period` annotation to your Deployment, specifying the pause duration (e.g., `"5m"` for five minutes).
+1. When a watched ConfigMap or Secret changes, Reloader will still trigger a reload event, but if the deployment is paused, the rollout will have no effect until the pause period has elapsed.
+1. This avoids repeated restarts if multiple resources are updated close together.
+
+#### Use when
+
+1. ✅ Your deployment references multiple ConfigMaps or Secrets that may be updated at the same time.
+1. ✅ You want to minimize unnecessary rollouts and reduce downtime caused by back-to-back configuration changes.
+
+### 8. 🔐 CSI Secret Provider Support
+
+Reloader supports the [Secrets Store CSI Driver](https://secrets-store-csi-driver.sigs.k8s.io/), which allows mounting secrets from external secret stores (like AWS Secrets Manager, Azure Key Vault, HashiCorp Vault) directly into pods.
+Unlike Kubernetes Secret objects, CSI-mounted secrets do not always trigger native Kubernetes update events. Reloader solves this by watching CSI status resources and restarting affected workloads when mounted secret versions change.
+
+#### How it works
+
+When secret rotation is enabled, the Secrets Store CSI Driver updates a Kubernetes resource called: `SecretProviderClassPodStatus`
+
+This resource reflects the currently mounted secret versions for a pod.
+Reloader watches these updates and triggers a rollout when a change is detected.
+
+#### Prerequisites
+
+- Secrets Store CSI Driver must be installed in your cluster
+- Secret rotation enabled in the CSI driver
+- CSI integration enabled in Reloader: `--enable-csi-integration=true`
+
+#### Annotations for CSI-mounted Secrets
+
+| Annotation | Description |
+|------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------|
+| `reloader.stakater.com/auto: "true"` | Global Discovery: Automatically discovers and reloads the workload when any mounted ConfigMap or Secret is updated. |
+| `secretproviderclass.reloader.stakater.com/auto: 'true'` | CSI Discovery: Specifically watches for updates to all SecretProviderClasses used by the workload (CSI driver integration). |
+| `secretproviderclass.reloader.stakater.com/reload: "my-secretproviderclass"` | Targeted Reload: Only reloads the workload when the specifically named SecretProviderClass(es) are updated. |
+
+Reloader monitors changes at the **per-secret level** by watching the `SecretProviderClassPodStatus`. Make sure each secret you want to monitor is properly defined with a `secretKey` in your `SecretProviderClass`:
+
+```yaml
+apiVersion: secrets-store.csi.x-k8s.io/v1
+kind: SecretProviderClass
+metadata:
+  name: vault-reloader-demo
+  namespace: test
+spec:
+  provider: vault
+  parameters:
+    vaultAddress: "http://vault.vault.svc:8200"
+    vaultSkipTLSVerify: "true"
+    roleName: "demo-role"
+    objects: |
+      - objectName: "password"
+        secretPath: "secret/data/reloader-demo"
+        secretKey: "password"
+```
+
+***Important***: Reloader tracks changes to individual secrets (identified by `secretKey`). If your SecretProviderClass doesn't specify `secretKey` for each object, Reloader may not detect updates correctly.
+
+#### Notes & Limitations
+
+- Reloader reacts to CSI status changes, not direct updates to external secret stores
+- Secret rotation must be enabled in the CSI driver for updates to be detected
+- CSI limitations (such as `subPath` mounts) still apply and may require pod restarts
+- If secrets are synced to Kubernetes Secret objects, standard Reloader behavior applies and CSI support may not be required
+
 ## 🚀 Installation

 ### 1. 📦 Helm
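The pause-period section added above documents the annotation but stops short of a manifest. A minimal sketch of a Deployment using it, with names and image purely illustrative:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-app            # illustrative
  annotations:
    reloader.stakater.com/auto: "true"
    deployment.reloader.stakater.com/pause-period: "5m"
spec:
  selector:
    matchLabels:
      app: my-app
  template:
    metadata:
      labels:
        app: my-app
    spec:
      containers:
        - name: app
          image: nginx:1.29   # illustrative
```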
@@ -294,13 +389,30 @@ Reloader supports multiple strategies for triggering rolling updates when a watc
 |------|-------------|
 | `--resources-to-ignore=configmaps` | Ignore ConfigMaps (only one type can be ignored at a time) |
 | `--resources-to-ignore=secrets` | Ignore Secrets (cannot combine with configMaps) |
+| `--ignored-workload-types=jobs,cronjobs` | Ignore specific workload types from reload monitoring |
 | `--resource-label-selector=key=value` | Only watch ConfigMaps/Secrets with matching labels |

-> **⚠️ Note:**
-> Only **one** resource type can be ignored at a time.
-> Trying to ignore **both `configmaps` and `secrets`** will cause an error in Reloader.
+> **⚠️ Note:**
+>
+> Only **one** resource type can be ignored at a time.
+> Trying to ignore **both `configmaps` and `secrets`** will cause an error in Reloader.
+> ✅ **Workaround:** Scale the Reloader deployment to `0` replicas if you want to disable it completely.
+
+**💡 Workload Type Examples:**
+
+```bash
+# Ignore only Jobs
+--ignored-workload-types=jobs
+
+# Ignore only CronJobs
+--ignored-workload-types=cronjobs
+
+# Ignore both (comma-separated)
+--ignored-workload-types=jobs,cronjobs
+```
+
+> **🔧 Use Case:** Ignoring workload types is useful when you don't want certain types of workloads to be automatically reloaded.

 #### 3. 🧩 Namespace Filtering

 | Flag | Description |
@@ -321,6 +433,15 @@ These flags allow you to redefine annotation keys used in your workloads or reso
 | `--search-match-annotation` | Overrides `reloader.stakater.com/match` |
 | `--secret-annotation` | Overrides `secret.reloader.stakater.com/reload` |
 | `--configmap-annotation` | Overrides `configmap.reloader.stakater.com/reload` |
+| `--pause-deployment-annotation` | Overrides `deployment.reloader.stakater.com/pause-period` |
+| `--pause-deployment-time-annotation` | Overrides `deployment.reloader.stakater.com/paused-at` |
+
+### 5. 🕷️ Debugging
+
+| Flag | Description |
+|--- |-------------|
+| `--enable-pprof` | Enables `pprof` for profiling |
+| `--pprof-addr` | Address to start `pprof` server on. Default is `:6060` |

 ## Compatibility
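With `--enable-pprof` set, Reloader presumably serves Go's standard `net/http/pprof` endpoints on `--pprof-addr`, so the stock tooling applies. A sketch against a port-forwarded pod, with resource names illustrative:

```bash
# Forward the pprof port from a running Reloader pod, then grab a heap profile
kubectl -n reloader port-forward deploy/reloader-reloader 6060:6060 &
go tool pprof http://localhost:6060/debug/pprof/heap
```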
@@ -368,17 +489,20 @@ PRs are welcome. In general, we follow the "fork-and-pull" Git workflow:

 ## Release Processes

-_Repository GitHub releases_: As requested by the community in [issue 685](https://github.com/stakater/Reloader/issues/685), Reloader is now based on a manual release process. Releases are no longer done on every merged PR to the main branch, but manually on request.
+*Repository GitHub releases*: As requested by the community in [issue 685](https://github.com/stakater/Reloader/issues/685), Reloader is now based on a manual release process. Releases are no longer done on every merged PR to the main branch, but manually on request.

 To make a GitHub release:

-1. Code owners create a release branch `release-vX.Y.Z`
-1. Code owners run a dispatch mode workflow to automatically generate version and manifests on the release branch
+1. Code owners create a release branch `release-vX.Y.Z` from `master`
+1. Code owners run the [Init Release](https://github.com/stakater/Reloader/actions/workflows/init-branch-release.yaml) workflow to automatically generate version and manifests on the release branch
+   - Set the `TARGET_BRANCH` parameter to the release branch, i.e. `release-vX.Y.Z`
+   - Set the `TARGET_VERSION` to the release version without 'v', i.e. `X.Y.Z`
 1. A PR is created to bump the image version on the release branch, example: [PR-798](https://github.com/stakater/Reloader/pull/798)
 1. Code owners create a GitHub release with tag `vX.Y.Z` and target branch `release-vX.Y.Z`, which triggers creation of images
-1. Code owners create a PR to update the Helm chart version, example: [PR-846](https://github.com/stakater/Reloader/pull/846)
+1. Code owners create another branch from `master` and bump the helm chart version as well as the Reloader image version
+   - Code owners create a PR with the `release/helm-chart` label, example: [PR-846](https://github.com/stakater/Reloader/pull/846)

-_Repository git tagging_: Push to the main branch will create a merge-image and merge-tag named `merge-${{ github.event.number }}`, for example `merge-800` when pull request number 800 is merged.
+*Repository git tagging*: A push to the main branch will create a merge-image and merge-tag named `merge-${{ github.event.number }}`, for example `merge-800` when pull request number 800 is merged.

 ## Changelog
@@ -1,8 +1,8 @@
 apiVersion: v1
 name: reloader
 description: Reloader chart that runs on kubernetes
-version: 2.1.4
-appVersion: v1.4.4
+version: 2.2.8
+appVersion: v1.4.13
 keywords:
   - Reloader
   - kubernetes
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
# Add stakater helm repoository
|
||||
helm repo add stakater https://stakater.github.io/stakater-charts
|
||||
|
||||
helm repo update
|
||||
@@ -14,6 +15,8 @@ helm install stakater/reloader # For helm3 add --generate-name flag or set the r
|
||||
helm install {{RELEASE_NAME}} stakater/reloader -n {{NAMESPACE}} --set reloader.watchGlobally=false # By default, Reloader watches in all namespaces. To watch in single namespace, set watchGlobally=false
|
||||
|
||||
helm install stakater/reloader --set reloader.watchGlobally=false --namespace test --generate-name # Install Reloader in `test` namespace which will only watch `Deployments`, `Daemonsets` `Statefulsets` and `Rollouts` in `test` namespace.
|
||||
|
||||
helm install stakater/reloader --set reloader.ignoreJobs=true --set reloader.ignoreCronJobs=true --generate-name # Install Reloader ignoring Jobs and CronJobs from reload monitoring
|
||||
```
|
||||
|
||||
## Uninstalling
|
||||
@@ -47,16 +50,20 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
| `reloader.isOpenshift` | Enable OpenShift DeploymentConfigs. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.ignoreSecrets` | To ignore secrets. Valid values are either `true` or `false`. Either `ignoreSecrets` or `ignoreConfigMaps` can be ignored, not both at the same time | boolean | `false` |
| `reloader.ignoreConfigMaps` | To ignore configmaps. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.ignoreJobs` | To ignore jobs from reload monitoring. Valid values are either `true` or `false`. Translates to `--ignored-workload-types=jobs` | boolean | `false` |
| `reloader.ignoreCronJobs` | To ignore CronJobs from reload monitoring. Valid values are either `true` or `false`. Translates to `--ignored-workload-types=cronjobs` | boolean | `false` |
| `reloader.reloadOnCreate` | Enable reload on create events. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.reloadOnDelete` | Enable reload on delete events. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.syncAfterRestart` | Enable sync after Reloader restarts for **Add** events; works only when reloadOnCreate is `true`. Valid values are either `true` or `false` | boolean | `false` |
| `reloader.reloadStrategy` | Strategy to trigger resource restart; set to either `default`, `env-vars` or `annotations` | enumeration | `default` |
| `reloader.ignoreNamespaces` | List of comma separated namespaces to ignore; if multiple are provided, they are combined with the AND operator | string | `""` |
| `reloader.namespaceSelector` | List of comma separated k8s label selectors for namespace selection. See [LIST and WATCH filtering](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#list-and-watch-filtering) for more details on label-selector | string | `""` |
| `reloader.namespaceSelector` | List of comma separated k8s label selectors for namespace selection. This parameter is only used when `reloader.watchGlobally` is `true`. See [LIST and WATCH filtering](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#list-and-watch-filtering) for more details on label-selector | string | `""` |
| `reloader.resourceLabelSelector` | List of comma separated label selectors; if multiple are provided they are combined with the AND operator | string | `""` |
| `reloader.logFormat` | Set type of log format. Value could be either `json` or `""` | string | `""` |
| `reloader.watchGlobally` | Allow Reloader to watch in all namespaces (`true`) or just in a single namespace (`false`) | boolean | `true` |
| `reloader.enableHA` | Enable leadership election allowing you to run multiple replicas | boolean | `false` |
| `reloader.enablePProf` | Enables pprof for profiling | boolean | `false` |
| `reloader.pprofAddr` | Address to start pprof server on | string | `:6060` |
| `reloader.readOnlyRootFileSystem` | Enforce readOnlyRootFilesystem | boolean | `false` |
| `reloader.legacy.rbac` | | boolean | `false` |
| `reloader.matchLabels` | Pod labels to match | map | `{}` |
@@ -82,7 +89,10 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
| `reloader.deployment.resources` | Set container requests and limits (e.g. CPU or memory) | map | `{}` |
| `reloader.deployment.pod.annotations` | Set annotations for pod | map | `{}` |
| `reloader.deployment.priorityClassName` | Set priority class for pod in cluster | string | `""` |
| `reloader.deployment.volumeMounts` | Mount volume | array | `[]` |
| `reloader.deployment.volumes` | Add volume to a pod | array | `[]` |
| `reloader.deployment.dnsConfig` | DNS configuration for pods | map | `{}` |

### Other Reloader Parameters

| Parameter | Description | Type | Default |
@@ -110,6 +120,10 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
- Only one of these resources can be ignored at a time:
    - `ignoreConfigMaps` **or** `ignoreSecrets`
    - Trying to ignore both will cause Helm template compilation errors
- The `ignoreJobs` and `ignoreCronJobs` flags can be used together or individually (see the sketch after this list)
    - When both are enabled, translates to `--ignored-workload-types=jobs,cronjobs`
    - When used individually, translates to `--ignored-workload-types=jobs` or `--ignored-workload-types=cronjobs`
    - These flags prevent Reloader from monitoring and reloading the specified workload types

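A minimal sketch of verifying the combined behaviour locally; the chart path and release name are assumptions about your checkout, not fixed values:

```bash
# Render the chart with both flags and confirm the combined argument is emitted.
# Chart path and release name below are illustrative.
helm template reloader ./deployments/kubernetes/chart/reloader \
  --set reloader.ignoreJobs=true \
  --set reloader.ignoreCronJobs=true \
  | grep -- --ignored-workload-types
# Per the notes above, this should print: --ignored-workload-types=jobs,cronjobs
```
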
### Special Integrations
- OpenShift (`DeploymentConfig`) and Argo Rollouts support must be **explicitly enabled**
@@ -118,7 +132,7 @@ helm uninstall {{RELEASE_NAME}} -n {{NAMESPACE}}
### OpenShift Considerations
- Recent OpenShift versions (tested on 4.13.3) require:
    - Users to be in a dynamically assigned UID range
- **Solution**: Unset `runAsUser` via `deployment.securityContext.runAsUser=null`
- **Solution**: Unset `runAsUser` via `reloader.deployment.securityContext.runAsUser=null`
- Let OpenShift assign UID automatically during installation (see the example below)

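A hedged sketch of that workaround; the release name and namespace are placeholders:

```bash
# Unset the chart's hard-coded runAsUser so OpenShift can assign a UID
# from the project's dynamically allocated range.
helm upgrade --install reloader stakater/reloader \
  --namespace reloader --create-namespace \
  --set reloader.deployment.securityContext.runAsUser=null
```
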
### Core Functionality Flags

@@ -27,12 +27,20 @@ Create chart name and version as used by the chart label.
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{- define "reloader-labels.chart" -}}
{{- define "reloader-match-labels.chart" -}}
app: {{ template "reloader-fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
release: {{ .Release.Name | quote }}
{{- end -}}

{{- define "reloader-labels.chart" -}}
{{ include "reloader-match-labels.chart" . }}
app.kubernetes.io/name: {{ template "reloader-name" . }}
app.kubernetes.io/instance: {{ .Release.Name | quote }}
helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
heritage: {{ .Release.Service | quote }}
app.kubernetes.io/managed-by: {{ .Release.Service | quote }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end -}}

{{/*
@@ -45,10 +53,10 @@ podAntiAffinity:
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
- key: app.kubernetes.io/instance
operator: In
values:
- {{ template "reloader-fullname" . }}
- {{ .Release.Name | quote }}
topologyKey: "kubernetes.io/hostname"
{{- end -}}

@@ -70,3 +78,28 @@ Create the annotations to support helm3
meta.helm.sh/release-namespace: {{ .Release.Namespace | quote }}
meta.helm.sh/release-name: {{ .Release.Name | quote }}
{{- end -}}

{{/*
Create the namespace selector; it only applies when Reloader watches globally
*/}}
{{- define "reloader-namespaceSelector" -}}
{{- if and .Values.reloader.watchGlobally .Values.reloader.namespaceSelector -}}
{{ .Values.reloader.namespaceSelector }}
{{- end -}}
{{- end -}}

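A hedged usage sketch of the helper's effect: the selector is only honoured together with `watchGlobally` (the label value here is illustrative):

```bash
# Watch only namespaces labeled env=dev; if watchGlobally were false,
# the helper above would drop the selector entirely.
helm upgrade --install reloader stakater/reloader \
  --set reloader.watchGlobally=true \
  --set reloader.namespaceSelector="env=dev"
```
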
{{/*
Normalizes global.imagePullSecrets to a list of objects with name fields.
Supports both of these in values.yaml:
# - name: my-pull-secret
# - my-pull-secret
*/}}
{{- define "reloader-imagePullSecrets" -}}
{{- range $s := .Values.global.imagePullSecrets }}
{{- if kindIs "map" $s }}
- {{ toYaml $s | nindent 2 | trim }}
{{- else }}
- name: {{ $s }}
{{- end }}
{{- end }}
{{- end -}}

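To illustrate the normalization above, a hedged values fragment (the secret names are placeholders):

```yaml
# values.yaml — both spellings are accepted by the helper:
global:
  imagePullSecrets:
    - my-pull-secret            # plain string form
    - name: my-other-secret     # object form
# The helper renders both entries as a normalized list:
#   - name: my-pull-secret
#   - name: my-other-secret
```
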
@@ -31,7 +31,7 @@ rules:
- list
- get
- watch
{{- if .Values.reloader.namespaceSelector }}
{{- if (include "reloader-namespaceSelector" .) }}
- apiGroups:
- ""
resources:
@@ -105,6 +105,17 @@ rules:
- create
- get
- update
{{- end}}
{{- if .Values.reloader.enableCSIIntegration }}
- apiGroups:
- "secrets-store.csi.x-k8s.io"
resources:
- secretproviderclasspodstatuses
- secretproviderclasses
verbs:
- list
- get
- watch
{{- end}}
- apiGroups:
- ""

@@ -25,8 +25,7 @@ spec:
revisionHistoryLimit: {{ .Values.reloader.deployment.revisionHistoryLimit }}
selector:
matchLabels:
app: {{ template "reloader-fullname" . }}
release: {{ .Release.Name | quote }}
{{ include "reloader-match-labels.chart" . | indent 6 }}
{{- if .Values.reloader.matchLabels }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 6 }}
{{- end }}
@@ -45,9 +44,9 @@ spec:
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 8 }}
{{- end }}
spec:
{{- with .Values.global.imagePullSecrets }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{ include "reloader-imagePullSecrets" . | indent 8 }}
{{- end }}
{{- if .Values.reloader.deployment.nodeSelector }}
nodeSelector:
@@ -72,6 +71,10 @@ spec:
{{- if .Values.reloader.deployment.priorityClassName }}
priorityClassName: {{ .Values.reloader.deployment.priorityClassName }}
{{- end }}
{{- with .Values.reloader.deployment.dnsConfig }}
dnsConfig:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
{{- if .Values.global.imageRegistry }}
- image: "{{ .Values.global.imageRegistry }}/{{ .Values.image.name }}:{{ .Values.image.tag }}"
@@ -144,6 +147,15 @@ spec:
fieldRef:
fieldPath: metadata.namespace
{{- end }}

- name: RELOADER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace

- name: RELOADER_DEPLOYMENT_NAME
value: {{ template "reloader-fullname" . }}

{{- if .Values.reloader.enableHA }}
- name: POD_NAME
valueFrom:
@@ -198,7 +210,7 @@ spec:
{{- . | toYaml | nindent 10 }}
{{- end }}
{{- end }}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (.Values.reloader.namespaceSelector) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll)}}
{{- if or (.Values.reloader.logFormat) (.Values.reloader.logLevel) (.Values.reloader.ignoreSecrets) (.Values.reloader.ignoreNamespaces) (include "reloader-namespaceSelector" .) (.Values.reloader.resourceLabelSelector) (.Values.reloader.ignoreConfigMaps) (.Values.reloader.custom_annotations) (eq .Values.reloader.isArgoRollouts true) (eq .Values.reloader.reloadOnCreate true) (eq .Values.reloader.reloadOnDelete true) (ne .Values.reloader.reloadStrategy "default") (.Values.reloader.enableHA) (.Values.reloader.autoReloadAll) (.Values.reloader.ignoreJobs) (.Values.reloader.ignoreCronJobs) (.Values.reloader.enableCSIIntegration)}}
args:
{{- if .Values.reloader.logFormat }}
- "--log-format={{ .Values.reloader.logFormat }}"
@@ -212,15 +224,31 @@ spec:
{{- if .Values.reloader.ignoreConfigMaps }}
- "--resources-to-ignore=configMaps"
{{- end }}
{{- if and (.Values.reloader.ignoreJobs) (.Values.reloader.ignoreCronJobs) }}
- "--ignored-workload-types=jobs,cronjobs"
{{- else if .Values.reloader.ignoreJobs }}
- "--ignored-workload-types=jobs"
{{- else if .Values.reloader.ignoreCronJobs }}
- "--ignored-workload-types=cronjobs"
{{- end }}
{{- if .Values.reloader.ignoreNamespaces }}
- "--namespaces-to-ignore={{ .Values.reloader.ignoreNamespaces }}"
{{- end }}
{{- if .Values.reloader.namespaceSelector }}
- "--namespace-selector={{ .Values.reloader.namespaceSelector }}"
{{- if (include "reloader-namespaceSelector" .) }}
- "--namespace-selector=\"{{ include "reloader-namespaceSelector" . }}\""
{{- end }}
{{- if .Values.reloader.resourceLabelSelector }}
- "--resource-label-selector={{ .Values.reloader.resourceLabelSelector }}"
{{- end }}
{{- if .Values.reloader.enablePProf }}
- "--enable-pprof"
{{- if and .Values.reloader.pprofAddr }}
- "--pprof-addr={{ .Values.reloader.pprofAddr }}"
{{- end }}
{{- end }}
{{- if .Values.reloader.enableCSIIntegration }}
- "--enable-csi-integration=true"
{{- end }}
{{- if .Values.reloader.custom_annotations }}
{{- if .Values.reloader.custom_annotations.configmap }}
- "--configmap-annotation"
@@ -249,6 +277,14 @@ spec:
{{- if .Values.reloader.custom_annotations.match }}
- "--search-match-annotation"
- "{{ .Values.reloader.custom_annotations.match }}"
{{- end }}
{{- if .Values.reloader.custom_annotations.pausePeriod }}
- "--pause-deployment-annotation"
- "{{ .Values.reloader.custom_annotations.pausePeriod }}"
{{- end }}
{{- if .Values.reloader.custom_annotations.pauseTime }}
- "--pause-deployment-time-annotation"
- "{{ .Values.reloader.custom_annotations.pauseTime }}"
{{- end }}
{{- if .Values.reloader.webhookUrl }}
- "--webhook-url"

@@ -14,8 +14,7 @@ metadata:
spec:
podSelector:
matchLabels:
app: {{ template "reloader-fullname" . }}
release: {{ .Release.Name | quote }}
{{ include "reloader-match-labels.chart" . | indent 6 }}
{{- if .Values.reloader.matchLabels }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 6 }}
{{- end }}

@@ -13,5 +13,5 @@ spec:
{{- end }}
selector:
matchLabels:
app: {{ template "reloader-fullname" . }}
{{ include "reloader-match-labels.chart" . | nindent 6 }}
{{- end }}

@@ -56,5 +56,5 @@ spec:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{ include "reloader-labels.chart" . | nindent 6 }}
{{ include "reloader-match-labels.chart" . | nindent 6 }}
{{- end }}

@@ -92,6 +92,17 @@ rules:
- create
- get
- update
{{- end}}
{{- if .Values.reloader.enableCSIIntegration }}
- apiGroups:
- "secrets-store.csi.x-k8s.io"
resources:
- secretproviderclasspodstatuses
- secretproviderclasses
verbs:
- list
- get
- watch
{{- end}}
- apiGroups:
- ""
@@ -101,3 +112,34 @@ rules:
- create
- patch
{{- end }}

---

{{- if .Values.reloader.rbac.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-metadata-role
namespace: {{ .Values.namespace | default .Release.Namespace }}
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- list
- get
- watch
- create
- update
{{- end }}
@@ -27,3 +27,30 @@ subjects:
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
{{- end }}

---
{{- if .Values.reloader.rbac.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
annotations:
{{ include "reloader-helm3.annotations" . | indent 4 }}
labels:
{{ include "reloader-labels.chart" . | indent 4 }}
{{- if .Values.reloader.rbac.labels }}
{{ tpl (toYaml .Values.reloader.rbac.labels) . | indent 4 }}
{{- end }}
{{- if .Values.reloader.matchLabels }}
{{ tpl (toYaml .Values.reloader.matchLabels) . | indent 4 }}
{{- end }}
name: {{ template "reloader-fullname" . }}-metadata-role-binding
namespace: {{ .Values.namespace | default .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "reloader-fullname" . }}-metadata-role
subjects:
- kind: ServiceAccount
name: {{ template "reloader-serviceAccountName" . }}
namespace: {{ .Values.namespace | default .Release.Namespace }}
{{- end }}
@@ -2,7 +2,8 @@
apiVersion: v1
kind: ServiceAccount
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | nindent 2 }}
imagePullSecrets:
{{ include "reloader-imagePullSecrets" . | indent 2 }}
{{- end }}
{{- if hasKey .Values.reloader.serviceAccount "automountServiceAccountToken" }}
automountServiceAccountToken: {{ .Values.reloader.serviceAccount.automountServiceAccountToken }}

@@ -56,5 +56,5 @@ spec:
- {{ .Release.Namespace }}
selector:
matchLabels:
{{ include "reloader-labels.chart" . | nindent 6 }}
{{ include "reloader-match-labels.chart" . | nindent 6 }}
{{- end }}

@@ -61,3 +61,44 @@ tests:
valueFrom:
fieldRef:
fieldPath: metadata.name

- it: sets ignored-workload-types argument when ignoreJobs is true
set:
reloader:
ignoreJobs: true
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: "--ignored-workload-types=jobs"

- it: sets ignored-workload-types argument when ignoreCronJobs is true
set:
reloader:
ignoreCronJobs: true
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: "--ignored-workload-types=cronjobs"

- it: sets ignored-workload-types argument when both ignoreJobs and ignoreCronJobs are true
set:
reloader:
ignoreJobs: true
ignoreCronJobs: true
asserts:
- contains:
path: spec.template.spec.containers[0].args
content: "--ignored-workload-types=jobs,cronjobs"

- it: does not set ignored-workload-types argument when both ignoreJobs and ignoreCronJobs are false
set:
reloader:
ignoreJobs: false
ignoreCronJobs: false
asserts:
- notContains:
path: spec.template.spec.containers[0].args
content: "--ignored-workload-types=jobs"
- notContains:
path: spec.template.spec.containers[0].args
content: "--ignored-workload-types=cronjobs"

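These test cases follow the helm-unittest plugin's schema; a hedged way to run them locally (the chart path is an assumption about the repository layout):

```bash
# Install the helm-unittest plugin once, then run the chart's test suites.
helm plugin install https://github.com/helm-unittest/helm-unittest
helm unittest ./deployments/kubernetes/chart/reloader
```
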
@@ -7,6 +7,8 @@ global:
imagePullSecrets: []
#imagePullSecrets:
# - name: my-pull-secret
#imagePullSecrets:
# - my-pull-secret

kubernetes:
host: https://kubernetes.default
@@ -17,7 +19,7 @@ fullnameOverride: ""
image:
name: stakater/reloader
repository: ghcr.io/stakater/reloader
tag: v1.4.4
tag: v1.4.13
# digest: sha256:1234567
pullPolicy: IfNotPresent

@@ -27,7 +29,11 @@ reloader:
isOpenshift: false
ignoreSecrets: false
ignoreConfigMaps: false
# Set to true to exclude Job workloads from automatic reload monitoring
# Useful when you don't want Jobs to be restarted when their referenced ConfigMaps/Secrets change
ignoreJobs: false
# Set to true to exclude CronJob workloads from automatic reload monitoring
# Useful when you don't want CronJobs to be restarted when their referenced ConfigMaps/Secrets change
ignoreCronJobs: false
reloadOnCreate: false
reloadOnDelete: false
@@ -41,6 +47,11 @@ reloader:
watchGlobally: true
# Set to true to enable leadership election allowing you to run multiple replicas
enableHA: false
# Set to true to enable pprof for profiling
enablePProf: false
enableCSIIntegration: false
# Address to start pprof server on. Default is ":6060"
pprofAddr: ":6060"
# Set to true if you have a pod security policy that enforces readOnlyRootFilesystem
readOnlyRootFileSystem: false
legacy:
@@ -49,6 +60,19 @@ reloader:
# Set to true to expose a prometheus counter of reloads by namespace (this metric may have high cardinality in clusters with many namespaces)
enableMetricsByNamespace: false
deployment:
# Specifies the deployment DNS configuration.
dnsConfig: {}
# nameservers:
# - 1.2.3.4
# searches:
# - ns1.svc.cluster-domain.example
# - my.dns.search.suffix
# options:
# - name: ndots
# value: "1"
# - name: attempts
# value: "3"

# If you wish to run multiple replicas set reloader.enableHA = true
replicas: 1

@@ -68,6 +92,9 @@ reloader:
# operator: "Exists"
affinity: {}

volumeMounts: []
volumes: []

securityContext:
runAsNonRoot: true
runAsUser: 65534
@@ -99,14 +126,14 @@ reloader:
# whenUnsatisfiable: DoNotSchedule
# labelSelector:
# matchLabels:
# app: my-app
# app.kubernetes.io/instance: my-app
topologySpreadConstraints: []

annotations: {}
labels:
provider: stakater
group: com.stakater.platform
version: v1.4.4
version: v1.4.13
# Support for extra environment variables.
env:
# Open supports Key value pair as environment variables.
@@ -337,8 +364,4 @@ reloader:
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto".
updateMode: Auto

volumeMounts: []

volumes: []

webhookUrl: ""

@@ -6,3 +6,4 @@ resources:
- manifests/clusterrolebinding.yaml
- manifests/serviceaccount.yaml
- manifests/deployment.yaml
- manifests/role.yaml

@@ -17,7 +17,7 @@ spec:
app: reloader-reloader
spec:
containers:
- image: "ghcr.io/stakater/reloader:v1.1.0"
- image: "ghcr.io/stakater/reloader:v1.4.13"
imagePullPolicy: IfNotPresent
name: reloader-reloader
env:
@@ -31,6 +31,13 @@ spec:
resourceFieldRef:
resource: limits.memory
divisor: '1'
- name: RELOADER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace

- name: RELOADER_DEPLOYMENT_NAME
value: reloader-reloader
ports:
- name: http
containerPort: 9090

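For context, a hedged way to apply these vanilla manifests without Helm; the raw URL is an assumption about where the combined manifest lives on the master branch, so verify it against the repository first:

```bash
# Apply the plain Kubernetes manifests directly (no Helm).
kubectl apply -f https://raw.githubusercontent.com/stakater/Reloader/master/deployments/kubernetes/reloader.yaml
```
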
32 deployments/kubernetes/manifests/role.yaml (new file)
@@ -0,0 +1,32 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: reloader-reloader-metadata-role
namespace: default
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- list
- get
- watch
- create
- update

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: reloader-reloader-metadata-rolebinding
namespace: default
subjects:
- kind: ServiceAccount
name: reloader-reloader
namespace: default
roleRef:
kind: Role
name: reloader-reloader-metadata-role
apiGroup: rbac.authorization.k8s.io
@@ -5,6 +5,23 @@ metadata:
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: reloader-reloader-metadata-role
namespace: default
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- list
- get
- watch
- create
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: reloader-reloader-role
@@ -64,6 +81,20 @@ rules:
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: reloader-reloader-metadata-rolebinding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: reloader-reloader-metadata-role
subjects:
- kind: ServiceAccount
name: reloader-reloader
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: reloader-reloader-role-binding
@@ -104,7 +135,13 @@ spec:
resourceFieldRef:
divisor: "1"
resource: limits.memory
image: "ghcr.io/stakater/reloader:latest"
- name: RELOADER_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: RELOADER_DEPLOYMENT_NAME
value: reloader-reloader
image: ghcr.io/stakater/reloader:v1.4.13
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5

@@ -8,7 +8,7 @@ In-order to enable this feature, you need to update the `reloader.env.secret` se

```yaml
ALERT_ON_RELOAD: [ true/false ] Default: false
ALERT_SINK: [ slack/teams/webhook ] Default: webhook
ALERT_SINK: [ slack/teams/gchat/webhook ] Default: webhook
ALERT_WEBHOOK_URL: Required if ALERT_ON_RELOAD is true
ALERT_ADDITIONAL_INFO: Any additional information to be added to alert
```

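A hedged sketch of wiring these variables through the chart's `reloader.env.secret` section (the webhook URL and extra info are placeholders):

```yaml
reloader:
  env:
    secret:
      ALERT_ON_RELOAD: "true"
      ALERT_SINK: "slack"
      ALERT_WEBHOOK_URL: "https://hooks.example.com/placeholder"   # placeholder URL
      ALERT_ADDITIONAL_INFO: "cluster: staging"                    # free-form extra context
```
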
@@ -2,10 +2,10 @@

Reloader is inspired by [`configmapcontroller`](https://github.com/fabric8io/configmapcontroller), but it differs from `configmapcontroller` in many ways. Below is a small comparison between the two controllers.

| Reloader | `configmapcontroller` |
|----------|-----------------------|
| Reloader can watch both `Secrets` and `ConfigMaps`. | `configmapcontroller` can only watch changes in `ConfigMaps`. It cannot detect changes in other resources like `Secrets`. |
| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | `configmapcontroller` can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` |
| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in `configmap-controller`. It add difficulties for any additional updates in `configmap-controller` and one can not know for sure whether new changes breaks any old functionality or not. |
| Reloader uses SHA1 to encode the change in `ConfigMap` or `Secret`. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 characters encoded value that is very less prone to collision. | `configmap-controller` uses `FABRICB_FOO_REVISION` environment variable to store any change in `ConfigMap` controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
| Reloader allows you to customize your own annotation (for both `Secrets` and `ConfigMaps`) using command line flags | `configmap-controller` restricts you to only their provided annotation |
| Reloader | `configmapcontroller` |
|----------|-----------------------|
| Reloader can watch both `Secrets` and `ConfigMaps`. | `configmapcontroller` can only watch changes in `ConfigMaps`. It cannot detect changes in other resources like `Secrets`. |
| Reloader can perform rolling upgrades on `deployments` as well as on `statefulsets` and `daemonsets` | `configmapcontroller` can only perform rolling upgrades on `deployments`. It currently does not support rolling upgrades on `statefulsets` and `daemonsets` |
| Reloader provides both unit test cases and end to end integration test cases for future updates. So one can make sure that new changes do not break any old functionality. | Currently there are not any unit test cases or end to end integration test cases in `configmap-controller`. It adds difficulties for any additional updates in `configmap-controller` and one cannot know for sure whether new changes break any old functionality or not. |
| Reloader uses SHA1 to encode the change in `ConfigMap` or `Secret`. It then saves the SHA1 value in `STAKATER_FOO_CONFIGMAP` or `STAKATER_FOO_SECRET` environment variable depending upon where the change has happened. The use of SHA1 provides a concise 40 character encoded value that is much less prone to collision. | `configmap-controller` uses `FABRICB_FOO_REVISION` environment variable to store any change in `ConfigMap` controller. It does not encode it or convert it in suitable hash value to avoid data pollution in deployment. |
| Reloader allows you to customize your own annotation (for both `Secrets` and `ConfigMaps`) using command line flags | `configmap-controller` restricts you to only their provided annotation |

@@ -6,7 +6,7 @@ Reloader and k8s-trigger-controller are both built for same purpose. So there ar

- Both controllers support change detection in `ConfigMaps` and `Secrets`
- Both controllers support deployment `rollout`
- Both controllers use SHA1 for hashing
- Reloader controller uses SHA1 for hashing
- Both controllers have end to end as well as unit test cases.

## Differences

@@ -10,3 +10,17 @@ These are the key features of Reloader:
1. Restart pod in a `rollout` on change in linked/related `ConfigMaps` or `Secrets`

This site contains more details on how Reloader works. For an overview, please see the repository's [README file](https://github.com/stakater/Reloader/blob/master/README.md).

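To make the reload flow concrete, a minimal opt-in workload using Reloader's documented `reloader.stakater.com/auto` annotation (names and image are placeholders):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app                         # placeholder name
  annotations:
    reloader.stakater.com/auto: "true"      # rolling upgrade when referenced ConfigMaps/Secrets change
spec:
  replicas: 1
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      containers:
        - name: app
          image: nginx:stable               # placeholder image
          envFrom:
            - configMapRef:
                name: example-config        # editing this ConfigMap triggers a rollout
```
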
---

<div align="center">

[](https://github.com/sponsors/stakater?utm_source=docs&utm_medium=footer&utm_campaign=reloader)

<p>
Your support funds maintenance, security updates, and new features for Reloader, plus continued investment in other open source tools.
</p>

</div>

---

73 go.mod
@@ -1,21 +1,21 @@
module github.com/stakater/Reloader

go 1.24.4
go 1.26

require (
github.com/argoproj/argo-rollouts v1.8.2
github.com/openshift/api v0.0.0-20250411135543-10a8fa583797
github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2
github.com/argoproj/argo-rollouts v1.8.3
github.com/openshift/api v0.0.0-20260102143802-d2ec16864f86
github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc
github.com/parnurzeal/gorequest v0.3.0
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/client_golang v1.23.2
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.9.1
github.com/stretchr/testify v1.10.0
k8s.io/api v0.32.3
k8s.io/apimachinery v0.32.3
k8s.io/client-go v0.32.3
k8s.io/kubectl v0.32.3
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
github.com/spf13/cobra v1.10.2
github.com/stretchr/testify v1.11.1
k8s.io/api v0.35.0
k8s.io/apimachinery v0.35.0
k8s.io/client-go v0.35.0
k8s.io/kubectl v0.35.0
sigs.k8s.io/secrets-store-csi-driver v1.5.5
)

require (
@@ -24,16 +24,14 @@ require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/fxamacker/cbor/v2 v2.8.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-openapi/jsonpointer v0.21.1 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/swag v0.23.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
@@ -41,41 +39,44 @@ require (
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mailru/easyjson v0.9.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/moul/http2curl v1.0.0 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.63.0 // indirect
github.com/prometheus/procfs v0.16.0 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.16.1 // indirect
github.com/smartystreets/goconvey v1.7.2 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/spf13/pflag v1.0.9 // indirect
github.com/x448/float16 v0.8.4 // indirect
golang.org/x/net v0.39.0 // indirect
golang.org/x/oauth2 v0.29.0 // indirect
golang.org/x/sys v0.32.0 // indirect
golang.org/x/term v0.31.0 // indirect
golang.org/x/text v0.24.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sys v0.39.0 // indirect
golang.org/x/term v0.38.0 // indirect
golang.org/x/text v0.32.0 // indirect
golang.org/x/time v0.11.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
google.golang.org/protobuf v1.36.8 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)

// Replacements for argo-rollouts
replace (
github.com/go-check/check => github.com/go-check/check v0.0.0-20201130134442-10cb98267c6c
k8s.io/api v0.0.0 => k8s.io/api v0.32.3
k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.32.3
k8s.io/client-go v0.0.0 => k8s.io/client-go v0.32.3
k8s.io/api v0.0.0 => k8s.io/api v0.35.0
k8s.io/apimachinery v0.0.0 => k8s.io/apimachinery v0.35.0
k8s.io/client-go v0.0.0 => k8s.io/client-go v0.35.0
k8s.io/cloud-provider v0.0.0 => k8s.io/cloud-provider v0.24.2
k8s.io/controller-manager v0.0.0 => k8s.io/controller-manager v0.24.2
k8s.io/cri-api v0.0.0 => k8s.io/cri-api v0.20.5-rc.0
@@ -84,7 +85,7 @@ replace (
k8s.io/kube-controller-manager v0.0.0 => k8s.io/kube-controller-manager v0.24.2
k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.24.2
k8s.io/kube-scheduler v0.0.0 => k8s.io/kube-scheduler v0.24.2
k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.32.3
k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.35.0
k8s.io/kubelet v0.0.0 => k8s.io/kubelet v0.24.2
k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.24.2
k8s.io/mount-utils v0.0.0 => k8s.io/mount-utils v0.20.5-rc.0

156 go.sum
@@ -1,5 +1,7 @@
github.com/argoproj/argo-rollouts v1.8.2 h1:DBvkYvFTEH/zJ9MxJerqz/NMWEgZcHY5vxztyCBS5ak=
github.com/argoproj/argo-rollouts v1.8.2/go.mod h1:xZIw+dg+B4IqMv5fNPenIBUiPb9xljL2st1xxkjhaC0=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/argoproj/argo-rollouts v1.8.3 h1:blbtQva4IK9r6gFh+dWkCrLnFdPOWiv9ubQYu36qeaA=
github.com/argoproj/argo-rollouts v1.8.3/go.mod h1:kCAUvIfMGfOyVf3lvQbBt0nqQn4Pd+zB5/YwKv+UBa8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
@@ -13,10 +15,10 @@ github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380 h1:1NyRx2f4W4WBRyg
github.com/elazarl/goproxy v0.0.0-20240726154733-8b0c20506380/go.mod h1:thX175TtLTzLj3p7N/Q9IiKZ7NF+p72cvL91emV0hzo=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
@@ -27,18 +29,13 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
@@ -66,20 +63,22 @@ github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUt
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/openshift/api v0.0.0-20250411135543-10a8fa583797 h1:8x3G8QOZqo2bRAL8JFlPz/odqQECI/XmlZeRwnFxJ8I=
github.com/openshift/api v0.0.0-20250411135543-10a8fa583797/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw=
github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2 h1:bPXR0R8zp1o12nSUphN26hSM+OKYq5pMorbDCpApzDQ=
github.com/openshift/client-go v0.0.0-20250402181141-b3bad3b645f2/go.mod h1:dT1cJyVTperQ53GvVRa+GZ27r02fDZy2k5j+9QoQsCo=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/openshift/api v0.0.0-20260102143802-d2ec16864f86 h1:Vsqg+WqSA91LjrwK5lzkSCjztK/B+T8MPKI3MIALx3w=
github.com/openshift/api v0.0.0-20260102143802-d2ec16864f86/go.mod h1:d5uzF0YN2nQQFA0jIEWzzOZ+edmo6wzlGLvx5Fhz4uY=
github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc h1:nIlRaJfr/yGjPV15MNF5eVHLAGyXFjcUzO+hXeWDDk8=
github.com/openshift/client-go v0.0.0-20251223102348-558b0eef16bc/go.mod h1:cs9BwTu96sm2vQvy7r9rOiltgu90M6ju2qIHFG9WU+o=
github.com/parnurzeal/gorequest v0.3.0 h1:SoFyqCDC9COr1xuS6VA8fC8RU7XyrJZN2ona1kEX7FI=
github.com/parnurzeal/gorequest v0.3.0/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -87,16 +86,16 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k=
github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18=
github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
@@ -104,50 +103,60 @@ github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N
github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -155,44 +164,45 @@ golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY=
k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA=
k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE=
k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI=
k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/kubectl v0.35.0 h1:cL/wJKHDe8E8+rP3G7avnymcMg6bH6JEcR5w5uo06wc=
k8s.io/kubectl v0.35.0/go.mod h1:VR5/TSkYyxZwrRwY5I5dDq6l5KXmiCb+9w8IKplk3Qo=
k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 h1:OfgiEo21hGiwx1oJUU5MpEaeOEg6coWndBkZF/lkFuE=
|
||||
k8s.io/utils v0.0.0-20251222233032-718f0e51e6d2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
sigs.k8s.io/secrets-store-csi-driver v1.5.5 h1:LJDpDL5TILhlP68nGvtGSlJFxSDgAD2m148NT0Ts7os=
|
||||
sigs.k8s.io/secrets-store-csi-driver v1.5.5/go.mod h1:i2WqLicYH00hrTG3JAzICPMF4HL4KMEORlDt9UQoZLk=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
|
||||
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
|
||||
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
|
||||
|
||||
@@ -9,6 +9,15 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
+type AlertSink string
+
+const (
+	AlertSinkSlack      AlertSink = "slack"
+	AlertSinkTeams      AlertSink = "teams"
+	AlertSinkGoogleChat AlertSink = "gchat"
+	AlertSinkRaw        AlertSink = "raw"
+)
+
 // function to send alert msg to webhook service
 func SendWebhookAlert(msg string) {
 	webhook_url, ok := os.LookupEnv("ALERT_WEBHOOK_URL")
@@ -31,12 +40,15 @@ func SendWebhookAlert(msg string) {
 		msg = fmt.Sprintf("%s : %s", alert_additional_info, msg)
 	}
 
-	if alert_sink == "slack" {
+	switch AlertSink(alert_sink) {
+	case AlertSinkSlack:
 		sendSlackAlert(webhook_url, webhook_proxy, msg)
-	} else if alert_sink == "teams" {
+	case AlertSinkTeams:
 		sendTeamsAlert(webhook_url, webhook_proxy, msg)
-	} else {
-		msg = strings.Replace(msg, "*", "", -1)
+	case AlertSinkGoogleChat:
+		sendGoogleChatAlert(webhook_url, webhook_proxy, msg)
+	default:
+		msg = strings.ReplaceAll(msg, "*", "")
 		sendRawWebhookAlert(webhook_url, webhook_proxy, msg)
 	}
 }
@@ -98,6 +110,29 @@ func sendTeamsAlert(webhookUrl string, proxy string, msg string) []error {
 	return nil
 }
 
+// function to send alert to Google Chat webhook
+func sendGoogleChatAlert(webhookUrl string, proxy string, msg string) []error {
+	payload := map[string]interface{}{
+		"text": msg,
+	}
+
+	request := gorequest.New().Proxy(proxy)
+	resp, _, err := request.
+		Post(webhookUrl).
+		RedirectPolicy(redirectPolicy).
+		Send(payload).
+		End()
+
+	if err != nil {
+		return err
+	}
+	if resp.StatusCode != 200 {
+		return []error{fmt.Errorf("error sending msg. status: %v", resp.Status)}
+	}
+
+	return nil
+}
+
 // function to send alert to webhook service as text
 func sendRawWebhookAlert(webhookUrl string, proxy string, msg string) []error {
 	request := gorequest.New().Proxy(proxy)

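The hunks above replace the string if/else chain in SendWebhookAlert with a typed AlertSink switch. A minimal sketch of driving the dispatch from the environment follows; only ALERT_WEBHOOK_URL appears in the hunks, so the ALERT_SINK variable name and the alert package import path are assumptions:

    package main

    import (
        "os"

        "github.com/stakater/Reloader/internal/pkg/alert" // assumed import path
    )

    func main() {
        // ALERT_WEBHOOK_URL is read by SendWebhookAlert (shown above); the
        // ALERT_SINK name is assumed from the alert_sink variable in the hunk.
        os.Setenv("ALERT_WEBHOOK_URL", "https://chat.googleapis.com/v1/spaces/AAA/messages")
        os.Setenv("ALERT_SINK", "gchat") // AlertSinkGoogleChat; unknown values fall back to the raw sender
        alert.SendWebhookAlert("Changes detected in configmap foo; reloading deployment bar")
    }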
@@ -16,6 +16,8 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	patchtypes "k8s.io/apimachinery/pkg/types"
 
+	"maps"
+
 	argorolloutv1alpha1 "github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
 )
 
@@ -81,9 +83,9 @@ func GetDeploymentItem(clients kube.Clients, name string, namespace string) (run
 		return nil, err
 	}
 
-	if deployment.Spec.Template.ObjectMeta.Annotations == nil {
+	if deployment.Spec.Template.Annotations == nil {
 		annotations := make(map[string]string)
-		deployment.Spec.Template.ObjectMeta.Annotations = annotations
+		deployment.Spec.Template.Annotations = annotations
 	}
 
 	return deployment, nil
@@ -99,9 +101,9 @@ func GetDeploymentItems(clients kube.Clients, namespace string) []runtime.Object
 	items := make([]runtime.Object, len(deployments.Items))
 	// Ensure we always have pod annotations to add to
 	for i, v := range deployments.Items {
-		if v.Spec.Template.ObjectMeta.Annotations == nil {
+		if v.Spec.Template.Annotations == nil {
 			annotations := make(map[string]string)
-			deployments.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
+			deployments.Items[i].Spec.Template.Annotations = annotations
 		}
 		items[i] = &deployments.Items[i]
 	}
@@ -130,9 +132,9 @@ func GetCronJobItems(clients kube.Clients, namespace string) []runtime.Object {
 	items := make([]runtime.Object, len(cronjobs.Items))
 	// Ensure we always have pod annotations to add to
 	for i, v := range cronjobs.Items {
-		if v.Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations == nil {
+		if v.Spec.JobTemplate.Spec.Template.Annotations == nil {
 			annotations := make(map[string]string)
-			cronjobs.Items[i].Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = annotations
+			cronjobs.Items[i].Spec.JobTemplate.Spec.Template.Annotations = annotations
 		}
 		items[i] = &cronjobs.Items[i]
 	}
@@ -161,9 +163,9 @@ func GetJobItems(clients kube.Clients, namespace string) []runtime.Object {
 	items := make([]runtime.Object, len(jobs.Items))
 	// Ensure we always have pod annotations to add to
 	for i, v := range jobs.Items {
-		if v.Spec.Template.ObjectMeta.Annotations == nil {
+		if v.Spec.Template.Annotations == nil {
 			annotations := make(map[string]string)
-			jobs.Items[i].Spec.Template.ObjectMeta.Annotations = annotations
+			jobs.Items[i].Spec.Template.Annotations = annotations
 		}
 		items[i] = &jobs.Items[i]
 	}
@@ -192,8 +194,8 @@ func GetDaemonSetItems(clients kube.Clients, namespace string) []runtime.Object
 	items := make([]runtime.Object, len(daemonSets.Items))
 	// Ensure we always have pod annotations to add to
 	for i, v := range daemonSets.Items {
-		if v.Spec.Template.ObjectMeta.Annotations == nil {
-			daemonSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+		if v.Spec.Template.Annotations == nil {
+			daemonSets.Items[i].Spec.Template.Annotations = make(map[string]string)
 		}
 		items[i] = &daemonSets.Items[i]
 	}
@@ -222,8 +224,8 @@ func GetStatefulSetItems(clients kube.Clients, namespace string) []runtime.Objec
 	items := make([]runtime.Object, len(statefulSets.Items))
 	// Ensure we always have pod annotations to add to
 	for i, v := range statefulSets.Items {
-		if v.Spec.Template.ObjectMeta.Annotations == nil {
-			statefulSets.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+		if v.Spec.Template.Annotations == nil {
+			statefulSets.Items[i].Spec.Template.Annotations = make(map[string]string)
 		}
 		items[i] = &statefulSets.Items[i]
 	}
@@ -252,8 +254,8 @@ func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object {
 	items := make([]runtime.Object, len(rollouts.Items))
 	// Ensure we always have pod annotations to add to
 	for i, v := range rollouts.Items {
-		if v.Spec.Template.ObjectMeta.Annotations == nil {
-			rollouts.Items[i].Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+		if v.Spec.Template.Annotations == nil {
+			rollouts.Items[i].Spec.Template.Annotations = make(map[string]string)
 		}
 		items[i] = &rollouts.Items[i]
 	}
@@ -263,98 +265,98 @@ func GetRolloutItems(clients kube.Clients, namespace string) []runtime.Object {
 
 // GetDeploymentAnnotations returns the annotations of given deployment
 func GetDeploymentAnnotations(item runtime.Object) map[string]string {
-	if item.(*appsv1.Deployment).ObjectMeta.Annotations == nil {
-		item.(*appsv1.Deployment).ObjectMeta.Annotations = make(map[string]string)
+	if item.(*appsv1.Deployment).Annotations == nil {
+		item.(*appsv1.Deployment).Annotations = make(map[string]string)
 	}
-	return item.(*appsv1.Deployment).ObjectMeta.Annotations
+	return item.(*appsv1.Deployment).Annotations
 }
 
 // GetCronJobAnnotations returns the annotations of given cronjob
 func GetCronJobAnnotations(item runtime.Object) map[string]string {
-	if item.(*batchv1.CronJob).ObjectMeta.Annotations == nil {
-		item.(*batchv1.CronJob).ObjectMeta.Annotations = make(map[string]string)
+	if item.(*batchv1.CronJob).Annotations == nil {
+		item.(*batchv1.CronJob).Annotations = make(map[string]string)
 	}
-	return item.(*batchv1.CronJob).ObjectMeta.Annotations
+	return item.(*batchv1.CronJob).Annotations
 }
 
 // GetJobAnnotations returns the annotations of given job
 func GetJobAnnotations(item runtime.Object) map[string]string {
-	if item.(*batchv1.Job).ObjectMeta.Annotations == nil {
-		item.(*batchv1.Job).ObjectMeta.Annotations = make(map[string]string)
+	if item.(*batchv1.Job).Annotations == nil {
+		item.(*batchv1.Job).Annotations = make(map[string]string)
 	}
-	return item.(*batchv1.Job).ObjectMeta.Annotations
+	return item.(*batchv1.Job).Annotations
 }
 
 // GetDaemonSetAnnotations returns the annotations of given daemonSet
 func GetDaemonSetAnnotations(item runtime.Object) map[string]string {
-	if item.(*appsv1.DaemonSet).ObjectMeta.Annotations == nil {
-		item.(*appsv1.DaemonSet).ObjectMeta.Annotations = make(map[string]string)
+	if item.(*appsv1.DaemonSet).Annotations == nil {
+		item.(*appsv1.DaemonSet).Annotations = make(map[string]string)
 	}
-	return item.(*appsv1.DaemonSet).ObjectMeta.Annotations
+	return item.(*appsv1.DaemonSet).Annotations
 }
 
 // GetStatefulSetAnnotations returns the annotations of given statefulSet
 func GetStatefulSetAnnotations(item runtime.Object) map[string]string {
-	if item.(*appsv1.StatefulSet).ObjectMeta.Annotations == nil {
-		item.(*appsv1.StatefulSet).ObjectMeta.Annotations = make(map[string]string)
+	if item.(*appsv1.StatefulSet).Annotations == nil {
+		item.(*appsv1.StatefulSet).Annotations = make(map[string]string)
 	}
-	return item.(*appsv1.StatefulSet).ObjectMeta.Annotations
+	return item.(*appsv1.StatefulSet).Annotations
 }
 
 // GetRolloutAnnotations returns the annotations of given rollout
 func GetRolloutAnnotations(item runtime.Object) map[string]string {
-	if item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations == nil {
-		item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations = make(map[string]string)
+	if item.(*argorolloutv1alpha1.Rollout).Annotations == nil {
+		item.(*argorolloutv1alpha1.Rollout).Annotations = make(map[string]string)
 	}
-	return item.(*argorolloutv1alpha1.Rollout).ObjectMeta.Annotations
+	return item.(*argorolloutv1alpha1.Rollout).Annotations
 }
 
 // GetDeploymentPodAnnotations returns the pod's annotations of given deployment
 func GetDeploymentPodAnnotations(item runtime.Object) map[string]string {
-	if item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations == nil {
-		item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+	if item.(*appsv1.Deployment).Spec.Template.Annotations == nil {
+		item.(*appsv1.Deployment).Spec.Template.Annotations = make(map[string]string)
 	}
-	return item.(*appsv1.Deployment).Spec.Template.ObjectMeta.Annotations
+	return item.(*appsv1.Deployment).Spec.Template.Annotations
 }
 
 // GetCronJobPodAnnotations returns the pod's annotations of given cronjob
 func GetCronJobPodAnnotations(item runtime.Object) map[string]string {
-	if item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations == nil {
-		item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+	if item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations == nil {
+		item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations = make(map[string]string)
 	}
-	return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations
+	return item.(*batchv1.CronJob).Spec.JobTemplate.Spec.Template.Annotations
 }
 
 // GetJobPodAnnotations returns the pod's annotations of given job
 func GetJobPodAnnotations(item runtime.Object) map[string]string {
-	if item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations == nil {
-		item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+	if item.(*batchv1.Job).Spec.Template.Annotations == nil {
+		item.(*batchv1.Job).Spec.Template.Annotations = make(map[string]string)
 	}
-	return item.(*batchv1.Job).Spec.Template.ObjectMeta.Annotations
+	return item.(*batchv1.Job).Spec.Template.Annotations
 }
 
 // GetDaemonSetPodAnnotations returns the pod's annotations of given daemonSet
 func GetDaemonSetPodAnnotations(item runtime.Object) map[string]string {
-	if item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations == nil {
-		item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+	if item.(*appsv1.DaemonSet).Spec.Template.Annotations == nil {
+		item.(*appsv1.DaemonSet).Spec.Template.Annotations = make(map[string]string)
 	}
-	return item.(*appsv1.DaemonSet).Spec.Template.ObjectMeta.Annotations
+	return item.(*appsv1.DaemonSet).Spec.Template.Annotations
 }
 
 // GetStatefulSetPodAnnotations returns the pod's annotations of given statefulSet
 func GetStatefulSetPodAnnotations(item runtime.Object) map[string]string {
-	if item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations == nil {
-		item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+	if item.(*appsv1.StatefulSet).Spec.Template.Annotations == nil {
+		item.(*appsv1.StatefulSet).Spec.Template.Annotations = make(map[string]string)
 	}
-	return item.(*appsv1.StatefulSet).Spec.Template.ObjectMeta.Annotations
+	return item.(*appsv1.StatefulSet).Spec.Template.Annotations
 }
 
 // GetRolloutPodAnnotations returns the pod's annotations of given rollout
 func GetRolloutPodAnnotations(item runtime.Object) map[string]string {
-	if item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations == nil {
-		item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations = make(map[string]string)
+	if item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations == nil {
+		item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations = make(map[string]string)
 	}
-	return item.(*argorolloutv1alpha1.Rollout).Spec.Template.ObjectMeta.Annotations
+	return item.(*argorolloutv1alpha1.Rollout).Spec.Template.Annotations
 }
 
 // GetDeploymentContainers returns the containers of given deployment
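This whole-file sweep from `.ObjectMeta.Annotations` to `.Annotations` is behavior-preserving: metav1.ObjectMeta is an embedded field on these types, so its fields are promoted and both spellings address the same map. A self-contained sketch:

    package main

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
    )

    func main() {
        d := &appsv1.Deployment{}
        // Write through the embedded struct...
        d.ObjectMeta.Annotations = map[string]string{"reloader.stakater.com/auto": "true"}
        // ...and read through the promoted field: it is the same map.
        fmt.Println(d.Annotations["reloader.stakater.com/auto"]) // true
    }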
@@ -443,11 +445,21 @@ func PatchDeployment(clients kube.Clients, namespace string, resource runtime.Ob
 // CreateJobFromCronjob performs rolling upgrade on cronjob
 func CreateJobFromCronjob(clients kube.Clients, namespace string, resource runtime.Object) error {
 	cronJob := resource.(*batchv1.CronJob)
 
+	annotations := make(map[string]string)
+	annotations["cronjob.kubernetes.io/instantiate"] = "manual"
+	maps.Copy(annotations, cronJob.Spec.JobTemplate.Annotations)
+
 	job := &batchv1.Job{
-		ObjectMeta: cronJob.Spec.JobTemplate.ObjectMeta,
-		Spec:       cronJob.Spec.JobTemplate.Spec,
+		ObjectMeta: meta_v1.ObjectMeta{
+			GenerateName:    cronJob.Name + "-",
+			Namespace:       cronJob.Namespace,
+			Annotations:     annotations,
+			Labels:          cronJob.Spec.JobTemplate.Labels,
+			OwnerReferences: []meta_v1.OwnerReference{*meta_v1.NewControllerRef(cronJob, batchv1.SchemeGroupVersion.WithKind("CronJob"))},
+		},
+		Spec: cronJob.Spec.JobTemplate.Spec,
 	}
-	job.GenerateName = cronJob.Name + "-"
 	_, err := clients.KubernetesClient.BatchV1().Jobs(namespace).Create(context.TODO(), job, meta_v1.CreateOptions{FieldManager: "Reloader"})
 	return err
 }
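Building the Job's ObjectMeta explicitly, rather than copying the JobTemplate's, avoids carrying over server-populated fields, and meta_v1.NewControllerRef makes the CronJob the managing owner, so the manually instantiated Job is garbage-collected along with it. A small sketch of what that reference looks like:

    package main

    import (
        "fmt"

        batchv1 "k8s.io/api/batch/v1"
        meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        cronJob := &batchv1.CronJob{ObjectMeta: meta_v1.ObjectMeta{Name: "demo", UID: "uid-1"}}
        // NewControllerRef sets Controller=true, which is what the
        // isControllerOwner helper in the test hunks below checks for.
        ref := meta_v1.NewControllerRef(cronJob, batchv1.SchemeGroupVersion.WithKind("CronJob"))
        fmt.Println(ref.Kind, ref.Name, *ref.Controller) // CronJob demo true
    }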
@@ -469,9 +481,9 @@ func ReCreateJobFromjob(clients kube.Clients, namespace string, resource runtime
 	}
 
 	// Remove fields that should not be specified when creating a new Job
-	job.ObjectMeta.ResourceVersion = ""
-	job.ObjectMeta.UID = ""
-	job.ObjectMeta.CreationTimestamp = meta_v1.Time{}
+	job.ResourceVersion = ""
+	job.UID = ""
+	job.CreationTimestamp = meta_v1.Time{}
 	job.Status = batchv1.JobStatus{}
 
 	// Remove problematic labels

@@ -49,7 +49,7 @@ func newTestFixtures() testFixtures {
 
 func setupTestClients() kube.Clients {
 	return kube.Clients{
-		KubernetesClient:  fake.NewSimpleClientset(),
+		KubernetesClient:  fake.NewClientset(),
 		ArgoRolloutClient: fakeargoclientset.NewSimpleClientset(),
 	}
 }
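fake.NewClientset is client-go's successor to the deprecated fake.NewSimpleClientset; for these tests it is a drop-in swap, and like its predecessor it can be seeded with initial objects. A sketch under that assumption:

    // Sketch only: seeding the fake clientset, same calling convention as before.
    clients := kube.Clients{
        KubernetesClient:  fake.NewClientset(&v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "test-ns"}}),
        ArgoRolloutClient: fakeargoclientset.NewSimpleClientset(),
    }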
@@ -373,19 +373,19 @@ func TestPatchResources(t *testing.T) {
 			assert.NoError(t, err)
 			patchedResource, err := callbacks.GetDeploymentItem(clients, "test-deployment", fixtures.namespace)
 			assert.NoError(t, err)
-			assert.Equal(t, "test", patchedResource.(*appsv1.Deployment).ObjectMeta.Annotations["test"])
+			assert.Equal(t, "test", patchedResource.(*appsv1.Deployment).Annotations["test"])
 		}},
 		{"DaemonSet", createTestDaemonSetWithAnnotations, callbacks.PatchDaemonSet, deleteTestDaemonSet, func(err error) {
 			assert.NoError(t, err)
 			patchedResource, err := callbacks.GetDaemonSetItem(clients, "test-daemonset", fixtures.namespace)
 			assert.NoError(t, err)
-			assert.Equal(t, "test", patchedResource.(*appsv1.DaemonSet).ObjectMeta.Annotations["test"])
+			assert.Equal(t, "test", patchedResource.(*appsv1.DaemonSet).Annotations["test"])
 		}},
 		{"StatefulSet", createTestStatefulSetWithAnnotations, callbacks.PatchStatefulSet, deleteTestStatefulSet, func(err error) {
 			assert.NoError(t, err)
 			patchedResource, err := callbacks.GetStatefulSetItem(clients, "test-statefulset", fixtures.namespace)
 			assert.NoError(t, err)
-			assert.Equal(t, "test", patchedResource.(*appsv1.StatefulSet).ObjectMeta.Annotations["test"])
+			assert.Equal(t, "test", patchedResource.(*appsv1.StatefulSet).Annotations["test"])
 		}},
 		{"CronJob", createTestCronJobWithAnnotations, callbacks.PatchCronJob, deleteTestCronJob, func(err error) {
 			assert.EqualError(t, err, "not supported patching: CronJob")
@@ -415,13 +415,26 @@ func TestPatchResources(t *testing.T) {
 func TestCreateJobFromCronjob(t *testing.T) {
 	fixtures := newTestFixtures()
 
-	cronJob, err := createTestCronJobWithAnnotations(clients, fixtures.namespace, "1")
+	runtimeObj, err := createTestCronJobWithAnnotations(clients, fixtures.namespace, "1")
 	assert.NoError(t, err)
 
-	err = callbacks.CreateJobFromCronjob(clients, fixtures.namespace, cronJob.(*batchv1.CronJob))
+	cronJob := runtimeObj.(*batchv1.CronJob)
+	err = callbacks.CreateJobFromCronjob(clients, fixtures.namespace, cronJob)
 	assert.NoError(t, err)
 
-	err = deleteTestCronJob(clients, fixtures.namespace, "test-cronjob")
+	jobList, err := clients.KubernetesClient.BatchV1().Jobs(fixtures.namespace).List(context.TODO(), metav1.ListOptions{})
 	assert.NoError(t, err)
+
+	ownerFound := false
+	for _, job := range jobList.Items {
+		if isControllerOwner("CronJob", cronJob.Name, job.OwnerReferences) {
+			ownerFound = true
+			break
+		}
+	}
+	assert.Truef(t, ownerFound, "Missing CronJob owner reference")
+
+	err = deleteTestCronJob(clients, fixtures.namespace, cronJob.Name)
+	assert.NoError(t, err)
 }

@@ -608,17 +621,17 @@ func deleteTestStatefulSets(clients kube.Clients, namespace string) error {
 func createResourceWithPodAnnotations(obj runtime.Object, annotations map[string]string) runtime.Object {
 	switch v := obj.(type) {
 	case *appsv1.Deployment:
-		v.Spec.Template.ObjectMeta.Annotations = annotations
+		v.Spec.Template.Annotations = annotations
 	case *appsv1.DaemonSet:
-		v.Spec.Template.ObjectMeta.Annotations = annotations
+		v.Spec.Template.Annotations = annotations
 	case *appsv1.StatefulSet:
-		v.Spec.Template.ObjectMeta.Annotations = annotations
+		v.Spec.Template.Annotations = annotations
 	case *batchv1.CronJob:
-		v.Spec.JobTemplate.Spec.Template.ObjectMeta.Annotations = annotations
+		v.Spec.JobTemplate.Spec.Template.Annotations = annotations
 	case *batchv1.Job:
-		v.Spec.Template.ObjectMeta.Annotations = annotations
+		v.Spec.Template.Annotations = annotations
 	case *argorolloutv1alpha1.Rollout:
-		v.Spec.Template.ObjectMeta.Annotations = annotations
+		v.Spec.Template.Annotations = annotations
 	}
 	return obj
 }
@@ -749,3 +762,12 @@ func createTestJobWithAnnotations(clients kube.Clients, namespace, version strin
 func deleteTestJob(clients kube.Clients, namespace, name string) error {
 	return clients.KubernetesClient.BatchV1().Jobs(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{})
 }
+
+func isControllerOwner(kind, name string, ownerRefs []metav1.OwnerReference) bool {
+	for _, ownerRef := range ownerRefs {
+		if *ownerRef.Controller && ownerRef.Kind == kind && ownerRef.Name == name {
+			return true
+		}
+	}
+	return false
+}

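One caveat in the new isControllerOwner helper: OwnerReference.Controller is a *bool that is nil on plain (non-controller) owner references, so `*ownerRef.Controller` can panic for such refs. A nil-safe variant, as a sketch rather than part of the diff:

    func isControllerOwnerSafe(kind, name string, ownerRefs []metav1.OwnerReference) bool {
        for _, ownerRef := range ownerRefs {
            // Guard the optional pointer before dereferencing it.
            if ownerRef.Controller != nil && *ownerRef.Controller &&
                ownerRef.Kind == kind && ownerRef.Name == name {
                return true
            }
        }
        return false
    }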
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"net/http"
+	_ "net/http/pprof"
 	"os"
 	"strings"
 
@@ -14,12 +15,12 @@ import (
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 
 	"github.com/stakater/Reloader/internal/pkg/controller"
 	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/options"
 	"github.com/stakater/Reloader/internal/pkg/util"
+	"github.com/stakater/Reloader/pkg/common"
 	"github.com/stakater/Reloader/pkg/kube"
 )
 
@@ -33,27 +34,7 @@ func NewReloaderCommand() *cobra.Command {
 	}
 
 	// options
-	cmd.PersistentFlags().BoolVar(&options.AutoReloadAll, "auto-reload-all", false, "Auto reload all resources")
-	cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
-	cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
-	cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets/configmaps")
-	cmd.PersistentFlags().StringVar(&options.ConfigmapReloaderAutoAnnotation, "configmap-auto-annotation", "configmap.reloader.stakater.com/auto", "annotation to detect changes in configmaps")
-	cmd.PersistentFlags().StringVar(&options.SecretReloaderAutoAnnotation, "secret-auto-annotation", "secret.reloader.stakater.com/auto", "annotation to detect changes in secrets")
-	cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
-	cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search")
-	cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
-	cmd.PersistentFlags().StringVar(&options.LogLevel, "log-level", "info", "Log level to use (trace, debug, info, warning, error, fatal and panic)")
-	cmd.PersistentFlags().StringVar(&options.WebhookUrl, "webhook-url", "", "webhook to trigger instead of performing a reload")
-	cmd.PersistentFlags().StringSlice("resources-to-ignore", []string{}, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
-	cmd.PersistentFlags().StringSlice("namespaces-to-ignore", []string{}, "list of namespaces to ignore")
-	cmd.PersistentFlags().StringSlice("namespace-selector", []string{}, "list of key:value labels to filter on for namespaces")
-	cmd.PersistentFlags().StringSlice("resource-label-selector", []string{}, "list of key:value labels to filter on for configmaps and secrets")
-	cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
-	cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
-	cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
-	cmd.PersistentFlags().StringVar(&options.ReloadOnDelete, "reload-on-delete", "false", "Add support to watch delete events")
-	cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
-	cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts")
+	util.ConfigureReloaderFlags(cmd)
 
 	return cmd
 }
@@ -122,15 +103,18 @@ func getHAEnvs() (string, string) {
 }
 
 func startReloader(cmd *cobra.Command, args []string) {
+	common.GetCommandLineOptions()
 	err := configureLogging(options.LogFormat, options.LogLevel)
 	if err != nil {
 		logrus.Warn(err)
 	}
 
 	logrus.Info("Starting Reloader")
+	isGlobal := false
 	currentNamespace := os.Getenv("KUBERNETES_NAMESPACE")
 	if len(currentNamespace) == 0 {
 		currentNamespace = v1.NamespaceAll
+		isGlobal = true
 		logrus.Warnf("KUBERNETES_NAMESPACE is unset, will detect changes in all namespaces.")
 	}
 
@@ -140,22 +124,22 @@ func startReloader(cmd *cobra.Command, args []string) {
 		logrus.Fatal(err)
 	}
 
-	ignoredResourcesList, err := getIgnoredResourcesList(cmd)
+	ignoredResourcesList, err := util.GetIgnoredResourcesList()
 	if err != nil {
 		logrus.Fatal(err)
 	}
 
-	ignoredNamespacesList, err := getIgnoredNamespacesList(cmd)
-	if err != nil {
-		logrus.Fatal(err)
-	}
-
-	namespaceLabelSelector, err := getNamespaceLabelSelector(cmd)
-	if err != nil {
-		logrus.Fatal(err)
-	}
-
-	resourceLabelSelector, err := getResourceLabelSelector(cmd)
+	ignoredNamespacesList := options.NamespacesToIgnore
+	namespaceLabelSelector := ""
+
+	if isGlobal {
+		namespaceLabelSelector, err = common.GetNamespaceLabelSelector(options.NamespaceSelectors)
+		if err != nil {
+			logrus.Fatal(err)
+		}
+	}
+
+	resourceLabelSelector, err := common.GetResourceLabelSelector(options.ResourceSelectors)
 	if err != nil {
 		logrus.Fatal(err)
 	}
@@ -176,6 +160,10 @@ func startReloader(cmd *cobra.Command, args []string) {
 
 	var controllers []*controller.Controller
 	for k := range kube.ResourceMap {
+		if k == constants.SecretProviderClassController && !shouldRunCSIController() {
+			continue
+		}
+
 		if ignoredResourcesList.Contains(k) || (len(namespaceLabelSelector) == 0 && k == "namespaces") {
 			continue
 		}
@@ -207,107 +195,31 @@ func startReloader(cmd *cobra.Command, args []string) {
 		go leadership.RunLeaderElection(lock, ctx, cancel, podName, controllers)
 	}
 
+	common.PublishMetaInfoConfigmap(clientset)
+
+	if options.EnablePProf {
+		go startPProfServer()
+	}
+
 	leadership.SetupLivenessEndpoint()
 	logrus.Fatal(http.ListenAndServe(constants.DefaultHttpListenAddr, nil))
 }
 
-func getIgnoredNamespacesList(cmd *cobra.Command) (util.List, error) {
-	return getStringSliceFromFlags(cmd, "namespaces-to-ignore")
+func startPProfServer() {
+	logrus.Infof("Starting pprof server on %s", options.PProfAddr)
+	if err := http.ListenAndServe(options.PProfAddr, nil); err != nil {
+		logrus.Errorf("Failed to start pprof server: %v", err)
+	}
 }
 
-func getNamespaceLabelSelector(cmd *cobra.Command) (string, error) {
-	slice, err := getStringSliceFromFlags(cmd, "namespace-selector")
-	if err != nil {
-		logrus.Fatal(err)
+func shouldRunCSIController() bool {
+	if !options.EnableCSIIntegration {
+		logrus.Info("Skipping secretproviderclasspodstatuses controller: EnableCSIIntegration is disabled")
+		return false
 	}
 
-	for i, kv := range slice {
-		// Legacy support for ":" as a delimiter and "*" for wildcard.
-		if strings.Contains(kv, ":") {
-			split := strings.Split(kv, ":")
-			if split[1] == "*" {
-				slice[i] = split[0]
-			} else {
-				slice[i] = split[0] + "=" + split[1]
-			}
-		}
-		// Convert wildcard to valid apimachinery operator
-		if strings.Contains(kv, "=") {
-			split := strings.Split(kv, "=")
-			if split[1] == "*" {
-				slice[i] = split[0]
-			}
-		}
+	if !kube.IsCSIInstalled {
+		logrus.Info("Skipping secretproviderclasspodstatuses controller: CSI CRDs not installed")
+		return false
 	}
 
-	namespaceLabelSelector := strings.Join(slice[:], ",")
-	_, err = labels.Parse(namespaceLabelSelector)
-	if err != nil {
-		logrus.Fatal(err)
-	}
-
-	return namespaceLabelSelector, nil
-}
-
-func getResourceLabelSelector(cmd *cobra.Command) (string, error) {
-	slice, err := getStringSliceFromFlags(cmd, "resource-label-selector")
-	if err != nil {
-		logrus.Fatal(err)
-	}
-
-	for i, kv := range slice {
-		// Legacy support for ":" as a delimiter and "*" for wildcard.
-		if strings.Contains(kv, ":") {
-			split := strings.Split(kv, ":")
-			if split[1] == "*" {
-				slice[i] = split[0]
-			} else {
-				slice[i] = split[0] + "=" + split[1]
-			}
-		}
-		// Convert wildcard to valid apimachinery operator
-		if strings.Contains(kv, "=") {
-			split := strings.Split(kv, "=")
-			if split[1] == "*" {
-				slice[i] = split[0]
-			}
-		}
-	}
-
-	resourceLabelSelector := strings.Join(slice[:], ",")
-	_, err = labels.Parse(resourceLabelSelector)
-	if err != nil {
-		logrus.Fatal(err)
-	}
-
-	return resourceLabelSelector, nil
-}
-
-func getStringSliceFromFlags(cmd *cobra.Command, flag string) ([]string, error) {
-	slice, err := cmd.Flags().GetStringSlice(flag)
-	if err != nil {
-		return nil, err
-	}
-
-	return slice, nil
-}
-
-func getIgnoredResourcesList(cmd *cobra.Command) (util.List, error) {
-
-	ignoredResourcesList, err := getStringSliceFromFlags(cmd, "resources-to-ignore")
-	if err != nil {
-		return nil, err
-	}
-
-	for _, v := range ignoredResourcesList {
-		if v != "configMaps" && v != "secrets" {
-			return nil, fmt.Errorf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v)
-		}
-	}
-
-	if len(ignoredResourcesList) > 1 {
-		return nil, errors.New("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both")
-	}
-
-	return ignoredResourcesList, nil
+	return true
 }

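Per the calls in startReloader, the selector handling now lives behind common.GetNamespaceLabelSelector and common.GetResourceLabelSelector, whose bodies are not in these hunks. A condensed sketch of the legacy conversion the removed helpers performed ("k:v" becomes "k=v"; "k:*" or "k=*" degrade to a bare existence check on "k"), which the new functions presumably preserve:

    // normalizeSelector mirrors the removed helpers' conversion logic (a sketch).
    func normalizeSelector(pairs []string) (string, error) {
        for i, kv := range pairs {
            if strings.Contains(kv, ":") {
                split := strings.SplitN(kv, ":", 2)
                if split[1] == "*" {
                    pairs[i] = split[0] // wildcard: existence check on the key
                } else {
                    pairs[i] = split[0] + "=" + split[1]
                }
            }
            if strings.Contains(pairs[i], "=") {
                split := strings.SplitN(pairs[i], "=", 2)
                if split[1] == "*" {
                    pairs[i] = split[0]
                }
            }
        }
        selector := strings.Join(pairs, ",")
        _, err := labels.Parse(selector) // validate with apimachinery
        return selector, err
    }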
@@ -8,6 +8,8 @@ const (
 	ConfigmapEnvVarPostfix = "CONFIGMAP"
 	// SecretEnvVarPostfix is a postfix for secret envVar
 	SecretEnvVarPostfix = "SECRET"
+	// SecretProviderClassEnvVarPostfix is a postfix for secretproviderclasspodstatus envVar
+	SecretProviderClassEnvVarPostfix = "SECRETPROVIDERCLASS"
 	// EnvVarPrefix is a Prefix for environment variable
 	EnvVarPrefix = "STAKATER_"
 
@@ -22,6 +24,8 @@ const (
 	EnvVarsReloadStrategy = "env-vars"
 	// AnnotationsReloadStrategy instructs Reloader to add pod template annotations to facilitate a restart
 	AnnotationsReloadStrategy = "annotations"
+	// SecretProviderClassController enables support for SecretProviderClassPodStatus resources
+	SecretProviderClassController = "secretproviderclasspodstatuses"
 )
 
 // Leadership election related consts

@@ -2,9 +2,11 @@ package controller
 
 import (
 	"fmt"
+	"slices"
 	"time"
 
 	"github.com/sirupsen/logrus"
+	"github.com/stakater/Reloader/internal/pkg/constants"
 	"github.com/stakater/Reloader/internal/pkg/handler"
 	"github.com/stakater/Reloader/internal/pkg/metrics"
 	"github.com/stakater/Reloader/internal/pkg/options"
@@ -21,7 +23,7 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/kubectl/pkg/scheme"
-	"k8s.io/utils/strings/slices"
+	csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
 )
 
 // Controller for checking events
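The swap from k8s.io/utils/strings/slices to the standard library slices package (available since Go 1.21) is drop-in at these call sites, because the generic slices.Contains covers the string case:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        selected := []string{"default", "kube-system"}
        // Generic stdlib slices.Contains matches the old
        // k8s.io/utils/strings/slices.Contains usage one-for-one.
        fmt.Println(slices.Contains(selected, "default")) // true
    }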
@@ -79,7 +81,12 @@ func NewController(
 		}
 	}
 
-	listWatcher := cache.NewFilteredListWatchFromClient(client.CoreV1().RESTClient(), resource, namespace, optionsModifier)
+	getterRESTClient, err := getClientForResource(resource, client)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize REST client for %s: %w", resource, err)
+	}
+
+	listWatcher := cache.NewFilteredListWatchFromClient(getterRESTClient, resource, namespace, optionsModifier)
 
 	_, informer := cache.NewInformerWithOptions(cache.InformerOptions{
 		ListerWatcher: listWatcher,
@@ -103,30 +110,38 @@
 
 // Add function to add a new object to the queue in case of creating a resource
 func (c *Controller) Add(obj interface{}) {
+	c.collectors.RecordEventReceived("add", c.resource)
+
 	switch object := obj.(type) {
 	case *v1.Namespace:
 		c.addSelectedNamespaceToCache(*object)
 		return
+	case *csiv1.SecretProviderClassPodStatus:
+		return
 	}
 
 	if options.ReloadOnCreate == "true" {
 		if !c.resourceInIgnoredNamespace(obj) && c.resourceInSelectedNamespaces(obj) && secretControllerInitialized && configmapControllerInitialized {
-			c.queue.Add(handler.ResourceCreatedHandler{
-				Resource:   obj,
-				Collectors: c.collectors,
-				Recorder:   c.recorder,
+			c.enqueue(handler.ResourceCreatedHandler{
+				Resource:    obj,
+				Collectors:  c.collectors,
+				Recorder:    c.recorder,
+				EnqueueTime: time.Now(),
 			})
+		} else {
+			c.collectors.RecordSkipped("ignored_or_not_selected")
 		}
 	}
 }
 
 func (c *Controller) resourceInIgnoredNamespace(raw interface{}) bool {
-	switch object := raw.(type) {
+	switch obj := raw.(type) {
 	case *v1.ConfigMap:
-		return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace)
+		return c.ignoredNamespaces.Contains(obj.Namespace)
 	case *v1.Secret:
-		return c.ignoredNamespaces.Contains(object.ObjectMeta.Namespace)
+		return c.ignoredNamespaces.Contains(obj.Namespace)
+	case *csiv1.SecretProviderClassPodStatus:
+		return c.ignoredNamespaces.Contains(obj.Namespace)
 	}
 	return false
 }
@@ -145,6 +160,10 @@ func (c *Controller) resourceInSelectedNamespaces(raw interface{}) bool {
 		if slices.Contains(selectedNamespacesCache, object.GetNamespace()) {
 			return true
 		}
+	case *csiv1.SecretProviderClassPodStatus:
+		if slices.Contains(selectedNamespacesCache, object.GetNamespace()) {
+			return true
+		}
 	}
 	return false
 }
@@ -166,31 +185,44 @@ func (c *Controller) removeSelectedNamespaceFromCache(namespace v1.Namespace) {
 
 // Update function to add an old object and a new object to the queue in case of updating a resource
 func (c *Controller) Update(old interface{}, new interface{}) {
+	c.collectors.RecordEventReceived("update", c.resource)
+
 	switch new.(type) {
 	case *v1.Namespace:
 		return
 	}
 
 	if !c.resourceInIgnoredNamespace(new) && c.resourceInSelectedNamespaces(new) {
-		c.queue.Add(handler.ResourceUpdatedHandler{
+		c.enqueue(handler.ResourceUpdatedHandler{
 			Resource:    new,
 			OldResource: old,
 			Collectors:  c.collectors,
 			Recorder:    c.recorder,
+			EnqueueTime: time.Now(),
 		})
+	} else {
+		c.collectors.RecordSkipped("ignored_or_not_selected")
 	}
 }
 
 // Delete function to add an object to the queue in case of deleting a resource
 func (c *Controller) Delete(old interface{}) {
+	c.collectors.RecordEventReceived("delete", c.resource)
+
+	if _, ok := old.(*csiv1.SecretProviderClassPodStatus); ok {
+		return
+	}
+
 	if options.ReloadOnDelete == "true" {
 		if !c.resourceInIgnoredNamespace(old) && c.resourceInSelectedNamespaces(old) && secretControllerInitialized && configmapControllerInitialized {
-			c.queue.Add(handler.ResourceDeleteHandler{
-				Resource:   old,
-				Collectors: c.collectors,
-				Recorder:   c.recorder,
+			c.enqueue(handler.ResourceDeleteHandler{
+				Resource:    old,
+				Collectors:  c.collectors,
+				Recorder:    c.recorder,
+				EnqueueTime: time.Now(),
 			})
+		} else {
+			c.collectors.RecordSkipped("ignored_or_not_selected")
 		}
 	}
@@ -201,6 +233,13 @@ func (c *Controller) Delete(old interface{}) {
 	}
 }
 
+// enqueue adds an item to the queue and records metrics
+func (c *Controller) enqueue(item interface{}) {
+	c.queue.Add(item)
+	c.collectors.RecordQueueAdd()
+	c.collectors.SetQueueDepth(c.queue.Len())
+}
+
 // Run function for controller which handles the queue
 func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
 	defer runtime.HandleCrash()
@@ -212,7 +251,7 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
 
 	// Wait for all involved caches to be synced, before processing items from the queue is started
 	if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
-		runtime.HandleError(fmt.Errorf("Timed out waiting for caches to sync"))
+		runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
 		return
 	}
 
@@ -226,9 +265,9 @@ func (c *Controller) Run(threadiness int, stopCh chan struct{}) {
 
 func (c *Controller) runWorker() {
 	// At this point the controller is fully initialized and we can start processing the resources
-	if c.resource == "secrets" {
+	if c.resource == string(v1.ResourceSecrets) {
 		secretControllerInitialized = true
-	} else if c.resource == "configMaps" {
+	} else if c.resource == string(v1.ResourceConfigMaps) {
 		configmapControllerInitialized = true
 	}
 
@@ -242,13 +281,34 @@ func (c *Controller) processNextItem() bool {
 	if quit {
 		return false
 	}
 
+	c.collectors.SetQueueDepth(c.queue.Len())
+
 	// Tell the queue that we are done with processing this key. This unblocks the key for other workers
 	// This allows safe parallel processing because two events with the same key are never processed in
 	// parallel.
 	defer c.queue.Done(resourceHandler)
 
+	// Record queue latency if the handler supports it
+	if h, ok := resourceHandler.(handler.TimedHandler); ok {
+		queueLatency := time.Since(h.GetEnqueueTime())
+		c.collectors.RecordQueueLatency(queueLatency)
+	}
+
+	// Track reconcile/handler duration
+	startTime := time.Now()
+
 	// Invoke the method containing the business logic
 	err := resourceHandler.(handler.ResourceHandler).Handle()
 
+	duration := time.Since(startTime)
+
+	if err != nil {
+		c.collectors.RecordReconcile("error", duration)
+	} else {
+		c.collectors.RecordReconcile("success", duration)
+	}
+
 	// Handle the error if something went wrong during the execution of the business logic
 	c.handleErr(err, resourceHandler)
 	return true
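The queue-latency accounting relies on a handler.TimedHandler interface. Its definition is not part of these hunks; from the GetEnqueueTime call here and the EnqueueTime fields set on the Created/Updated/Delete handlers above, it presumably looks like this (assumed shape):

    // Assumed shape, inferred from the call sites in processNextItem.
    type TimedHandler interface {
        GetEnqueueTime() time.Time
    }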
@@ -261,16 +321,26 @@ func (c *Controller) handleErr(err error, key interface{}) {
 		// This ensures that future processing of updates for this key is not delayed because of
 		// an outdated error history.
 		c.queue.Forget(key)
+
+		// Record successful event processing
+		c.collectors.RecordEventProcessed("unknown", c.resource, "success")
 		return
 	}
 
+	// Record error
+	c.collectors.RecordError("handler_error")
+
 	// This controller retries 5 times if something goes wrong. After that, it stops trying.
 	if c.queue.NumRequeues(key) < 5 {
 		logrus.Errorf("Error syncing events: %v", err)
 
+		// Record retry
+		c.collectors.RecordRetry()
+
 		// Re-enqueue the key rate limited. Based on the rate limiter on the
 		// queue and the re-enqueue history, the key will be processed later again.
 		c.queue.AddRateLimited(key)
+		c.collectors.SetQueueDepth(c.queue.Len())
 		return
 	}
 
@@ -279,4 +349,17 @@
 	runtime.HandleError(err)
-	logrus.Errorf("Dropping key out of the queue: %v", err)
+	logrus.Debugf("Dropping the key %q out of the queue: %v", key, err)
+
+	c.collectors.RecordEventProcessed("unknown", c.resource, "dropped")
 }
+
+func getClientForResource(resource string, coreClient kubernetes.Interface) (cache.Getter, error) {
+	if resource == constants.SecretProviderClassController {
+		csiClient, err := kube.GetCSIClient()
+		if err != nil {
+			return nil, fmt.Errorf("failed to get CSI client: %w", err)
+		}
+		return csiClient.SecretsstoreV1().RESTClient(), nil
+	}
+	return coreClient.CoreV1().RESTClient(), nil
+}

@@ -15,6 +15,7 @@ import (
 	"github.com/stakater/Reloader/internal/pkg/options"
 	"github.com/stakater/Reloader/internal/pkg/testutil"
 	"github.com/stakater/Reloader/internal/pkg/util"
+	"github.com/stakater/Reloader/pkg/common"
 	"github.com/stakater/Reloader/pkg/kube"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -25,14 +26,15 @@
 )
 
 var (
-	clients             = kube.GetClients()
-	namespace           = "test-reloader-" + testutil.RandSeq(5)
-	configmapNamePrefix = "testconfigmap-reloader"
-	secretNamePrefix    = "testsecret-reloader"
-	data                = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
-	newData             = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
-	updatedData         = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy"
-	collectors          = metrics.NewCollectors()
+	clients                            = kube.GetClients()
+	namespace                          = "test-reloader-" + testutil.RandSeq(5)
+	configmapNamePrefix                = "testconfigmap-reloader"
+	secretNamePrefix                   = "testsecret-reloader"
+	secretProviderClassPodStatusPrefix = "testsecretproviderclasspodstatus-reloader"
+	data                               = "dGVzdFNlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
+	newData                            = "dGVzdE5ld1NlY3JldEVuY29kaW5nRm9yUmVsb2FkZXI="
+	updatedData                        = "dGVzdFVwZGF0ZWRTZWNyZXRFbmNvZGluZ0ZvclJlbG9hZGVy"
+	collectors                         = metrics.NewCollectors()
 )
 
 const (
@@ -45,6 +47,10 @@ func TestMain(m *testing.M) {
 
 	logrus.Infof("Creating controller")
 	for k := range kube.ResourceMap {
+		// Don't create controller if CSI provider is not installed
+		if k == "secretproviderclasspodstatuses" && !kube.IsCSIInstalled {
+			continue
+		}
 		if k == "namespaces" {
 			continue
 		}
@@ -94,7 +100,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDeployment(t *tes
 	// Verifying deployment update
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -147,7 +153,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreatePodAnnotationInDeployment(t
 	// Verifying deployment update
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -212,7 +218,7 @@ func TestControllerCreatingConfigmapShouldCreatePodAnnotationInDeployment(t *tes
 	// Verifying deployment update
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -271,7 +277,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingArs(t *testing
 	// Verifying deployment update
 	logrus.Infof("Verifying pod annotation has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -326,7 +332,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrCreatePodAnnotationIn
 	// Verifying deployment update
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com")
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: configmapName,
 		SHAValue:     shaData,
@@ -390,7 +396,7 @@ func TestControllerCreatingSecretShouldCreatePodAnnotationInDeployment(t *testin
 	// Verifying Upgrade
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -443,7 +449,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInDeployment(t *testin
 	// Verifying Upgrade
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -501,7 +507,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDeployment(t *testin
 	// Verifying Upgrade
 	logrus.Infof("Verifying pod annotation has been updated")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -552,7 +558,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDep
 	// Verifying Upgrade
 	logrus.Infof("Verifying pod annotation has been created")
 	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
-	config := util.Config{
+	config := common.Config{
 		Namespace:    namespace,
 		ResourceName: secretName,
 		SHAValue:     shaData,
@@ -578,6 +584,217 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDep
|
||||
time.Sleep(sleepDuration)
|
||||
}
|
||||
|
||||
// Perform rolling upgrade on deployment and create pod annotation upon updating the secretproviderclasspodstatus
func TestControllerUpdatingSecretProviderClassPodStatusShouldCreatePodAnnotationInDeployment(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy

	if !kube.IsCSIInstalled {
		return
	}

	// Creating secretproviderclass
	secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5)
	_, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data)
	if err != nil {
		t.Errorf("Error while creating the secretproviderclass %v", err)
	}

	// Creating secretproviderclasspodstatus
	spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data)
	if err != nil {
		t.Errorf("Error while creating the secretproviderclasspodstatus %v", err)
	}

	// Creating deployment
	_, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true)
	if err != nil {
		t.Errorf("Error in deployment creation: %v", err)
	}

	// Updating secretproviderclasspodstatus for the first time
	updateErr := testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", newData)
	if updateErr != nil {
		t.Errorf("Secretproviderclasspodstatus was not updated")
	}

	// Verifying deployment update
	logrus.Infof("Verifying pod annotation has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, newData)
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretproviderclasspodstatusName,
		SHAValue:     shaData,
		Annotation:   options.SecretProviderClassUpdateOnChangeAnnotation,
	}
	deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
	updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}
	time.Sleep(sleepDuration)

	// Deleting deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the deployment %v", err)
	}

	// Deleting secretproviderclass
	err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the secretproviderclass %v", err)
	}

	// Deleting secretproviderclasspodstatus
	err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err)
	}
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and update pod annotation upon updating the secretproviderclasspodstatus
func TestControllerUpdatingSecretProviderClassPodStatusShouldUpdatePodAnnotationInDeployment(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy

	if !kube.IsCSIInstalled {
		return
	}

	// Creating secretproviderclass
	secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5)
	_, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data)
	if err != nil {
		t.Errorf("Error while creating the secretproviderclass %v", err)
	}

	// Creating secretproviderclasspodstatus
	spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data)
	if err != nil {
		t.Errorf("Error while creating the secretproviderclasspodstatus %v", err)
	}

	// Creating deployment
	_, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true)
	if err != nil {
		t.Errorf("Error in deployment creation: %v", err)
	}

	// Updating secretproviderclasspodstatus for the first time
	err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", newData)
	if err != nil {
		t.Errorf("Error while updating secretproviderclasspodstatus %v", err)
	}

	// Updating secretproviderclasspodstatus for the second time
	err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", updatedData)
	if err != nil {
		t.Errorf("Error while updating secretproviderclasspodstatus %v", err)
	}

	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, updatedData)
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretproviderclasspodstatusName,
		SHAValue:     shaData,
		Annotation:   options.SecretProviderClassUpdateOnChangeAnnotation,
	}
	deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
	updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}

	// Deleting Deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the deployment %v", err)
	}

	// Deleting secretproviderclass
	err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the secretproviderclass %v", err)
	}

	// Deleting secretproviderclasspodstatus
	err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err)
	}
	time.Sleep(sleepDuration)
}

// Do not perform rolling upgrade on deployment, and do not create or update a pod annotation, upon updating the secretproviderclasspodstatus with the same data
func TestControllerUpdatingSecretProviderClassPodStatusWithSameDataShouldNotCreateOrUpdatePodAnnotationInDeployment(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy

	if !kube.IsCSIInstalled {
		return
	}

	// Creating secretproviderclass
	secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5)
	_, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data)
	if err != nil {
		t.Errorf("Error while creating the secretproviderclass %v", err)
	}

	// Creating secretproviderclasspodstatus
	spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data)
	if err != nil {
		t.Errorf("Error while creating the secretproviderclasspodstatus %v", err)
	}

	// Creating deployment
	_, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true)
	if err != nil {
		t.Errorf("Error in deployment creation: %v", err)
	}

	err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", data)
	if err != nil {
		t.Errorf("Error while updating secretproviderclasspodstatus %v", err)
	}

	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, data)
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretproviderclasspodstatusName,
		SHAValue:     shaData,
		Annotation:   options.SecretProviderClassUpdateOnChangeAnnotation,
	}
	deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
	updated := testutil.VerifyResourceAnnotationUpdate(clients, config, deploymentFuncs)
	if updated {
		t.Errorf("Deployment should not be updated by a change in secretproviderclasspodstatus")
	}

	// Deleting Deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the deployment %v", err)
	}

	// Deleting secretproviderclass
	err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the secretproviderclass %v", err)
	}

	// Deleting secretproviderclasspodstatus
	err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err)
	}
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on DaemonSet and create pod annotation upon updating the configmap
func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDaemonSet(t *testing.T) {
	options.ReloadStrategy = constants.AnnotationsReloadStrategy
@@ -604,7 +821,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInDaemonSet(t *test
	// Verifying DaemonSet update
	logrus.Infof("Verifying pod annotation has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -667,7 +884,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingArs(t *testing.
	// Verifying DaemonSet update
	logrus.Infof("Verifying pod annotation has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -720,7 +937,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInDaemonSet(t *testing
	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -779,7 +996,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInDaemonSet(t *testing
	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -830,7 +1047,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdatePodAnnotationInDae
	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -882,7 +1099,7 @@ func TestControllerUpdatingConfigmapShouldCreatePodAnnotationInStatefulSet(t *te
	// Verifying StatefulSet update
	logrus.Infof("Verifying pod annotation has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -941,7 +1158,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingArs(t *testin
	// Verifying StatefulSet update
	logrus.Infof("Verifying pod annotation has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -994,7 +1211,7 @@ func TestControllerUpdatingSecretShouldCreatePodAnnotationInStatefulSet(t *testi
	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -1046,7 +1263,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
	// Verifying deployment update
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -1099,7 +1316,7 @@ func TestControllerUpdatingConfigmapShouldAutoCreateEnvInDeployment(t *testing.T
	// Verifying deployment update
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -1164,7 +1381,7 @@ func TestControllerCreatingConfigmapShouldCreateEnvInDeployment(t *testing.T) {
	// Verifying deployment update
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -1223,7 +1440,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDeploymentUsingErs(t *testing
	// Verifying deployment update
	logrus.Infof("Verifying env var has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -1278,7 +1495,7 @@ func TestControllerUpdatingConfigmapLabelsShouldNotCreateOrUpdateEnvInDeployment
	// Verifying deployment update
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.google.com")
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -1342,7 +1559,7 @@ func TestControllerCreatingSecretShouldCreateEnvInDeployment(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -1395,7 +1612,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDeployment(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -1453,7 +1670,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDeployment(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -1504,7 +1721,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -1530,6 +1747,215 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDeployment(t
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and create an env var upon updating the secretproviderclasspodstatus
func TestControllerUpdatingSecretProviderClassPodStatusShouldCreateEnvInDeployment(t *testing.T) {
	options.ReloadStrategy = constants.EnvVarsReloadStrategy

	if !kube.IsCSIInstalled {
		return
	}

	// Creating secretproviderclass
	secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5)
	_, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data)
	if err != nil {
		t.Errorf("Error while creating the secretproviderclass %v", err)
	}

	// Creating secretproviderclasspodstatus
	spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data)
	if err != nil {
		t.Errorf("Error while creating the secretproviderclasspodstatus %v", err)
	}

	// Creating deployment
	_, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true)
	if err != nil {
		t.Errorf("Error in deployment creation: %v", err)
	}

	// Updating secretproviderclasspodstatus
	err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", newData)
	if err != nil {
		t.Errorf("Error while updating secretproviderclasspodstatus %v", err)
	}

	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, newData)
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretproviderclasspodstatusName,
		SHAValue:     shaData,
		Annotation:   options.SecretProviderClassUpdateOnChangeAnnotation,
	}
	deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
	updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretProviderClassEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}

	// Deleting Deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the deployment %v", err)
	}

	// Deleting secretproviderclass
	err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the secretproviderclass %v", err)
	}

	// Deleting secretproviderclasspodstatus
	err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err)
	}
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on deployment and update env var upon updating the secretproviderclasspodstatus
func TestControllerUpdatingSecretProviderClassPodStatusShouldUpdateEnvInDeployment(t *testing.T) {
	options.ReloadStrategy = constants.EnvVarsReloadStrategy

	if !kube.IsCSIInstalled {
		return
	}

	// Creating secretproviderclass
	secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5)
	_, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data)
	if err != nil {
		t.Errorf("Error while creating the secretproviderclass %v", err)
	}

	// Creating secretproviderclasspodstatus
	spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data)
	if err != nil {
		t.Errorf("Error while creating the secretproviderclasspodstatus %v", err)
	}

	// Creating deployment
	_, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true)
	if err != nil {
		t.Errorf("Error in deployment creation: %v", err)
	}

	// Updating secretproviderclasspodstatus for the first time
	err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", newData)
	if err != nil {
		t.Errorf("Error while updating secretproviderclasspodstatus %v", err)
	}

	// Updating secretproviderclasspodstatus for the second time
	err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "", updatedData)
	if err != nil {
		t.Errorf("Error while updating secretproviderclasspodstatus %v", err)
	}

	// Verifying Upgrade
	logrus.Infof("Verifying env var has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, updatedData)
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretproviderclasspodstatusName,
		SHAValue:     shaData,
		Annotation:   options.SecretProviderClassUpdateOnChangeAnnotation,
	}
	deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
	updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretProviderClassEnvVarPostfix, deploymentFuncs)
	if !updated {
		t.Errorf("Deployment was not updated")
	}

	// Deleting Deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the deployment %v", err)
	}

	// Deleting secretproviderclass
	err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the secretproviderclass %v", err)
	}

	// Deleting secretproviderclasspodstatus
	err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err)
	}
	time.Sleep(sleepDuration)
}

// Do not perform rolling upgrade on deployment, and do not create or update an env var, upon updating the label in secretproviderclasspodstatus
func TestControllerUpdatingSecretProviderClassPodStatusLabelsShouldNotCreateOrUpdateEnvInDeployment(t *testing.T) {
	options.ReloadStrategy = constants.EnvVarsReloadStrategy

	if !kube.IsCSIInstalled {
		return
	}

	// Creating secretproviderclass
	secretproviderclasspodstatusName := secretProviderClassPodStatusPrefix + "-update-" + testutil.RandSeq(5)
	_, err := testutil.CreateSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName, data)
	if err != nil {
		t.Errorf("Error while creating the secretproviderclass %v", err)
	}

	// Creating secretproviderclasspodstatus
	spcpsClient, err := testutil.CreateSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName, data)
	if err != nil {
		t.Errorf("Error while creating the secretproviderclasspodstatus %v", err)
	}

	// Creating deployment
	_, err = testutil.CreateDeployment(clients.KubernetesClient, secretproviderclasspodstatusName, namespace, true)
	if err != nil {
		t.Errorf("Error in deployment creation: %v", err)
	}

	err = testutil.UpdateSecretProviderClassPodStatus(spcpsClient, namespace, secretproviderclasspodstatusName, "test", data)
	if err != nil {
		t.Errorf("Error while updating secretproviderclasspodstatus %v", err)
	}

	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretProviderClassPodStatusResourceType, namespace, secretproviderclasspodstatusName, data)
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretproviderclasspodstatusName,
		SHAValue:     shaData,
		Annotation:   options.SecretProviderClassUpdateOnChangeAnnotation,
	}
	deploymentFuncs := handler.GetDeploymentRollingUpgradeFuncs()
	updated := testutil.VerifyResourceEnvVarUpdate(clients, config, constants.SecretProviderClassEnvVarPostfix, deploymentFuncs)
	if updated {
		t.Errorf("Deployment should not be updated by changing a label in secretproviderclasspodstatus")
	}

	// Deleting Deployment
	err = testutil.DeleteDeployment(clients.KubernetesClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the deployment %v", err)
	}

	// Deleting secretproviderclass
	err = testutil.DeleteSecretProviderClass(clients.CSIClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the secretproviderclass %v", err)
	}

	// Deleting secretproviderclasspodstatus
	err = testutil.DeleteSecretProviderClassPodStatus(clients.CSIClient, namespace, secretproviderclasspodstatusName)
	if err != nil {
		logrus.Errorf("Error while deleting the secretproviderclasspodstatus %v", err)
	}
	time.Sleep(sleepDuration)
}

// Perform rolling upgrade on DaemonSet and create env var upon updating the configmap
func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
	options.ReloadStrategy = constants.EnvVarsReloadStrategy
@@ -1556,7 +1982,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInDaemonSet(t *testing.T) {
	// Verifying DaemonSet update
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -1619,7 +2045,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateDaemonSetUsingErs(t *testing.
	// Verifying DaemonSet update
	logrus.Infof("Verifying env var has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -1672,7 +2098,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInDaemonSet(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -1731,7 +2157,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInDaemonSet(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -1782,7 +2208,7 @@ func TestControllerUpdatingSecretLabelsShouldNotCreateOrUpdateEnvInDaemonSet(t *
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, data)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -1834,7 +2260,7 @@ func TestControllerUpdatingConfigmapShouldCreateEnvInStatefulSet(t *testing.T) {
	// Verifying StatefulSet update
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "www.stakater.com")
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -1893,7 +2319,7 @@ func TestControllerForUpdatingConfigmapShouldUpdateStatefulSetUsingErs(t *testin
	// Verifying StatefulSet update
	logrus.Infof("Verifying env var has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, namespace, configmapName, "aurorasolutions.io")
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: configmapName,
		SHAValue:     shaData,
@@ -1946,7 +2372,7 @@ func TestControllerUpdatingSecretShouldCreateEnvInStatefulSet(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been created")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, newData)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -2004,7 +2430,7 @@ func TestControllerUpdatingSecretShouldUpdateEnvInStatefulSet(t *testing.T) {
	// Verifying Upgrade
	logrus.Infof("Verifying env var has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -2062,7 +2488,7 @@ func TestControllerUpdatingSecretShouldUpdatePodAnnotationInStatefulSet(t *testi
	// Verifying Upgrade
	logrus.Infof("Verifying pod annotation has been updated")
	shaData := testutil.ConvertResourceToSHA(testutil.SecretResourceType, namespace, secretName, updatedData)
	config := util.Config{
	config := common.Config{
		Namespace:    namespace,
		ResourceName: secretName,
		SHAValue:     shaData,
@@ -2156,19 +2582,21 @@ func TestController_resourceInIgnoredNamespace(t *testing.T) {
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c := &Controller{
				client:            tt.fields.client,
				indexer:           tt.fields.indexer,
				queue:             tt.fields.queue,
				informer:          tt.fields.informer,
				namespace:         tt.fields.namespace,
				ignoredNamespaces: tt.fields.ignoredNamespaces,
			}
			if got := c.resourceInIgnoredNamespace(tt.args.raw); got != tt.want {
				t.Errorf("Controller.resourceInIgnoredNamespace() = %v, want %v", got, tt.want)
			}
		})
		t.Run(
			tt.name, func(t *testing.T) {
				c := &Controller{
					client:            tt.fields.client,
					indexer:           tt.fields.indexer,
					queue:             tt.fields.queue,
					informer:          tt.fields.informer,
					namespace:         tt.fields.namespace,
					ignoredNamespaces: tt.fields.ignoredNamespaces,
				}
				if got := c.resourceInIgnoredNamespace(tt.args.raw); got != tt.want {
					t.Errorf("Controller.resourceInIgnoredNamespace() = %v, want %v", got, tt.want)
				}
			},
		)
	}
}

@@ -2330,35 +2758,37 @@ func TestController_resourceInNamespaceSelector(t *testing.T) {
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			fakeClient := fake.NewSimpleClientset()
			namespace, _ := fakeClient.CoreV1().Namespaces().Create(context.Background(), &tt.fields.namespace, metav1.CreateOptions{})
			logrus.Infof("created fakeClient namespace for testing = %s", namespace.Name)
		t.Run(
			tt.name, func(t *testing.T) {
				fakeClient := fake.NewClientset()
				namespace, _ := fakeClient.CoreV1().Namespaces().Create(context.Background(), &tt.fields.namespace, metav1.CreateOptions{})
				logrus.Infof("created fakeClient namespace for testing = %s", namespace.Name)

			c := &Controller{
				client:            fakeClient,
				indexer:           tt.fields.indexer,
				queue:             tt.fields.queue,
				informer:          tt.fields.informer,
				namespace:         tt.fields.namespace.ObjectMeta.Name,
				namespaceSelector: tt.fields.namespaceSelector,
			}
				c := &Controller{
					client:            fakeClient,
					indexer:           tt.fields.indexer,
					queue:             tt.fields.queue,
					informer:          tt.fields.informer,
					namespace:         tt.fields.namespace.Name,
					namespaceSelector: tt.fields.namespaceSelector,
				}

			listOptions := metav1.ListOptions{}
			listOptions.LabelSelector = tt.fields.namespaceSelector
			namespaces, _ := fakeClient.CoreV1().Namespaces().List(context.Background(), listOptions)
				listOptions := metav1.ListOptions{}
				listOptions.LabelSelector = tt.fields.namespaceSelector
				namespaces, _ := fakeClient.CoreV1().Namespaces().List(context.Background(), listOptions)

			for _, ns := range namespaces.Items {
				c.addSelectedNamespaceToCache(ns)
			}
				for _, ns := range namespaces.Items {
					c.addSelectedNamespaceToCache(ns)
				}

			if got := c.resourceInSelectedNamespaces(tt.args.raw); got != tt.want {
				t.Errorf("Controller.resourceInNamespaceSelector() = %v, want %v", got, tt.want)
			}
				if got := c.resourceInSelectedNamespaces(tt.args.raw); got != tt.want {
					t.Errorf("Controller.resourceInNamespaceSelector() = %v, want %v", got, tt.want)
				}

			for _, ns := range namespaces.Items {
				c.removeSelectedNamespaceFromCache(ns)
			}
		})
				for _, ns := range namespaces.Items {
					c.removeSelectedNamespaceFromCache(ns)
				}
			},
		)
	}
}

@@ -13,3 +13,16 @@ func TestGenerateSHA(t *testing.T) {
		t.Errorf("Failed to generate SHA")
	}
}

// TestGenerateSHAEmptyString verifies that an empty string generates a valid hash.
// This ensures consistent behavior and avoids issues with string matching operations.
func TestGenerateSHAEmptyString(t *testing.T) {
	result := GenerateSHA("")
	expected := "da39a3ee5e6b4b0d3255bfef95601890afd80709"
	if result != expected {
		t.Errorf("Failed to generate SHA for empty string. Expected: %s, Got: %s", expected, result)
	}
	if len(result) != 40 {
		t.Errorf("SHA hash should be 40 characters long, got %d", len(result))
	}
}

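// Aside (illustrative sketch, not part of the diff): the expected constant in the
// test above is the well-known SHA-1 digest of the empty string. Assuming
// GenerateSHA returns a hex-encoded SHA-1, it can be reproduced with the standard
// library alone:

package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

func main() {
	sum := sha1.Sum(nil) // SHA-1 over zero bytes
	fmt.Println(hex.EncodeToString(sum[:]))
	// Output: da39a3ee5e6b4b0d3255bfef95601890afd80709
}
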
@@ -1,45 +1,68 @@
package handler

import (
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/common"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/tools/record"
)

// ResourceCreatedHandler contains new objects
type ResourceCreatedHandler struct {
	Resource   interface{}
	Collectors metrics.Collectors
	Recorder   record.EventRecorder
	Resource    interface{}
	Collectors  metrics.Collectors
	Recorder    record.EventRecorder
	EnqueueTime time.Time // Time when this handler was added to the queue
}

// GetEnqueueTime returns when this handler was enqueued
func (r ResourceCreatedHandler) GetEnqueueTime() time.Time {
	return r.EnqueueTime
}

// Handle processes the newly created resource
func (r ResourceCreatedHandler) Handle() error {
	startTime := time.Now()
	result := "error"

	defer func() {
		r.Collectors.RecordReconcile(result, time.Since(startTime))
	}()

	if r.Resource == nil {
		logrus.Errorf("Resource creation handler received nil resource")
	} else {
		config, _ := r.GetConfig()
		// Send webhook
		if options.WebhookUrl != "" {
			return sendUpgradeWebhook(config, options.WebhookUrl)
		}
		// process resource based on its type
		return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeReloadStrategy)
		return nil
	}
	return nil

	config, _ := r.GetConfig()
	// Send webhook
	if options.WebhookUrl != "" {
		err := sendUpgradeWebhook(config, options.WebhookUrl)
		if err == nil {
			result = "success"
		}
		return err
	}
	// process resource based on its type
	err := doRollingUpgrade(config, r.Collectors, r.Recorder, invokeReloadStrategy)
	if err == nil {
		result = "success"
	}
	return err
}

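// Aside (illustrative sketch, not part of the diff): both Handle implementations
// above rely on a defer-plus-mutable-result pattern so that every exit path
// records exactly one reconcile metric. The work and record parameters here are
// hypothetical stand-ins for doRollingUpgrade/sendUpgradeWebhook and
// Collectors.RecordReconcile.
func handleWithMetrics(work func() error, record func(result string, d time.Duration)) error {
	startTime := time.Now()
	result := "error" // pessimistic default, flipped only on a clean exit
	defer func() {
		// The deferred closure reads the final value of result,
		// whichever return statement ends the function.
		record(result, time.Since(startTime))
	}()
	if err := work(); err != nil {
		return err // result stays "error"
	}
	result = "success"
	return nil
}
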
// GetConfig gets configurations containing SHA, annotations, namespace and resource name
func (r ResourceCreatedHandler) GetConfig() (util.Config, string) {
func (r ResourceCreatedHandler) GetConfig() (common.Config, string) {
	var oldSHAData string
	var config util.Config
	var config common.Config
	if _, ok := r.Resource.(*v1.ConfigMap); ok {
		config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
		config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
	} else if _, ok := r.Resource.(*v1.Secret); ok {
		config = util.GetSecretConfig(r.Resource.(*v1.Secret))
		config = common.GetSecretConfig(r.Resource.(*v1.Secret))
	} else {
		logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
	}

@@ -3,6 +3,7 @@ package handler
import (
	"fmt"
	"slices"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/callbacks"
@@ -10,7 +11,7 @@ import (
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/testutil"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/common"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
@@ -20,42 +21,63 @@ import (

// ResourceDeleteHandler contains new objects
type ResourceDeleteHandler struct {
	Resource   interface{}
	Collectors metrics.Collectors
	Recorder   record.EventRecorder
	Resource    interface{}
	Collectors  metrics.Collectors
	Recorder    record.EventRecorder
	EnqueueTime time.Time // Time when this handler was added to the queue
}

// GetEnqueueTime returns when this handler was enqueued
func (r ResourceDeleteHandler) GetEnqueueTime() time.Time {
	return r.EnqueueTime
}

// Handle processes resources being deleted
func (r ResourceDeleteHandler) Handle() error {
	startTime := time.Now()
	result := "error"

	defer func() {
		r.Collectors.RecordReconcile(result, time.Since(startTime))
	}()

	if r.Resource == nil {
		logrus.Errorf("Resource delete handler received nil resource")
	} else {
		config, _ := r.GetConfig()
		// Send webhook
		if options.WebhookUrl != "" {
			return sendUpgradeWebhook(config, options.WebhookUrl)
		}
		// process resource based on its type
		return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeDeleteStrategy)
		return nil
	}
	return nil

	config, _ := r.GetConfig()
	// Send webhook
	if options.WebhookUrl != "" {
		err := sendUpgradeWebhook(config, options.WebhookUrl)
		if err == nil {
			result = "success"
		}
		return err
	}
	// process resource based on its type
	err := doRollingUpgrade(config, r.Collectors, r.Recorder, invokeDeleteStrategy)
	if err == nil {
		result = "success"
	}
	return err
}

// GetConfig gets configurations containing SHA, annotations, namespace and resource name
func (r ResourceDeleteHandler) GetConfig() (util.Config, string) {
func (r ResourceDeleteHandler) GetConfig() (common.Config, string) {
	var oldSHAData string
	var config util.Config
	var config common.Config
	if _, ok := r.Resource.(*v1.ConfigMap); ok {
		config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
		config = common.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
	} else if _, ok := r.Resource.(*v1.Secret); ok {
		config = util.GetSecretConfig(r.Resource.(*v1.Secret))
		config = common.GetSecretConfig(r.Resource.(*v1.Secret))
	} else {
		logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
	}
	return config, oldSHAData
}

func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
	if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
		return removePodAnnotations(upgradeFuncs, item, config, autoReload)
	}
@@ -63,12 +85,12 @@ func invokeDeleteStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti
	return removeContainerEnvVars(upgradeFuncs, item, config, autoReload)
}

func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
func removePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
	config.SHAValue = testutil.GetSHAfromEmptyData()
	return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
}

func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
func removeContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
	envVar := getEnvVarName(config.ResourceName, config.Type)
	container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)

@@ -1,11 +1,18 @@
package handler

import (
	"github.com/stakater/Reloader/internal/pkg/util"
	"time"

	"github.com/stakater/Reloader/pkg/common"
)

// ResourceHandler handles the creation and update of resources
type ResourceHandler interface {
	Handle() error
	GetConfig() (util.Config, string)
	GetConfig() (common.Config, string)
}

// TimedHandler is a handler that tracks when it was enqueued
type TimedHandler interface {
	GetEnqueueTime() time.Time
}

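// Aside (illustrative sketch, not part of the diff): a queue consumer could use
// the TimedHandler interface to observe how long an item waited in the queue
// before being processed; observeQueueLatency here is a hypothetical callback,
// not an API from the repository.
func processWithQueueLatency(h ResourceHandler, observeQueueLatency func(time.Duration)) error {
	if timed, ok := h.(TimedHandler); ok && !timed.GetEnqueueTime().IsZero() {
		observeQueueLatency(time.Since(timed.GetEnqueueTime()))
	}
	return h.Handle()
}
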
internal/pkg/handler/pause_deployment.go (new file, 242 lines)
@@ -0,0 +1,242 @@
package handler

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/pkg/kube"
	app "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	patchtypes "k8s.io/apimachinery/pkg/types"
)

// Keeps track of currently active timers
var activeTimers = make(map[string]*time.Timer)

// Returns a unique key for the activeTimers map
func getTimerKey(namespace, deploymentName string) string {
	return fmt.Sprintf("%s/%s", namespace, deploymentName)
}

// Checks if a deployment is currently paused
func IsPaused(deployment *app.Deployment) bool {
	return deployment.Spec.Paused
}

// Checks whether the deployment was paused by Reloader
func IsPausedByReloader(deployment *app.Deployment) bool {
	if IsPaused(deployment) {
		pausedAtAnnotationValue := deployment.Annotations[options.PauseDeploymentTimeAnnotation]
		return pausedAtAnnotationValue != ""
	}
	return false
}

// Returns the time the deployment was paused by Reloader, nil otherwise
func GetPauseStartTime(deployment *app.Deployment) (*time.Time, error) {
	if !IsPausedByReloader(deployment) {
		return nil, nil
	}

	pausedAtStr := deployment.Annotations[options.PauseDeploymentTimeAnnotation]
	parsedTime, err := time.Parse(time.RFC3339, pausedAtStr)
	if err != nil {
		return nil, err
	}

	return &parsedTime, nil
}

// ParsePauseDuration parses the pause interval value and returns a time.Duration
func ParsePauseDuration(pauseIntervalValue string) (time.Duration, error) {
	pauseDuration, err := time.ParseDuration(pauseIntervalValue)
	if err != nil {
		logrus.Warnf("Failed to parse pause interval value '%s': %v", pauseIntervalValue, err)
		return 0, err
	}
	return pauseDuration, nil
}

// Pauses a deployment for the specified duration and creates a timer to resume it
// once that duration has elapsed
func PauseDeployment(deployment *app.Deployment, clients kube.Clients, namespace, pauseIntervalValue string) (*app.Deployment, error) {
	deploymentName := deployment.Name
	pauseDuration, err := ParsePauseDuration(pauseIntervalValue)

	if err != nil {
		return nil, err
	}

	if !IsPaused(deployment) {
		logrus.Infof("Pausing Deployment '%s' in namespace '%s' for %s", deploymentName, namespace, pauseDuration)

		deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

		pausePatch, err := CreatePausePatch()
		if err != nil {
			logrus.Errorf("Failed to create pause patch for deployment '%s': %v", deploymentName, err)
			return deployment, err
		}

		err = deploymentFuncs.PatchFunc(clients, namespace, deployment, patchtypes.StrategicMergePatchType, pausePatch)

		if err != nil {
			logrus.Errorf("Failed to patch deployment '%s' in namespace '%s': %v", deploymentName, namespace, err)
			return deployment, err
		}

		updatedDeployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})

		CreateResumeTimer(deployment, clients, namespace, pauseDuration)
		return updatedDeployment, err
	}

	if !IsPausedByReloader(deployment) {
		logrus.Infof("Deployment '%s' in namespace '%s' already paused", deploymentName, namespace)
		return deployment, nil
	}

	// Deployment has already been paused by Reloader, check for a timer
	logrus.Debugf("Deployment '%s' in namespace '%s' is already paused by reloader", deploymentName, namespace)

	timerKey := getTimerKey(namespace, deploymentName)
	_, timerExists := activeTimers[timerKey]

	if !timerExists {
		logrus.Warnf("Timer does not exist for already paused deployment '%s' in namespace '%s', creating new one",
			deploymentName, namespace)
		HandleMissingTimer(deployment, pauseDuration, clients, namespace)
	}
	return deployment, nil
}

// Handles the case of a missing timer for a deployment that has been paused by Reloader.
// Could occur after a new leader election or a Reloader restart
func HandleMissingTimer(deployment *app.Deployment, pauseDuration time.Duration, clients kube.Clients, namespace string) {
	deploymentName := deployment.Name
	pauseStartTime, err := GetPauseStartTime(deployment)
	if err != nil {
		logrus.Errorf("Error parsing pause start time for deployment '%s' in namespace '%s': %v. Resuming deployment immediately",
			deploymentName, namespace, err)
		ResumeDeployment(deployment, namespace, clients)
		return
	}

	if pauseStartTime == nil {
		return
	}

	elapsedPauseTime := time.Since(*pauseStartTime)
	remainingPauseTime := pauseDuration - elapsedPauseTime

	if remainingPauseTime <= 0 {
		logrus.Infof("Pause period for deployment '%s' in namespace '%s' has expired. Resuming immediately",
			deploymentName, namespace)
		ResumeDeployment(deployment, namespace, clients)
		return
	}

	logrus.Infof("Creating missing timer for already paused deployment '%s' in namespace '%s' with remaining time %s",
		deploymentName, namespace, remainingPauseTime)
	CreateResumeTimer(deployment, clients, namespace, remainingPauseTime)
}

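// Aside (illustrative sketch, not part of the diff): the remaining-pause
// arithmetic above in concrete numbers, matching the test cases further down.
func remainingPause(pausedAt time.Time, pauseDuration time.Duration) time.Duration {
	return pauseDuration - time.Since(pausedAt)
}

// With a 5m pause interval:
//   paused 6m ago -> remainingPause is about -1m (<= 0), so the deployment is resumed immediately
//   paused 4m ago -> remainingPause is about +1m, so a timer is created for the remaining minute
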
// CreateResumeTimer creates a timer to resume the deployment after the specified duration
func CreateResumeTimer(deployment *app.Deployment, clients kube.Clients, namespace string, pauseDuration time.Duration) {
	deploymentName := deployment.Name
	timerKey := getTimerKey(namespace, deployment.Name)

	// Check if there's an existing timer for this deployment
	if _, exists := activeTimers[timerKey]; exists {
		logrus.Debugf("Timer already exists for deployment '%s' in namespace '%s', skipping creation",
			deploymentName, namespace)
		return
	}

	// Create and store the new timer
	timer := time.AfterFunc(pauseDuration, func() {
		ResumeDeployment(deployment, namespace, clients)
	})

	// Add the new timer to the map
	activeTimers[timerKey] = timer

	logrus.Debugf("Created pause timer for deployment '%s' in namespace '%s' with duration %s",
		deploymentName, namespace, pauseDuration)
}

// ResumeDeployment resumes a deployment that has been paused by Reloader
func ResumeDeployment(deployment *app.Deployment, namespace string, clients kube.Clients) {
	deploymentName := deployment.Name

	currentDeployment, err := clients.KubernetesClient.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})

	if err != nil {
		logrus.Errorf("Failed to get deployment '%s' in namespace '%s': %v", deploymentName, namespace, err)
		return
	}

	if !IsPausedByReloader(currentDeployment) {
		logrus.Infof("Deployment '%s' in namespace '%s' not paused by Reloader. Skipping resume", deploymentName, namespace)
		return
	}

	deploymentFuncs := GetDeploymentRollingUpgradeFuncs()

	resumePatch, err := CreateResumePatch()
	if err != nil {
		logrus.Errorf("Failed to create resume patch for deployment '%s': %v", deploymentName, err)
		return
	}

	// Remove the timer
	timerKey := getTimerKey(namespace, deploymentName)
	if timer, exists := activeTimers[timerKey]; exists {
		timer.Stop()
		delete(activeTimers, timerKey)
		logrus.Debugf("Removed pause timer for deployment '%s' in namespace '%s'", deploymentName, namespace)
	}

	err = deploymentFuncs.PatchFunc(clients, namespace, currentDeployment, patchtypes.StrategicMergePatchType, resumePatch)

	if err != nil {
		logrus.Errorf("Failed to resume deployment '%s' in namespace '%s': %v", deploymentName, namespace, err)
		return
	}

	logrus.Infof("Successfully resumed deployment '%s' in namespace '%s'", deploymentName, namespace)
}

func CreatePausePatch() ([]byte, error) {
	patchData := map[string]interface{}{
		"spec": map[string]interface{}{
			"paused": true,
		},
		"metadata": map[string]interface{}{
			"annotations": map[string]string{
				options.PauseDeploymentTimeAnnotation: time.Now().Format(time.RFC3339),
			},
		},
	}

	return json.Marshal(patchData)
}

func CreateResumePatch() ([]byte, error) {
	patchData := map[string]interface{}{
		"spec": map[string]interface{}{
			"paused": false,
		},
		"metadata": map[string]interface{}{
			"annotations": map[string]interface{}{
				options.PauseDeploymentTimeAnnotation: nil,
			},
		},
	}

	return json.Marshal(patchData)
}
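// Aside (illustrative sketch, not part of the diff): the two helpers above marshal
// to strategic-merge patches roughly like the following (annotation key shortened
// here for readability; the real key comes from options.PauseDeploymentTimeAnnotation):
//
//	pause:  {"metadata":{"annotations":{"<pause-time>":"2006-01-02T15:04:05Z"}},"spec":{"paused":true}}
//	resume: {"metadata":{"annotations":{"<pause-time>":null}},"spec":{"paused":false}}
//
// Setting an annotation to null in a strategic merge patch deletes the key, which
// is how the resume patch clears the pause timestamp.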
internal/pkg/handler/pause_deployment_test.go (new file, 391 lines)
@@ -0,0 +1,391 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stakater/Reloader/internal/pkg/options"
|
||||
"github.com/stakater/Reloader/pkg/kube"
|
||||
"github.com/stretchr/testify/assert"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
"k8s.io/apimachinery/pkg/api/meta"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
testclient "k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
func TestIsPaused(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
deployment *appsv1.Deployment
|
||||
paused bool
|
||||
}{
|
||||
{
|
            name: "paused deployment",
            deployment: &appsv1.Deployment{
                Spec: appsv1.DeploymentSpec{
                    Paused: true,
                },
            },
            paused: true,
        },
        {
            name: "unpaused deployment",
            deployment: &appsv1.Deployment{
                Spec: appsv1.DeploymentSpec{
                    Paused: false,
                },
            },
            paused: false,
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            result := IsPaused(test.deployment)
            assert.Equal(t, test.paused, result)
        })
    }
}

func TestIsPausedByReloader(t *testing.T) {
    tests := []struct {
        name             string
        deployment       *appsv1.Deployment
        pausedByReloader bool
    }{
        {
            name: "paused by reloader",
            deployment: &appsv1.Deployment{
                Spec: appsv1.DeploymentSpec{
                    Paused: true,
                },
                ObjectMeta: metav1.ObjectMeta{
                    Annotations: map[string]string{
                        options.PauseDeploymentTimeAnnotation: time.Now().Format(time.RFC3339),
                    },
                },
            },
            pausedByReloader: true,
        },
        {
            name: "not paused by reloader",
            deployment: &appsv1.Deployment{
                Spec: appsv1.DeploymentSpec{
                    Paused: true,
                },
                ObjectMeta: metav1.ObjectMeta{
                    Annotations: map[string]string{},
                },
            },
            pausedByReloader: false,
        },
        {
            name: "not paused",
            deployment: &appsv1.Deployment{
                Spec: appsv1.DeploymentSpec{
                    Paused: false,
                },
            },
            pausedByReloader: false,
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            pausedByReloader := IsPausedByReloader(test.deployment)
            assert.Equal(t, test.pausedByReloader, pausedByReloader)
        })
    }
}

func TestGetPauseStartTime(t *testing.T) {
    now := time.Now()
    nowStr := now.Format(time.RFC3339)

    tests := []struct {
        name              string
        deployment        *appsv1.Deployment
        pausedByReloader  bool
        expectedStartTime time.Time
    }{
        {
            name: "valid pause time",
            deployment: &appsv1.Deployment{
                Spec: appsv1.DeploymentSpec{
                    Paused: true,
                },
                ObjectMeta: metav1.ObjectMeta{
                    Annotations: map[string]string{
                        options.PauseDeploymentTimeAnnotation: nowStr,
                    },
                },
            },
            pausedByReloader:  true,
            expectedStartTime: now,
        },
        {
            name: "not paused by reloader",
            deployment: &appsv1.Deployment{
                Spec: appsv1.DeploymentSpec{
                    Paused: false,
                },
            },
            pausedByReloader: false,
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            actualStartTime, err := GetPauseStartTime(test.deployment)

            assert.NoError(t, err)

            if !test.pausedByReloader {
                assert.Nil(t, actualStartTime)
            } else {
                assert.NotNil(t, actualStartTime)
                assert.WithinDuration(t, test.expectedStartTime, *actualStartTime, time.Second)
            }
        })
    }
}

func TestParsePauseDuration(t *testing.T) {
    tests := []struct {
        name               string
        pauseIntervalValue string
        expectedDuration   time.Duration
        invalidDuration    bool
    }{
        {
            name:               "valid duration",
            pauseIntervalValue: "10s",
            expectedDuration:   10 * time.Second,
            invalidDuration:    false,
        },
        {
            name:               "valid minute duration",
            pauseIntervalValue: "2m",
            expectedDuration:   2 * time.Minute,
            invalidDuration:    false,
        },
        {
            name:               "invalid duration",
            pauseIntervalValue: "invalid",
            expectedDuration:   0,
            invalidDuration:    true,
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            actualDuration, err := ParsePauseDuration(test.pauseIntervalValue)

            if test.invalidDuration {
                assert.Error(t, err)
            } else {
                assert.NoError(t, err)
                assert.Equal(t, test.expectedDuration, actualDuration)
            }
        })
    }
}

func TestHandleMissingTimerSimple(t *testing.T) {
    tests := []struct {
        name           string
        deployment     *appsv1.Deployment
        shouldBePaused bool // Expected paused state after HandleMissingTimer runs
    }{
        {
            name: "deployment paused by reloader, pause period has expired and no timer",
            deployment: &appsv1.Deployment{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-deployment-1",
                    Annotations: map[string]string{
                        options.PauseDeploymentTimeAnnotation: time.Now().Add(-6 * time.Minute).Format(time.RFC3339),
                        options.PauseDeploymentAnnotation:     "5m",
                    },
                },
                Spec: appsv1.DeploymentSpec{
                    Paused: true,
                },
            },
            shouldBePaused: false,
        },
        {
            name: "deployment paused by reloader, pause period expires in the future and no timer",
            deployment: &appsv1.Deployment{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-deployment-2",
                    Annotations: map[string]string{
                        options.PauseDeploymentTimeAnnotation: time.Now().Add(1 * time.Minute).Format(time.RFC3339),
                        options.PauseDeploymentAnnotation:     "5m",
                    },
                },
                Spec: appsv1.DeploymentSpec{
                    Paused: true,
                },
            },
            shouldBePaused: true,
        },
    }

    for _, test := range tests {
        // Clean up any timers at the end of the test
        defer func() {
            for key, timer := range activeTimers {
                timer.Stop()
                delete(activeTimers, key)
            }
        }()

        t.Run(test.name, func(t *testing.T) {
            fakeClient := testclient.NewClientset()
            clients := kube.Clients{
                KubernetesClient: fakeClient,
            }

            _, err := fakeClient.AppsV1().Deployments("default").Create(
                context.TODO(),
                test.deployment,
                metav1.CreateOptions{})
            assert.NoError(t, err, "Expected no error when creating deployment")

            pauseDuration, _ := ParsePauseDuration(test.deployment.Annotations[options.PauseDeploymentAnnotation])
            HandleMissingTimer(test.deployment, pauseDuration, clients, "default")

            updatedDeployment, _ := fakeClient.AppsV1().Deployments("default").Get(context.TODO(), test.deployment.Name, metav1.GetOptions{})

            assert.Equal(t, test.shouldBePaused, updatedDeployment.Spec.Paused,
                "Deployment should have correct paused state after timer expiration")

            if test.shouldBePaused {
                pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation]
                assert.NotEmpty(t, pausedAtAnnotationValue,
                    "Pause annotation should be present and contain a value when deployment is paused")
            }
        })
    }
}

func TestPauseDeployment(t *testing.T) {
    tests := []struct {
        name               string
        deployment         *appsv1.Deployment
        expectedError      bool
        expectedPaused     bool
        expectedAnnotation bool // Should have the pause-time annotation
        pauseInterval      string
    }{
        {
            name: "deployment without pause annotation",
            deployment: &appsv1.Deployment{
                ObjectMeta: metav1.ObjectMeta{
                    Name:        "test-deployment",
                    Annotations: map[string]string{},
                },
                Spec: appsv1.DeploymentSpec{
                    Paused: false,
                },
            },
            expectedError:      true,
            expectedPaused:     false,
            expectedAnnotation: false,
            pauseInterval:      "",
        },
        {
            name: "deployment already paused but not by reloader",
            deployment: &appsv1.Deployment{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-deployment",
                    Annotations: map[string]string{
                        options.PauseDeploymentAnnotation: "5m",
                    },
                },
                Spec: appsv1.DeploymentSpec{
                    Paused: true,
                },
            },
            expectedError:      false,
            expectedPaused:     true,
            expectedAnnotation: false,
            pauseInterval:      "5m",
        },
        {
            name: "deployment unpaused that needs to be paused by reloader",
            deployment: &appsv1.Deployment{
                ObjectMeta: metav1.ObjectMeta{
                    Name: "test-deployment-3",
                    Annotations: map[string]string{
                        options.PauseDeploymentAnnotation: "5m",
                    },
                },
                Spec: appsv1.DeploymentSpec{
                    Paused: false,
                },
            },
            expectedError:      false,
            expectedPaused:     true,
            expectedAnnotation: true,
            pauseInterval:      "5m",
        },
    }

    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            fakeClient := testclient.NewClientset()
            clients := kube.Clients{
                KubernetesClient: fakeClient,
            }

            _, err := fakeClient.AppsV1().Deployments("default").Create(
                context.TODO(),
                test.deployment,
                metav1.CreateOptions{})
            assert.NoError(t, err, "Expected no error when creating deployment")

            updatedDeployment, err := PauseDeployment(test.deployment, clients, "default", test.pauseInterval)
            if test.expectedError {
                assert.Error(t, err, "Expected an error pausing the deployment")
                return
            } else {
                assert.NoError(t, err, "Expected no error pausing the deployment")
            }

            assert.Equal(t, test.expectedPaused, updatedDeployment.Spec.Paused,
                "Deployment should have correct paused state after pause")

            if test.expectedAnnotation {
                pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation]
                assert.NotEmpty(t, pausedAtAnnotationValue,
                    "Pause annotation should be present and contain a value when deployment is paused")
            } else {
                pausedAtAnnotationValue := updatedDeployment.Annotations[options.PauseDeploymentTimeAnnotation]
                assert.Empty(t, pausedAtAnnotationValue,
                    "Pause annotation should not be present when deployment has not been paused by reloader")
            }
        })
    }
}

// FindDeploymentByName is a small helper for test cases: it scans a slice of
// runtime.Objects and returns the Deployment with the given name.
func FindDeploymentByName(deployments []runtime.Object, deploymentName string) (*appsv1.Deployment, error) {
    for _, deployment := range deployments {
        accessor, err := meta.Accessor(deployment)
        if err != nil {
            return nil, fmt.Errorf("error getting accessor for item: %v", err)
        }
        if accessor.GetName() == deploymentName {
            deploymentObj, ok := deployment.(*appsv1.Deployment)
            if !ok {
                return nil, fmt.Errorf("failed to cast to Deployment")
            }
            return deploymentObj, nil
        }
    }
    return nil, fmt.Errorf("deployment '%s' not found", deploymentName)
}
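
Not shown in this hunk: the implementations under test. As a reading aid, here is a minimal sketch of the simpler helpers, with behavior inferred purely from the assertions above (signatures and package placement are assumptions; the actual code in this PR may differ):

package deployment // hypothetical placement; the tests live alongside the real helpers

import (
    "time"

    "github.com/stakater/Reloader/internal/pkg/options"
    appsv1 "k8s.io/api/apps/v1"
)

// IsPaused reports whether the Deployment rollout is paused.
func IsPaused(deployment *appsv1.Deployment) bool {
    return deployment.Spec.Paused
}

// IsPausedByReloader reports whether the pause was initiated by Reloader:
// the deployment is paused and carries the pause-time annotation.
func IsPausedByReloader(deployment *appsv1.Deployment) bool {
    if !deployment.Spec.Paused {
        return false
    }
    _, found := deployment.Annotations[options.PauseDeploymentTimeAnnotation]
    return found
}

// GetPauseStartTime parses the RFC3339 pause-time annotation. It returns a
// nil time (and no error) when the deployment was not paused by Reloader.
func GetPauseStartTime(deployment *appsv1.Deployment) (*time.Time, error) {
    if !IsPausedByReloader(deployment) {
        return nil, nil
    }
    startTime, err := time.Parse(time.RFC3339, deployment.Annotations[options.PauseDeploymentTimeAnnotation])
    if err != nil {
        return nil, err
    }
    return &startTime, nil
}

// ParsePauseDuration validates a pause interval such as "10s" or "2m". The
// real implementation may additionally reject non-positive durations.
func ParsePauseDuration(pauseIntervalValue string) (time.Duration, error) {
    return time.ParseDuration(pauseIntervalValue)
}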
@@ -1,12 +1,16 @@
package handler

import (
    "time"

    "github.com/sirupsen/logrus"
    "github.com/stakater/Reloader/internal/pkg/metrics"
    "github.com/stakater/Reloader/internal/pkg/options"
    "github.com/stakater/Reloader/internal/pkg/util"
    "github.com/stakater/Reloader/pkg/common"
    v1 "k8s.io/api/core/v1"
    "k8s.io/client-go/tools/record"
    csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
)

// ResourceUpdatedHandler contains updated objects
@@ -15,38 +19,79 @@ type ResourceUpdatedHandler struct {
    OldResource interface{}
    Collectors  metrics.Collectors
    Recorder    record.EventRecorder
    EnqueueTime time.Time // Time when this handler was added to the queue
}

// GetEnqueueTime returns when this handler was enqueued
func (r ResourceUpdatedHandler) GetEnqueueTime() time.Time {
    return r.EnqueueTime
}

// Handle processes the updated resource
func (r ResourceUpdatedHandler) Handle() error {
    startTime := time.Now()
    result := "error"

    defer func() {
        r.Collectors.RecordReconcile(result, time.Since(startTime))
    }()

    if r.Resource == nil || r.OldResource == nil {
        logrus.Errorf("Resource update handler received nil resource")
    } else {
        config, oldSHAData := r.GetConfig()
        if config.SHAValue != oldSHAData {
            // Send a webhook if update
            if options.WebhookUrl != "" {
                return sendUpgradeWebhook(config, options.WebhookUrl)
            }
            // process resource based on its type
            return doRollingUpgrade(config, r.Collectors, r.Recorder, invokeReloadStrategy)
        }
        return nil
    }

    config, oldSHAData := r.GetConfig()
    if config.SHAValue != oldSHAData {
        // Send a webhook if update
        if options.WebhookUrl != "" {
            err := sendUpgradeWebhook(config, options.WebhookUrl)
            if err == nil {
                result = "success"
            }
            return err
        }
        // process resource based on its type
        err := doRollingUpgrade(config, r.Collectors, r.Recorder, invokeReloadStrategy)
        if err == nil {
            result = "success"
        }
        return err
    }

    // No data change - skip
    result = "skipped"
    r.Collectors.RecordSkipped("no_data_change")
    return nil
}

// GetConfig gets configurations containing SHA, annotations, namespace and resource name
func (r ResourceUpdatedHandler) GetConfig() (util.Config, string) {
    var oldSHAData string
    var config util.Config
    if _, ok := r.Resource.(*v1.ConfigMap); ok {
        oldSHAData = util.GetSHAfromConfigmap(r.OldResource.(*v1.ConfigMap))
        config = util.GetConfigmapConfig(r.Resource.(*v1.ConfigMap))
    } else if _, ok := r.Resource.(*v1.Secret); ok {
        oldSHAData = util.GetSHAfromSecret(r.OldResource.(*v1.Secret).Data)
        config = util.GetSecretConfig(r.Resource.(*v1.Secret))
    } else {
        logrus.Warnf("Invalid resource: Resource should be 'Secret' or 'Configmap' but found, %v", r.Resource)
func (r ResourceUpdatedHandler) GetConfig() (common.Config, string) {
    var (
        oldSHAData string
        config     common.Config
    )

    switch res := r.Resource.(type) {
    case *v1.ConfigMap:
        if old, ok := r.OldResource.(*v1.ConfigMap); ok && old != nil {
            oldSHAData = util.GetSHAfromConfigmap(old)
        }
        config = common.GetConfigmapConfig(res)

    case *v1.Secret:
        if old, ok := r.OldResource.(*v1.Secret); ok && old != nil {
            oldSHAData = util.GetSHAfromSecret(old.Data)
        }
        config = common.GetSecretConfig(res)

    case *csiv1.SecretProviderClassPodStatus:
        if old, ok := r.OldResource.(*csiv1.SecretProviderClassPodStatus); ok && old != nil && old.Status.Objects != nil {
            oldSHAData = util.GetSHAfromSecretProviderClassPodStatus(old.Status)
        }
        config = common.GetSecretProviderClassPodStatusConfig(res)
    default:
        logrus.Warnf("Invalid resource: Resource should be 'Secret', 'Configmap' or 'SecretProviderClassPodStatus' but found, %T", r.Resource)
    }
    return config, oldSHAData
}

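EnqueueTime and GetEnqueueTime exist so the processing side can measure how long a handler waited in the queue. A minimal sketch of that wiring (assumed; the consumer is not part of this hunk), using the RecordQueueLatency collector defined later in this diff:

// Sketch only: a queue consumer that records queue wait time before handling.
func processHandler(h ResourceUpdatedHandler) error {
    // Record how long the handler sat in the queue before being picked up.
    h.Collectors.RecordQueueLatency(time.Since(h.GetEnqueueTime()))
    return h.Handle()
}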
@@ -2,14 +2,14 @@ package handler

import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "os"
    "regexp"
    "strconv"
    "strings"
    "time"

    "github.com/parnurzeal/gorequest"
    "github.com/prometheus/client_golang/prometheus"
@@ -20,10 +20,13 @@ import (
    "github.com/stakater/Reloader/internal/pkg/metrics"
    "github.com/stakater/Reloader/internal/pkg/options"
    "github.com/stakater/Reloader/internal/pkg/util"
    "github.com/stakater/Reloader/pkg/common"
    "github.com/stakater/Reloader/pkg/kube"
    app "k8s.io/api/apps/v1"
    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    patchtypes "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/wait"
@@ -139,7 +142,7 @@ func GetArgoRolloutRollingUpgradeFuncs() callbacks.RollingUpgradeFuncs {
    }
}

func sendUpgradeWebhook(config util.Config, webhookUrl string) error {
func sendUpgradeWebhook(config common.Config, webhookUrl string) error {
    logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s', Sending webhook to '%s'",
        config.ResourceName, config.Type, config.Namespace, webhookUrl)

@@ -161,7 +164,12 @@ func sendWebhook(url string) (string, []error) {
        // the reloader seems to retry automatically so no retry logic added
        return "", err
    }
    defer resp.Body.Close()
    defer func() {
        closeErr := resp.Body.Close()
        if closeErr != nil {
            logrus.Error(closeErr)
        }
    }()
    var buffer bytes.Buffer
    _, bufferErr := io.Copy(&buffer, resp.Body)
    if bufferErr != nil {
@@ -170,21 +178,37 @@ func sendWebhook(url string) (string, []error) {
    return buffer.String(), nil
}

func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error {
func doRollingUpgrade(config common.Config, collectors metrics.Collectors, recorder record.EventRecorder, invoke invokeStrategy) error {
    clients := kube.GetClients()

    err := rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder, invoke)
    // Get ignored workload types to avoid listing resources without RBAC permissions
    ignoredWorkloadTypes, err := util.GetIgnoredWorkloadTypesList()
    if err != nil {
        logrus.Errorf("Failed to parse ignored workload types: %v", err)
        ignoredWorkloadTypes = util.List{} // Continue with empty list if parsing fails
    }

    err = rollingUpgrade(clients, config, GetDeploymentRollingUpgradeFuncs(), collectors, recorder, invoke)
    if err != nil {
        return err
    }
    err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder, invoke)
    if err != nil {
        return err

    // Only process CronJobs if they are not ignored
    if !ignoredWorkloadTypes.Contains("cronjobs") {
        err = rollingUpgrade(clients, config, GetCronJobCreateJobFuncs(), collectors, recorder, invoke)
        if err != nil {
            return err
        }
    }
    err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke)
    if err != nil {
        return err

    // Only process Jobs if they are not ignored
    if !ignoredWorkloadTypes.Contains("jobs") {
        err = rollingUpgrade(clients, config, GetJobCreateJobFuncs(), collectors, recorder, invoke)
        if err != nil {
            return err
        }
    }

    err = rollingUpgrade(clients, config, GetDaemonSetRollingUpgradeFuncs(), collectors, recorder, invoke)
    if err != nil {
        return err
@@ -204,7 +228,7 @@ func doRollingUpgrade(config util.Config, collectors metrics.Collectors, recorde
    return nil
}

func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
func rollingUpgrade(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
    err := PerformAction(clients, config, upgradeFuncs, collectors, recorder, strategy)
    if err != nil {
        logrus.Errorf("Rolling upgrade for '%s' failed with error = %v", config.ResourceName, err)
@@ -213,26 +237,38 @@ func rollingUpgrade(clients kube.Clients, config util.Config, upgradeFuncs callb
}

// PerformAction invokes the deployment if there is any change in configmap or secret data
func PerformAction(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
func PerformAction(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy) error {
    items := upgradeFuncs.ItemsFunc(clients, config.Namespace)

    // Record workloads scanned
    collectors.RecordWorkloadsScanned(upgradeFuncs.ResourceType, len(items))

    matchedCount := 0
    for _, item := range items {
        err := retryOnConflict(retry.DefaultRetry, func(fetchResource bool) error {
        matched, err := retryOnConflict(retry.DefaultRetry, func(fetchResource bool) (bool, error) {
            return upgradeResource(clients, config, upgradeFuncs, collectors, recorder, strategy, item, fetchResource)
        })
        if err != nil {
            return err
        }
        if matched {
            matchedCount++
        }
    }

    // Record workloads matched
    collectors.RecordWorkloadsMatched(upgradeFuncs.ResourceType, matchedCount)

    return nil
}

func retryOnConflict(backoff wait.Backoff, fn func(_ bool) error) error {
func retryOnConflict(backoff wait.Backoff, fn func(_ bool) (bool, error)) (bool, error) {
    var lastError error
    var matched bool
    fetchResource := false // do not fetch resource on first attempt, already done by ItemsFunc
    err := wait.ExponentialBackoff(backoff, func() (bool, error) {
        err := fn(fetchResource)
        var err error
        matched, err = fn(fetchResource)
        fetchResource = true
        switch {
        case err == nil:
@@ -247,144 +283,107 @@ func retryOnConflict(backoff wait.Backoff, fn func(_ bool) error) error {
    if wait.Interrupted(err) {
        err = lastError
    }
    return err
    return matched, err
}

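The ignored-workload gating in doRollingUpgrade above relies on util.GetIgnoredWorkloadTypesList and a util.List type that are not part of this hunk. A minimal sketch of the Contains helper as the calls above assume it (the real implementation lives in internal/pkg/util and may differ):

// Sketch only: a string list with a linear-scan membership check.
type List []string

// Contains reports whether s is an element of the list.
func (l List) Contains(s string) bool {
    for _, v := range l {
        if v == s {
            return true
        }
    }
    return false
}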
func upgradeResource(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy, resource runtime.Object, fetchResource bool) error {
func upgradeResource(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs, collectors metrics.Collectors, recorder record.EventRecorder, strategy invokeStrategy, resource runtime.Object, fetchResource bool) (bool, error) {
    actionStartTime := time.Now()

    accessor, err := meta.Accessor(resource)
    if err != nil {
        return err
        return false, err
    }

    resourceName := accessor.GetName()
    if fetchResource {
        resource, err = upgradeFuncs.ItemFunc(clients, resourceName, config.Namespace)
        if err != nil {
            return err
            return false, err
        }
    }
    if config.Type == constants.SecretProviderClassEnvVarPostfix {
        populateAnnotationsFromSecretProviderClass(clients, &config)
    }

    annotations := upgradeFuncs.AnnotationsFunc(resource)
    podAnnotations := upgradeFuncs.PodAnnotationsFunc(resource)
    result := common.ShouldReload(config, upgradeFuncs.ResourceType, annotations, podAnnotations, common.GetCommandLineOptions())

    if !result.ShouldReload {
        logrus.Debugf("No changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
        return false, nil
    }

    strategyResult := strategy(upgradeFuncs, resource, config, result.AutoReload)

    if strategyResult.Result != constants.Updated {
        collectors.RecordSkipped("strategy_not_updated")
        return false, nil
    }

    // find correct annotation and update the resource
    annotations := upgradeFuncs.AnnotationsFunc(resource)
    annotationValue, found := annotations[config.Annotation]
    searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
    reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
    typedAutoAnnotationEnabledValue, foundTypedAuto := annotations[config.TypedAutoAnnotation]
    excludeConfigmapAnnotationValue, foundExcludeConfigmap := annotations[options.ConfigmapExcludeReloaderAnnotation]
    excludeSecretAnnotationValue, foundExcludeSecret := annotations[options.SecretExcludeReloaderAnnotation]
    pauseInterval, foundPauseInterval := annotations[options.PauseDeploymentAnnotation]

    if !found && !foundAuto && !foundTypedAuto && !foundSearchAnn {
        annotations = upgradeFuncs.PodAnnotationsFunc(resource)
        annotationValue = annotations[config.Annotation]
        searchAnnotationValue = annotations[options.AutoSearchAnnotation]
        reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
        typedAutoAnnotationEnabledValue = annotations[config.TypedAutoAnnotation]
    }

    isResourceExcluded := false

    switch config.Type {
    case constants.ConfigmapEnvVarPostfix:
        if foundExcludeConfigmap {
            isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeConfigmapAnnotationValue)
        }
    case constants.SecretEnvVarPostfix:
        if foundExcludeSecret {
            isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretAnnotationValue)
        }
    }

    if isResourceExcluded {
        return nil
    }

    strategyResult := InvokeStrategyResult{constants.NotUpdated, nil}
    reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue)
    typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue)
    if reloaderEnabled || typedAutoAnnotationEnabled || reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll {
        strategyResult = strategy(upgradeFuncs, resource, config, true)
    }

    if strategyResult.Result != constants.Updated && annotationValue != "" {
        values := strings.Split(annotationValue, ",")
        for _, value := range values {
            value = strings.TrimSpace(value)
            re := regexp.MustCompile("^" + value + "$")
            if re.Match([]byte(config.ResourceName)) {
                strategyResult = strategy(upgradeFuncs, resource, config, false)
                if strategyResult.Result == constants.Updated {
                    break
                }
            }
        }
    }

    if strategyResult.Result != constants.Updated && searchAnnotationValue == "true" {
        matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
        if matchAnnotationValue == "true" {
            strategyResult = strategy(upgradeFuncs, resource, config, true)
        }
    }
    if strategyResult.Result == constants.Updated {
        var err error
        if upgradeFuncs.SupportsPatch && strategyResult.Patch != nil {
            err = upgradeFuncs.PatchFunc(clients, config.Namespace, resource, strategyResult.Patch.Type, strategyResult.Patch.Bytes)
    if foundPauseInterval {
        deployment, ok := resource.(*app.Deployment)
        if !ok {
            logrus.Warnf("Annotation '%s' only applicable for deployments", options.PauseDeploymentAnnotation)
        } else {
            err = upgradeFuncs.UpdateFunc(clients, config.Namespace, resource)
        }

        if err != nil {
            message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
            logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)

            collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
            collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": config.Namespace}).Inc()
            if recorder != nil {
                recorder.Event(resource, v1.EventTypeWarning, "ReloadFail", message)
            }
            return err
        } else {
            message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
            message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)

            logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'; updated '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)

            collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
            collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": config.Namespace}).Inc()
            alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
            if recorder != nil {
                recorder.Event(resource, v1.EventTypeNormal, "Reloaded", message)
            }
            if ok && alert_on_reload == "true" {
                msg := fmt.Sprintf(
                    "Reloader detected changes in *%s* of type *%s* in namespace *%s*. Hence reloaded *%s* of type *%s* in namespace *%s*",
                    config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
                alert.SendWebhookAlert(msg)
            _, err = PauseDeployment(deployment, clients, config.Namespace, pauseInterval)
            if err != nil {
                logrus.Errorf("Failed to pause deployment '%s' in namespace '%s': %v", resourceName, config.Namespace, err)
                return true, err
            }
        }
    }

    return nil
}

func checkIfResourceIsExcluded(resourceName, excludedResources string) bool {
    if excludedResources == "" {
        return false
    if upgradeFuncs.SupportsPatch && strategyResult.Patch != nil {
        err = upgradeFuncs.PatchFunc(clients, config.Namespace, resource, strategyResult.Patch.Type, strategyResult.Patch.Bytes)
    } else {
        err = upgradeFuncs.UpdateFunc(clients, config.Namespace, resource)
    }

    excludedResourcesList := strings.Split(excludedResources, ",")
    for _, excludedResource := range excludedResourcesList {
        if strings.TrimSpace(excludedResource) == resourceName {
            return true
    actionLatency := time.Since(actionStartTime)

    if err != nil {
        message := fmt.Sprintf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)
        logrus.Errorf("Update for '%s' of type '%s' in namespace '%s' failed with error %v", resourceName, upgradeFuncs.ResourceType, config.Namespace, err)

        collectors.Reloaded.With(prometheus.Labels{"success": "false"}).Inc()
        collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "false", "namespace": config.Namespace}).Inc()
        collectors.RecordAction(upgradeFuncs.ResourceType, "error", actionLatency)
        if recorder != nil {
            recorder.Event(resource, v1.EventTypeWarning, "ReloadFail", message)
        }
        return true, err
    } else {
        message := fmt.Sprintf("Changes detected in '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace)
        message += fmt.Sprintf(", Updated '%s' of type '%s' in namespace '%s'", resourceName, upgradeFuncs.ResourceType, config.Namespace)

        logrus.Infof("Changes detected in '%s' of type '%s' in namespace '%s'; updated '%s' of type '%s' in namespace '%s'", config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)

        collectors.Reloaded.With(prometheus.Labels{"success": "true"}).Inc()
        collectors.ReloadedByNamespace.With(prometheus.Labels{"success": "true", "namespace": config.Namespace}).Inc()
        collectors.RecordAction(upgradeFuncs.ResourceType, "success", actionLatency)
        alert_on_reload, ok := os.LookupEnv("ALERT_ON_RELOAD")
        if recorder != nil {
            recorder.Event(resource, v1.EventTypeNormal, "Reloaded", message)
        }
        if ok && alert_on_reload == "true" {
            msg := fmt.Sprintf(
                "Reloader detected changes in *%s* of type *%s* in namespace *%s*. Hence reloaded *%s* of type *%s* in namespace *%s*",
                config.ResourceName, config.Type, config.Namespace, resourceName, upgradeFuncs.ResourceType, config.Namespace)
            alert.SendWebhookAlert(msg)
        }
    }

    return false
    return true, nil
}

func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string) string {
    for i := range volumes {
        if mountType == constants.ConfigmapEnvVarPostfix {
        switch mountType {
        case constants.ConfigmapEnvVarPostfix:
            if volumes[i].ConfigMap != nil && volumes[i].ConfigMap.Name == volumeName {
                return volumes[i].Name
            }
@@ -396,7 +395,7 @@ func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string
            }
        }
    }
        } else if mountType == constants.SecretEnvVarPostfix {
        case constants.SecretEnvVarPostfix:
            if volumes[i].Secret != nil && volumes[i].Secret.SecretName == volumeName {
                return volumes[i].Name
            }
@@ -408,6 +407,10 @@ func getVolumeMountName(volumes []v1.Volume, mountType string, volumeName string
            }
        }
    }
        case constants.SecretProviderClassEnvVarPostfix:
            if volumes[i].CSI != nil && volumes[i].CSI.VolumeAttributes["secretProviderClass"] == volumeName {
                return volumes[i].Name
            }
        }
    }

@@ -433,9 +436,9 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
    for j := range envs {
        envVarSource := envs[j].ValueFrom
        if envVarSource != nil {
            if resourceType == constants.SecretEnvVarPostfix && envVarSource.SecretKeyRef != nil && envVarSource.SecretKeyRef.LocalObjectReference.Name == resourceName {
            if resourceType == constants.SecretEnvVarPostfix && envVarSource.SecretKeyRef != nil && envVarSource.SecretKeyRef.Name == resourceName {
                return &containers[i]
            } else if resourceType == constants.ConfigmapEnvVarPostfix && envVarSource.ConfigMapKeyRef != nil && envVarSource.ConfigMapKeyRef.LocalObjectReference.Name == resourceName {
            } else if resourceType == constants.ConfigmapEnvVarPostfix && envVarSource.ConfigMapKeyRef != nil && envVarSource.ConfigMapKeyRef.Name == resourceName {
                return &containers[i]
            }
        }
@@ -443,9 +446,9 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string

    envsFrom := containers[i].EnvFrom
    for j := range envsFrom {
        if resourceType == constants.SecretEnvVarPostfix && envsFrom[j].SecretRef != nil && envsFrom[j].SecretRef.LocalObjectReference.Name == resourceName {
        if resourceType == constants.SecretEnvVarPostfix && envsFrom[j].SecretRef != nil && envsFrom[j].SecretRef.Name == resourceName {
            return &containers[i]
        } else if resourceType == constants.ConfigmapEnvVarPostfix && envsFrom[j].ConfigMapRef != nil && envsFrom[j].ConfigMapRef.LocalObjectReference.Name == resourceName {
        } else if resourceType == constants.ConfigmapEnvVarPostfix && envsFrom[j].ConfigMapRef != nil && envsFrom[j].ConfigMapRef.Name == resourceName {
            return &containers[i]
        }
    }
@@ -453,7 +456,7 @@ func getContainerWithEnvReference(containers []v1.Container, resourceName string
    return nil
}

func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) *v1.Container {
func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) *v1.Container {
    volumes := upgradeFuncs.VolumesFunc(item)
    containers := upgradeFuncs.ContainersFunc(item)
    initContainers := upgradeFuncs.InitContainersFunc(item)
@@ -467,7 +470,11 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
    container = getContainerWithVolumeMount(initContainers, volumeMountName)
    if container != nil {
        // if configmap/secret is being used in init container then return the first Pod container to save reloader env
        return &containers[0]
        if len(containers) > 0 {
            return &containers[0]
        }
        // No containers available, return nil to avoid crash
        return nil
    }
    } else if container != nil {
        return container
@@ -480,13 +487,21 @@ func getContainerUsingResource(upgradeFuncs callbacks.RollingUpgradeFuncs, item
    container = getContainerWithEnvReference(initContainers, config.ResourceName, config.Type)
    if container != nil {
        // if configmap/secret is being used in init container then return the first Pod container to save reloader env
        return &containers[0]
        if len(containers) > 0 {
            return &containers[0]
        }
        // No containers available, return nil to avoid crash
        return nil
    }
    }

    // Get the first container if the annotation is related to specified configmap or secret i.e. configmap.reloader.stakater.com/reload
    if container == nil && !autoReload {
        return &containers[0]
        if len(containers) > 0 {
            return &containers[0]
        }
        // No containers available, return nil to avoid crash
        return nil
    }

    return container
@@ -502,16 +517,16 @@ type InvokeStrategyResult struct {
    Patch *Patch
}

type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult
type invokeStrategy func(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult

func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
func invokeReloadStrategy(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
    if options.ReloadStrategy == constants.AnnotationsReloadStrategy {
        return updatePodAnnotations(upgradeFuncs, item, config, autoReload)
    }
    return updateContainerEnvVars(upgradeFuncs, item, config, autoReload)
}

func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
    container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)
    if container == nil {
        return InvokeStrategyResult{constants.NoContainerFound, nil}
@@ -519,7 +534,7 @@ func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti

    // Generate reloaded annotations. Attaching this to the item's annotation will trigger a rollout
    // Note: the data on this struct is purely informational and is not used for future updates
    reloadSource := util.NewReloadSourceFromConfig(config, []string{container.Name})
    reloadSource := common.NewReloadSourceFromConfig(config, []string{container.Name})
    annotations, patch, err := createReloadedAnnotations(&reloadSource, upgradeFuncs)
    if err != nil {
        logrus.Errorf("Failed to create reloaded annotations for %s! error = %v", config.ResourceName, err)
@@ -532,6 +547,10 @@ func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti
        return InvokeStrategyResult{constants.NotUpdated, nil}
    }

    if config.Type == constants.SecretProviderClassEnvVarPostfix && secretProviderClassAnnotationReloaded(pa, config) {
        return InvokeStrategyResult{constants.NotUpdated, nil}
    }

    for k, v := range annotations {
        pa[k] = v
    }
@@ -539,6 +558,11 @@ func updatePodAnnotations(upgradeFuncs callbacks.RollingUpgradeFuncs, item runti
    return InvokeStrategyResult{constants.Updated, &Patch{Type: patchtypes.StrategicMergePatchType, Bytes: patch}}
}

func secretProviderClassAnnotationReloaded(oldAnnotations map[string]string, newConfig common.Config) bool {
    annotation := oldAnnotations[getReloaderAnnotationKey()]
    return strings.Contains(annotation, newConfig.ResourceName) && strings.Contains(annotation, newConfig.SHAValue)
}

func getReloaderAnnotationKey() string {
    return fmt.Sprintf("%s/%s",
        constants.ReloaderAnnotationPrefix,
@@ -546,7 +570,7 @@ func getReloaderAnnotationKey() string {
    )
}

func createReloadedAnnotations(target *util.ReloadSource, upgradeFuncs callbacks.RollingUpgradeFuncs) (map[string]string, []byte, error) {
func createReloadedAnnotations(target *common.ReloadSource, upgradeFuncs callbacks.RollingUpgradeFuncs) (map[string]string, []byte, error) {
    if target == nil {
        return nil, nil, errors.New("target is required")
    }
@@ -581,7 +605,7 @@ func getEnvVarName(resourceName string, typeName string) string {
    return constants.EnvVarPrefix + util.ConvertToEnvVarName(resourceName) + "_" + typeName
}

func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config util.Config, autoReload bool) InvokeStrategyResult {
func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item runtime.Object, config common.Config, autoReload bool) InvokeStrategyResult {
    envVar := getEnvVarName(config.ResourceName, config.Type)
    container := getContainerUsingResource(upgradeFuncs, item, config, autoReload)

@@ -589,6 +613,10 @@ func updateContainerEnvVars(upgradeFuncs callbacks.RollingUpgradeFuncs, item run
        return InvokeStrategyResult{constants.NoContainerFound, nil}
    }

    if config.Type == constants.SecretProviderClassEnvVarPostfix && secretProviderClassEnvReloaded(upgradeFuncs.ContainersFunc(item), envVar, config.SHAValue) {
        return InvokeStrategyResult{constants.NotUpdated, nil}
    }

    // update if env var exists
    updateResult := updateEnvVar(container, envVar, config.SHAValue)

@@ -625,6 +653,32 @@ func updateEnvVar(container *v1.Container, envVar string, shaData string) consta
    return constants.NoEnvVarFound
}

func secretProviderClassEnvReloaded(containers []v1.Container, envVar string, shaData string) bool {
    for _, container := range containers {
        for _, env := range container.Env {
            if env.Name == envVar {
                return env.Value == shaData
            }
        }
    }
    return false
}

func populateAnnotationsFromSecretProviderClass(clients kube.Clients, config *common.Config) {
    obj, err := clients.CSIClient.SecretsstoreV1().SecretProviderClasses(config.Namespace).Get(context.Background(), config.ResourceName, metav1.GetOptions{})
    annotations := make(map[string]string)
    if err != nil {
        if apierrors.IsNotFound(err) {
            logrus.Warnf("SecretProviderClass '%s' not found in namespace '%s'", config.ResourceName, config.Namespace)
        } else {
            logrus.Errorf("Failed to get SecretProviderClass '%s' in namespace '%s': %v", config.ResourceName, config.Namespace, err)
        }
    } else if obj.Annotations != nil {
        annotations = obj.Annotations
    }
    config.ResourceAnnotations = annotations
}

func jsonEscape(toEscape string) (string, error) {
    bytes, err := json.Marshal(toEscape)
    if err != nil {
File diff suppressed because it is too large
@@ -16,7 +16,7 @@ import (
    "github.com/stakater/Reloader/internal/pkg/metrics"
    "github.com/stakater/Reloader/internal/pkg/options"
    "github.com/stakater/Reloader/internal/pkg/testutil"
    "github.com/stakater/Reloader/internal/pkg/util"
    "github.com/stakater/Reloader/pkg/common"
    "github.com/stakater/Reloader/pkg/kube"
)

@@ -45,7 +45,7 @@ func TestHealthz(t *testing.T) {
    want := 200

    if got != want {
        t.Fatalf("got: %q, want: %q", got, want)
        t.Fatalf("got: %d, want: %d", got, want)
    }

    // Have the liveness probe serve a 500
@@ -63,7 +63,7 @@ func TestHealthz(t *testing.T) {
    want = 500

    if got != want {
        t.Fatalf("got: %q, want: %q", got, want)
        t.Fatalf("got: %d, want: %d", got, want)
    }
}

@@ -89,7 +89,7 @@ func TestRunLeaderElection(t *testing.T) {
    want := 500

    if got != want {
        t.Fatalf("got: %q, want: %q", got, want)
        t.Fatalf("got: %d, want: %d", got, want)
    }

    // Cancel the leader election context, so leadership is released and
@@ -108,7 +108,7 @@ func TestRunLeaderElection(t *testing.T) {
    want = 500

    if got != want {
        t.Fatalf("got: %q, want: %q", got, want)
        t.Fatalf("got: %d, want: %d", got, want)
    }
}

@@ -159,7 +159,7 @@ func TestRunLeaderElectionWithControllers(t *testing.T) {
    // Verifying deployment update
    logrus.Infof("Verifying pod envvars has been created")
    shaData := testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com")
    config := util.Config{
    config := common.Config{
        Namespace:    testutil.Namespace,
        ResourceName: configmapName,
        SHAValue:     shaData,
@@ -186,7 +186,7 @@ func TestRunLeaderElectionWithControllers(t *testing.T) {
    // Verifying that the deployment was not updated as leadership has been lost
    logrus.Infof("Verifying pod envvars has not been updated")
    shaData = testutil.ConvertResourceToSHA(testutil.ConfigmapResourceType, testutil.Namespace, configmapName, "www.stakater.com/new")
    config = util.Config{
    config = common.Config{
        Namespace:    testutil.Namespace,
        ResourceName: configmapName,
        SHAValue:     shaData,

@@ -1,54 +1,390 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"k8s.io/client-go/tools/metrics"
|
||||
)
|
||||
|
||||
// clientGoRequestMetrics implements metrics.LatencyMetric and metrics.ResultMetric
|
||||
// to expose client-go's rest_client_requests_total metric
|
||||
type clientGoRequestMetrics struct {
|
||||
requestCounter *prometheus.CounterVec
|
||||
requestLatency *prometheus.HistogramVec
|
||||
}
|
||||
|
||||
func (m *clientGoRequestMetrics) Increment(ctx context.Context, code string, method string, host string) {
|
||||
m.requestCounter.WithLabelValues(code, method, host).Inc()
|
||||
}
|
||||
|
||||
func (m *clientGoRequestMetrics) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) {
|
||||
m.requestLatency.WithLabelValues(verb, u.Host).Observe(latency.Seconds())
|
||||
}
|
||||
|
||||
var clientGoMetrics = &clientGoRequestMetrics{
|
||||
requestCounter: prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "rest_client_requests_total",
|
||||
Help: "Number of HTTP requests, partitioned by status code, method, and host.",
|
||||
},
|
||||
[]string{"code", "method", "host"},
|
||||
),
|
||||
requestLatency: prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "rest_client_request_duration_seconds",
|
||||
Help: "Request latency in seconds. Broken down by verb and host.",
|
||||
Buckets: []float64{0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 30},
|
||||
},
|
||||
[]string{"verb", "host"},
|
||||
),
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Register the metrics collectors
|
||||
prometheus.MustRegister(clientGoMetrics.requestCounter)
|
||||
prometheus.MustRegister(clientGoMetrics.requestLatency)
|
||||
|
||||
// Register our metrics implementation with client-go
|
||||
metrics.RequestResult = clientGoMetrics
|
||||
metrics.RequestLatency = clientGoMetrics
|
||||
}
|
||||
|
||||
// Collectors holds all Prometheus metrics collectors for Reloader.
|
||||
type Collectors struct {
|
||||
Reloaded *prometheus.CounterVec
|
||||
ReloadedByNamespace *prometheus.CounterVec
|
||||
countByNamespace bool
|
||||
|
||||
ReconcileTotal *prometheus.CounterVec // Total reconcile calls by result
|
||||
ReconcileDuration *prometheus.HistogramVec // Time spent in reconcile/handler
|
||||
ActionTotal *prometheus.CounterVec // Total actions by workload kind and result
|
||||
ActionLatency *prometheus.HistogramVec // Time from event to action applied
|
||||
SkippedTotal *prometheus.CounterVec // Skipped operations by reason
|
||||
QueueDepth prometheus.Gauge // Current queue depth
|
||||
QueueAdds prometheus.Counter // Total items added to queue
|
||||
QueueLatency *prometheus.HistogramVec // Time spent in queue
|
||||
ErrorsTotal *prometheus.CounterVec // Errors by type
|
||||
RetriesTotal prometheus.Counter // Total retries
|
||||
EventsReceived *prometheus.CounterVec // Events received by type (add/update/delete)
|
||||
EventsProcessed *prometheus.CounterVec // Events processed by type and result
|
||||
WorkloadsScanned *prometheus.CounterVec // Workloads scanned by kind
|
||||
WorkloadsMatched *prometheus.CounterVec // Workloads matched for reload by kind
|
||||
}
|
||||
|
||||
// RecordReload records a reload event with the given success status and namespace.
|
||||
// Preserved for backward compatibility.
|
||||
func (c *Collectors) RecordReload(success bool, namespace string) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
|
||||
successLabel := "false"
|
||||
if success {
|
||||
successLabel = "true"
|
||||
}
|
||||
|
||||
c.Reloaded.With(prometheus.Labels{"success": successLabel}).Inc()
|
||||
|
||||
if c.countByNamespace {
|
||||
c.ReloadedByNamespace.With(prometheus.Labels{
|
||||
"success": successLabel,
|
||||
"namespace": namespace,
|
||||
}).Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// RecordReconcile records a reconcile/handler invocation.
|
||||
func (c *Collectors) RecordReconcile(result string, duration time.Duration) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.ReconcileTotal.With(prometheus.Labels{"result": result}).Inc()
|
||||
c.ReconcileDuration.With(prometheus.Labels{"result": result}).Observe(duration.Seconds())
|
||||
}
|
||||
|
||||
// RecordAction records a reload action on a workload.
|
||||
func (c *Collectors) RecordAction(workloadKind string, result string, latency time.Duration) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.ActionTotal.With(prometheus.Labels{"workload_kind": workloadKind, "result": result}).Inc()
|
||||
c.ActionLatency.With(prometheus.Labels{"workload_kind": workloadKind}).Observe(latency.Seconds())
|
||||
}
|
||||
|
||||
// RecordSkipped records a skipped operation with reason.
|
||||
func (c *Collectors) RecordSkipped(reason string) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.SkippedTotal.With(prometheus.Labels{"reason": reason}).Inc()
|
||||
}
|
||||
|
||||
// RecordQueueAdd records an item being added to the queue.
|
||||
func (c *Collectors) RecordQueueAdd() {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.QueueAdds.Inc()
|
||||
}
|
||||
|
||||
// SetQueueDepth sets the current queue depth.
|
||||
func (c *Collectors) SetQueueDepth(depth int) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.QueueDepth.Set(float64(depth))
|
||||
}
|
||||
|
||||
// RecordQueueLatency records how long an item spent in the queue.
|
||||
func (c *Collectors) RecordQueueLatency(latency time.Duration) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.QueueLatency.With(prometheus.Labels{}).Observe(latency.Seconds())
|
||||
}
|
||||
|
||||
// RecordError records an error by type.
|
||||
func (c *Collectors) RecordError(errorType string) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.ErrorsTotal.With(prometheus.Labels{"type": errorType}).Inc()
|
||||
}
|
||||
|
||||
// RecordRetry records a retry attempt.
|
||||
func (c *Collectors) RecordRetry() {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.RetriesTotal.Inc()
|
||||
}
|
||||
|
||||
// RecordEventReceived records an event being received.
|
||||
func (c *Collectors) RecordEventReceived(eventType string, resourceType string) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.EventsReceived.With(prometheus.Labels{"event_type": eventType, "resource_type": resourceType}).Inc()
|
||||
}
|
||||
|
||||
// RecordEventProcessed records an event being processed.
|
||||
func (c *Collectors) RecordEventProcessed(eventType string, resourceType string, result string) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.EventsProcessed.With(prometheus.Labels{"event_type": eventType, "resource_type": resourceType, "result": result}).Inc()
|
||||
}
|
||||
|
||||
// RecordWorkloadsScanned records workloads scanned during a reconcile.
|
||||
func (c *Collectors) RecordWorkloadsScanned(kind string, count int) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.WorkloadsScanned.With(prometheus.Labels{"kind": kind}).Add(float64(count))
|
||||
}
|
||||
|
||||
// RecordWorkloadsMatched records workloads matched for reload.
|
||||
func (c *Collectors) RecordWorkloadsMatched(kind string, count int) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
c.WorkloadsMatched.With(prometheus.Labels{"kind": kind}).Add(float64(count))
|
||||
}
|
||||
|
||||
func NewCollectors() Collectors {
|
||||
// Existing metrics (preserved)
|
||||
reloaded := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "reload_executed_total",
|
||||
Help: "Counter of reloads executed by Reloader.",
|
||||
},
|
||||
[]string{
|
||||
"success",
|
||||
},
|
||||
[]string{"success"},
|
||||
)
|
||||
|
||||
//set 0 as default value
|
||||
reloaded.With(prometheus.Labels{"success": "true"}).Add(0)
|
||||
reloaded.With(prometheus.Labels{"success": "false"}).Add(0)
|
||||
|
||||
reloaded_by_namespace := prometheus.NewCounterVec(
|
||||
reloadedByNamespace := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "reload_executed_total_by_namespace",
|
||||
Help: "Counter of reloads executed by Reloader by namespace.",
|
||||
},
|
||||
[]string{
|
||||
"success",
|
||||
"namespace",
|
||||
[]string{"success", "namespace"},
|
||||
)
|
||||
|
||||
reconcileTotal := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "reconcile_total",
|
||||
Help: "Total number of reconcile/handler invocations by result.",
|
||||
},
|
||||
[]string{"result"},
|
||||
)
|
||||
|
||||
reconcileDuration := prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "reconcile_duration_seconds",
|
||||
Help: "Time spent in reconcile/handler in seconds.",
|
||||
Buckets: []float64{0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
|
||||
},
|
||||
[]string{"result"},
|
||||
)
|
||||
|
||||
actionTotal := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "action_total",
|
||||
Help: "Total number of reload actions by workload kind and result.",
|
||||
},
|
||||
[]string{"workload_kind", "result"},
|
||||
)
|
||||
|
||||
actionLatency := prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "action_latency_seconds",
|
||||
Help: "Time from event received to action applied in seconds.",
|
||||
Buckets: []float64{0.01, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30, 60},
|
||||
},
|
||||
[]string{"workload_kind"},
|
||||
)
|
||||
|
||||
skippedTotal := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "skipped_total",
|
||||
Help: "Total number of skipped operations by reason.",
|
||||
},
|
||||
[]string{"reason"},
|
||||
)
|
||||
|
||||
queueDepth := prometheus.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "workqueue_depth",
|
||||
Help: "Current depth of the work queue.",
|
||||
},
|
||||
)
|
||||
|
||||
queueAdds := prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "workqueue_adds_total",
|
||||
Help: "Total number of items added to the work queue.",
|
||||
},
|
||||
)
|
||||
|
||||
queueLatency := prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "workqueue_latency_seconds",
|
||||
Help: "Time spent in the work queue in seconds.",
|
||||
Buckets: []float64{0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5},
|
||||
},
|
||||
[]string{},
|
||||
)
|
||||
|
||||
errorsTotal := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "errors_total",
|
||||
Help: "Total number of errors by type.",
|
||||
},
|
||||
[]string{"type"},
|
||||
)
|
||||
|
||||
retriesTotal := prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "retries_total",
|
||||
Help: "Total number of retry attempts.",
|
||||
},
|
||||
)
|
||||
|
||||
eventsReceived := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "events_received_total",
|
||||
Help: "Total number of events received by type and resource.",
|
||||
},
|
||||
[]string{"event_type", "resource_type"},
|
||||
)
|
||||
|
||||
eventsProcessed := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "events_processed_total",
|
||||
Help: "Total number of events processed by type, resource, and result.",
|
||||
},
|
||||
[]string{"event_type", "resource_type", "result"},
|
||||
)
|
||||
|
||||
workloadsScanned := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "workloads_scanned_total",
|
||||
Help: "Total number of workloads scanned by kind.",
|
||||
},
|
||||
[]string{"kind"},
|
||||
)
|
||||
|
||||
workloadsMatched := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "reloader",
|
||||
Name: "workloads_matched_total",
|
||||
Help: "Total number of workloads matched for reload by kind.",
|
||||
},
|
||||
[]string{"kind"},
|
||||
)
|
||||
|
||||
return Collectors{
|
||||
Reloaded: reloaded,
|
||||
ReloadedByNamespace: reloaded_by_namespace,
|
||||
ReloadedByNamespace: reloadedByNamespace,
|
||||
countByNamespace: os.Getenv("METRICS_COUNT_BY_NAMESPACE") == "enabled",
|
||||
|
||||
		ReconcileTotal:    reconcileTotal,
		ReconcileDuration: reconcileDuration,
		ActionTotal:       actionTotal,
		ActionLatency:     actionLatency,
		SkippedTotal:      skippedTotal,
		QueueDepth:        queueDepth,
		QueueAdds:         queueAdds,
		QueueLatency:      queueLatency,
		ErrorsTotal:       errorsTotal,
		RetriesTotal:      retriesTotal,
		EventsReceived:    eventsReceived,
		EventsProcessed:   eventsProcessed,
		WorkloadsScanned:  workloadsScanned,
		WorkloadsMatched:  workloadsMatched,
	}
}

func SetupPrometheusEndpoint() Collectors {
	collectors := NewCollectors()

	prometheus.MustRegister(collectors.Reloaded)
	prometheus.MustRegister(collectors.ReconcileTotal)
	prometheus.MustRegister(collectors.ReconcileDuration)
	prometheus.MustRegister(collectors.ActionTotal)
	prometheus.MustRegister(collectors.ActionLatency)
	prometheus.MustRegister(collectors.SkippedTotal)
	prometheus.MustRegister(collectors.QueueDepth)
	prometheus.MustRegister(collectors.QueueAdds)
	prometheus.MustRegister(collectors.QueueLatency)
	prometheus.MustRegister(collectors.ErrorsTotal)
	prometheus.MustRegister(collectors.RetriesTotal)
	prometheus.MustRegister(collectors.EventsReceived)
	prometheus.MustRegister(collectors.EventsProcessed)
	prometheus.MustRegister(collectors.WorkloadsScanned)
	prometheus.MustRegister(collectors.WorkloadsMatched)

	if os.Getenv("METRICS_COUNT_BY_NAMESPACE") == "enabled" {
		prometheus.MustRegister(collectors.ReloadedByNamespace)
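For orientation, a minimal usage sketch for the work-queue collectors registered above; the call site, label values, and the time import are illustrative assumptions, not taken from the source:

	c := SetupPrometheusEndpoint()

	enqueued := time.Now()
	c.QueueAdds.Inc()
	c.QueueDepth.Inc()

	// ... later, when a worker dequeues the item ...
	c.QueueDepth.Dec()
	c.QueueLatency.WithLabelValues().Observe(time.Since(enqueued).Seconds())
	c.EventsReceived.WithLabelValues("update", "configmap").Inc()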
@@ -20,16 +20,25 @@ var (
	// SecretUpdateOnChangeAnnotation is an annotation to detect changes in
	// secrets specified by name
	SecretUpdateOnChangeAnnotation = "secret.reloader.stakater.com/reload"
	// SecretProviderClassUpdateOnChangeAnnotation is an annotation to detect changes in
	// secretproviderclasses specified by name
	SecretProviderClassUpdateOnChangeAnnotation = "secretproviderclass.reloader.stakater.com/reload"
	// ReloaderAutoAnnotation is an annotation to detect changes in secrets/configmaps
	ReloaderAutoAnnotation = "reloader.stakater.com/auto"
	// IgnoreResourceAnnotation is an annotation to ignore changes in secrets/configmaps
	IgnoreResourceAnnotation = "reloader.stakater.com/ignore"
	// ConfigmapReloaderAutoAnnotation is an annotation to detect changes in configmaps
	ConfigmapReloaderAutoAnnotation = "configmap.reloader.stakater.com/auto"
	// SecretReloaderAutoAnnotation is an annotation to detect changes in secrets
	SecretReloaderAutoAnnotation = "secret.reloader.stakater.com/auto"
	// SecretProviderClassReloaderAutoAnnotation is an annotation to detect changes in secretproviderclasses
	SecretProviderClassReloaderAutoAnnotation = "secretproviderclass.reloader.stakater.com/auto"
	// ConfigmapExcludeReloaderAnnotation is a comma separated list of configmaps that excludes detecting changes on cms
	ConfigmapExcludeReloaderAnnotation = "configmaps.exclude.reloader.stakater.com/reload"
	// SecretExcludeReloaderAnnotation is a comma separated list of secrets that excludes detecting changes on secrets
	SecretExcludeReloaderAnnotation = "secrets.exclude.reloader.stakater.com/reload"
	// SecretProviderClassExcludeReloaderAnnotation is a comma separated list of secret provider classes that excludes detecting changes on secret provider class
	SecretProviderClassExcludeReloaderAnnotation = "secretproviderclasses.exclude.reloader.stakater.com/reload"
	// AutoSearchAnnotation is an annotation to detect changes in
	// configmaps or secrets tagged with the SearchMatchAnnotation
	AutoSearchAnnotation = "reloader.stakater.com/search"
@@ -38,6 +47,12 @@ var (
	SearchMatchAnnotation = "reloader.stakater.com/match"
	// RolloutStrategyAnnotation is an annotation to define rollout update strategy
	RolloutStrategyAnnotation = "reloader.stakater.com/rollout-strategy"
	// PauseDeploymentAnnotation is an annotation to define the time period to pause a deployment after
	// a configmap/secret change has been detected. Valid values are described here: https://pkg.go.dev/time#ParseDuration
	// Only positive values are allowed.
	PauseDeploymentAnnotation = "deployment.reloader.stakater.com/pause-period"
	// PauseDeploymentTimeAnnotation is set by Reloader to indicate when the deployment was paused
	PauseDeploymentTimeAnnotation = "deployment.reloader.stakater.com/paused-at"
	// LogFormat is the log format to use (json, or empty string for default)
	LogFormat = ""
	// LogLevel is the log level to use (trace, debug, info, warning, error, fatal and panic)
@@ -55,6 +70,23 @@ var (
	EnableHA = false
	// WebhookUrl is the URL to send a request to instead of triggering a reload
	WebhookUrl = ""
	// EnableCSIIntegration adds support to watch SecretProviderClassPodStatus and restart deployments based on it
	EnableCSIIntegration = false
	// ResourcesToIgnore is a list of resources to ignore when watching for changes
	ResourcesToIgnore = []string{}
	// WorkloadTypesToIgnore is a list of workload types to ignore when watching for changes
	WorkloadTypesToIgnore = []string{}
	// NamespacesToIgnore is a list of namespace names to ignore when watching for changes
	NamespacesToIgnore = []string{}
	// NamespaceSelectors is a list of namespace selectors to watch for changes
	NamespaceSelectors = []string{}
	// ResourceSelectors is a list of resource selectors to watch for changes
	ResourceSelectors = []string{}
	// EnablePProf enables pprof for profiling
	EnablePProf = false
	// PProfAddr is the address to start the pprof server on
	// Default is :6060
	PProfAddr = ":6060"
)
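A hedged sketch of validating the pause-period annotation described above; only time.ParseDuration and the positive-value rule come from the comments, the surrounding variables are assumptions:

	pause := deployment.Annotations[options.PauseDeploymentAnnotation]
	if pause != "" {
		d, err := time.ParseDuration(pause) // e.g. "5m", "1h30m"
		if err != nil || d <= 0 {
			return fmt.Errorf("invalid pause period %q: only positive durations are allowed", pause)
		}
	}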

func ToArgoRolloutStrategy(s string) ArgoRolloutStrategy {

@@ -21,6 +21,7 @@ import (
	"github.com/stakater/Reloader/internal/pkg/metrics"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	"github.com/stakater/Reloader/pkg/common"
	"github.com/stakater/Reloader/pkg/kube"
	appsv1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
@@ -29,6 +30,9 @@ import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	core_v1 "k8s.io/client-go/kubernetes/typed/core/v1"
	csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
	csiclient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned"
	csiclient_v1 "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned/typed/apis/v1"
)

var (
@@ -37,6 +41,8 @@ var (
	ConfigmapResourceType = "configMaps"
	// SecretResourceType is a resource type which controller watches for changes
	SecretResourceType = "secrets"
	// SecretProviderClassPodStatusResourceType is a resource type which controller watches for changes
	SecretProviderClassPodStatusResourceType = "secretproviderclasspodstatuses"
)

var (
@@ -72,16 +78,16 @@ func DeleteNamespace(namespace string, client kubernetes.Interface) {
	}
}

-func getObjectMeta(namespace string, name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, extraAnnotations map[string]string) metav1.ObjectMeta {
+func getObjectMeta(namespace string, name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, secretproviderclass bool, extraAnnotations map[string]string) metav1.ObjectMeta {
	return metav1.ObjectMeta{
		Name:        name,
		Namespace:   namespace,
		Labels:      map[string]string{"firstLabel": "temp"},
-		Annotations: getAnnotations(name, autoReload, secretAutoReload, configmapAutoReload, extraAnnotations),
+		Annotations: getAnnotations(name, autoReload, secretAutoReload, configmapAutoReload, secretproviderclass, extraAnnotations),
	}
}

-func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, extraAnnotations map[string]string) map[string]string {
+func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, secretproviderclass bool, extraAnnotations map[string]string) map[string]string {
	annotations := make(map[string]string)
	if autoReload {
		annotations[options.ReloaderAutoAnnotation] = "true"
@@ -92,11 +98,16 @@ func getAnnotations(name string, autoReload bool, secretAutoReload bool, configmapAutoReload bool, extraAnnotations map[string]string) map[string]string {
	if configmapAutoReload {
		annotations[options.ConfigmapReloaderAutoAnnotation] = "true"
	}
+	if secretproviderclass {
+		annotations[options.SecretProviderClassReloaderAutoAnnotation] = "true"
+	}

-	if !(len(annotations) > 0) {
+	if len(annotations) == 0 {
		annotations = map[string]string{
-			options.ConfigmapUpdateOnChangeAnnotation: name,
-			options.SecretUpdateOnChangeAnnotation:    name}
+			options.ConfigmapUpdateOnChangeAnnotation:           name,
+			options.SecretUpdateOnChangeAnnotation:              name,
+			options.SecretProviderClassUpdateOnChangeAnnotation: name,
+		}
	}
	for k, v := range extraAnnotations {
		annotations[k] = v
@@ -175,6 +186,15 @@ func getVolumes(name string) []v1.Volume {
				},
			},
		},
+		{
+			Name: "secretproviderclass",
+			VolumeSource: v1.VolumeSource{
+				CSI: &v1.CSIVolumeSource{
+					Driver:           "secrets-store.csi.k8s.io",
+					VolumeAttributes: map[string]string{"secretProviderClass": name},
+				},
+			},
+		},
	}
}

@@ -188,6 +208,10 @@ func getVolumeMounts() []v1.VolumeMount {
			MountPath: "etc/sec",
			Name:      "secret",
		},
+		{
+			MountPath: "etc/spc",
+			Name:      "secretproviderclass",
+		},
		{
			MountPath: "etc/projectedconfig",
			Name:      "projectedconfigmap",
@@ -347,7 +371,7 @@ func getPodTemplateSpecWithInitContainerAndEnv(name string) v1.PodTemplateSpec {
func GetDeployment(namespace string, deploymentName string) *appsv1.Deployment {
	replicaset := int32(1)
	return &appsv1.Deployment{
-		ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, false, map[string]string{}),
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -366,7 +390,7 @@ func GetDeploymentConfig(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig {
	replicaset := int32(1)
	podTemplateSpecWithVolume := getPodTemplateSpecWithVolumes(deploymentConfigName)
	return &openshiftv1.DeploymentConfig{
-		ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, false, map[string]string{}),
		Spec: openshiftv1.DeploymentConfigSpec{
			Replicas: replicaset,
			Strategy: openshiftv1.DeploymentStrategy{
@@ -381,7 +405,7 @@ func GetDeploymentWithInitContainer(namespace string, deploymentName string) *appsv1.Deployment {
func GetDeploymentWithInitContainer(namespace string, deploymentName string) *appsv1.Deployment {
	replicaset := int32(1)
	return &appsv1.Deployment{
-		ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, false, map[string]string{}),
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -399,7 +423,7 @@ func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *appsv1.Deployment {
func GetDeploymentWithInitContainerAndEnv(namespace string, deploymentName string) *appsv1.Deployment {
	replicaset := int32(1)
	return &appsv1.Deployment{
-		ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, false, map[string]string{}),
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -416,7 +440,7 @@ func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.Deployment {
func GetDeploymentWithEnvVars(namespace string, deploymentName string) *appsv1.Deployment {
	replicaset := int32(1)
	return &appsv1.Deployment{
-		ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, false, map[string]string{}),
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -434,7 +458,7 @@ func GetDeploymentConfigWithEnvVars(namespace string, deploymentConfigName string) *openshiftv1.DeploymentConfig {
	replicaset := int32(1)
	podTemplateSpecWithEnvVars := getPodTemplateSpecWithEnvVars(deploymentConfigName)
	return &openshiftv1.DeploymentConfig{
-		ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, deploymentConfigName, false, false, false, false, map[string]string{}),
		Spec: openshiftv1.DeploymentConfigSpec{
			Replicas: replicaset,
			Strategy: openshiftv1.DeploymentStrategy{
@@ -448,7 +472,7 @@ func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *appsv1.Deployment {
func GetDeploymentWithEnvVarSources(namespace string, deploymentName string) *appsv1.Deployment {
	replicaset := int32(1)
	return &appsv1.Deployment{
-		ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, deploymentName, true, false, false, false, map[string]string{}),
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -465,7 +489,7 @@ func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment {
func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment {
	replicaset := int32(1)
	deployment := &appsv1.Deployment{
-		ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, deploymentName, false, false, false, false, map[string]string{}),
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -478,19 +502,22 @@ func GetDeploymentWithPodAnnotations(namespace string, deploymentName string, both bool) *appsv1.Deployment {
		},
	}
	if !both {
-		deployment.ObjectMeta.Annotations = nil
+		deployment.Annotations = nil
	}
-	deployment.Spec.Template.ObjectMeta.Annotations = getAnnotations(deploymentName, true, false, false, map[string]string{})
+	deployment.Spec.Template.Annotations = getAnnotations(deploymentName, true, false, false, false, map[string]string{})
	return deployment
}

func GetDeploymentWithTypedAutoAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment {
	replicaset := int32(1)
	var objectMeta metav1.ObjectMeta
-	if resourceType == SecretResourceType {
-		objectMeta = getObjectMeta(namespace, deploymentName, false, true, false, map[string]string{})
-	} else if resourceType == ConfigmapResourceType {
-		objectMeta = getObjectMeta(namespace, deploymentName, false, false, true, map[string]string{})
+	switch resourceType {
+	case SecretResourceType:
+		objectMeta = getObjectMeta(namespace, deploymentName, false, true, false, false, map[string]string{})
+	case ConfigmapResourceType:
+		objectMeta = getObjectMeta(namespace, deploymentName, false, false, true, false, map[string]string{})
+	case SecretProviderClassPodStatusResourceType:
+		objectMeta = getObjectMeta(namespace, deploymentName, false, false, false, true, map[string]string{})
	}

	return &appsv1.Deployment{
@@ -513,10 +540,13 @@ func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment {

	annotation := map[string]string{}

-	if resourceType == SecretResourceType {
+	switch resourceType {
+	case SecretResourceType:
		annotation[options.SecretExcludeReloaderAnnotation] = deploymentName
-	} else if resourceType == ConfigmapResourceType {
+	case ConfigmapResourceType:
		annotation[options.ConfigmapExcludeReloaderAnnotation] = deploymentName
+	case SecretProviderClassPodStatusResourceType:
+		annotation[options.SecretProviderClassExcludeReloaderAnnotation] = deploymentName
	}

	return &appsv1.Deployment{
@@ -542,7 +572,7 @@ func GetDeploymentWithExcludeAnnotation(namespace string, deploymentName string, resourceType string) *appsv1.Deployment {
// GetDaemonSet provides daemonset for testing
func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {
	return &appsv1.DaemonSet{
-		ObjectMeta: getObjectMeta(namespace, daemonsetName, false, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, daemonsetName, false, false, false, false, map[string]string{}),
		Spec: appsv1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -557,7 +587,7 @@ func GetDaemonSet(namespace string, daemonsetName string) *appsv1.DaemonSet {

func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.DaemonSet {
	return &appsv1.DaemonSet{
-		ObjectMeta: getObjectMeta(namespace, daemonSetName, true, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, daemonSetName, true, false, false, false, map[string]string{}),
		Spec: appsv1.DaemonSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -573,7 +603,7 @@ func GetDaemonSetWithEnvVars(namespace string, daemonSetName string) *appsv1.DaemonSet {
// GetStatefulSet provides statefulset for testing
func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSet {
	return &appsv1.StatefulSet{
-		ObjectMeta: getObjectMeta(namespace, statefulsetName, false, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, statefulsetName, false, false, false, false, map[string]string{}),
		Spec: appsv1.StatefulSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -589,7 +619,7 @@ func GetStatefulSet(namespace string, statefulsetName string) *appsv1.StatefulSet {
// GetStatefulSetWithEnvVar provides statefulset with env var for testing
func GetStatefulSetWithEnvVar(namespace string, statefulsetName string) *appsv1.StatefulSet {
	return &appsv1.StatefulSet{
-		ObjectMeta: getObjectMeta(namespace, statefulsetName, true, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, statefulsetName, true, false, false, false, map[string]string{}),
		Spec: appsv1.StatefulSetSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -614,6 +644,42 @@ func GetConfigmap(namespace string, configmapName string, testData string) *v1.ConfigMap {
	}
}

func GetSecretProviderClass(namespace string, secretProviderClassName string, data string) *csiv1.SecretProviderClass {
	return &csiv1.SecretProviderClass{
		ObjectMeta: metav1.ObjectMeta{
			Name:      secretProviderClassName,
			Namespace: namespace,
		},
		Spec: csiv1.SecretProviderClassSpec{
			Provider: "Test",
			Parameters: map[string]string{
				"parameter1": data,
			},
		},
	}
}

func GetSecretProviderClassPodStatus(namespace string, secretProviderClassPodStatusName string, data string) *csiv1.SecretProviderClassPodStatus {
	return &csiv1.SecretProviderClassPodStatus{
		ObjectMeta: metav1.ObjectMeta{
			Name:      secretProviderClassPodStatusName,
			Namespace: namespace,
		},
		Status: csiv1.SecretProviderClassPodStatusStatus{
			PodName:                 "test123",
			SecretProviderClassName: secretProviderClassPodStatusName,
			TargetPath:              "/var/lib/kubelet/d8771ddf-935a-4199-a20b-f35f71c1d9e7/volumes/kubernetes.io~csi/secrets-store-inline/mount",
			Mounted:                 true,
			Objects: []csiv1.SecretProviderClassObject{
				{
					ID:      "parameter1",
					Version: data,
				},
			},
		},
	}
}

// GetConfigmapWithUpdatedLabel provides configmap for testing
func GetConfigmapWithUpdatedLabel(namespace string, configmapName string, testLabel string, testData string) *v1.ConfigMap {
	return &v1.ConfigMap{
@@ -640,7 +706,7 @@ func GetSecret(namespace string, secretName string, data string) *v1.Secret {

func GetCronJob(namespace string, cronJobName string) *batchv1.CronJob {
	return &batchv1.CronJob{
-		ObjectMeta: getObjectMeta(namespace, cronJobName, false, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, cronJobName, false, false, false, false, map[string]string{}),
		Spec: batchv1.CronJobSpec{
			Schedule: "*/5 * * * *", // Run every 5 minutes
			JobTemplate: batchv1.JobTemplateSpec{
@@ -657,7 +723,7 @@ func GetCronJob(namespace string, cronJobName string) *batchv1.CronJob {

func GetJob(namespace string, jobName string) *batchv1.Job {
	return &batchv1.Job{
-		ObjectMeta: getObjectMeta(namespace, jobName, false, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, jobName, false, false, false, false, map[string]string{}),
		Spec: batchv1.JobSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -669,7 +735,7 @@ func GetJob(namespace string, jobName string) *batchv1.Job {

func GetCronJobWithEnvVar(namespace string, cronJobName string) *batchv1.CronJob {
	return &batchv1.CronJob{
-		ObjectMeta: getObjectMeta(namespace, cronJobName, true, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, cronJobName, true, false, false, false, map[string]string{}),
		Spec: batchv1.CronJobSpec{
			Schedule: "*/5 * * * *", // Run every 5 minutes
			JobTemplate: batchv1.JobTemplateSpec{
@@ -686,7 +752,7 @@ func GetCronJobWithEnvVar(namespace string, cronJobName string) *batchv1.CronJob {

func GetJobWithEnvVar(namespace string, jobName string) *batchv1.Job {
	return &batchv1.Job{
-		ObjectMeta: getObjectMeta(namespace, jobName, true, false, false, map[string]string{}),
+		ObjectMeta: getObjectMeta(namespace, jobName, true, false, false, false, map[string]string{}),
		Spec: batchv1.JobSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},
@@ -733,7 +799,7 @@ func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
		return ""
	}

-	var last util.ReloadSource
+	var last common.ReloadSource
	bytes := []byte(annotationJson)
	err := json.Unmarshal(bytes, &last)
	if err != nil {
@@ -743,19 +809,26 @@ func GetResourceSHAFromAnnotation(podAnnotations map[string]string) string {
	return last.Hash
}

-// ConvertResourceToSHA generates SHA from secret or configmap data
+// ConvertResourceToSHA generates SHA from secret, configmap or secretproviderclasspodstatus data
func ConvertResourceToSHA(resourceType string, namespace string, resourceName string, data string) string {
	values := []string{}
-	if resourceType == SecretResourceType {
+	switch resourceType {
+	case SecretResourceType:
		secret := GetSecret(namespace, resourceName, data)
		for k, v := range secret.Data {
			values = append(values, k+"="+string(v[:]))
		}
-	} else if resourceType == ConfigmapResourceType {
+	case ConfigmapResourceType:
		configmap := GetConfigmap(namespace, resourceName, data)
		for k, v := range configmap.Data {
			values = append(values, k+"="+v)
		}
+	case SecretProviderClassPodStatusResourceType:
+		secretproviderclasspodstatus := GetSecretProviderClassPodStatus(namespace, resourceName, data)
+		for _, v := range secretproviderclasspodstatus.Status.Objects {
+			values = append(values, v.ID+"="+v.Version)
+		}
+		values = append(values, "SecretProviderClassName="+secretproviderclasspodstatus.Status.SecretProviderClassName)
	}
	sort.Strings(values)
	return crypto.GenerateSHA(strings.Join(values, ";"))
@@ -770,6 +843,25 @@ func CreateConfigMap(client kubernetes.Interface, namespace string, configmapName string, data string) (core_v1.ConfigMapInterface, error) {
	return configmapClient, err
}

// CreateSecretProviderClass creates a SecretProviderClass in given namespace and returns the SecretProviderClassInterface
func CreateSecretProviderClass(client csiclient.Interface, namespace string, secretProviderClassName string, data string) (csiclient_v1.SecretProviderClassInterface, error) {
	logrus.Infof("Creating SecretProviderClass")
	secretProviderClassClient := client.SecretsstoreV1().SecretProviderClasses(namespace)
	_, err := secretProviderClassClient.Create(context.TODO(), GetSecretProviderClass(namespace, secretProviderClassName, data), metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return secretProviderClassClient, err
}

// CreateSecretProviderClassPodStatus creates a SecretProviderClassPodStatus in given namespace and returns the SecretProviderClassPodStatusInterface
func CreateSecretProviderClassPodStatus(client csiclient.Interface, namespace string, secretProviderClassPodStatusName string, data string) (csiclient_v1.SecretProviderClassPodStatusInterface, error) {
	logrus.Infof("Creating SecretProviderClassPodStatus")
	secretProviderClassPodStatusClient := client.SecretsstoreV1().SecretProviderClassPodStatuses(namespace)
	secretProviderClassPodStatus := GetSecretProviderClassPodStatus(namespace, secretProviderClassPodStatusName, data)
	_, err := secretProviderClassPodStatusClient.Create(context.TODO(), secretProviderClassPodStatus, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return secretProviderClassPodStatusClient, err
}

// CreateSecret creates a secret in given namespace and returns the SecretInterface
func CreateSecret(client kubernetes.Interface, namespace string, secretName string, data string) (core_v1.SecretInterface, error) {
	logrus.Infof("Creating secret")
@@ -794,6 +886,26 @@ func CreateDeployment(client kubernetes.Interface, deploymentName string, namespace string, volumeMount bool) (*appsv1.Deployment, error) {
	return deployment, err
}

// CreateDeploymentWithAnnotations creates a deployment with the given additional annotations in given namespace and returns the Deployment
func CreateDeploymentWithAnnotations(client kubernetes.Interface, deploymentName string, namespace string, additionalAnnotations map[string]string, volumeMount bool) (*appsv1.Deployment, error) {
	logrus.Infof("Creating Deployment")
	deploymentClient := client.AppsV1().Deployments(namespace)
	var deploymentObj *appsv1.Deployment
	if volumeMount {
		deploymentObj = GetDeployment(namespace, deploymentName)
	} else {
		deploymentObj = GetDeploymentWithEnvVars(namespace, deploymentName)
	}

	for annotationKey, annotationValue := range additionalAnnotations {
		deploymentObj.Annotations[annotationKey] = annotationValue
	}

	deployment, err := deploymentClient.Create(context.TODO(), deploymentObj, metav1.CreateOptions{})
	time.Sleep(3 * time.Second)
	return deployment, err
}
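Example call for the helper above, assuming a test context with client (kubernetes.Interface) and t (*testing.T) in scope; the names and annotation value are illustrative:

	deployment, err := CreateDeploymentWithAnnotations(client, "my-app", "test-ns",
		map[string]string{options.ReloaderAutoAnnotation: "true"}, true)
	if err != nil {
		t.Fatalf("failed to create deployment: %v", err)
	}
	_ = deployment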

// CreateDeploymentConfig creates a deploymentConfig in given namespace and returns the DeploymentConfig
func CreateDeploymentConfig(client appsclient.Interface, deploymentName string, namespace string, volumeMount bool) (*openshiftv1.DeploymentConfig, error) {
	logrus.Infof("Creating DeploymentConfig")
@@ -1012,6 +1124,27 @@ func UpdateSecret(secretClient core_v1.SecretInterface, namespace string, secretName string, label string, data string) error {
	return updateErr
}

// UpdateSecretProviderClassPodStatus updates a secretproviderclasspodstatus in given namespace and returns the error if any
func UpdateSecretProviderClassPodStatus(spcpsClient csiclient_v1.SecretProviderClassPodStatusInterface, namespace string, spcpsName string, label string, data string) error {
	logrus.Infof("Updating secretproviderclasspodstatus %q.\n", spcpsName)
	updatedStatus := GetSecretProviderClassPodStatus(namespace, spcpsName, data).Status
	secretproviderclasspodstatus, err := spcpsClient.Get(context.TODO(), spcpsName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	secretproviderclasspodstatus.Status = updatedStatus
	if label != "" {
		labels := secretproviderclasspodstatus.Labels
		if labels == nil {
			labels = make(map[string]string)
		}
		labels["firstLabel"] = label
		secretproviderclasspodstatus.Labels = labels // write the map back so a freshly created map is not lost
	}
	_, updateErr := spcpsClient.Update(context.TODO(), secretproviderclasspodstatus, metav1.UpdateOptions{})
	time.Sleep(3 * time.Second)
	return updateErr
}

// DeleteConfigMap deletes a configmap in given namespace and returns the error if any
func DeleteConfigMap(client kubernetes.Interface, namespace string, configmapName string) error {
	logrus.Infof("Deleting configmap %q.\n", configmapName)
@@ -1028,6 +1161,22 @@ func DeleteSecret(client kubernetes.Interface, namespace string, secretName string) error {
	return err
}

// DeleteSecretProviderClass deletes a secretproviderclass in given namespace and returns the error if any
func DeleteSecretProviderClass(client csiclient.Interface, namespace string, secretProviderClassName string) error {
	logrus.Infof("Deleting secretproviderclass %q.\n", secretProviderClassName)
	err := client.SecretsstoreV1().SecretProviderClasses(namespace).Delete(context.TODO(), secretProviderClassName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return err
}

// DeleteSecretProviderClassPodStatus deletes a secretproviderclasspodstatus in given namespace and returns the error if any
func DeleteSecretProviderClassPodStatus(client csiclient.Interface, namespace string, secretProviderClassPodStatusName string) error {
	logrus.Infof("Deleting secretproviderclasspodstatus %q.\n", secretProviderClassPodStatusName)
	err := client.SecretsstoreV1().SecretProviderClassPodStatuses(namespace).Delete(context.TODO(), secretProviderClassPodStatusName, metav1.DeleteOptions{})
	time.Sleep(3 * time.Second)
	return err
}

// RandSeq generates a random sequence
func RandSeq(n int) string {
	b := make([]rune, n)
@@ -1038,7 +1187,7 @@ func RandSeq(n int) string {
}

// VerifyResourceEnvVarUpdate verifies whether the rolling upgrade happened or not
-func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
+func VerifyResourceEnvVarUpdate(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		containers := upgradeFuncs.ContainersFunc(i)
@@ -1084,7 +1233,7 @@ func VerifyResourceEnvVarUpdate(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
}

// VerifyResourceEnvVarRemoved verifies whether the rolling upgrade happened or not and all env vars STAKATER_name_CONFIGMAP/SECRET are removed
-func VerifyResourceEnvVarRemoved(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
+func VerifyResourceEnvVarRemoved(clients kube.Clients, config common.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		containers := upgradeFuncs.ContainersFunc(i)
@@ -1133,7 +1282,7 @@ func VerifyResourceEnvVarRemoved(clients kube.Clients, config util.Config, envVarPostfix string, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
}

// VerifyResourceAnnotationUpdate verifies whether the rolling upgrade happened or not
-func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
+func VerifyResourceAnnotationUpdate(clients kube.Clients, config common.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
	items := upgradeFuncs.ItemsFunc(clients, config.Namespace)
	for _, i := range items {
		podAnnotations := upgradeFuncs.PodAnnotationsFunc(i)
@@ -1178,14 +1327,18 @@ func VerifyResourceAnnotationUpdate(clients kube.Clients, config util.Config, upgradeFuncs callbacks.RollingUpgradeFuncs) bool {
}

func GetSHAfromEmptyData() string {
-	return crypto.GenerateSHA("")
+	// Use a special marker that represents "deleted" or "empty" state
+	// This ensures we have a distinct, deterministic hash for the delete strategy
+	// Note: We could use GenerateSHA("") which now returns a hash, but using a marker
+	// makes the intent clearer and avoids potential confusion with actual empty data
+	return crypto.GenerateSHA("__RELOADER_EMPTY_DELETE_MARKER__")
}
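A quick check of the property the comment above relies on: the delete marker hashes to a value distinct from the hash of genuinely empty data, so the two states stay distinguishable:

	emptySHA := crypto.GenerateSHA("")
	deleteSHA := GetSHAfromEmptyData()
	fmt.Println(emptySHA != deleteSHA) // true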

// GetRollout provides rollout for testing
func GetRollout(namespace string, rolloutName string, annotations map[string]string) *argorolloutv1alpha1.Rollout {
	replicaset := int32(1)
	return &argorolloutv1alpha1.Rollout{
-		ObjectMeta: getObjectMeta(namespace, rolloutName, false, false, false, annotations),
+		ObjectMeta: getObjectMeta(namespace, rolloutName, false, false, false, false, annotations),
		Spec: argorolloutv1alpha1.RolloutSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"secondLabel": "temp"},

@@ -3,11 +3,17 @@ package util
import (
	"bytes"
	"encoding/base64"
	"errors"
	"fmt"
	"sort"
	"strings"

	"github.com/spf13/cobra"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/crypto"
	"github.com/stakater/Reloader/internal/pkg/options"
	v1 "k8s.io/api/core/v1"
	csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
)

// ConvertToEnvVarName converts the given text into a usable env var
@@ -52,9 +58,17 @@ func GetSHAfromSecret(data map[string][]byte) string {
	return crypto.GenerateSHA(strings.Join(values, ";"))
}

-type List []string
func GetSHAfromSecretProviderClassPodStatus(data csiv1.SecretProviderClassPodStatusStatus) string {
	values := []string{}
	for _, v := range data.Objects {
		values = append(values, v.ID+"="+v.Version)
	}
	values = append(values, "SecretProviderClassName="+data.SecretProviderClassName)
	sort.Strings(values)
	return crypto.GenerateSHA(strings.Join(values, ";"))
}
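Illustrative input for the hash helper above, built only from the csiv1 fields it reads (the values are made up):

	status := csiv1.SecretProviderClassPodStatusStatus{
		SecretProviderClassName: "my-spc",
		Objects: []csiv1.SecretProviderClassObject{
			{ID: "parameter1", Version: "v1"},
		},
	}
	sha := GetSHAfromSecretProviderClassPodStatus(status)
	// changing any object version or the class name changes sha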

type Map map[string]string
type List []string

func (l *List) Contains(s string) bool {
	for _, v := range *l {
@@ -64,3 +78,63 @@ func (l *List) Contains(s string) bool {
	}
	return false
}

func ConfigureReloaderFlags(cmd *cobra.Command) {
	cmd.PersistentFlags().BoolVar(&options.AutoReloadAll, "auto-reload-all", false, "Auto reload all resources")
	cmd.PersistentFlags().StringVar(&options.ConfigmapUpdateOnChangeAnnotation, "configmap-annotation", "configmap.reloader.stakater.com/reload", "annotation to detect changes in configmaps, specified by name")
	cmd.PersistentFlags().StringVar(&options.SecretUpdateOnChangeAnnotation, "secret-annotation", "secret.reloader.stakater.com/reload", "annotation to detect changes in secrets, specified by name")
	cmd.PersistentFlags().StringVar(&options.ReloaderAutoAnnotation, "auto-annotation", "reloader.stakater.com/auto", "annotation to detect changes in secrets/configmaps")
	cmd.PersistentFlags().StringVar(&options.ConfigmapReloaderAutoAnnotation, "configmap-auto-annotation", "configmap.reloader.stakater.com/auto", "annotation to detect changes in configmaps")
	cmd.PersistentFlags().StringVar(&options.SecretReloaderAutoAnnotation, "secret-auto-annotation", "secret.reloader.stakater.com/auto", "annotation to detect changes in secrets")
	cmd.PersistentFlags().StringVar(&options.AutoSearchAnnotation, "auto-search-annotation", "reloader.stakater.com/search", "annotation to detect changes in configmaps or secrets tagged with special match annotation")
	cmd.PersistentFlags().StringVar(&options.SearchMatchAnnotation, "search-match-annotation", "reloader.stakater.com/match", "annotation to mark secrets or configmaps to match the search")
	cmd.PersistentFlags().StringVar(&options.PauseDeploymentAnnotation, "pause-deployment-annotation", "deployment.reloader.stakater.com/pause-period", "annotation to define the time period to pause a deployment after a configmap/secret change has been detected")
	cmd.PersistentFlags().StringVar(&options.PauseDeploymentTimeAnnotation, "pause-deployment-time-annotation", "deployment.reloader.stakater.com/paused-at", "annotation to indicate when a deployment was paused by Reloader")
	cmd.PersistentFlags().StringVar(&options.LogFormat, "log-format", "", "Log format to use (empty string for text, or JSON)")
	cmd.PersistentFlags().StringVar(&options.LogLevel, "log-level", "info", "Log level to use (trace, debug, info, warning, error, fatal and panic)")
	cmd.PersistentFlags().StringVar(&options.WebhookUrl, "webhook-url", "", "webhook to trigger instead of performing a reload")
	cmd.PersistentFlags().StringSliceVar(&options.ResourcesToIgnore, "resources-to-ignore", options.ResourcesToIgnore, "list of resources to ignore (valid options 'configMaps' or 'secrets')")
	cmd.PersistentFlags().StringSliceVar(&options.WorkloadTypesToIgnore, "ignored-workload-types", options.WorkloadTypesToIgnore, "list of workload types to ignore (valid options: 'jobs', 'cronjobs', or both)")
	cmd.PersistentFlags().StringSliceVar(&options.NamespacesToIgnore, "namespaces-to-ignore", options.NamespacesToIgnore, "list of namespaces to ignore")
	cmd.PersistentFlags().StringSliceVar(&options.NamespaceSelectors, "namespace-selector", options.NamespaceSelectors, "list of key:value labels to filter on for namespaces")
	cmd.PersistentFlags().StringSliceVar(&options.ResourceSelectors, "resource-label-selector", options.ResourceSelectors, "list of key:value labels to filter on for configmaps and secrets")
	cmd.PersistentFlags().StringVar(&options.IsArgoRollouts, "is-Argo-Rollouts", "false", "Add support for argo rollouts")
	cmd.PersistentFlags().StringVar(&options.ReloadStrategy, constants.ReloadStrategyFlag, constants.EnvVarsReloadStrategy, "Specifies the desired reload strategy")
	cmd.PersistentFlags().StringVar(&options.ReloadOnCreate, "reload-on-create", "false", "Add support to watch create events")
	cmd.PersistentFlags().StringVar(&options.ReloadOnDelete, "reload-on-delete", "false", "Add support to watch delete events")
	cmd.PersistentFlags().BoolVar(&options.EnableHA, "enable-ha", false, "Adds support for running multiple replicas via leadership election")
	cmd.PersistentFlags().BoolVar(&options.SyncAfterRestart, "sync-after-restart", false, "Sync add events after reloader restarts")
	cmd.PersistentFlags().BoolVar(&options.EnablePProf, "enable-pprof", false, "Enable pprof for profiling")
	cmd.PersistentFlags().StringVar(&options.PProfAddr, "pprof-addr", ":6060", "Address to start pprof server on. Default is :6060")
	cmd.PersistentFlags().BoolVar(&options.EnableCSIIntegration, "enable-csi-integration", false, "Enables CSI integration. Default is false")
}

func GetIgnoredResourcesList() (List, error) {

	ignoredResourcesList := options.ResourcesToIgnore // getStringSliceFromFlags(cmd, "resources-to-ignore")

	for _, v := range ignoredResourcesList {
		if v != "configMaps" && v != "secrets" {
			return nil, fmt.Errorf("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not '%s'", v)
		}
	}

	if len(ignoredResourcesList) > 1 {
		return nil, errors.New("'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both")
	}

	return ignoredResourcesList, nil
}
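The validation above rejects unknown values and refuses both kinds at once; for instance, mutating the package-level option the same way the tests further down do:

	options.ResourcesToIgnore = []string{"configMaps", "secrets"}
	_, err := GetIgnoredResourcesList()
	// err: 'resources-to-ignore' only accepts 'configMaps' or 'secrets', not both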

func GetIgnoredWorkloadTypesList() (List, error) {

	ignoredWorkloadTypesList := options.WorkloadTypesToIgnore

	for _, v := range ignoredWorkloadTypesList {
		if v != "jobs" && v != "cronjobs" {
			return nil, fmt.Errorf("'ignored-workload-types' accepts 'jobs', 'cronjobs', or both, not '%s'", v)
		}
	}

	return ignoredWorkloadTypesList, nil
}

@@ -3,6 +3,7 @@ package util
import (
	"testing"

	"github.com/stakater/Reloader/internal/pkg/options"
	v1 "k8s.io/api/core/v1"
)

@@ -45,3 +46,141 @@ func TestGetHashFromConfigMap(t *testing.T) {
		}
	}
}

func TestGetIgnoredWorkloadTypesList(t *testing.T) {
	// Save original state
	originalWorkloadTypes := options.WorkloadTypesToIgnore
	defer func() {
		options.WorkloadTypesToIgnore = originalWorkloadTypes
	}()

	tests := []struct {
		name          string
		workloadTypes []string
		expectError   bool
		expected      []string
	}{
		{
			name:          "Both jobs and cronjobs",
			workloadTypes: []string{"jobs", "cronjobs"},
			expectError:   false,
			expected:      []string{"jobs", "cronjobs"},
		},
		{
			name:          "Only jobs",
			workloadTypes: []string{"jobs"},
			expectError:   false,
			expected:      []string{"jobs"},
		},
		{
			name:          "Only cronjobs",
			workloadTypes: []string{"cronjobs"},
			expectError:   false,
			expected:      []string{"cronjobs"},
		},
		{
			name:          "Empty list",
			workloadTypes: []string{},
			expectError:   false,
			expected:      []string{},
		},
		{
			name:          "Invalid workload type",
			workloadTypes: []string{"invalid"},
			expectError:   true,
			expected:      nil,
		},
		{
			name:          "Mixed valid and invalid",
			workloadTypes: []string{"jobs", "invalid"},
			expectError:   true,
			expected:      nil,
		},
		{
			name:          "Duplicate values",
			workloadTypes: []string{"jobs", "jobs"},
			expectError:   false,
			expected:      []string{"jobs", "jobs"},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Set the global option
			options.WorkloadTypesToIgnore = tt.workloadTypes

			result, err := GetIgnoredWorkloadTypesList()

			if tt.expectError && err == nil {
				t.Errorf("Expected error but got none")
			}

			if !tt.expectError && err != nil {
				t.Errorf("Expected no error but got: %v", err)
			}

			if !tt.expectError {
				if len(result) != len(tt.expected) {
					t.Errorf("Expected %v, got %v", tt.expected, result)
					return
				}

				for i, expected := range tt.expected {
					if i >= len(result) || result[i] != expected {
						t.Errorf("Expected %v, got %v", tt.expected, result)
						break
					}
				}
			}
		})
	}
}

func TestListContains(t *testing.T) {
	tests := []struct {
		name     string
		list     List
		item     string
		expected bool
	}{
		{
			name:     "List contains item",
			list:     List{"jobs", "cronjobs"},
			item:     "jobs",
			expected: true,
		},
		{
			name:     "List does not contain item",
			list:     List{"jobs"},
			item:     "cronjobs",
			expected: false,
		},
		{
			name:     "Empty list",
			list:     List{},
			item:     "jobs",
			expected: false,
		},
		{
			name:     "Case sensitive matching",
			list:     List{"jobs", "cronjobs"},
			item:     "Jobs",
			expected: false,
		},
		{
			name:     "Multiple occurrences",
			list:     List{"jobs", "jobs", "cronjobs"},
			item:     "jobs",
			expected: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := tt.list.Contains(tt.item)
			if result != tt.expected {
				t.Errorf("Expected %v, got %v", tt.expected, result)
			}
		})
	}
}

pkg/common/common.go (new file, 376 lines)
@@ -0,0 +1,376 @@
package common

import (
	"context"
	"os"
	"regexp"
	"strconv"
	"strings"

	"github.com/sirupsen/logrus"
	"github.com/stakater/Reloader/internal/pkg/constants"
	"github.com/stakater/Reloader/internal/pkg/options"
	"github.com/stakater/Reloader/internal/pkg/util"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
)

type Map map[string]string

type ReloadCheckResult struct {
	ShouldReload bool
	AutoReload   bool
}

// ReloaderOptions contains all configurable options for the Reloader controller.
// These options control how Reloader behaves when watching for changes in ConfigMaps and Secrets.
type ReloaderOptions struct {
	// AutoReloadAll enables automatic reloading of all resources when their corresponding ConfigMaps/Secrets are updated
	AutoReloadAll bool `json:"autoReloadAll"`
	// ConfigmapUpdateOnChangeAnnotation is the annotation key used to detect changes in ConfigMaps specified by name
	ConfigmapUpdateOnChangeAnnotation string `json:"configmapUpdateOnChangeAnnotation"`
	// SecretUpdateOnChangeAnnotation is the annotation key used to detect changes in Secrets specified by name
	SecretUpdateOnChangeAnnotation string `json:"secretUpdateOnChangeAnnotation"`
	// SecretProviderClassUpdateOnChangeAnnotation is the annotation key used to detect changes in SecretProviderClasses specified by name
	SecretProviderClassUpdateOnChangeAnnotation string `json:"secretProviderClassUpdateOnChangeAnnotation"`
	// ReloaderAutoAnnotation is the annotation key used to detect changes in any referenced ConfigMaps or Secrets
	ReloaderAutoAnnotation string `json:"reloaderAutoAnnotation"`
	// IgnoreResourceAnnotation is the annotation key used to ignore resources from being watched
	IgnoreResourceAnnotation string `json:"ignoreResourceAnnotation"`
	// ConfigmapReloaderAutoAnnotation is the annotation key used to detect changes in ConfigMaps only
	ConfigmapReloaderAutoAnnotation string `json:"configmapReloaderAutoAnnotation"`
	// SecretReloaderAutoAnnotation is the annotation key used to detect changes in Secrets only
	SecretReloaderAutoAnnotation string `json:"secretReloaderAutoAnnotation"`
	// SecretProviderClassReloaderAutoAnnotation is the annotation key used to detect changes in SecretProviderClasses only
	SecretProviderClassReloaderAutoAnnotation string `json:"secretProviderClassReloaderAutoAnnotation"`
	// ConfigmapExcludeReloaderAnnotation is the annotation key containing a comma-separated list of ConfigMaps to exclude from watching
	ConfigmapExcludeReloaderAnnotation string `json:"configmapExcludeReloaderAnnotation"`
	// SecretExcludeReloaderAnnotation is the annotation key containing a comma-separated list of Secrets to exclude from watching
	SecretExcludeReloaderAnnotation string `json:"secretExcludeReloaderAnnotation"`
	// SecretProviderClassExcludeReloaderAnnotation is the annotation key containing a comma-separated list of SecretProviderClasses to exclude from watching
	SecretProviderClassExcludeReloaderAnnotation string `json:"secretProviderClassExcludeReloaderAnnotation"`
	// AutoSearchAnnotation is the annotation key used to detect changes in ConfigMaps/Secrets tagged with SearchMatchAnnotation
	AutoSearchAnnotation string `json:"autoSearchAnnotation"`
	// SearchMatchAnnotation is the annotation key used to tag ConfigMaps/Secrets to be found by AutoSearchAnnotation
	SearchMatchAnnotation string `json:"searchMatchAnnotation"`
	// RolloutStrategyAnnotation is the annotation key used to define the rollout update strategy for workloads
	RolloutStrategyAnnotation string `json:"rolloutStrategyAnnotation"`
	// PauseDeploymentAnnotation is the annotation key used to define the time period to pause a deployment after
	// a ConfigMap/Secret change has been detected
	PauseDeploymentAnnotation string `json:"pauseDeploymentAnnotation"`
	// PauseDeploymentTimeAnnotation is the annotation key used to indicate when a deployment was paused by Reloader
	PauseDeploymentTimeAnnotation string `json:"pauseDeploymentTimeAnnotation"`

	// LogFormat specifies the log format to use (json, or empty string for default text format)
	LogFormat string `json:"logFormat"`
	// LogLevel specifies the log level to use (trace, debug, info, warning, error, fatal, panic)
	LogLevel string `json:"logLevel"`
	// IsArgoRollouts indicates whether support for Argo Rollouts is enabled
	IsArgoRollouts bool `json:"isArgoRollouts"`
	// ReloadStrategy specifies the strategy used to trigger resource reloads (env-vars or annotations)
	ReloadStrategy string `json:"reloadStrategy"`
	// ReloadOnCreate indicates whether to trigger reloads when ConfigMaps/Secrets are created
	ReloadOnCreate bool `json:"reloadOnCreate"`
	// ReloadOnDelete indicates whether to trigger reloads when ConfigMaps/Secrets are deleted
	ReloadOnDelete bool `json:"reloadOnDelete"`
	// SyncAfterRestart indicates whether to sync add events after Reloader restarts (only works when ReloadOnCreate is true)
	SyncAfterRestart bool `json:"syncAfterRestart"`
	// EnableHA indicates whether High Availability mode is enabled with leader election
	EnableHA bool `json:"enableHA"`
	// EnableCSIIntegration indicates whether CSI integration is enabled to watch SecretProviderClassPodStatus
	EnableCSIIntegration bool `json:"enableCSIIntegration"`
	// WebhookUrl is the URL to send webhook notifications to instead of performing reloads
	WebhookUrl string `json:"webhookUrl"`
	// ResourcesToIgnore is a list of resource types to ignore (e.g., "configmaps" or "secrets")
	ResourcesToIgnore []string `json:"resourcesToIgnore"`
	// WorkloadTypesToIgnore is a list of workload types to ignore (e.g., "jobs" or "cronjobs")
	WorkloadTypesToIgnore []string `json:"workloadTypesToIgnore"`
	// NamespaceSelectors is a list of label selectors to filter namespaces to watch
	NamespaceSelectors []string `json:"namespaceSelectors"`
	// ResourceSelectors is a list of label selectors to filter ConfigMaps and Secrets to watch
	ResourceSelectors []string `json:"resourceSelectors"`
	// NamespacesToIgnore is a list of namespace names to ignore when watching for changes
	NamespacesToIgnore []string `json:"namespacesToIgnore"`
	// EnablePProf enables pprof for profiling
	EnablePProf bool `json:"enablePProf"`
	// PProfAddr is the address to start pprof server on
	PProfAddr string `json:"pprofAddr"`
}

var CommandLineOptions *ReloaderOptions

func PublishMetaInfoConfigmap(clientset kubernetes.Interface) {
	namespace := os.Getenv("RELOADER_NAMESPACE")
	if namespace == "" {
		logrus.Warn("RELOADER_NAMESPACE is not set, skipping meta info configmap creation")
		return
	}

	metaInfo := &MetaInfo{
		BuildInfo:       *NewBuildInfo(),
		ReloaderOptions: *GetCommandLineOptions(),
		DeploymentInfo: metav1.ObjectMeta{
			Name:      os.Getenv("RELOADER_DEPLOYMENT_NAME"),
			Namespace: namespace,
		},
	}

	configMap := metaInfo.ToConfigMap()

	if _, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.Background(), configMap.Name, metav1.GetOptions{}); err == nil {
		logrus.Info("Meta info configmap already exists, updating it")
		_, err = clientset.CoreV1().ConfigMaps(namespace).Update(context.Background(), configMap, metav1.UpdateOptions{})
		if err != nil {
			logrus.Warn("Failed to update existing meta info configmap: ", err)
		}
		return
	}

	_, err := clientset.CoreV1().ConfigMaps(namespace).Create(context.Background(), configMap, metav1.CreateOptions{})
	if err != nil {
		logrus.Warn("Failed to create meta info configmap: ", err)
	}
}
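Hedged usage sketch for the function above: it reads its target namespace and deployment name from the environment, so a caller would set those first (how the env vars are injected in a real deployment, e.g. via the Downward API, is an assumption here):

	os.Setenv("RELOADER_NAMESPACE", "reloader")
	os.Setenv("RELOADER_DEPLOYMENT_NAME", "reloader-reloader")
	PublishMetaInfoConfigmap(clientset) // clientset is a kubernetes.Interface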

func GetNamespaceLabelSelector(slice []string) (string, error) {
	for i, kv := range slice {
		// Legacy support for ":" as a delimiter and "*" for wildcard.
		if strings.Contains(kv, ":") {
			split := strings.Split(kv, ":")
			if split[1] == "*" {
				slice[i] = split[0]
			} else {
				slice[i] = split[0] + "=" + split[1]
			}
		}
		// Convert wildcard to valid apimachinery operator
		if strings.Contains(kv, "=") {
			split := strings.Split(kv, "=")
			if split[1] == "*" {
				slice[i] = split[0]
			}
		}
	}

	namespaceLabelSelector := strings.Join(slice[:], ",")
	_, err := labels.Parse(namespaceLabelSelector)
	if err != nil {
		logrus.Fatal(err)
	}

	return namespaceLabelSelector, nil
}
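A worked example of the legacy conversion above: a ":" delimiter becomes "=", and a "*" value collapses to a bare key, i.e. a label-presence selector:

	sel, _ := GetNamespaceLabelSelector([]string{"env:*", "team:payments"})
	// sel == "env,team=payments", which labels.Parse accepts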

func GetResourceLabelSelector(slice []string) (string, error) {
	for i, kv := range slice {
		// Legacy support for ":" as a delimiter and "*" for wildcard.
		if strings.Contains(kv, ":") {
			split := strings.Split(kv, ":")
			if split[1] == "*" {
				slice[i] = split[0]
			} else {
				slice[i] = split[0] + "=" + split[1]
			}
		}
		// Convert wildcard to valid apimachinery operator
		if strings.Contains(kv, "=") {
			split := strings.Split(kv, "=")
			if split[1] == "*" {
				slice[i] = split[0]
			}
		}
	}

	resourceLabelSelector := strings.Join(slice[:], ",")
	_, err := labels.Parse(resourceLabelSelector)
	if err != nil {
		logrus.Fatal(err)
	}

	return resourceLabelSelector, nil
}

// ShouldReload checks if a resource should be reloaded based on its annotations and the provided options.
func ShouldReload(config Config, resourceType string, annotations Map, podAnnotations Map, options *ReloaderOptions) ReloadCheckResult {

	// Check if this workload type should be ignored
	if len(options.WorkloadTypesToIgnore) > 0 {
		ignoredWorkloadTypes, err := util.GetIgnoredWorkloadTypesList()
		if err != nil {
			logrus.Errorf("Failed to parse ignored workload types: %v", err)
		} else {
			// Map Kubernetes resource types to CLI-friendly names for comparison
			var resourceToCheck string
			switch resourceType {
			case "Job":
				resourceToCheck = "jobs"
			case "CronJob":
				resourceToCheck = "cronjobs"
			default:
				resourceToCheck = resourceType // For other types, use as-is
			}

			// Check if current resource type should be ignored
			if ignoredWorkloadTypes.Contains(resourceToCheck) {
				return ReloadCheckResult{
					ShouldReload: false,
				}
			}
		}
	}

	ignoreResourceAnnotationValue := config.ResourceAnnotations[options.IgnoreResourceAnnotation]
	if ignoreResourceAnnotationValue == "true" {
		return ReloadCheckResult{
			ShouldReload: false,
		}
	}

	annotationValue, found := annotations[config.Annotation]
	searchAnnotationValue, foundSearchAnn := annotations[options.AutoSearchAnnotation]
	reloaderEnabledValue, foundAuto := annotations[options.ReloaderAutoAnnotation]
	typedAutoAnnotationEnabledValue, foundTypedAuto := annotations[config.TypedAutoAnnotation]
	excludeConfigmapAnnotationValue, foundExcludeConfigmap := annotations[options.ConfigmapExcludeReloaderAnnotation]
	excludeSecretAnnotationValue, foundExcludeSecret := annotations[options.SecretExcludeReloaderAnnotation]
	excludeSecretProviderClassProviderAnnotationValue, foundExcludeSecretProviderClass := annotations[options.SecretProviderClassExcludeReloaderAnnotation]

	if !found && !foundAuto && !foundTypedAuto && !foundSearchAnn {
		annotations = podAnnotations
		annotationValue = annotations[config.Annotation]
		searchAnnotationValue = annotations[options.AutoSearchAnnotation]
		reloaderEnabledValue = annotations[options.ReloaderAutoAnnotation]
		typedAutoAnnotationEnabledValue = annotations[config.TypedAutoAnnotation]
	}

	isResourceExcluded := false

	switch config.Type {
	case constants.ConfigmapEnvVarPostfix:
		if foundExcludeConfigmap {
			isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeConfigmapAnnotationValue)
		}
	case constants.SecretEnvVarPostfix:
		if foundExcludeSecret {
			isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretAnnotationValue)
		}

	case constants.SecretProviderClassEnvVarPostfix:
		if foundExcludeSecretProviderClass {
			isResourceExcluded = checkIfResourceIsExcluded(config.ResourceName, excludeSecretProviderClassProviderAnnotationValue)
		}
	}

	if isResourceExcluded {
		return ReloadCheckResult{
			ShouldReload: false,
		}
	}

	values := strings.Split(annotationValue, ",")
	for _, value := range values {
		value = strings.TrimSpace(value)
		re := regexp.MustCompile("^" + value + "$")
		if re.Match([]byte(config.ResourceName)) {
			return ReloadCheckResult{
				ShouldReload: true,
				AutoReload:   false,
			}
		}
	}

	if searchAnnotationValue == "true" {
		matchAnnotationValue := config.ResourceAnnotations[options.SearchMatchAnnotation]
		if matchAnnotationValue == "true" {
			return ReloadCheckResult{
				ShouldReload: true,
				AutoReload:   true,
			}
		}
	}

	reloaderEnabled, _ := strconv.ParseBool(reloaderEnabledValue)
	typedAutoAnnotationEnabled, _ := strconv.ParseBool(typedAutoAnnotationEnabledValue)
	if reloaderEnabled || typedAutoAnnotationEnabled || (reloaderEnabledValue == "" && typedAutoAnnotationEnabledValue == "" && options.AutoReloadAll) {
		return ReloadCheckResult{
			ShouldReload: true,
			AutoReload:   true,
		}
	}

	return ReloadCheckResult{
		ShouldReload: false,
	}
}
|
||||
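// In short, ShouldReload evaluates in this order: ignored workload types and
// the ignore/exclude annotations veto a reload first; then a match on the
// typed reload annotation (comma-separated names, each treated as an anchored
// regex) triggers a non-auto reload; then the search/match annotations; and
// finally the auto annotations, or --auto-reload-all when neither auto
// annotation is set.
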
func checkIfResourceIsExcluded(resourceName, excludedResources string) bool {
	if excludedResources == "" {
		return false
	}

	excludedResourcesList := strings.Split(excludedResources, ",")
	for _, excludedResource := range excludedResourcesList {
		if strings.TrimSpace(excludedResource) == resourceName {
			return true
		}
	}

	return false
}

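// Exclusion is an exact, whitespace-trimmed name match against a
// comma-separated list - no wildcards or regexes. For example
// (hypothetical names):
//
//	checkIfResourceIsExcluded("my-cm", "other-cm, my-cm") == true
//	checkIfResourceIsExcluded("my-cm", "my-cm-*")         == false
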
func init() {
	GetCommandLineOptions()
}

func GetCommandLineOptions() *ReloaderOptions {
	if CommandLineOptions == nil {
		CommandLineOptions = &ReloaderOptions{}
	}

	CommandLineOptions.AutoReloadAll = options.AutoReloadAll
	CommandLineOptions.ConfigmapUpdateOnChangeAnnotation = options.ConfigmapUpdateOnChangeAnnotation
	CommandLineOptions.SecretUpdateOnChangeAnnotation = options.SecretUpdateOnChangeAnnotation
	CommandLineOptions.SecretProviderClassUpdateOnChangeAnnotation = options.SecretProviderClassUpdateOnChangeAnnotation
	CommandLineOptions.ReloaderAutoAnnotation = options.ReloaderAutoAnnotation
	CommandLineOptions.IgnoreResourceAnnotation = options.IgnoreResourceAnnotation
	CommandLineOptions.ConfigmapReloaderAutoAnnotation = options.ConfigmapReloaderAutoAnnotation
	CommandLineOptions.SecretReloaderAutoAnnotation = options.SecretReloaderAutoAnnotation
	CommandLineOptions.SecretProviderClassReloaderAutoAnnotation = options.SecretProviderClassReloaderAutoAnnotation
	CommandLineOptions.ConfigmapExcludeReloaderAnnotation = options.ConfigmapExcludeReloaderAnnotation
	CommandLineOptions.SecretExcludeReloaderAnnotation = options.SecretExcludeReloaderAnnotation
	CommandLineOptions.SecretProviderClassExcludeReloaderAnnotation = options.SecretProviderClassExcludeReloaderAnnotation
	CommandLineOptions.AutoSearchAnnotation = options.AutoSearchAnnotation
	CommandLineOptions.SearchMatchAnnotation = options.SearchMatchAnnotation
	CommandLineOptions.RolloutStrategyAnnotation = options.RolloutStrategyAnnotation
	CommandLineOptions.PauseDeploymentAnnotation = options.PauseDeploymentAnnotation
	CommandLineOptions.PauseDeploymentTimeAnnotation = options.PauseDeploymentTimeAnnotation
	CommandLineOptions.LogFormat = options.LogFormat
	CommandLineOptions.LogLevel = options.LogLevel
	CommandLineOptions.ReloadStrategy = options.ReloadStrategy
	CommandLineOptions.SyncAfterRestart = options.SyncAfterRestart
	CommandLineOptions.EnableHA = options.EnableHA
	CommandLineOptions.EnableCSIIntegration = options.EnableCSIIntegration
	CommandLineOptions.WebhookUrl = options.WebhookUrl
	CommandLineOptions.ResourcesToIgnore = options.ResourcesToIgnore
	CommandLineOptions.WorkloadTypesToIgnore = options.WorkloadTypesToIgnore
	CommandLineOptions.NamespaceSelectors = options.NamespaceSelectors
	CommandLineOptions.ResourceSelectors = options.ResourceSelectors
	CommandLineOptions.NamespacesToIgnore = options.NamespacesToIgnore
	CommandLineOptions.IsArgoRollouts = parseBool(options.IsArgoRollouts)
	CommandLineOptions.ReloadOnCreate = parseBool(options.ReloadOnCreate)
	CommandLineOptions.ReloadOnDelete = parseBool(options.ReloadOnDelete)
	CommandLineOptions.EnablePProf = options.EnablePProf
	CommandLineOptions.PProfAddr = options.PProfAddr

	return CommandLineOptions
}

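// Note: GetCommandLineOptions re-copies the package-level flag values from
// internal/pkg/options into the shared CommandLineOptions singleton on every
// call, so callers (and tests) that mutate those package-level values can
// refresh the snapshot simply by calling it again.
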
func parseBool(value string) bool {
	if value == "" {
		return false
	}
	result, err := strconv.ParseBool(value)
	if err != nil {
		return false // Default to false if parsing fails
	}
	return result
}
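
// parseBool accepts the same forms as strconv.ParseBool ("1", "t", "T",
// "true", "TRUE", ...), but maps empty or unparseable input to false instead
// of returning an error; e.g. parseBool("") == false and
// parseBool("yes") == false.
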
224
pkg/common/common_test.go
Normal file
@@ -0,0 +1,224 @@
package common

import (
	"testing"

	"github.com/stakater/Reloader/internal/pkg/options"
)

func TestShouldReload_IgnoredWorkloadTypes(t *testing.T) {
	// Save original state
	originalWorkloadTypes := options.WorkloadTypesToIgnore
	defer func() {
		options.WorkloadTypesToIgnore = originalWorkloadTypes
	}()

	tests := []struct {
		name                 string
		ignoredWorkloadTypes []string
		resourceType         string
		shouldReload         bool
		description          string
	}{
		{
			name:                 "Jobs ignored - Job should not reload",
			ignoredWorkloadTypes: []string{"jobs"},
			resourceType:         "Job",
			shouldReload:         false,
			description:          "When jobs are ignored, Job resources should not be reloaded",
		},
		{
			name:                 "Jobs ignored - CronJob should reload",
			ignoredWorkloadTypes: []string{"jobs"},
			resourceType:         "CronJob",
			shouldReload:         true,
			description:          "When jobs are ignored, CronJob resources should still be processed",
		},
		{
			name:                 "CronJobs ignored - CronJob should not reload",
			ignoredWorkloadTypes: []string{"cronjobs"},
			resourceType:         "CronJob",
			shouldReload:         false,
			description:          "When cronjobs are ignored, CronJob resources should not be reloaded",
		},
		{
			name:                 "CronJobs ignored - Job should reload",
			ignoredWorkloadTypes: []string{"cronjobs"},
			resourceType:         "Job",
			shouldReload:         true,
			description:          "When cronjobs are ignored, Job resources should still be processed",
		},
		{
			name:                 "Both ignored - Job should not reload",
			ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
			resourceType:         "Job",
			shouldReload:         false,
			description:          "When both are ignored, Job resources should not be reloaded",
		},
		{
			name:                 "Both ignored - CronJob should not reload",
			ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
			resourceType:         "CronJob",
			shouldReload:         false,
			description:          "When both are ignored, CronJob resources should not be reloaded",
		},
		{
			name:                 "Both ignored - Deployment should reload",
			ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
			resourceType:         "Deployment",
			shouldReload:         true,
			description:          "When both are ignored, other workload types should still be processed",
		},
		{
			name:                 "None ignored - Job should reload",
			ignoredWorkloadTypes: []string{},
			resourceType:         "Job",
			shouldReload:         true,
			description:          "When nothing is ignored, all workload types should be processed",
		},
		{
			name:                 "None ignored - CronJob should reload",
			ignoredWorkloadTypes: []string{},
			resourceType:         "CronJob",
			shouldReload:         true,
			description:          "When nothing is ignored, all workload types should be processed",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Set the ignored workload types
			options.WorkloadTypesToIgnore = tt.ignoredWorkloadTypes

			// Create minimal test config and options
			config := Config{
				ResourceName: "test-resource",
				Annotation:   "configmap.reloader.stakater.com/reload",
			}

			annotations := Map{
				"configmap.reloader.stakater.com/reload": "test-config",
			}

			// Create ReloaderOptions with the ignored workload types
			opts := &ReloaderOptions{
				WorkloadTypesToIgnore:  tt.ignoredWorkloadTypes,
				AutoReloadAll:          true, // Enable auto-reload to simplify test
				ReloaderAutoAnnotation: "reloader.stakater.com/auto",
			}

			// Call ShouldReload
			result := ShouldReload(config, tt.resourceType, annotations, Map{}, opts)

			// Check the result
			if result.ShouldReload != tt.shouldReload {
				t.Errorf("For resource type %s with ignored types %v, expected ShouldReload=%v, got=%v",
					tt.resourceType, tt.ignoredWorkloadTypes, tt.shouldReload, result.ShouldReload)
			}

			t.Logf("✓ %s", tt.description)
		})
	}
}

func TestShouldReload_IgnoredWorkloadTypes_ValidationError(t *testing.T) {
	// Save original state
	originalWorkloadTypes := options.WorkloadTypesToIgnore
	defer func() {
		options.WorkloadTypesToIgnore = originalWorkloadTypes
	}()

	// Test with invalid workload type - should still continue processing
	options.WorkloadTypesToIgnore = []string{"invalid"}

	config := Config{
		ResourceName: "test-resource",
		Annotation:   "configmap.reloader.stakater.com/reload",
	}

	annotations := Map{
		"configmap.reloader.stakater.com/reload": "test-config",
	}

	opts := &ReloaderOptions{
		WorkloadTypesToIgnore:  []string{"invalid"},
		AutoReloadAll:          true, // Enable auto-reload to simplify test
		ReloaderAutoAnnotation: "reloader.stakater.com/auto",
	}

	// Should not panic and should continue with normal processing
	result := ShouldReload(config, "Job", annotations, Map{}, opts)

	// Since validation failed, it should continue with normal processing (should reload)
	if !result.ShouldReload {
		t.Errorf("Expected ShouldReload=true when validation fails, got=%v", result.ShouldReload)
	}
}

// Test that validates the fix for issue #996
func TestShouldReload_IssueRBACPermissionFixed(t *testing.T) {
	// Save original state
	originalWorkloadTypes := options.WorkloadTypesToIgnore
	defer func() {
		options.WorkloadTypesToIgnore = originalWorkloadTypes
	}()

	tests := []struct {
		name                 string
		ignoredWorkloadTypes []string
		resourceType         string
		description          string
	}{
		{
			name:                 "Issue #996 - ignoreJobs prevents Job processing",
			ignoredWorkloadTypes: []string{"jobs"},
			resourceType:         "Job",
			description:          "Job resources are skipped entirely, preventing RBAC permission errors",
		},
		{
			name:                 "Issue #996 - ignoreCronJobs prevents CronJob processing",
			ignoredWorkloadTypes: []string{"cronjobs"},
			resourceType:         "CronJob",
			description:          "CronJob resources are skipped entirely, preventing RBAC permission errors",
		},
		{
			name:                 "Issue #996 - both ignored prevent both types",
			ignoredWorkloadTypes: []string{"jobs", "cronjobs"},
			resourceType:         "Job",
			description:          "Job resources are skipped entirely when both types are ignored",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Set the ignored workload types
			options.WorkloadTypesToIgnore = tt.ignoredWorkloadTypes

			config := Config{
				ResourceName: "test-resource",
				Annotation:   "configmap.reloader.stakater.com/reload",
			}

			annotations := Map{
				"configmap.reloader.stakater.com/reload": "test-config",
			}

			opts := &ReloaderOptions{
				WorkloadTypesToIgnore:  tt.ignoredWorkloadTypes,
				AutoReloadAll:          true, // Enable auto-reload to simplify test
				ReloaderAutoAnnotation: "reloader.stakater.com/auto",
			}

			// Call ShouldReload
			result := ShouldReload(config, tt.resourceType, annotations, Map{}, opts)

			// Should not reload when workload type is ignored
			if result.ShouldReload {
				t.Errorf("Expected ShouldReload=false for ignored workload type %s, got=%v",
					tt.resourceType, result.ShouldReload)
			}

			t.Logf("✓ %s", tt.description)
		})
	}
}

@@ -1,12 +1,14 @@
-package util
+package common

 import (
 	"github.com/stakater/Reloader/internal/pkg/constants"
 	"github.com/stakater/Reloader/internal/pkg/options"
+	"github.com/stakater/Reloader/internal/pkg/util"
 	v1 "k8s.io/api/core/v1"
+	csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
 )

-//Config contains rolling upgrade configuration parameters
+// Config contains rolling upgrade configuration parameters
 type Config struct {
 	Namespace           string
 	ResourceName        string
@@ -15,6 +17,7 @@ type Config struct {
 	TypedAutoAnnotation string
 	SHAValue            string
 	Type                string
+	Labels              map[string]string
 }

 // GetConfigmapConfig provides utility config for configmap
@@ -25,8 +28,9 @@ func GetConfigmapConfig(configmap *v1.ConfigMap) Config {
 		ResourceAnnotations: configmap.Annotations,
 		Annotation:          options.ConfigmapUpdateOnChangeAnnotation,
 		TypedAutoAnnotation: options.ConfigmapReloaderAutoAnnotation,
-		SHAValue:            GetSHAfromConfigmap(configmap),
+		SHAValue:            util.GetSHAfromConfigmap(configmap),
 		Type:                constants.ConfigmapEnvVarPostfix,
+		Labels:              configmap.Labels,
 	}
 }

@@ -38,7 +42,21 @@ func GetSecretConfig(secret *v1.Secret) Config {
 		ResourceAnnotations: secret.Annotations,
 		Annotation:          options.SecretUpdateOnChangeAnnotation,
 		TypedAutoAnnotation: options.SecretReloaderAutoAnnotation,
-		SHAValue:            GetSHAfromSecret(secret.Data),
+		SHAValue:            util.GetSHAfromSecret(secret.Data),
 		Type:                constants.SecretEnvVarPostfix,
+		Labels:              secret.Labels,
 	}
 }
+
+func GetSecretProviderClassPodStatusConfig(podStatus *csiv1.SecretProviderClassPodStatus) Config {
+	// As csi injects SecretProviderClass, we will create config for it instead of SecretProviderClassPodStatus
+	// ResourceAnnotations will be retrieved during PerformAction call
+	return Config{
+		Namespace:           podStatus.Namespace,
+		ResourceName:        podStatus.Status.SecretProviderClassName,
+		Annotation:          options.SecretProviderClassUpdateOnChangeAnnotation,
+		TypedAutoAnnotation: options.SecretProviderClassReloaderAutoAnnotation,
+		SHAValue:            util.GetSHAfromSecretProviderClassPodStatus(podStatus.Status),
+		Type:                constants.SecretProviderClassEnvVarPostfix,
+	}
+}
134
pkg/common/metainfo.go
Normal file
@@ -0,0 +1,134 @@
package common

import (
	"encoding/json"
	"fmt"
	"runtime"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Version, Commit, BuildDate, and Edition are set during the build process
// using the -X linker flag to inject these values into the binary.
// They provide metadata about the build version, commit hash, build date,
// and edition of the binary.
// This information is useful for debugging and tracking the specific build of the Reloader binary.
var Version = "dev"
var Commit = "unknown"
var BuildDate = "unknown"
var Edition = "oss"

const (
	MetaInfoConfigmapName       = "reloader-meta-info"
	MetaInfoConfigmapLabelKey   = "reloader.stakater.com/meta-info"
	MetaInfoConfigmapLabelValue = "reloader"
)

// MetaInfo contains comprehensive metadata about the Reloader instance.
// This includes build information, configuration options, and deployment details.
type MetaInfo struct {
	// BuildInfo contains information about the build version, commit, and compilation details
	BuildInfo BuildInfo `json:"buildInfo"`
	// ReloaderOptions contains all the configuration options and flags used by this Reloader instance
	ReloaderOptions ReloaderOptions `json:"reloaderOptions"`
	// DeploymentInfo contains metadata about the Kubernetes deployment of this Reloader instance
	DeploymentInfo metav1.ObjectMeta `json:"deploymentInfo"`
}

// BuildInfo contains information about the build and version of the Reloader binary.
// This includes Go version, release version, commit details, and build timestamp.
type BuildInfo struct {
	// GoVersion is the version of Go used to compile the binary
	GoVersion string `json:"goVersion"`
	// ReleaseVersion is the version tag or branch of the Reloader release
	ReleaseVersion string `json:"releaseVersion"`
	// CommitHash is the Git commit hash of the source code used to build this binary
	CommitHash string `json:"commitHash"`
	// CommitTime is the timestamp of the Git commit used to build this binary
	CommitTime time.Time `json:"commitTime"`

	// Edition indicates the edition of Reloader (e.g., OSS, Enterprise)
	Edition string `json:"edition"`
}

func NewBuildInfo() *BuildInfo {
	metaInfo := &BuildInfo{
		GoVersion:      runtime.Version(),
		ReleaseVersion: Version,
		CommitHash:     Commit,
		CommitTime:     ParseUTCTime(BuildDate),
		Edition:        Edition,
	}

	return metaInfo
}

func (m *MetaInfo) ToConfigMap() *v1.ConfigMap {
	return &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      MetaInfoConfigmapName,
			Namespace: m.DeploymentInfo.Namespace,
			Labels: map[string]string{
				MetaInfoConfigmapLabelKey: MetaInfoConfigmapLabelValue,
			},
		},
		Data: map[string]string{
			"buildInfo":       toJson(m.BuildInfo),
			"reloaderOptions": toJson(m.ReloaderOptions),
			"deploymentInfo":  toJson(m.DeploymentInfo),
		},
	}
}

func NewMetaInfo(configmap *v1.ConfigMap) (*MetaInfo, error) {
	var buildInfo BuildInfo
	if val, ok := configmap.Data["buildInfo"]; ok {
		err := json.Unmarshal([]byte(val), &buildInfo)
		if err != nil {
			return nil, fmt.Errorf("failed to unmarshal buildInfo: %w", err)
		}
	}

	var reloaderOptions ReloaderOptions
	if val, ok := configmap.Data["reloaderOptions"]; ok {
		err := json.Unmarshal([]byte(val), &reloaderOptions)
		if err != nil {
			return nil, fmt.Errorf("failed to unmarshal reloaderOptions: %w", err)
		}
	}

	var deploymentInfo metav1.ObjectMeta
	if val, ok := configmap.Data["deploymentInfo"]; ok {
		err := json.Unmarshal([]byte(val), &deploymentInfo)
		if err != nil {
			return nil, fmt.Errorf("failed to unmarshal deploymentInfo: %w", err)
		}
	}

	return &MetaInfo{
		BuildInfo:       buildInfo,
		ReloaderOptions: reloaderOptions,
		DeploymentInfo:  deploymentInfo,
	}, nil
}

func toJson(data interface{}) string {
	jsonData, err := json.Marshal(data)
	if err != nil {
		return ""
	}
	return string(jsonData)
}

func ParseUTCTime(value string) time.Time {
	if value == "" {
		return time.Time{} // Return zero time if value is empty
	}
	t, err := time.Parse(time.RFC3339, value)
	if err != nil {
		return time.Time{} // Return zero time if parsing fails
	}
	return t
}
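
// ToConfigMap and NewMetaInfo form a JSON round-trip: for a MetaInfo m,
// NewMetaInfo(m.ToConfigMap()) reconstructs m from the "buildInfo",
// "reloaderOptions", and "deploymentInfo" keys (up to anything json.Marshal
// drops), which lets a running Reloader publish its build and configuration
// state and read it back later.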
@@ -1,4 +1,4 @@
-package util
+package common

 import "time"

@@ -11,6 +11,7 @@ import (
 	"github.com/sirupsen/logrus"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
+	csiclient "sigs.k8s.io/secrets-store-csi-driver/pkg/client/clientset/versioned"
 )

 // Clients struct exposes interfaces for kubernetes as well as openshift if available
@@ -18,11 +19,14 @@ type Clients struct {
 	KubernetesClient    kubernetes.Interface
 	OpenshiftAppsClient appsclient.Interface
 	ArgoRolloutClient   argorollout.Interface
+	CSIClient           csiclient.Interface
 }

 var (
 	// IsOpenshift is true if environment is Openshift, it is false if environment is Kubernetes
 	IsOpenshift = isOpenshift()
+	// IsCSIInstalled is true if environment has CSI provider installed, otherwise false
+	IsCSIInstalled = isCSIInstalled()
 )

 // GetClients returns a `Clients` object containing both openshift and kubernetes clients with an openshift identifier
@@ -48,10 +52,20 @@ func GetClients() Clients {
 		logrus.Warnf("Unable to create ArgoRollout client error = %v", err)
 	}

+	var csiClient *csiclient.Clientset
+
+	if IsCSIInstalled {
+		csiClient, err = GetCSIClient()
+		if err != nil {
+			logrus.Warnf("Unable to create CSI client error = %v", err)
+		}
+	}
+
 	return Clients{
 		KubernetesClient:    client,
 		OpenshiftAppsClient: appsClient,
 		ArgoRolloutClient:   rolloutClient,
+		CSIClient:           csiClient,
 	}
 }

@@ -63,6 +77,28 @@ func GetArgoRolloutClient() (*argorollout.Clientset, error) {
 	return argorollout.NewForConfig(config)
 }

+func isCSIInstalled() bool {
+	client, err := GetKubernetesClient()
+	if err != nil {
+		logrus.Fatalf("Unable to create Kubernetes client error = %v", err)
+	}
+	_, err = client.RESTClient().Get().AbsPath("/apis/secrets-store.csi.x-k8s.io/v1").Do(context.TODO()).Raw()
+	if err == nil {
+		logrus.Info("CSI provider is installed")
+		return true
+	}
+	logrus.Info("CSI provider is not installed")
+	return false
+}
+
+func GetCSIClient() (*csiclient.Clientset, error) {
+	config, err := getConfig()
+	if err != nil {
+		return nil, err
+	}
+	return csiclient.NewForConfig(config)
+}
+
 func isOpenshift() bool {
 	client, err := GetKubernetesClient()
 	if err != nil {

@@ -3,11 +3,13 @@ package kube
 import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	csiv1 "sigs.k8s.io/secrets-store-csi-driver/apis/v1"
 )

 // ResourceMap are resources from where changes are going to be detected
 var ResourceMap = map[string]runtime.Object{
-	"configMaps": &v1.ConfigMap{},
-	"secrets":    &v1.Secret{},
-	"namespaces": &v1.Namespace{},
+	"configmaps":                     &v1.ConfigMap{},
+	"secrets":                        &v1.Secret{},
+	"namespaces":                     &v1.Namespace{},
+	"secretproviderclasspodstatuses": &csiv1.SecretProviderClassPodStatus{},
 }

544
test/loadtest/README.md
Normal file
@@ -0,0 +1,544 @@
# Reloader Load Test Framework

This framework provides A/B comparison testing between two Reloader container images.

## Overview

The load test framework:

1. Creates a local kind cluster (1 control-plane + 6 worker nodes)
2. Deploys Prometheus for metrics collection
3. Loads the provided Reloader container images into the cluster
4. Runs standardized test scenarios (S1-S13)
5. Collects metrics via Prometheus scraping
6. Generates comparison reports with pass/fail criteria

## Prerequisites

- Docker or Podman
- kind (Kubernetes in Docker)
- kubectl
- Go 1.22+

## Building

```bash
cd test/loadtest
go build -o loadtest ./cmd/loadtest
```

## Quick Start

```bash
# Compare two published images (e.g., different versions)
./loadtest run \
  --old-image=stakater/reloader:v1.0.0 \
  --new-image=stakater/reloader:v1.1.0

# Run a specific scenario
./loadtest run \
  --old-image=stakater/reloader:v1.0.0 \
  --new-image=stakater/reloader:v1.1.0 \
  --scenario=S2 \
  --duration=120

# Test only a single image (no comparison)
./loadtest run --new-image=myregistry/reloader:dev

# Use local images built with docker/podman
./loadtest run \
  --old-image=localhost/reloader:baseline \
  --new-image=localhost/reloader:feature-branch

# Skip cluster creation (use existing kind cluster)
./loadtest run \
  --old-image=stakater/reloader:v1.0.0 \
  --new-image=stakater/reloader:v1.1.0 \
  --skip-cluster

# Run all scenarios in parallel on 4 clusters (faster execution)
./loadtest run \
  --new-image=localhost/reloader:dev \
  --parallelism=4

# Run all 13 scenarios in parallel (one cluster per scenario)
./loadtest run \
  --new-image=localhost/reloader:dev \
  --parallelism=13

# Generate report from existing results
./loadtest report --scenario=S2 --results-dir=./results
```

## Command Line Options

### Run Command

| Option | Description | Default |
|--------|-------------|---------|
| `--old-image=IMAGE` | Container image for "old" version | - |
| `--new-image=IMAGE` | Container image for "new" version | - |
| `--scenario=ID` | Test scenario: S1-S13 or "all" | all |
| `--duration=SECONDS` | Test duration in seconds | 60 |
| `--parallelism=N` | Run N scenarios in parallel on N kind clusters | 1 |
| `--skip-cluster` | Skip kind cluster creation (use existing, only for parallelism=1) | false |
| `--results-dir=DIR` | Directory for results | ./results |

**Note:** At least one of `--old-image` or `--new-image` is required. Provide both for A/B comparison.

### Report Command

| Option | Description | Default |
|--------|-------------|---------|
| `--scenario=ID` | Scenario to report on (required) | - |
| `--results-dir=DIR` | Directory containing results | ./results |
| `--output=FILE` | Output file (default: stdout) | - |

## Test Scenarios

| ID  | Name                 | Description                                       |
|-----|----------------------|---------------------------------------------------|
| S1  | Burst Updates        | Many ConfigMap/Secret updates in quick succession |
| S2  | Fan-Out              | One ConfigMap used by many (50) workloads         |
| S3  | High Cardinality     | Many CMs/Secrets across many namespaces           |
| S4  | No-Op Updates        | Updates that don't change data (annotation only)  |
| S5  | Workload Churn       | Deployments created/deleted rapidly               |
| S6  | Controller Restart   | Restart controller pod under load                 |
| S7  | API Pressure         | Many concurrent update requests                   |
| S8  | Large Objects        | ConfigMaps > 100KB                                |
| S9  | Multi-Workload Types | Tests all workload types (Deploy, STS, DS)        |
| S10 | Secrets + Mixed      | Secrets and mixed ConfigMap+Secret workloads      |
| S11 | Annotation Strategy  | Tests `--reload-strategy=annotations`             |
| S12 | Pause & Resume       | Tests pause-period during rapid updates           |
| S13 | Complex References   | Init containers, valueFrom, projected volumes     |

## Metrics Reference

This section explains each metric collected during load tests, what it measures, and what different values might indicate.

### Counter Metrics (Totals)

#### `reconcile_total`

**What it measures:** The total number of reconciliation loops executed by the controller.

**What it indicates:**

- **Higher in new vs old:** The new controller-runtime implementation may batch events differently. This is often expected behavior, not a problem.
- **Lower in new vs old:** Better event batching/deduplication. Controller-runtime's work queue naturally deduplicates events.
- **Expected behavior:** The new implementation typically has *fewer* reconciles due to intelligent event batching.

#### `action_total`

**What it measures:** The total number of reload actions triggered (rolling restarts of Deployments/StatefulSets/DaemonSets).

**What it indicates:**

- **Should match expected value:** Both implementations should trigger the same number of reloads for the same workload.
- **Lower than expected:** Some updates were missed - potential bug or race condition.
- **Higher than expected:** Duplicate reloads triggered - inefficiency but not data loss.

#### `reload_executed_total`

**What it measures:** Successful reload operations executed, labeled by `success=true/false`.

**What it indicates:**

- **`success=true` count:** Number of workloads successfully restarted.
- **`success=false` count:** Failed restart attempts (API errors, permission issues).
- **Should match `action_total`:** If significantly lower, reloads are failing.

#### `workloads_scanned_total`

**What it measures:** Number of workloads (Deployments, etc.) scanned when checking for ConfigMap/Secret references.

**What it indicates:**

- **High count:** Controller is scanning many workloads per reconcile.
- **Expected behavior:** Should roughly match the number of workloads × number of reconciles.
- **Optimization signal:** If very high, namespace filtering or label selectors could help.

#### `workloads_matched_total`

**What it measures:** Number of workloads that matched (reference the changed ConfigMap/Secret).

**What it indicates:**

- **Should match `reload_executed_total`:** Every matched workload should be reloaded.
- **Higher than reloads:** Some matched workloads weren't reloaded (potential issue).

#### `errors_total`

**What it measures:** Total errors encountered, labeled by error type.

**What it indicates:**

- **Should be 0:** Any errors indicate problems.
- **Common causes:** API server timeouts, RBAC issues, resource conflicts.
- **Critical metric:** Non-zero errors in production should be investigated.

### API Efficiency Metrics (REST Client)

These metrics track Kubernetes API server calls made by Reloader. Lower values indicate more efficient operation with less API server load.

#### `rest_client_requests_total`

**What it measures:** Total number of HTTP requests made to the Kubernetes API server.

**What it indicates:**

- **Lower is better:** Fewer API calls means less load on the API server.
- **High count:** May indicate inefficient caching or excessive reconciles.
- **Comparison use:** Shows overall API efficiency between implementations.

#### `rest_client_requests_get`

**What it measures:** Number of GET requests (fetching individual resources or listings).

**What it indicates:**

- **Includes:** Fetching ConfigMaps, Secrets, Deployments, etc.
- **Higher count:** More frequent resource fetching, possibly due to cache misses.
- **Expected behavior:** Controller-runtime's caching should reduce GET requests compared to direct API calls.

#### `rest_client_requests_patch`

**What it measures:** Number of PATCH requests (partial updates to resources).

**What it indicates:**

- **Used for:** Rolling restart annotations on workloads.
- **Should correlate with:** `reload_executed_total` - each reload typically requires one PATCH.
- **Lower is better:** Fewer patches means more efficient batching or deduplication.

#### `rest_client_requests_put`

**What it measures:** Number of PUT requests (full resource updates).

**What it indicates:**

- **Used for:** Full object replacements (less common than PATCH).
- **Should be low:** Most updates use PATCH for efficiency.
- **High count:** May indicate suboptimal update strategy.

#### `rest_client_requests_errors`

**What it measures:** Number of failed API requests (4xx/5xx responses).

**What it indicates:**

- **Should be 0:** Errors indicate API server issues or permission problems.
- **Common causes:** Rate limiting, RBAC issues, resource conflicts, network issues.
- **Non-zero:** Investigate API server logs and Reloader permissions.

### Latency Metrics (Percentiles)

All latency metrics are reported in **seconds**. The report shows p50 (median), p95, and p99 percentiles.

#### `reconcile_duration (s)`

**What it measures:** Time spent inside each reconcile loop, from start to finish.

**What it indicates:**

- **p50 (median):** Typical reconcile time. Should be < 100ms for good performance.
- **p95:** 95th percentile - only 5% of reconciles take longer than this.
- **p99:** 99th percentile - indicates worst-case performance.

**Interpreting differences:**

- **New higher than old:** Controller-runtime reconciles may do more work per loop but run fewer times. Check `reconcile_total` - if it's lower, this is expected.
- **Minor differences (< 0.5s absolute):** Not significant for sub-second values.

#### `action_latency (s)`

**What it measures:** End-to-end time from ConfigMap/Secret change detection to workload restart triggered.

**What it indicates:**

- **This is the user-facing latency:** How long users wait for their config changes to take effect.
- **p50 < 1s:** Excellent - most changes apply within a second.
- **p95 < 5s:** Good - even under load, changes apply quickly.
- **p99 > 10s:** May need investigation - some changes take too long.

**What affects this:**

- API server responsiveness
- Number of workloads to scan
- Concurrent updates competing for resources

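For intuition, here is a toy nearest-rank quantile in Go. It is illustrative only: the framework derives p50/p95/p99 from Prometheus histogram data, not from raw samples, and none of these names come from the framework itself.

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// quantile returns the q-th quantile (0 < q <= 1) of samples using the
// nearest-rank method on a sorted copy. Illustrative sketch only.
func quantile(samples []float64, q float64) float64 {
	s := append([]float64(nil), samples...)
	sort.Float64s(s)
	idx := int(math.Ceil(q*float64(len(s)))) - 1
	if idx < 0 {
		idx = 0
	}
	return s[idx]
}

func main() {
	latencies := []float64{0.02, 0.03, 0.05, 0.04, 0.9} // seconds
	fmt.Println(quantile(latencies, 0.5))  // 0.04 (p50: typical case)
	fmt.Println(quantile(latencies, 0.95)) // 0.9  (p95: dominated by the outlier)
}
```

The example shows why p95/p99 matter: a single slow reload barely moves the median but dominates the tail percentiles.
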
### Understanding the Report

#### Report Columns

```
Metric                   Old      New      Expected   Old✓  New✓  Status
------                   ---      ---      --------   ----  ----  ------
action_total             100.00   100.00   100        ✓     ✓     pass
action_latency_p95 (s)   0.15     0.04     -          -     -     pass
```

- **Old/New:** Measured values from each implementation
- **Expected:** Known expected value (for throughput metrics)
- **Old✓/New✓:** Whether the value is within 15% of expected (✓ = yes, ✗ = no, - = no expected value)
- **Status:** pass/fail based on comparison thresholds

#### Pass/Fail Logic

| Metric Type | Pass Condition |
|-------------|----------------|
| Throughput (action_total, reload_executed_total) | New value within 15% of expected |
| Latency (p50, p95, p99) | New not more than threshold% worse than old, OR absolute difference < minimum threshold |
| Errors | New ≤ Old (ideally both 0) |
| API Efficiency (rest_client_requests_*) | New ≤ Old (lower is better), or New not more than 50% higher |

#### Latency Thresholds

Latency comparisons use both percentage AND absolute thresholds to avoid false failures:

| Metric | Max % Worse | Min Absolute Diff |
|--------|-------------|-------------------|
| p50 | 100% | 0.5s |
| p95 | 100% | 1.0s |
| p99 | 100% | 1.0s |

**Example:** If old p50 = 0.01s and new p50 = 0.08s:

- Percentage difference: +700% (would fail % check)
- Absolute difference: 0.07s (< 0.5s threshold)
- **Result: PASS** (both values are fast enough that the difference doesn't matter)

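The dual-threshold rule is easy to express in code. A minimal Go sketch, under the thresholds stated above; `latencyPass` is a hypothetical name, not the framework's actual function:

```go
// latencyPass applies the dual-threshold rule: the new latency passes if it
// is no more than maxPctWorse percent worse than the old one, OR if the
// absolute regression stays under minAbsDiff seconds. Illustrative only.
func latencyPass(oldSec, newSec, maxPctWorse, minAbsDiff float64) bool {
	if newSec <= oldSec {
		return true // equal or better always passes
	}
	absDiff := newSec - oldSec
	if absDiff < minAbsDiff {
		return true // both values are fast enough that the gap is noise
	}
	if oldSec == 0 {
		return false // any measurable regression from zero fails the % check
	}
	return (absDiff/oldSec)*100 <= maxPctWorse
}
```

With the p50 thresholds above, `latencyPass(0.01, 0.08, 100, 0.5)` returns true: the +700% change fails the percentage check, but the 0.07s absolute difference is under the 0.5s floor.
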
### Resource Consumption Metrics

These metrics track CPU, memory, and Go runtime resource usage. Lower values generally indicate more efficient operation.

#### Memory Metrics

| Metric | Description | Unit |
|--------|-------------|------|
| `memory_rss_mb_avg` | Average RSS (resident set size) memory | MB |
| `memory_rss_mb_max` | Peak RSS memory during test | MB |
| `memory_heap_mb_avg` | Average Go heap allocation | MB |
| `memory_heap_mb_max` | Peak Go heap allocation | MB |

**What to watch for:**

- **High RSS:** May indicate memory leaks or inefficient caching
- **High heap:** Many objects being created (check GC metrics)
- **Growing over time:** Potential memory leak

#### CPU Metrics

| Metric | Description | Unit |
|--------|-------------|------|
| `cpu_cores_avg` | Average CPU usage rate | cores |
| `cpu_cores_max` | Peak CPU usage rate | cores |

**What to watch for:**

- **High CPU:** Inefficient algorithms or excessive reconciles
- **Spiky max:** May indicate burst handling issues

#### Go Runtime Metrics

| Metric | Description | Unit |
|--------|-------------|------|
| `goroutines_avg` | Average goroutine count | count |
| `goroutines_max` | Peak goroutine count | count |
| `gc_pause_p99_ms` | 99th percentile GC pause time | ms |

**What to watch for:**

- **High goroutines:** Potential goroutine leak or unbounded concurrency
- **High GC pause:** Large heap or allocation pressure

### Scenario-Specific Expectations

| Scenario | Key Metrics to Watch | Expected Behavior |
|----------|---------------------|-------------------|
| S1 (Burst) | action_latency_p99, cpu_cores_max, goroutines_max | Should handle bursts without queue backup |
| S2 (Fan-Out) | reconcile_total, workloads_matched, memory_rss_mb_max | One CM change → 50 workload reloads |
| S3 (High Cardinality) | reconcile_duration, memory_heap_mb_avg | Many namespaces shouldn't increase memory |
| S4 (No-Op) | action_total = 0, cpu_cores_avg should be low | Minimal resource usage for no-op |
| S5 (Churn) | errors_total, goroutines_avg | Graceful handling, no goroutine leak |
| S6 (Restart) | All metrics captured | Metrics survive controller restart |
| S7 (API Pressure) | errors_total, cpu_cores_max, goroutines_max | No errors under concurrent load |
| S8 (Large Objects) | memory_rss_mb_max, gc_pause_p99_ms | Large ConfigMaps don't cause OOM or GC issues |
| S9 (Multi-Workload) | reload_executed_total per type | All workload types (Deploy, STS, DS) reload |
| S10 (Secrets) | reload_executed_total, workloads_matched | Both Secrets and ConfigMaps trigger reloads |
| S11 (Annotation) | workload annotations present | Deployments get `last-reloaded-from` annotation |
| S12 (Pause) | reload_executed_total << updates | Pause-period reduces reload frequency |
| S13 (Complex) | reload_executed_total | All reference types trigger reloads |

### Troubleshooting

#### New implementation shows 0 for all metrics

- Check if Prometheus is scraping the new Reloader pod
- Verify pod annotations: `prometheus.io/scrape: "true"`
- Check Prometheus targets: `http://localhost:9091/targets`

#### Metrics don't match expected values

- Verify the test ran to completion (check logs)
- Ensure Prometheus scraped final metrics (18s wait after test)
- Check for pod restarts during the test (metrics reset on restart - handled by `increase()`)

#### High latency in new implementation

- Check Reloader pod resource limits
- Look for API server throttling in logs
- Compare `reconcile_total` - fewer reconciles with higher duration may be normal

#### REST client errors are non-zero

- **Common causes:**
    - Optional CRD schemes registered but CRDs not installed (e.g., Argo Rollouts, OpenShift DeploymentConfig)
    - API server rate limiting under high load
    - RBAC permissions missing for certain resource types
- **Argo Rollouts errors:** If you see ~4 errors per test, ensure `--enable-argo-rollouts=false` if not using Argo Rollouts
- **OpenShift errors:** Similarly, ensure DeploymentConfig support is disabled on non-OpenShift clusters

#### REST client requests much higher in new implementation

- Check if caching is working correctly
- Look for excessive re-queuing in controller logs
- Compare `reconcile_total` - more reconciles naturally means more API calls

## Report Format

The report generator produces a comparison table with units and expected value indicators:

```
================================================================================
                     RELOADER A/B COMPARISON REPORT
================================================================================

Scenario:  S2
Generated: 2026-01-03 14:30:00
Status:    PASS
Summary:   All metrics within acceptable thresholds

Test: S2: Fan-out test - 1 CM update triggers 50 deployment reloads

--------------------------------------------------------------------------------
METRIC COMPARISONS
--------------------------------------------------------------------------------
(Old✓/New✓ = meets expected value within 15%)

Metric                        Old      New      Expected   Old✓  New✓  Status
------                        ---      ---      --------   ----  ----  ------
reconcile_total               50.00    25.00    -          -     -     pass
reconcile_duration_p50 (s)    0.01     0.05     -          -     -     pass
reconcile_duration_p95 (s)    0.02     0.15     -          -     -     pass
action_total                  50.00    50.00    50         ✓     ✓     pass
action_latency_p50 (s)        0.05     0.03     -          -     -     pass
action_latency_p95 (s)        0.12     0.08     -          -     -     pass
errors_total                  0.00     0.00     -          -     -     pass
reload_executed_total         50.00    50.00    50         ✓     ✓     pass
workloads_scanned_total       50.00    50.00    50         ✓     ✓     pass
workloads_matched_total       50.00    50.00    50         ✓     ✓     pass
rest_client_requests_total    850      720      -          -     -     pass
rest_client_requests_get      500      420      -          -     -     pass
rest_client_requests_patch    300      250      -          -     -     pass
rest_client_requests_errors   0        0        -          -     -     pass
```

Reports are saved to `results/<scenario>/report.txt` after each test.

## Directory Structure

```
test/loadtest/
├── cmd/
│   └── loadtest/          # Unified CLI (run + report)
│       └── main.go
├── internal/
│   ├── cluster/           # Kind cluster management
│   │   └── kind.go
│   ├── prometheus/        # Prometheus deployment & querying
│   │   └── prometheus.go
│   ├── reloader/          # Reloader deployment
│   │   └── deploy.go
│   └── scenarios/         # Test scenario implementations
│       └── scenarios.go
├── manifests/
│   └── prometheus.yaml    # Prometheus deployment manifest
├── results/               # Generated after tests
│   └── <scenario>/
│       ├── old/           # Old version data
│       │   ├── *.json     # Prometheus metric snapshots
│       │   └── reloader.log   # Reloader pod logs
│       ├── new/           # New version data
│       │   ├── *.json     # Prometheus metric snapshots
│       │   └── reloader.log   # Reloader pod logs
│       ├── expected.json  # Expected values from test
│       └── report.txt     # Comparison report
├── go.mod
├── go.sum
└── README.md
```

## Building Local Images for Testing

If you want to test local code changes:

```bash
# Build the new Reloader image from current source
docker build -t localhost/reloader:dev -f Dockerfile .

# Build from a different branch/commit
git checkout feature-branch
docker build -t localhost/reloader:feature -f Dockerfile .

# Then run comparison
./loadtest run \
  --old-image=stakater/reloader:v1.0.0 \
  --new-image=localhost/reloader:feature
```

## Interpreting Results

### PASS

All metrics are within acceptable thresholds. The new implementation is comparable to or better than the old one.

### FAIL

One or more metrics exceeded thresholds. Review the specific metrics:

- **Latency degradation**: p95/p99 latencies are significantly higher
- **Missed reloads**: `reload_executed_total` differs significantly
- **Errors increased**: `errors_total` is higher in the new version

### Investigation

If tests fail, check:

1. Pod logs: `kubectl logs -n reloader-new deployment/reloader` (or check `results/<scenario>/new/reloader.log`)
2. Resource usage: `kubectl top pods -n reloader-new`
3. Events: `kubectl get events -n reloader-test`

## Parallel Execution

The `--parallelism` option enables running scenarios on multiple kind clusters simultaneously, significantly reducing total test time.

### How It Works

1. **Multiple Clusters**: Creates N kind clusters named `reloader-loadtest-0`, `reloader-loadtest-1`, etc.
2. **Separate Prometheus**: Each cluster gets its own Prometheus instance with a unique port (9091, 9092, etc.)
3. **Worker Pool**: Scenarios are distributed to workers via a channel, with each worker running on its own cluster (see the sketch after this list)
4. **Independent Execution**: Each scenario runs in complete isolation with no resource contention

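The channel-based distribution in step 3 is a standard Go worker pool. A simplified sketch; `runScenario` and the scenario handling are stand-ins for the framework's internals, not its actual code:

```go
package main

import (
	"fmt"
	"sync"
)

// runScenario is a stand-in for the framework's per-scenario execution
// (cluster setup, image load, test run, metric collection).
func runScenario(cluster, scenario string) {
	fmt.Printf("running %s on %s\n", scenario, cluster)
}

func main() {
	scenarios := []string{"S1", "S2", "S3", "S4"}
	parallelism := 2

	jobs := make(chan string)
	var wg sync.WaitGroup
	for i := 0; i < parallelism; i++ {
		// One worker per kind cluster, matching the naming scheme above.
		cluster := fmt.Sprintf("reloader-loadtest-%d", i)
		wg.Add(1)
		go func() {
			defer wg.Done()
			for scenario := range jobs {
				runScenario(cluster, scenario)
			}
		}()
	}
	for _, s := range scenarios {
		jobs <- s // distribute scenarios to whichever worker is free
	}
	close(jobs)
	wg.Wait()
}
```

Because workers pull from a shared channel, a slow scenario on one cluster never blocks the others; idle workers simply pick up the next scenario.
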
### Usage

```bash
# Run 4 scenarios at a time (creates 4 clusters)
./loadtest run --new-image=my-image:tag --parallelism=4

# Run all 13 scenarios in parallel (creates 13 clusters)
./loadtest run --new-image=my-image:tag --parallelism=13 --scenario=all
```

### Resource Requirements

Parallel execution requires significant system resources:

| Parallelism | Clusters | Est. Memory | Est. CPU |
|-------------|----------|-------------|----------|
| 1 (default) | 1 | ~4GB | 2-4 cores |
| 4 | 4 | ~16GB | 8-16 cores |
| 13 | 13 | ~52GB | 26-52 cores |

### Notes

- The `--skip-cluster` option is not supported with parallelism > 1
- Each worker loads images independently, so initial setup takes longer
- All results are written to the same `--results-dir` with per-scenario subdirectories
- If a cluster setup fails, remaining workers continue with available clusters
- Parallelism automatically reduces to match scenario count if set higher

## CI Integration

### GitHub Actions

Load tests can be triggered on pull requests by commenting `/loadtest`:

```
/loadtest
```

This will:

1. Build a container image from the PR branch
2. Run all load test scenarios against it
3. Post results as a PR comment
4. Upload detailed results as artifacts

### Make Target

Run load tests locally or in CI:

```bash
# From repository root
make loadtest
```

This builds the container image and runs all scenarios with a 60-second duration.
7
test/loadtest/cmd/loadtest/main.go
Normal file
@@ -0,0 +1,7 @@
package main

import "github.com/stakater/Reloader/test/loadtest/internal/cmd"

func main() {
	cmd.Execute()
}
52
test/loadtest/go.mod
Normal file
@@ -0,0 +1,52 @@
module github.com/stakater/Reloader/test/loadtest

go 1.26

require (
	github.com/spf13/cobra v1.8.1
	k8s.io/api v0.31.0
	k8s.io/apimachinery v0.31.0
	k8s.io/client-go v0.31.0
)

require (
	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
	github.com/emicklei/go-restful/v3 v3.11.0 // indirect
	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
	github.com/go-logr/logr v1.4.2 // indirect
	github.com/go-openapi/jsonpointer v0.19.6 // indirect
	github.com/go-openapi/jsonreference v0.20.2 // indirect
	github.com/go-openapi/swag v0.22.4 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/protobuf v1.5.4 // indirect
	github.com/google/gnostic-models v0.6.8 // indirect
	github.com/google/go-cmp v0.6.0 // indirect
	github.com/google/gofuzz v1.2.0 // indirect
	github.com/google/uuid v1.6.0 // indirect
	github.com/imdario/mergo v0.3.6 // indirect
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
	github.com/spf13/pflag v1.0.5 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	golang.org/x/net v0.26.0 // indirect
	golang.org/x/oauth2 v0.21.0 // indirect
	golang.org/x/sys v0.21.0 // indirect
	golang.org/x/term v0.21.0 // indirect
	golang.org/x/text v0.16.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	google.golang.org/protobuf v1.34.2 // indirect
	gopkg.in/inf.v0 v0.9.1 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
	k8s.io/klog/v2 v2.130.1 // indirect
	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect
	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
	sigs.k8s.io/yaml v1.4.0 // indirect
)
160
test/loadtest/go.sum
Normal file
@@ -0,0 +1,160 @@
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af h1:kmjWCqn2qkEml422C2Rrd27c3VGxi6a/6HNq8QmHRKM=
github.com/google/pprof v0.0.0-20240525223248-4bfdf5a9a2af/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.19.0 h1:9Cnnf7UHo57Hy3k6/m5k3dRfGTMXGvxhHFvkDTCTpvA=
github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
|
||||
golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
|
||||
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
|
||||
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
|
||||
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo=
|
||||
k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE=
|
||||
k8s.io/apimachinery v0.31.0 h1:m9jOiSr3FoSSL5WO9bjm1n6B9KROYYgNZOb4tyZ1lBc=
|
||||
k8s.io/apimachinery v0.31.0/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo=
|
||||
k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8=
|
||||
k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
|
||||
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
|
||||
k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
test/loadtest/internal/cluster/kind.go (new file, 314 lines)
@@ -0,0 +1,314 @@
// Package cluster provides kind cluster management functionality.
package cluster

import (
    "bytes"
    "context"
    "fmt"
    "os"
    "os/exec"
    "strings"
    "time"
)

// Config holds configuration for kind cluster operations.
type Config struct {
    Name             string
    ContainerRuntime string // "docker" or "podman"
    PortOffset       int    // Offset for host port mappings (for parallel clusters)
}

// Manager handles kind cluster operations.
type Manager struct {
    cfg Config
}

// NewManager creates a new cluster manager.
func NewManager(cfg Config) *Manager {
    return &Manager{cfg: cfg}
}

// DetectContainerRuntime finds an available container runtime.
// It checks if the runtime daemon is actually running, not just if the binary exists.
func DetectContainerRuntime() (string, error) {
    if _, err := exec.LookPath("docker"); err == nil {
        cmd := exec.Command("docker", "info")
        if err := cmd.Run(); err == nil {
            return "docker", nil
        }
    }
    if _, err := exec.LookPath("podman"); err == nil {
        cmd := exec.Command("podman", "info")
        if err := cmd.Run(); err == nil {
            return "podman", nil
        }
    }
    return "", fmt.Errorf("neither docker nor podman is running")
}

// Exists checks if the cluster already exists.
func (m *Manager) Exists() bool {
    cmd := exec.Command("kind", "get", "clusters")
    out, err := cmd.Output()
    if err != nil {
        return false
    }
    for _, line := range strings.Split(string(out), "\n") {
        if strings.TrimSpace(line) == m.cfg.Name {
            return true
        }
    }
    return false
}

// Delete deletes the kind cluster.
func (m *Manager) Delete(ctx context.Context) error {
    cmd := exec.CommandContext(ctx, "kind", "delete", "cluster", "--name", m.cfg.Name)
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    return cmd.Run()
}

// Create creates a new kind cluster with optimized settings.
func (m *Manager) Create(ctx context.Context) error {
    if m.cfg.ContainerRuntime == "podman" {
        os.Setenv("KIND_EXPERIMENTAL_PROVIDER", "podman")
    }

    if m.Exists() {
        fmt.Printf("Cluster %s already exists, deleting...\n", m.cfg.Name)
        if err := m.Delete(ctx); err != nil {
            return fmt.Errorf("deleting existing cluster: %w", err)
        }
    }

    httpPort := 8080 + m.cfg.PortOffset
    httpsPort := 8443 + m.cfg.PortOffset

    config := fmt.Sprintf(`kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
networking:
  podSubnet: "10.244.0.0/16"
  serviceSubnet: "10.96.0.0/16"
nodes:
- role: control-plane
  kubeadmConfigPatches:
  - |
    kind: InitConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        node-labels: "ingress-ready=true"
        kube-api-qps: "50"
        kube-api-burst: "100"
        serialize-image-pulls: "false"
        event-qps: "50"
        event-burst: "100"
  - |
    kind: ClusterConfiguration
    apiServer:
      extraArgs:
        max-requests-inflight: "800"
        max-mutating-requests-inflight: "400"
        watch-cache-sizes: "configmaps#1000,secrets#1000,pods#1000"
    controllerManager:
      extraArgs:
        kube-api-qps: "200"
        kube-api-burst: "200"
    scheduler:
      extraArgs:
        kube-api-qps: "200"
        kube-api-burst: "200"
  extraPortMappings:
  - containerPort: 80
    hostPort: %d
    protocol: TCP
  - containerPort: 443
    hostPort: %d
    protocol: TCP
- role: worker
  kubeadmConfigPatches:
  - |
    kind: JoinConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        max-pods: "250"
        kube-api-qps: "50"
        kube-api-burst: "100"
        serialize-image-pulls: "false"
        event-qps: "50"
        event-burst: "100"
- role: worker
  kubeadmConfigPatches:
  - |
    kind: JoinConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        max-pods: "250"
        kube-api-qps: "50"
        kube-api-burst: "100"
        serialize-image-pulls: "false"
        event-qps: "50"
        event-burst: "100"
- role: worker
  kubeadmConfigPatches:
  - |
    kind: JoinConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        max-pods: "250"
        kube-api-qps: "50"
        kube-api-burst: "100"
        serialize-image-pulls: "false"
        event-qps: "50"
        event-burst: "100"
- role: worker
  kubeadmConfigPatches:
  - |
    kind: JoinConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        max-pods: "250"
        kube-api-qps: "50"
        kube-api-burst: "100"
        serialize-image-pulls: "false"
        event-qps: "50"
        event-burst: "100"
- role: worker
  kubeadmConfigPatches:
  - |
    kind: JoinConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        max-pods: "250"
        kube-api-qps: "50"
        kube-api-burst: "100"
        serialize-image-pulls: "false"
        event-qps: "50"
        event-burst: "100"
- role: worker
  kubeadmConfigPatches:
  - |
    kind: JoinConfiguration
    nodeRegistration:
      kubeletExtraArgs:
        max-pods: "250"
        kube-api-qps: "50"
        kube-api-burst: "100"
        serialize-image-pulls: "false"
        event-qps: "50"
        event-burst: "100"
`, httpPort, httpsPort)
    cmd := exec.CommandContext(ctx, "kind", "create", "cluster", "--name", m.cfg.Name, "--config=-")
    cmd.Stdin = strings.NewReader(config)
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    return cmd.Run()
}

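// Example: with PortOffset 0 the control plane maps container ports 80/443
// to host ports 8080/8443; a second cluster created with PortOffset 1 maps
// to 8081/8444, so parallel clusters do not collide on host ports.
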
// GetKubeconfig returns the kubeconfig for the cluster.
func (m *Manager) GetKubeconfig() (string, error) {
    cmd := exec.Command("kind", "get", "kubeconfig", "--name", m.cfg.Name)
    out, err := cmd.Output()
    if err != nil {
        return "", fmt.Errorf("getting kubeconfig: %w", err)
    }
    return string(out), nil
}

// Context returns the kubectl context name for this cluster.
func (m *Manager) Context() string {
    return "kind-" + m.cfg.Name
}

// Name returns the cluster name.
func (m *Manager) Name() string {
    return m.cfg.Name
}

// LoadImage loads a container image into the kind cluster.
func (m *Manager) LoadImage(ctx context.Context, image string) error {
    if !m.imageExistsLocally(image) {
        fmt.Printf(" Image not found locally, pulling: %s\n", image)
        pullCmd := exec.CommandContext(ctx, m.cfg.ContainerRuntime, "pull", image)
        pullCmd.Stdout = os.Stdout
        pullCmd.Stderr = os.Stderr
        if err := pullCmd.Run(); err != nil {
            return fmt.Errorf("pulling image %s: %w", image, err)
        }
    } else {
        fmt.Printf(" Image found locally: %s\n", image)
    }

    fmt.Printf(" Copying image to kind cluster...\n")

    if m.cfg.ContainerRuntime == "podman" {
        tmpFile := fmt.Sprintf("/tmp/kind-image-%d.tar", time.Now().UnixNano())
        defer os.Remove(tmpFile)

        saveCmd := exec.CommandContext(ctx, m.cfg.ContainerRuntime, "save", image, "-o", tmpFile)
        if err := saveCmd.Run(); err != nil {
            return fmt.Errorf("saving image %s: %w", image, err)
        }

        loadCmd := exec.CommandContext(ctx, "kind", "load", "image-archive", tmpFile, "--name", m.cfg.Name)
        loadCmd.Stdout = os.Stdout
        loadCmd.Stderr = os.Stderr
        if err := loadCmd.Run(); err != nil {
            return fmt.Errorf("loading image archive: %w", err)
        }
    } else {
        loadCmd := exec.CommandContext(ctx, "kind", "load", "docker-image", image, "--name", m.cfg.Name)
        loadCmd.Stdout = os.Stdout
        loadCmd.Stderr = os.Stderr
        if err := loadCmd.Run(); err != nil {
            return fmt.Errorf("loading image %s: %w", image, err)
        }
    }

    return nil
}

// imageExistsLocally checks if an image exists in the local container runtime.
func (m *Manager) imageExistsLocally(image string) bool {
    cmd := exec.Command(m.cfg.ContainerRuntime, "image", "exists", image)
    if err := cmd.Run(); err == nil {
        return true
    }

    cmd = exec.Command(m.cfg.ContainerRuntime, "image", "inspect", image)
    if err := cmd.Run(); err == nil {
        return true
    }

    cmd = exec.Command(m.cfg.ContainerRuntime, "images", "--format", "{{.Repository}}:{{.Tag}}")
    out, err := cmd.Output()
    if err == nil {
        for _, line := range strings.Split(string(out), "\n") {
            if strings.TrimSpace(line) == image {
                return true
            }
        }
    }

    return false
}

// PullImage pulls an image using the container runtime.
func (m *Manager) PullImage(ctx context.Context, image string) error {
    cmd := exec.CommandContext(ctx, m.cfg.ContainerRuntime, "pull", image)
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    return cmd.Run()
}

// ExecKubectl runs a kubectl command against the cluster.
func (m *Manager) ExecKubectl(ctx context.Context, args ...string) ([]byte, error) {
    cmd := exec.CommandContext(ctx, "kubectl", args...)
    var stdout, stderr bytes.Buffer
    cmd.Stdout = &stdout
    cmd.Stderr = &stderr
    if err := cmd.Run(); err != nil {
        return nil, fmt.Errorf("%w: %s", err, stderr.String())
    }
    return stdout.Bytes(), nil
}
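For orientation, a minimal usage sketch of the cluster package above (illustrative only, not part of the diff; the image tag is hypothetical, while the import path and cluster name come from the files in this change):

package main

import (
    "context"
    "log"

    "github.com/stakater/Reloader/test/loadtest/internal/cluster"
)

func main() {
    // Prefer whichever runtime daemon is actually running (docker, then podman).
    runtime, err := cluster.DetectContainerRuntime()
    if err != nil {
        log.Fatal(err)
    }

    mgr := cluster.NewManager(cluster.Config{
        Name:             "reloader-loadtest", // DefaultClusterName in cmd/root.go
        ContainerRuntime: runtime,
    })

    ctx := context.Background()
    if err := mgr.Create(ctx); err != nil { // deletes and recreates an existing cluster
        log.Fatal(err)
    }
    if err := mgr.LoadImage(ctx, "localhost/reloader:dev"); err != nil { // hypothetical tag
        log.Fatal(err)
    }

    // Run kubectl against the new cluster using its kind-<name> context.
    out, err := mgr.ExecKubectl(ctx, "--context", mgr.Context(), "get", "nodes")
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("%s", out)
}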
test/loadtest/internal/cmd/report.go (new file, 860 lines)
@@ -0,0 +1,860 @@
package cmd

import (
    "encoding/json"
    "fmt"
    "log"
    "math"
    "os"
    "path/filepath"
    "sort"
    "strings"
    "time"

    "github.com/spf13/cobra"
)

var (
    reportScenario   string
    reportResultsDir string
    reportOutputFile string
    reportFormat     string
)

var reportCmd = &cobra.Command{
    Use:   "report",
    Short: "Generate comparison report for a scenario",
    Long: `Generate a detailed report for a specific test scenario.

Examples:
  # Generate report for a scenario
  loadtest report --scenario=S2 --results-dir=./results

  # Generate JSON report
  loadtest report --scenario=S2 --format=json`,
    Run: func(cmd *cobra.Command, args []string) {
        reportCommand()
    },
}

func init() {
    reportCmd.Flags().StringVar(&reportScenario, "scenario", "", "Scenario to report on (required)")
    reportCmd.Flags().StringVar(&reportResultsDir, "results-dir", "./results", "Directory containing results")
    reportCmd.Flags().StringVar(&reportOutputFile, "output", "", "Output file (default: stdout)")
    reportCmd.Flags().StringVar(&reportFormat, "format", "text", "Output format: text, json, markdown")
    reportCmd.MarkFlagRequired("scenario")
}

// PrometheusResponse represents a Prometheus API response for report parsing.
type PrometheusResponse struct {
    Status string `json:"status"`
    Data   struct {
        ResultType string `json:"resultType"`
        Result     []struct {
            Metric map[string]string `json:"metric"`
            Value  []interface{}     `json:"value"`
        } `json:"result"`
    } `json:"data"`
}
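
// For reference, an instant query against the Prometheus HTTP API returns a
// payload of this shape, with each sample value encoded as
// [ <unix timestamp>, "<value as string>" ]:
//
//   {"status":"success","data":{"resultType":"vector","result":[
//     {"metric":{"success":"true"},"value":[1718000000,"42"]}]}}
//
// This is why the selector helpers further down parse Value[1] as a string.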

// MetricComparison represents the comparison of a single metric.
type MetricComparison struct {
    Name             string  `json:"name"`
    DisplayName      string  `json:"display_name"`
    Unit             string  `json:"unit"`
    IsCounter        bool    `json:"is_counter"`
    OldValue         float64 `json:"old_value"`
    NewValue         float64 `json:"new_value"`
    Expected         float64 `json:"expected"`
    Difference       float64 `json:"difference"`
    DiffPct          float64 `json:"diff_pct"`
    Status           string  `json:"status"`
    Threshold        float64 `json:"threshold"`
    OldMeetsExpected string  `json:"old_meets_expected"`
    NewMeetsExpected string  `json:"new_meets_expected"`
}

type metricInfo struct {
    unit      string
    isCounter bool
}

var metricInfoMap = map[string]metricInfo{
    "reconcile_total":              {unit: "count", isCounter: true},
    "reconcile_duration_p50":       {unit: "s", isCounter: false},
    "reconcile_duration_p95":       {unit: "s", isCounter: false},
    "reconcile_duration_p99":       {unit: "s", isCounter: false},
    "action_total":                 {unit: "count", isCounter: true},
    "action_latency_p50":           {unit: "s", isCounter: false},
    "action_latency_p95":           {unit: "s", isCounter: false},
    "action_latency_p99":           {unit: "s", isCounter: false},
    "errors_total":                 {unit: "count", isCounter: true},
    "reload_executed_total":        {unit: "count", isCounter: true},
    "workloads_scanned_total":      {unit: "count", isCounter: true},
    "workloads_matched_total":      {unit: "count", isCounter: true},
    "skipped_total_no_data_change": {unit: "count", isCounter: true},
    "rest_client_requests_total":   {unit: "count", isCounter: true},
    "rest_client_requests_get":     {unit: "count", isCounter: true},
    "rest_client_requests_patch":   {unit: "count", isCounter: true},
    "rest_client_requests_put":     {unit: "count", isCounter: true},
    "rest_client_requests_errors":  {unit: "count", isCounter: true},
    "memory_rss_mb_avg":            {unit: "MB", isCounter: false},
    "memory_rss_mb_max":            {unit: "MB", isCounter: false},
    "memory_heap_mb_avg":           {unit: "MB", isCounter: false},
    "memory_heap_mb_max":           {unit: "MB", isCounter: false},
    "cpu_cores_avg":                {unit: "cores", isCounter: false},
    "cpu_cores_max":                {unit: "cores", isCounter: false},
    "goroutines_avg":               {unit: "count", isCounter: false},
    "goroutines_max":               {unit: "count", isCounter: false},
    "gc_pause_p99_ms":              {unit: "ms", isCounter: false},
}

// ReportExpectedMetrics matches the expected metrics from test scenarios.
type ReportExpectedMetrics struct {
    ActionTotal           int    `json:"action_total"`
    ReloadExecutedTotal   int    `json:"reload_executed_total"`
    ReconcileTotal        int    `json:"reconcile_total"`
    WorkloadsScannedTotal int    `json:"workloads_scanned_total"`
    WorkloadsMatchedTotal int    `json:"workloads_matched_total"`
    SkippedTotal          int    `json:"skipped_total"`
    Description           string `json:"description"`
}

// ScenarioReport represents the full report for a scenario.
type ScenarioReport struct {
    Scenario        string                `json:"scenario"`
    Timestamp       time.Time             `json:"timestamp"`
    Comparisons     []MetricComparison    `json:"comparisons"`
    OverallStatus   string                `json:"overall_status"`
    Summary         string                `json:"summary"`
    PassCriteria    []string              `json:"pass_criteria"`
    FailedCriteria  []string              `json:"failed_criteria"`
    Expected        ReportExpectedMetrics `json:"expected"`
    TestDescription string                `json:"test_description"`
}

// MetricType defines how to evaluate a metric.
type MetricType int

const (
    LowerIsBetter MetricType = iota
    ShouldMatch
    HigherIsBetter
    Informational
)

type thresholdConfig struct {
    maxDiff    float64
    metricType MetricType
    minAbsDiff float64
}

var thresholds = map[string]thresholdConfig{
    "reconcile_total":              {maxDiff: 60.0, metricType: LowerIsBetter},
    "reconcile_duration_p50":       {maxDiff: 100.0, metricType: LowerIsBetter, minAbsDiff: 0.5},
    "reconcile_duration_p95":       {maxDiff: 100.0, metricType: LowerIsBetter, minAbsDiff: 1.0},
    "reconcile_duration_p99":       {maxDiff: 100.0, metricType: LowerIsBetter, minAbsDiff: 1.0},
    "action_latency_p50":           {maxDiff: 100.0, metricType: LowerIsBetter, minAbsDiff: 0.5},
    "action_latency_p95":           {maxDiff: 100.0, metricType: LowerIsBetter, minAbsDiff: 1.0},
    "action_latency_p99":           {maxDiff: 100.0, metricType: LowerIsBetter, minAbsDiff: 1.0},
    "errors_total":                 {maxDiff: 0.0, metricType: LowerIsBetter},
    "action_total":                 {maxDiff: 15.0, metricType: ShouldMatch},
    "reload_executed_total":        {maxDiff: 15.0, metricType: ShouldMatch},
    "workloads_scanned_total":      {maxDiff: 15.0, metricType: ShouldMatch},
    "workloads_matched_total":      {maxDiff: 15.0, metricType: ShouldMatch},
    "skipped_total_no_data_change": {maxDiff: 20.0, metricType: ShouldMatch},
    "rest_client_requests_total":   {maxDiff: 100.0, metricType: LowerIsBetter, minAbsDiff: 50},
    "rest_client_requests_get":     {maxDiff: 100.0, metricType: LowerIsBetter, minAbsDiff: 50},
    "rest_client_requests_patch":   {maxDiff: 100.0, metricType: LowerIsBetter, minAbsDiff: 50},
    "rest_client_requests_put":     {maxDiff: 100.0, metricType: LowerIsBetter, minAbsDiff: 20},
    "rest_client_requests_errors":  {maxDiff: 0.0, metricType: LowerIsBetter, minAbsDiff: 100},
    "memory_rss_mb_avg":            {maxDiff: 50.0, metricType: LowerIsBetter, minAbsDiff: 20},
    "memory_rss_mb_max":            {maxDiff: 50.0, metricType: LowerIsBetter, minAbsDiff: 30},
    "memory_heap_mb_avg":           {maxDiff: 50.0, metricType: LowerIsBetter, minAbsDiff: 15},
    "memory_heap_mb_max":           {maxDiff: 50.0, metricType: LowerIsBetter, minAbsDiff: 20},
    "cpu_cores_avg":                {maxDiff: 100.0, metricType: LowerIsBetter, minAbsDiff: 0.1},
    "cpu_cores_max":                {maxDiff: 100.0, metricType: LowerIsBetter, minAbsDiff: 0.2},
    "goroutines_avg":               {metricType: Informational},
    "goroutines_max":               {metricType: Informational},
    "gc_pause_p99_ms":              {maxDiff: 100.0, metricType: LowerIsBetter, minAbsDiff: 5},
}
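
// How to read the thresholds above: a LowerIsBetter metric fails when the
// new value regresses by more than maxDiff percent, unless the absolute
// change stays under minAbsDiff (small noise is ignored). For example, for
// memory_rss_mb_avg (maxDiff 50, minAbsDiff 20) a move from 30 MB to 40 MB
// passes (only 10 MB of absolute change), while 100 MB to 160 MB fails
// (+60% and 60 MB of change). ShouldMatch metrics compare old and new
// symmetrically, and Informational metrics never affect pass/fail.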

func reportCommand() {
    if reportScenario == "" {
        log.Fatal("--scenario is required for report command")
    }

    report, err := generateScenarioReport(reportScenario, reportResultsDir)
    if err != nil {
        log.Fatalf("Failed to generate report: %v", err)
    }

    var output string
    switch OutputFormat(reportFormat) {
    case OutputFormatJSON:
        output = renderScenarioReportJSON(report)
    case OutputFormatMarkdown:
        output = renderScenarioReportMarkdown(report)
    default:
        output = renderScenarioReport(report)
    }

    if reportOutputFile != "" {
        if err := os.WriteFile(reportOutputFile, []byte(output), 0644); err != nil {
            log.Fatalf("Failed to write output file: %v", err)
        }
        log.Printf("Report written to %s", reportOutputFile)
    } else {
        fmt.Println(output)
    }
}

func generateScenarioReport(scenario, resultsDir string) (*ScenarioReport, error) {
    oldDir := filepath.Join(resultsDir, scenario, "old")
    newDir := filepath.Join(resultsDir, scenario, "new")
    scenarioDir := filepath.Join(resultsDir, scenario)

    _, oldErr := os.Stat(oldDir)
    _, newErr := os.Stat(newDir)
    hasOld := oldErr == nil
    hasNew := newErr == nil
    isComparison := hasOld && hasNew

    singleVersion := ""
    singleDir := ""
    if !isComparison {
        if hasNew {
            singleVersion = "new"
            singleDir = newDir
        } else if hasOld {
            singleVersion = "old"
            singleDir = oldDir
        } else {
            return nil, fmt.Errorf("no results found in %s", scenarioDir)
        }
    }

    report := &ScenarioReport{
        Scenario:  scenario,
        Timestamp: time.Now(),
    }

    expectedPath := filepath.Join(scenarioDir, "expected.json")
    if data, err := os.ReadFile(expectedPath); err == nil {
        if err := json.Unmarshal(data, &report.Expected); err != nil {
            log.Printf("Warning: Could not parse expected metrics: %v", err)
        } else {
            report.TestDescription = report.Expected.Description
        }
    }

    if !isComparison {
        return generateSingleVersionReport(report, singleDir, singleVersion, scenario)
    }

    metricsToCompare := []struct {
        name     string
        file     string
        selector func(data PrometheusResponse) float64
    }{
        {"reconcile_total", "reloader_reconcile_total.json", sumAllValues},
        {"reconcile_duration_p50", "reconcile_p50.json", getFirstValue},
        {"reconcile_duration_p95", "reconcile_p95.json", getFirstValue},
        {"reconcile_duration_p99", "reconcile_p99.json", getFirstValue},
        {"action_total", "reloader_action_total.json", sumAllValues},
        {"action_latency_p50", "action_p50.json", getFirstValue},
        {"action_latency_p95", "action_p95.json", getFirstValue},
        {"action_latency_p99", "action_p99.json", getFirstValue},
        {"errors_total", "reloader_errors_total.json", sumAllValues},
        {"reload_executed_total", "reloader_reload_executed_total.json", sumSuccessValues},
        {"workloads_scanned_total", "reloader_workloads_scanned_total.json", sumAllValues},
        {"workloads_matched_total", "reloader_workloads_matched_total.json", sumAllValues},
        {"rest_client_requests_total", "rest_client_requests_total.json", getFirstValue},
        {"rest_client_requests_get", "rest_client_requests_get.json", getFirstValue},
        {"rest_client_requests_patch", "rest_client_requests_patch.json", getFirstValue},
        {"rest_client_requests_put", "rest_client_requests_put.json", getFirstValue},
        {"rest_client_requests_errors", "rest_client_requests_errors.json", getFirstValue},
        {"memory_rss_mb_avg", "memory_rss_bytes_avg.json", bytesToMB},
        {"memory_rss_mb_max", "memory_rss_bytes_max.json", bytesToMB},
        {"memory_heap_mb_avg", "memory_heap_bytes_avg.json", bytesToMB},
        {"memory_heap_mb_max", "memory_heap_bytes_max.json", bytesToMB},
        {"cpu_cores_avg", "cpu_usage_cores_avg.json", getFirstValue},
        {"cpu_cores_max", "cpu_usage_cores_max.json", getFirstValue},
        {"goroutines_avg", "goroutines_avg.json", getFirstValue},
        {"goroutines_max", "goroutines_max.json", getFirstValue},
        {"gc_pause_p99_ms", "gc_duration_seconds_p99.json", secondsToMs},
    }

    expectedValues := map[string]float64{
        "action_total":            float64(report.Expected.ActionTotal),
        "reload_executed_total":   float64(report.Expected.ReloadExecutedTotal),
        "reconcile_total":         float64(report.Expected.ReconcileTotal),
        "workloads_scanned_total": float64(report.Expected.WorkloadsScannedTotal),
        "workloads_matched_total": float64(report.Expected.WorkloadsMatchedTotal),
        "skipped_total":           float64(report.Expected.SkippedTotal),
    }

    metricValues := make(map[string]struct{ old, new, expected float64 })

    for _, m := range metricsToCompare {
        oldData, err := loadMetricFile(filepath.Join(oldDir, m.file))
        if err != nil {
            log.Printf("Warning: Could not load old metric %s: %v", m.name, err)
            continue
        }

        newData, err := loadMetricFile(filepath.Join(newDir, m.file))
        if err != nil {
            log.Printf("Warning: Could not load new metric %s: %v", m.name, err)
            continue
        }

        oldValue := m.selector(oldData)
        newValue := m.selector(newData)
        expected := expectedValues[m.name]

        metricValues[m.name] = struct{ old, new, expected float64 }{oldValue, newValue, expected}
    }

    newMeetsActionExpected := false
    newReconcileIsZero := false
    isChurnScenario := scenario == "S5"
    if v, ok := metricValues["action_total"]; ok && v.expected > 0 {
        tolerance := v.expected * 0.15
        newMeetsActionExpected = math.Abs(v.new-v.expected) <= tolerance
    }
    if v, ok := metricValues["reconcile_total"]; ok {
        newReconcileIsZero = v.new == 0
    }

    for _, m := range metricsToCompare {
        v, ok := metricValues[m.name]
        if !ok {
            continue
        }

        comparison := compareMetricWithExpected(m.name, v.old, v.new, v.expected)

        if strings.HasPrefix(m.name, "rest_client_requests") {
            if newMeetsActionExpected && comparison.Status != "pass" {
                if oldMeets, ok := metricValues["action_total"]; ok {
                    oldTolerance := oldMeets.expected * 0.15
                    oldMissed := math.Abs(oldMeets.old-oldMeets.expected) > oldTolerance
                    if oldMissed {
                        comparison.Status = "pass"
                    }
                }
            }
            if newReconcileIsZero && comparison.Status != "pass" {
                comparison.Status = "pass"
            }
        }

        if isChurnScenario {
            if m.name == "errors_total" {
                if v.new < 50 && v.old < 50 {
                    comparison.Status = "pass"
                } else if v.new <= v.old*1.5 {
                    comparison.Status = "pass"
                }
            }
            if m.name == "action_total" || m.name == "reload_executed_total" {
                if v.old > 0 {
                    diff := math.Abs(v.new-v.old) / v.old * 100
                    if diff <= 20 {
                        comparison.Status = "pass"
                    }
                } else if v.new > 0 {
                    comparison.Status = "pass"
                }
            }
        }

        report.Comparisons = append(report.Comparisons, comparison)

        if comparison.Status == "pass" {
            report.PassCriteria = append(report.PassCriteria, m.name)
        } else if comparison.Status == "fail" {
            report.FailedCriteria = append(report.FailedCriteria, m.name)
        }
    }

    if len(report.FailedCriteria) == 0 {
        report.OverallStatus = "PASS"
        report.Summary = "All metrics within acceptable thresholds"
    } else {
        report.OverallStatus = "FAIL"
        report.Summary = fmt.Sprintf("%d metrics failed: %s",
            len(report.FailedCriteria),
            strings.Join(report.FailedCriteria, ", "))
    }

    return report, nil
}

func generateSingleVersionReport(report *ScenarioReport, dataDir, version, scenario string) (*ScenarioReport, error) {
    metricsToCollect := []struct {
        name     string
        file     string
        selector func(data PrometheusResponse) float64
    }{
        {"reconcile_total", "reloader_reconcile_total.json", sumAllValues},
        {"reconcile_duration_p50", "reconcile_p50.json", getFirstValue},
        {"reconcile_duration_p95", "reconcile_p95.json", getFirstValue},
        {"reconcile_duration_p99", "reconcile_p99.json", getFirstValue},
        {"action_total", "reloader_action_total.json", sumAllValues},
        {"action_latency_p50", "action_p50.json", getFirstValue},
        {"action_latency_p95", "action_p95.json", getFirstValue},
        {"action_latency_p99", "action_p99.json", getFirstValue},
        {"errors_total", "reloader_errors_total.json", sumAllValues},
        {"reload_executed_total", "reloader_reload_executed_total.json", sumSuccessValues},
        {"workloads_scanned_total", "reloader_workloads_scanned_total.json", sumAllValues},
        {"workloads_matched_total", "reloader_workloads_matched_total.json", sumAllValues},
        {"rest_client_requests_total", "rest_client_requests_total.json", getFirstValue},
        {"rest_client_requests_get", "rest_client_requests_get.json", getFirstValue},
        {"rest_client_requests_patch", "rest_client_requests_patch.json", getFirstValue},
        {"rest_client_requests_put", "rest_client_requests_put.json", getFirstValue},
        {"rest_client_requests_errors", "rest_client_requests_errors.json", getFirstValue},
        {"memory_rss_mb_avg", "memory_rss_bytes_avg.json", bytesToMB},
        {"memory_rss_mb_max", "memory_rss_bytes_max.json", bytesToMB},
        {"memory_heap_mb_avg", "memory_heap_bytes_avg.json", bytesToMB},
        {"memory_heap_mb_max", "memory_heap_bytes_max.json", bytesToMB},
        {"cpu_cores_avg", "cpu_usage_cores_avg.json", getFirstValue},
        {"cpu_cores_max", "cpu_usage_cores_max.json", getFirstValue},
        {"goroutines_avg", "goroutines_avg.json", getFirstValue},
        {"goroutines_max", "goroutines_max.json", getFirstValue},
        {"gc_pause_p99_ms", "gc_duration_seconds_p99.json", secondsToMs},
    }

    expectedValues := map[string]float64{
        "action_total":            float64(report.Expected.ActionTotal),
        "reload_executed_total":   float64(report.Expected.ReloadExecutedTotal),
        "reconcile_total":         float64(report.Expected.ReconcileTotal),
        "workloads_scanned_total": float64(report.Expected.WorkloadsScannedTotal),
        "workloads_matched_total": float64(report.Expected.WorkloadsMatchedTotal),
        "skipped_total":           float64(report.Expected.SkippedTotal),
    }

    for _, m := range metricsToCollect {
        data, err := loadMetricFile(filepath.Join(dataDir, m.file))
        if err != nil {
            log.Printf("Warning: Could not load metric %s: %v", m.name, err)
            continue
        }

        value := m.selector(data)
        expected := expectedValues[m.name]

        info := metricInfoMap[m.name]
        if info.unit == "" {
            info = metricInfo{unit: "count", isCounter: true}
        }

        displayName := m.name
        if info.unit != "count" {
            displayName = fmt.Sprintf("%s (%s)", m.name, info.unit)
        }

        status := "info"
        meetsExp := "-"

        if expected > 0 {
            meetsExp = meetsExpected(value, expected)
            threshold, ok := thresholds[m.name]
            if ok && threshold.metricType == ShouldMatch {
                if meetsExp == "✓" {
                    status = "pass"
                    report.PassCriteria = append(report.PassCriteria, m.name)
                } else {
                    status = "fail"
                    report.FailedCriteria = append(report.FailedCriteria, m.name)
                }
            }
        }

        if info.isCounter {
            value = math.Round(value)
        }

        report.Comparisons = append(report.Comparisons, MetricComparison{
            Name:             m.name,
            DisplayName:      displayName,
            Unit:             info.unit,
            IsCounter:        info.isCounter,
            OldValue:         0,
            NewValue:         value,
            Expected:         expected,
            OldMeetsExpected: "-",
            NewMeetsExpected: meetsExp,
            Status:           status,
        })
    }

    if len(report.FailedCriteria) == 0 {
        report.OverallStatus = "PASS"
        report.Summary = fmt.Sprintf("Single-version test (%s) completed successfully", version)
    } else {
        report.OverallStatus = "FAIL"
        report.Summary = fmt.Sprintf("%d metrics failed: %s",
            len(report.FailedCriteria),
            strings.Join(report.FailedCriteria, ", "))
    }

    return report, nil
}

func loadMetricFile(path string) (PrometheusResponse, error) {
    var resp PrometheusResponse
    data, err := os.ReadFile(path)
    if err != nil {
        return resp, err
    }
    err = json.Unmarshal(data, &resp)
    return resp, err
}

func sumAllValues(data PrometheusResponse) float64 {
    var sum float64
    for _, result := range data.Data.Result {
        if len(result.Value) >= 2 {
            if v, ok := result.Value[1].(string); ok {
                var f float64
                fmt.Sscanf(v, "%f", &f)
                sum += f
            }
        }
    }
    return sum
}

func sumSuccessValues(data PrometheusResponse) float64 {
    var sum float64
    for _, result := range data.Data.Result {
        if result.Metric["success"] == "true" {
            if len(result.Value) >= 2 {
                if v, ok := result.Value[1].(string); ok {
                    var f float64
                    fmt.Sscanf(v, "%f", &f)
                    sum += f
                }
            }
        }
    }
    return sum
}

func getFirstValue(data PrometheusResponse) float64 {
    if len(data.Data.Result) > 0 && len(data.Data.Result[0].Value) >= 2 {
        if v, ok := data.Data.Result[0].Value[1].(string); ok {
            var f float64
            fmt.Sscanf(v, "%f", &f)
            return f
        }
    }
    return 0
}

func bytesToMB(data PrometheusResponse) float64 {
    bytes := getFirstValue(data)
    return bytes / (1024 * 1024)
}

func secondsToMs(data PrometheusResponse) float64 {
    seconds := getFirstValue(data)
    return seconds * 1000
}

func meetsExpected(value, expected float64) string {
    if expected == 0 {
        return "-"
    }
    tolerance := expected * 0.15
    if math.Abs(value-expected) <= tolerance {
        return "✓"
    }
    return "✗"
}
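
// Worked example: with expected = 200 the tolerance is 200 * 0.15 = 30, so
// any observed value in [170, 230] is reported as "✓".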

func compareMetricWithExpected(name string, oldValue, newValue, expected float64) MetricComparison {
    diff := newValue - oldValue
    absDiff := math.Abs(diff)
    var diffPct float64
    if oldValue != 0 {
        diffPct = (diff / oldValue) * 100
    } else if newValue != 0 {
        diffPct = 100
    }

    threshold, ok := thresholds[name]
    if !ok {
        threshold = thresholdConfig{maxDiff: 10.0, metricType: ShouldMatch}
    }

    info := metricInfoMap[name]
    if info.unit == "" {
        info = metricInfo{unit: "count", isCounter: true}
    }
    displayName := name
    if info.unit != "count" {
        displayName = fmt.Sprintf("%s (%s)", name, info.unit)
    }

    if info.isCounter {
        oldValue = math.Round(oldValue)
        newValue = math.Round(newValue)
    }

    status := "pass"
    oldMeetsExp := meetsExpected(oldValue, expected)
    newMeetsExp := meetsExpected(newValue, expected)

    isNewMetric := info.isCounter && oldValue == 0 && newValue > 0 && expected == 0

    if isNewMetric {
        status = "info"
    } else if expected > 0 && threshold.metricType == ShouldMatch {
        if newMeetsExp == "✗" {
            status = "fail"
        }
    } else {
        switch threshold.metricType {
        case LowerIsBetter:
            if threshold.minAbsDiff > 0 && absDiff < threshold.minAbsDiff {
                status = "pass"
            } else if diffPct > threshold.maxDiff {
                status = "fail"
            }
        case HigherIsBetter:
            if diffPct < -threshold.maxDiff {
                status = "fail"
            }
        case ShouldMatch:
            if math.Abs(diffPct) > threshold.maxDiff {
                status = "fail"
            }
        case Informational:
            status = "info"
        }
    }

    return MetricComparison{
        Name:             name,
        DisplayName:      displayName,
        Unit:             info.unit,
        IsCounter:        info.isCounter,
        Expected:         expected,
        OldMeetsExpected: oldMeetsExp,
        NewMeetsExpected: newMeetsExp,
        OldValue:         oldValue,
        NewValue:         newValue,
        Difference:       diff,
        DiffPct:          diffPct,
        Status:           status,
        Threshold:        threshold.maxDiff,
    }
}

func renderScenarioReport(report *ScenarioReport) string {
    var sb strings.Builder

    isSingleVersion := true
    for _, c := range report.Comparisons {
        if c.OldValue != 0 {
            isSingleVersion = false
            break
        }
    }

    sb.WriteString("\n")
    sb.WriteString("================================================================================\n")
    if isSingleVersion {
        sb.WriteString(" RELOADER TEST REPORT\n")
    } else {
        sb.WriteString(" RELOADER A/B COMPARISON REPORT\n")
    }
    sb.WriteString("================================================================================\n\n")

    fmt.Fprintf(&sb, "Scenario: %s\n", report.Scenario)
    fmt.Fprintf(&sb, "Generated: %s\n", report.Timestamp.Format("2006-01-02 15:04:05"))
    fmt.Fprintf(&sb, "Status: %s\n", report.OverallStatus)
    fmt.Fprintf(&sb, "Summary: %s\n", report.Summary)

    if report.TestDescription != "" {
        fmt.Fprintf(&sb, "Test: %s\n", report.TestDescription)
    }

    if report.Expected.ActionTotal > 0 {
        sb.WriteString("\n--------------------------------------------------------------------------------\n")
        sb.WriteString(" EXPECTED VALUES\n")
        sb.WriteString("--------------------------------------------------------------------------------\n")
        fmt.Fprintf(&sb, "Expected Action Total: %d\n", report.Expected.ActionTotal)
        fmt.Fprintf(&sb, "Expected Reload Executed Total: %d\n", report.Expected.ReloadExecutedTotal)
        if report.Expected.SkippedTotal > 0 {
            fmt.Fprintf(&sb, "Expected Skipped Total: %d\n", report.Expected.SkippedTotal)
        }
    }

    sb.WriteString("\n--------------------------------------------------------------------------------\n")
    if isSingleVersion {
        sb.WriteString(" METRICS\n")
    } else {
        sb.WriteString(" METRIC COMPARISONS\n")
    }
    sb.WriteString("--------------------------------------------------------------------------------\n")

    if isSingleVersion {
        sb.WriteString("(✓ = meets expected value within 15%)\n\n")
        fmt.Fprintf(&sb, "%-32s %12s %10s %5s %8s\n",
            "Metric", "Value", "Expected", "Met?", "Status")
        fmt.Fprintf(&sb, "%-32s %12s %10s %5s %8s\n",
            "------", "-----", "--------", "----", "------")

        for _, c := range report.Comparisons {
            if c.IsCounter {
                if c.Expected > 0 {
                    fmt.Fprintf(&sb, "%-32s %12.0f %10.0f %5s %8s\n",
                        c.DisplayName, c.NewValue, c.Expected,
                        c.NewMeetsExpected, c.Status)
                } else {
                    fmt.Fprintf(&sb, "%-32s %12.0f %10s %5s %8s\n",
                        c.DisplayName, c.NewValue, "-",
                        c.NewMeetsExpected, c.Status)
                }
            } else {
                fmt.Fprintf(&sb, "%-32s %12.4f %10s %5s %8s\n",
                    c.DisplayName, c.NewValue, "-",
                    c.NewMeetsExpected, c.Status)
            }
        }
    } else {
        sb.WriteString("(Old✓/New✓ = meets expected value within 15%)\n\n")

        fmt.Fprintf(&sb, "%-32s %12s %12s %10s %5s %5s %8s\n",
            "Metric", "Old", "New", "Expected", "Old✓", "New✓", "Status")
        fmt.Fprintf(&sb, "%-32s %12s %12s %10s %5s %5s %8s\n",
            "------", "---", "---", "--------", "----", "----", "------")

        for _, c := range report.Comparisons {
            if c.IsCounter {
                if c.Expected > 0 {
                    fmt.Fprintf(&sb, "%-32s %12.0f %12.0f %10.0f %5s %5s %8s\n",
                        c.DisplayName, c.OldValue, c.NewValue, c.Expected,
                        c.OldMeetsExpected, c.NewMeetsExpected, c.Status)
                } else {
                    fmt.Fprintf(&sb, "%-32s %12.0f %12.0f %10s %5s %5s %8s\n",
                        c.DisplayName, c.OldValue, c.NewValue, "-",
                        c.OldMeetsExpected, c.NewMeetsExpected, c.Status)
                }
            } else {
                fmt.Fprintf(&sb, "%-32s %12.4f %12.4f %10s %5s %5s %8s\n",
                    c.DisplayName, c.OldValue, c.NewValue, "-",
                    c.OldMeetsExpected, c.NewMeetsExpected, c.Status)
            }
        }
    }

    sb.WriteString("\n--------------------------------------------------------------------------------\n")
    sb.WriteString(" PASS/FAIL CRITERIA\n")
    sb.WriteString("--------------------------------------------------------------------------------\n\n")

    fmt.Fprintf(&sb, "Passed (%d):\n", len(report.PassCriteria))
    for _, p := range report.PassCriteria {
        fmt.Fprintf(&sb, " ✓ %s\n", p)
    }

    if len(report.FailedCriteria) > 0 {
        fmt.Fprintf(&sb, "\nFailed (%d):\n", len(report.FailedCriteria))
        for _, f := range report.FailedCriteria {
            fmt.Fprintf(&sb, " ✗ %s\n", f)
        }
    }

    sb.WriteString("\n--------------------------------------------------------------------------------\n")
    sb.WriteString(" THRESHOLDS USED\n")
    sb.WriteString("--------------------------------------------------------------------------------\n\n")

    fmt.Fprintf(&sb, "%-35s %10s %15s %18s\n",
        "Metric", "Max Diff%", "Min Abs Diff", "Direction")
    fmt.Fprintf(&sb, "%-35s %10s %15s %18s\n",
        "------", "---------", "------------", "---------")

    var names []string
    for name := range thresholds {
        names = append(names, name)
    }
    sort.Strings(names)

    for _, name := range names {
        t := thresholds[name]
        var direction string
        switch t.metricType {
        case LowerIsBetter:
            direction = "lower is better"
        case HigherIsBetter:
            direction = "higher is better"
        case ShouldMatch:
            direction = "should match"
        case Informational:
            direction = "info only"
        }
        minAbsDiff := "-"
        if t.minAbsDiff > 0 {
            minAbsDiff = fmt.Sprintf("%.1f", t.minAbsDiff)
        }
        fmt.Fprintf(&sb, "%-35s %9.1f%% %15s %18s\n",
            name, t.maxDiff, minAbsDiff, direction)
    }

    sb.WriteString("\n================================================================================\n")

    return sb.String()
}

func renderScenarioReportJSON(report *ScenarioReport) string {
    data, err := json.MarshalIndent(report, "", " ")
    if err != nil {
        return fmt.Sprintf(`{"error": "%s"}`, err.Error())
    }
    return string(data)
}

func renderScenarioReportMarkdown(report *ScenarioReport) string {
    var sb strings.Builder

    emoji := "✅"
    if report.OverallStatus != "PASS" {
        emoji = "❌"
    }

    sb.WriteString(fmt.Sprintf("## %s %s: %s\n\n", emoji, report.Scenario, report.OverallStatus))

    if report.TestDescription != "" {
        sb.WriteString(fmt.Sprintf("> %s\n\n", report.TestDescription))
    }

    sb.WriteString("| Metric | Value | Expected | Status |\n")
    sb.WriteString("|--------|------:|:--------:|:------:|\n")

    keyMetrics := []string{"action_total", "reload_executed_total", "errors_total", "reconcile_total"}
    for _, name := range keyMetrics {
        for _, c := range report.Comparisons {
            if c.Name == name {
                value := fmt.Sprintf("%.0f", c.NewValue)
                expected := "-"
                if c.Expected > 0 {
                    expected = fmt.Sprintf("%.0f", c.Expected)
                }
                status := "✅"
                if c.Status == "fail" {
                    status = "❌"
                } else if c.Status == "info" {
                    status = "ℹ️"
                }
                sb.WriteString(fmt.Sprintf("| %s | %s | %s | %s |\n", c.DisplayName, value, expected, status))
                break
            }
        }
    }

    return sb.String()
}
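To make the comparison rules concrete, a small illustrative example in the same package (hypothetical values only; it reuses compareMetricWithExpected and the thresholds table defined above):

package cmd

import "fmt"

// ExampleCompareMetric is illustrative only and uses made-up values.
func ExampleCompareMetric() {
    // errors_total is LowerIsBetter with maxDiff 0%, so any percentage
    // regression between old and new fails: 2 -> 5 is +150%.
    c := compareMetricWithExpected("errors_total", 2, 5, 0)
    fmt.Println(c.Status)

    // action_total falls back to ShouldMatch with a 15% band when no
    // expected value is set; +10% stays inside the band.
    c = compareMetricWithExpected("action_total", 100, 110, 0)
    fmt.Println(c.Status)

    // Output:
    // fail
    // pass
}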
test/loadtest/internal/cmd/root.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package cmd

import (
    "os"

    "github.com/spf13/cobra"
)

const (
    // DefaultClusterName is the default kind cluster name.
    DefaultClusterName = "reloader-loadtest"
    // TestNamespace is the namespace used for test resources.
    TestNamespace = "reloader-test"
)

// OutputFormat defines the output format for reports.
type OutputFormat string

const (
    OutputFormatText     OutputFormat = "text"
    OutputFormatJSON     OutputFormat = "json"
    OutputFormatMarkdown OutputFormat = "markdown"
)

// rootCmd is the base command.
var rootCmd = &cobra.Command{
    Use:   "loadtest",
    Short: "Reloader Load Test CLI",
    Long:  `A CLI tool for running A/B comparison load tests on Reloader.`,
}

func init() {
    rootCmd.AddCommand(runCmd)
    rootCmd.AddCommand(reportCmd)
    rootCmd.AddCommand(summaryCmd)
}

// Execute runs the root command.
func Execute() {
    if err := rootCmd.Execute(); err != nil {
        os.Exit(1)
    }
}
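The CLI entrypoint itself is not shown in this excerpt; a minimal wiring would look like the following sketch (the main.go location is an assumption):

// test/loadtest/main.go (assumed location, illustrative only)
package main

import "github.com/stakater/Reloader/test/loadtest/internal/cmd"

func main() {
    cmd.Execute() // dispatches to the run, report, and summary subcommands registered above
}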
test/loadtest/internal/cmd/run.go (new file, 648 lines)
@@ -0,0 +1,648 @@
package cmd

import (
    "context"
    "fmt"
    "log"
    "os"
    "os/exec"
    "os/signal"
    "path/filepath"
    "strings"
    "sync"
    "syscall"
    "time"

    "github.com/spf13/cobra"
    "github.com/stakater/Reloader/test/loadtest/internal/cluster"
    "github.com/stakater/Reloader/test/loadtest/internal/prometheus"
    "github.com/stakater/Reloader/test/loadtest/internal/reloader"
    "github.com/stakater/Reloader/test/loadtest/internal/scenarios"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

// RunConfig holds CLI configuration for the run command.
type RunConfig struct {
    OldImage      string
    NewImage      string
    Scenario      string
    Duration      int
    SkipCluster   bool
    SkipImageLoad bool
    ClusterName   string
    ResultsDir    string
    ManifestsDir  string
    Parallelism   int
}

// workerContext holds all resources for a single worker (cluster + prometheus).
type workerContext struct {
    id          int
    clusterMgr  *cluster.Manager
    promMgr     *prometheus.Manager
    kubeClient  kubernetes.Interface
    kubeContext string
    runtime     string
}

var runCfg RunConfig

var runCmd = &cobra.Command{
    Use:   "run",
    Short: "Run A/B comparison tests",
    Long: `Run load tests comparing old and new versions of Reloader.

Examples:
  # Compare two images
  loadtest run --old-image=stakater/reloader:v1.0.0 --new-image=stakater/reloader:v1.1.0

  # Run specific scenario
  loadtest run --old-image=stakater/reloader:v1.0.0 --new-image=localhost/reloader:dev --scenario=S2

  # Test single image (no comparison)
  loadtest run --new-image=localhost/reloader:test

  # Run all scenarios in parallel on 4 clusters
  loadtest run --new-image=localhost/reloader:test --parallelism=4`,
    Run: func(cmd *cobra.Command, args []string) {
        runCommand()
    },
}

func init() {
    runCmd.Flags().StringVar(&runCfg.OldImage, "old-image", "", "Container image for \"old\" version (required for comparison)")
    runCmd.Flags().StringVar(&runCfg.NewImage, "new-image", "", "Container image for \"new\" version (required for comparison)")
    runCmd.Flags().StringVar(&runCfg.Scenario, "scenario", "all", "Test scenario: S1-S13 or \"all\"")
    runCmd.Flags().IntVar(&runCfg.Duration, "duration", 60, "Test duration in seconds")
    runCmd.Flags().IntVar(&runCfg.Parallelism, "parallelism", 1, "Run N scenarios in parallel on N clusters")
    runCmd.Flags().BoolVar(&runCfg.SkipCluster, "skip-cluster", false, "Skip kind cluster creation (use existing)")
    runCmd.Flags().BoolVar(&runCfg.SkipImageLoad, "skip-image-load", false, "Skip loading images into kind (use when images already loaded)")
    runCmd.Flags().StringVar(&runCfg.ClusterName, "cluster-name", DefaultClusterName, "Kind cluster name")
    runCmd.Flags().StringVar(&runCfg.ResultsDir, "results-dir", "./results", "Directory for results")
    runCmd.Flags().StringVar(&runCfg.ManifestsDir, "manifests-dir", "", "Directory containing manifests (auto-detected if not set)")
}

func runCommand() {
    if runCfg.ManifestsDir == "" {
        execPath, _ := os.Executable()
        execDir := filepath.Dir(execPath)
        runCfg.ManifestsDir = filepath.Join(execDir, "..", "..", "manifests")
        if _, err := os.Stat(runCfg.ManifestsDir); os.IsNotExist(err) {
            runCfg.ManifestsDir = "./manifests"
        }
    }

    if runCfg.Parallelism < 1 {
        runCfg.Parallelism = 1
    }

    if runCfg.OldImage == "" && runCfg.NewImage == "" {
        log.Fatal("At least one of --old-image or --new-image is required")
    }

    runOld := runCfg.OldImage != ""
    runNew := runCfg.NewImage != ""
    runBoth := runOld && runNew

    log.Printf("Configuration:")
    log.Printf("  Scenario:    %s", runCfg.Scenario)
    log.Printf("  Duration:    %ds", runCfg.Duration)
    log.Printf("  Parallelism: %d", runCfg.Parallelism)
    if runCfg.OldImage != "" {
        log.Printf("  Old image:   %s", runCfg.OldImage)
    }
    if runCfg.NewImage != "" {
        log.Printf("  New image:   %s", runCfg.NewImage)
    }

    runtime, err := cluster.DetectContainerRuntime()
    if err != nil {
        log.Fatalf("Failed to detect container runtime: %v", err)
    }
    log.Printf("  Container runtime: %s", runtime)

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    sigCh := make(chan os.Signal, 1)
    signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
    go func() {
        <-sigCh
        log.Println("Received shutdown signal...")
        cancel()
    }()

    var scenariosToRun []string
    if runCfg.Scenario == "all" {
        scenariosToRun = []string{"S1", "S2", "S3", "S4", "S5", "S6", "S7", "S8", "S9", "S10", "S11", "S12", "S13"}
    } else {
        // Split comma-separated scenarios (e.g., "S1,S4,S6")
        for _, s := range strings.Split(runCfg.Scenario, ",") {
            if trimmed := strings.TrimSpace(s); trimmed != "" {
                scenariosToRun = append(scenariosToRun, trimmed)
            }
        }
    }

    if runCfg.SkipCluster && runCfg.Parallelism > 1 {
        log.Fatal("--skip-cluster is not supported with --parallelism > 1")
    }

    if runCfg.Parallelism > 1 {
        runParallel(ctx, runCfg, scenariosToRun, runtime, runOld, runNew, runBoth)
        return
    }

    runSequential(ctx, runCfg, scenariosToRun, runtime, runOld, runNew, runBoth)
}

func runSequential(ctx context.Context, cfg RunConfig, scenariosToRun []string, runtime string, runOld, runNew, runBoth bool) {
    clusterMgr := cluster.NewManager(cluster.Config{
        Name:             cfg.ClusterName,
        ContainerRuntime: runtime,
    })

    if cfg.SkipCluster {
        log.Printf("Skipping cluster creation (using existing cluster: %s)", cfg.ClusterName)
        if !clusterMgr.Exists() {
            log.Fatalf("Cluster %s does not exist. Remove --skip-cluster to create it.", cfg.ClusterName)
        }
    } else {
        log.Println("Creating kind cluster...")
        if err := clusterMgr.Create(ctx); err != nil {
            log.Fatalf("Failed to create cluster: %v", err)
        }
    }

    promManifest := filepath.Join(cfg.ManifestsDir, "prometheus.yaml")
    promMgr := prometheus.NewManager(promManifest)

    log.Println("Installing Prometheus...")
    if err := promMgr.Deploy(ctx); err != nil {
        log.Fatalf("Failed to deploy Prometheus: %v", err)
    }

    if err := promMgr.StartPortForward(ctx); err != nil {
        log.Fatalf("Failed to start Prometheus port-forward: %v", err)
    }
    defer promMgr.StopPortForward()

    if cfg.SkipImageLoad {
        log.Println("Skipping image loading (--skip-image-load)")
    } else {
        log.Println("Loading images into kind cluster...")
        if runOld {
            log.Printf("Loading old image: %s", cfg.OldImage)
            if err := clusterMgr.LoadImage(ctx, cfg.OldImage); err != nil {
                log.Fatalf("Failed to load old image: %v", err)
            }
        }
        if runNew {
            log.Printf("Loading new image: %s", cfg.NewImage)
            if err := clusterMgr.LoadImage(ctx, cfg.NewImage); err != nil {
                log.Fatalf("Failed to load new image: %v", err)
            }
        }

        log.Println("Pre-loading test images...")
        testImage := "gcr.io/google-containers/busybox:1.27"
        clusterMgr.LoadImage(ctx, testImage)
    }

    kubeClient, err := getKubeClient("")
    if err != nil {
        log.Fatalf("Failed to create kubernetes client: %v", err)
    }

    for _, scenarioID := range scenariosToRun {
        log.Printf("========================================")
        log.Printf("=== Starting scenario %s ===", scenarioID)
        log.Printf("========================================")

        cleanupTestNamespaces(ctx, "")
        reloader.CleanupByVersion(ctx, "old", "")
        reloader.CleanupByVersion(ctx, "new", "")

        if err := promMgr.Reset(ctx); err != nil {
            log.Printf("Warning: failed to reset Prometheus: %v", err)
        }

        createTestNamespace(ctx, "")

        if runOld {
            oldMgr := reloader.NewManager(reloader.Config{
                Version: "old",
                Image:   cfg.OldImage,
            })

            if err := oldMgr.Deploy(ctx); err != nil {
                log.Printf("Failed to deploy old Reloader: %v", err)
                continue
            }

            if err := promMgr.WaitForTarget(ctx, oldMgr.Job(), 60*time.Second); err != nil {
                log.Printf("Warning: %v", err)
                log.Println("Proceeding anyway, but metrics may be incomplete")
            }

            runScenario(ctx, kubeClient, scenarioID, "old", cfg.OldImage, cfg.Duration, cfg.ResultsDir)
            collectMetrics(ctx, promMgr, oldMgr.Job(), scenarioID, "old", cfg.ResultsDir)
            collectLogs(ctx, oldMgr, scenarioID, "old", cfg.ResultsDir)

            if runBoth {
                cleanupTestNamespaces(ctx, "")
                oldMgr.Cleanup(ctx)
                promMgr.Reset(ctx)
                createTestNamespace(ctx, "")
            }
        }

        if runNew {
            newMgr := reloader.NewManager(reloader.Config{
                Version: "new",
                Image:   cfg.NewImage,
            })

            if err := newMgr.Deploy(ctx); err != nil {
                log.Printf("Failed to deploy new Reloader: %v", err)
                continue
            }

            if err := promMgr.WaitForTarget(ctx, newMgr.Job(), 60*time.Second); err != nil {
                log.Printf("Warning: %v", err)
                log.Println("Proceeding anyway, but metrics may be incomplete")
            }

            runScenario(ctx, kubeClient, scenarioID, "new", cfg.NewImage, cfg.Duration, cfg.ResultsDir)
            collectMetrics(ctx, promMgr, newMgr.Job(), scenarioID, "new", cfg.ResultsDir)
            collectLogs(ctx, newMgr, scenarioID, "new", cfg.ResultsDir)
        }

        generateReport(scenarioID, cfg.ResultsDir, runBoth)
        log.Printf("=== Scenario %s complete ===", scenarioID)
    }

    log.Println("Load test complete!")
    log.Printf("Results available in: %s", cfg.ResultsDir)
}

func runParallel(ctx context.Context, cfg RunConfig, scenariosToRun []string, runtime string, runOld, runNew, runBoth bool) {
    numWorkers := cfg.Parallelism
    if numWorkers > len(scenariosToRun) {
        numWorkers = len(scenariosToRun)
        log.Printf("Reducing parallelism to %d (number of scenarios)", numWorkers)
    }

    log.Printf("Starting parallel execution with %d workers", numWorkers)

    workers := make([]*workerContext, numWorkers)
    var setupWg sync.WaitGroup
    setupErrors := make(chan error, numWorkers)

    log.Println("Setting up worker clusters...")
    for i := range numWorkers {
        setupWg.Add(1)
        go func(workerID int) {
            defer setupWg.Done()
            worker, err := setupWorker(ctx, cfg, workerID, runtime, runOld, runNew)
            if err != nil {
                setupErrors <- fmt.Errorf("worker %d setup failed: %w", workerID, err)
                return
            }
            workers[workerID] = worker
        }(i)
    }

    setupWg.Wait()
    close(setupErrors)

    for err := range setupErrors {
        log.Printf("Error: %v", err)
    }

    readyWorkers := 0
    for _, w := range workers {
        if w != nil {
            readyWorkers++
        }
    }
    if readyWorkers == 0 {
        log.Fatal("No workers ready, aborting")
    }
    if readyWorkers < numWorkers {
        log.Printf("Warning: only %d/%d workers ready", readyWorkers, numWorkers)
    }

    defer func() {
        log.Println("Cleaning up worker clusters...")
        for _, w := range workers {
            if w != nil {
                w.promMgr.StopPortForward()
            }
        }
    }()

    scenarioCh := make(chan string, len(scenariosToRun))
    for _, s := range scenariosToRun {
        scenarioCh <- s
    }
    close(scenarioCh)

    var resultsMu sync.Mutex
    completedScenarios := make([]string, 0, len(scenariosToRun))

    var wg sync.WaitGroup
    for _, worker := range workers {
        if worker == nil {
            continue
        }
        wg.Add(1)
        go func(w *workerContext) {
            defer wg.Done()
            for scenarioID := range scenarioCh {
                select {
                case <-ctx.Done():
                    return
                default:
                }

                log.Printf("[Worker %d] Starting scenario %s", w.id, scenarioID)

                cleanupTestNamespaces(ctx, w.kubeContext)
                reloader.CleanupByVersion(ctx, "old", w.kubeContext)
                reloader.CleanupByVersion(ctx, "new", w.kubeContext)

                if err := w.promMgr.Reset(ctx); err != nil {
                    log.Printf("[Worker %d] Warning: failed to reset Prometheus: %v", w.id, err)
                }

                createTestNamespace(ctx, w.kubeContext)

                if runOld {
                    runVersionOnWorker(ctx, w, cfg, scenarioID, "old", cfg.OldImage, runBoth)
                }

                if runNew {
                    runVersionOnWorker(ctx, w, cfg, scenarioID, "new", cfg.NewImage, false)
                }

                generateReport(scenarioID, cfg.ResultsDir, runBoth)

                resultsMu.Lock()
                completedScenarios = append(completedScenarios, scenarioID)
                resultsMu.Unlock()

                log.Printf("[Worker %d] Scenario %s complete", w.id, scenarioID)
            }
        }(worker)
    }

    wg.Wait()

    log.Println("Load test complete!")
    log.Printf("Completed %d/%d scenarios", len(completedScenarios), len(scenariosToRun))
    log.Printf("Results available in: %s", cfg.ResultsDir)
}

func setupWorker(ctx context.Context, cfg RunConfig, workerID int, runtime string, runOld, runNew bool) (*workerContext, error) {
    workerName := fmt.Sprintf("%s-%d", DefaultClusterName, workerID)
    promPort := 9091 + workerID

    log.Printf("[Worker %d] Creating cluster %s (ports %d/%d)...", workerID, workerName, 8080+workerID, 8443+workerID)

    clusterMgr := cluster.NewManager(cluster.Config{
        Name:             workerName,
        ContainerRuntime: runtime,
        PortOffset:       workerID,
    })

    if err := clusterMgr.Create(ctx); err != nil {
        return nil, fmt.Errorf("creating cluster: %w", err)
    }

    kubeContext := clusterMgr.Context()

    promManifest := filepath.Join(cfg.ManifestsDir, "prometheus.yaml")
    promMgr := prometheus.NewManagerWithPort(promManifest, promPort, kubeContext)

    log.Printf("[Worker %d] Installing Prometheus (port %d)...", workerID, promPort)
    if err := promMgr.Deploy(ctx); err != nil {
        return nil, fmt.Errorf("deploying prometheus: %w", err)
    }

    if err := promMgr.StartPortForward(ctx); err != nil {
        return nil, fmt.Errorf("starting prometheus port-forward: %w", err)
    }

    if cfg.SkipImageLoad {
        log.Printf("[Worker %d] Skipping image loading (--skip-image-load)", workerID)
    } else {
        log.Printf("[Worker %d] Loading images...", workerID)
        if runOld {
            if err := clusterMgr.LoadImage(ctx, cfg.OldImage); err != nil {
                log.Printf("[Worker %d] Warning: failed to load old image: %v", workerID, err)
            }
        }
        if runNew {
            if err := clusterMgr.LoadImage(ctx, cfg.NewImage); err != nil {
                log.Printf("[Worker %d] Warning: failed to load new image: %v", workerID, err)
            }
        }

        testImage := "gcr.io/google-containers/busybox:1.27"
        clusterMgr.LoadImage(ctx, testImage)
    }

    kubeClient, err := getKubeClient(kubeContext)
    if err != nil {
        return nil, fmt.Errorf("creating kubernetes client: %w", err)
    }

    log.Printf("[Worker %d] Ready", workerID)
    return &workerContext{
        id:          workerID,
        clusterMgr:  clusterMgr,
        promMgr:     promMgr,
        kubeClient:  kubeClient,
        kubeContext: kubeContext,
        runtime:     runtime,
    }, nil
}

func runVersionOnWorker(ctx context.Context, w *workerContext, cfg RunConfig, scenarioID, version, image string, cleanupAfter bool) {
    mgr := reloader.NewManager(reloader.Config{
        Version: version,
        Image:   image,
    })
    mgr.SetKubeContext(w.kubeContext)

    if err := mgr.Deploy(ctx); err != nil {
        log.Printf("[Worker %d] Failed to deploy %s Reloader: %v", w.id, version, err)
        return
    }

    if err := w.promMgr.WaitForTarget(ctx, mgr.Job(), 60*time.Second); err != nil {
        log.Printf("[Worker %d] Warning: %v", w.id, err)
        log.Printf("[Worker %d] Proceeding anyway, but metrics may be incomplete", w.id)
    }

    runScenario(ctx, w.kubeClient, scenarioID, version, image, cfg.Duration, cfg.ResultsDir)
    collectMetrics(ctx, w.promMgr, mgr.Job(), scenarioID, version, cfg.ResultsDir)
    collectLogs(ctx, mgr, scenarioID, version, cfg.ResultsDir)

    if cleanupAfter {
        cleanupTestNamespaces(ctx, w.kubeContext)
        mgr.Cleanup(ctx)
        w.promMgr.Reset(ctx)
        createTestNamespace(ctx, w.kubeContext)
    }
}

func runScenario(ctx context.Context, client kubernetes.Interface, scenarioID, version, image string, duration int, resultsDir string) {
    runner, ok := scenarios.Registry[scenarioID]
    if !ok {
        log.Printf("Unknown scenario: %s", scenarioID)
        return
    }

    if s6, ok := runner.(*scenarios.ControllerRestartScenario); ok {
        s6.ReloaderVersion = version
    }

    if s11, ok := runner.(*scenarios.AnnotationStrategyScenario); ok {
        s11.Image = image
    }

    log.Printf("Running scenario %s (%s): %s", scenarioID, version, runner.Description())

    if ctx.Err() != nil {
        log.Printf("WARNING: Parent context already done: %v", ctx.Err())
    }

    timeout := time.Duration(duration)*time.Second + 5*time.Minute
    log.Printf("Creating scenario context with timeout: %v (duration=%ds)", timeout, duration)

    scenarioCtx, cancel := context.WithTimeout(ctx, timeout)
    defer cancel()

    expected, err := runner.Run(scenarioCtx, client, TestNamespace, time.Duration(duration)*time.Second)
    if err != nil {
        log.Printf("Scenario %s failed: %v", scenarioID, err)
    }

    scenarios.WriteExpectedMetrics(scenarioID, resultsDir, expected)
}

func collectMetrics(ctx context.Context, promMgr *prometheus.Manager, job, scenarioID, version, resultsDir string) {
    log.Printf("Waiting 5s for Reloader to finish processing events...")
    time.Sleep(5 * time.Second)

    log.Printf("Waiting 8s for Prometheus to scrape final metrics...")
    time.Sleep(8 * time.Second)

    log.Printf("Collecting metrics for %s...", version)
    outputDir := filepath.Join(resultsDir, scenarioID, version)
    if err := promMgr.CollectMetrics(ctx, job, outputDir, scenarioID); err != nil {
        log.Printf("Failed to collect metrics: %v", err)
    }
}

func collectLogs(ctx context.Context, mgr *reloader.Manager, scenarioID, version, resultsDir string) {
    log.Printf("Collecting logs for %s...", version)
    logPath := filepath.Join(resultsDir, scenarioID, version, "reloader.log")
    if err := mgr.CollectLogs(ctx, logPath); err != nil {
        log.Printf("Failed to collect logs: %v", err)
    }
}

func generateReport(scenarioID, resultsDir string, isComparison bool) {
    if isComparison {
        log.Println("Generating comparison report...")
    } else {
        log.Println("Generating single-version report...")
    }

    reportPath := filepath.Join(resultsDir, scenarioID, "report.txt")

    cmd := exec.Command(os.Args[0], "report",
        fmt.Sprintf("--scenario=%s", scenarioID),
        fmt.Sprintf("--results-dir=%s", resultsDir),
        fmt.Sprintf("--output=%s", reportPath))
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    cmd.Run()

    if data, err := os.ReadFile(reportPath); err == nil {
        fmt.Println(string(data))
    }

    log.Printf("Report saved to: %s", reportPath)
}

func getKubeClient(kubeContext string) (kubernetes.Interface, error) {
    kubeconfig := os.Getenv("KUBECONFIG")
    if kubeconfig == "" {
        home, _ := os.UserHomeDir()
        kubeconfig = filepath.Join(home, ".kube", "config")
    }

    loadingRules := &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}
    configOverrides := &clientcmd.ConfigOverrides{}
    if kubeContext != "" {
        configOverrides.CurrentContext = kubeContext
    }

    kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
    config, err := kubeConfig.ClientConfig()
    if err != nil {
        return nil, err
    }

    return kubernetes.NewForConfig(config)
}

func createTestNamespace(ctx context.Context, kubeContext string) {
    args := []string{"create", "namespace", TestNamespace, "--dry-run=client", "-o", "yaml"}
    if kubeContext != "" {
        args = append([]string{"--context", kubeContext}, args...)
    }
    cmd := exec.CommandContext(ctx, "kubectl", args...)
    out, _ := cmd.Output()

    applyArgs := []string{"apply", "-f", "-"}
    if kubeContext != "" {
        applyArgs = append([]string{"--context", kubeContext}, applyArgs...)
    }
    applyCmd := exec.CommandContext(ctx, "kubectl", applyArgs...)
    applyCmd.Stdin = strings.NewReader(string(out))
    applyCmd.Run()
}

func cleanupTestNamespaces(ctx context.Context, kubeContext string) {
    log.Println("Cleaning up test resources...")

    namespaces := []string{TestNamespace}
    for i := range 10 {
        namespaces = append(namespaces, fmt.Sprintf("%s-%d", TestNamespace, i))
    }

    for _, ns := range namespaces {
        args := []string{"delete", "namespace", ns, "--wait=false", "--ignore-not-found"}
        if kubeContext != "" {
            args = append([]string{"--context", kubeContext}, args...)
        }
        exec.CommandContext(ctx, "kubectl", args...).Run()
    }

    time.Sleep(2 * time.Second)

    for _, ns := range namespaces {
        args := []string{"delete", "pods", "--all", "-n", ns, "--grace-period=0", "--force"}
        if kubeContext != "" {
            args = append([]string{"--context", kubeContext}, args...)
        }
        exec.CommandContext(ctx, "kubectl", args...).Run()
    }
}
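Given the path construction above (filepath.Join(resultsDir, scenarioID, version) for metrics and logs, plus the per-scenario report.txt written by generateReport), a comparison run of S1 leaves a directory layout like this sketch (individual metric file names come from the Prometheus collector; only a few are shown):

results/
  S1/
    old/
      reloader_action_total.json
      reloader_errors_total.json
      reloader.log
    new/
      reloader_action_total.json
      reloader_errors_total.json
      reloader.log
    report.txt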
test/loadtest/internal/cmd/summary.go (new file, 251 lines)
@@ -0,0 +1,251 @@
package cmd

import (
    "encoding/json"
    "fmt"
    "log"
    "os"
    "sort"
    "strings"
    "time"

    "github.com/spf13/cobra"
)

var (
    summaryResultsDir string
    summaryOutputFile string
    summaryFormat     string
    summaryTestType   string
)

var summaryCmd = &cobra.Command{
    Use:   "summary",
    Short: "Generate summary across all scenarios (for CI)",
    Long: `Generate an aggregated summary report across all test scenarios.

Examples:
  # Generate markdown summary for CI
  loadtest summary --results-dir=./results --format=markdown`,
    Run: func(cmd *cobra.Command, args []string) {
        summaryCommand()
    },
}

func init() {
    summaryCmd.Flags().StringVar(&summaryResultsDir, "results-dir", "./results", "Directory containing results")
    summaryCmd.Flags().StringVar(&summaryOutputFile, "output", "", "Output file (default: stdout)")
    summaryCmd.Flags().StringVar(&summaryFormat, "format", "markdown", "Output format: text, json, markdown")
    summaryCmd.Flags().StringVar(&summaryTestType, "test-type", "full", "Test type label: quick, full")
}

// SummaryReport aggregates results from multiple scenarios.
type SummaryReport struct {
    Timestamp  time.Time         `json:"timestamp"`
    TestType   string            `json:"test_type"`
    PassCount  int               `json:"pass_count"`
    FailCount  int               `json:"fail_count"`
    TotalCount int               `json:"total_count"`
    Scenarios  []ScenarioSummary `json:"scenarios"`
}

// ScenarioSummary provides a brief summary of a single scenario.
type ScenarioSummary struct {
    ID          string  `json:"id"`
    Status      string  `json:"status"`
    Description string  `json:"description"`
    ActionTotal float64 `json:"action_total"`
    ActionExp   float64 `json:"action_expected"`
    ErrorsTotal float64 `json:"errors_total"`
}

func summaryCommand() {
    summary, err := generateSummaryReport(summaryResultsDir, summaryTestType)
    if err != nil {
        log.Fatalf("Failed to generate summary: %v", err)
    }

    var output string
    switch OutputFormat(summaryFormat) {
    case OutputFormatJSON:
        output = renderSummaryJSON(summary)
    case OutputFormatText:
        output = renderSummaryText(summary)
    default:
        output = renderSummaryMarkdown(summary)
    }

    if summaryOutputFile != "" {
        if err := os.WriteFile(summaryOutputFile, []byte(output), 0644); err != nil {
            log.Fatalf("Failed to write output file: %v", err)
        }
        log.Printf("Summary written to %s", summaryOutputFile)
    } else {
        fmt.Print(output)
    }

    if summary.FailCount > 0 {
        os.Exit(1)
    }
}

func generateSummaryReport(resultsDir, testType string) (*SummaryReport, error) {
    summary := &SummaryReport{
        Timestamp: time.Now(),
        TestType:  testType,
    }

    entries, err := os.ReadDir(resultsDir)
    if err != nil {
        return nil, fmt.Errorf("failed to read results directory: %w", err)
    }

    for _, entry := range entries {
        if !entry.IsDir() || !strings.HasPrefix(entry.Name(), "S") {
            continue
        }

        scenarioID := entry.Name()
        report, err := generateScenarioReport(scenarioID, resultsDir)
        if err != nil {
            log.Printf("Warning: failed to load scenario %s: %v", scenarioID, err)
            continue
        }

        scenarioSummary := ScenarioSummary{
            ID:          scenarioID,
            Status:      report.OverallStatus,
            Description: report.TestDescription,
        }

        for _, c := range report.Comparisons {
            switch c.Name {
            case "action_total":
                scenarioSummary.ActionTotal = c.NewValue
                scenarioSummary.ActionExp = c.Expected
            case "errors_total":
                scenarioSummary.ErrorsTotal = c.NewValue
            }
        }

        summary.Scenarios = append(summary.Scenarios, scenarioSummary)
        summary.TotalCount++
        if report.OverallStatus == "PASS" {
            summary.PassCount++
        } else {
            summary.FailCount++
        }
    }

    sort.Slice(summary.Scenarios, func(i, j int) bool {
        return naturalSort(summary.Scenarios[i].ID, summary.Scenarios[j].ID)
    })

    return summary, nil
}

func naturalSort(a, b string) bool {
    var aNum, bNum int
    fmt.Sscanf(a, "S%d", &aNum)
    fmt.Sscanf(b, "S%d", &bNum)
    return aNum < bNum
}

func renderSummaryJSON(summary *SummaryReport) string {
    data, err := json.MarshalIndent(summary, "", "  ")
    if err != nil {
        return fmt.Sprintf(`{"error": "%s"}`, err.Error())
    }
    return string(data)
}

func renderSummaryText(summary *SummaryReport) string {
    var sb strings.Builder

    sb.WriteString("================================================================================\n")
    sb.WriteString("                              LOAD TEST SUMMARY\n")
    sb.WriteString("================================================================================\n\n")

    passRate := 0
    if summary.TotalCount > 0 {
        passRate = summary.PassCount * 100 / summary.TotalCount
    }

    fmt.Fprintf(&sb, "Test Type: %s\n", summary.TestType)
    fmt.Fprintf(&sb, "Results:   %d/%d passed (%d%%)\n\n", summary.PassCount, summary.TotalCount, passRate)

    fmt.Fprintf(&sb, "%-6s %-8s %-45s %10s %8s\n", "ID", "Status", "Description", "Actions", "Errors")
    fmt.Fprintf(&sb, "%-6s %-8s %-45s %10s %8s\n", "------", "--------", strings.Repeat("-", 45), "----------", "--------")

    for _, s := range summary.Scenarios {
        desc := s.Description
        if len(desc) > 45 {
            desc = desc[:42] + "..."
        }
        actions := fmt.Sprintf("%.0f", s.ActionTotal)
        if s.ActionExp > 0 {
            actions = fmt.Sprintf("%.0f/%.0f", s.ActionTotal, s.ActionExp)
        }
        fmt.Fprintf(&sb, "%-6s %-8s %-45s %10s %8.0f\n", s.ID, s.Status, desc, actions, s.ErrorsTotal)
    }

    sb.WriteString("\n================================================================================\n")
    return sb.String()
}

func renderSummaryMarkdown(summary *SummaryReport) string {
    var sb strings.Builder

    emoji := "✅"
    title := "ALL TESTS PASSED"
    if summary.FailCount > 0 {
        emoji = "❌"
        title = fmt.Sprintf("%d TEST(S) FAILED", summary.FailCount)
    } else if summary.TotalCount == 0 {
        emoji = "⚠️"
        title = "NO RESULTS"
    }

    sb.WriteString(fmt.Sprintf("## %s Load Test Results: %s\n\n", emoji, title))

    if summary.TestType == "quick" {
        sb.WriteString("> 🚀 **Quick Test** (S1, S4, S6) — Use `/loadtest` for full suite\n\n")
    }

    passRate := 0
    if summary.TotalCount > 0 {
        passRate = summary.PassCount * 100 / summary.TotalCount
    }
    sb.WriteString(fmt.Sprintf("**%d/%d passed** (%d%%)\n\n", summary.PassCount, summary.TotalCount, passRate))

    sb.WriteString("| | Scenario | Description | Actions | Errors |\n")
    sb.WriteString("|:-:|:--------:|-------------|:-------:|:------:|\n")

    for _, s := range summary.Scenarios {
        icon := "✅"
        if s.Status != "PASS" {
            icon = "❌"
        }

        desc := s.Description
        if len(desc) > 45 {
            desc = desc[:42] + "..."
        }

        actions := fmt.Sprintf("%.0f", s.ActionTotal)
        if s.ActionExp > 0 {
            actions = fmt.Sprintf("%.0f/%.0f", s.ActionTotal, s.ActionExp)
        }

        errors := fmt.Sprintf("%.0f", s.ErrorsTotal)
        if s.ErrorsTotal > 0 {
            errors = fmt.Sprintf("⚠️ %.0f", s.ErrorsTotal)
        }

        sb.WriteString(fmt.Sprintf("| %s | **%s** | %s | %s | %s |\n", icon, s.ID, desc, actions, errors))
    }

    sb.WriteString("\n📦 **[Download detailed results](../artifacts)**\n")

    return sb.String()
}
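For a passing two-scenario run, renderSummaryMarkdown above would produce output shaped like the following (the scenario descriptions are invented here for illustration; the real ones come from each scenario report):

## ✅ Load Test Results: ALL TESTS PASSED

**2/2 passed** (100%)

| | Scenario | Description | Actions | Errors |
|:-:|:--------:|-------------|:-------:|:------:|
| ✅ | **S1** | Single ConfigMap update | 10/10 | 0 |
| ✅ | **S4** | Burst of Secret updates | 50/50 | 0 |

📦 **[Download detailed results](../artifacts)**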
test/loadtest/internal/prometheus/prometheus.go (new file, 429 lines)
@@ -0,0 +1,429 @@
// Package prometheus provides Prometheus deployment and querying functionality.
package prometheus

import (
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net"
    "net/http"
    "net/url"
    "os"
    "os/exec"
    "path/filepath"
    "strings"
    "time"
)

// Manager handles Prometheus operations.
type Manager struct {
    manifestPath string
    portForward  *exec.Cmd
    localPort    int
    kubeContext  string
}

// NewManager creates a new Prometheus manager.
func NewManager(manifestPath string) *Manager {
    return &Manager{
        manifestPath: manifestPath,
        localPort:    9091,
    }
}

// NewManagerWithPort creates a Prometheus manager with a custom port.
func NewManagerWithPort(manifestPath string, port int, kubeContext string) *Manager {
    return &Manager{
        manifestPath: manifestPath,
        localPort:    port,
        kubeContext:  kubeContext,
    }
}

// kubectl returns kubectl args with optional context
func (m *Manager) kubectl(args ...string) []string {
    if m.kubeContext != "" {
        return append([]string{"--context", m.kubeContext}, args...)
    }
    return args
}

// Deploy deploys Prometheus to the cluster.
func (m *Manager) Deploy(ctx context.Context) error {
    cmd := exec.CommandContext(ctx, "kubectl", m.kubectl("create", "namespace", "monitoring", "--dry-run=client", "-o", "yaml")...)
    out, err := cmd.Output()
    if err != nil {
        return fmt.Errorf("generating namespace yaml: %w", err)
    }

    applyCmd := exec.CommandContext(ctx, "kubectl", m.kubectl("apply", "-f", "-")...)
    applyCmd.Stdin = strings.NewReader(string(out))
    if err := applyCmd.Run(); err != nil {
        return fmt.Errorf("applying namespace: %w", err)
    }

    applyCmd = exec.CommandContext(ctx, "kubectl", m.kubectl("apply", "-f", m.manifestPath)...)
    applyCmd.Stdout = os.Stdout
    applyCmd.Stderr = os.Stderr
    if err := applyCmd.Run(); err != nil {
        return fmt.Errorf("applying prometheus manifest: %w", err)
    }

    fmt.Println("Waiting for Prometheus to be ready...")
    waitCmd := exec.CommandContext(ctx, "kubectl", m.kubectl("wait", "--for=condition=ready", "pod",
        "-l", "app=prometheus", "-n", "monitoring", "--timeout=120s")...)
    waitCmd.Stdout = os.Stdout
    waitCmd.Stderr = os.Stderr
    if err := waitCmd.Run(); err != nil {
        return fmt.Errorf("waiting for prometheus: %w", err)
    }

    return nil
}

// StartPortForward starts port-forwarding to Prometheus.
func (m *Manager) StartPortForward(ctx context.Context) error {
    m.StopPortForward()

    m.portForward = exec.CommandContext(ctx, "kubectl", m.kubectl("port-forward",
        "-n", "monitoring", "svc/prometheus", fmt.Sprintf("%d:9090", m.localPort))...)

    if err := m.portForward.Start(); err != nil {
        return fmt.Errorf("starting port-forward: %w", err)
    }

    for i := 0; i < 30; i++ {
        time.Sleep(time.Second)
        if m.isAccessible() {
            fmt.Printf("Prometheus accessible at http://localhost:%d\n", m.localPort)
            return nil
        }
    }

    return fmt.Errorf("prometheus port-forward not ready after 30s")
}

// StopPortForward stops the port-forward process.
func (m *Manager) StopPortForward() {
    if m.portForward != nil && m.portForward.Process != nil {
        m.portForward.Process.Kill()
        m.portForward = nil
    }
    exec.Command("pkill", "-f", fmt.Sprintf("kubectl port-forward.*prometheus.*%d", m.localPort)).Run()
}

// Reset restarts Prometheus to clear all metrics.
func (m *Manager) Reset(ctx context.Context) error {
    m.StopPortForward()

    cmd := exec.CommandContext(ctx, "kubectl", m.kubectl("delete", "pod", "-n", "monitoring",
        "-l", "app=prometheus", "--grace-period=0", "--force")...)
    cmd.Run()

    fmt.Println("Waiting for Prometheus to restart...")
    waitCmd := exec.CommandContext(ctx, "kubectl", m.kubectl("wait", "--for=condition=ready", "pod",
        "-l", "app=prometheus", "-n", "monitoring", "--timeout=120s")...)
    if err := waitCmd.Run(); err != nil {
        return fmt.Errorf("waiting for prometheus restart: %w", err)
    }

    if err := m.StartPortForward(ctx); err != nil {
        return err
    }

    fmt.Println("Waiting 5s for Prometheus to initialize scraping...")
    time.Sleep(5 * time.Second)

    return nil
}

func (m *Manager) isAccessible() bool {
    conn, err := net.DialTimeout("tcp", fmt.Sprintf("localhost:%d", m.localPort), 2*time.Second)
    if err != nil {
        return false
    }
    conn.Close()

    resp, err := http.Get(fmt.Sprintf("http://localhost:%d/api/v1/status/config", m.localPort))
    if err != nil {
        return false
    }
    resp.Body.Close()
    return resp.StatusCode == 200
}

// URL returns the local Prometheus URL.
func (m *Manager) URL() string {
    return fmt.Sprintf("http://localhost:%d", m.localPort)
}

// WaitForTarget waits for a specific job to be scraped by Prometheus.
func (m *Manager) WaitForTarget(ctx context.Context, job string, timeout time.Duration) error {
    fmt.Printf("Waiting for Prometheus to discover and scrape job '%s'...\n", job)

    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        if m.isTargetHealthy(job) {
            fmt.Printf("Prometheus is scraping job '%s'\n", job)
            return nil
        }
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(2 * time.Second):
        }
    }

    m.printTargetStatus(job)
    return fmt.Errorf("timeout waiting for Prometheus to scrape job '%s'", job)
}

// isTargetHealthy checks if a job has at least one healthy target.
func (m *Manager) isTargetHealthy(job string) bool {
    resp, err := http.Get(fmt.Sprintf("%s/api/v1/targets", m.URL()))
    if err != nil {
        return false
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return false
    }

    var result struct {
        Status string `json:"status"`
        Data   struct {
            ActiveTargets []struct {
                Labels map[string]string `json:"labels"`
                Health string            `json:"health"`
            } `json:"activeTargets"`
        } `json:"data"`
    }

    if err := json.Unmarshal(body, &result); err != nil {
        return false
    }

    for _, target := range result.Data.ActiveTargets {
        if target.Labels["job"] == job && target.Health == "up" {
            return true
        }
    }
    return false
}

// printTargetStatus prints debug info about targets.
func (m *Manager) printTargetStatus(job string) {
    resp, err := http.Get(fmt.Sprintf("%s/api/v1/targets", m.URL()))
    if err != nil {
        fmt.Printf("Failed to get targets: %v\n", err)
        return
    }
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)

    var result struct {
        Data struct {
            ActiveTargets []struct {
                Labels    map[string]string `json:"labels"`
                Health    string            `json:"health"`
                LastError string            `json:"lastError"`
                ScrapeURL string            `json:"scrapeUrl"`
            } `json:"activeTargets"`
        } `json:"data"`
    }

    if err := json.Unmarshal(body, &result); err != nil {
        fmt.Printf("Failed to parse targets: %v\n", err)
        return
    }

    fmt.Printf("Prometheus targets for job '%s':\n", job)
    found := false
    for _, target := range result.Data.ActiveTargets {
        if target.Labels["job"] == job {
            found = true
            fmt.Printf("  - %s: health=%s, lastError=%s\n",
                target.ScrapeURL, target.Health, target.LastError)
        }
    }
    if !found {
        fmt.Printf("  No targets found for job '%s'\n", job)
        fmt.Printf("  Available jobs: ")
        jobs := make(map[string]bool)
        for _, target := range result.Data.ActiveTargets {
            jobs[target.Labels["job"]] = true
        }
        for j := range jobs {
            fmt.Printf("%s ", j)
        }
        fmt.Println()
    }
}

// HasMetrics checks if the specified job has any metrics available.
func (m *Manager) HasMetrics(ctx context.Context, job string) bool {
    query := fmt.Sprintf(`up{job="%s"}`, job)
    result, err := m.Query(ctx, query)
    if err != nil {
        return false
    }
    return len(result.Data.Result) > 0 && result.Data.Result[0].Value[1] == "1"
}

// QueryResponse represents a Prometheus query response.
type QueryResponse struct {
    Status string `json:"status"`
    Data   struct {
        ResultType string `json:"resultType"`
        Result     []struct {
            Metric map[string]string `json:"metric"`
            Value  []interface{}     `json:"value"`
        } `json:"result"`
    } `json:"data"`
}

// Query executes a PromQL query and returns the response.
func (m *Manager) Query(ctx context.Context, query string) (*QueryResponse, error) {
    u := fmt.Sprintf("%s/api/v1/query?query=%s", m.URL(), url.QueryEscape(query))

    req, err := http.NewRequestWithContext(ctx, "GET", u, nil)
    if err != nil {
        return nil, err
    }

    client := &http.Client{Timeout: 10 * time.Second}
    resp, err := client.Do(req)
    if err != nil {
        return nil, fmt.Errorf("querying prometheus: %w", err)
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return nil, fmt.Errorf("reading response: %w", err)
    }

    var result QueryResponse
    if err := json.Unmarshal(body, &result); err != nil {
        return nil, fmt.Errorf("parsing response: %w", err)
    }

    return &result, nil
}

// CollectMetrics collects all metrics for a scenario and writes to output directory.
func (m *Manager) CollectMetrics(ctx context.Context, job, outputDir, scenario string) error {
    if err := os.MkdirAll(outputDir, 0755); err != nil {
        return fmt.Errorf("creating output directory: %w", err)
    }

    timeRange := "10m"

    // For S6 (restart scenario), use increase() to handle counter resets
    useIncrease := scenario == "S6"

    counterMetrics := []string{
        "reloader_reconcile_total",
        "reloader_action_total",
        "reloader_skipped_total",
        "reloader_errors_total",
        "reloader_events_received_total",
        "reloader_workloads_scanned_total",
        "reloader_workloads_matched_total",
        "reloader_reload_executed_total",
    }

    for _, metric := range counterMetrics {
        var query string
        if useIncrease {
            query = fmt.Sprintf(`sum(increase(%s{job="%s"}[%s])) by (success, reason)`, metric, job, timeRange)
        } else {
            query = fmt.Sprintf(`sum(%s{job="%s"}) by (success, reason)`, metric, job)
        }

        if err := m.queryAndSave(ctx, query, filepath.Join(outputDir, metric+".json")); err != nil {
            fmt.Printf("Warning: failed to collect %s: %v\n", metric, err)
        }
    }

    histogramMetrics := []struct {
        name   string
        prefix string
    }{
        {"reloader_reconcile_duration_seconds", "reconcile"},
        {"reloader_action_latency_seconds", "action"},
    }

    for _, hm := range histogramMetrics {
        for _, pct := range []int{50, 95, 99} {
            quantile := float64(pct) / 100
            query := fmt.Sprintf(`histogram_quantile(%v, sum(rate(%s_bucket{job="%s"}[%s])) by (le))`,
                quantile, hm.name, job, timeRange)
            outFile := filepath.Join(outputDir, fmt.Sprintf("%s_p%d.json", hm.prefix, pct))
            if err := m.queryAndSave(ctx, query, outFile); err != nil {
                fmt.Printf("Warning: failed to collect %s p%d: %v\n", hm.name, pct, err)
            }
        }
    }

    restQueries := map[string]string{
        "rest_client_requests_total.json":  fmt.Sprintf(`sum(rest_client_requests_total{job="%s"})`, job),
        "rest_client_requests_get.json":    fmt.Sprintf(`sum(rest_client_requests_total{job="%s",method="GET"})`, job),
        "rest_client_requests_patch.json":  fmt.Sprintf(`sum(rest_client_requests_total{job="%s",method="PATCH"})`, job),
        "rest_client_requests_put.json":    fmt.Sprintf(`sum(rest_client_requests_total{job="%s",method="PUT"})`, job),
        "rest_client_requests_errors.json": fmt.Sprintf(`sum(rest_client_requests_total{job="%s",code=~"[45].."}) or vector(0)`, job),
    }

    for filename, query := range restQueries {
        if err := m.queryAndSave(ctx, query, filepath.Join(outputDir, filename)); err != nil {
            fmt.Printf("Warning: failed to collect %s: %v\n", filename, err)
        }
    }

    resourceQueries := map[string]string{
        "memory_rss_bytes_avg.json": fmt.Sprintf(`avg_over_time(process_resident_memory_bytes{job="%s"}[%s])`, job, timeRange),
        "memory_rss_bytes_max.json": fmt.Sprintf(`max_over_time(process_resident_memory_bytes{job="%s"}[%s])`, job, timeRange),
        "memory_rss_bytes_cur.json": fmt.Sprintf(`process_resident_memory_bytes{job="%s"}`, job),

        "memory_heap_bytes_avg.json": fmt.Sprintf(`avg_over_time(go_memstats_heap_alloc_bytes{job="%s"}[%s])`, job, timeRange),
        "memory_heap_bytes_max.json": fmt.Sprintf(`max_over_time(go_memstats_heap_alloc_bytes{job="%s"}[%s])`, job, timeRange),

        "cpu_usage_cores_avg.json": fmt.Sprintf(`rate(process_cpu_seconds_total{job="%s"}[%s])`, job, timeRange),
        "cpu_usage_cores_max.json": fmt.Sprintf(`max_over_time(rate(process_cpu_seconds_total{job="%s"}[1m])[%s:1m])`, job, timeRange),

        "goroutines_avg.json": fmt.Sprintf(`avg_over_time(go_goroutines{job="%s"}[%s])`, job, timeRange),
        "goroutines_max.json": fmt.Sprintf(`max_over_time(go_goroutines{job="%s"}[%s])`, job, timeRange),
        "goroutines_cur.json": fmt.Sprintf(`go_goroutines{job="%s"}`, job),

        "gc_duration_seconds_p99.json": fmt.Sprintf(`histogram_quantile(0.99, sum(rate(go_gc_duration_seconds_bucket{job="%s"}[%s])) by (le))`, job, timeRange),

        "threads_cur.json": fmt.Sprintf(`go_threads{job="%s"}`, job),
    }

    for filename, query := range resourceQueries {
        if err := m.queryAndSave(ctx, query, filepath.Join(outputDir, filename)); err != nil {
            fmt.Printf("Warning: failed to collect %s: %v\n", filename, err)
        }
    }

    return nil
}

func (m *Manager) queryAndSave(ctx context.Context, query, outputPath string) error {
    result, err := m.Query(ctx, query)
    if err != nil {
        emptyResult := `{"status":"success","data":{"resultType":"vector","result":[]}}`
        return os.WriteFile(outputPath, []byte(emptyResult), 0644)
    }

    data, err := json.MarshalIndent(result, "", "  ")
    if err != nil {
        return err
    }

    return os.WriteFile(outputPath, data, 0644)
}
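As a usage sketch for Query and QueryResponse above (the helper below is illustrative and not part of this diff; Prometheus instant-query samples arrive as a [timestamp, "value"] pair, so index 1 holds the stringified number):

package main

import (
    "context"
    "fmt"

    "github.com/stakater/Reloader/test/loadtest/internal/prometheus"
)

// queryScalar returns the first sample of an instant query as a string.
func queryScalar(ctx context.Context, m *prometheus.Manager, promQL string) (string, error) {
    resp, err := m.Query(ctx, promQL)
    if err != nil {
        return "", err
    }
    if len(resp.Data.Result) == 0 {
        return "", fmt.Errorf("no samples for query %q", promQL)
    }
    // Value is [timestamp, "value"]; the number is JSON-encoded as a string.
    v, ok := resp.Data.Result[0].Value[1].(string)
    if !ok {
        return "", fmt.Errorf("unexpected sample value type for %q", promQL)
    }
    return v, nil
}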
test/loadtest/internal/reloader/reloader.go (new file, 271 lines)
@@ -0,0 +1,271 @@
package reloader

import (
    "context"
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
    "strings"
    "time"
)

// Config holds configuration for a Reloader deployment.
type Config struct {
    Version        string
    Image          string
    Namespace      string
    ReloadStrategy string
}

// Manager handles Reloader deployment operations.
type Manager struct {
    config      Config
    kubeContext string
}

// NewManager creates a new Reloader manager.
func NewManager(config Config) *Manager {
    return &Manager{
        config: config,
    }
}

// SetKubeContext sets the kubeconfig context to use.
func (m *Manager) SetKubeContext(kubeContext string) {
    m.kubeContext = kubeContext
}

// kubectl returns kubectl command with optional context.
func (m *Manager) kubectl(ctx context.Context, args ...string) *exec.Cmd {
    if m.kubeContext != "" {
        args = append([]string{"--context", m.kubeContext}, args...)
    }
    return exec.CommandContext(ctx, "kubectl", args...)
}

// namespace returns the namespace for this reloader instance.
func (m *Manager) namespace() string {
    if m.config.Namespace != "" {
        return m.config.Namespace
    }
    return fmt.Sprintf("reloader-%s", m.config.Version)
}

// releaseName returns the release name for this instance.
func (m *Manager) releaseName() string {
    return fmt.Sprintf("reloader-%s", m.config.Version)
}

// Job returns the Prometheus job name for this Reloader instance.
func (m *Manager) Job() string {
    return fmt.Sprintf("reloader-%s", m.config.Version)
}

// Deploy deploys Reloader to the cluster using raw manifests.
func (m *Manager) Deploy(ctx context.Context) error {
    ns := m.namespace()
    name := m.releaseName()

    fmt.Printf("Deploying Reloader (%s) with image %s...\n", m.config.Version, m.config.Image)

    manifest := m.buildManifest(ns, name)

    applyCmd := m.kubectl(ctx, "apply", "-f", "-")
    applyCmd.Stdin = strings.NewReader(manifest)
    applyCmd.Stdout = os.Stdout
    applyCmd.Stderr = os.Stderr
    if err := applyCmd.Run(); err != nil {
        return fmt.Errorf("applying manifest: %w", err)
    }

    fmt.Printf("Waiting for Reloader deployment to be ready...\n")
    waitCmd := m.kubectl(ctx, "rollout", "status", "deployment", name,
        "-n", ns,
        "--timeout=120s")
    waitCmd.Stdout = os.Stdout
    waitCmd.Stderr = os.Stderr
    if err := waitCmd.Run(); err != nil {
        return fmt.Errorf("waiting for deployment: %w", err)
    }

    time.Sleep(2 * time.Second)

    fmt.Printf("Reloader (%s) deployed successfully\n", m.config.Version)
    return nil
}

// buildManifest creates the raw Kubernetes manifest for Reloader.
func (m *Manager) buildManifest(ns, name string) string {
    var args []string
    args = append(args, "--log-format=json")
    if m.config.ReloadStrategy != "" && m.config.ReloadStrategy != "default" {
        args = append(args, fmt.Sprintf("--reload-strategy=%s", m.config.ReloadStrategy))
    }

    argsYAML := ""
    if len(args) > 0 {
        argsYAML = "          args:\n"
        for _, arg := range args {
            argsYAML += fmt.Sprintf("            - %q\n", arg)
        }
    }

    return fmt.Sprintf(`---
apiVersion: v1
kind: Namespace
metadata:
  name: %[1]s
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: %[2]s
  namespace: %[1]s
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: %[2]s
rules:
  - apiGroups: ["*"]
    resources: ["*"]
    verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: %[2]s
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: %[2]s
subjects:
  - kind: ServiceAccount
    name: %[2]s
    namespace: %[1]s
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: %[2]s
  namespace: %[1]s
  labels:
    app: %[2]s
    app.kubernetes.io/name: reloader
    loadtest-version: %[3]s
spec:
  replicas: 1
  selector:
    matchLabels:
      app: %[2]s
  template:
    metadata:
      labels:
        app: %[2]s
        app.kubernetes.io/name: reloader
        loadtest-version: %[3]s
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "9090"
        prometheus.io/path: "/metrics"
    spec:
      serviceAccountName: %[2]s
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
      containers:
        - name: reloader
          image: %[4]s
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 9090
%[5]s          resources:
            requests:
              cpu: 10m
              memory: 64Mi
            limits:
              cpu: 500m
              memory: 256Mi
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
`, ns, name, m.config.Version, m.config.Image, argsYAML)
}

// Cleanup removes all Reloader resources from the cluster.
func (m *Manager) Cleanup(ctx context.Context) error {
    ns := m.namespace()
    name := m.releaseName()

    delDeploy := m.kubectl(ctx, "delete", "deployment", name, "-n", ns, "--ignore-not-found")
    delDeploy.Run()

    delCRB := m.kubectl(ctx, "delete", "clusterrolebinding", name, "--ignore-not-found")
    delCRB.Run()

    delCR := m.kubectl(ctx, "delete", "clusterrole", name, "--ignore-not-found")
    delCR.Run()

    delNS := m.kubectl(ctx, "delete", "namespace", ns, "--wait=false", "--ignore-not-found")
    if err := delNS.Run(); err != nil {
        return fmt.Errorf("deleting namespace: %w", err)
    }

    return nil
}

// CleanupByVersion removes Reloader resources for a specific version without needing a Manager instance.
// This is useful for cleaning up from previous runs before creating a new Manager.
func CleanupByVersion(ctx context.Context, version, kubeContext string) {
    ns := fmt.Sprintf("reloader-%s", version)
    name := fmt.Sprintf("reloader-%s", version)

    nsArgs := []string{"delete", "namespace", ns, "--wait=false", "--ignore-not-found"}
    crArgs := []string{"delete", "clusterrole", name, "--ignore-not-found"}
    crbArgs := []string{"delete", "clusterrolebinding", name, "--ignore-not-found"}

    if kubeContext != "" {
        nsArgs = append([]string{"--context", kubeContext}, nsArgs...)
        crArgs = append([]string{"--context", kubeContext}, crArgs...)
        crbArgs = append([]string{"--context", kubeContext}, crbArgs...)
    }

    exec.CommandContext(ctx, "kubectl", nsArgs...).Run()
    exec.CommandContext(ctx, "kubectl", crArgs...).Run()
    exec.CommandContext(ctx, "kubectl", crbArgs...).Run()
}

// CollectLogs collects logs from the Reloader pod and writes them to the specified file.
func (m *Manager) CollectLogs(ctx context.Context, logPath string) error {
    ns := m.namespace()
    name := m.releaseName()

    if err := os.MkdirAll(filepath.Dir(logPath), 0755); err != nil {
        return fmt.Errorf("creating log directory: %w", err)
    }

    cmd := m.kubectl(ctx, "logs",
        "-n", ns,
        "-l", fmt.Sprintf("app=%s", name),
        "--tail=-1")

    out, err := cmd.Output()
    if err != nil {
        cmd = m.kubectl(ctx, "logs",
            "-n", ns,
            "-l", "app.kubernetes.io/name=reloader",
            "--tail=-1")
        out, err = cmd.Output()
        if err != nil {
            return fmt.Errorf("collecting logs: %w", err)
        }
    }

    if err := os.WriteFile(logPath, out, 0644); err != nil {
        return fmt.Errorf("writing logs: %w", err)
    }

    return nil
}
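Putting this file's API together, a minimal deploy/collect/cleanup lifecycle sketch under the same assumptions as the run command (error handling abbreviated; the kube context value here is hypothetical):

mgr := reloader.NewManager(reloader.Config{
    Version: "new",
    Image:   "localhost/reloader:test",
})
mgr.SetKubeContext("kind-reloader-loadtest-0") // only needed when targeting a worker cluster

if err := mgr.Deploy(ctx); err != nil {
    // deployment failed; skip the scenario
}
// ... drive a scenario against the cluster ...
_ = mgr.CollectLogs(ctx, "results/S1/new/reloader.log")
_ = mgr.Cleanup(ctx)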
test/loadtest/internal/scenarios/scenarios.go (new file, 2037 lines)
(diff suppressed because it is too large)
test/loadtest/manifests/prometheus.yaml (new file, 181 lines)
@@ -0,0 +1,181 @@
|
||||
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
  namespace: monitoring
data:
  prometheus.yml: |
    global:
      scrape_interval: 2s
      evaluation_interval: 2s

    scrape_configs:
      - job_name: 'prometheus'
        static_configs:
          - targets: ['localhost:9090']

      - job_name: 'reloader-old'
        kubernetes_sd_configs:
          - role: pod
            namespaces:
              names:
                - reloader-old
        relabel_configs:
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
            action: keep
            regex: true
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
            action: replace
            target_label: __metrics_path__
            regex: (.+)
          - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
            action: replace
            regex: ([^:]+)(?::\d+)?;(\d+)
            replacement: $1:$2
            target_label: __address__
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          - source_labels: [__meta_kubernetes_namespace]
            action: replace
            target_label: kubernetes_namespace
          - source_labels: [__meta_kubernetes_pod_name]
            action: replace
            target_label: kubernetes_pod_name

      - job_name: 'reloader-new'
        kubernetes_sd_configs:
          - role: pod
            namespaces:
              names:
                - reloader-new
        relabel_configs:
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
            action: keep
            regex: true
          - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
            action: replace
            target_label: __metrics_path__
            regex: (.+)
          - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
            action: replace
            regex: ([^:]+)(?::\d+)?;(\d+)
            replacement: $1:$2
            target_label: __address__
          - action: labelmap
            regex: __meta_kubernetes_pod_label_(.+)
          - source_labels: [__meta_kubernetes_namespace]
            action: replace
            target_label: kubernetes_namespace
          - source_labels: [__meta_kubernetes_pod_name]
            action: replace
            target_label: kubernetes_pod_name
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: monitoring
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus
rules:
  - apiGroups: [""]
    resources:
      - nodes
      - nodes/proxy
      - services
      - endpoints
      - pods
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources:
      - configmaps
    verbs: ["get"]
  - nonResourceURLs: ["/metrics"]
    verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
  - kind: ServiceAccount
    name: prometheus
    namespace: monitoring
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: prometheus
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
  template:
    metadata:
      labels:
        app: prometheus
    spec:
      serviceAccountName: prometheus
      containers:
        - name: prometheus
          image: quay.io/prometheus/prometheus:v2.47.0
          args:
            - --config.file=/etc/prometheus/prometheus.yml
            - --storage.tsdb.path=/prometheus
            - --web.console.libraries=/usr/share/prometheus/console_libraries
            - --web.console.templates=/usr/share/prometheus/consoles
            - --web.enable-lifecycle
          ports:
            - containerPort: 9090
          volumeMounts:
            - name: config
              mountPath: /etc/prometheus
            - name: data
              mountPath: /prometheus
          resources:
            limits:
              cpu: 1000m
              memory: 1Gi
            requests:
              cpu: 200m
              memory: 512Mi
          readinessProbe:
            httpGet:
              path: /-/ready
              port: 9090
            initialDelaySeconds: 5
            periodSeconds: 5
          livenessProbe:
            httpGet:
              path: /-/healthy
              port: 9090
            initialDelaySeconds: 10
            periodSeconds: 10
      volumes:
        - name: config
          configMap:
            name: prometheus-config
        - name: data
          emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
  name: prometheus
  namespace: monitoring
spec:
  selector:
    app: prometheus
  ports:
    - port: 9090
      targetPort: 9090
  type: NodePort
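
Because the Service is exposed as a NodePort, a harness can reach Prometheus through the node port or a local port-forward. A minimal sketch in Go (hypothetical helper, not part of this change), assuming `kubectl port-forward -n monitoring svc/prometheus 9090:9090` is already running; the query uses the kubernetes_namespace label produced by the relabel_configs above, and the standard fmt, io, net/http, and net/url imports are omitted for brevity:

// probeScrapeHealth asks the Prometheus HTTP API whether the reloader-old
// pods are currently being scraped (the built-in "up" series is 1 per
// healthy target). Endpoint and label values are illustrative assumptions.
func probeScrapeHealth() error {
	q := url.QueryEscape(`up{kubernetes_namespace="reloader-old"}`)
	resp, err := http.Get("http://localhost:9090/api/v1/query?query=" + q)
	if err != nil {
		return fmt.Errorf("querying prometheus: %w", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("reading response: %w", err)
	}
	fmt.Println(string(body)) // raw JSON instant-vector result
	return nil
}
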