mirror of
https://github.com/hauler-dev/hauler.git
synced 2026-03-17 00:50:17 +00:00
Compare commits
308 Commits
v0.4.0-rc.
...
cherrypick
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e59beb08d0 | ||
|
|
47479b1fa2 | ||
|
|
bbde34690f | ||
|
|
0dd1896191 | ||
|
|
cc6123918f | ||
|
|
995e538412 | ||
|
|
e2a59508af | ||
|
|
26b11d5abc | ||
|
|
a4b16c723d | ||
|
|
666d220d6c | ||
|
|
4ed7504264 | ||
|
|
e255eda007 | ||
|
|
16f47999b1 | ||
|
|
4c68654424 | ||
|
|
8ecd87d944 | ||
|
|
a355898171 | ||
|
|
3440b1a641 | ||
|
|
9081ac257b | ||
|
|
a01895bfff | ||
|
|
e8a5f82b7d | ||
|
|
dffcb8254c | ||
|
|
4a2b7b13a7 | ||
|
|
cf22fa8551 | ||
|
|
28432fc057 | ||
|
|
ac7d82b55f | ||
|
|
ded947d609 | ||
|
|
ff3cece87f | ||
|
|
c54065f316 | ||
|
|
382dea42a5 | ||
|
|
3c073688f3 | ||
|
|
96bab7b81f | ||
|
|
5ea9b29b8f | ||
|
|
15867e84ad | ||
|
|
c5da018450 | ||
|
|
5edc8802ee | ||
|
|
a3d62b204f | ||
|
|
d85a1b0775 | ||
|
|
ea10bc0256 | ||
|
|
1aea670588 | ||
|
|
f1a632a207 | ||
|
|
802e062f47 | ||
|
|
d227e1f18f | ||
|
|
33a9bb3f78 | ||
|
|
344c008607 | ||
|
|
09a149dab6 | ||
|
|
f7f1e2db8f | ||
|
|
0fafca87f9 | ||
|
|
38e676e934 | ||
|
|
369c85bab9 | ||
|
|
acbd1f1b6a | ||
|
|
3e44c53b75 | ||
|
|
062bb3ff2c | ||
|
|
c8b4e80371 | ||
|
|
d86957bf20 | ||
|
|
4a6fc8cec2 | ||
|
|
e089c31879 | ||
|
|
b7b599e6ed | ||
|
|
ea53002f3a | ||
|
|
4d0f779ae6 | ||
|
|
4d0b407452 | ||
|
|
3b96a95a94 | ||
|
|
f9a188259f | ||
|
|
5021f3ab6b | ||
|
|
db065a1088 | ||
|
|
01bf58de03 | ||
|
|
38b979d0c5 | ||
|
|
7de20a1f15 | ||
|
|
088fde5aa9 | ||
|
|
eb275b9690 | ||
|
|
7d28df1949 | ||
|
|
08f566fb28 | ||
|
|
c465d2c143 | ||
|
|
39325585eb | ||
|
|
535a82c1b5 | ||
|
|
53cf953750 | ||
|
|
ff144b1180 | ||
|
|
938914ba5c | ||
|
|
603249dea9 | ||
|
|
37032f5379 | ||
|
|
ec9ac48476 | ||
|
|
5f5cd64c2f | ||
|
|
882713b725 | ||
|
|
a20d7bf950 | ||
|
|
e97adcdfed | ||
|
|
cc17b030a9 | ||
|
|
090f4dc905 | ||
|
|
74aa40c69b | ||
|
|
f5e3b38a6d | ||
|
|
01faf396bb | ||
|
|
235218cfff | ||
|
|
4270a27819 | ||
|
|
1b77295438 | ||
|
|
38c7d1b17a | ||
|
|
2fa6c36208 | ||
|
|
dd50ed9dba | ||
|
|
fb100a27ac | ||
|
|
3406d5453d | ||
|
|
991f5b6bc1 | ||
|
|
0595ab043a | ||
|
|
73e5c1ec8b | ||
|
|
25d8cb83b2 | ||
|
|
9f7229a36b | ||
|
|
b294b6f026 | ||
|
|
ebd3fd66c8 | ||
|
|
6373a476b5 | ||
|
|
2c7aacd105 | ||
|
|
bbcbe0239a | ||
|
|
8a53a26a58 | ||
|
|
41d88954c6 | ||
|
|
caaed30297 | ||
|
|
aee296d48d | ||
|
|
407ed94a0b | ||
|
|
15a9e1a3c4 | ||
|
|
6510947bb9 | ||
|
|
01eebd54af | ||
|
|
5aa55e9eda | ||
|
|
6f8cd04a32 | ||
|
|
02231d716f | ||
|
|
16fa03fec8 | ||
|
|
51fe531c64 | ||
|
|
1a6ce4290f | ||
|
|
e4ec7bed76 | ||
|
|
cb81823487 | ||
|
|
2d930b5653 | ||
|
|
bd0cd8f428 | ||
|
|
d6b3c94920 | ||
|
|
20958826ef | ||
|
|
d633eeffcc | ||
|
|
c592551a37 | ||
|
|
ef3eb05fce | ||
|
|
3f64914097 | ||
|
|
6a74668e2c | ||
|
|
0c5cf20e87 | ||
|
|
513719bc9e | ||
|
|
047b7a7003 | ||
|
|
a4685169c6 | ||
|
|
47549615c4 | ||
|
|
2d725026dc | ||
|
|
60667b7116 | ||
|
|
7d62a1c98e | ||
|
|
894ffb1533 | ||
|
|
78b3442d23 | ||
|
|
cd46febb6b | ||
|
|
0957a930dd | ||
|
|
a6bc6308d9 | ||
|
|
1304cf6c76 | ||
|
|
f2e02c80c0 | ||
|
|
25806e993e | ||
|
|
05e67bc750 | ||
|
|
b43ed0503a | ||
|
|
27e2fc9de0 | ||
|
|
d32d75b93e | ||
|
|
ceb77601d0 | ||
|
|
d90545a9e4 | ||
|
|
bef141ab67 | ||
|
|
385d767c2a | ||
|
|
22edc77506 | ||
|
|
9058797bbc | ||
|
|
35e2f655da | ||
|
|
f5c0f6f0ae | ||
|
|
0ec77b4168 | ||
|
|
7a7906b8ea | ||
|
|
f4774445f6 | ||
|
|
d59b29bfce | ||
|
|
fd702202ac | ||
|
|
9e9565717b | ||
|
|
bfe47ae141 | ||
|
|
ebab7f38a0 | ||
|
|
f0cba3c2c6 | ||
|
|
286120da50 | ||
|
|
dcdeb93518 | ||
|
|
f7c24f6129 | ||
|
|
fe88d7033c | ||
|
|
ef31984c97 | ||
|
|
2889f30275 | ||
|
|
0674e0ab30 | ||
|
|
d645c52135 | ||
|
|
44baab3213 | ||
|
|
1a317b0172 | ||
|
|
128cb3b252 | ||
|
|
91ff998634 | ||
|
|
8ac1ecaf29 | ||
|
|
7447aad20a | ||
|
|
003456d8ab | ||
|
|
f44b8b93af | ||
|
|
e405840642 | ||
|
|
8c9aa909b0 | ||
|
|
8670489520 | ||
|
|
f20d4052a4 | ||
|
|
c84bca43d2 | ||
|
|
6863d91f69 | ||
|
|
16eea6ac2a | ||
|
|
f6f227567c | ||
|
|
eb810c16f5 | ||
|
|
b18f55ea60 | ||
|
|
4bbe622073 | ||
|
|
ea5bcb36ae | ||
|
|
5c7daddfef | ||
|
|
7083f3a4f3 | ||
|
|
8541d73a0d | ||
|
|
49d705d14c | ||
|
|
722851d809 | ||
|
|
82aedc867a | ||
|
|
e8fb37c6ed | ||
|
|
545b3f8acd | ||
|
|
3ae92fe20a | ||
|
|
35538bf45a | ||
|
|
b6701bbfbc | ||
|
|
14738c3cd6 | ||
|
|
0657fd80fe | ||
|
|
d132e8b8e0 | ||
|
|
29367c152e | ||
|
|
185ae6bd74 | ||
|
|
b6c78d3925 | ||
|
|
e718d40744 | ||
|
|
1505bfb3af | ||
|
|
e27b5b3cd1 | ||
|
|
0472c8fc65 | ||
|
|
70a48f2efe | ||
|
|
bb2a8bfbec | ||
|
|
2779c649c2 | ||
|
|
8120537af2 | ||
|
|
9cdab516f0 | ||
|
|
d136d1bfd2 | ||
|
|
003560c3b3 | ||
|
|
1b9d057f7a | ||
|
|
2764e2d3ea | ||
|
|
360049fe19 | ||
|
|
79b240d17f | ||
|
|
214704bcfb | ||
|
|
ef73fff01a | ||
|
|
0c6fdc86da | ||
|
|
7fb537a31a | ||
|
|
6ca7fb6255 | ||
|
|
d70a867283 | ||
|
|
46ea8b5df9 | ||
|
|
5592ec0f88 | ||
|
|
e8254371c0 | ||
|
|
8d2a84d27c | ||
|
|
72734ecc76 | ||
|
|
4759879a5d | ||
|
|
dbcfe13fb6 | ||
|
|
cd8d4f6e46 | ||
|
|
e15c8d54fa | ||
|
|
ccd529ab48 | ||
|
|
3cf4afe6d1 | ||
|
|
0c55d00d49 | ||
|
|
6c2b97042e | ||
|
|
be22e56f27 | ||
|
|
c8ea279c0d | ||
|
|
59ff02b52b | ||
|
|
8b3398018a | ||
|
|
ae80b482e4 | ||
|
|
1ae496fb8b | ||
|
|
7919dccffc | ||
|
|
fc7a19c755 | ||
|
|
ade0feccf0 | ||
|
|
f78fdf5e3d | ||
|
|
85d6bc0233 | ||
|
|
d1499b7738 | ||
|
|
27acb239e4 | ||
|
|
e8d084847d | ||
|
|
e70379870f | ||
|
|
a05d21c052 | ||
|
|
8256aa55ce | ||
|
|
0e6c3690b1 | ||
|
|
a977cec50c | ||
|
|
5edc96d152 | ||
|
|
fbafa60da5 | ||
|
|
cc917af0f2 | ||
|
|
f76160d8be | ||
|
|
b24b25d557 | ||
|
|
d9e298b725 | ||
|
|
e14453f730 | ||
|
|
990ade9cd0 | ||
|
|
aecd37d192 | ||
|
|
02f4946ead | ||
|
|
978dc659f8 | ||
|
|
f982f51d57 | ||
|
|
2174e96f0e | ||
|
|
8cfe4432fc | ||
|
|
f129484224 | ||
|
|
4dbff83459 | ||
|
|
e229c2a1da | ||
|
|
2a93e74b62 | ||
|
|
4d5d9eda7b | ||
|
|
a7cbfcb042 | ||
|
|
7751b12e5e | ||
|
|
6e3d3fc7b8 | ||
|
|
0f7f363d6c | ||
|
|
ab975a1dc7 | ||
|
|
2d92d41245 | ||
|
|
e2176d211a | ||
|
|
93ae968580 | ||
|
|
b0a37d21af | ||
|
|
aa16575c6f | ||
|
|
2959cfc346 | ||
|
|
c04211a55e | ||
|
|
c497f53972 | ||
|
|
f1fbd7e9c2 | ||
|
|
f348fb8d4d | ||
|
|
fe60b1fd1a | ||
|
|
756c0171c3 | ||
|
|
c394965f88 | ||
|
|
43e2dc56ec | ||
|
|
795a88218f | ||
|
|
ec2ada9dcb | ||
|
|
45cea89752 |
52
.github/ISSUE_TEMPLATE/bug_report.md
vendored
52
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@@ -1,33 +1,51 @@
|
||||
---
|
||||
name: Bug Report
|
||||
about: Create a report to help us improve!
|
||||
about: Submit a bug report to help us improve!
|
||||
title: '[BUG]'
|
||||
labels: 'kind/bug'
|
||||
labels: 'bug'
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
<!-- Thank you for helping us to improve Hauler! We welcome all bug reports. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->
|
||||
<!-- Thank you for helping us to improve Hauler! We welcome all bug reports. Please fill out each area of the template so we can better assist you. Comments like this will be hidden when you submit, but you can delete them if you wish. -->
|
||||
|
||||
**Environmental Info:**
|
||||
*
|
||||
|
||||
<!-- Provide the output of "uname -a" -->
|
||||
|
||||
-
|
||||
|
||||
**Hauler Version:**
|
||||
*
|
||||
|
||||
**System CPU architecture, OS, and Version:**
|
||||
* <!-- Provide the output from "uname -a" on the system where Hauler is installed -->
|
||||
<!-- Provide the output of "hauler version" -->
|
||||
|
||||
**Describe the bug:**
|
||||
* <!-- A clear and concise description of the bug. -->
|
||||
-
|
||||
|
||||
**Steps To Reproduce:**
|
||||
* <!-- A clear and concise way to reproduce the bug. -->
|
||||
**Describe the Bug:**
|
||||
|
||||
**Expected behavior:**
|
||||
* <!-- A clear and concise description of what you expected to happen, without the bug. -->
|
||||
<!-- Provide a clear and concise description of the bug -->
|
||||
|
||||
**Actual behavior:**
|
||||
* <!-- A clear and concise description of what actually happened. -->
|
||||
-
|
||||
|
||||
**Additional context / logs:**
|
||||
* <!-- Add any other context and/or logs about the problem here. -->
|
||||
**Steps to Reproduce:**
|
||||
|
||||
<!-- Provide a clear and concise way to reproduce the bug -->
|
||||
|
||||
-
|
||||
|
||||
**Expected Behavior:**
|
||||
|
||||
<!-- Provide a clear and concise description of what you expected to happen -->
|
||||
|
||||
-
|
||||
|
||||
**Actual Behavior:**
|
||||
|
||||
<!-- Provide a clear and concise description of what actually happens -->
|
||||
|
||||
-
|
||||
|
||||
**Additional Context:**
|
||||
|
||||
<!-- Provide any other context and/or logs about the bug -->
|
||||
|
||||
-
|
||||
|
||||
36
.github/ISSUE_TEMPLATE/feature_request.md
vendored
36
.github/ISSUE_TEMPLATE/feature_request.md
vendored
@@ -1,21 +1,33 @@
|
||||
---
|
||||
name: Feature Request
|
||||
about: Create a report to help us improve!
|
||||
title: '[RFE]'
|
||||
labels: 'kind/rfe'
|
||||
about: Submit a feature request for us to improve!
|
||||
title: '[feature]'
|
||||
labels: 'enhancement'
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
<!-- Thanks for helping us to improve Hauler! We welcome all requests for enhancements (RFEs). Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->
|
||||
<!-- Thank you for helping us to improve Hauler! We welcome all requests for enhancements (RFEs). Please fill out each area of the template so we can better assist you. Comments like this will be hidden when you submit, but you can delete them if you wish. -->
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
* <!-- A clear and concise description of the problem. -->
|
||||
**Is this Feature/Enhancement related to an Existing Problem? If so, please describe:**
|
||||
|
||||
**Describe the solution you'd like**
|
||||
* <!-- A clear and concise description of what you want to happen. -->
|
||||
<!-- Provide a clear and concise description of the problem -->
|
||||
|
||||
**Describe alternatives you've considered**
|
||||
* <!-- A clear and concise description of any alternative solutions or features you've considered. -->
|
||||
-
|
||||
|
||||
**Additional context**
|
||||
* <!-- Add any other context or screenshots about the feature request here. -->
|
||||
**Describe Proposed Solution(s):**
|
||||
|
||||
<!-- Provide a clear and concise description of what you want to happen -->
|
||||
|
||||
-
|
||||
|
||||
**Describe Possible Alternatives:**
|
||||
|
||||
<!-- Provide a clear and concise description of any alternative solutions or features you've considered -->
|
||||
|
||||
-
|
||||
|
||||
**Additional Context:**
|
||||
|
||||
<!-- Provide any other context and/or screenshots about the feature request -->
|
||||
|
||||
-
|
||||
|
||||
42
.github/PULL_REQUEST_TEMPLATE.md
vendored
42
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,20 +1,36 @@
|
||||
**Please check below, if the PR fulfills these requirements:**
|
||||
- [ ] The commit message follows the guidelines.
|
||||
- [ ] Tests for the changes have been added (for bug fixes / features).
|
||||
- [ ] Docs have been added / updated (for bug fixes / features).
|
||||
- [ ] Commit(s) and code follow the repositories guidelines.
|
||||
- [ ] Test(s) have been added or updated to support these change(s).
|
||||
- [ ] Doc(s) have been added or updated to support these change(s).
|
||||
|
||||
<!-- Comments like this will be hidden when you submit, but you can delete them if you wish. -->
|
||||
|
||||
**What kind of change does this PR introduce?**
|
||||
* <!-- Bug fix, feature, docs update, ... -->
|
||||
**Associated Links:**
|
||||
|
||||
**What is the current behavior?**
|
||||
* <!-- You can also link to an open issue here -->
|
||||
<!-- Provide any associated links or linked issues related to these change(s) -->
|
||||
|
||||
**What is the new behavior (if this is a feature change)?**
|
||||
* <!-- What changes did this PR introduce? -->
|
||||
-
|
||||
|
||||
**Does this PR introduce a breaking change?**
|
||||
* <!-- What changes might users need to make in their application due to this PR? -->
|
||||
**Types of Changes:**
|
||||
|
||||
**Other information**:
|
||||
* <!-- Any additional information -->
|
||||
<!-- What is the type of change? Bugfix, Feature, Breaking Change, etc... -->
|
||||
|
||||
-
|
||||
|
||||
**Proposed Changes:**
|
||||
|
||||
<!-- Provide the high level and low level description of your change(s) so we can better understand these change(s) -->
|
||||
|
||||
-
|
||||
|
||||
**Verification/Testing of Changes:**
|
||||
|
||||
<!-- How can the changes be verified? Provide the steps necessary to reproduce and verify the proposed change(s) -->
|
||||
|
||||
-
|
||||
|
||||
**Additional Context:**
|
||||
|
||||
<!-- Provide any additional information, such as if this is a small or large or complex change. Feel free to kick off the discussion by explaining why you chose the solution you did and what alternatives you considered, etc... -->
|
||||
|
||||
-
|
||||
|
||||
298
.github/workflows/cherrypick.yml
vendored
Normal file
298
.github/workflows/cherrypick.yml
vendored
Normal file
@@ -0,0 +1,298 @@
|
||||
name: Cherry-pick to release branch
|
||||
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
pull_request:
|
||||
types: [closed]
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
pull-requests: write
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
# Trigger 1: /cherrypick-X.Y comment on a PR
|
||||
# - If already merged → run cherry-pick immediately
|
||||
# - If not yet merged → add label, cherry-pick will run on merge
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
handle-comment:
|
||||
if: >
|
||||
github.event_name == 'issue_comment' &&
|
||||
github.event.issue.pull_request &&
|
||||
startsWith(github.event.comment.body, '/cherrypick-')
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check commenter permissions
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
COMMENTER: ${{ github.event.comment.user.login }}
|
||||
run: |
|
||||
PERMISSION=$(gh api repos/${{ github.repository }}/collaborators/${COMMENTER}/permission \
|
||||
--jq '.permission')
|
||||
echo "Permission level for $COMMENTER: $PERMISSION"
|
||||
if [[ "$PERMISSION" != "admin" && "$PERMISSION" != "maintain" && "$PERMISSION" != "write" ]]; then
|
||||
echo "::warning::User $COMMENTER does not have write access, ignoring cherry-pick request"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Parse version from comment
|
||||
id: parse
|
||||
env:
|
||||
COMMENT_BODY: ${{ github.event.comment.body }}
|
||||
run: |
|
||||
VERSION=$(echo "$COMMENT_BODY" | head -1 | grep -oP '(?<=/cherrypick-)\d+\.\d+')
|
||||
if [ -z "$VERSION" ]; then
|
||||
echo "::error::Could not parse version from comment"
|
||||
exit 1
|
||||
fi
|
||||
echo "version=$VERSION" >> "$GITHUB_OUTPUT"
|
||||
echo "target_branch=release/$VERSION" >> "$GITHUB_OUTPUT"
|
||||
echo "label=cherrypick/$VERSION" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: React to comment
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
gh api repos/${{ github.repository }}/issues/comments/${{ github.event.comment.id }}/reactions \
|
||||
-f content='+1'
|
||||
|
||||
- name: Check if PR is merged
|
||||
id: check
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
PR_JSON=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.issue.number }})
|
||||
MERGED=$(echo "$PR_JSON" | jq -r '.merged')
|
||||
echo "merged=$MERGED" >> "$GITHUB_OUTPUT"
|
||||
echo "pr_title=$(echo "$PR_JSON" | jq -r '.title')" >> "$GITHUB_OUTPUT"
|
||||
echo "base_sha=$(echo "$PR_JSON" | jq -r '.base.sha')" >> "$GITHUB_OUTPUT"
|
||||
echo "head_sha=$(echo "$PR_JSON" | jq -r '.head.sha')" >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Add cherry-pick label
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
LABEL: ${{ steps.parse.outputs.label }}
|
||||
PR_NUMBER: ${{ github.event.issue.number }}
|
||||
run: |
|
||||
gh api repos/${{ github.repository }}/labels \
|
||||
-f name="$LABEL" -f color="fbca04" -f description="Queued for cherry-pick" 2>/dev/null || true
|
||||
gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/labels \
|
||||
-f "labels[]=$LABEL"
|
||||
|
||||
- name: Notify if queued (not yet merged)
|
||||
if: steps.check.outputs.merged != 'true'
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
LABEL: ${{ steps.parse.outputs.label }}
|
||||
TARGET_BRANCH: ${{ steps.parse.outputs.target_branch }}
|
||||
PR_NUMBER: ${{ github.event.issue.number }}
|
||||
run: |
|
||||
gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/comments \
|
||||
-f body="🏷️ Labeled \`$LABEL\` — backport to \`$TARGET_BRANCH\` will be created automatically when this PR is merged."
|
||||
|
||||
- name: Checkout repository
|
||||
if: steps.check.outputs.merged == 'true'
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Verify target branch exists
|
||||
if: steps.check.outputs.merged == 'true'
|
||||
env:
|
||||
TARGET_BRANCH: ${{ steps.parse.outputs.target_branch }}
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PR_NUMBER: ${{ github.event.issue.number }}
|
||||
run: |
|
||||
if ! git ls-remote --exit-code --heads origin "$TARGET_BRANCH" > /dev/null 2>&1; then
|
||||
gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/comments \
|
||||
-f body="❌ Cannot cherry-pick: branch \`$TARGET_BRANCH\` does not exist."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Apply PR diff and push
|
||||
if: steps.check.outputs.merged == 'true'
|
||||
id: apply
|
||||
env:
|
||||
TARGET_BRANCH: ${{ steps.parse.outputs.target_branch }}
|
||||
PR_NUMBER: ${{ github.event.issue.number }}
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
BACKPORT_BRANCH="backport/${PR_NUMBER}-to-${TARGET_BRANCH//\//-}"
|
||||
echo "backport_branch=$BACKPORT_BRANCH" >> "$GITHUB_OUTPUT"
|
||||
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
git checkout "$TARGET_BRANCH"
|
||||
git checkout -b "$BACKPORT_BRANCH"
|
||||
|
||||
# Download the PR's patch from GitHub (pure diff of the PR's changes)
|
||||
gh api repos/${{ github.repository }}/pulls/${PR_NUMBER} \
|
||||
-H "Accept: application/vnd.github.v3.patch" > /tmp/pr.patch
|
||||
|
||||
# Apply the patch
|
||||
HAS_CONFLICTS="false"
|
||||
CONFLICTED_FILES=""
|
||||
|
||||
if git apply --check /tmp/pr.patch 2>/dev/null; then
|
||||
# Clean apply
|
||||
git apply /tmp/pr.patch
|
||||
git add -A
|
||||
git commit -m "Backport PR #${PR_NUMBER} to ${TARGET_BRANCH}"
|
||||
elif git apply --3way /tmp/pr.patch; then
|
||||
# Applied with 3-way merge (auto-resolved)
|
||||
git add -A
|
||||
git commit -m "Backport PR #${PR_NUMBER} to ${TARGET_BRANCH}" || true
|
||||
else
|
||||
# Has real conflicts — apply what we can
|
||||
HAS_CONFLICTS="true"
|
||||
CONFLICTED_FILES=$(git diff --name-only --diff-filter=U | tr '\n' ',' | sed 's/,$//')
|
||||
# Take the incoming version for conflicted files
|
||||
git diff --name-only --diff-filter=U | while read -r file; do
|
||||
git checkout --theirs -- "$file"
|
||||
done
|
||||
git add -A
|
||||
git commit -m "Backport PR #${PR_NUMBER} to ${TARGET_BRANCH} (conflicts)" || true
|
||||
fi
|
||||
|
||||
echo "has_conflicts=$HAS_CONFLICTS" >> "$GITHUB_OUTPUT"
|
||||
echo "conflicted_files=$CONFLICTED_FILES" >> "$GITHUB_OUTPUT"
|
||||
|
||||
git push origin "$BACKPORT_BRANCH"
|
||||
|
||||
- name: Create backport PR
|
||||
if: steps.check.outputs.merged == 'true'
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
TARGET_BRANCH: ${{ steps.parse.outputs.target_branch }}
|
||||
VERSION: ${{ steps.parse.outputs.version }}
|
||||
PR_TITLE: ${{ steps.check.outputs.pr_title }}
|
||||
PR_NUMBER: ${{ github.event.issue.number }}
|
||||
BACKPORT_BRANCH: ${{ steps.apply.outputs.backport_branch }}
|
||||
run: |
|
||||
TITLE="[${VERSION}] ${PR_TITLE}"
|
||||
BODY="Backport of #${PR_NUMBER} to \`${TARGET_BRANCH}\`."
|
||||
|
||||
PR_URL=$(gh pr create \
|
||||
--base "$TARGET_BRANCH" \
|
||||
--head "$BACKPORT_BRANCH" \
|
||||
--title "$TITLE" \
|
||||
--body "$BODY")
|
||||
|
||||
gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/comments \
|
||||
-f body="✅ Backport PR created: ${PR_URL}"
|
||||
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
# Trigger 2: PR merged → process any queued cherrypick/* labels
|
||||
# ──────────────────────────────────────────────────────────────
|
||||
handle-merge:
|
||||
if: >
|
||||
github.event_name == 'pull_request' &&
|
||||
github.event.pull_request.merged == true
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Collect cherry-pick labels
|
||||
id: labels
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
run: |
|
||||
LABELS=$(gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/labels \
|
||||
--jq '[.[] | select(.name | startswith("cherrypick/")) | .name] | join(",")')
|
||||
|
||||
if [ -z "$LABELS" ]; then
|
||||
echo "No cherrypick labels found, nothing to do."
|
||||
echo "has_labels=false" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "Found labels: $LABELS"
|
||||
echo "has_labels=true" >> "$GITHUB_OUTPUT"
|
||||
echo "labels=$LABELS" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: Checkout repository
|
||||
if: steps.labels.outputs.has_labels == 'true'
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Download PR patch
|
||||
if: steps.labels.outputs.has_labels == 'true'
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
run: |
|
||||
gh api repos/${{ github.repository }}/pulls/${PR_NUMBER} \
|
||||
-H "Accept: application/vnd.github.v3.patch" > /tmp/pr.patch
|
||||
|
||||
- name: Process each cherry-pick label
|
||||
if: steps.labels.outputs.has_labels == 'true'
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
LABELS: ${{ steps.labels.outputs.labels }}
|
||||
PR_NUMBER: ${{ github.event.pull_request.number }}
|
||||
PR_TITLE: ${{ github.event.pull_request.title }}
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
IFS=',' read -ra LABEL_ARRAY <<< "$LABELS"
|
||||
for LABEL in "${LABEL_ARRAY[@]}"; do
|
||||
VERSION="${LABEL#cherrypick/}"
|
||||
TARGET_BRANCH="release/$VERSION"
|
||||
|
||||
echo "=== Processing backport to $TARGET_BRANCH ==="
|
||||
|
||||
# Verify target branch exists
|
||||
if ! git ls-remote --exit-code --heads origin "$TARGET_BRANCH" > /dev/null 2>&1; then
|
||||
gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/comments \
|
||||
-f body="❌ Cannot cherry-pick to \`$TARGET_BRANCH\`: branch does not exist."
|
||||
continue
|
||||
fi
|
||||
|
||||
BACKPORT_BRANCH="backport/${PR_NUMBER}-to-${TARGET_BRANCH//\//-}"
|
||||
|
||||
git checkout "$TARGET_BRANCH"
|
||||
git checkout -b "$BACKPORT_BRANCH"
|
||||
|
||||
# Apply the patch
|
||||
HAS_CONFLICTS="false"
|
||||
CONFLICTED_FILES=""
|
||||
|
||||
if git apply --check /tmp/pr.patch 2>/dev/null; then
|
||||
git apply /tmp/pr.patch
|
||||
git add -A
|
||||
git commit -m "Backport PR #${PR_NUMBER} to ${TARGET_BRANCH}"
|
||||
elif git apply --3way /tmp/pr.patch; then
|
||||
git add -A
|
||||
git commit -m "Backport PR #${PR_NUMBER} to ${TARGET_BRANCH}" || true
|
||||
else
|
||||
HAS_CONFLICTS="true"
|
||||
CONFLICTED_FILES=$(git diff --name-only --diff-filter=U | tr '\n' ',' | sed 's/,$//')
|
||||
git diff --name-only --diff-filter=U | while read -r file; do
|
||||
git checkout --theirs -- "$file"
|
||||
done
|
||||
git add -A
|
||||
git commit -m "Backport PR #${PR_NUMBER} to ${TARGET_BRANCH} (conflicts)" || true
|
||||
fi
|
||||
|
||||
git push origin "$BACKPORT_BRANCH"
|
||||
|
||||
# Build PR title and body
|
||||
TITLE="[${VERSION}] ${PR_TITLE}"
|
||||
BODY="Backport of #${PR_NUMBER} to \`${TARGET_BRANCH}\`."
|
||||
|
||||
PR_URL=$(gh pr create \
|
||||
--base "$TARGET_BRANCH" \
|
||||
--head "$BACKPORT_BRANCH" \
|
||||
--title "$TITLE" \
|
||||
--body "$BODY")
|
||||
|
||||
gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/comments \
|
||||
-f body="✅ Backport PR to \`$TARGET_BRANCH\` created: ${PR_URL}"
|
||||
|
||||
# Clean up for next iteration
|
||||
git checkout "$TARGET_BRANCH"
|
||||
git branch -D "$BACKPORT_BRANCH"
|
||||
done
|
||||
51
.github/workflows/pages.yaml
vendored
Normal file
51
.github/workflows/pages.yaml
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
name: Pages Workflow
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
|
||||
concurrency:
|
||||
group: "pages"
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
deploy-pages:
|
||||
name: Deploy GitHub Pages
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
|
||||
steps:
|
||||
- name: Clean Up Actions Tools Cache
|
||||
run: rm -rf /opt/hostedtoolcache
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
- name: Setup Pages
|
||||
uses: actions/configure-pages@v5
|
||||
|
||||
- name: Upload Pages Artifacts
|
||||
uses: actions/upload-pages-artifact@v4
|
||||
with:
|
||||
path: './static'
|
||||
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v4
|
||||
54
.github/workflows/release.yaml
vendored
54
.github/workflows/release.yaml
vendored
@@ -1,30 +1,64 @@
|
||||
name: CI
|
||||
name: Release Workflow
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
tags:
|
||||
tags:
|
||||
- '*'
|
||||
|
||||
jobs:
|
||||
goreleaser:
|
||||
name: GoReleaser Job
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
timeout-minutes: 60
|
||||
|
||||
steps:
|
||||
- name: Clean Up Actions Tools Cache
|
||||
run: rm -rf /opt/hostedtoolcache
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
- name: Set Up Go
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
go-version-file: go.mod
|
||||
check-latest: true
|
||||
|
||||
- name: Set Up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Authenticate to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Authenticate to DockerHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v2
|
||||
uses: goreleaser/goreleaser-action@v6
|
||||
with:
|
||||
distribution: goreleaser
|
||||
version: latest
|
||||
args: release --rm-dist
|
||||
version: "~> v2"
|
||||
args: "release --clean --timeout 60m"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}
|
||||
DOCKER_CLI_EXPERIMENTAL: "enabled"
|
||||
|
||||
41
.github/workflows/testdata.yaml
vendored
Normal file
41
.github/workflows/testdata.yaml
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
name: Refresh Hauler Testdata
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
refresh-testdata:
|
||||
name: Refresh Hauler Testdata
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
|
||||
steps:
|
||||
- name: Clean Up Actions Tools Cache
|
||||
run: rm -rf /opt/hostedtoolcache
|
||||
|
||||
- name: Checkout Repository
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Fetch Hauler Binary
|
||||
run: curl -sfL https://get.hauler.dev | bash
|
||||
|
||||
- name: Login to GitHub Container Registry and Docker Hub Container Registry
|
||||
run: |
|
||||
hauler login ghcr.io --username ${{ github.repository_owner }} --password ${{ secrets.GITHUB_TOKEN }}
|
||||
hauler login docker.io --username ${{ secrets.DOCKERHUB_USERNAME }} --password ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Process Images for Tests
|
||||
run: |
|
||||
hauler store add image nginx:1.25-alpine
|
||||
hauler store add image nginx:1.26-alpine
|
||||
hauler store add image busybox
|
||||
hauler store add image busybox:stable
|
||||
hauler store add image gcr.io/distroless/base
|
||||
hauler store add image gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
|
||||
|
||||
- name: Push Store Contents to Hauler-Dev GitHub Container Registry
|
||||
run: |
|
||||
hauler store copy registry://ghcr.io/${{ github.repository_owner }}
|
||||
|
||||
- name: Verify Hauler Store Contents
|
||||
run: hauler store info
|
||||
484
.github/workflows/tests.yaml
vendored
Normal file
484
.github/workflows/tests.yaml
vendored
Normal file
@@ -0,0 +1,484 @@
|
||||
name: Tests Workflow
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
unit-tests:
|
||||
name: Unit Tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
|
||||
steps:
|
||||
- name: Clean Up Actions Tools Cache
|
||||
run: rm -rf /opt/hostedtoolcache
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
- name: Set Up Go
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
check-latest: true
|
||||
|
||||
- name: Install Go Releaser
|
||||
uses: goreleaser/goreleaser-action@v6
|
||||
with:
|
||||
install-only: true
|
||||
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y make
|
||||
sudo apt-get install -y build-essential
|
||||
|
||||
- name: Run Makefile Targets
|
||||
run: |
|
||||
make build-all
|
||||
|
||||
- name: Upload Hauler Binaries
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: hauler-binaries
|
||||
path: dist/*
|
||||
|
||||
- name: Upload Coverage Report
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: coverage-report
|
||||
path: coverage.out
|
||||
|
||||
integration-tests:
|
||||
name: Integration Tests
|
||||
runs-on: ubuntu-latest
|
||||
needs: [unit-tests]
|
||||
timeout-minutes: 30
|
||||
|
||||
steps:
|
||||
- name: Clean Up Actions Tools Cache
|
||||
run: rm -rf /opt/hostedtoolcache
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
- name: Install Dependencies
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y unzip
|
||||
sudo apt-get install -y tree
|
||||
|
||||
- name: Download Artifacts
|
||||
uses: actions/download-artifact@v6
|
||||
with:
|
||||
name: hauler-binaries
|
||||
path: dist
|
||||
|
||||
- name: Prepare Hauler for Tests
|
||||
run: |
|
||||
pwd
|
||||
ls -la
|
||||
ls -la dist/
|
||||
chmod -R 755 dist/ testdata/certificate-script.sh
|
||||
sudo mv dist/hauler_linux_amd64_v1/hauler /usr/local/bin/hauler
|
||||
./testdata/certificate-script.sh && sudo chown -R $(whoami) testdata/certs/
|
||||
|
||||
- name: Verify - hauler version
|
||||
run: |
|
||||
hauler version
|
||||
|
||||
- name: Verify - hauler completion
|
||||
run: |
|
||||
hauler completion
|
||||
hauler completion bash
|
||||
hauler completion fish
|
||||
hauler completion powershell
|
||||
hauler completion zsh
|
||||
|
||||
- name: Verify - hauler help
|
||||
run: |
|
||||
hauler help
|
||||
|
||||
- name: Verify - hauler login
|
||||
run: |
|
||||
hauler login --help
|
||||
hauler login docker.io --username ${{ secrets.DOCKERHUB_USERNAME }} --password ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
echo ${{ secrets.GITHUB_TOKEN }} | hauler login ghcr.io --username ${{ github.repository_owner }} --password-stdin
|
||||
|
||||
- name: Verify - hauler store
|
||||
run: |
|
||||
hauler store --help
|
||||
|
||||
- name: Verify - hauler store add
|
||||
run: |
|
||||
hauler store add --help
|
||||
|
||||
- name: Verify - hauler store add chart
|
||||
run: |
|
||||
hauler store add chart --help
|
||||
# verify via helm repository
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.4
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.3 --verify
|
||||
# verify via oci helm repository
|
||||
hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev
|
||||
hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --version 1.0.6
|
||||
hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --version 1.0.4 --verify
|
||||
# verify via local helm repository
|
||||
curl -sfOL https://github.com/rancherfederal/rancher-cluster-templates/releases/download/rancher-cluster-templates-0.5.2/rancher-cluster-templates-0.5.2.tgz
|
||||
hauler store add chart rancher-cluster-templates-0.5.2.tgz --repo .
|
||||
curl -sfOL https://github.com/rancherfederal/rancher-cluster-templates/releases/download/rancher-cluster-templates-0.5.1/rancher-cluster-templates-0.5.1.tgz
|
||||
hauler store add chart rancher-cluster-templates-0.5.1.tgz --repo . --version 0.5.1
|
||||
curl -sfOL https://github.com/rancherfederal/rancher-cluster-templates/releases/download/rancher-cluster-templates-0.5.0/rancher-cluster-templates-0.5.0.tgz
|
||||
hauler store add chart rancher-cluster-templates-0.5.0.tgz --repo . --version 0.5.0 --verify
|
||||
# verify via the hauler store contents
|
||||
hauler store info
|
||||
|
||||
- name: Verify - hauler store add chart --rewrite
|
||||
run: |
|
||||
# add chart with rewrite flag
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.4 --rewrite custom-path/rancher:2.8.4
|
||||
# verify new ref in store
|
||||
hauler store info | grep 'custom-path/rancher:2.8.4'
|
||||
# confirm leading slash trimmed from rewrite
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.4 --rewrite /custom-path/rancher:2.8.4
|
||||
# verify no leading slash
|
||||
! hauler store info | grep '/custom-path/rancher:2.8.4'
|
||||
# confirm old tag used if not specified
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.4 --rewrite /custom-path/rancher
|
||||
# confirm tag
|
||||
hauler store info | grep '2.8.4'
|
||||
|
||||
- name: Verify - hauler store add file
|
||||
run: |
|
||||
hauler store add file --help
|
||||
# verify via remote file
|
||||
hauler store add file https://get.rke2.io/install.sh
|
||||
hauler store add file https://get.rke2.io/install.sh --name rke2-install.sh
|
||||
# verify via local file
|
||||
hauler store add file testdata/hauler-manifest.yaml
|
||||
hauler store add file testdata/hauler-manifest.yaml --name hauler-manifest-local.yaml
|
||||
# verify via the hauler store contents
|
||||
hauler store info
|
||||
|
||||
- name: Verify - hauler store add image
|
||||
run: |
|
||||
hauler store add image --help
|
||||
# verify via image reference
|
||||
hauler store add image ghcr.io/hauler-dev/library/busybox
|
||||
# verify via image reference with version and platform
|
||||
hauler store add image ghcr.io/hauler-dev/library/busybox:stable --platform linux/amd64
|
||||
# verify via image reference with full reference
|
||||
hauler store add image ghcr.io/hauler-dev/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
|
||||
# verify via the hauler store contents
|
||||
hauler store info
|
||||
|
||||
- name: Verify - hauler store add image --rewrite
|
||||
run: |
|
||||
# add image with rewrite flag
|
||||
hauler store add image ghcr.io/hauler-dev/library/busybox --rewrite custom-registry.io/custom-path/busybox:latest
|
||||
# verify new ref in store
|
||||
hauler store info | grep 'custom-registry.io/custom-path/busybox:latest'
|
||||
# confirm leading slash trimmed from rewrite
|
||||
hauler store add image ghcr.io/hauler-dev/library/busybox --rewrite /custom-path/busybox:latest
|
||||
# verify no leading slash
|
||||
! hauler store info | grep '/custom-path/busybox:latest'
|
||||
# confirm old tag used if not specified
|
||||
hauler store add image ghcr.io/hauler-dev/library/busybox:stable --rewrite /custom-path/busybox
|
||||
# confirm tag
|
||||
hauler store info | grep ':stable'
|
||||
# confirm old registry used if not specified
|
||||
hauler store add image ghcr.io/hauler-dev/library/nginx:1.25-alpine --rewrite custom-path/nginx
|
||||
# verify existing registry associated with rewritten image in store
|
||||
hauler store info | grep 'ghcr.io/custom-path/nginx'
|
||||
|
||||
- name: Verify - hauler store copy
|
||||
run: |
|
||||
hauler store copy --help
|
||||
# need more tests here
|
||||
|
||||
- name: Verify - hauler store extract
|
||||
run: |
|
||||
hauler store extract --help
|
||||
# verify via extracting hauler store content
|
||||
hauler store extract hauler/hauler-manifest-local.yaml:latest
|
||||
# view extracted content from store
|
||||
cat hauler-manifest-local.yaml
|
||||
|
||||
- name: Verify - hauler store info
|
||||
run: |
|
||||
hauler store info --help
|
||||
# verify via table output
|
||||
hauler store info --output table
|
||||
# verify via json output
|
||||
hauler store info --output json
|
||||
# verify via filtered output (chart)
|
||||
hauler store info --type chart
|
||||
# verify via filtered output (file)
|
||||
hauler store info --type file
|
||||
# verify via filtered output (image)
|
||||
hauler store info --type image
|
||||
# verify store directory structure
|
||||
tree -hC store
|
||||
|
||||
- name: Verify - hauler store save
|
||||
run: |
|
||||
hauler store save --help
|
||||
# verify via save
|
||||
hauler store save
|
||||
# verify via save with filename
|
||||
hauler store save --filename store.tar.zst
|
||||
# verify via save with filename and platform (amd64)
|
||||
hauler store save --filename store-amd64.tar.zst --platform linux/amd64
|
||||
|
||||
- name: Remove Hauler Store Contents
|
||||
run: |
|
||||
rm -rf store
|
||||
hauler store info
|
||||
|
||||
- name: Verify - hauler store load
|
||||
run: |
|
||||
hauler store load --help
|
||||
# verify via load
|
||||
hauler store load
|
||||
# verify via load with multiple files
|
||||
hauler store load --filename haul.tar.zst --filename store.tar.zst
|
||||
# confirm store contents
|
||||
tar -xOf store.tar.zst index.json
|
||||
# verify via load with filename and temp directory
|
||||
hauler store load --filename store.tar.zst --tempdir /opt
|
||||
# verify via load with filename and platform (amd64)
|
||||
hauler store load --filename store-amd64.tar.zst
|
||||
|
||||
- name: Verify Hauler Store Contents
|
||||
run: |
|
||||
# verify store
|
||||
hauler store info
|
||||
# verify store directory structure
|
||||
tree -hC store
|
||||
|
||||
- name: Verify - docker load
|
||||
run: |
|
||||
docker load --help
|
||||
# verify via load
|
||||
docker load --input store-amd64.tar.zst
|
||||
|
||||
- name: Verify Docker Images Contents
|
||||
run: |
|
||||
docker images --help
|
||||
# verify images
|
||||
docker images --all
|
||||
|
||||
- name: Remove Hauler Store Contents
|
||||
run: |
|
||||
rm -rf store haul.tar.zst store.tar.zst store-amd64.tar.zst
|
||||
hauler store info
|
||||
|
||||
- name: Verify - hauler store sync
|
||||
run: |
|
||||
hauler store sync --help
|
||||
# verify via sync
|
||||
hauler store sync --filename testdata/hauler-manifest-pipeline.yaml
|
||||
# verify via sync with multiple files
|
||||
hauler store sync --filename testdata/hauler-manifest-pipeline.yaml --filename testdata/hauler-manifest.yaml
|
||||
# need more tests here
|
||||
|
||||
- name: Verify - hauler store serve
|
||||
run: |
|
||||
hauler store serve --help
|
||||
|
||||
- name: Verify - hauler store serve registry
|
||||
run: |
|
||||
hauler store serve registry --help
|
||||
# verify via registry
|
||||
hauler store serve registry &
|
||||
until curl -sf http://localhost:5000/v2/_catalog; do : ; done
|
||||
pkill -f "hauler store serve registry"
|
||||
# verify via registry with different port
|
||||
hauler store serve registry --port 5001 &
|
||||
until curl -sf http://localhost:5001/v2/_catalog; do : ; done
|
||||
pkill -f "hauler store serve registry --port 5001"
|
||||
# verify via registry with different port and readonly
|
||||
hauler store serve registry --port 5001 --readonly &
|
||||
until curl -sf http://localhost:5001/v2/_catalog; do : ; done
|
||||
pkill -f "hauler store serve registry --port 5001 --readonly"
|
||||
# verify via registry with different port with readonly with tls
|
||||
# hauler store serve registry --port 5001 --readonly --tls-cert testdata/certs/server-cert.crt --tls-key testdata/certs/server-cert.key &
|
||||
# until curl -sf --cacert testdata/certs/cacerts.pem https://localhost:5001/v2/_catalog; do : ; done
|
||||
# pkill -f "hauler store serve registry --port 5001 --readonly --tls-cert testdata/certs/server-cert.crt --tls-key testdata/certs/server-cert.key"
|
||||
|
||||
- name: Verify - hauler store serve fileserver
|
||||
run: |
|
||||
hauler store serve fileserver --help
|
||||
# verify via fileserver
|
||||
hauler store serve fileserver &
|
||||
until curl -sf http://localhost:8080; do : ; done
|
||||
pkill -f "hauler store serve fileserver"
|
||||
# verify via fileserver with different port
|
||||
hauler store serve fileserver --port 8000 &
|
||||
until curl -sf http://localhost:8000; do : ; done
|
||||
pkill -f "hauler store serve fileserver --port 8000"
|
||||
# verify via fileserver with different port and timeout
|
||||
hauler store serve fileserver --port 8000 --timeout 120 &
|
||||
until curl -sf http://localhost:8000; do : ; done
|
||||
pkill -f "hauler store serve fileserver --port 8000 --timeout 120"
|
||||
# verify via fileserver with different port with timeout and tls
|
||||
# hauler store serve fileserver --port 8000 --timeout 120 --tls-cert testdata/certs/server-cert.crt --tls-key testdata/certs/server-cert.key &
|
||||
# until curl -sf --cacert testdata/certs/cacerts.pem https://localhost:8000; do : ; done
|
||||
# pkill -f "hauler store serve fileserver --port 8000 --timeout 120 --tls-cert testdata/certs/server-cert.crt --tls-key testdata/certs/server-cert.key"
|
||||
|
||||
- name: Verify Hauler Store Contents
|
||||
run: |
|
||||
# verify store
|
||||
hauler store info
|
||||
# verify store directory structure
|
||||
tree -hC store
|
||||
# verify registry directory structure
|
||||
tree -hC registry
|
||||
# verify fileserver directory structure
|
||||
tree -hC fileserver
|
||||
|
||||
- name: Verify - hauler store remove (image)
|
||||
run: |
|
||||
hauler store remove --help
|
||||
# add test images
|
||||
hauler store add image ghcr.io/hauler-dev/library/nginx:1.25-alpine
|
||||
hauler store add image ghcr.io/hauler-dev/library/nginx:1.26-alpine
|
||||
# confirm artifacts
|
||||
hauler store info | grep 'nginx:1.25'
|
||||
hauler store info | grep 'nginx:1.26'
|
||||
# count blobs before delete
|
||||
BLOBS_BEFORE=$(find store/blobs/sha256 -type f | wc -l | xargs)
|
||||
echo "blobs before deletion: $BLOBS_BEFORE"
|
||||
# delete one artifact
|
||||
hauler store remove nginx:1.25 --force
|
||||
# verify artifact removed
|
||||
! hauler store info | grep -q "nginx:1.25"
|
||||
# non-deleted artifact exists
|
||||
hauler store info | grep -q "nginx:1.26"
|
||||
# count blobs after delete
|
||||
BLOBS_AFTER=$(find store/blobs/sha256 -type f | wc -l | xargs)
|
||||
echo "blobs after deletion: $BLOBS_AFTER"
|
||||
# verify only unreferenced blobs removed
|
||||
if [ "$BLOBS_AFTER" -ge "$BLOBS_BEFORE" ]; then
|
||||
echo "ERROR: No blobs were cleaned up"
|
||||
exit 1
|
||||
fi
|
||||
if [ "$BLOBS_AFTER" -eq 0 ]; then
|
||||
echo "ERROR: All blobs deleted (shared layers removed)"
|
||||
exit 1
|
||||
fi
|
||||
# verify remaining image not missing layers
|
||||
hauler store extract ghcr.io/hauler-dev/library/nginx:1.26-alpine
|
||||
|
||||
- name: Verify - hauler store remove (chart)
|
||||
run: |
|
||||
hauler store remove --help
|
||||
# add test charts
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.4
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.5
|
||||
# confirm artifacts
|
||||
hauler store info | grep '2.8.4'
|
||||
hauler store info | grep '2.8.5'
|
||||
# count blobs before delete
|
||||
BLOBS_BEFORE=$(find store/blobs/sha256 -type f | wc -l | xargs)
|
||||
echo "blobs before deletion: $BLOBS_BEFORE"
|
||||
# delete one artifact
|
||||
hauler store remove 2.8.4 --force
|
||||
# verify artifact removed
|
||||
! hauler store info | grep -q "2.8.4"
|
||||
# non-deleted artifact exists
|
||||
hauler store info | grep -q "2.8.5"
|
||||
# count blobs after delete
|
||||
BLOBS_AFTER=$(find store/blobs/sha256 -type f | wc -l | xargs)
|
||||
echo "blobs after deletion: $BLOBS_AFTER"
|
||||
# verify only unreferenced blobs removed
|
||||
if [ "$BLOBS_AFTER" -ge "$BLOBS_BEFORE" ]; then
|
||||
echo "ERROR: No blobs were cleaned up"
|
||||
exit 1
|
||||
fi
|
||||
if [ "$BLOBS_AFTER" -eq 0 ]; then
|
||||
echo "ERROR: All blobs deleted (shared layers removed)"
|
||||
exit 1
|
||||
fi
|
||||
# verify remaining chart not missing layers
|
||||
hauler store extract hauler/rancher:2.8.5
|
||||
|
||||
- name: Verify - hauler store remove (file)
|
||||
run: |
|
||||
hauler store remove --help
|
||||
# add test files
|
||||
hauler store add file https://get.hauler.dev
|
||||
hauler store add file https://get.rke2.io/install.sh
|
||||
# confirm artifacts
|
||||
hauler store info | grep 'get.hauler.dev'
|
||||
hauler store info | grep 'install.sh'
|
||||
# count blobs before delete
|
||||
BLOBS_BEFORE=$(find store/blobs/sha256 -type f | wc -l | xargs)
|
||||
echo "blobs before deletion: $BLOBS_BEFORE"
|
||||
# delete one artifact
|
||||
hauler store remove get.hauler.dev --force
|
||||
# verify artifact removed
|
||||
! hauler store info | grep -q "get.hauler.dev"
|
||||
# non-deleted artifact exists
|
||||
hauler store info | grep -q "install.sh"
|
||||
# count blobs after delete
|
||||
BLOBS_AFTER=$(find store/blobs/sha256 -type f | wc -l | xargs)
|
||||
echo "blobs after deletion: $BLOBS_AFTER"
|
||||
# verify only unreferenced blobs removed
|
||||
if [ "$BLOBS_AFTER" -ge "$BLOBS_BEFORE" ]; then
|
||||
echo "ERROR: No blobs were cleaned up"
|
||||
exit 1
|
||||
fi
|
||||
if [ "$BLOBS_AFTER" -eq 0 ]; then
|
||||
echo "ERROR: All blobs deleted (shared layers removed)"
|
||||
exit 1
|
||||
fi
|
||||
# verify remaining file not missing layers
|
||||
hauler store extract hauler/install.sh:latest
|
||||
|
||||
- name: Create Hauler Report
|
||||
run: |
|
||||
hauler version >> hauler-report.txt
|
||||
hauler store info --output table >> hauler-report.txt
|
||||
|
||||
- name: Remove Hauler Store Contents
|
||||
run: |
|
||||
rm -rf store registry fileserver
|
||||
hauler store info
|
||||
|
||||
- name: Upload Hauler Report
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: hauler-report
|
||||
path: hauler-report.txt
|
||||
|
||||
- name: Verify - hauler logout
|
||||
run: |
|
||||
hauler logout --help
|
||||
hauler logout docker.io
|
||||
hauler logout ghcr.io
|
||||
|
||||
- name: Remove Hauler Store Credentials
|
||||
run: |
|
||||
rm -rf ~/.docker/config.json
|
||||
39
.github/workflows/unittest.yaml
vendored
39
.github/workflows/unittest.yaml
vendored
@@ -1,39 +0,0 @@
|
||||
name: Unit Test
|
||||
on:
|
||||
push:
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- ".github/**"
|
||||
- "!.github/workflows/unittest.yaml"
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- ".github/**"
|
||||
- "!.github/workflows/unitcoverage.yaml"
|
||||
workflow_dispatch: {}
|
||||
jobs:
|
||||
test:
|
||||
name: Unit Tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.21.x
|
||||
- name: Run Unit Tests
|
||||
run: |
|
||||
go test -race -covermode=atomic -coverprofile=coverage.out ./pkg/... ./internal/... ./cmd/...
|
||||
- name: On Failure, Launch Debug Session
|
||||
if: ${{ failure() }}
|
||||
uses: mxschmitt/action-tmate@v3
|
||||
timeout-minutes: 5
|
||||
- name: Upload Results To Codecov
|
||||
uses: codecov/codecov-action@v1
|
||||
with:
|
||||
files: ./coverage.out
|
||||
verbose: true # optional (default = false)
|
||||
24
.gitignore
vendored
24
.gitignore
vendored
@@ -1,9 +1,4 @@
|
||||
.DS_Store
|
||||
|
||||
# Vagrant
|
||||
.vagrant
|
||||
|
||||
# Editor directories and files
|
||||
**/.DS_Store
|
||||
.idea
|
||||
.vscode
|
||||
*.suo
|
||||
@@ -12,19 +7,12 @@
|
||||
*.sln
|
||||
*.sw?
|
||||
*.dir-locals.el
|
||||
|
||||
# old, ad-hoc ignores
|
||||
artifacts
|
||||
local-artifacts
|
||||
airgap-scp.sh
|
||||
|
||||
# test artifacts
|
||||
*.tar*
|
||||
*.out
|
||||
|
||||
# generated
|
||||
dist/
|
||||
tmp/
|
||||
bin/
|
||||
/store/
|
||||
/registry/
|
||||
registry/
|
||||
fileserver/
|
||||
cmd/hauler/binaries
|
||||
testdata/certs/
|
||||
coverage.out
|
||||
|
||||
@@ -1,14 +1,23 @@
|
||||
version: 2
|
||||
|
||||
project_name: hauler
|
||||
before:
|
||||
hooks:
|
||||
- go mod tidy
|
||||
- go mod download
|
||||
- go fmt ./...
|
||||
- go vet ./...
|
||||
- go test ./... -cover -race -covermode=atomic -coverprofile=coverage.out
|
||||
|
||||
release:
|
||||
prerelease: auto
|
||||
make_latest: false
|
||||
|
||||
env:
|
||||
- vpkg=github.com/rancherfederal/hauler/pkg/version
|
||||
- vpkg=hauler.dev/go/hauler/internal/version
|
||||
|
||||
builds:
|
||||
- main: cmd/hauler/main.go
|
||||
- dir: ./cmd/hauler/.
|
||||
goos:
|
||||
- linux
|
||||
- darwin
|
||||
@@ -17,22 +26,65 @@ builds:
|
||||
- amd64
|
||||
- arm64
|
||||
ldflags:
|
||||
- -s -w -X {{ .Env.vpkg }}.GitVersion={{ .Version }} -X {{ .Env.vpkg }}.commit={{ .ShortCommit }} -X {{ .Env.vpkg }}.buildDate={{ .Date }}
|
||||
- -s -w -X {{ .Env.vpkg }}.gitVersion={{ .Version }} -X {{ .Env.vpkg }}.gitCommit={{ .ShortCommit }} -X {{ .Env.vpkg }}.gitTreeState={{if .IsGitDirty}}dirty{{else}}clean{{end}} -X {{ .Env.vpkg }}.buildDate={{ .Date }}
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
- GOEXPERIMENT=boringcrypto
|
||||
|
||||
universal_binaries:
|
||||
- replace: false
|
||||
|
||||
changelog:
|
||||
skip: false
|
||||
disable: false
|
||||
use: git
|
||||
|
||||
brews:
|
||||
homebrew_casks:
|
||||
- name: hauler
|
||||
tap:
|
||||
owner: rancherfederal
|
||||
repository:
|
||||
owner: hauler-dev
|
||||
name: homebrew-tap
|
||||
token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}"
|
||||
folder: Formula
|
||||
description: "Hauler CLI"
|
||||
description: "Hauler: Airgap Swiss Army Knife"
|
||||
|
||||
dockers_v2:
|
||||
- id: hauler
|
||||
dockerfile: Dockerfile
|
||||
flags:
|
||||
- "--target=release"
|
||||
images:
|
||||
- docker.io/hauler/hauler
|
||||
- ghcr.io/hauler-dev/hauler
|
||||
tags:
|
||||
- "{{ .Version }}"
|
||||
platforms:
|
||||
- linux/amd64
|
||||
- linux/arm64
|
||||
labels:
|
||||
"classification": "UNCLASSIFIED"
|
||||
"org.opencontainers.image.created": "{{.Date}}"
|
||||
"org.opencontainers.image.description": "Hauler: Airgap Swiss Army Knife"
|
||||
"org.opencontainers.image.name": "{{.ProjectName}}"
|
||||
"org.opencontainers.image.revision": "{{.FullCommit}}"
|
||||
"org.opencontainers.image.source": "{{.GitURL}}"
|
||||
"org.opencontainers.image.version": "{{.Version}}"
|
||||
|
||||
- id: hauler-debug
|
||||
dockerfile: Dockerfile
|
||||
flags:
|
||||
- "--target=debug"
|
||||
images:
|
||||
- docker.io/hauler/hauler-debug
|
||||
- ghcr.io/hauler-dev/hauler-debug
|
||||
tags:
|
||||
- "{{ .Version }}"
|
||||
platforms:
|
||||
- linux/amd64
|
||||
- linux/arm64
|
||||
labels:
|
||||
"classification": "UNCLASSIFIED"
|
||||
"org.opencontainers.image.created": "{{.Date}}"
|
||||
"org.opencontainers.image.description": "Hauler: Airgap Swiss Army Knife"
|
||||
"org.opencontainers.image.name": "{{.ProjectName}}-debug"
|
||||
"org.opencontainers.image.revision": "{{.FullCommit}}"
|
||||
"org.opencontainers.image.source": "{{.GitURL}}"
|
||||
"org.opencontainers.image.version": "{{.Version}}"
|
||||
|
||||
155
DEVELOPMENT.md
Normal file
155
DEVELOPMENT.md
Normal file
@@ -0,0 +1,155 @@
|
||||
# Development Guide
|
||||
|
||||
This document covers how to build `hauler` locally and how the project's branching strategy works.
|
||||
|
||||
It's intended for contributors making code changes or maintainers managing releases.
|
||||
|
||||
---
|
||||
|
||||
## Local Build
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- **Git** - version control of the repository
|
||||
- **Go** — check `go.mod` for the minimum required version
|
||||
- **Make** - optional... for common commands used for builds
|
||||
- **Docker** - optional... for container image builds
|
||||
|
||||
### Clone the Repository
|
||||
|
||||
```bash
|
||||
git clone https://github.com/hauler-dev/hauler.git
|
||||
cd hauler
|
||||
```
|
||||
|
||||
### Build the Binary
|
||||
|
||||
Using `make`...
|
||||
|
||||
```bash
|
||||
# run this command from the project root
|
||||
make build
|
||||
|
||||
# the compiled binary will be output to a directory structure and you can run it directly...
|
||||
./dist/hauler_linux_amd64_v1/hauler
|
||||
./dist/hauler_linux_arm64_v8.0/hauler
|
||||
./dist/hauler_darwin_amd64_v1/hauler
|
||||
./dist/hauler_darwin_arm64_v8.0/hauler
|
||||
./dist/hauler_windows_amd64_v1/hauler.exe
|
||||
./dist/hauler_windows_arm64_v8.0/hauler.exe
|
||||
```
|
||||
|
||||
Using `go`...
|
||||
|
||||
```bash
|
||||
# run this command from the project root
|
||||
go build -o hauler ./cmd/hauler
|
||||
|
||||
# the compiled binary will be output to the project root and you can run it directly...
|
||||
./hauler version
|
||||
```
|
||||
|
||||
### Run Tests
|
||||
|
||||
Using `make`...
|
||||
|
||||
```bash
|
||||
make test
|
||||
```
|
||||
|
||||
Using `go`...
|
||||
|
||||
```bash
|
||||
go test ./...
|
||||
```
|
||||
|
||||
### Useful Tips
|
||||
|
||||
- The `--store` flag defaults to `./store` in the current working directory during local testing, so running `./hauler store add ...` from the project root is safe and self-contained. Use `rm -rf store` in the working directory to clear.
|
||||
- Set `--log-level debug` when developing to get verbose output.
|
||||
|
||||
---
|
||||
|
||||
## Branching Strategy
|
||||
|
||||
Hauler uses a **main-first, release branch** model. All development flows through `main` and `release/x.x` branches are maintained for each minor version to support patching older release lines in parallel.
|
||||
|
||||
### Branch Structure
|
||||
|
||||
```
|
||||
main ← source of truth, all development targets here
|
||||
release/1.3 ← 1.3.x patch line
|
||||
release/1.4 ← 1.4.x patch line
|
||||
```
|
||||
|
||||
Release tags (`v1.4.1`, `v1.3.2`, etc.) are always cut from the corresponding `release/X.Y` branch, never directly from `main`.
|
||||
|
||||
### Where to Target Your Changes
|
||||
|
||||
All pull requests should target `main` by default and maintainers are responsible for cherry picking fixes onto release branches as part of the patch release process.
|
||||
|
||||
| Change Type | Target branch |
|
||||
| :---------: | :-----------: |
|
||||
| New features | `main` |
|
||||
| Bug fixes | `main` |
|
||||
| Security patches | `main` (expedited backport to affected branches) |
|
||||
| Release-specific fix (see below) | `release/X.Y` directly |
|
||||
|
||||
### Creating a New Release Branch
|
||||
|
||||
When `main` is ready to ship a new minor version, a release branch is cut:
|
||||
|
||||
```bash
|
||||
git checkout main
|
||||
git pull origin main
|
||||
git checkout -b release/1.4
|
||||
git push origin release/1.4
|
||||
```
|
||||
|
||||
The first release is then tagged from that branch:
|
||||
|
||||
```bash
|
||||
git tag v1.4.0
|
||||
git push origin v1.4.0
|
||||
```
|
||||
|
||||
Development on `main` immediately continues toward the next minor.
|
||||
|
||||
### Backporting a Fix to a Release Branch
|
||||
|
||||
When a bug fix merged to `main` also needs to apply to an active release line, cherry-pick the commit onto the release branch and open a PR targeting it:
|
||||
|
||||
```bash
|
||||
git checkout release/1.3
|
||||
git pull origin release/1.3
|
||||
git checkout -b backport/fix-description-to-1.3
|
||||
git cherry-pick <commit-sha>
|
||||
git push origin backport/fix-description-to-1.3
|
||||
```
|
||||
|
||||
Open a PR targeting `release/1.3` and reference the original PR in the description. If the cherry-pick doesn't apply cleanly, resolve conflicts and note them in the PR.
|
||||
|
||||
### Fixes That Only Apply to an Older Release Line
|
||||
|
||||
Sometimes a bug exists in an older release but the relevant code has been removed or significantly changed in `main` — making a forward-port unnecessary or nonsensical. In these cases, it's acceptable to open a PR directly against the affected `release/X.Y` branch.
|
||||
|
||||
When doing this, the PR description must explain:
|
||||
|
||||
- Which versions are affected
|
||||
- Why the fix does not apply to `main` or newer release lines (e.g., "this code path was removed in 1.4 when X was refactored")
|
||||
|
||||
This keeps the history auditable and prevents future contributors from wondering why the fix never made it forward.
|
||||
|
||||
### Summary
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────► main (next minor)
|
||||
│
|
||||
│ cherry-pick / backport PRs
|
||||
│ ─────────────────────────► release/1.4 (v1.4.0, v1.4.1 ...)
|
||||
│
|
||||
│ ─────────────────────────► release/1.3 (v1.3.0, v1.3.1 ...)
|
||||
│
|
||||
│ direct fix (older-only bug)
|
||||
│ ─────────────────────────► release/1.2 (critical fixes only)
|
||||
```
|
||||
43
Dockerfile
Normal file
43
Dockerfile
Normal file
@@ -0,0 +1,43 @@
|
||||
# builder stage
|
||||
FROM registry.suse.com/bci/bci-base:15.7 AS builder
|
||||
ARG TARGETPLATFORM
|
||||
|
||||
# fetched from goreleaser build process
|
||||
COPY $TARGETPLATFORM/hauler /hauler
|
||||
|
||||
RUN echo "hauler:x:1001:1001::/home/hauler:" > /etc/passwd \
|
||||
&& echo "hauler:x:1001:hauler" > /etc/group \
|
||||
&& mkdir /home/hauler \
|
||||
&& mkdir /store \
|
||||
&& mkdir /fileserver \
|
||||
&& mkdir /registry
|
||||
|
||||
# release stage
|
||||
FROM scratch AS release
|
||||
|
||||
COPY --from=builder /var/lib/ca-certificates/ca-bundle.pem /etc/ssl/certs/ca-certificates.crt
|
||||
COPY --from=builder /etc/passwd /etc/passwd
|
||||
COPY --from=builder /etc/group /etc/group
|
||||
COPY --from=builder --chown=hauler:hauler /home/hauler/. /home/hauler
|
||||
COPY --from=builder --chown=hauler:hauler /tmp/. /tmp
|
||||
COPY --from=builder --chown=hauler:hauler /store/. /store
|
||||
COPY --from=builder --chown=hauler:hauler /registry/. /registry
|
||||
COPY --from=builder --chown=hauler:hauler /fileserver/. /fileserver
|
||||
COPY --from=builder --chown=hauler:hauler /hauler /hauler
|
||||
|
||||
USER hauler
|
||||
ENTRYPOINT [ "/hauler" ]
|
||||
|
||||
# debug stage
|
||||
FROM alpine AS debug
|
||||
|
||||
COPY --from=builder /var/lib/ca-certificates/ca-bundle.pem /etc/ssl/certs/ca-certificates.crt
|
||||
COPY --from=builder /etc/passwd /etc/passwd
|
||||
COPY --from=builder /etc/group /etc/group
|
||||
COPY --from=builder --chown=hauler:hauler /home/hauler/. /home/hauler
|
||||
COPY --from=builder --chown=hauler:hauler /hauler /usr/local/bin/hauler
|
||||
|
||||
RUN apk --no-cache add curl
|
||||
|
||||
USER hauler
|
||||
WORKDIR /home/hauler
|
||||
177
LICENSE
Normal file
177
LICENSE
Normal file
@@ -0,0 +1,177 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
56
Makefile
56
Makefile
@@ -1,35 +1,49 @@
|
||||
SHELL:=/bin/bash
|
||||
GO_BUILD_ENV=GOOS=linux GOARCH=amd64
|
||||
GO_FILES=$(shell go list ./... | grep -v /vendor/)
|
||||
# Makefile for hauler
|
||||
|
||||
BUILD_VERSION=$(shell cat VERSION)
|
||||
BUILD_TAG=$(BUILD_VERSION)
|
||||
# set shell
|
||||
SHELL=/bin/bash
|
||||
|
||||
.SILENT:
|
||||
# set go variables
|
||||
GO_FILES=./...
|
||||
GO_COVERPROFILE=coverage.out
|
||||
|
||||
all: fmt vet install test
|
||||
# set build variables
|
||||
BIN_DIRECTORY=bin
|
||||
DIST_DIRECTORY=dist
|
||||
|
||||
# local build of hauler for current platform
|
||||
# references/configuration from .goreleaser.yaml
|
||||
build:
|
||||
mkdir bin;\
|
||||
GOENV=GOARCH=$(uname -m) CGO_ENABLED=0 go build -o bin ./cmd/...;\
|
||||
goreleaser build --clean --snapshot --timeout 60m --single-target
|
||||
|
||||
build-all: fmt vet
|
||||
goreleaser build --rm-dist --snapshot
|
||||
|
||||
# local build of hauler for all platforms
|
||||
# references/configuration from .goreleaser.yaml
|
||||
build-all:
|
||||
goreleaser build --clean --snapshot --timeout 60m
|
||||
|
||||
# local release of hauler for all platforms
|
||||
# references/configuration from .goreleaser.yaml
|
||||
release:
|
||||
goreleaser release --clean --snapshot --timeout 60m
|
||||
|
||||
# install depedencies
|
||||
install:
|
||||
GOENV=GOARCH=$(uname -m) CGO_ENABLED=0 go install ./cmd/...;\
|
||||
|
||||
vet:
|
||||
go vet $(GO_FILES)
|
||||
go mod tidy
|
||||
go mod download
|
||||
CGO_ENABLED=0 go install ./cmd/...
|
||||
|
||||
# format go code
|
||||
fmt:
|
||||
go fmt $(GO_FILES)
|
||||
|
||||
# vet go code
|
||||
vet:
|
||||
go vet $(GO_FILES)
|
||||
|
||||
# test go code
|
||||
test:
|
||||
go test $(GO_FILES) -cover
|
||||
|
||||
integration_test:
|
||||
go test -tags=integration $(GO_FILES)
|
||||
go test $(GO_FILES) -cover -race -covermode=atomic -coverprofile=$(GO_COVERPROFILE)
|
||||
|
||||
# cleanup artifacts
|
||||
clean:
|
||||
rm -rf bin 2> /dev/null
|
||||
rm -rf $(BIN_DIRECTORY) $(DIST_DIRECTORY) $(GO_COVERPROFILE)
|
||||
|
||||
72
README.md
72
README.md
@@ -1,31 +1,73 @@
|
||||
# Rancher Government Hauler
|
||||
|
||||

|
||||
|
||||
## Airgap Swiss Army Knife
|
||||
|
||||
> ⚠️ This project is still in active development and *not* Generally Available (GA). Most of the core functionality and features are ready, but may have breaking changes. Please review the [Release Notes](https://github.com/rancherfederal/hauler/releases) for more information!
|
||||
`Rancher Government Hauler` simplifies the airgap experience without requiring operators to adopt a specific workflow. **Hauler** simplifies the airgapping process, by representing assets (images, charts, files, etc...) as content and collections to allow operators to easily fetch, store, package, and distribute these assets with declarative manifests or through the command line.
|
||||
|
||||
`Rancher Government Hauler` simplifies the airgap experience without requiring users to adopt a specific workflow. **Hauler** simplifies the airgapping process, by representing assets (images, charts, files, etc...) as content and collections to allow users to easily fetch, store, package, and distribute these assets with declarative manifests or through the command line.
|
||||
`Hauler` does this by storing contents and collections as OCI Artifacts and allows operators to serve contents and collections with an embedded registry and fileserver. Additionally, `Hauler` has the ability to store and inspect various non-image OCI Artifacts.
|
||||
|
||||
`Hauler` does this by storing contents and collections as OCI Artifacts and allows users to serve contents and collections with an embedded registry and fileserver. Additionally, `Hauler` has the ability to store and inspect various non-image OCI Artifacts.
|
||||
For more information, please review the **[Hauler Documentation](https://hauler.dev)!**
|
||||
|
||||
For more information, please review the **[Hauler Documentation](https://rancherfederal.github.io/hauler-docs)!**
|
||||
## Recent Changes
|
||||
|
||||
### In Hauler v2.0.0...
|
||||
|
||||
- Removed support for `apiVersion` of `v1alpha` and removed the automated conversion functionality to `v1`.
|
||||
- Please note that notices have been provided in this `README`, the `docs`, and in `cli` warnings since Hauler `v1.2.x`.
|
||||
|
||||
### In Hauler v1.4.0...
|
||||
|
||||
- Added a notice to `hauler store sync --products/--product-registry` to warn users the default registry will be updated in a future release.
|
||||
- Users will see logging notices when using the `--products/--product-registry` such as...
|
||||
- `!!! WARNING !!! [--products] will be updating its default registry in a future release...`
|
||||
- `!!! WARNING !!! [--product-registry] will be updating its default registry in a future release...`
|
||||
|
||||
### From older releases...
|
||||
|
||||
- Updated the behavior of `hauler store load` to default to loading a `haul` with the name of `haul.tar.zst` and requires the flag of `--filename/-f` to load a `haul` with a different name
|
||||
- Users can load multiple `hauls` by specifying multiple flags of `--filename/-f`
|
||||
- updated command usage: `hauler store load --filename hauling-hauls.tar.zst`
|
||||
- previous command usage (do not use): `hauler store load hauling-hauls.tar.zst`
|
||||
|
||||
---
|
||||
|
||||
- Updated the behavior of `hauler store sync` to default to syncing a `manifest` with the name of `hauler-manifest.yaml` and requires the flag of `--filename/-f` to sync a `manifest` with a different name
|
||||
- Users can sync multiple `manifests` by specifying multiple flags of `--filename/-f`
|
||||
- updated command usage: `hauler store sync --filename hauling-hauls-manifest.yaml`
|
||||
- previous command usage (do not use): `hauler store sync --files hauling-hauls-manifest.yaml`
|
||||
|
||||
---
|
||||
|
||||
Please review the documentation for any additional [Known Limits, Issues, and Notices](https://docs.hauler.dev/docs/known-limits)!
|
||||
|
||||
## Installation
|
||||
|
||||
### Linux/Darwin
|
||||
|
||||
```bash
|
||||
curl -#OL https://github.com/rancherfederal/hauler/releases/download/v0.3.0/hauler_0.3.0_linux_amd64.tar.gz
|
||||
tar -xf hauler_0.3.0_linux_amd64.tar.gz
|
||||
sudo mv hauler /usr/bin/hauler
|
||||
# installs latest release
|
||||
curl -sfL https://get.hauler.dev | bash
|
||||
```
|
||||
|
||||
### Homebrew
|
||||
|
||||
```bash
|
||||
# installs latest release
|
||||
brew tap hauler-dev/homebrew-tap
|
||||
brew install hauler
|
||||
```
|
||||
|
||||
### Windows
|
||||
|
||||
```bash
|
||||
# coming soon
|
||||
```
|
||||
|
||||
## Acknowledgements
|
||||
|
||||
`Hauler` wouldn't be possible without the open-source community, but there are a few projects that stand out:
|
||||
* [go-containerregistry](https://github.com/google/go-containerregistry)
|
||||
* [oras cli](https://github.com/oras-project/oras)
|
||||
* [cosign](https://github.com/sigstore/cosign)
|
||||
|
||||
## Notices
|
||||
**WARNING - Upcoming Deprecated Command(s):**
|
||||
|
||||
`hauler download` (alternatively, `dl`) and `hauler serve` (_not_ `hauler store serve`) commands are deprecated and will be removed in a future release.
|
||||
- [containerd](https://github.com/containerd/containerd)
|
||||
- [go-containerregistry](https://github.com/google/go-containerregistry)
|
||||
- [cosign](https://github.com/sigstore/cosign)
|
||||
50
ROADMAP.md
50
ROADMAP.md
@@ -1,50 +0,0 @@
|
||||
# Hauler Roadmap
|
||||
|
||||
## \> v0.2.0
|
||||
|
||||
- Leverage `referrers` api to robustly link content/collection
|
||||
- Support signing for all `artifact.OCI` contents
|
||||
- Support encryption for `artifact.OCI` layers
|
||||
- Support incremental updates to stores (some implementation of layer diffing)
|
||||
- Safely embed container runtime for user created `collections` creation and transformation
|
||||
- Better defaults/configuration/security around for long-lived embedded registry
|
||||
- Better support multi-platform content
|
||||
- Better leverage `oras` (`>=0.5.0`) for content relocation
|
||||
- Store git repos as CAS in OCI format
|
||||
|
||||
## v0.2.0 - MVP 2
|
||||
|
||||
- Re-focus on cli and framework for oci content fetching and delivery
|
||||
- Focus on initial key contents
|
||||
- Files (local/remote)
|
||||
- Charts (local/remote)
|
||||
- Images
|
||||
- Establish framework for `content` and `collections`
|
||||
- Define initial `content` types (`file`, `chart`, `image`)
|
||||
- Define initial `collection` types (`thickchart`, `k3s`)
|
||||
- Define framework for manipulating OCI content (`artifact.OCI`, `artifact.Collection`)
|
||||
|
||||
## v0.1.0 - MVP 1
|
||||
|
||||
- Install single-node k3s cluster
|
||||
- Support tarball and rpm installation methods
|
||||
- Target narrow set of known Operating Systems to have OS-specific code if needed
|
||||
- Serve container images
|
||||
- Collect images from image list file
|
||||
- Collect images from image archives
|
||||
- Deploy docker registry
|
||||
- Populate registry with all images
|
||||
- Serve git repositories
|
||||
- Collect repos
|
||||
- Deploy git server (Caddy? NGINX?)
|
||||
- Populate git server with repos
|
||||
- Serve files
|
||||
- Collect files from directory, including subdirectories
|
||||
- Deploy caddy file server
|
||||
- Populate file server with directory contents
|
||||
- NOTE: "generic" option - most other use cases can be satisfied by a specially crafted file
|
||||
server directory
|
||||
|
||||
## v0.0.x
|
||||
|
||||
- Install single-node k3s cluster into an Ubuntu machine using the tarball installation method
|
||||
49
VAGRANT.md
49
VAGRANT.md
@@ -1,49 +0,0 @@
|
||||
## Hauler Vagrant machine
|
||||
|
||||
A Vagrantfile is provided to allow easy provisioning of a local air-gapped CentOS environment. Some artifacts need to be collected from the internet; below are the steps required for successfully provisioning this machine, downloading all dependencies, and installing k3s (without hauler) into this machine.
|
||||
|
||||
### First-time setup
|
||||
|
||||
1. Install vagrant, if needed: <https://www.vagrantup.com/downloads>
|
||||
2. Install `vagrant-vbguest` plugin, as noted in the Vagrantfile:
|
||||
```shell
|
||||
vagrant plugin install vagrant-vbguest
|
||||
```
|
||||
3. Deploy Vagrant machine, disabling SELinux:
|
||||
```shell
|
||||
SELINUX=Disabled vagrant up
|
||||
```
|
||||
4. Access the Vagrant machine via SSH:
|
||||
```shell
|
||||
vagrant ssh
|
||||
```
|
||||
5. Run all prep scripts inside of the Vagrant machine:
|
||||
> This script temporarily enables internet access from within the VM to allow downloading all dependencies. Even so, the air-gapped network configuration IS restored before completion.
|
||||
```shell
|
||||
sudo /opt/hauler/vagrant-scripts/prep-all.sh
|
||||
```
|
||||
|
||||
All dependencies for all `vagrant-scripts/*-install.sh` scripts are now downloaded to the local
|
||||
repository under `local-artifacts`.
|
||||
|
||||
### Installing k3s manually
|
||||
|
||||
1. Access the Vagrant machine via SSH:
|
||||
```bash
|
||||
vagrant ssh
|
||||
```
|
||||
2. Run the k3s install script inside of the Vagrant machine:
|
||||
```shell
|
||||
sudo /opt/hauler/vagrant-scripts/k3s-install.sh
|
||||
```
|
||||
|
||||
### Installing RKE2 manually
|
||||
|
||||
1. Access the Vagrant machine via SSH:
|
||||
```shell
|
||||
vagrant ssh
|
||||
```
|
||||
2. Run the RKE2 install script inside of the Vagrant machine:
|
||||
```shell
|
||||
sudo /opt/hauler/vagrant-scripts/rke2-install.sh
|
||||
```
|
||||
65
Vagrantfile
vendored
65
Vagrantfile
vendored
@@ -1,65 +0,0 @@
|
||||
##################################
|
||||
# The vagrant-vbguest plugin is required for CentOS 7.
|
||||
# Run the following command to install/update this plugin:
|
||||
# vagrant plugin install vagrant-vbguest
|
||||
##################################
|
||||
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "centos/8"
|
||||
config.vm.hostname = "airgap"
|
||||
config.vm.network "private_network", type: "dhcp"
|
||||
|
||||
config.vm.synced_folder ".", "/vagrant"
|
||||
|
||||
config.vm.provider "virtualbox" do |vb|
|
||||
vb.memory = "2048"
|
||||
vb.cpus = "2"
|
||||
|
||||
config.vm.provision "airgap", type: "shell", run: "always",
|
||||
inline: "/vagrant/vagrant-scripts/airgap.sh airgap"
|
||||
end
|
||||
|
||||
# SELinux is Enforcing by default.
|
||||
# To set SELinux as Disabled on a VM that has already been provisioned:
|
||||
# SELINUX=Disabled vagrant up --provision-with=selinux
|
||||
# To set SELinux as Permissive on a VM that has already been provsioned
|
||||
# SELINUX=Permissive vagrant up --provision-with=selinux
|
||||
config.vm.provision "selinux", type: "shell", run: "once" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-selinux"
|
||||
sh.env = {
|
||||
'SELINUX': ENV['SELINUX'] || "Enforcing"
|
||||
}
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
set -eux -o pipefail
|
||||
|
||||
if ! type -p getenforce setenforce &>/dev/null; then
|
||||
echo SELinux is Disabled
|
||||
exit 0
|
||||
fi
|
||||
|
||||
case "${SELINUX}" in
|
||||
Disabled)
|
||||
if mountpoint -q /sys/fs/selinux; then
|
||||
setenforce 0
|
||||
umount -v /sys/fs/selinux
|
||||
fi
|
||||
;;
|
||||
Enforcing)
|
||||
mountpoint -q /sys/fs/selinux || mount -o rw,relatime -t selinuxfs selinuxfs /sys/fs/selinux
|
||||
setenforce 1
|
||||
;;
|
||||
Permissive)
|
||||
mountpoint -q /sys/fs/selinux || mount -o rw,relatime -t selinuxfs selinuxfs /sys/fs/selinux
|
||||
setenforce 0
|
||||
;;
|
||||
*)
|
||||
echo "SELinux mode not supported: ${SELINUX}" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo SELinux is $(getenforce)
|
||||
SHELL
|
||||
end
|
||||
end
|
||||
6
cmd/hauler/boringcrypto.go
Normal file
6
cmd/hauler/boringcrypto.go
Normal file
@@ -0,0 +1,6 @@
|
||||
//go:build boringcrypto
|
||||
// +build boringcrypto
|
||||
|
||||
package main
|
||||
|
||||
import _ "crypto/tls/fipsonly"
|
||||
@@ -1,25 +1,34 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"context"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
cranecmd "github.com/google/go-containerregistry/cmd/crane/cmd"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
)
|
||||
|
||||
type rootOpts struct {
|
||||
logLevel string
|
||||
}
|
||||
|
||||
var ro = &rootOpts{}
|
||||
|
||||
func New() *cobra.Command {
|
||||
func New(ctx context.Context, ro *flags.CliRootOpts) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "hauler",
|
||||
Short: "",
|
||||
Use: "hauler",
|
||||
Short: "Airgap Swiss Army Knife",
|
||||
Example: " View the Docs: https://docs.hauler.dev\n Environment Variables: " + consts.HaulerDir + " | " + consts.HaulerTempDir + " | " + consts.HaulerStoreDir + " | " + consts.HaulerIgnoreErrors + "\n Warnings: Hauler commands and flags marked with (EXPERIMENTAL) are not yet stable and may change in the future.",
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
l := log.FromContext(cmd.Context())
|
||||
l.SetLevel(ro.logLevel)
|
||||
l := log.FromContext(ctx)
|
||||
l.SetLevel(ro.LogLevel)
|
||||
l.Debugf("running cli command [%s]", cmd.CommandPath())
|
||||
|
||||
// Suppress WARN-level messages from containerd and other
|
||||
// libraries that use the global logrus logger.
|
||||
if ro.LogLevel == "debug" {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
} else {
|
||||
logrus.SetLevel(logrus.ErrorLevel)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -27,14 +36,13 @@ func New() *cobra.Command {
|
||||
},
|
||||
}
|
||||
|
||||
pf := cmd.PersistentFlags()
|
||||
pf.StringVarP(&ro.logLevel, "log-level", "l", "info", "")
|
||||
flags.AddRootFlags(cmd, ro)
|
||||
|
||||
// Add subcommands
|
||||
addDownload(cmd)
|
||||
addStore(cmd)
|
||||
addServe(cmd)
|
||||
addVersion(cmd)
|
||||
cmd.AddCommand(cranecmd.NewCmdAuthLogin("hauler"))
|
||||
cmd.AddCommand(cranecmd.NewCmdAuthLogout("hauler"))
|
||||
addStore(cmd, ro)
|
||||
addVersion(cmd, ro)
|
||||
addCompletion(cmd, ro)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
116
cmd/hauler/cli/completion.go
Normal file
116
cmd/hauler/cli/completion.go
Normal file
@@ -0,0 +1,116 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
)
|
||||
|
||||
func addCompletion(parent *cobra.Command, ro *flags.CliRootOpts) {
|
||||
cmd := &cobra.Command{
|
||||
Use: "completion",
|
||||
Short: "Generate auto-completion scripts for various shells",
|
||||
}
|
||||
|
||||
cmd.AddCommand(
|
||||
addCompletionZsh(ro),
|
||||
addCompletionBash(ro),
|
||||
addCompletionFish(ro),
|
||||
addCompletionPowershell(ro),
|
||||
)
|
||||
|
||||
parent.AddCommand(cmd)
|
||||
}
|
||||
|
||||
func addCompletionZsh(ro *flags.CliRootOpts) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "zsh",
|
||||
Short: "Generates auto-completion scripts for zsh",
|
||||
Example: `To load completion run
|
||||
|
||||
. <(hauler completion zsh)
|
||||
|
||||
To configure your zsh shell to load completions for each session add to your zshrc
|
||||
|
||||
# ~/.zshrc or ~/.profile
|
||||
command -v hauler >/dev/null && . <(hauler completion zsh)
|
||||
|
||||
or write a cached file in one of the completion directories in your ${fpath}:
|
||||
|
||||
echo "${fpath// /\n}" | grep -i completion
|
||||
hauler completion zsh > _hauler
|
||||
|
||||
mv _hauler ~/.oh-my-zsh/completions # oh-my-zsh
|
||||
mv _hauler ~/.zprezto/modules/completion/external/src/ # zprezto`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cmd.Root().GenZshCompletion(os.Stdout)
|
||||
// Cobra doesn't source zsh completion file, explicitly doing it here
|
||||
fmt.Println("compdef _hauler hauler")
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addCompletionBash(ro *flags.CliRootOpts) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "bash",
|
||||
Short: "Generates auto-completion scripts for bash",
|
||||
Example: `To load completion run
|
||||
|
||||
. <(hauler completion bash)
|
||||
|
||||
To configure your bash shell to load completions for each session add to your bashrc
|
||||
|
||||
# ~/.bashrc or ~/.profile
|
||||
command -v hauler >/dev/null && . <(hauler completion bash)`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cmd.Root().GenBashCompletion(os.Stdout)
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addCompletionFish(ro *flags.CliRootOpts) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "fish",
|
||||
Short: "Generates auto-completion scripts for fish",
|
||||
Example: `To configure your fish shell to load completions for each session write this script to your completions dir:
|
||||
|
||||
hauler completion fish > ~/.config/fish/completions/hauler.fish
|
||||
|
||||
See http://fishshell.com/docs/current/index.html#completion-own for more details`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cmd.Root().GenFishCompletion(os.Stdout, true)
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addCompletionPowershell(ro *flags.CliRootOpts) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "powershell",
|
||||
Short: "Generates auto-completion scripts for powershell",
|
||||
Example: `To load completion run
|
||||
|
||||
. <(hauler completion powershell)
|
||||
|
||||
To configure your powershell shell to load completions for each session add to your powershell profile
|
||||
|
||||
Windows:
|
||||
|
||||
cd "$env:USERPROFILE\Documents\WindowsPowerShell\Modules"
|
||||
hauler completion powershell >> hauler-completion.ps1
|
||||
|
||||
Linux:
|
||||
|
||||
cd "${XDG_CONFIG_HOME:-"$HOME/.config/"}/powershell/modules"
|
||||
hauler completion powershell >> hauler-completions.ps1`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cmd.Root().GenPowerShellCompletion(os.Stdout)
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
@@ -1,45 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/rancherfederal/hauler/cmd/hauler/cli/download"
|
||||
)
|
||||
|
||||
func addDownload(parent *cobra.Command) {
|
||||
o := &download.Opts{}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "download",
|
||||
Short: "Download OCI content from a registry and populate it on disk",
|
||||
Long: `*** WARNING: Deprecated Command ***
|
||||
The 'download (dl)' command is deprecated and will be removed in a future release of Hauler.
|
||||
|
||||
Locate OCI content based on it's reference in a compatible registry and download the contents to disk.
|
||||
|
||||
Note that the content type determines it's format on disk. Hauler's built in content types act as follows:
|
||||
|
||||
- File: as a file named after the pushed contents source name (ex: my-file.yaml:latest --> my-file.yaml)
|
||||
- Image: as a .tar named after the image (ex: alpine:latest --> alpine:latest.tar)
|
||||
- Chart: as a .tar.gz named after the chart (ex: loki:2.0.2 --> loki-2.0.2.tar.gz)`,
|
||||
Example: `
|
||||
# Download a file
|
||||
hauler dl localhost:5000/my-file.yaml:latest
|
||||
|
||||
# Download an image
|
||||
hauler dl localhost:5000/rancher/k3s:v1.22.2-k3s2
|
||||
|
||||
# Download a chart
|
||||
hauler dl localhost:5000/hauler/longhorn:1.2.0`,
|
||||
Aliases: []string{"dl"},
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, arg []string) error {
|
||||
ctx := cmd.Context()
|
||||
|
||||
return download.Cmd(ctx, o, arg[0])
|
||||
},
|
||||
}
|
||||
o.AddArgs(cmd)
|
||||
|
||||
parent.AddCommand(cmd)
|
||||
}
|
||||
@@ -1,87 +0,0 @@
|
||||
package download
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/spf13/cobra"
|
||||
"oras.land/oras-go/pkg/content"
|
||||
"oras.land/oras-go/pkg/oras"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
|
||||
"github.com/rancherfederal/hauler/internal/mapper"
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
"github.com/rancherfederal/hauler/pkg/reference"
|
||||
)
|
||||
|
||||
type Opts struct {
|
||||
DestinationDir string
|
||||
|
||||
Username string
|
||||
Password string
|
||||
Insecure bool
|
||||
PlainHTTP bool
|
||||
}
|
||||
|
||||
func (o *Opts) AddArgs(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.DestinationDir, "output", "o", "", "Directory to save contents to (defaults to current directory)")
|
||||
f.StringVarP(&o.Username, "username", "u", "", "Username when copying to an authenticated remote registry")
|
||||
f.StringVarP(&o.Password, "password", "p", "", "Password when copying to an authenticated remote registry")
|
||||
f.BoolVar(&o.Insecure, "insecure", false, "Toggle allowing insecure connections when copying to a remote registry")
|
||||
f.BoolVar(&o.PlainHTTP, "plain-http", false, "Toggle allowing plain http connections when copying to a remote registry")
|
||||
}
|
||||
|
||||
func Cmd(ctx context.Context, o *Opts, ref string) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
ropts := content.RegistryOptions{
|
||||
Username: o.Username,
|
||||
Password: o.Password,
|
||||
Insecure: o.Insecure,
|
||||
PlainHTTP: o.PlainHTTP,
|
||||
}
|
||||
rs, err := content.NewRegistry(ropts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := reference.Parse(ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
desc, err := remote.Get(r, remote.WithAuthFromKeychain(authn.DefaultKeychain), remote.WithContext(ctx))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
manifestData, err := desc.RawManifest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var manifest ocispec.Manifest
|
||||
if err := json.Unmarshal(manifestData, &manifest); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mapperStore, err := mapper.FromManifest(manifest, o.DestinationDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pushedDesc, err := oras.Copy(ctx, rs, r.Name(), mapperStore, "",
|
||||
oras.WithAdditionalCachedMediaTypes(consts.DockerManifestSchema2))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l.Infof("downloaded [%s] with digest [%s]", pushedDesc.MediaType, pushedDesc.Digest.String())
|
||||
return nil
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/rancherfederal/hauler/cmd/hauler/cli/serve"
|
||||
)
|
||||
|
||||
func addServe(parent *cobra.Command) {
|
||||
cmd := &cobra.Command{
|
||||
Use: "serve",
|
||||
Short: "Run one or more of hauler's embedded servers types",
|
||||
Long: `*** WARNING: Deprecated Command ***
|
||||
The 'serve' command is deprecated and will be removed in a future release of Hauler.`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmd.Help()
|
||||
},
|
||||
}
|
||||
|
||||
cmd.AddCommand(
|
||||
addServeFiles(),
|
||||
addServeRegistry(),
|
||||
)
|
||||
|
||||
parent.AddCommand(cmd)
|
||||
}
|
||||
|
||||
func addServeFiles() *cobra.Command {
|
||||
o := &serve.FilesOpts{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "files",
|
||||
Short: "Start a fileserver",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
return serve.FilesCmd(ctx, o)
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addServeRegistry() *cobra.Command {
|
||||
o := &serve.RegistryOpts{}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "registry",
|
||||
Short: "Start a registry",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
return serve.RegistryCmd(ctx, o)
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
|
||||
return cmd
|
||||
}
|
||||
@@ -1,37 +0,0 @@
|
||||
package serve
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/rancherfederal/hauler/internal/server"
|
||||
)
|
||||
|
||||
type FilesOpts struct {
|
||||
Root string
|
||||
Port int
|
||||
}
|
||||
|
||||
func (o *FilesOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
f.StringVarP(&o.Root, "root", "r", ".", "Path to root of the directory to serve")
|
||||
f.IntVarP(&o.Port, "port", "p", 8080, "Port to listen on")
|
||||
}
|
||||
|
||||
func FilesCmd(ctx context.Context, o *FilesOpts) error {
|
||||
cfg := server.FileConfig{
|
||||
Root: o.Root,
|
||||
Port: o.Port,
|
||||
}
|
||||
|
||||
s, err := server.NewFile(ctx, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.ListenAndServe(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,81 +0,0 @@
|
||||
package serve
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/distribution/distribution/v3/configuration"
|
||||
dcontext "github.com/distribution/distribution/v3/context"
|
||||
"github.com/distribution/distribution/v3/version"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/rancherfederal/hauler/internal/server"
|
||||
)
|
||||
|
||||
type RegistryOpts struct {
|
||||
Root string
|
||||
Port int
|
||||
ConfigFile string
|
||||
}
|
||||
|
||||
func (o *RegistryOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
f.StringVarP(&o.Root, "root", "r", ".", "Path to root of the directory to serve")
|
||||
f.IntVarP(&o.Port, "port", "p", 5000, "Port to listen on")
|
||||
f.StringVarP(&o.ConfigFile, "config", "c", "", "Path to a config file, will override all other configs")
|
||||
}
|
||||
|
||||
func RegistryCmd(ctx context.Context, o *RegistryOpts) error {
|
||||
ctx = dcontext.WithVersion(ctx, version.Version)
|
||||
|
||||
cfg := o.defaultConfig()
|
||||
if o.ConfigFile != "" {
|
||||
ucfg, err := loadConfig(o.ConfigFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cfg = ucfg
|
||||
}
|
||||
|
||||
s, err := server.NewRegistry(ctx, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.ListenAndServe(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadConfig(filename string) (*configuration.Configuration, error) {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return configuration.Parse(f)
|
||||
}
|
||||
|
||||
func (o *RegistryOpts) defaultConfig() *configuration.Configuration {
|
||||
cfg := &configuration.Configuration{
|
||||
Version: "0.1",
|
||||
Storage: configuration.Storage{
|
||||
"cache": configuration.Parameters{"blobdescriptor": "inmemory"},
|
||||
"filesystem": configuration.Parameters{"rootdirectory": o.Root},
|
||||
|
||||
// TODO: Ensure this is toggleable via cli arg if necessary
|
||||
// "maintenance": configuration.Parameters{"readonly.enabled": false},
|
||||
},
|
||||
}
|
||||
cfg.Log.Level = "info"
|
||||
cfg.HTTP.Addr = fmt.Sprintf(":%d", o.Port)
|
||||
cfg.HTTP.Headers = http.Header{
|
||||
"X-Content-Type-Options": []string{"nosniff"},
|
||||
"Accept": []string{"application/vnd.dsse.envelope.v1+json, application/json"},
|
||||
}
|
||||
|
||||
return cfg
|
||||
}
|
||||
@@ -1,47 +1,50 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
|
||||
"github.com/rancherfederal/hauler/cmd/hauler/cli/store"
|
||||
"hauler.dev/go/hauler/cmd/hauler/cli/store"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
)
|
||||
|
||||
var rootStoreOpts = &store.RootOpts{}
|
||||
func addStore(parent *cobra.Command, ro *flags.CliRootOpts) {
|
||||
rso := &flags.StoreRootOpts{}
|
||||
|
||||
func addStore(parent *cobra.Command) {
|
||||
cmd := &cobra.Command{
|
||||
Use: "store",
|
||||
Aliases: []string{"s"},
|
||||
Short: "Interact with hauler's embedded content store",
|
||||
Short: "Interact with the content store",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmd.Help()
|
||||
},
|
||||
}
|
||||
rootStoreOpts.AddArgs(cmd)
|
||||
rso.AddFlags(cmd)
|
||||
|
||||
cmd.AddCommand(
|
||||
addStoreSync(),
|
||||
addStoreExtract(),
|
||||
addStoreLoad(),
|
||||
addStoreSave(),
|
||||
addStoreServe(),
|
||||
addStoreInfo(),
|
||||
addStoreCopy(),
|
||||
|
||||
// TODO: Remove this in favor of sync?
|
||||
addStoreAdd(),
|
||||
addStoreSync(rso, ro),
|
||||
addStoreExtract(rso, ro),
|
||||
addStoreLoad(rso, ro),
|
||||
addStoreSave(rso, ro),
|
||||
addStoreServe(rso, ro),
|
||||
addStoreInfo(rso, ro),
|
||||
addStoreCopy(rso, ro),
|
||||
addStoreAdd(rso, ro),
|
||||
addStoreRemove(rso, ro),
|
||||
)
|
||||
|
||||
parent.AddCommand(cmd)
|
||||
}
|
||||
|
||||
func addStoreExtract() *cobra.Command {
|
||||
o := &store.ExtractOpts{RootOpts: rootStoreOpts}
|
||||
func addStoreExtract(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.ExtractOpts{StoreRootOpts: rso}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "extract",
|
||||
Short: "Extract content from the store to disk",
|
||||
Short: "Extract artifacts from the content store to disk",
|
||||
Aliases: []string{"x"},
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -55,17 +58,35 @@ func addStoreExtract() *cobra.Command {
|
||||
return store.ExtractCmd(ctx, o, s, args[0])
|
||||
},
|
||||
}
|
||||
o.AddArgs(cmd)
|
||||
o.AddFlags(cmd)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreSync() *cobra.Command {
|
||||
o := &store.SyncOpts{RootOpts: rootStoreOpts}
|
||||
func addStoreSync(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.SyncOpts{StoreRootOpts: rso}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "sync",
|
||||
Short: "Sync content to the embedded content store",
|
||||
Short: "Sync content to the content store",
|
||||
Args: cobra.ExactArgs(0),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
// warn if products or product-registry flag is used by the user
|
||||
if cmd.Flags().Changed("products") {
|
||||
log.FromContext(cmd.Context()).Warnf("!!! WARNING !!! [--products] will be updating its default registry in a future release.")
|
||||
}
|
||||
if cmd.Flags().Changed("product-registry") {
|
||||
log.FromContext(cmd.Context()).Warnf("!!! WARNING !!! [--product-registry] will be updating its default registry in a future release.")
|
||||
}
|
||||
// check if the products flag was passed
|
||||
if len(o.Products) > 0 {
|
||||
// only clear the default if the user did not explicitly set it
|
||||
if !cmd.Flags().Changed("filename") {
|
||||
o.FileName = []string{}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
|
||||
@@ -74,7 +95,7 @@ func addStoreSync() *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
return store.SyncCmd(ctx, o, s)
|
||||
return store.SyncCmd(ctx, o, s, rso, ro)
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
@@ -82,13 +103,13 @@ func addStoreSync() *cobra.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreLoad() *cobra.Command {
|
||||
o := &store.LoadOpts{RootOpts: rootStoreOpts}
|
||||
func addStoreLoad(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.LoadOpts{StoreRootOpts: rso}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "load",
|
||||
Short: "Load a content store from a store archive",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
|
||||
@@ -98,7 +119,7 @@ func addStoreLoad() *cobra.Command {
|
||||
}
|
||||
_ = s
|
||||
|
||||
return store.LoadCmd(ctx, o, args...)
|
||||
return store.LoadCmd(ctx, o, rso, ro)
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
@@ -106,12 +127,28 @@ func addStoreLoad() *cobra.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreServe() *cobra.Command {
|
||||
o := &store.ServeOpts{RootOpts: rootStoreOpts}
|
||||
|
||||
func addStoreServe(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "serve",
|
||||
Short: "Expose the content of a local store through an OCI compliant server",
|
||||
Short: "Serve the content store via an OCI Compliant Registry or Fileserver",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmd.Help()
|
||||
},
|
||||
}
|
||||
cmd.AddCommand(
|
||||
addStoreServeRegistry(rso, ro),
|
||||
addStoreServeFiles(rso, ro),
|
||||
)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreServeRegistry(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.ServeRegistryOpts{StoreRootOpts: rso}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "registry",
|
||||
Short: "Serve the OCI Compliant Registry",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
|
||||
@@ -120,16 +157,40 @@ func addStoreServe() *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
return store.ServeCmd(ctx, o, s)
|
||||
return store.ServeRegistryCmd(ctx, o, s, rso, ro)
|
||||
},
|
||||
}
|
||||
|
||||
o.AddFlags(cmd)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreSave() *cobra.Command {
|
||||
o := &store.SaveOpts{RootOpts: rootStoreOpts}
|
||||
func addStoreServeFiles(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.ServeFilesOpts{StoreRootOpts: rso}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "fileserver",
|
||||
Short: "Serve the Fileserver",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
|
||||
s, err := o.Store(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return store.ServeFilesCmd(ctx, o, s, ro)
|
||||
},
|
||||
}
|
||||
|
||||
o.AddFlags(cmd)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreSave(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.SaveOpts{StoreRootOpts: rso}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "save",
|
||||
@@ -144,16 +205,18 @@ func addStoreSave() *cobra.Command {
|
||||
}
|
||||
_ = s
|
||||
|
||||
return store.SaveCmd(ctx, o, o.FileName)
|
||||
return store.SaveCmd(ctx, o, rso, ro)
|
||||
},
|
||||
}
|
||||
o.AddArgs(cmd)
|
||||
o.AddFlags(cmd)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreInfo() *cobra.Command {
|
||||
o := &store.InfoOpts{RootOpts: rootStoreOpts}
|
||||
func addStoreInfo(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.InfoOpts{StoreRootOpts: rso}
|
||||
|
||||
var allowedValues = []string{"image", "chart", "file", "sigs", "atts", "sbom", "referrer", "all"}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "info",
|
||||
@@ -168,7 +231,12 @@ func addStoreInfo() *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
return store.InfoCmd(ctx, o, s)
|
||||
for _, allowed := range allowedValues {
|
||||
if o.TypeFilter == allowed {
|
||||
return store.InfoCmd(ctx, o, s)
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("type must be one of %v", allowedValues)
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
@@ -176,12 +244,12 @@ func addStoreInfo() *cobra.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreCopy() *cobra.Command {
|
||||
o := &store.CopyOpts{RootOpts: rootStoreOpts}
|
||||
func addStoreCopy(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.CopyOpts{StoreRootOpts: rso}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "copy",
|
||||
Short: "Copy all store contents to another OCI registry",
|
||||
Short: "Copy all store content to another location",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
@@ -191,7 +259,7 @@ func addStoreCopy() *cobra.Command {
|
||||
return err
|
||||
}
|
||||
|
||||
return store.CopyCmd(ctx, o, s, args[0])
|
||||
return store.CopyCmd(ctx, o, s, args[0], ro)
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
@@ -199,31 +267,39 @@ func addStoreCopy() *cobra.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreAdd() *cobra.Command {
|
||||
func addStoreAdd(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "add",
|
||||
Short: "Add content to store",
|
||||
Short: "Add content to the store",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmd.Help()
|
||||
},
|
||||
}
|
||||
|
||||
cmd.AddCommand(
|
||||
addStoreAddFile(),
|
||||
addStoreAddImage(),
|
||||
addStoreAddChart(),
|
||||
addStoreAddFile(rso, ro),
|
||||
addStoreAddImage(rso, ro),
|
||||
addStoreAddChart(rso, ro),
|
||||
)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreAddFile() *cobra.Command {
|
||||
o := &store.AddFileOpts{RootOpts: rootStoreOpts}
|
||||
func addStoreAddFile(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.AddFileOpts{StoreRootOpts: rso}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "file",
|
||||
Short: "Add a file to the content store",
|
||||
Args: cobra.ExactArgs(1),
|
||||
Short: "Add a file to the store",
|
||||
Example: `# fetch local file
|
||||
hauler store add file file.txt
|
||||
|
||||
# fetch remote file
|
||||
hauler store add file https://get.rke2.io/install.sh
|
||||
|
||||
# fetch remote file and assign new name
|
||||
hauler store add file https://get.hauler.dev --name hauler-install.sh`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
|
||||
@@ -240,51 +316,30 @@ func addStoreAddFile() *cobra.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreAddImage() *cobra.Command {
|
||||
o := &store.AddImageOpts{RootOpts: rootStoreOpts}
|
||||
func addStoreAddImage(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.AddImageOpts{StoreRootOpts: rso}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "image",
|
||||
Short: "Add an image to the content store",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
Short: "Add a image to the store",
|
||||
Example: `# fetch image
|
||||
hauler store add image busybox
|
||||
|
||||
s, err := o.Store(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
# fetch image with repository and tag
|
||||
hauler store add image library/busybox:stable
|
||||
|
||||
return store.AddImageCmd(ctx, o, s, args[0])
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
# fetch image with full image reference and specific platform
|
||||
hauler store add image ghcr.io/hauler-dev/hauler-debug:v1.2.0 --platform linux/amd64
|
||||
|
||||
return cmd
|
||||
}
|
||||
# fetch image with full image reference via digest
|
||||
hauler store add image gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
|
||||
|
||||
func addStoreAddChart() *cobra.Command {
|
||||
o := &store.AddChartOpts{
|
||||
RootOpts: rootStoreOpts,
|
||||
ChartOpts: &action.ChartPathOptions{},
|
||||
}
|
||||
# fetch image with full image reference, specific platform, and signature verification
|
||||
curl -sfOL https://raw.githubusercontent.com/rancherfederal/carbide-releases/main/carbide-key.pub
|
||||
hauler store add image rgcrprod.azurecr.us/rancher/rke2-runtime:v1.31.5-rke2r1 --platform linux/amd64 --key carbide-key.pub
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "chart",
|
||||
Short: "Add a local or remote chart to the content store",
|
||||
Example: `
|
||||
# add a local chart
|
||||
hauler store add chart path/to/chart/directory
|
||||
|
||||
# add a local compressed chart
|
||||
hauler store add chart path/to/chart.tar.gz
|
||||
|
||||
# add a remote chart
|
||||
hauler store add chart longhorn --repo "https://charts.longhorn.io"
|
||||
|
||||
# add a specific version of a chart
|
||||
hauler store add chart rancher --repo "https://releases.rancher.com/server-charts/latest" --version "2.6.2"
|
||||
`,
|
||||
# fetch image and rewrite path
|
||||
hauler store add image busybox --rewrite custom-path/busybox:latest`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
@@ -294,7 +349,92 @@ hauler store add chart rancher --repo "https://releases.rancher.com/server-chart
|
||||
return err
|
||||
}
|
||||
|
||||
return store.AddChartCmd(ctx, o, s, args[0])
|
||||
return store.AddImageCmd(ctx, o, s, args[0], rso, ro)
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreAddChart(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.AddChartOpts{StoreRootOpts: rso, ChartOpts: &action.ChartPathOptions{}}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "chart",
|
||||
Short: "Add a helm chart to the store",
|
||||
Example: `# fetch local helm chart
|
||||
hauler store add chart path/to/chart/directory --repo .
|
||||
|
||||
# fetch local compressed helm chart
|
||||
hauler store add chart path/to/chart.tar.gz --repo .
|
||||
|
||||
# fetch remote oci helm chart
|
||||
hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev
|
||||
|
||||
# fetch remote oci helm chart with version
|
||||
hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --version 1.2.0
|
||||
|
||||
# fetch remote helm chart
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable
|
||||
|
||||
# fetch remote helm chart with specific version
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/latest --version 2.10.1
|
||||
|
||||
# fetch remote helm chart and rewrite path
|
||||
hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --rewrite custom-path/hauler-chart:latest`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
|
||||
s, err := o.Store(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return store.AddChartCmd(ctx, o, s, args[0], rso, ro)
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreRemove(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.RemoveOpts{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "remove <artifact-ref>",
|
||||
Short: "(EXPERIMENTAL) Remove an artifact from the content store",
|
||||
Example: `# remove an image using full store reference
|
||||
hauler store info
|
||||
hauler store remove index.docker.io/library/busybox:stable
|
||||
|
||||
# remove a chart using full store reference
|
||||
hauler store info
|
||||
hauler store remove hauler/rancher:2.8.4
|
||||
|
||||
# remove a file using full store reference
|
||||
hauler store info
|
||||
hauler store remove hauler/rke2-install.sh
|
||||
|
||||
# remove any artifact with the latest tag
|
||||
hauler store remove :latest
|
||||
|
||||
# remove any artifact with 'busybox' in the reference
|
||||
hauler store remove busybox
|
||||
|
||||
# force remove without verification
|
||||
hauler store remove busybox:latest --force`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
|
||||
s, err := rso.Store(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return store.RemoveCmd(ctx, o, s, args[0])
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
|
||||
@@ -2,42 +2,44 @@ package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
|
||||
"github.com/spf13/cobra"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
helmchart "helm.sh/helm/v3/pkg/chart"
|
||||
"helm.sh/helm/v3/pkg/chartutil"
|
||||
"helm.sh/helm/v3/pkg/engine"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/store"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
"github.com/rancherfederal/hauler/pkg/content/chart"
|
||||
"github.com/rancherfederal/hauler/pkg/cosign"
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
"github.com/rancherfederal/hauler/pkg/reference"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/file"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content/chart"
|
||||
"hauler.dev/go/hauler/pkg/cosign"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/reference"
|
||||
"hauler.dev/go/hauler/pkg/retry"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
type AddFileOpts struct {
|
||||
*RootOpts
|
||||
Name string
|
||||
}
|
||||
|
||||
func (o *AddFileOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
f.StringVarP(&o.Name, "name", "n", "", "(Optional) Name to assign to file in store")
|
||||
}
|
||||
|
||||
func AddFileCmd(ctx context.Context, o *AddFileOpts, s *store.Layout, reference string) error {
|
||||
cfg := v1alpha1.File{
|
||||
func AddFileCmd(ctx context.Context, o *flags.AddFileOpts, s *store.Layout, reference string) error {
|
||||
cfg := v1.File{
|
||||
Path: reference,
|
||||
}
|
||||
|
||||
if len(o.Name) > 0 {
|
||||
cfg.Name = o.Name
|
||||
}
|
||||
return storeFile(ctx, s, cfg)
|
||||
}
|
||||
|
||||
func storeFile(ctx context.Context, s *store.Layout, fi v1alpha1.File) error {
|
||||
func storeFile(ctx context.Context, s *store.Layout, fi v1.File) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
copts := getter.ClientOptions{
|
||||
@@ -45,107 +47,320 @@ func storeFile(ctx context.Context, s *store.Layout, fi v1alpha1.File) error {
|
||||
}
|
||||
|
||||
f := file.NewFile(fi.Path, file.WithClient(getter.NewClient(copts)))
|
||||
ref, err := reference.NewTagged(f.Name(fi.Path), reference.DefaultTag)
|
||||
ref, err := reference.NewTagged(f.Name(fi.Path), consts.DefaultTag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
desc, err := s.AddOCI(ctx, f, ref.Name())
|
||||
l.Infof("adding file [%s] to the store as [%s]", fi.Path, ref.Name())
|
||||
_, err = s.AddArtifact(ctx, f, ref.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l.Infof("added 'file' to store at [%s], with digest [%s]", ref.Name(), desc.Digest.String())
|
||||
l.Infof("successfully added file [%s]", ref.Name())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type AddImageOpts struct {
|
||||
*RootOpts
|
||||
Name string
|
||||
Key string
|
||||
}
|
||||
|
||||
func (o *AddImageOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
f.StringVarP(&o.Key, "key", "k", "", "(Optional) Path to the key for digital signature verification")
|
||||
}
|
||||
|
||||
func AddImageCmd(ctx context.Context, o *AddImageOpts, s *store.Layout, reference string) error {
|
||||
func AddImageCmd(ctx context.Context, o *flags.AddImageOpts, s *store.Layout, reference string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
cfg := v1alpha1.Image{
|
||||
Name: reference,
|
||||
|
||||
cfg := v1.Image{
|
||||
Name: reference,
|
||||
Rewrite: o.Rewrite,
|
||||
}
|
||||
|
||||
// Check if the user provided a key.
|
||||
if o.Key != "" {
|
||||
// verify signature using the provided key.
|
||||
err := cosign.VerifySignature(ctx, s, o.Key, cfg.Name)
|
||||
err := cosign.VerifySignature(ctx, o.Key, o.Tlog, cfg.Name, rso, ro)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Infof("signature verified for image [%s]", cfg.Name)
|
||||
} else if o.CertIdentityRegexp != "" || o.CertIdentity != "" {
|
||||
// verify signature using keyless details.
|
||||
// Keyless (Fulcio) certificates expire after ~10 minutes, so the transparency
|
||||
// log is always required to prove the cert was valid at signing time — ignore
|
||||
// --use-tlog-verify for this path and always check tlog.
|
||||
l.Infof("verifying keyless signature for [%s]", cfg.Name)
|
||||
err := cosign.VerifyKeylessSignature(ctx, o.CertIdentity, o.CertIdentityRegexp, o.CertOidcIssuer, o.CertOidcIssuerRegexp, o.CertGithubWorkflowRepository, cfg.Name, rso, ro)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Infof("keyless signature verified for image [%s]", cfg.Name)
|
||||
}
|
||||
|
||||
return storeImage(ctx, s, cfg)
|
||||
return storeImage(ctx, s, cfg, o.Platform, rso, ro, o.Rewrite)
|
||||
}
|
||||
|
||||
func storeImage(ctx context.Context, s *store.Layout, i v1alpha1.Image) error {
|
||||
func storeImage(ctx context.Context, s *store.Layout, i v1.Image, platform string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts, rewrite string) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
if !ro.IgnoreErrors {
|
||||
envVar := os.Getenv(consts.HaulerIgnoreErrors)
|
||||
if envVar == "true" {
|
||||
ro.IgnoreErrors = true
|
||||
}
|
||||
}
|
||||
|
||||
l.Infof("adding image [%s] to the store", i.Name)
|
||||
|
||||
r, err := name.ParseReference(i.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
if ro.IgnoreErrors {
|
||||
l.Warnf("unable to parse image [%s]: %v... skipping...", i.Name, err)
|
||||
return nil
|
||||
} else {
|
||||
l.Errorf("unable to parse image [%s]: %v", i.Name, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = cosign.SaveImage(ctx, s, r.Name())
|
||||
//desc, err := s.AddOCI(ctx, img, r.Name())
|
||||
// fetch image along with any associated signatures and attestations
|
||||
err = retry.Operation(ctx, rso, ro, func() error {
|
||||
return s.AddImage(ctx, r.Name(), platform)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
if ro.IgnoreErrors {
|
||||
l.Warnf("unable to add image [%s] to store: %v... skipping...", r.Name(), err)
|
||||
return nil
|
||||
} else {
|
||||
l.Errorf("unable to add image [%s] to store: %v", r.Name(), err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
l.Infof("added 'image' to store at [%s]", r.Name())
|
||||
if rewrite != "" {
|
||||
rewrite = strings.TrimPrefix(rewrite, "/")
|
||||
if !strings.Contains(rewrite, ":") {
|
||||
if tag, ok := r.(name.Tag); ok {
|
||||
rewrite = rewrite + ":" + tag.TagStr()
|
||||
} else {
|
||||
return fmt.Errorf("cannot rewrite digest reference [%s] without an explicit tag in the rewrite", r.Name())
|
||||
}
|
||||
}
|
||||
// rename image name in store
|
||||
newRef, err := name.ParseReference(rewrite)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to parse rewrite name [%s]: %w", rewrite, err)
|
||||
}
|
||||
if err := rewriteReference(ctx, s, r, newRef); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
l.Infof("successfully added image [%s]", r.Name())
|
||||
return nil
|
||||
}
|
||||
|
||||
type AddChartOpts struct {
|
||||
*RootOpts
|
||||
func rewriteReference(ctx context.Context, s *store.Layout, oldRef name.Reference, newRef name.Reference) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
if err := s.OCI.LoadIndex(); err != nil {
|
||||
return fmt.Errorf("failed to load index: %w", err)
|
||||
}
|
||||
|
||||
//TODO: improve string manipulation
|
||||
oldRefContext := oldRef.Context()
|
||||
newRefContext := newRef.Context()
|
||||
oldRepo := oldRefContext.RepositoryStr()
|
||||
newRepo := newRefContext.RepositoryStr()
|
||||
|
||||
oldTag := oldRef.Identifier()
|
||||
if tag, ok := oldRef.(name.Tag); ok {
|
||||
oldTag = tag.TagStr()
|
||||
}
|
||||
newTag := newRef.Identifier()
|
||||
if tag, ok := newRef.(name.Tag); ok {
|
||||
newTag = tag.TagStr()
|
||||
}
|
||||
|
||||
// ContainerdImageNameKey stores annotationRef.Name() verbatim, which includes the
|
||||
// "index.docker.io" prefix for docker.io images. Do not strip "index." here or the
|
||||
// comparison will never match images stored by writeImage/writeIndex.
|
||||
oldRegistry := oldRefContext.RegistryStr()
|
||||
newRegistry := newRefContext.RegistryStr()
|
||||
// If user omitted a registry in the rewrite string, go-containerregistry defaults to
|
||||
// index.docker.io. Preserve the original registry when the source is non-docker.
|
||||
if newRegistry == "index.docker.io" && oldRegistry != "index.docker.io" {
|
||||
newRegistry = oldRegistry
|
||||
}
|
||||
oldTotal := oldRepo + ":" + oldTag
|
||||
newTotal := newRepo + ":" + newTag
|
||||
oldTotalReg := oldRegistry + "/" + oldTotal
|
||||
newTotalReg := newRegistry + "/" + newTotal
|
||||
|
||||
l.Infof("rewriting [%s] to [%s]", oldTotalReg, newTotalReg)
|
||||
|
||||
//find and update reference
|
||||
found := false
|
||||
if err := s.OCI.Walk(func(k string, d ocispec.Descriptor) error {
|
||||
if d.Annotations[ocispec.AnnotationRefName] == oldTotal && d.Annotations[consts.ContainerdImageNameKey] == oldTotalReg {
|
||||
d.Annotations[ocispec.AnnotationRefName] = newTotal
|
||||
d.Annotations[consts.ContainerdImageNameKey] = newTotalReg
|
||||
found = true
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !found {
|
||||
return fmt.Errorf("could not find image [%s] in store", oldRef.Name())
|
||||
}
|
||||
|
||||
return s.OCI.SaveIndex()
|
||||
|
||||
ChartOpts *action.ChartPathOptions
|
||||
}
|
||||
|
||||
func (o *AddChartOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVar(&o.ChartOpts.RepoURL, "repo", "", "chart repository url where to locate the requested chart")
|
||||
f.StringVar(&o.ChartOpts.Version, "version", "", "specify a version constraint for the chart version to use. This constraint can be a specific tag (e.g. 1.1.1) or it may reference a valid range (e.g. ^2.0.0). If this is not specified, the latest version is used")
|
||||
f.BoolVar(&o.ChartOpts.Verify, "verify", false, "verify the package before using it")
|
||||
f.StringVar(&o.ChartOpts.Username, "username", "", "chart repository username where to locate the requested chart")
|
||||
f.StringVar(&o.ChartOpts.Password, "password", "", "chart repository password where to locate the requested chart")
|
||||
f.StringVar(&o.ChartOpts.CertFile, "cert-file", "", "identify HTTPS client using this SSL certificate file")
|
||||
f.StringVar(&o.ChartOpts.KeyFile, "key-file", "", "identify HTTPS client using this SSL key file")
|
||||
f.BoolVar(&o.ChartOpts.InsecureSkipTLSverify, "insecure-skip-tls-verify", false, "skip tls certificate checks for the chart download")
|
||||
f.StringVar(&o.ChartOpts.CaFile, "ca-file", "", "verify certificates of HTTPS-enabled servers using this CA bundle")
|
||||
}
|
||||
|
||||
func AddChartCmd(ctx context.Context, o *AddChartOpts, s *store.Layout, chartName string) error {
|
||||
// TODO: Reduce duplicates between api chart and upstream helm opts
|
||||
cfg := v1alpha1.Chart{
|
||||
func AddChartCmd(ctx context.Context, o *flags.AddChartOpts, s *store.Layout, chartName string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
cfg := v1.Chart{
|
||||
Name: chartName,
|
||||
RepoURL: o.ChartOpts.RepoURL,
|
||||
Version: o.ChartOpts.Version,
|
||||
}
|
||||
|
||||
return storeChart(ctx, s, cfg, o.ChartOpts)
|
||||
rewrite := ""
|
||||
if o.Rewrite != "" {
|
||||
rewrite = o.Rewrite
|
||||
}
|
||||
return storeChart(ctx, s, cfg, o, rso, ro, rewrite)
|
||||
}
|
||||
|
||||
func storeChart(ctx context.Context, s *store.Layout, cfg v1alpha1.Chart, opts *action.ChartPathOptions) error {
|
||||
// isSubchartKey is an unexported context key type used to mark recursive
// storeChart invocations as subchart additions (unexported to avoid
// collisions with context keys from other packages).
type isSubchartKey struct{}

// imageRegex matches lines beginning with "image:" (allowing leading
// whitespace and optional quotes) and captures the image reference.
var imageRegex = regexp.MustCompile(`(?m)^[ \t]*image:[ \t]*['"]?([^\s'"#]+)`)

// helmAnnotatedImage is the YAML shape of a single entry in a helm chart's
// image-list annotations.
type helmAnnotatedImage struct {
	Image string `yaml:"image"`
	Name  string `yaml:"name,omitempty"`
}
|
||||
|
||||
// imagesFromChartAnnotations parses image references from helm chart annotations
|
||||
func imagesFromChartAnnotations(c *helmchart.Chart) ([]string, error) {
|
||||
if c == nil || c.Metadata == nil || c.Metadata.Annotations == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// support multiple annotations
|
||||
keys := []string{
|
||||
"helm.sh/images",
|
||||
"images",
|
||||
}
|
||||
|
||||
var out []string
|
||||
for _, k := range keys {
|
||||
raw, ok := c.Metadata.Annotations[k]
|
||||
if !ok || strings.TrimSpace(raw) == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
var items []helmAnnotatedImage
|
||||
if err := yaml.Unmarshal([]byte(raw), &items); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse helm chart annotation %q: %w", k, err)
|
||||
}
|
||||
|
||||
for _, it := range items {
|
||||
img := strings.TrimSpace(it.Image)
|
||||
if img == "" {
|
||||
continue
|
||||
}
|
||||
img = strings.TrimPrefix(img, "/")
|
||||
out = append(out, img)
|
||||
}
|
||||
}
|
||||
|
||||
slices.Sort(out)
|
||||
out = slices.Compact(out)
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// imagesFromImagesLock parses image references from images lock files in the chart directory
|
||||
func imagesFromImagesLock(chartDir string) ([]string, error) {
|
||||
var out []string
|
||||
|
||||
for _, name := range []string{
|
||||
"images.lock",
|
||||
"images-lock.yaml",
|
||||
"images.lock.yaml",
|
||||
".images.lock.yaml",
|
||||
} {
|
||||
p := filepath.Join(chartDir, name)
|
||||
b, err := os.ReadFile(p)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
matches := imageRegex.FindAllSubmatch(b, -1)
|
||||
for _, m := range matches {
|
||||
if len(m) > 1 {
|
||||
out = append(out, string(m[1]))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(out) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
for i := range out {
|
||||
out[i] = strings.TrimPrefix(out[i], "/")
|
||||
}
|
||||
slices.Sort(out)
|
||||
out = slices.Compact(out)
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func applyDefaultRegistry(img string, defaultRegistry string) (string, error) {
|
||||
img = strings.TrimSpace(strings.TrimPrefix(img, "/"))
|
||||
if img == "" || defaultRegistry == "" {
|
||||
return img, nil
|
||||
}
|
||||
|
||||
ref, err := reference.Parse(img)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if ref.Context().RegistryStr() != "" {
|
||||
return img, nil
|
||||
}
|
||||
|
||||
newRef, err := reference.Relocate(img, defaultRegistry)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return newRef.Name(), nil
|
||||
}
|
||||
|
||||
func storeChart(ctx context.Context, s *store.Layout, cfg v1.Chart, opts *flags.AddChartOpts, rso *flags.StoreRootOpts, ro *flags.CliRootOpts, rewrite string) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
// TODO: This shouldn't be necessary
|
||||
opts.RepoURL = cfg.RepoURL
|
||||
opts.Version = cfg.Version
|
||||
// subchart logging prefix
|
||||
isSubchart := ctx.Value(isSubchartKey{}) == true
|
||||
prefix := ""
|
||||
if isSubchart {
|
||||
prefix = " ↳ "
|
||||
}
|
||||
|
||||
chrt, err := chart.NewChart(cfg.Name, opts)
|
||||
// normalize chart name for logging
|
||||
displayName := cfg.Name
|
||||
if strings.Contains(cfg.Name, string(os.PathSeparator)) {
|
||||
displayName = filepath.Base(cfg.Name)
|
||||
}
|
||||
l.Infof("%sadding chart [%s] to the store", prefix, displayName)
|
||||
|
||||
opts.ChartOpts.RepoURL = cfg.RepoURL
|
||||
opts.ChartOpts.Version = cfg.Version
|
||||
|
||||
chrt, err := chart.NewChart(cfg.Name, opts.ChartOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -159,11 +374,267 @@ func storeChart(ctx context.Context, s *store.Layout, cfg v1alpha1.Chart, opts *
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
desc, err := s.AddOCI(ctx, chrt, ref.Name())
|
||||
if err != nil {
|
||||
|
||||
if _, err := s.AddArtifact(ctx, chrt, ref.Name()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.OCI.SaveIndex(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l.Infof("added 'chart' to store at [%s], with digest [%s]", ref.Name(), desc.Digest.String())
|
||||
l.Infof("%ssuccessfully added chart [%s:%s]", prefix, c.Name(), c.Metadata.Version)
|
||||
|
||||
tempOverride := rso.TempOverride
|
||||
if tempOverride == "" {
|
||||
tempOverride = os.Getenv(consts.HaulerTempDir)
|
||||
}
|
||||
tempDir, err := os.MkdirTemp(tempOverride, consts.DefaultHaulerTempDirName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create temp dir: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
chartPath := chrt.Path()
|
||||
if strings.HasSuffix(chartPath, ".tgz") {
|
||||
l.Debugf("%sextracting chart archive [%s]", prefix, filepath.Base(chartPath))
|
||||
if err := chartutil.ExpandFile(tempDir, chartPath); err != nil {
|
||||
return fmt.Errorf("failed to extract chart: %w", err)
|
||||
}
|
||||
|
||||
// expanded chart should be in a directory matching the chart name
|
||||
expectedChartDir := filepath.Join(tempDir, c.Name())
|
||||
if _, err := os.Stat(expectedChartDir); err != nil {
|
||||
return fmt.Errorf("chart archive did not expand into expected directory '%s': %w", c.Name(), err)
|
||||
}
|
||||
chartPath = expectedChartDir
|
||||
}
|
||||
|
||||
// add-images
|
||||
if opts.AddImages {
|
||||
userValues := chartutil.Values{}
|
||||
if opts.HelmValues != "" {
|
||||
userValues, err = chartutil.ReadValuesFile(opts.HelmValues)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read helm values file [%s]: %w", opts.HelmValues, err)
|
||||
}
|
||||
}
|
||||
|
||||
// set helm default capabilities
|
||||
caps := chartutil.DefaultCapabilities.Copy()
|
||||
|
||||
// only parse and override if provided kube version
|
||||
if opts.KubeVersion != "" {
|
||||
kubeVersion, err := chartutil.ParseKubeVersion(opts.KubeVersion)
|
||||
if err != nil {
|
||||
l.Warnf("%sinvalid kube-version [%s], using default kubernetes version", prefix, opts.KubeVersion)
|
||||
} else {
|
||||
caps.KubeVersion = *kubeVersion
|
||||
}
|
||||
}
|
||||
|
||||
values, err := chartutil.ToRenderValues(c, userValues, chartutil.ReleaseOptions{Namespace: "hauler"}, caps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// helper for normalization and deduping slices
|
||||
normalizeUniq := func(in []string) []string {
|
||||
if len(in) == 0 {
|
||||
return nil
|
||||
}
|
||||
for i := range in {
|
||||
in[i] = strings.TrimPrefix(in[i], "/")
|
||||
}
|
||||
slices.Sort(in)
|
||||
return slices.Compact(in)
|
||||
}
|
||||
|
||||
// Collect images by method so we can debug counts
|
||||
var (
|
||||
templateImages []string
|
||||
annotationImages []string
|
||||
lockImages []string
|
||||
)
|
||||
|
||||
// parse helm chart templates and values for images
|
||||
rendered, err := engine.Render(c, values)
|
||||
if err != nil {
|
||||
// charts may fail due to values so still try helm chart annotations and lock
|
||||
l.Warnf("%sfailed to render chart [%s]: %v", prefix, c.Name(), err)
|
||||
rendered = map[string]string{}
|
||||
}
|
||||
|
||||
for _, manifest := range rendered {
|
||||
matches := imageRegex.FindAllStringSubmatch(manifest, -1)
|
||||
for _, match := range matches {
|
||||
if len(match) > 1 {
|
||||
templateImages = append(templateImages, match[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parse helm chart annotations for images
|
||||
annotationImages, err = imagesFromChartAnnotations(c)
|
||||
if err != nil {
|
||||
l.Warnf("%sfailed to parse helm chart annotation for [%s:%s]: %v", prefix, c.Name(), c.Metadata.Version, err)
|
||||
annotationImages = nil
|
||||
}
|
||||
|
||||
// parse images lock files for images
|
||||
lockImages, err = imagesFromImagesLock(chartPath)
|
||||
if err != nil {
|
||||
l.Warnf("%sfailed to parse images lock: %v", prefix, err)
|
||||
lockImages = nil
|
||||
}
|
||||
|
||||
// normalization and deduping the slices
|
||||
templateImages = normalizeUniq(templateImages)
|
||||
annotationImages = normalizeUniq(annotationImages)
|
||||
lockImages = normalizeUniq(lockImages)
|
||||
|
||||
// merge all sources then final dedupe
|
||||
images := append(append(templateImages, annotationImages...), lockImages...)
|
||||
images = normalizeUniq(images)
|
||||
|
||||
l.Debugf("%simage references identified for helm template: [%d] image(s)", prefix, len(templateImages))
|
||||
|
||||
l.Debugf("%simage references identified for helm chart annotations: [%d] image(s)", prefix, len(annotationImages))
|
||||
|
||||
l.Debugf("%simage references identified for helm image lock file: [%d] image(s)", prefix, len(lockImages))
|
||||
l.Debugf("%ssuccessfully parsed and deduped image references: [%d] image(s)", prefix, len(images))
|
||||
|
||||
l.Debugf("%ssuccessfully parsed image references %v", prefix, images)
|
||||
|
||||
if len(images) > 0 {
|
||||
l.Infof("%s ↳ identified [%d] image(s) in [%s:%s]", prefix, len(images), c.Name(), c.Metadata.Version)
|
||||
}
|
||||
|
||||
for _, image := range images {
|
||||
image, err := applyDefaultRegistry(image, opts.Registry)
|
||||
if err != nil {
|
||||
if ro.IgnoreErrors {
|
||||
l.Warnf("%s ↳ unable to apply registry to image [%s]: %v... skipping...", prefix, image, err)
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("unable to apply registry to image [%s]: %w", image, err)
|
||||
}
|
||||
|
||||
imgCfg := v1.Image{Name: image}
|
||||
if err := storeImage(ctx, s, imgCfg, opts.Platform, rso, ro, ""); err != nil {
|
||||
if ro.IgnoreErrors {
|
||||
l.Warnf("%s ↳ failed to store image [%s]: %v... skipping...", prefix, image, err)
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("failed to store image [%s]: %w", image, err)
|
||||
}
|
||||
if err := s.OCI.LoadIndex(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.OCI.SaveIndex(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add-dependencies
|
||||
if opts.AddDependencies && len(c.Metadata.Dependencies) > 0 {
|
||||
for _, dep := range c.Metadata.Dependencies {
|
||||
l.Infof("%sadding dependent chart [%s:%s]", prefix, dep.Name, dep.Version)
|
||||
|
||||
depOpts := *opts
|
||||
depOpts.AddDependencies = true
|
||||
depOpts.AddImages = true
|
||||
subCtx := context.WithValue(ctx, isSubchartKey{}, true)
|
||||
|
||||
var depCfg v1.Chart
|
||||
var err error
|
||||
|
||||
if strings.HasPrefix(dep.Repository, "file://") || dep.Repository == "" {
|
||||
subchartPath := filepath.Join(chartPath, "charts", dep.Name)
|
||||
|
||||
depCfg = v1.Chart{Name: subchartPath, RepoURL: "", Version: ""}
|
||||
depOpts.ChartOpts.RepoURL = ""
|
||||
depOpts.ChartOpts.Version = ""
|
||||
|
||||
err = storeChart(subCtx, s, depCfg, &depOpts, rso, ro, "")
|
||||
} else {
|
||||
depCfg = v1.Chart{Name: dep.Name, RepoURL: dep.Repository, Version: dep.Version}
|
||||
depOpts.ChartOpts.RepoURL = dep.Repository
|
||||
depOpts.ChartOpts.Version = dep.Version
|
||||
|
||||
err = storeChart(subCtx, s, depCfg, &depOpts, rso, ro, "")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if ro.IgnoreErrors {
|
||||
l.Warnf("%s ↳ failed to add dependent chart [%s]: %v... skipping...", prefix, dep.Name, err)
|
||||
} else {
|
||||
l.Errorf("%s ↳ failed to add dependent chart [%s]: %v", prefix, dep.Name, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// chart rewrite functionality
|
||||
if rewrite != "" {
|
||||
rewrite = strings.TrimPrefix(rewrite, "/")
|
||||
newRef, err := name.ParseReference(rewrite)
|
||||
if err != nil {
|
||||
// error... don't continue with a bad reference
|
||||
return fmt.Errorf("unable to parse rewrite name [%s]: %w", rewrite, err)
|
||||
}
|
||||
|
||||
// if rewrite omits a tag... keep the existing tag
|
||||
oldTag := ref.Identifier()
|
||||
if tag, ok := ref.(name.Tag); ok {
|
||||
oldTag = tag.TagStr()
|
||||
}
|
||||
if !strings.Contains(rewrite, ":") {
|
||||
rewrite = strings.Join([]string{rewrite, oldTag}, ":")
|
||||
newRef, err = name.ParseReference(rewrite)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to parse rewrite name [%s]: %w", rewrite, err)
|
||||
}
|
||||
}
|
||||
|
||||
// rename chart name in store
|
||||
if err := s.OCI.LoadIndex(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
oldRefContext := ref.Context()
|
||||
newRefContext := newRef.Context()
|
||||
|
||||
oldRepo := oldRefContext.RepositoryStr()
|
||||
newRepo := newRefContext.RepositoryStr()
|
||||
newTag := newRef.Identifier()
|
||||
if tag, ok := newRef.(name.Tag); ok {
|
||||
newTag = tag.TagStr()
|
||||
}
|
||||
|
||||
oldTotal := oldRepo + ":" + oldTag
|
||||
newTotal := newRepo + ":" + newTag
|
||||
|
||||
found := false
|
||||
if err := s.OCI.Walk(func(k string, d ocispec.Descriptor) error {
|
||||
if d.Annotations[ocispec.AnnotationRefName] == oldTotal {
|
||||
d.Annotations[ocispec.AnnotationRefName] = newTotal
|
||||
found = true
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !found {
|
||||
return fmt.Errorf("could not find chart [%s] in store", ref.Name())
|
||||
}
|
||||
|
||||
if err := s.OCI.SaveIndex(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
561
cmd/hauler/cli/store/add_test.go
Normal file
561
cmd/hauler/cli/store/add_test.go
Normal file
@@ -0,0 +1,561 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/registry"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
helmchart "helm.sh/helm/v3/pkg/chart"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
// newLocalhostRegistry creates an in-memory OCI registry server listening on
|
||||
// localhost (rather than 127.0.0.1) so go-containerregistry's Scheme() method
|
||||
// automatically selects plain HTTP for "localhost:PORT/…" refs. This is
|
||||
// required for tests that exercise storeImage, which calls s.AddImage without
|
||||
// any custom transport options.
|
||||
func newLocalhostRegistry(t *testing.T) (host string, remoteOpts []remote.Option) {
|
||||
t.Helper()
|
||||
l, err := net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
t.Fatalf("newLocalhostRegistry listen: %v", err)
|
||||
}
|
||||
srv := httptest.NewUnstartedServer(registry.New())
|
||||
srv.Listener = l
|
||||
srv.Start()
|
||||
t.Cleanup(srv.Close)
|
||||
host = strings.TrimPrefix(srv.URL, "http://")
|
||||
remoteOpts = []remote.Option{remote.WithTransport(srv.Client().Transport)}
|
||||
return host, remoteOpts
|
||||
}
|
||||
|
||||
// chartTestdataDir is the relative path from cmd/hauler/cli/store/ to the
|
||||
// top-level testdata directory, matching the convention in chart_test.go.
|
||||
// It must remain relative so that url.ParseRequestURI rejects it (an absolute
|
||||
// path would be mistakenly treated as a URL by chart.NewChart's isUrl check).
|
||||
const chartTestdataDir = "../../../../testdata"
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Unit tests — unexported helpers
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestImagesFromChartAnnotations(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
chart *helmchart.Chart
|
||||
want []string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "nil chart returns nil",
|
||||
chart: nil,
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "no annotations returns nil",
|
||||
chart: &helmchart.Chart{Metadata: &helmchart.Metadata{}},
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "helm.sh/images annotation returns sorted refs",
|
||||
chart: &helmchart.Chart{
|
||||
Metadata: &helmchart.Metadata{
|
||||
Annotations: map[string]string{
|
||||
"helm.sh/images": "- image: nginx:1.24\n- image: alpine:3.18\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{"alpine:3.18", "nginx:1.24"},
|
||||
},
|
||||
{
|
||||
name: "both annotations with overlap returns deduped union",
|
||||
chart: &helmchart.Chart{
|
||||
Metadata: &helmchart.Metadata{
|
||||
Annotations: map[string]string{
|
||||
"helm.sh/images": "- image: nginx:1.24\n- image: alpine:3.18\n",
|
||||
"images": "- image: nginx:1.24\n- image: busybox:latest\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{"alpine:3.18", "busybox:latest", "nginx:1.24"},
|
||||
},
|
||||
{
|
||||
name: "malformed YAML returns error",
|
||||
chart: &helmchart.Chart{
|
||||
Metadata: &helmchart.Metadata{
|
||||
Annotations: map[string]string{
|
||||
// Unclosed flow sequence → YAML syntax error.
|
||||
"helm.sh/images": "- image: [unclosed bracket",
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got, err := imagesFromChartAnnotations(tc.chart)
|
||||
if (err != nil) != tc.wantErr {
|
||||
t.Fatalf("error = %v, wantErr %v", err, tc.wantErr)
|
||||
}
|
||||
if !reflect.DeepEqual(got, tc.want) {
|
||||
t.Errorf("got %v, want %v", got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImagesFromImagesLock(t *testing.T) {
|
||||
writeFile := func(dir, fname, content string) {
|
||||
t.Helper()
|
||||
if err := os.WriteFile(filepath.Join(dir, fname), []byte(content), 0o644); err != nil {
|
||||
t.Fatalf("write %s: %v", fname, err)
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("images.lock with image lines returns sorted refs", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeFile(dir, "images.lock", "image: rancher/rancher:v2.9\nimage: nginx:1.24\n")
|
||||
got, err := imagesFromImagesLock(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
want := []string{"nginx:1.24", "rancher/rancher:v2.9"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("images-lock.yaml returns refs", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeFile(dir, "images-lock.yaml", "image: alpine:3.18\n")
|
||||
got, err := imagesFromImagesLock(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
want := []string{"alpine:3.18"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("empty dir returns nil", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
got, err := imagesFromImagesLock(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if got != nil {
|
||||
t.Errorf("expected nil, got %v", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("multiple lock files merged and deduped", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeFile(dir, "images.lock", "image: nginx:1.24\nimage: alpine:3.18\n")
|
||||
writeFile(dir, "images-lock.yaml", "image: nginx:1.24\nimage: busybox:latest\n")
|
||||
got, err := imagesFromImagesLock(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
want := []string{"alpine:3.18", "busybox:latest", "nginx:1.24"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestApplyDefaultRegistry(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
img string
|
||||
registry string
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "empty img returns empty",
|
||||
img: "",
|
||||
registry: "myregistry.io",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "empty registry returns img unchanged",
|
||||
img: "rancher/rancher:v2.9",
|
||||
registry: "",
|
||||
want: "rancher/rancher:v2.9",
|
||||
},
|
||||
{
|
||||
name: "img without registry gets registry prepended",
|
||||
img: "rancher/rancher:v2.9",
|
||||
registry: "myregistry.io",
|
||||
want: "myregistry.io/rancher/rancher:v2.9",
|
||||
},
|
||||
{
|
||||
name: "img with existing registry unchanged",
|
||||
img: "ghcr.io/rancher/rancher:v2.9",
|
||||
registry: "myregistry.io",
|
||||
want: "ghcr.io/rancher/rancher:v2.9",
|
||||
},
|
||||
{
|
||||
name: "invalid ref with spaces returns error",
|
||||
img: "invalid ref with spaces",
|
||||
registry: "myregistry.io",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got, err := applyDefaultRegistry(tc.img, tc.registry)
|
||||
if (err != nil) != tc.wantErr {
|
||||
t.Fatalf("error = %v, wantErr %v", err, tc.wantErr)
|
||||
}
|
||||
if !tc.wantErr && got != tc.want {
|
||||
t.Errorf("got %q, want %q", got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRewriteReference(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
t.Run("valid rewrite updates store annotations", func(t *testing.T) {
|
||||
host, rOpts := newTestRegistry(t)
|
||||
seedImage(t, host, "src/repo", "v1", rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, host+"/src/repo:v1", "", rOpts...); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
oldRef, err := name.NewTag(host+"/src/repo:v1", name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("parse oldRef: %v", err)
|
||||
}
|
||||
newRef, err := name.NewTag(host+"/dst/repo:v2", name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("parse newRef: %v", err)
|
||||
}
|
||||
|
||||
if err := rewriteReference(ctx, s, oldRef, newRef); err != nil {
|
||||
t.Fatalf("rewriteReference: %v", err)
|
||||
}
|
||||
|
||||
assertArtifactInStore(t, s, "dst/repo:v2")
|
||||
})
|
||||
|
||||
t.Run("old ref not found returns error", func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
oldRef, _ := name.NewTag("docker.io/missing/repo:v1")
|
||||
newRef, _ := name.NewTag("docker.io/new/repo:v2")
|
||||
|
||||
err := rewriteReference(ctx, s, oldRef, newRef)
|
||||
if err == nil {
|
||||
t.Fatal("expected error, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "could not find") {
|
||||
t.Errorf("expected 'could not find' in error, got: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Integration tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestStoreFile(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
t.Run("local file stored successfully", func(t *testing.T) {
|
||||
tmp, err := os.CreateTemp(t.TempDir(), "testfile-*.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmp.WriteString("hello hauler") //nolint:errcheck
|
||||
tmp.Close()
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := storeFile(ctx, s, v1.File{Path: tmp.Name()}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, filepath.Base(tmp.Name()))
|
||||
})
|
||||
|
||||
t.Run("HTTP URL stored under basename", func(t *testing.T) {
|
||||
url := seedFileInHTTPServer(t, "script.sh", "#!/bin/sh\necho ok")
|
||||
s := newTestStore(t)
|
||||
if err := storeFile(ctx, s, v1.File{Path: url}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "script.sh")
|
||||
})
|
||||
|
||||
t.Run("name override changes stored ref", func(t *testing.T) {
|
||||
tmp, err := os.CreateTemp(t.TempDir(), "orig-*.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmp.Close()
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := storeFile(ctx, s, v1.File{Path: tmp.Name(), Name: "custom.sh"}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "custom.sh")
|
||||
})
|
||||
|
||||
t.Run("nonexistent local path returns error", func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
err := storeFile(ctx, s, v1.File{Path: "/nonexistent/path/missing-file.txt"})
|
||||
if err == nil {
|
||||
t.Fatal("expected error for nonexistent path, got nil")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestAddFileCmd(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
tmp, err := os.CreateTemp(t.TempDir(), "rawfile-*.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmp.WriteString("raw content") //nolint:errcheck
|
||||
tmp.Close()
|
||||
|
||||
o := &flags.AddFileOpts{Name: "renamed.txt"}
|
||||
if err := AddFileCmd(ctx, o, s, tmp.Name()); err != nil {
|
||||
t.Fatalf("AddFileCmd: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "renamed.txt")
|
||||
}
|
||||
|
||||
func TestStoreImage(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
seedImage(t, host, "test/repo", "v1", rOpts...)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
imageName string
|
||||
ignoreErrors bool
|
||||
wantErr bool
|
||||
wantInStore string
|
||||
}{
|
||||
{
|
||||
name: "valid image tag stored",
|
||||
imageName: host + "/test/repo:v1",
|
||||
wantInStore: "test/repo:v1",
|
||||
},
|
||||
{
|
||||
name: "invalid ref string returns error",
|
||||
imageName: "INVALID IMAGE REF !! ##",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "nonexistent image with IgnoreErrors returns nil",
|
||||
imageName: host + "/nonexistent/image:missing",
|
||||
ignoreErrors: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "nonexistent image without IgnoreErrors returns error",
|
||||
imageName: host + "/nonexistent/image:missing",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
ro.IgnoreErrors = tc.ignoreErrors
|
||||
|
||||
err := storeImage(ctx, s, v1.Image{Name: tc.imageName}, "", rso, ro, "")
|
||||
if (err != nil) != tc.wantErr {
|
||||
t.Fatalf("error = %v, wantErr %v", err, tc.wantErr)
|
||||
}
|
||||
if tc.wantInStore != "" {
|
||||
assertArtifactInStore(t, s, tc.wantInStore)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStoreImage_Rewrite(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
|
||||
t.Run("explicit rewrite tag changes ref", func(t *testing.T) {
|
||||
seedImage(t, host, "src/repo", "v1", rOpts...)
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
err := storeImage(ctx, s, v1.Image{Name: host + "/src/repo:v1"}, "", rso, ro, "newrepo/img:v2")
|
||||
if err != nil {
|
||||
t.Fatalf("storeImage with rewrite: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "newrepo/img:v2")
|
||||
})
|
||||
|
||||
t.Run("rewrite without tag inherits source tag", func(t *testing.T) {
|
||||
seedImage(t, host, "src/repo", "v3", rOpts...)
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
err := storeImage(ctx, s, v1.Image{Name: host + "/src/repo:v3"}, "", rso, ro, "newrepo/img")
|
||||
if err != nil {
|
||||
t.Fatalf("storeImage with tagless rewrite: %v", err)
|
||||
}
|
||||
// tag is inherited from source ("v3")
|
||||
assertArtifactInStore(t, s, "newrepo/img:v3")
|
||||
})
|
||||
|
||||
t.Run("rewrite without tag on digest source ref returns error", func(t *testing.T) {
|
||||
img := seedImage(t, host, "src/repo", "digest-src", rOpts...)
|
||||
h, err := img.Digest()
|
||||
if err != nil {
|
||||
t.Fatalf("img.Digest: %v", err)
|
||||
}
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
digestRef := host + "/src/repo@" + h.String()
|
||||
err = storeImage(ctx, s, v1.Image{Name: digestRef}, "", rso, ro, "newrepo/img")
|
||||
if err == nil {
|
||||
t.Fatal("expected error for digest ref rewrite without explicit tag, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "cannot rewrite digest reference") {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestStoreImage_MultiArch(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
seedIndex(t, host, "test/multiarch", "v1", rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := storeImage(ctx, s, v1.Image{Name: host + "/test/multiarch:v1"}, "", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage multi-arch index: %v", err)
|
||||
}
|
||||
// Full index (both platforms) must be stored as an index, not a single image.
|
||||
assertArtifactKindInStore(t, s, "test/multiarch:v1", consts.KindAnnotationIndex)
|
||||
}
|
||||
|
||||
func TestStoreImage_PlatformFilter(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
seedIndex(t, host, "test/multiarch", "v2", rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := storeImage(ctx, s, v1.Image{Name: host + "/test/multiarch:v2"}, "linux/amd64", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage with platform filter: %v", err)
|
||||
}
|
||||
// Platform filter resolves a single manifest from the index → stored as a single image.
|
||||
assertArtifactKindInStore(t, s, "test/multiarch:v2", consts.KindAnnotationImage)
|
||||
}
|
||||
|
||||
func TestStoreImage_CosignV2Artifacts(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
|
||||
img := seedImage(t, host, "test/signed", "v1", rOpts...)
|
||||
seedCosignV2Artifacts(t, host, "test/signed", img, rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := storeImage(ctx, s, v1.Image{Name: host + "/test/signed:v1"}, "", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage: %v", err)
|
||||
}
|
||||
assertArtifactKindInStore(t, s, "test/signed:v1", consts.KindAnnotationSigs)
|
||||
assertArtifactKindInStore(t, s, "test/signed:v1", consts.KindAnnotationAtts)
|
||||
assertArtifactKindInStore(t, s, "test/signed:v1", consts.KindAnnotationSboms)
|
||||
}
|
||||
|
||||
func TestStoreImage_CosignV3Referrer(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
|
||||
img := seedImage(t, host, "test/image", "v1", rOpts...)
|
||||
seedOCI11Referrer(t, host, "test/image", img, rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := storeImage(ctx, s, v1.Image{Name: host + "/test/image:v1"}, "", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage: %v", err)
|
||||
}
|
||||
assertReferrerInStore(t, s, "test/image:v1")
|
||||
}
|
||||
|
||||
func TestAddChartCmd_LocalTgz(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
o := newAddChartOpts(chartTestdataDir, "")
|
||||
if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil {
|
||||
t.Fatalf("AddChartCmd: %v", err)
|
||||
}
|
||||
// Hauler stores all artifacts (files, charts) via store.AddArtifact, which
|
||||
// unconditionally sets KindAnnotationName = KindAnnotationImage (see
|
||||
// pkg/store/store.go). There is no separate "chart" kind — charts are
|
||||
// wrapped in an OCI image manifest and tagged with KindAnnotationImage.
|
||||
assertArtifactKindInStore(t, s, "rancher-cluster-templates", consts.KindAnnotationImage)
|
||||
}
|
||||
|
||||
func TestAddChartCmd_WithFileDep(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
o := newAddChartOpts(chartTestdataDir, "")
|
||||
if err := AddChartCmd(ctx, o, s, "chart-with-file-dependency-chart-1.0.0.tgz", rso, ro); err != nil {
|
||||
t.Fatalf("AddChartCmd: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "chart-with-file-dependency-chart")
|
||||
}
|
||||
|
||||
func TestStoreChart_Rewrite(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
o := newAddChartOpts(chartTestdataDir, "")
|
||||
o.Rewrite = "myorg/custom-chart"
|
||||
|
||||
if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil {
|
||||
t.Fatalf("AddChartCmd with rewrite: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "myorg/custom-chart")
|
||||
}
|
||||
@@ -2,61 +2,274 @@ package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"oras.land/oras-go/pkg/content"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/errdefs"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/cosign"
|
||||
"github.com/rancherfederal/hauler/pkg/store"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/internal/mapper"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/retry"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
type CopyOpts struct {
|
||||
*RootOpts
|
||||
|
||||
Username string
|
||||
Password string
|
||||
Insecure bool
|
||||
PlainHTTP bool
|
||||
}
|
||||
|
||||
func (o *CopyOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.Username, "username", "u", "", "Username when copying to an authenticated remote registry")
|
||||
f.StringVarP(&o.Password, "password", "p", "", "Password when copying to an authenticated remote registry")
|
||||
f.BoolVar(&o.Insecure, "insecure", false, "Toggle allowing insecure connections when copying to a remote registry")
|
||||
f.BoolVar(&o.PlainHTTP, "plain-http", false, "Toggle allowing plain http connections when copying to a remote registry")
|
||||
}
|
||||
|
||||
func CopyCmd(ctx context.Context, o *CopyOpts, s *store.Layout, targetRef string) error {
|
||||
func CopyCmd(ctx context.Context, o *flags.CopyOpts, s *store.Layout, targetRef string, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
if o.Username != "" || o.Password != "" {
|
||||
return fmt.Errorf("--username/--password have been deprecated, please use 'hauler login'")
|
||||
}
|
||||
|
||||
if !s.IndexExists() {
|
||||
return fmt.Errorf("store index not found: run 'hauler store add/sync/load' first")
|
||||
}
|
||||
|
||||
components := strings.SplitN(targetRef, "://", 2)
|
||||
switch components[0] {
|
||||
case "dir":
|
||||
l.Debugf("identified directory target reference")
|
||||
fs := content.NewFile(components[1])
|
||||
defer fs.Close()
|
||||
l.Debugf("identified directory target reference of [%s]", components[1])
|
||||
|
||||
_, err := s.CopyAll(ctx, fs, nil)
|
||||
// Create destination directory if it doesn't exist
|
||||
if err := os.MkdirAll(components[1], 0755); err != nil {
|
||||
return fmt.Errorf("failed to create destination directory: %w", err)
|
||||
}
|
||||
|
||||
// For directory targets, extract files and charts (not images)
|
||||
err := s.Walk(func(reference string, desc ocispec.Descriptor) error {
|
||||
// Skip cosign sig/att/sbom artifacts — they're registry-only metadata,
|
||||
// not extractable as files or charts.
|
||||
kind := desc.Annotations[consts.KindAnnotationName]
|
||||
switch kind {
|
||||
case consts.KindAnnotationSigs, consts.KindAnnotationAtts, consts.KindAnnotationSboms:
|
||||
l.Debugf("skipping cosign artifact [%s] for directory target", reference)
|
||||
return nil
|
||||
}
|
||||
if strings.HasPrefix(kind, consts.KindAnnotationReferrers) {
|
||||
l.Debugf("skipping OCI referrer [%s] for directory target", reference)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle different media types
|
||||
switch desc.MediaType {
|
||||
case ocispec.MediaTypeImageIndex, consts.DockerManifestListSchema2:
|
||||
// Multi-platform index - process each child manifest
|
||||
rc, err := s.Fetch(ctx, desc)
|
||||
if err != nil {
|
||||
l.Warnf("failed to fetch index [%s]: %v", reference, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
var index ocispec.Index
|
||||
if err := json.NewDecoder(rc).Decode(&index); err != nil {
|
||||
if cerr := rc.Close(); cerr != nil {
|
||||
l.Warnf("failed to close index reader for [%s]: %v", reference, cerr)
|
||||
}
|
||||
l.Warnf("failed to decode index for [%s]: %v", reference, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close rc immediately after decoding - we're done reading from it
|
||||
if cerr := rc.Close(); cerr != nil {
|
||||
l.Warnf("failed to close index reader for [%s]: %v", reference, cerr)
|
||||
}
|
||||
|
||||
// Process each manifest in the index
|
||||
for _, manifestDesc := range index.Manifests {
|
||||
manifestRC, err := s.Fetch(ctx, manifestDesc)
|
||||
if err != nil {
|
||||
l.Warnf("failed to fetch child manifest: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
var m ocispec.Manifest
|
||||
if err := json.NewDecoder(manifestRC).Decode(&m); err != nil {
|
||||
manifestRC.Close()
|
||||
l.Warnf("failed to decode child manifest: %v", err)
|
||||
continue
|
||||
}
|
||||
manifestRC.Close()
|
||||
|
||||
// Skip images - only extract files and charts
|
||||
if m.Config.MediaType == consts.DockerConfigJSON ||
|
||||
m.Config.MediaType == ocispec.MediaTypeImageConfig {
|
||||
l.Debugf("skipping image manifest in index [%s]", reference)
|
||||
continue
|
||||
}
|
||||
|
||||
// Create mapper and extract
|
||||
mapperStore, err := mapper.FromManifest(m, components[1])
|
||||
if err != nil {
|
||||
l.Warnf("failed to create mapper for child: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Note: We can't call s.Copy with manifestDesc because it's not in the nameMap
|
||||
// Instead, we need to manually push through the mapper
|
||||
if err := extractManifestContent(ctx, s, manifestDesc, m, mapperStore); err != nil {
|
||||
l.Warnf("failed to extract child: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
l.Debugf("extracted child manifest from index [%s]", reference)
|
||||
}
|
||||
|
||||
case ocispec.MediaTypeImageManifest, consts.DockerManifestSchema2:
|
||||
// Single-platform manifest
|
||||
rc, err := s.Fetch(ctx, desc)
|
||||
if err != nil {
|
||||
l.Warnf("failed to fetch [%s]: %v", reference, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
var m ocispec.Manifest
|
||||
if err := json.NewDecoder(rc).Decode(&m); err != nil {
|
||||
rc.Close()
|
||||
l.Warnf("failed to decode manifest for [%s]: %v", reference, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip images - only extract files and charts for directory targets
|
||||
if m.Config.MediaType == consts.DockerConfigJSON ||
|
||||
m.Config.MediaType == ocispec.MediaTypeImageConfig {
|
||||
rc.Close()
|
||||
l.Debugf("skipping image [%s] for directory target", reference)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create a mapper store based on the manifest type
|
||||
mapperStore, err := mapper.FromManifest(m, components[1])
|
||||
if err != nil {
|
||||
rc.Close()
|
||||
l.Warnf("failed to create mapper for [%s]: %v", reference, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy/extract the content
|
||||
_, err = s.Copy(ctx, reference, mapperStore, "")
|
||||
if err != nil {
|
||||
rc.Close()
|
||||
l.Warnf("failed to extract [%s]: %v", reference, err)
|
||||
return nil
|
||||
}
|
||||
rc.Close()
|
||||
|
||||
l.Debugf("extracted [%s] to directory", reference)
|
||||
|
||||
default:
|
||||
l.Debugf("skipping unsupported media type [%s] for [%s]", desc.MediaType, reference)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case "registry":
|
||||
l.Debugf("identified registry target reference")
|
||||
ropts := content.RegistryOptions{
|
||||
Username: o.Username,
|
||||
Password: o.Password,
|
||||
Insecure: o.Insecure,
|
||||
l.Debugf("identified registry target reference of [%s]", components[1])
|
||||
registryOpts := content.RegistryOptions{
|
||||
PlainHTTP: o.PlainHTTP,
|
||||
Insecure: o.Insecure,
|
||||
}
|
||||
|
||||
err := cosign.LoadImage(ctx, s, components[1], ropts)
|
||||
// Pre-build a map from base ref → image manifest digest so that sig/att/sbom
|
||||
// descriptors (which store the base image ref, not the cosign tag) can be routed
|
||||
// to the correct destination tag using the cosign tag convention.
|
||||
refDigest := make(map[string]string)
|
||||
if err := s.Walk(func(_ string, desc ocispec.Descriptor) error {
|
||||
kind := desc.Annotations[consts.KindAnnotationName]
|
||||
if kind == consts.KindAnnotationImage || kind == consts.KindAnnotationIndex {
|
||||
if baseRef := desc.Annotations[ocispec.AnnotationRefName]; baseRef != "" {
|
||||
refDigest[baseRef] = desc.Digest.String()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sigExts := map[string]string{
|
||||
consts.KindAnnotationSigs: ".sig",
|
||||
consts.KindAnnotationAtts: ".att",
|
||||
consts.KindAnnotationSboms: ".sbom",
|
||||
}
|
||||
|
||||
var fatalErr error
|
||||
err := s.Walk(func(reference string, desc ocispec.Descriptor) error {
|
||||
if fatalErr != nil {
|
||||
return nil
|
||||
}
|
||||
baseRef := desc.Annotations[ocispec.AnnotationRefName]
|
||||
if baseRef == "" {
|
||||
return nil
|
||||
}
|
||||
if o.Only != "" && !strings.Contains(baseRef, o.Only) {
|
||||
l.Debugf("skipping [%s] (not matching --only filter)", baseRef)
|
||||
return nil
|
||||
}
|
||||
|
||||
// For sig/att/sbom descriptors, derive the cosign tag from the parent
|
||||
// image's manifest digest rather than using AnnotationRefName directly.
|
||||
destRef := baseRef
|
||||
kind := desc.Annotations[consts.KindAnnotationName]
|
||||
if ext, isSigKind := sigExts[kind]; isSigKind {
|
||||
if imgDigest, ok := refDigest[baseRef]; ok {
|
||||
digestTag := strings.ReplaceAll(imgDigest, ":", "-")
|
||||
repo := baseRef
|
||||
if colon := strings.LastIndex(baseRef, ":"); colon != -1 {
|
||||
repo = baseRef[:colon]
|
||||
}
|
||||
destRef = repo + ":" + digestTag + ext
|
||||
}
|
||||
} else if strings.HasPrefix(kind, consts.KindAnnotationReferrers) {
|
||||
// OCI 1.1 referrer (cosign v3 new-bundle-format): push by manifest digest so
|
||||
// the target registry wires it up via the OCI Referrers API (subject field).
|
||||
// For registries that don't support the Referrers API natively, the manifest
|
||||
// is still pushed intact; the subject linkage depends on registry support.
|
||||
repo := baseRef
|
||||
if colon := strings.LastIndex(baseRef, ":"); colon != -1 {
|
||||
repo = baseRef[:colon]
|
||||
}
|
||||
destRef = repo + "@" + desc.Digest.String()
|
||||
}
|
||||
|
||||
toRef, err := content.RewriteRefToRegistry(destRef, components[1])
|
||||
if err != nil {
|
||||
l.Warnf("failed to rewrite ref [%s]: %v", baseRef, err)
|
||||
return nil
|
||||
}
|
||||
l.Infof("%s", destRef)
|
||||
// A fresh target per artifact gives each push its own in-memory status
|
||||
// tracker. Containerd's tracker keys blobs by digest only (not repo),
|
||||
// so a shared tracker would mark shared blobs as "already exists" after
|
||||
// the first image, skipping the per-repository blob link creation that
|
||||
// Docker Distribution requires for manifest validation.
|
||||
target := content.NewRegistryTarget(components[1], registryOpts)
|
||||
var pushed ocispec.Descriptor
|
||||
if err := retry.Operation(ctx, o.StoreRootOpts, ro, func() error {
|
||||
var copyErr error
|
||||
pushed, copyErr = s.Copy(ctx, reference, target, toRef)
|
||||
return copyErr
|
||||
}); err != nil {
|
||||
if !ro.IgnoreErrors {
|
||||
fatalErr = err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
l.Infof("%s: digest: %s size: %d", toRef, pushed.Digest, pushed.Size)
|
||||
return nil
|
||||
})
|
||||
if fatalErr != nil {
|
||||
return fatalErr
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -65,6 +278,76 @@ func CopyCmd(ctx context.Context, o *CopyOpts, s *store.Layout, targetRef string
|
||||
return fmt.Errorf("detecting protocol from [%s]", targetRef)
|
||||
}
|
||||
|
||||
l.Infof("Copied artifacts to [%s]", components[1])
|
||||
l.Infof("copied artifacts to [%s]", components[1])
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractManifestContent extracts a manifest's layers through a mapper target
|
||||
// This is used for child manifests in indexes that aren't in the store's nameMap
|
||||
func extractManifestContent(ctx context.Context, s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, target content.Target) error {
|
||||
// Get a pusher from the target
|
||||
pusher, err := target.Pusher(ctx, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get pusher: %w", err)
|
||||
}
|
||||
|
||||
// Copy config blob
|
||||
if err := copyBlobDescriptor(ctx, s, m.Config, pusher); err != nil {
|
||||
return fmt.Errorf("failed to copy config: %w", err)
|
||||
}
|
||||
|
||||
// Copy each layer blob
|
||||
for _, layer := range m.Layers {
|
||||
if err := copyBlobDescriptor(ctx, s, layer, pusher); err != nil {
|
||||
return fmt.Errorf("failed to copy layer: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the manifest itself
|
||||
if err := copyBlobDescriptor(ctx, s, desc, pusher); err != nil {
|
||||
return fmt.Errorf("failed to copy manifest: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyBlobDescriptor copies a single descriptor blob from the store to a pusher
|
||||
func copyBlobDescriptor(ctx context.Context, s *store.Layout, desc ocispec.Descriptor, pusher remotes.Pusher) (err error) {
|
||||
// Fetch the content from the store
|
||||
rc, err := s.OCI.Fetch(ctx, desc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch blob: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := rc.Close(); closeErr != nil && err == nil {
|
||||
err = fmt.Errorf("failed to close reader: %w", closeErr)
|
||||
}
|
||||
}()
|
||||
|
||||
// Get a writer from the pusher
|
||||
writer, err := pusher.Push(ctx, desc)
|
||||
if err != nil {
|
||||
if errdefs.IsAlreadyExists(err) {
|
||||
return nil // content already present on remote
|
||||
}
|
||||
return fmt.Errorf("failed to push: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := writer.Close(); closeErr != nil && err == nil {
|
||||
err = fmt.Errorf("failed to close writer: %w", closeErr)
|
||||
}
|
||||
}()
|
||||
|
||||
// Copy the content
|
||||
n, err := io.Copy(writer, rc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy content: %w", err)
|
||||
}
|
||||
|
||||
// Commit the written content
|
||||
if err := writer.Commit(ctx, n, desc.Digest); err != nil {
|
||||
return fmt.Errorf("failed to commit: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
338
cmd/hauler/cli/store/copy_test.go
Normal file
338
cmd/hauler/cli/store/copy_test.go
Normal file
@@ -0,0 +1,338 @@
|
||||
package store
|
||||
|
||||
// copy_test.go covers CopyCmd for both registry:// and dir:// targets.
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
)
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Error / guard tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestCopyCmd_EmptyStoreFails verifies that CopyCmd returns an error when the
|
||||
// store has no index.json on disk (i.e. nothing has been added yet).
|
||||
func TestCopyCmd_EmptyStoreFails(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t) // freshly created — index.json not yet on disk
|
||||
|
||||
o := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)}
|
||||
err := CopyCmd(ctx, o, s, "registry://127.0.0.1:5000", defaultCliOpts())
|
||||
if err == nil {
|
||||
t.Fatal("expected error for empty store, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "store index not found") {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopyCmd_DeprecatedCredentials verifies that passing Username returns the
|
||||
// deprecation error before any other check.
|
||||
func TestCopyCmd_DeprecatedCredentials(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
o := &flags.CopyOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
Username: "user",
|
||||
Password: "pass",
|
||||
}
|
||||
err := CopyCmd(ctx, o, s, "registry://127.0.0.1:5000", defaultCliOpts())
|
||||
if err == nil {
|
||||
t.Fatal("expected deprecation error, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "deprecated") {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopyCmd_UnknownProtocol verifies that an unrecognized scheme returns an
|
||||
// error containing "detecting protocol".
|
||||
func TestCopyCmd_UnknownProtocol(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
// Write index.json so IndexExists() passes.
|
||||
if err := s.SaveIndex(); err != nil {
|
||||
t.Fatalf("SaveIndex: %v", err)
|
||||
}
|
||||
|
||||
o := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)}
|
||||
err := CopyCmd(ctx, o, s, "ftp://somehost/path", defaultCliOpts())
|
||||
if err == nil {
|
||||
t.Fatal("expected error for unknown protocol, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "detecting protocol") {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Registry copy tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestCopyCmd_Registry seeds a store with a single image, copies it to an
|
||||
// in-memory target registry, and verifies the image is reachable there.
|
||||
func TestCopyCmd_Registry(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
srcHost, _ := newLocalhostRegistry(t)
|
||||
seedImage(t, srcHost, "test/copy", "v1")
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
if err := storeImage(ctx, s, v1.Image{Name: srcHost + "/test/copy:v1"}, "", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage: %v", err)
|
||||
}
|
||||
|
||||
dstHost, dstOpts := newTestRegistry(t)
|
||||
o := &flags.CopyOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
PlainHTTP: true,
|
||||
}
|
||||
if err := CopyCmd(ctx, o, s, "registry://"+dstHost, ro); err != nil {
|
||||
t.Fatalf("CopyCmd registry: %v", err)
|
||||
}
|
||||
|
||||
// Verify the image is reachable in the target registry.
|
||||
dstRef, err := name.NewTag(dstHost+"/test/copy:v1", name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("name.NewTag: %v", err)
|
||||
}
|
||||
if _, err := remote.Get(dstRef, dstOpts...); err != nil {
|
||||
t.Errorf("image not found in target registry after copy: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopyCmd_Registry_OnlyFilter seeds two images in distinct repos, copies
|
||||
// with --only=repo1, and asserts only repo1 reaches the target.
|
||||
func TestCopyCmd_Registry_OnlyFilter(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
srcHost, _ := newLocalhostRegistry(t)
|
||||
seedImage(t, srcHost, "myorg/repo1", "v1")
|
||||
seedImage(t, srcHost, "myorg/repo2", "v1")
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
for _, repo := range []string{"myorg/repo1:v1", "myorg/repo2:v1"} {
|
||||
if err := storeImage(ctx, s, v1.Image{Name: srcHost + "/" + repo}, "", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage %s: %v", repo, err)
|
||||
}
|
||||
}
|
||||
|
||||
dstHost, dstOpts := newTestRegistry(t)
|
||||
o := &flags.CopyOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
PlainHTTP: true,
|
||||
Only: "repo1",
|
||||
}
|
||||
if err := CopyCmd(ctx, o, s, "registry://"+dstHost, ro); err != nil {
|
||||
t.Fatalf("CopyCmd with --only: %v", err)
|
||||
}
|
||||
|
||||
// repo1 must be in target.
|
||||
ref1, err := name.NewTag(dstHost+"/myorg/repo1:v1", name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("name.NewTag repo1: %v", err)
|
||||
}
|
||||
if _, err := remote.Get(ref1, dstOpts...); err != nil {
|
||||
t.Errorf("repo1 should be in target registry but was not found: %v", err)
|
||||
}
|
||||
|
||||
// repo2 must NOT be in target.
|
||||
ref2, err := name.NewTag(dstHost+"/myorg/repo2:v1", name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("name.NewTag repo2: %v", err)
|
||||
}
|
||||
if _, err := remote.Get(ref2, dstOpts...); err == nil {
|
||||
t.Error("repo2 should NOT be in target registry after --only=repo1, but was found")
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopyCmd_Registry_SigTagDerivation seeds a base image along with cosign
|
||||
// v2 signature artifacts, adds everything to the store via AddImage (which
|
||||
// auto-discovers the .sig/.att/.sbom tags), then copies to a target registry
|
||||
// and verifies the sig arrives at the expected sha256-<hex>.sig tag.
|
||||
func TestCopyCmd_Registry_SigTagDerivation(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
srcHost, _ := newLocalhostRegistry(t)
|
||||
srcImg := seedImage(t, srcHost, "test/signed", "v1")
|
||||
seedCosignV2Artifacts(t, srcHost, "test/signed", srcImg)
|
||||
|
||||
// AddImage discovers and stores the .sig/.att/.sbom tags automatically.
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, srcHost+"/test/signed:v1", ""); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
dstHost, dstOpts := newTestRegistry(t)
|
||||
o := &flags.CopyOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
PlainHTTP: true,
|
||||
}
|
||||
if err := CopyCmd(ctx, o, s, "registry://"+dstHost, defaultCliOpts()); err != nil {
|
||||
t.Fatalf("CopyCmd: %v", err)
|
||||
}
|
||||
|
||||
// Compute the expected cosign sig tag from the image's manifest digest.
|
||||
hash, err := srcImg.Digest()
|
||||
if err != nil {
|
||||
t.Fatalf("srcImg.Digest: %v", err)
|
||||
}
|
||||
sigTag := strings.ReplaceAll(hash.String(), ":", "-") + ".sig"
|
||||
|
||||
sigRef, err := name.NewTag(dstHost+"/test/signed:"+sigTag, name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("name.NewTag sigRef: %v", err)
|
||||
}
|
||||
if _, err := remote.Get(sigRef, dstOpts...); err != nil {
|
||||
t.Errorf("sig not found at expected tag %s in target registry: %v", sigTag, err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopyCmd_Registry_IgnoreErrors verifies that a push failure to a
|
||||
// non-listening address is swallowed when IgnoreErrors is set.
|
||||
func TestCopyCmd_Registry_IgnoreErrors(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
srcHost, _ := newLocalhostRegistry(t)
|
||||
seedImage(t, srcHost, "test/ignore", "v1")
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
if err := storeImage(ctx, s, v1.Image{Name: srcHost + "/test/ignore:v1"}, "", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage: %v", err)
|
||||
}
|
||||
|
||||
// localhost:1 is a port that is never listening.
|
||||
o := &flags.CopyOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
PlainHTTP: true,
|
||||
}
|
||||
roIgnore := defaultCliOpts()
|
||||
roIgnore.IgnoreErrors = true
|
||||
if err := CopyCmd(ctx, o, s, "registry://localhost:1", roIgnore); err != nil {
|
||||
t.Errorf("expected no error with IgnoreErrors=true, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Directory copy tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestCopyCmd_Dir_Files copies a file artifact to a directory target and
|
||||
// verifies the file appears under its original basename.
|
||||
func TestCopyCmd_Dir_Files(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
content := "hello from hauler file"
|
||||
url := seedFileInHTTPServer(t, "data.txt", content)
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := storeFile(ctx, s, v1.File{Path: url}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
|
||||
destDir := t.TempDir()
|
||||
o := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)}
|
||||
if err := CopyCmd(ctx, o, s, "dir://"+destDir, defaultCliOpts()); err != nil {
|
||||
t.Fatalf("CopyCmd dir: %v", err)
|
||||
}
|
||||
|
||||
outPath := filepath.Join(destDir, "data.txt")
|
||||
data, err := os.ReadFile(outPath)
|
||||
if err != nil {
|
||||
t.Fatalf("file not found in destDir after dir copy: %v", err)
|
||||
}
|
||||
if string(data) != content {
|
||||
t.Errorf("file content mismatch: got %q, want %q", string(data), content)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopyCmd_Dir_SkipsImages verifies that container images are not extracted
|
||||
// when copying to a directory target.
|
||||
func TestCopyCmd_Dir_SkipsImages(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
srcHost, _ := newLocalhostRegistry(t)
|
||||
seedImage(t, srcHost, "test/imgskip", "v1")
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
if err := storeImage(ctx, s, v1.Image{Name: srcHost + "/test/imgskip:v1"}, "", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage: %v", err)
|
||||
}
|
||||
|
||||
destDir := t.TempDir()
|
||||
o := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)}
|
||||
if err := CopyCmd(ctx, o, s, "dir://"+destDir, ro); err != nil {
|
||||
t.Fatalf("CopyCmd dir: %v", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadDir: %v", err)
|
||||
}
|
||||
if len(entries) != 0 {
|
||||
names := make([]string, len(entries))
|
||||
for i, e := range entries {
|
||||
names[i] = e.Name()
|
||||
}
|
||||
t.Errorf("expected empty destDir for image-only store, found: %s", strings.Join(names, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopyCmd_Dir_Charts copies a local Helm chart artifact to a directory
|
||||
// target and verifies a .tgz file is present.
|
||||
func TestCopyCmd_Dir_Charts(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
o := newAddChartOpts(chartTestdataDir, "")
|
||||
if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil {
|
||||
t.Fatalf("AddChartCmd: %v", err)
|
||||
}
|
||||
|
||||
destDir := t.TempDir()
|
||||
copyOpts := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)}
|
||||
if err := CopyCmd(ctx, copyOpts, s, "dir://"+destDir, ro); err != nil {
|
||||
t.Fatalf("CopyCmd dir charts: %v", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadDir: %v", err)
|
||||
}
|
||||
|
||||
var found bool
|
||||
for _, e := range entries {
|
||||
if strings.HasSuffix(e.Name(), ".tgz") || strings.HasSuffix(e.Name(), ".tar.gz") {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
names := make([]string, len(entries))
|
||||
for i, e := range entries {
|
||||
names[i] = e.Name()
|
||||
}
|
||||
t.Errorf("no .tgz found in destDir after chart copy; found: %v", names)
|
||||
}
|
||||
}
|
||||
@@ -2,32 +2,88 @@ package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/store"
|
||||
|
||||
"github.com/rancherfederal/hauler/internal/mapper"
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
"github.com/rancherfederal/hauler/pkg/reference"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/internal/mapper"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/reference"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
type ExtractOpts struct {
|
||||
*RootOpts
|
||||
DestinationDir string
|
||||
// isIndexMediaType returns true for OCI and Docker manifest list media types.
|
||||
func isIndexMediaType(mt string) bool {
|
||||
return mt == ocispec.MediaTypeImageIndex || mt == consts.DockerManifestListSchema2
|
||||
}
|
||||
|
||||
func (o *ExtractOpts) AddArgs(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
// firstLeafManifest walks a (potentially nested) OCI index and returns the
|
||||
// decoded manifest of the first non-index child. It prefers non-index children
|
||||
// at each level; if all children are indexes it descends into the first one.
|
||||
// Returns an error if any nested index or manifest cannot be decoded.
|
||||
func firstLeafManifest(ctx context.Context, s *store.Layout, idx ocispec.Index) (ocispec.Manifest, error) {
|
||||
for {
|
||||
if len(idx.Manifests) == 0 {
|
||||
return ocispec.Manifest{}, fmt.Errorf("image index has no child manifests")
|
||||
}
|
||||
|
||||
f.StringVarP(&o.DestinationDir, "output", "o", "", "Directory to save contents to (defaults to current directory)")
|
||||
// Prefer the first non-index child; fall back to the first child (an index) if all are indexes.
|
||||
desc := idx.Manifests[0]
|
||||
for _, d := range idx.Manifests {
|
||||
if !isIndexMediaType(d.MediaType) {
|
||||
desc = d
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
rc, err := s.Fetch(ctx, desc)
|
||||
if err != nil {
|
||||
return ocispec.Manifest{}, err
|
||||
}
|
||||
|
||||
if isIndexMediaType(desc.MediaType) {
|
||||
var nested ocispec.Index
|
||||
err = json.NewDecoder(rc).Decode(&nested)
|
||||
rc.Close()
|
||||
if err != nil {
|
||||
return ocispec.Manifest{}, fmt.Errorf("decoding nested index: %w", err)
|
||||
}
|
||||
idx = nested
|
||||
continue
|
||||
}
|
||||
|
||||
var m ocispec.Manifest
|
||||
err = json.NewDecoder(rc).Decode(&m)
|
||||
rc.Close()
|
||||
if err != nil {
|
||||
return ocispec.Manifest{}, fmt.Errorf("decoding child manifest: %w", err)
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
}
|
||||
|
||||
func ExtractCmd(ctx context.Context, o *ExtractOpts, s *store.Layout, ref string) error {
|
||||
// isContainerImageManifest returns true when the manifest describes a real
|
||||
// container image — i.e. an OCI/Docker image config with no AnnotationTitle on
|
||||
// any layer. File artifacts distributed as OCI images always carry AnnotationTitle
|
||||
// on their layers, so they are NOT considered container images by this check.
|
||||
func isContainerImageManifest(m ocispec.Manifest) bool {
|
||||
switch m.Config.MediaType {
|
||||
case consts.DockerConfigJSON, ocispec.MediaTypeImageConfig:
|
||||
for _, layer := range m.Layers {
|
||||
if _, ok := layer.Annotations[ocispec.AnnotationTitle]; ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func ExtractCmd(ctx context.Context, o *flags.ExtractOpts, s *store.Layout, ref string) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
r, err := reference.Parse(ref)
|
||||
@@ -35,10 +91,12 @@ func ExtractCmd(ctx context.Context, o *ExtractOpts, s *store.Layout, ref string
|
||||
return err
|
||||
}
|
||||
|
||||
// use the repository from the context and the identifier from the reference
|
||||
repo := r.Context().RepositoryStr() + ":" + r.Identifier()
|
||||
|
||||
found := false
|
||||
if err := s.Walk(func(reference string, desc ocispec.Descriptor) error {
|
||||
|
||||
if !strings.Contains(reference, r.Name()) {
|
||||
if !strings.Contains(reference, repo) {
|
||||
return nil
|
||||
}
|
||||
found = true
|
||||
@@ -49,9 +107,36 @@ func ExtractCmd(ctx context.Context, o *ExtractOpts, s *store.Layout, ref string
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
// For image indexes, decoding the index JSON as ocispec.Manifest produces
|
||||
// an empty Config.MediaType and nil Layers — causing FromManifest to fall
|
||||
// back to Default() mapper, which writes config blobs as sha256:<digest>.bin.
|
||||
// Instead, peek at the first child manifest to get real config/layer info.
|
||||
var m ocispec.Manifest
|
||||
if err := json.NewDecoder(rc).Decode(&m); err != nil {
|
||||
return err
|
||||
if desc.MediaType == ocispec.MediaTypeImageIndex || desc.MediaType == consts.DockerManifestListSchema2 {
|
||||
var idx ocispec.Index
|
||||
if err := json.NewDecoder(rc).Decode(&idx); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(idx.Manifests) == 0 {
|
||||
l.Warnf("skipping [%s]: image index has no child manifests", reference)
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
m, err = firstLeafManifest(ctx, s, idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := json.NewDecoder(rc).Decode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Container images (no AnnotationTitle on any layer) are not extractable
|
||||
// to disk in a meaningful way — use `hauler store copy` to push to a registry.
|
||||
if isContainerImageManifest(m) {
|
||||
l.Warnf("skipping [%s]: container images cannot be extracted (use `hauler store copy` to push to a registry)", reference)
|
||||
return nil
|
||||
}
|
||||
|
||||
mapperStore, err := mapper.FromManifest(m, o.DestinationDir)
|
||||
|
||||
556
cmd/hauler/cli/store/extract_test.go
Normal file
556
cmd/hauler/cli/store/extract_test.go
Normal file
@@ -0,0 +1,556 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
gcrv1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/empty"
|
||||
"github.com/google/go-containerregistry/pkg/v1/mutate"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
"github.com/google/go-containerregistry/pkg/v1/static"
|
||||
gvtypes "github.com/google/go-containerregistry/pkg/v1/types"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
// chartTestdataDir is defined in add_test.go as "../../../../testdata".
|
||||
|
||||
func TestExtractCmd_File(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
fileContent := "hello extract test"
|
||||
url := seedFileInHTTPServer(t, "extract-me.txt", fileContent)
|
||||
if err := storeFile(ctx, s, v1.File{Path: url}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
|
||||
// reference.Parse("extract-me.txt") normalises to "hauler/extract-me.txt:latest"
|
||||
// (DefaultNamespace = "hauler", DefaultTag = "latest"). ExtractCmd builds
|
||||
// repo = RepositoryStr() + ":" + Identifier() = "hauler/extract-me.txt:latest"
|
||||
// and uses strings.Contains against the stored ref — which matches exactly.
|
||||
ref := "hauler/extract-me.txt:latest"
|
||||
|
||||
destDir := t.TempDir()
|
||||
eo := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
DestinationDir: destDir,
|
||||
}
|
||||
|
||||
if err := ExtractCmd(ctx, eo, s, ref); err != nil {
|
||||
t.Fatalf("ExtractCmd: %v", err)
|
||||
}
|
||||
|
||||
// The file mapper writes the layer using its AnnotationTitle ("extract-me.txt").
|
||||
outPath := filepath.Join(destDir, "extract-me.txt")
|
||||
data, err := os.ReadFile(outPath)
|
||||
if err != nil {
|
||||
t.Fatalf("expected extracted file at %s: %v", outPath, err)
|
||||
}
|
||||
if string(data) != fileContent {
|
||||
t.Errorf("content mismatch: got %q, want %q", string(data), fileContent)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractCmd_Chart(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
o := newAddChartOpts(chartTestdataDir, "")
|
||||
if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil {
|
||||
t.Fatalf("AddChartCmd: %v", err)
|
||||
}
|
||||
|
||||
// Chart stored as "hauler/rancher-cluster-templates:0.5.2".
|
||||
ref := "hauler/rancher-cluster-templates:0.5.2"
|
||||
|
||||
destDir := t.TempDir()
|
||||
eo := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
DestinationDir: destDir,
|
||||
}
|
||||
|
||||
if err := ExtractCmd(ctx, eo, s, ref); err != nil {
|
||||
t.Fatalf("ExtractCmd: %v", err)
|
||||
}
|
||||
|
||||
// The chart mapper writes the chart layer as a .tgz (using AnnotationTitle,
|
||||
// or "chart.tar.gz" as fallback).
|
||||
entries, err := os.ReadDir(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadDir: %v", err)
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, e := range entries {
|
||||
if strings.HasSuffix(e.Name(), ".tgz") || strings.HasSuffix(e.Name(), ".tar.gz") {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
names := make([]string, len(entries))
|
||||
for i, e := range entries {
|
||||
names[i] = e.Name()
|
||||
}
|
||||
t.Errorf("expected a .tgz or .tar.gz in destDir, got: %v", names)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractCmd_NotFound(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
eo := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
DestinationDir: t.TempDir(),
|
||||
}
|
||||
|
||||
err := ExtractCmd(ctx, eo, s, "hauler/nonexistent:v99")
|
||||
if err == nil {
|
||||
t.Fatal("expected error for nonexistent ref, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "not found in store") {
|
||||
t.Errorf("expected 'not found in store' in error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractCmd_OciArtifactKindImage(t *testing.T) {
|
||||
// OCI artifacts pulled from a registry via AddImage() are always labelled
|
||||
// kind=KindAnnotationImage regardless of their actual content type (file,
|
||||
// chart, etc.). ExtractCmd must still dispatch via the manifest's
|
||||
// Config.MediaType — not the kind annotation — so extraction works correctly.
|
||||
ctx := newTestContext(t)
|
||||
|
||||
// newLocalhostRegistry is required: s.AddImage uses authn.DefaultKeychain and
|
||||
// go-containerregistry auto-selects plain HTTP only for "localhost:" hosts.
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
|
||||
// Build a synthetic OCI file artifact:
|
||||
// config.MediaType = FileLocalConfigMediaType (triggers Files() mapper)
|
||||
// layer.MediaType = FileLayerMediaType
|
||||
// layer annotation AnnotationTitle = "oci-pulled-file.txt"
|
||||
fileContent := []byte("oci file content from registry")
|
||||
fileLayer := static.NewLayer(fileContent, gvtypes.MediaType(consts.FileLayerMediaType))
|
||||
img, err := mutate.Append(empty.Image, mutate.Addendum{
|
||||
Layer: fileLayer,
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: "oci-pulled-file.txt",
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("mutate.Append: %v", err)
|
||||
}
|
||||
img = mutate.MediaType(img, gvtypes.OCIManifestSchema1)
|
||||
img = mutate.ConfigMediaType(img, gvtypes.MediaType(consts.FileLocalConfigMediaType))
|
||||
|
||||
ref := host + "/oci-artifacts/myfile:v1"
|
||||
tag, err := name.NewTag(ref, name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("name.NewTag: %v", err)
|
||||
}
|
||||
if err := remote.Write(tag, img, rOpts...); err != nil {
|
||||
t.Fatalf("remote.Write: %v", err)
|
||||
}
|
||||
|
||||
// Pull into a fresh store — AddImage sets kind=KindAnnotationImage on all manifests.
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, ref, "", rOpts...); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
// ExtractCmd receives the short ref (no registry prefix) as stored in AnnotationRefName.
|
||||
// reference.Parse("oci-artifacts/myfile:v1") → repo "oci-artifacts/myfile:v1" matches.
|
||||
destDir := t.TempDir()
|
||||
eo := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
DestinationDir: destDir,
|
||||
}
|
||||
if err := ExtractCmd(ctx, eo, s, "oci-artifacts/myfile:v1"); err != nil {
|
||||
t.Fatalf("ExtractCmd: %v", err)
|
||||
}
|
||||
|
||||
// Files() mapper uses AnnotationTitle → "oci-pulled-file.txt".
|
||||
outPath := filepath.Join(destDir, "oci-pulled-file.txt")
|
||||
data, err := os.ReadFile(outPath)
|
||||
if err != nil {
|
||||
t.Fatalf("expected extracted file at %s: %v", outPath, err)
|
||||
}
|
||||
if string(data) != string(fileContent) {
|
||||
t.Errorf("content mismatch: got %q, want %q", string(data), string(fileContent))
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractCmd_OciImageIndex_NoBinFiles(t *testing.T) {
|
||||
// Regression test: extracting an OCI image index whose platform manifests
|
||||
// carry binary layers with AnnotationTitle must yield only the named binary
|
||||
// files — no sha256:<digest>.bin metadata files.
|
||||
// Before the fix, decoding the index as an ocispec.Manifest produced an
|
||||
// empty Config.MediaType, causing FromManifest to select Default() mapper
|
||||
// which wrote config blobs and child manifests as sha256:<digest>.bin.
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
|
||||
buildPlatformImg := func(content []byte, title string) gcrv1.Image {
|
||||
layer := static.NewLayer(content, gvtypes.OCILayer)
|
||||
img, err := mutate.Append(empty.Image, mutate.Addendum{
|
||||
Layer: layer,
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: title,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("mutate.Append: %v", err)
|
||||
}
|
||||
img = mutate.MediaType(img, gvtypes.OCIManifestSchema1)
|
||||
img = mutate.ConfigMediaType(img, gvtypes.MediaType(ocispec.MediaTypeImageConfig))
|
||||
return img
|
||||
}
|
||||
|
||||
amd64Img := buildPlatformImg([]byte("amd64 binary content"), "mybinary.linux-amd64")
|
||||
arm64Img := buildPlatformImg([]byte("arm64 binary content"), "mybinary.linux-arm64")
|
||||
|
||||
idx := mutate.AppendManifests(empty.Index,
|
||||
mutate.IndexAddendum{
|
||||
Add: amd64Img,
|
||||
Descriptor: gcrv1.Descriptor{
|
||||
MediaType: gvtypes.OCIManifestSchema1,
|
||||
Platform: &gcrv1.Platform{OS: "linux", Architecture: "amd64"},
|
||||
},
|
||||
},
|
||||
mutate.IndexAddendum{
|
||||
Add: arm64Img,
|
||||
Descriptor: gcrv1.Descriptor{
|
||||
MediaType: gvtypes.OCIManifestSchema1,
|
||||
Platform: &gcrv1.Platform{OS: "linux", Architecture: "arm64"},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
ref := host + "/binaries/mybinary:v1"
|
||||
tag, err := name.NewTag(ref, name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("name.NewTag: %v", err)
|
||||
}
|
||||
if err := remote.WriteIndex(tag, idx, rOpts...); err != nil {
|
||||
t.Fatalf("remote.WriteIndex: %v", err)
|
||||
}
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, ref, "", rOpts...); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
destDir := t.TempDir()
|
||||
eo := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
DestinationDir: destDir,
|
||||
}
|
||||
if err := ExtractCmd(ctx, eo, s, "binaries/mybinary:v1"); err != nil {
|
||||
t.Fatalf("ExtractCmd: %v", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadDir: %v", err)
|
||||
}
|
||||
var names []string
|
||||
for _, e := range entries {
|
||||
names = append(names, e.Name())
|
||||
}
|
||||
|
||||
// No sha256: digest-named files should be extracted
|
||||
for _, n := range names {
|
||||
if strings.HasPrefix(n, "sha256:") {
|
||||
t.Errorf("unexpected digest-named file %q extracted (all files: %v)", n, names)
|
||||
}
|
||||
}
|
||||
|
||||
// Both platform binaries must be present
|
||||
for _, want := range []string{"mybinary.linux-amd64", "mybinary.linux-arm64"} {
|
||||
found := false
|
||||
for _, n := range names {
|
||||
if n == want {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("expected binary %q not found; got: %v", want, names)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractCmd_NestedImageIndex_NoBinFiles(t *testing.T) {
|
||||
// Regression test: extracting a nested OCI image index (outer index whose only
|
||||
// children are inner indexes, which in turn contain the platform manifests) must
|
||||
// yield only the named binary files — no sha256:<digest>.bin metadata files.
|
||||
// firstLeafManifest must descend through the outer index into the inner index to
|
||||
// find a leaf manifest so that FromManifest selects the correct Files() mapper.
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
|
||||
buildPlatformImg := func(content []byte, title string) gcrv1.Image {
|
||||
layer := static.NewLayer(content, gvtypes.OCILayer)
|
||||
img, err := mutate.Append(empty.Image, mutate.Addendum{
|
||||
Layer: layer,
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: title,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("mutate.Append: %v", err)
|
||||
}
|
||||
img = mutate.MediaType(img, gvtypes.OCIManifestSchema1)
|
||||
img = mutate.ConfigMediaType(img, gvtypes.MediaType(ocispec.MediaTypeImageConfig))
|
||||
return img
|
||||
}
|
||||
|
||||
amd64Img := buildPlatformImg([]byte("amd64 binary content"), "mybinary.linux-amd64")
|
||||
arm64Img := buildPlatformImg([]byte("arm64 binary content"), "mybinary.linux-arm64")
|
||||
|
||||
// Inner index contains the leaf platform manifests.
|
||||
innerIdx := mutate.AppendManifests(empty.Index,
|
||||
mutate.IndexAddendum{
|
||||
Add: amd64Img,
|
||||
Descriptor: gcrv1.Descriptor{
|
||||
MediaType: gvtypes.OCIManifestSchema1,
|
||||
Platform: &gcrv1.Platform{OS: "linux", Architecture: "amd64"},
|
||||
},
|
||||
},
|
||||
mutate.IndexAddendum{
|
||||
Add: arm64Img,
|
||||
Descriptor: gcrv1.Descriptor{
|
||||
MediaType: gvtypes.OCIManifestSchema1,
|
||||
Platform: &gcrv1.Platform{OS: "linux", Architecture: "arm64"},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
// Outer index contains only the inner index — all children are indexes.
|
||||
outerIdx := mutate.AppendManifests(empty.Index,
|
||||
mutate.IndexAddendum{
|
||||
Add: innerIdx,
|
||||
Descriptor: gcrv1.Descriptor{
|
||||
MediaType: gvtypes.OCIImageIndex,
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
ref := host + "/binaries/nested:v1"
|
||||
tag, err := name.NewTag(ref, name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("name.NewTag: %v", err)
|
||||
}
|
||||
if err := remote.WriteIndex(tag, outerIdx, rOpts...); err != nil {
|
||||
t.Fatalf("remote.WriteIndex: %v", err)
|
||||
}
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, ref, "", rOpts...); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
destDir := t.TempDir()
|
||||
eo := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
DestinationDir: destDir,
|
||||
}
|
||||
if err := ExtractCmd(ctx, eo, s, "binaries/nested:v1"); err != nil {
|
||||
t.Fatalf("ExtractCmd: %v", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadDir: %v", err)
|
||||
}
|
||||
var names []string
|
||||
for _, e := range entries {
|
||||
names = append(names, e.Name())
|
||||
}
|
||||
|
||||
// No sha256: digest-named files should be extracted.
|
||||
for _, n := range names {
|
||||
if strings.HasPrefix(n, "sha256:") {
|
||||
t.Errorf("unexpected digest-named file %q extracted (all files: %v)", n, names)
|
||||
}
|
||||
}
|
||||
|
||||
// Both platform binaries must be present.
|
||||
for _, want := range []string{"mybinary.linux-amd64", "mybinary.linux-arm64"} {
|
||||
found := false
|
||||
for _, n := range names {
|
||||
if n == want {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("expected binary %q not found; got: %v", want, names)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractCmd_ContainerImage_Skipped(t *testing.T) {
|
||||
// A real container image (no AnnotationTitle on any layer) should be skipped
|
||||
// without error and without writing any files to the destination directory.
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
|
||||
layer := static.NewLayer([]byte("layer content"), gvtypes.OCILayer)
|
||||
img, err := mutate.Append(empty.Image, mutate.Addendum{Layer: layer})
|
||||
if err != nil {
|
||||
t.Fatalf("mutate.Append: %v", err)
|
||||
}
|
||||
img = mutate.MediaType(img, gvtypes.OCIManifestSchema1)
|
||||
img = mutate.ConfigMediaType(img, gvtypes.MediaType(ocispec.MediaTypeImageConfig))
|
||||
|
||||
ref := host + "/myapp/myimage:v1"
|
||||
tag, err := name.NewTag(ref, name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("name.NewTag: %v", err)
|
||||
}
|
||||
if err := remote.Write(tag, img, rOpts...); err != nil {
|
||||
t.Fatalf("remote.Write: %v", err)
|
||||
}
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, ref, "", rOpts...); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
destDir := t.TempDir()
|
||||
eo := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
DestinationDir: destDir,
|
||||
}
|
||||
if err := ExtractCmd(ctx, eo, s, "myapp/myimage:v1"); err != nil {
|
||||
t.Fatalf("ExtractCmd: %v", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadDir: %v", err)
|
||||
}
|
||||
if len(entries) != 0 {
|
||||
names := make([]string, len(entries))
|
||||
for i, e := range entries {
|
||||
names[i] = e.Name()
|
||||
}
|
||||
t.Errorf("expected no files extracted for container image, got: %v", names)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractCmd_ContainerImageIndex_Skipped(t *testing.T) {
|
||||
// A real multi-arch container image index (no AnnotationTitle on any layer)
|
||||
// should be skipped without error and without writing any files.
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
|
||||
buildPlatformImg := func(content []byte) gcrv1.Image {
|
||||
layer := static.NewLayer(content, gvtypes.OCILayer)
|
||||
img, err := mutate.Append(empty.Image, mutate.Addendum{Layer: layer})
|
||||
if err != nil {
|
||||
t.Fatalf("mutate.Append: %v", err)
|
||||
}
|
||||
img = mutate.MediaType(img, gvtypes.OCIManifestSchema1)
|
||||
img = mutate.ConfigMediaType(img, gvtypes.MediaType(ocispec.MediaTypeImageConfig))
|
||||
return img
|
||||
}
|
||||
|
||||
idx := mutate.AppendManifests(empty.Index,
|
||||
mutate.IndexAddendum{
|
||||
Add: buildPlatformImg([]byte("amd64 content")),
|
||||
Descriptor: gcrv1.Descriptor{
|
||||
MediaType: gvtypes.OCIManifestSchema1,
|
||||
Platform: &gcrv1.Platform{OS: "linux", Architecture: "amd64"},
|
||||
},
|
||||
},
|
||||
mutate.IndexAddendum{
|
||||
Add: buildPlatformImg([]byte("arm64 content")),
|
||||
Descriptor: gcrv1.Descriptor{
|
||||
MediaType: gvtypes.OCIManifestSchema1,
|
||||
Platform: &gcrv1.Platform{OS: "linux", Architecture: "arm64"},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
ref := host + "/myapp/multiarch:v1"
|
||||
tag, err := name.NewTag(ref, name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("name.NewTag: %v", err)
|
||||
}
|
||||
if err := remote.WriteIndex(tag, idx, rOpts...); err != nil {
|
||||
t.Fatalf("remote.WriteIndex: %v", err)
|
||||
}
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, ref, "", rOpts...); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
destDir := t.TempDir()
|
||||
eo := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
DestinationDir: destDir,
|
||||
}
|
||||
if err := ExtractCmd(ctx, eo, s, "myapp/multiarch:v1"); err != nil {
|
||||
t.Fatalf("ExtractCmd: %v", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadDir: %v", err)
|
||||
}
|
||||
if len(entries) != 0 {
|
||||
names := make([]string, len(entries))
|
||||
for i, e := range entries {
|
||||
names[i] = e.Name()
|
||||
}
|
||||
t.Errorf("expected no files extracted for container image index, got: %v", names)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractCmd_SubstringMatch(t *testing.T) {
|
||||
// reference.Parse applies DefaultTag ("latest") when no tag is given, so
|
||||
// Parse("hauler/extract-sub.txt") and Parse("hauler/extract-sub.txt:latest")
|
||||
// produce the same repo string "hauler/extract-sub.txt:latest".
|
||||
// This means a no-tag ref substring-matches a stored "hauler/...:latest" key.
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
fileContent := "substring match content"
|
||||
url := seedFileInHTTPServer(t, "extract-sub.txt", fileContent)
|
||||
if err := storeFile(ctx, s, v1.File{Path: url}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
|
||||
destDir := t.TempDir()
|
||||
eo := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
DestinationDir: destDir,
|
||||
}
|
||||
|
||||
// No explicit tag — Parse adds ":latest" as default, which still matches.
|
||||
if err := ExtractCmd(ctx, eo, s, "hauler/extract-sub.txt"); err != nil {
|
||||
t.Fatalf("ExtractCmd with no-tag ref: %v", err)
|
||||
}
|
||||
|
||||
outPath := filepath.Join(destDir, "extract-sub.txt")
|
||||
data, err := os.ReadFile(outPath)
|
||||
if err != nil {
|
||||
t.Fatalf("expected extracted file at %s: %v", outPath, err)
|
||||
}
|
||||
if string(data) != fileContent {
|
||||
t.Errorf("content mismatch: got %q, want %q", string(data), fileContent)
|
||||
}
|
||||
}
|
||||
@@ -1,84 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/layer"
|
||||
"github.com/rancherfederal/hauler/pkg/store"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultStoreName = "store"
|
||||
DefaultCacheDir = "hauler"
|
||||
)
|
||||
|
||||
type RootOpts struct {
|
||||
StoreDir string
|
||||
CacheDir string
|
||||
}
|
||||
|
||||
func (o *RootOpts) AddArgs(cmd *cobra.Command) {
|
||||
pf := cmd.PersistentFlags()
|
||||
pf.StringVar(&o.CacheDir, "cache", "", "Location of where to store cache data (defaults to $XDG_CACHE_DIR/hauler)")
|
||||
pf.StringVarP(&o.StoreDir, "store", "s", DefaultStoreName, "Location to create store at")
|
||||
}
|
||||
|
||||
func (o *RootOpts) Store(ctx context.Context) (*store.Layout, error) {
|
||||
l := log.FromContext(ctx)
|
||||
dir := o.StoreDir
|
||||
|
||||
abs, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
l.Debugf("using store at %s", abs)
|
||||
if _, err := os.Stat(abs); errors.Is(err, os.ErrNotExist) {
|
||||
err := os.Mkdir(abs, os.ModePerm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO: Do we want this to be configurable?
|
||||
c, err := o.Cache(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s, err := store.NewLayout(abs, store.WithCache(c))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (o *RootOpts) Cache(ctx context.Context) (layer.Cache, error) {
|
||||
dir := o.CacheDir
|
||||
|
||||
if dir == "" {
|
||||
// Default to $XDG_CACHE_HOME
|
||||
cachedir, err := os.UserCacheDir()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
abs, _ := filepath.Abs(filepath.Join(cachedir, DefaultCacheDir))
|
||||
if err := os.MkdirAll(abs, os.ModePerm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dir = abs
|
||||
}
|
||||
|
||||
c := layer.NewFilesystemCache(dir)
|
||||
return c, nil
|
||||
}
|
||||
@@ -4,35 +4,21 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/olekukonko/tablewriter"
|
||||
"github.com/olekukonko/tablewriter/tw"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/store"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/reference"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/reference"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
type InfoOpts struct {
|
||||
*RootOpts
|
||||
|
||||
OutputFormat string
|
||||
SizeUnit string
|
||||
}
|
||||
|
||||
func (o *InfoOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.OutputFormat, "output", "o", "table", "Output format (table, json)")
|
||||
|
||||
// TODO: Regex/globbing
|
||||
}
|
||||
|
||||
func InfoCmd(ctx context.Context, o *InfoOpts, s *store.Layout) error {
|
||||
func InfoCmd(ctx context.Context, o *flags.InfoOpts, s *store.Layout) error {
|
||||
var items []item
|
||||
if err := s.Walk(func(ref string, desc ocispec.Descriptor) error {
|
||||
if _, ok := desc.Annotations[ocispec.AnnotationRefName]; !ok {
|
||||
@@ -44,14 +30,85 @@ func InfoCmd(ctx context.Context, o *InfoOpts, s *store.Layout) error {
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
var m ocispec.Manifest
|
||||
if err := json.NewDecoder(rc).Decode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
i := newItem(s, desc, m)
|
||||
var emptyItem item
|
||||
if i != emptyItem {
|
||||
items = append(items, i)
|
||||
// handle multi-arch images
|
||||
if desc.MediaType == consts.OCIImageIndexSchema || desc.MediaType == consts.DockerManifestListSchema2 {
|
||||
var idx ocispec.Index
|
||||
if err := json.NewDecoder(rc).Decode(&idx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, internalDesc := range idx.Manifests {
|
||||
rc, err := s.Fetch(ctx, internalDesc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
var internalManifest ocispec.Manifest
|
||||
if err := json.NewDecoder(rc).Decode(&internalManifest); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
i := newItemWithDigest(
|
||||
s,
|
||||
internalDesc.Digest.String(),
|
||||
desc,
|
||||
internalManifest,
|
||||
fmt.Sprintf("%s/%s", internalDesc.Platform.OS, internalDesc.Platform.Architecture),
|
||||
o,
|
||||
)
|
||||
var emptyItem item
|
||||
if i != emptyItem {
|
||||
items = append(items, i)
|
||||
}
|
||||
}
|
||||
|
||||
// handle "non" multi-arch images
|
||||
} else if desc.MediaType == consts.DockerManifestSchema2 || desc.MediaType == consts.OCIManifestSchema1 {
|
||||
var m ocispec.Manifest
|
||||
if err := json.NewDecoder(rc).Decode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rc, err := s.FetchManifest(ctx, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
// unmarshal the oci image content
|
||||
var internalManifest ocispec.Image
|
||||
if err := json.NewDecoder(rc).Decode(&internalManifest); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if internalManifest.Architecture != "" {
|
||||
i := newItem(s, desc, m,
|
||||
fmt.Sprintf("%s/%s", internalManifest.OS, internalManifest.Architecture), o)
|
||||
var emptyItem item
|
||||
if i != emptyItem {
|
||||
items = append(items, i)
|
||||
}
|
||||
} else {
|
||||
i := newItem(s, desc, m, "-", o)
|
||||
var emptyItem item
|
||||
if i != emptyItem {
|
||||
items = append(items, i)
|
||||
}
|
||||
}
|
||||
|
||||
// handle everything else (charts, files, sigs, etc.)
|
||||
} else {
|
||||
var m ocispec.Manifest
|
||||
if err := json.NewDecoder(rc).Decode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
i := newItem(s, desc, m, "-", o)
|
||||
var emptyItem item
|
||||
if i != emptyItem {
|
||||
items = append(items, i)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -59,34 +116,125 @@ func InfoCmd(ctx context.Context, o *InfoOpts, s *store.Layout) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if o.ListRepos {
|
||||
buildListRepos(items...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// sort items by ref and arch
|
||||
sort.Sort(byReferenceAndArch(items))
|
||||
|
||||
var msg string
|
||||
switch o.OutputFormat {
|
||||
case "json":
|
||||
msg = buildJson(items...)
|
||||
|
||||
fmt.Println(msg)
|
||||
default:
|
||||
msg = buildTable(items...)
|
||||
if err := buildTable(o.ShowDigests, items...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
fmt.Println(msg)
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildTable(items ...item) string {
|
||||
b := strings.Builder{}
|
||||
tw := tabwriter.NewWriter(&b, 1, 1, 3, ' ', 0)
|
||||
|
||||
fmt.Fprintf(tw, "Reference\tType\t# Layers\tSize\n")
|
||||
fmt.Fprintf(tw, "---------\t----\t--------\t----\n")
|
||||
func buildListRepos(items ...item) {
|
||||
// create map to track unique repository names
|
||||
repos := make(map[string]bool)
|
||||
|
||||
for _, i := range items {
|
||||
if i.Type != "" {
|
||||
fmt.Fprintf(tw, "%s\t%s\t%d\t%s\n",
|
||||
i.Reference, i.Type, i.Layers, i.Size,
|
||||
)
|
||||
repoName := ""
|
||||
for j := 0; j < len(i.Reference); j++ {
|
||||
if i.Reference[j] == '/' {
|
||||
repoName = i.Reference[:j]
|
||||
break
|
||||
}
|
||||
}
|
||||
if repoName == "" {
|
||||
repoName = i.Reference
|
||||
}
|
||||
repos[repoName] = true
|
||||
}
|
||||
|
||||
// collect and print unique repository names
|
||||
for repoName := range repos {
|
||||
fmt.Println(repoName)
|
||||
}
|
||||
}
|
||||
|
||||
func buildTable(showDigests bool, items ...item) error {
|
||||
table := tablewriter.NewTable(os.Stdout)
|
||||
table.Configure(func(cfg *tablewriter.Config) {
|
||||
cfg.Header.Alignment.Global = tw.AlignLeft
|
||||
cfg.Row.Merging.Mode = tw.MergeVertical
|
||||
cfg.Row.Merging.ByColumnIndex = tw.NewBoolMapper(0)
|
||||
})
|
||||
|
||||
if showDigests {
|
||||
table.Header("Reference", "Type", "Platform", "Digest", "# Layers", "Size")
|
||||
} else {
|
||||
table.Header("Reference", "Type", "Platform", "# Layers", "Size")
|
||||
}
|
||||
|
||||
totalSize := int64(0)
|
||||
|
||||
for _, i := range items {
|
||||
if i.Type == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
ref := truncateReference(i.Reference)
|
||||
var row []string
|
||||
|
||||
if showDigests {
|
||||
digest := i.Digest
|
||||
if digest == "" {
|
||||
digest = "-"
|
||||
}
|
||||
row = []string{
|
||||
ref,
|
||||
i.Type,
|
||||
i.Platform,
|
||||
digest,
|
||||
fmt.Sprintf("%d", i.Layers),
|
||||
byteCountSI(i.Size),
|
||||
}
|
||||
} else {
|
||||
row = []string{
|
||||
ref,
|
||||
i.Type,
|
||||
i.Platform,
|
||||
fmt.Sprintf("%d", i.Layers),
|
||||
byteCountSI(i.Size),
|
||||
}
|
||||
}
|
||||
|
||||
totalSize += i.Size
|
||||
if err := table.Append(row); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
tw.Flush()
|
||||
return b.String()
|
||||
|
||||
// align total column based on digest visibility
|
||||
if showDigests {
|
||||
table.Footer("", "", "", "", "Total", byteCountSI(totalSize))
|
||||
} else {
|
||||
table.Footer("", "", "", "Total", byteCountSI(totalSize))
|
||||
}
|
||||
|
||||
return table.Render()
|
||||
}
|
||||
|
||||
// truncateReference shortens the digest of a reference
|
||||
func truncateReference(ref string) string {
|
||||
const prefix = "@sha256:"
|
||||
idx := strings.Index(ref, prefix)
|
||||
if idx == -1 {
|
||||
return ref
|
||||
}
|
||||
if len(ref) > idx+len(prefix)+12 {
|
||||
return ref[:idx+len(prefix)+12] + "…"
|
||||
}
|
||||
return ref
|
||||
}
|
||||
|
||||
func buildJson(item ...item) string {
|
||||
@@ -100,20 +248,43 @@ func buildJson(item ...item) string {
|
||||
type item struct {
|
||||
Reference string
|
||||
Type string
|
||||
Platform string
|
||||
Digest string
|
||||
Layers int
|
||||
Size string
|
||||
Size int64
|
||||
}
|
||||
|
||||
func newItem(s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest) item {
|
||||
if desc.Annotations["kind"] == "dev.cosignproject.cosign/atts" ||
|
||||
desc.Annotations["kind"] == "dev.cosignproject.cosign/sigs" ||
|
||||
desc.Annotations["kind"] == "dev.cosignproject.cosign/sboms" {
|
||||
return item{}
|
||||
}
|
||||
type byReferenceAndArch []item
|
||||
|
||||
func (a byReferenceAndArch) Len() int { return len(a) }
|
||||
func (a byReferenceAndArch) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a byReferenceAndArch) Less(i, j int) bool {
|
||||
if a[i].Reference == a[j].Reference {
|
||||
if a[i].Type == "image" && a[j].Type == "image" {
|
||||
return a[i].Platform < a[j].Platform
|
||||
}
|
||||
if a[i].Type == "image" {
|
||||
return true
|
||||
}
|
||||
if a[j].Type == "image" {
|
||||
return false
|
||||
}
|
||||
return a[i].Type < a[j].Type
|
||||
}
|
||||
return a[i].Reference < a[j].Reference
|
||||
}
|
||||
|
||||
// overrides the digest with a specific per platform digest
|
||||
func newItemWithDigest(s *store.Layout, digestStr string, desc ocispec.Descriptor, m ocispec.Manifest, plat string, o *flags.InfoOpts) item {
|
||||
item := newItem(s, desc, m, plat, o)
|
||||
item.Digest = digestStr
|
||||
return item
|
||||
}
|
||||
|
||||
func newItem(s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, plat string, o *flags.InfoOpts) item {
|
||||
var size int64 = 0
|
||||
for _, l := range m.Layers {
|
||||
size = +l.Size
|
||||
size += l.Size
|
||||
}
|
||||
|
||||
// Generate a human-readable content type
|
||||
@@ -129,16 +300,37 @@ func newItem(s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest) item
|
||||
ctype = "image"
|
||||
}
|
||||
|
||||
ref, err := reference.Parse(desc.Annotations[ocispec.AnnotationRefName])
|
||||
switch {
|
||||
case desc.Annotations[consts.KindAnnotationName] == consts.KindAnnotationSigs:
|
||||
ctype = "sigs"
|
||||
case desc.Annotations[consts.KindAnnotationName] == consts.KindAnnotationAtts:
|
||||
ctype = "atts"
|
||||
case desc.Annotations[consts.KindAnnotationName] == consts.KindAnnotationSboms:
|
||||
ctype = "sbom"
|
||||
case strings.HasPrefix(desc.Annotations[consts.KindAnnotationName], consts.KindAnnotationReferrers):
|
||||
ctype = "referrer"
|
||||
}
|
||||
|
||||
refName := desc.Annotations["io.containerd.image.name"]
|
||||
if refName == "" {
|
||||
refName = desc.Annotations[ocispec.AnnotationRefName]
|
||||
}
|
||||
ref, err := reference.Parse(refName)
|
||||
if err != nil {
|
||||
return item{}
|
||||
}
|
||||
|
||||
if o.TypeFilter != "all" && ctype != o.TypeFilter {
|
||||
return item{}
|
||||
}
|
||||
|
||||
return item{
|
||||
Reference: ref.Name(),
|
||||
Type: ctype,
|
||||
Platform: plat,
|
||||
Digest: desc.Digest.String(),
|
||||
Layers: len(m.Layers),
|
||||
Size: byteCountSI(size),
|
||||
Size: size,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
238
cmd/hauler/cli/store/info_test.go
Normal file
238
cmd/hauler/cli/store/info_test.go
Normal file
@@ -0,0 +1,238 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
func TestByteCountSI(t *testing.T) {
|
||||
tests := []struct {
|
||||
input int64
|
||||
want string
|
||||
}{
|
||||
{0, "0 B"},
|
||||
{999, "999 B"},
|
||||
{1000, "1.0 kB"},
|
||||
{1500000, "1.5 MB"},
|
||||
{1000000000, "1.0 GB"},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
got := byteCountSI(tc.input)
|
||||
if got != tc.want {
|
||||
t.Errorf("byteCountSI(%d) = %q, want %q", tc.input, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTruncateReference(t *testing.T) {
|
||||
longDigest := "sha256:abcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcd"
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
{"tag ref unchanged", "nginx:latest", "nginx:latest"},
|
||||
{"long digest truncated", "nginx@" + longDigest, "nginx@sha256:abcdefabcdef\u2026"},
|
||||
{"short digest not truncated", "nginx@sha256:abcdef", "nginx@sha256:abcdef"},
|
||||
{"no digest unchanged", "myrepo/myimage:v1", "myrepo/myimage:v1"},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := truncateReference(tc.input)
|
||||
if got != tc.want {
|
||||
t.Errorf("truncateReference(%q) = %q, want %q", tc.input, got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildJson(t *testing.T) {
|
||||
items := []item{
|
||||
{Reference: "myrepo/myimage:v1", Type: "image", Platform: "linux/amd64", Size: 1024, Layers: 2},
|
||||
{Reference: "myrepo/mychart:v1", Type: "chart", Platform: "-", Size: 512, Layers: 1},
|
||||
}
|
||||
out := buildJson(items...)
|
||||
if out == "" {
|
||||
t.Fatal("buildJson returned empty string")
|
||||
}
|
||||
var got []item
|
||||
if err := json.Unmarshal([]byte(out), &got); err != nil {
|
||||
t.Fatalf("buildJson output is not valid JSON: %v\noutput: %s", err, out)
|
||||
}
|
||||
if len(got) != len(items) {
|
||||
t.Fatalf("buildJson: got %d items, want %d", len(got), len(items))
|
||||
}
|
||||
for i, want := range items {
|
||||
if got[i].Reference != want.Reference {
|
||||
t.Errorf("item[%d].Reference = %q, want %q", i, got[i].Reference, want.Reference)
|
||||
}
|
||||
if got[i].Type != want.Type {
|
||||
t.Errorf("item[%d].Type = %q, want %q", i, got[i].Type, want.Type)
|
||||
}
|
||||
if got[i].Size != want.Size {
|
||||
t.Errorf("item[%d].Size = %d, want %d", i, got[i].Size, want.Size)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewItem(t *testing.T) {
|
||||
// newItem uses s only for its signature; it does not dereference s in practice.
|
||||
// We pass nil to keep tests dependency-free.
|
||||
const validRef = "myrepo/myimage:latest"
|
||||
|
||||
makeDesc := func(kindAnnotation string) ocispec.Descriptor {
|
||||
desc := ocispec.Descriptor{
|
||||
Annotations: map[string]string{
|
||||
"io.containerd.image.name": validRef,
|
||||
},
|
||||
}
|
||||
if kindAnnotation != "" {
|
||||
desc.Annotations[consts.KindAnnotationName] = kindAnnotation
|
||||
}
|
||||
return desc
|
||||
}
|
||||
makeManifest := func(configMediaType string) ocispec.Manifest {
|
||||
return ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{MediaType: configMediaType},
|
||||
}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
configMedia string
|
||||
kindAnnotation string
|
||||
typeFilter string
|
||||
wantType string
|
||||
wantEmpty bool
|
||||
}{
|
||||
{
|
||||
name: "DockerConfigJSON → image",
|
||||
configMedia: consts.DockerConfigJSON,
|
||||
typeFilter: "all",
|
||||
wantType: "image",
|
||||
},
|
||||
{
|
||||
name: "ChartConfigMediaType → chart",
|
||||
configMedia: consts.ChartConfigMediaType,
|
||||
typeFilter: "all",
|
||||
wantType: "chart",
|
||||
},
|
||||
{
|
||||
name: "FileLocalConfigMediaType → file",
|
||||
configMedia: consts.FileLocalConfigMediaType,
|
||||
typeFilter: "all",
|
||||
wantType: "file",
|
||||
},
|
||||
{
|
||||
name: "KindAnnotationSigs → sigs",
|
||||
configMedia: consts.DockerConfigJSON,
|
||||
kindAnnotation: consts.KindAnnotationSigs,
|
||||
typeFilter: "all",
|
||||
wantType: "sigs",
|
||||
},
|
||||
{
|
||||
name: "KindAnnotationAtts → atts",
|
||||
configMedia: consts.DockerConfigJSON,
|
||||
kindAnnotation: consts.KindAnnotationAtts,
|
||||
typeFilter: "all",
|
||||
wantType: "atts",
|
||||
},
|
||||
{
|
||||
name: "KindAnnotationReferrers prefix → referrer",
|
||||
configMedia: consts.DockerConfigJSON,
|
||||
kindAnnotation: consts.KindAnnotationReferrers + "/abc123",
|
||||
typeFilter: "all",
|
||||
wantType: "referrer",
|
||||
},
|
||||
{
|
||||
name: "TypeFilter:image with chart → empty item",
|
||||
configMedia: consts.ChartConfigMediaType,
|
||||
typeFilter: "image",
|
||||
wantEmpty: true,
|
||||
},
|
||||
{
|
||||
name: "TypeFilter:file with image → empty item",
|
||||
configMedia: consts.DockerConfigJSON,
|
||||
typeFilter: "file",
|
||||
wantEmpty: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
desc := makeDesc(tc.kindAnnotation)
|
||||
m := makeManifest(tc.configMedia)
|
||||
o := &flags.InfoOpts{TypeFilter: tc.typeFilter}
|
||||
|
||||
got := newItem(nil, desc, m, "linux/amd64", o)
|
||||
var empty item
|
||||
if tc.wantEmpty {
|
||||
if got != empty {
|
||||
t.Errorf("expected empty item, got %+v", got)
|
||||
}
|
||||
return
|
||||
}
|
||||
if got == empty {
|
||||
t.Fatalf("got empty item, want type %q", tc.wantType)
|
||||
}
|
||||
if got.Type != tc.wantType {
|
||||
t.Errorf("got type %q, want %q", got.Type, tc.wantType)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInfoCmd(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
// Seed a file artifact using a local temp file.
|
||||
tmpFile := t.TempDir() + "/hello.txt"
|
||||
if err := os.WriteFile(tmpFile, []byte("hello hauler"), 0o644); err != nil {
|
||||
t.Fatalf("write tmpFile: %v", err)
|
||||
}
|
||||
fi := v1.File{Path: tmpFile}
|
||||
if err := storeFile(ctx, s, fi); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
|
||||
baseOpts := func(typeFilter, format string) *flags.InfoOpts {
|
||||
return &flags.InfoOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
OutputFormat: format,
|
||||
TypeFilter: typeFilter,
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("TypeFilter:all json", func(t *testing.T) {
|
||||
if err := InfoCmd(ctx, baseOpts("all", "json"), s); err != nil {
|
||||
t.Errorf("InfoCmd(all, json): %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("TypeFilter:file json", func(t *testing.T) {
|
||||
if err := InfoCmd(ctx, baseOpts("file", "json"), s); err != nil {
|
||||
t.Errorf("InfoCmd(file, json): %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("TypeFilter:image json", func(t *testing.T) {
|
||||
// Store has only a file artifact; image filter returns no items (no error).
|
||||
if err := InfoCmd(ctx, baseOpts("image", "json"), s); err != nil {
|
||||
t.Errorf("InfoCmd(image, json): %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("TypeFilter:all table", func(t *testing.T) {
|
||||
if err := InfoCmd(ctx, baseOpts("all", "table"), s); err != nil {
|
||||
t.Errorf("InfoCmd(all, table): %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
309
cmd/hauler/cli/store/lifecycle_test.go
Normal file
309
cmd/hauler/cli/store/lifecycle_test.go
Normal file
@@ -0,0 +1,309 @@
|
||||
package store
|
||||
|
||||
// lifecycle_test.go covers the end-to-end add->save->load->copy/extract lifecycle
|
||||
// for file, image, and chart artifact types.
|
||||
//
|
||||
// Do NOT use t.Parallel() -- SaveCmd calls os.Chdir(storeDir).
|
||||
// Always use absolute paths for StoreDir and FileName.
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
// TestLifecycle_FileArtifact_AddSaveLoadCopy exercises the full lifecycle for a
|
||||
// file artifact: seed HTTP server -> storeFile -> SaveCmd -> LoadCmd -> CopyCmd dir://.
|
||||
func TestLifecycle_FileArtifact_AddSaveLoadCopy(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
// Step 1: Seed an HTTP file server with known content.
|
||||
fileContent := "lifecycle file artifact content"
|
||||
url := seedFileInHTTPServer(t, "lifecycle.txt", fileContent)
|
||||
|
||||
// Step 2: storeFile into store A.
|
||||
storeA := newTestStore(t)
|
||||
if err := storeFile(ctx, storeA, v1.File{Path: url}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, storeA, "lifecycle.txt")
|
||||
|
||||
// Flush index.json so SaveCmd can read it from disk.
|
||||
if err := storeA.SaveIndex(); err != nil {
|
||||
t.Fatalf("SaveIndex: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: SaveCmd -> archive (absolute paths required).
|
||||
archivePath := filepath.Join(t.TempDir(), "lifecycle-file.tar.zst")
|
||||
saveOpts := newSaveOpts(storeA.Root, archivePath)
|
||||
if err := SaveCmd(ctx, saveOpts, defaultRootOpts(storeA.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd: %v", err)
|
||||
}
|
||||
|
||||
fi, err := os.Stat(archivePath)
|
||||
if err != nil {
|
||||
t.Fatalf("archive stat: %v", err)
|
||||
}
|
||||
if fi.Size() == 0 {
|
||||
t.Fatal("archive is empty")
|
||||
}
|
||||
|
||||
// Step 4: LoadCmd -> store B.
|
||||
storeBDir := t.TempDir()
|
||||
loadOpts := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeBDir),
|
||||
FileName: []string{archivePath},
|
||||
}
|
||||
if err := LoadCmd(ctx, loadOpts, defaultRootOpts(storeBDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd: %v", err)
|
||||
}
|
||||
|
||||
storeB, err := store.NewLayout(storeBDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout(storeB): %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, storeB, "lifecycle.txt")
|
||||
|
||||
// Step 5: CopyCmd dir:// -> extract file to destDir.
|
||||
extractDir := t.TempDir()
|
||||
copyOpts := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(storeB.Root)}
|
||||
if err := CopyCmd(ctx, copyOpts, storeB, "dir://"+extractDir, defaultCliOpts()); err != nil {
|
||||
t.Fatalf("CopyCmd dir: %v", err)
|
||||
}
|
||||
|
||||
// Step 6: Assert file content matches original.
|
||||
outPath := filepath.Join(extractDir, "lifecycle.txt")
|
||||
data, err := os.ReadFile(outPath)
|
||||
if err != nil {
|
||||
t.Fatalf("expected extracted file at %s: %v", outPath, err)
|
||||
}
|
||||
if string(data) != fileContent {
|
||||
t.Errorf("file content mismatch: got %q, want %q", string(data), fileContent)
|
||||
}
|
||||
}
|
||||
|
||||
// TestLifecycle_Image_AddSaveLoadCopyRegistry exercises the full lifecycle for
|
||||
// a container image: seed registry 1 -> storeImage -> SaveCmd -> LoadCmd ->
|
||||
// CopyCmd registry:// -> verify in registry 2.
|
||||
func TestLifecycle_Image_AddSaveLoadCopyRegistry(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
// Step 1: Seed image into in-memory registry 1.
|
||||
srcHost, srcOpts := newLocalhostRegistry(t)
|
||||
srcImg := seedImage(t, srcHost, "lifecycle/app", "v1", srcOpts...)
|
||||
|
||||
srcDigest, err := srcImg.Digest()
|
||||
if err != nil {
|
||||
t.Fatalf("srcImg.Digest: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: storeImage into store A.
|
||||
storeA := newTestStore(t)
|
||||
rso := defaultRootOpts(storeA.Root)
|
||||
ro := defaultCliOpts()
|
||||
if err := storeImage(ctx, storeA, v1.Image{Name: srcHost + "/lifecycle/app:v1"}, "", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, storeA, "lifecycle/app:v1")
|
||||
|
||||
// Flush index.json for SaveCmd.
|
||||
if err := storeA.SaveIndex(); err != nil {
|
||||
t.Fatalf("SaveIndex: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: SaveCmd -> archive.
|
||||
archivePath := filepath.Join(t.TempDir(), "lifecycle-image.tar.zst")
|
||||
saveOpts := newSaveOpts(storeA.Root, archivePath)
|
||||
if err := SaveCmd(ctx, saveOpts, defaultRootOpts(storeA.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd: %v", err)
|
||||
}
|
||||
|
||||
// Step 4: LoadCmd -> store B.
|
||||
storeBDir := t.TempDir()
|
||||
loadOpts := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeBDir),
|
||||
FileName: []string{archivePath},
|
||||
}
|
||||
if err := LoadCmd(ctx, loadOpts, defaultRootOpts(storeBDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd: %v", err)
|
||||
}
|
||||
|
||||
storeB, err := store.NewLayout(storeBDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout(storeB): %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, storeB, "lifecycle/app:v1")
|
||||
|
||||
// Step 5: CopyCmd registry:// -> in-memory registry 2.
|
||||
dstHost, dstOpts := newTestRegistry(t)
|
||||
copyOpts := &flags.CopyOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeB.Root),
|
||||
PlainHTTP: true,
|
||||
}
|
||||
if err := CopyCmd(ctx, copyOpts, storeB, "registry://"+dstHost, defaultCliOpts()); err != nil {
|
||||
t.Fatalf("CopyCmd registry: %v", err)
|
||||
}
|
||||
|
||||
// Step 6: Pull from registry 2 and compare digest to original.
|
||||
dstRef, err := name.NewTag(dstHost+"/lifecycle/app:v1", name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("name.NewTag: %v", err)
|
||||
}
|
||||
desc, err := remote.Get(dstRef, dstOpts...)
|
||||
if err != nil {
|
||||
t.Fatalf("image not found in target registry: %v", err)
|
||||
}
|
||||
if desc.Digest != srcDigest {
|
||||
t.Errorf("digest mismatch: got %s, want %s", desc.Digest, srcDigest)
|
||||
}
|
||||
}
|
||||
|
||||
// TestLifecycle_Chart_AddSaveLoadExtract exercises the full lifecycle for a
|
||||
// Helm chart: AddChartCmd -> SaveCmd -> LoadCmd -> ExtractCmd -> .tgz in destDir.
|
||||
func TestLifecycle_Chart_AddSaveLoadExtract(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
// Step 1: AddChartCmd with local testdata chart into store A.
|
||||
storeA := newTestStore(t)
|
||||
rso := defaultRootOpts(storeA.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
chartOpts := newAddChartOpts(chartTestdataDir, "")
|
||||
if err := AddChartCmd(ctx, chartOpts, storeA, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil {
|
||||
t.Fatalf("AddChartCmd: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, storeA, "rancher-cluster-templates")
|
||||
|
||||
// Flush index.json for SaveCmd.
|
||||
if err := storeA.SaveIndex(); err != nil {
|
||||
t.Fatalf("SaveIndex: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: SaveCmd -> archive.
|
||||
archivePath := filepath.Join(t.TempDir(), "lifecycle-chart.tar.zst")
|
||||
saveOpts := newSaveOpts(storeA.Root, archivePath)
|
||||
if err := SaveCmd(ctx, saveOpts, defaultRootOpts(storeA.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: LoadCmd -> new store.
|
||||
storeBDir := t.TempDir()
|
||||
loadOpts := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeBDir),
|
||||
FileName: []string{archivePath},
|
||||
}
|
||||
if err := LoadCmd(ctx, loadOpts, defaultRootOpts(storeBDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd: %v", err)
|
||||
}
|
||||
|
||||
storeB, err := store.NewLayout(storeBDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout(storeB): %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, storeB, "rancher-cluster-templates")
|
||||
|
||||
// Step 4: ExtractCmd -> .tgz in destDir.
|
||||
destDir := t.TempDir()
|
||||
extractOpts := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeB.Root),
|
||||
DestinationDir: destDir,
|
||||
}
|
||||
if err := ExtractCmd(ctx, extractOpts, storeB, "hauler/rancher-cluster-templates:0.5.2"); err != nil {
|
||||
t.Fatalf("ExtractCmd: %v", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadDir: %v", err)
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, e := range entries {
|
||||
if strings.HasSuffix(e.Name(), ".tgz") || strings.HasSuffix(e.Name(), ".tar.gz") {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
names := make([]string, len(entries))
|
||||
for i, e := range entries {
|
||||
names[i] = e.Name()
|
||||
}
|
||||
t.Errorf("expected a .tgz or .tar.gz in destDir, got: %v", names)
|
||||
}
|
||||
}
|
||||
|
||||
// TestLifecycle_Remove_ThenSave verifies that removing one artifact from a store
|
||||
// with two file artifacts, then saving/loading, results in only the retained
|
||||
// artifact being present.
|
||||
func TestLifecycle_Remove_ThenSave(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
// Step 1: Add two file artifacts.
|
||||
url1 := seedFileInHTTPServer(t, "keep-me.txt", "content to keep")
|
||||
url2 := seedFileInHTTPServer(t, "remove-me.txt", "content to remove")
|
||||
|
||||
storeA := newTestStore(t)
|
||||
if err := storeFile(ctx, storeA, v1.File{Path: url1}); err != nil {
|
||||
t.Fatalf("storeFile keep-me: %v", err)
|
||||
}
|
||||
if err := storeFile(ctx, storeA, v1.File{Path: url2}); err != nil {
|
||||
t.Fatalf("storeFile remove-me: %v", err)
|
||||
}
|
||||
|
||||
if n := countArtifactsInStore(t, storeA); n != 2 {
|
||||
t.Fatalf("expected 2 artifacts after adding both files, got %d", n)
|
||||
}
|
||||
|
||||
// Step 2: RemoveCmd(Force:true) on the "remove-me" artifact.
|
||||
if err := RemoveCmd(ctx, &flags.RemoveOpts{Force: true}, storeA, "remove-me"); err != nil {
|
||||
t.Fatalf("RemoveCmd: %v", err)
|
||||
}
|
||||
|
||||
if n := countArtifactsInStore(t, storeA); n != 1 {
|
||||
t.Fatalf("expected 1 artifact after removal, got %d", n)
|
||||
}
|
||||
assertArtifactInStore(t, storeA, "keep-me.txt")
|
||||
|
||||
// Flush index.json for SaveCmd. RemoveCmd calls OCI.SaveIndex() internally
|
||||
// (via Layout.Remove), but call it again for safety.
|
||||
if err := storeA.SaveIndex(); err != nil {
|
||||
t.Fatalf("SaveIndex: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: SaveCmd -> archive.
|
||||
archivePath := filepath.Join(t.TempDir(), "lifecycle-remove.tar.zst")
|
||||
saveOpts := newSaveOpts(storeA.Root, archivePath)
|
||||
if err := SaveCmd(ctx, saveOpts, defaultRootOpts(storeA.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd: %v", err)
|
||||
}
|
||||
|
||||
// Step 4: LoadCmd -> new store.
|
||||
storeBDir := t.TempDir()
|
||||
loadOpts := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeBDir),
|
||||
FileName: []string{archivePath},
|
||||
}
|
||||
if err := LoadCmd(ctx, loadOpts, defaultRootOpts(storeBDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd: %v", err)
|
||||
}
|
||||
|
||||
storeB, err := store.NewLayout(storeBDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout(storeB): %v", err)
|
||||
}
|
||||
|
||||
// Step 5: Assert only the retained artifact is present.
|
||||
if n := countArtifactsInStore(t, storeB); n != 1 {
|
||||
t.Errorf("expected 1 artifact in loaded store, got %d", n)
|
||||
}
|
||||
assertArtifactInStore(t, storeB, "keep-me.txt")
|
||||
}
|
||||
@@ -2,54 +2,130 @@ package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/mholt/archiver/v3"
|
||||
"github.com/rancherfederal/hauler/pkg/content"
|
||||
"github.com/rancherfederal/hauler/pkg/store"
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/archives"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
type LoadOpts struct {
|
||||
*RootOpts
|
||||
}
|
||||
|
||||
func (o *LoadOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
_ = f
|
||||
}
|
||||
|
||||
// LoadCmd
|
||||
// TODO: Just use mholt/archiver for now, even though we don't need most of it
|
||||
func LoadCmd(ctx context.Context, o *LoadOpts, archiveRefs ...string) error {
|
||||
// extracts the contents of an archived oci layout to an existing oci layout
|
||||
func LoadCmd(ctx context.Context, o *flags.LoadOpts, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
for _, archiveRef := range archiveRefs {
|
||||
l.Infof("loading content from [%s] to [%s]", archiveRef, o.StoreDir)
|
||||
err := unarchiveLayoutTo(ctx, archiveRef, o.StoreDir)
|
||||
tempOverride := rso.TempOverride
|
||||
|
||||
if tempOverride == "" {
|
||||
tempOverride = os.Getenv(consts.HaulerTempDir)
|
||||
}
|
||||
|
||||
tempDir, err := os.MkdirTemp(tempOverride, consts.DefaultHaulerTempDirName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
l.Debugf("using temporary directory at [%s]", tempDir)
|
||||
|
||||
for _, fileName := range o.FileName {
|
||||
l.Infof("loading haul [%s] to [%s]", fileName, o.StoreDir)
|
||||
err := unarchiveLayoutTo(ctx, fileName, o.StoreDir, tempDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clearDir(tempDir)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// unarchiveLayoutTo accepts an archived oci layout and extracts the contents to an existing oci layout, preserving the index
|
||||
func unarchiveLayoutTo(ctx context.Context, archivePath string, dest string) error {
|
||||
tmpdir, err := os.MkdirTemp("", "hauler")
|
||||
// accepts an archived OCI layout, extracts the contents to an existing OCI layout, and preserves the index
|
||||
func unarchiveLayoutTo(ctx context.Context, haulPath string, dest string, tempDir string) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
if strings.HasPrefix(haulPath, "http://") || strings.HasPrefix(haulPath, "https://") {
|
||||
l.Debugf("detected remote archive... starting download... [%s]", haulPath)
|
||||
|
||||
h := getter.NewHttp()
|
||||
parsedURL, err := url.Parse(haulPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rc, err := h.Open(ctx, parsedURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
fileName := h.Name(parsedURL)
|
||||
if fileName == "" {
|
||||
fileName = filepath.Base(parsedURL.Path)
|
||||
}
|
||||
haulPath = filepath.Join(tempDir, fileName)
|
||||
|
||||
out, err := os.Create(haulPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
if _, err = io.Copy(out, rc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := archives.Unarchive(ctx, haulPath, tempDir); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// ensure the incoming index.json has the correct annotations.
|
||||
data, err := os.ReadFile(tempDir + "/index.json")
|
||||
if err != nil {
|
||||
return (err)
|
||||
}
|
||||
|
||||
var idx ocispec.Index
|
||||
if err := json.Unmarshal(data, &idx); err != nil {
|
||||
return (err)
|
||||
}
|
||||
|
||||
for i := range idx.Manifests {
|
||||
if idx.Manifests[i].Annotations == nil {
|
||||
idx.Manifests[i].Annotations = make(map[string]string)
|
||||
}
|
||||
if _, exists := idx.Manifests[i].Annotations[consts.KindAnnotationName]; !exists {
|
||||
idx.Manifests[i].Annotations[consts.KindAnnotationName] = consts.KindAnnotationImage
|
||||
}
|
||||
if ref, ok := idx.Manifests[i].Annotations[consts.ContainerdImageNameKey]; ok {
|
||||
if slash := strings.Index(ref, "/"); slash != -1 {
|
||||
ref = ref[slash+1:]
|
||||
}
|
||||
if idx.Manifests[i].Annotations[consts.ImageRefKey] != ref {
|
||||
idx.Manifests[i].Annotations[consts.ImageRefKey] = ref
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
out, err := json.MarshalIndent(idx, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
if err := archiver.Unarchive(archivePath, tmpdir); err != nil {
|
||||
if err := os.WriteFile(tempDir+"/index.json", out, 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s, err := store.NewLayout(tmpdir)
|
||||
s, err := store.NewLayout(tempDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -62,3 +138,19 @@ func unarchiveLayoutTo(ctx context.Context, archivePath string, dest string) err
|
||||
_, err = s.CopyAll(ctx, ts, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
func clearDir(path string) error {
|
||||
entries, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
err = os.RemoveAll(filepath.Join(path, entry.Name()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
323
cmd/hauler/cli/store/load_test.go
Normal file
323
cmd/hauler/cli/store/load_test.go
Normal file
@@ -0,0 +1,323 @@
|
||||
package store
|
||||
|
||||
// load_test.go covers unarchiveLayoutTo, LoadCmd, and clearDir.
|
||||
//
|
||||
// Do NOT call t.Parallel() on tests that invoke createRootLevelArchive —
|
||||
// that helper uses the mholt/archives library directly to avoid os.Chdir,
|
||||
// so it is safe for concurrent use, but the tests themselves exercise
|
||||
// unarchiveLayoutTo which is already sequential.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
mholtarchives "github.com/mholt/archives"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/archives"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
// testHaulArchive is the relative path from cmd/hauler/cli/store/ to the
|
||||
// bundled test haul archive produced by the project's own CI/test setup.
|
||||
const testHaulArchive = "../../../../testdata/haul.tar.zst"
|
||||
|
||||
// createRootLevelArchive creates a tar.zst archive from dir with files placed
|
||||
// at the archive root (no directory prefix). This matches the layout produced
|
||||
// by SaveCmd, which uses os.Chdir + Archive(".", ...) to achieve the same
|
||||
// effect. Using mholt/archives directly avoids the os.Chdir side-effect.
|
||||
func createRootLevelArchive(dir, outfile string) error {
|
||||
// A trailing path separator tells mholt/archives to enumerate the
|
||||
// directory's *contents* only — files land at archive root with no prefix.
|
||||
// Without the trailing slash, an empty value uses filepath.Base(dir) as
|
||||
// the archive subdirectory name instead of placing files at root.
|
||||
files, err := mholtarchives.FilesFromDisk(context.Background(), nil, map[string]string{
|
||||
dir + string(filepath.Separator): "",
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, err := os.Create(outfile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
format := mholtarchives.CompressedArchive{
|
||||
Compression: mholtarchives.Zstd{},
|
||||
Archival: mholtarchives.Tar{},
|
||||
}
|
||||
return format.Archive(context.Background(), f, files)
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// TestUnarchiveLayoutTo
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestUnarchiveLayoutTo verifies that unarchiveLayoutTo correctly extracts a
|
||||
// haul archive into a destination OCI layout, backfills missing annotations,
|
||||
// and propagates the ContainerdImageNameKey → ImageRefKey mapping.
|
||||
func TestUnarchiveLayoutTo(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
destDir := t.TempDir()
|
||||
tempDir := t.TempDir()
|
||||
|
||||
if err := unarchiveLayoutTo(ctx, testHaulArchive, destDir, tempDir); err != nil {
|
||||
t.Fatalf("unarchiveLayoutTo: %v", err)
|
||||
}
|
||||
|
||||
s, err := store.NewLayout(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout(destDir): %v", err)
|
||||
}
|
||||
|
||||
if count := countArtifactsInStore(t, s); count == 0 {
|
||||
t.Fatal("expected at least one descriptor in dest store after unarchiveLayoutTo")
|
||||
}
|
||||
|
||||
// Every top-level descriptor must carry KindAnnotationName.
|
||||
// Descriptors that were loaded with ContainerdImageNameKey must also have
|
||||
// ImageRefKey set (the backfill logic in unarchiveLayoutTo ensures this).
|
||||
if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error {
|
||||
if desc.Annotations[consts.KindAnnotationName] == "" {
|
||||
t.Errorf("descriptor %s missing KindAnnotationName", desc.Digest)
|
||||
}
|
||||
if _, hasContainerd := desc.Annotations[consts.ContainerdImageNameKey]; hasContainerd {
|
||||
if desc.Annotations[consts.ImageRefKey] == "" {
|
||||
t.Errorf("descriptor %s has %s but missing %s",
|
||||
desc.Digest, consts.ContainerdImageNameKey, consts.ImageRefKey)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Walk: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// TestLoadCmd_LocalFile
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestLoadCmd_LocalFile verifies that LoadCmd loads one or more local haul
|
||||
// archives into the destination store.
|
||||
func TestLoadCmd_LocalFile(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
t.Run("single archive", func(t *testing.T) {
|
||||
destDir := t.TempDir()
|
||||
o := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(destDir),
|
||||
FileName: []string{testHaulArchive},
|
||||
}
|
||||
if err := LoadCmd(ctx, o, defaultRootOpts(destDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd: %v", err)
|
||||
}
|
||||
s, err := store.NewLayout(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout: %v", err)
|
||||
}
|
||||
if countArtifactsInStore(t, s) == 0 {
|
||||
t.Error("expected artifacts in store after LoadCmd")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("two archives", func(t *testing.T) {
|
||||
// Loading the same archive twice must be idempotent: duplicate blobs are
|
||||
// silently discarded by the OCI pusher. The descriptor count after two
|
||||
// loads must equal the count after a single load.
|
||||
singleDir := t.TempDir()
|
||||
singleOpts := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(singleDir),
|
||||
FileName: []string{testHaulArchive},
|
||||
}
|
||||
if err := LoadCmd(ctx, singleOpts, defaultRootOpts(singleDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd single: %v", err)
|
||||
}
|
||||
singleStore, err := store.NewLayout(singleDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout single: %v", err)
|
||||
}
|
||||
singleCount := countArtifactsInStore(t, singleStore)
|
||||
|
||||
doubleDir := t.TempDir()
|
||||
doubleOpts := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(doubleDir),
|
||||
FileName: []string{testHaulArchive, testHaulArchive},
|
||||
}
|
||||
if err := LoadCmd(ctx, doubleOpts, defaultRootOpts(doubleDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd double: %v", err)
|
||||
}
|
||||
doubleStore, err := store.NewLayout(doubleDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout double: %v", err)
|
||||
}
|
||||
doubleCount := countArtifactsInStore(t, doubleStore)
|
||||
|
||||
if doubleCount != singleCount {
|
||||
t.Errorf("loading the same archive twice: got %d descriptors, want %d (same as single load)",
|
||||
doubleCount, singleCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// TestLoadCmd_RemoteArchive
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestLoadCmd_RemoteArchive verifies that LoadCmd can fetch and load a haul
|
||||
// archive served over HTTP.
|
||||
func TestLoadCmd_RemoteArchive(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
archiveData, err := os.ReadFile(testHaulArchive)
|
||||
if err != nil {
|
||||
t.Fatalf("read test archive: %v", err)
|
||||
}
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Write(archiveData) //nolint:errcheck
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
destDir := t.TempDir()
|
||||
remoteURL := srv.URL + "/haul.tar.zst"
|
||||
|
||||
o := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(destDir),
|
||||
FileName: []string{remoteURL},
|
||||
}
|
||||
|
||||
if err := LoadCmd(ctx, o, defaultRootOpts(destDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd remote: %v", err)
|
||||
}
|
||||
|
||||
s, err := store.NewLayout(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout: %v", err)
|
||||
}
|
||||
if countArtifactsInStore(t, s) == 0 {
|
||||
t.Error("expected artifacts in store after remote LoadCmd")
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// TestUnarchiveLayoutTo_AnnotationBackfill
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestUnarchiveLayoutTo_AnnotationBackfill crafts a haul archive whose
|
||||
// index.json entries are missing KindAnnotationName, then verifies that
|
||||
// unarchiveLayoutTo backfills every entry with KindAnnotationImage.
|
||||
func TestUnarchiveLayoutTo_AnnotationBackfill(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
// Step 1: Extract the real test archive to obtain a valid OCI layout on disk.
|
||||
extractDir := t.TempDir()
|
||||
if err := archives.Unarchive(ctx, testHaulArchive, extractDir); err != nil {
|
||||
t.Fatalf("Unarchive: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: Read index.json and strip KindAnnotationName from every descriptor.
|
||||
indexPath := filepath.Join(extractDir, "index.json")
|
||||
data, err := os.ReadFile(indexPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read index.json: %v", err)
|
||||
}
|
||||
|
||||
var idx ocispec.Index
|
||||
if err := json.Unmarshal(data, &idx); err != nil {
|
||||
t.Fatalf("unmarshal index.json: %v", err)
|
||||
}
|
||||
if len(idx.Manifests) == 0 {
|
||||
t.Skip("testdata/haul.tar.zst has no top-level manifests — cannot test backfill")
|
||||
}
|
||||
for i := range idx.Manifests {
|
||||
delete(idx.Manifests[i].Annotations, consts.KindAnnotationName)
|
||||
}
|
||||
|
||||
out, err := json.MarshalIndent(idx, "", " ")
|
||||
if err != nil {
|
||||
t.Fatalf("marshal stripped index.json: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(indexPath, out, 0644); err != nil {
|
||||
t.Fatalf("write stripped index.json: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: Re-archive with files at the archive root (no subdir prefix) so
|
||||
// the layout matches what unarchiveLayoutTo expects after extraction.
|
||||
strippedArchive := filepath.Join(t.TempDir(), "stripped.tar.zst")
|
||||
if err := createRootLevelArchive(extractDir, strippedArchive); err != nil {
|
||||
t.Fatalf("createRootLevelArchive: %v", err)
|
||||
}
|
||||
|
||||
// Step 4: Load the stripped archive.
|
||||
destDir := t.TempDir()
|
||||
tempDir := t.TempDir()
|
||||
if err := unarchiveLayoutTo(ctx, strippedArchive, destDir, tempDir); err != nil {
|
||||
t.Fatalf("unarchiveLayoutTo stripped: %v", err)
|
||||
}
|
||||
|
||||
// Step 5: Every descriptor in the dest store must now have
|
||||
// KindAnnotationName set to KindAnnotationImage (the backfill default).
|
||||
s, err := store.NewLayout(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout(destDir): %v", err)
|
||||
}
|
||||
|
||||
if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error {
|
||||
kind := desc.Annotations[consts.KindAnnotationName]
|
||||
if kind == "" {
|
||||
t.Errorf("descriptor %s missing KindAnnotationName after backfill", desc.Digest)
|
||||
} else if kind != consts.KindAnnotationImage {
|
||||
t.Errorf("descriptor %s: expected backfilled kind=%q, got %q",
|
||||
desc.Digest, consts.KindAnnotationImage, kind)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Walk: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// TestClearDir
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestClearDir verifies that clearDir removes all entries from a directory
|
||||
// without removing the directory itself.
|
||||
func TestClearDir(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
for _, name := range []string{"a.txt", "b.txt"} {
|
||||
if err := os.WriteFile(filepath.Join(dir, name), []byte(name), 0644); err != nil {
|
||||
t.Fatalf("write %s: %v", name, err)
|
||||
}
|
||||
}
|
||||
if err := os.Mkdir(filepath.Join(dir, "subdir"), 0755); err != nil {
|
||||
t.Fatalf("mkdir subdir: %v", err)
|
||||
}
|
||||
|
||||
if err := clearDir(dir); err != nil {
|
||||
t.Fatalf("clearDir: %v", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadDir after clearDir: %v", err)
|
||||
}
|
||||
if len(entries) != 0 {
|
||||
names := make([]string, len(entries))
|
||||
for i, e := range entries {
|
||||
names[i] = e.Name()
|
||||
}
|
||||
t.Errorf("clearDir: expected empty dir, found: %s", strings.Join(names, ", "))
|
||||
}
|
||||
}
|
||||
122
cmd/hauler/cli/store/remove.go
Normal file
122
cmd/hauler/cli/store/remove.go
Normal file
@@ -0,0 +1,122 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
// formatReference pretty-prints a store reference whose tag embeds a digest
// suffix (e.g. cosign "repo:sha256-<hex>.sig" tags become
// "repo:sha256 [<hex>.sig]"). References without a tag, without a dash in
// the tag, or with an empty suffix are returned unchanged.
func formatReference(ref string) string {
	colon := strings.LastIndex(ref, ":")
	if colon == -1 {
		return ref
	}

	// split the tag (everything after the last colon) at its first dash
	head, tail, found := strings.Cut(ref[colon+1:], "-")
	if !found || tail == "" {
		return ref
	}

	base := ref[:colon+1] + head
	if base == "" {
		return ref
	}

	return base + " [" + tail + "]"
}
|
||||
|
||||
func RemoveCmd(ctx context.Context, o *flags.RemoveOpts, s *store.Layout, ref string) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
// collect matching artifacts
|
||||
type match struct {
|
||||
reference string
|
||||
desc ocispec.Descriptor
|
||||
}
|
||||
var matches []match
|
||||
|
||||
if err := s.Walk(func(reference string, desc ocispec.Descriptor) error {
|
||||
if !strings.Contains(reference, ref) {
|
||||
return nil
|
||||
}
|
||||
|
||||
matches = append(matches, match{
|
||||
reference: reference,
|
||||
desc: desc,
|
||||
})
|
||||
|
||||
return nil // continue walking
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(matches) == 0 {
|
||||
return fmt.Errorf("reference [%s] not found in store (use `hauler store info` to list store contents)", ref)
|
||||
}
|
||||
|
||||
if len(matches) >= 1 {
|
||||
l.Infof("found %d matching references:", len(matches))
|
||||
for _, m := range matches {
|
||||
l.Infof(" - [%s]", formatReference(m.reference))
|
||||
}
|
||||
}
|
||||
|
||||
if !o.Force {
|
||||
fmt.Printf(" ↳ are you sure you want to remove [%d] artifact(s) from the store? (yes/no) ", len(matches))
|
||||
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
|
||||
line, err := reader.ReadString('\n')
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
return fmt.Errorf("failed to read response: [%w]... please answer 'yes' or 'no'", err)
|
||||
}
|
||||
|
||||
response := strings.ToLower(strings.TrimSpace(line))
|
||||
|
||||
switch response {
|
||||
case "yes", "y":
|
||||
l.Infof("starting to remove artifacts from store...")
|
||||
case "no", "n":
|
||||
l.Infof("successfully cancelled removal of artifacts from store")
|
||||
return nil
|
||||
case "":
|
||||
return fmt.Errorf("failed to read response... please answer 'yes' or 'no'")
|
||||
default:
|
||||
return fmt.Errorf("invalid response [%s]... please answer 'yes' or 'no'", response)
|
||||
}
|
||||
}
|
||||
|
||||
// remove artifact(s)
|
||||
for _, m := range matches {
|
||||
if err := s.RemoveArtifact(ctx, m.reference, m.desc); err != nil {
|
||||
return fmt.Errorf("failed to remove artifact [%s]: %w", formatReference(m.reference), err)
|
||||
}
|
||||
|
||||
l.Infof("successfully removed [%s] of type [%s] with digest [%s]", formatReference(m.reference), m.desc.MediaType, m.desc.Digest.String())
|
||||
}
|
||||
|
||||
// clean up unreferenced blobs
|
||||
l.Infof("cleaning up all unreferenced blobs...")
|
||||
removedCount, removedSize, err := s.CleanUp(ctx)
|
||||
if err != nil {
|
||||
l.Warnf("garbage collection failed: [%v]", err)
|
||||
} else if removedCount > 0 {
|
||||
l.Infof("successfully removed [%d] unreferenced blobs [freed %d bytes]", removedCount, removedSize)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
155
cmd/hauler/cli/store/remove_test.go
Normal file
155
cmd/hauler/cli/store/remove_test.go
Normal file
@@ -0,0 +1,155 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
)
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Unit tests — formatReference
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestFormatReference(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
ref string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "empty string returns empty",
|
||||
ref: "",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "no colon returns unchanged",
|
||||
ref: "nocolon",
|
||||
want: "nocolon",
|
||||
},
|
||||
{
|
||||
name: "tag without dash returns unchanged",
|
||||
ref: "rancher/rancher:v2.8.5",
|
||||
want: "rancher/rancher:v2.8.5",
|
||||
},
|
||||
{
|
||||
name: "cosign sig tag splits at first dash after last colon",
|
||||
ref: "repo:sha256-abc123.sig",
|
||||
want: "repo:sha256 [abc123.sig]",
|
||||
},
|
||||
{
|
||||
name: "cosign att tag format",
|
||||
ref: "myrepo:sha256-deadbeef.att",
|
||||
want: "myrepo:sha256 [deadbeef.att]",
|
||||
},
|
||||
{
|
||||
name: "cosign sbom tag format",
|
||||
ref: "myrepo:sha256-deadbeef.sbom",
|
||||
want: "myrepo:sha256 [deadbeef.sbom]",
|
||||
},
|
||||
{
|
||||
name: "tag is only a dash returns unchanged (empty suffix)",
|
||||
ref: "repo:-",
|
||||
want: "repo:-",
|
||||
},
|
||||
{
|
||||
name: "multiple colons uses last one",
|
||||
ref: "host:5000/repo:sha256-abc.sig",
|
||||
want: "host:5000/repo:sha256 [abc.sig]",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := formatReference(tc.ref)
|
||||
if got != tc.want {
|
||||
t.Errorf("formatReference(%q) = %q, want %q", tc.ref, got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Integration tests — RemoveCmd
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestRemoveCmd_Force(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
url := seedFileInHTTPServer(t, "removeme.txt", "file-to-remove")
|
||||
if err := storeFile(ctx, s, v1.File{Path: url}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
|
||||
if n := countArtifactsInStore(t, s); n == 0 {
|
||||
t.Fatal("expected at least 1 artifact after storeFile, got 0")
|
||||
}
|
||||
|
||||
// Confirm the artifact ref contains "removeme".
|
||||
var ref string
|
||||
if err := s.Walk(func(reference string, _ ocispec.Descriptor) error {
|
||||
if strings.Contains(reference, "removeme") {
|
||||
ref = reference
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("walk to find ref: %v", err)
|
||||
}
|
||||
if ref == "" {
|
||||
t.Fatal("could not find stored artifact reference containing 'removeme'")
|
||||
}
|
||||
|
||||
if err := RemoveCmd(ctx, &flags.RemoveOpts{Force: true}, s, "removeme"); err != nil {
|
||||
t.Fatalf("RemoveCmd: %v", err)
|
||||
}
|
||||
|
||||
if n := countArtifactsInStore(t, s); n != 0 {
|
||||
t.Errorf("expected 0 artifacts after removal, got %d", n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveCmd_NotFound(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
err := RemoveCmd(ctx, &flags.RemoveOpts{Force: true}, s, "nonexistent-ref")
|
||||
if err == nil {
|
||||
t.Fatal("expected error for non-existent ref, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "not found") {
|
||||
t.Errorf("expected error containing 'not found', got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveCmd_Force_MultipleMatches(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
// Seed two file artifacts whose names share the substring "testfile".
|
||||
url1 := seedFileInHTTPServer(t, "testfile-alpha.txt", "content-alpha")
|
||||
url2 := seedFileInHTTPServer(t, "testfile-beta.txt", "content-beta")
|
||||
|
||||
if err := storeFile(ctx, s, v1.File{Path: url1}); err != nil {
|
||||
t.Fatalf("storeFile alpha: %v", err)
|
||||
}
|
||||
if err := storeFile(ctx, s, v1.File{Path: url2}); err != nil {
|
||||
t.Fatalf("storeFile beta: %v", err)
|
||||
}
|
||||
|
||||
if n := countArtifactsInStore(t, s); n < 2 {
|
||||
t.Fatalf("expected at least 2 artifacts, got %d", n)
|
||||
}
|
||||
|
||||
// Remove using a substring that matches both.
|
||||
if err := RemoveCmd(ctx, &flags.RemoveOpts{Force: true}, s, "testfile"); err != nil {
|
||||
t.Fatalf("RemoveCmd: %v", err)
|
||||
}
|
||||
|
||||
if n := countArtifactsInStore(t, s); n != 0 {
|
||||
t.Errorf("expected 0 artifacts after removal of both, got %d", n)
|
||||
}
|
||||
}
|
||||
@@ -1,37 +1,41 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
|
||||
"github.com/mholt/archiver/v3"
|
||||
"github.com/spf13/cobra"
|
||||
referencev3 "github.com/distribution/distribution/v3/reference"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
libv1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/layout"
|
||||
"github.com/google/go-containerregistry/pkg/v1/tarball"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/archives"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
)
|
||||
|
||||
type SaveOpts struct {
|
||||
*RootOpts
|
||||
FileName string
|
||||
}
|
||||
|
||||
func (o *SaveOpts) AddArgs(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.FileName, "filename", "f", "pkg.tar.zst", "Name of archive")
|
||||
}
|
||||
|
||||
// SaveCmd
|
||||
// TODO: Just use mholt/archiver for now, even though we don't need most of it
|
||||
func SaveCmd(ctx context.Context, o *SaveOpts, outputFile string) error {
|
||||
// saves a content store to store archives
|
||||
func SaveCmd(ctx context.Context, o *flags.SaveOpts, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
// TODO: Support more formats?
|
||||
a := archiver.NewTarZstd()
|
||||
a.OverwriteExisting = true
|
||||
// maps to handle compression and archival types
|
||||
compressionMap := archives.CompressionMap
|
||||
archivalMap := archives.ArchivalMap
|
||||
|
||||
absOutputfile, err := filepath.Abs(outputFile)
|
||||
// select the compression and archival type based parsed filename extension
|
||||
compression := compressionMap["zst"]
|
||||
archival := archivalMap["tar"]
|
||||
|
||||
absOutputfile, err := filepath.Abs(o.FileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -45,11 +49,227 @@ func SaveCmd(ctx context.Context, o *SaveOpts, outputFile string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
err = a.Archive([]string{"."}, absOutputfile)
|
||||
// create the manifest.json file
|
||||
if err := writeExportsManifest(ctx, ".", o.Platform); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// strip out the oci-layout file from the haul
|
||||
// required for containerd to be able to interpret the haul correctly for all mediatypes and artifactypes
|
||||
if o.ContainerdCompatibility {
|
||||
if err := os.Remove(filepath.Join(".", ocispec.ImageLayoutFile)); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
l.Warnf("compatibility warning... containerd... removing 'oci-layout' file to support containerd importing of images")
|
||||
}
|
||||
}
|
||||
|
||||
// create the archive
|
||||
err = archives.Archive(ctx, ".", absOutputfile, compression, archival)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l.Infof("saved store [%s] -> [%s]", o.StoreDir, absOutputfile)
|
||||
l.Infof("saving store [%s] to archive [%s]", o.StoreDir, o.FileName)
|
||||
return nil
|
||||
}
|
||||
|
||||
type exports struct {
|
||||
digests []string
|
||||
records map[string]tarball.Descriptor
|
||||
}
|
||||
|
||||
func writeExportsManifest(ctx context.Context, dir string, platformStr string) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
// validate platform format
|
||||
platform, err := libv1.ParsePlatform(platformStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
oci, err := layout.FromPath(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
idx, err := oci.ImageIndex()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
imx, err := idx.IndexManifest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
x := &exports{
|
||||
digests: []string{},
|
||||
records: map[string]tarball.Descriptor{},
|
||||
}
|
||||
|
||||
for _, desc := range imx.Manifests {
|
||||
l.Debugf("descriptor [%s] = [%s]", desc.Digest.String(), desc.MediaType)
|
||||
if artifactType := types.MediaType(desc.ArtifactType); artifactType != "" && !artifactType.IsImage() && !artifactType.IsIndex() {
|
||||
l.Debugf("descriptor [%s] <<< SKIPPING ARTIFACT [%q]", desc.Digest.String(), desc.ArtifactType)
|
||||
continue
|
||||
}
|
||||
// The kind annotation is the only reliable way to distinguish container images from
|
||||
// cosign signatures/attestations/SBOMs: those are stored as standard Docker/OCI
|
||||
// manifests (same media type as real images) so media type alone is insufficient.
|
||||
kind := desc.Annotations[consts.KindAnnotationName]
|
||||
if kind != consts.KindAnnotationImage && kind != consts.KindAnnotationIndex {
|
||||
l.Debugf("descriptor [%s] <<< SKIPPING KIND [%q]", desc.Digest.String(), kind)
|
||||
continue
|
||||
}
|
||||
|
||||
refName, hasRefName := desc.Annotations[consts.ContainerdImageNameKey]
|
||||
if !hasRefName {
|
||||
l.Debugf("descriptor [%s] <<< SKIPPING (no containerd image name)", desc.Digest.String())
|
||||
continue
|
||||
}
|
||||
|
||||
// Use the descriptor's actual media type to discriminate single-image manifests
|
||||
// from multi-arch indexes, rather than relying on the kind string for this.
|
||||
switch {
|
||||
case desc.MediaType.IsImage():
|
||||
if err := x.record(ctx, idx, desc, refName); err != nil {
|
||||
return err
|
||||
}
|
||||
case desc.MediaType.IsIndex():
|
||||
l.Debugf("index [%s]: digest=[%s]... type=[%s]... size=[%d]", refName, desc.Digest.String(), desc.MediaType, desc.Size)
|
||||
|
||||
// when no platform is inputted... warn the user of potential mismatch on import for docker
|
||||
// required for docker to be able to interpret and load the image correctly
|
||||
if platform.String() == "" {
|
||||
l.Warnf("compatibility warning... docker... specify platform to prevent potential mismatch on import of index [%s]", refName)
|
||||
}
|
||||
|
||||
iix, err := idx.ImageIndex(desc.Digest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ixm, err := iix.IndexManifest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ixd := range ixm.Manifests {
|
||||
if ixd.MediaType.IsImage() {
|
||||
if platform.String() != "" {
|
||||
if ixd.Platform.Architecture != platform.Architecture || ixd.Platform.OS != platform.OS {
|
||||
l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: does not match the supplied platform... skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// skip any platforms of 'unknown/unknown'... docker hates
|
||||
// required for docker to be able to interpret and load the image correctly
|
||||
if ixd.Platform.Architecture == "unknown" && ixd.Platform.OS == "unknown" {
|
||||
l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: matches unknown platform... skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := x.record(ctx, iix, ixd, refName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
l.Debugf("descriptor [%s] <<< SKIPPING media type [%q]", desc.Digest.String(), desc.MediaType)
|
||||
}
|
||||
}
|
||||
|
||||
buf := bytes.Buffer{}
|
||||
mnf := x.describe()
|
||||
err = json.NewEncoder(&buf).Encode(mnf)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return oci.WriteFile(consts.ImageManifestFile, buf.Bytes(), 0666)
|
||||
}
|
||||
|
||||
func (x *exports) describe() tarball.Manifest {
|
||||
m := make(tarball.Manifest, len(x.digests))
|
||||
for i, d := range x.digests {
|
||||
m[i] = x.records[d]
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func (x *exports) record(ctx context.Context, index libv1.ImageIndex, desc libv1.Descriptor, refname string) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
digest := desc.Digest.String()
|
||||
image, err := index.Image(desc.Digest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify this is a real container image by inspecting its manifest config media type.
|
||||
// Non-image OCI artifacts (Helm charts, files, cosign sigs) use distinct config types.
|
||||
manifest, err := image.Manifest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if manifest.Config.MediaType != types.DockerConfigJSON && manifest.Config.MediaType != types.OCIConfigJSON {
|
||||
l.Debugf("descriptor [%s] <<< SKIPPING NON-IMAGE config media type [%q]", desc.Digest.String(), manifest.Config.MediaType)
|
||||
return nil
|
||||
}
|
||||
|
||||
config, err := image.ConfigName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
xd, recorded := x.records[digest]
|
||||
if !recorded {
|
||||
// record one export record per digest
|
||||
x.digests = append(x.digests, digest)
|
||||
xd = tarball.Descriptor{
|
||||
Config: path.Join(ocispec.ImageBlobsDir, config.Algorithm, config.Hex),
|
||||
RepoTags: []string{},
|
||||
Layers: []string{},
|
||||
}
|
||||
|
||||
layers, err := image.Layers()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, layer := range layers {
|
||||
xl, err := layer.Digest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
xd.Layers = append(xd.Layers[:], path.Join(ocispec.ImageBlobsDir, xl.Algorithm, xl.Hex))
|
||||
}
|
||||
}
|
||||
|
||||
ref, err := name.ParseReference(refname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// record tags for the digest, eliminating dupes
|
||||
switch tag := ref.(type) {
|
||||
case name.Tag:
|
||||
named, err := referencev3.ParseNormalizedNamed(refname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
named = referencev3.TagNameOnly(named)
|
||||
repotag := referencev3.FamiliarString(named)
|
||||
xd.RepoTags = append(xd.RepoTags[:], repotag)
|
||||
slices.Sort(xd.RepoTags)
|
||||
xd.RepoTags = slices.Compact(xd.RepoTags)
|
||||
ref = tag.Digest(digest)
|
||||
}
|
||||
|
||||
l.Debugf("image [%s]: type=%s, size=%d", ref.Name(), desc.MediaType, desc.Size)
|
||||
// record export descriptor for the digest
|
||||
x.records[digest] = xd
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
223
cmd/hauler/cli/store/save_test.go
Normal file
223
cmd/hauler/cli/store/save_test.go
Normal file
@@ -0,0 +1,223 @@
|
||||
package store
|
||||
|
||||
// save_test.go covers writeExportsManifest and SaveCmd.
|
||||
//
|
||||
// IMPORTANT: SaveCmd calls os.Chdir(storeDir) and defers os.Chdir back. Do
|
||||
// NOT call t.Parallel() on any SaveCmd test, and always use absolute paths for
|
||||
// StoreDir and FileName so they remain valid after the chdir.
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/archives"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
// manifestEntry mirrors tarball.Descriptor for asserting manifest.json contents.
|
||||
type manifestEntry struct {
|
||||
Config string `json:"Config"`
|
||||
RepoTags []string `json:"RepoTags"`
|
||||
Layers []string `json:"Layers"`
|
||||
}
|
||||
|
||||
// readManifestJSON reads and unmarshals manifest.json from the given OCI layout dir.
|
||||
func readManifestJSON(t *testing.T, dir string) []manifestEntry {
|
||||
t.Helper()
|
||||
data, err := os.ReadFile(filepath.Join(dir, consts.ImageManifestFile))
|
||||
if err != nil {
|
||||
t.Fatalf("readManifestJSON: %v", err)
|
||||
}
|
||||
var entries []manifestEntry
|
||||
if err := json.Unmarshal(data, &entries); err != nil {
|
||||
t.Fatalf("readManifestJSON unmarshal: %v", err)
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
// newSaveOpts builds a SaveOpts pointing at storeDir with an absolute archive path.
|
||||
func newSaveOpts(storeDir, archivePath string) *flags.SaveOpts {
|
||||
return &flags.SaveOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeDir),
|
||||
FileName: archivePath,
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// writeExportsManifest unit tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestWriteExportsManifest(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
t.Run("no platform filter includes all platforms", func(t *testing.T) {
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
seedIndex(t, host, "test/multiarch", "v1", rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, host+"/test/multiarch:v1", ""); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
if err := writeExportsManifest(ctx, s.Root, ""); err != nil {
|
||||
t.Fatalf("writeExportsManifest: %v", err)
|
||||
}
|
||||
|
||||
entries := readManifestJSON(t, s.Root)
|
||||
if len(entries) < 2 {
|
||||
t.Errorf("expected >=2 entries (all platforms), got %d", len(entries))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("linux/amd64 filter yields single entry", func(t *testing.T) {
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
seedIndex(t, host, "test/multiarch", "v2", rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, host+"/test/multiarch:v2", ""); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
if err := writeExportsManifest(ctx, s.Root, "linux/amd64"); err != nil {
|
||||
t.Fatalf("writeExportsManifest: %v", err)
|
||||
}
|
||||
|
||||
entries := readManifestJSON(t, s.Root)
|
||||
if len(entries) != 1 {
|
||||
t.Errorf("expected 1 entry for linux/amd64, got %d", len(entries))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("chart artifact excluded via config media type check", func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
o := newAddChartOpts(chartTestdataDir, "")
|
||||
if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil {
|
||||
t.Fatalf("AddChartCmd: %v", err)
|
||||
}
|
||||
|
||||
if err := writeExportsManifest(ctx, s.Root, ""); err != nil {
|
||||
t.Fatalf("writeExportsManifest: %v", err)
|
||||
}
|
||||
|
||||
entries := readManifestJSON(t, s.Root)
|
||||
if len(entries) != 0 {
|
||||
t.Errorf("expected 0 entries (chart excluded from manifest.json), got %d", len(entries))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestWriteExportsManifest_SkipsNonImages(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
url := seedFileInHTTPServer(t, "skip.sh", "#!/bin/sh\necho skip")
|
||||
s := newTestStore(t)
|
||||
if err := storeFile(ctx, s, v1.File{Path: url}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
|
||||
if err := writeExportsManifest(ctx, s.Root, ""); err != nil {
|
||||
t.Fatalf("writeExportsManifest: %v", err)
|
||||
}
|
||||
|
||||
entries := readManifestJSON(t, s.Root)
|
||||
if len(entries) != 0 {
|
||||
t.Errorf("expected 0 entries for file-only store, got %d", len(entries))
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// SaveCmd integration tests
|
||||
// Do NOT use t.Parallel() — SaveCmd calls os.Chdir.
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestSaveCmd(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, _ := newLocalhostRegistry(t)
|
||||
seedImage(t, host, "test/save", "v1")
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, host+"/test/save:v1", ""); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
// FileName must be absolute so it remains valid after SaveCmd's os.Chdir.
|
||||
archivePath := filepath.Join(t.TempDir(), "haul.tar.zst")
|
||||
o := newSaveOpts(s.Root, archivePath)
|
||||
|
||||
if err := SaveCmd(ctx, o, defaultRootOpts(s.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd: %v", err)
|
||||
}
|
||||
|
||||
fi, err := os.Stat(archivePath)
|
||||
if err != nil {
|
||||
t.Fatalf("archive stat: %v", err)
|
||||
}
|
||||
if fi.Size() == 0 {
|
||||
t.Fatal("archive is empty")
|
||||
}
|
||||
|
||||
// Validate it is a well-formed zst archive by unarchiving it.
|
||||
destDir := t.TempDir()
|
||||
if err := archives.Unarchive(ctx, archivePath, destDir); err != nil {
|
||||
t.Fatalf("Unarchive: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSaveCmd_ContainerdCompatibility(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, _ := newLocalhostRegistry(t)
|
||||
seedImage(t, host, "test/containerd-compat", "v1")
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, host+"/test/containerd-compat:v1", ""); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
archivePath := filepath.Join(t.TempDir(), "haul-compat.tar.zst")
|
||||
o := newSaveOpts(s.Root, archivePath)
|
||||
o.ContainerdCompatibility = true
|
||||
|
||||
if err := SaveCmd(ctx, o, defaultRootOpts(s.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd ContainerdCompatibility: %v", err)
|
||||
}
|
||||
|
||||
destDir := t.TempDir()
|
||||
if err := archives.Unarchive(ctx, archivePath, destDir); err != nil {
|
||||
t.Fatalf("Unarchive: %v", err)
|
||||
}
|
||||
|
||||
// oci-layout must be absent from the extracted archive.
|
||||
ociLayoutPath := filepath.Join(destDir, "oci-layout")
|
||||
if _, err := os.Stat(ociLayoutPath); !os.IsNotExist(err) {
|
||||
t.Errorf("expected oci-layout to be absent in containerd-compatible archive, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSaveCmd_EmptyStore(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
// SaveCmd uses layout.FromPath which stats index.json — it must exist on
|
||||
// disk. A fresh store holds the index only in memory; SaveIndex flushes it.
|
||||
if err := s.SaveIndex(); err != nil {
|
||||
t.Fatalf("SaveIndex: %v", err)
|
||||
}
|
||||
|
||||
archivePath := filepath.Join(t.TempDir(), "haul-empty.tar.zst")
|
||||
o := newSaveOpts(s.Root, archivePath)
|
||||
|
||||
if err := SaveCmd(ctx, o, defaultRootOpts(s.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd empty store: %v", err)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(archivePath); err != nil {
|
||||
t.Fatalf("archive not created for empty store: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -2,9 +2,13 @@ package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/distribution/distribution/v3/configuration"
|
||||
dcontext "github.com/distribution/distribution/v3/context"
|
||||
@@ -12,50 +16,95 @@ import (
|
||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem"
|
||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/inmemory"
|
||||
"github.com/distribution/distribution/v3/version"
|
||||
"github.com/spf13/cobra"
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/store"
|
||||
|
||||
"github.com/rancherfederal/hauler/internal/server"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/internal/server"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
type ServeOpts struct {
|
||||
*RootOpts
|
||||
func validateStoreExists(s *store.Layout) error {
|
||||
indexPath := filepath.Join(s.Root, "index.json")
|
||||
|
||||
Port int
|
||||
RootDir string
|
||||
ConfigFile string
|
||||
Daemon bool
|
||||
_, err := os.Stat(indexPath)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
storedir string
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf(
|
||||
"no store found at [%s]\n ↳ does the hauler store exist? (verify with `hauler store info`)",
|
||||
s.Root,
|
||||
)
|
||||
}
|
||||
|
||||
return fmt.Errorf(
|
||||
"unable to access store at [%s]: %w",
|
||||
s.Root,
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
func (o *ServeOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
func loadConfig(filename string) (*configuration.Configuration, error) {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
f.IntVarP(&o.Port, "port", "p", 5000, "Port to listen on")
|
||||
f.StringVar(&o.RootDir, "directory", "registry", "Directory to use for registry backend (defaults to '$PWD/registry')")
|
||||
f.StringVarP(&o.ConfigFile, "config", "c", "", "Path to a config file, will override all other configs")
|
||||
f.BoolVarP(&o.Daemon, "daemon", "d", false, "Toggle serving as a daemon")
|
||||
return configuration.Parse(f)
|
||||
}
|
||||
|
||||
// ServeCmd serves the embedded registry almost identically to how distribution/v3 does it
|
||||
func ServeCmd(ctx context.Context, o *ServeOpts, s *store.Layout) error {
|
||||
func DefaultRegistryConfig(o *flags.ServeRegistryOpts, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *configuration.Configuration {
|
||||
cfg := &configuration.Configuration{
|
||||
Version: "0.1",
|
||||
Storage: configuration.Storage{
|
||||
"cache": configuration.Parameters{"blobdescriptor": "inmemory"},
|
||||
"filesystem": configuration.Parameters{"rootdirectory": o.RootDir},
|
||||
"maintenance": configuration.Parameters{
|
||||
"readonly": map[any]any{"enabled": o.ReadOnly},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if o.TLSCert != "" && o.TLSKey != "" {
|
||||
cfg.HTTP.TLS.Certificate = o.TLSCert
|
||||
cfg.HTTP.TLS.Key = o.TLSKey
|
||||
}
|
||||
|
||||
cfg.HTTP.Addr = fmt.Sprintf(":%d", o.Port)
|
||||
cfg.HTTP.Headers = http.Header{
|
||||
"X-Content-Type-Options": []string{"nosniff"},
|
||||
}
|
||||
|
||||
cfg.Log.Level = configuration.Loglevel(ro.LogLevel)
|
||||
cfg.Validation.Manifests.URLs.Allow = []string{".+"}
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
func ServeRegistryCmd(ctx context.Context, o *flags.ServeRegistryOpts, s *store.Layout, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
ctx = dcontext.WithVersion(ctx, version.Version)
|
||||
|
||||
if err := validateStoreExists(s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tr := server.NewTempRegistry(ctx, o.RootDir)
|
||||
if err := tr.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := &CopyOpts{}
|
||||
if err := CopyCmd(ctx, opts, s, "registry://"+tr.Registry()); err != nil {
|
||||
opts := &flags.CopyOpts{StoreRootOpts: rso, PlainHTTP: true}
|
||||
if err := CopyCmd(ctx, opts, s, "registry://"+tr.Registry(), ro); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tr.Close()
|
||||
|
||||
cfg := o.defaultConfig()
|
||||
cfg := DefaultRegistryConfig(o, rso, ro)
|
||||
if o.ConfigFile != "" {
|
||||
ucfg, err := loadConfig(o.ConfigFile)
|
||||
if err != nil {
|
||||
@@ -64,6 +113,17 @@ func ServeCmd(ctx context.Context, o *ServeOpts, s *store.Layout) error {
|
||||
cfg = ucfg
|
||||
}
|
||||
|
||||
l.Infof("starting registry on port [%d]", o.Port)
|
||||
|
||||
yamlConfig, err := yaml.Marshal(cfg)
|
||||
if err != nil {
|
||||
l.Errorf("failed to validate/output registry configuration: %v", err)
|
||||
} else {
|
||||
l.Infof("using registry configuration... \n%s", strings.TrimSpace(string(yamlConfig)))
|
||||
}
|
||||
|
||||
l.Debugf("detailed registry configuration: %+v", cfg)
|
||||
|
||||
r, err := server.NewRegistry(ctx, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -72,34 +132,39 @@ func ServeCmd(ctx context.Context, o *ServeOpts, s *store.Layout) error {
|
||||
if err = r.ListenAndServe(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadConfig(filename string) (*configuration.Configuration, error) {
|
||||
f, err := os.Open(filename)
|
||||
func ServeFilesCmd(ctx context.Context, o *flags.ServeFilesOpts, s *store.Layout, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
ctx = dcontext.WithVersion(ctx, version.Version)
|
||||
|
||||
if err := validateStoreExists(s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := &flags.CopyOpts{StoreRootOpts: &flags.StoreRootOpts{}}
|
||||
if err := CopyCmd(ctx, opts, s, "dir://"+o.RootDir, ro); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, err := server.NewFile(ctx, *o)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
return configuration.Parse(f)
|
||||
}
|
||||
|
||||
func (o *ServeOpts) defaultConfig() *configuration.Configuration {
|
||||
cfg := &configuration.Configuration{
|
||||
Version: "0.1",
|
||||
Storage: configuration.Storage{
|
||||
"cache": configuration.Parameters{"blobdescriptor": "inmemory"},
|
||||
"filesystem": configuration.Parameters{"rootdirectory": o.RootDir},
|
||||
|
||||
// TODO: Ensure this is toggleable via cli arg if necessary
|
||||
// "maintenance": configuration.Parameters{"readonly.enabled": false},
|
||||
},
|
||||
}
|
||||
cfg.Log.Level = "info"
|
||||
cfg.HTTP.Addr = fmt.Sprintf(":%d", o.Port)
|
||||
cfg.HTTP.Headers = http.Header{
|
||||
"X-Content-Type-Options": []string{"nosniff"},
|
||||
}
|
||||
|
||||
return cfg
|
||||
if o.TLSCert != "" && o.TLSKey != "" {
|
||||
l.Infof("starting file server with tls on port [%d]", o.Port)
|
||||
if err := f.ListenAndServeTLS(o.TLSCert, o.TLSKey); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
l.Infof("starting file server on port [%d]", o.Port)
|
||||
if err := f.ListenAndServe(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
164
cmd/hauler/cli/store/serve_test.go
Normal file
164
cmd/hauler/cli/store/serve_test.go
Normal file
@@ -0,0 +1,164 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
// writeIndexJSON writes a minimal valid OCI index.json to dir so that
|
||||
// validateStoreExists can find it. NewLayout only writes index.json on
|
||||
// SaveIndex, which is triggered by adding content — so tests that need a
|
||||
// "valid store on disk" must create the file themselves.
|
||||
func writeIndexJSON(t *testing.T, dir string) {
|
||||
t.Helper()
|
||||
const minimal = `{"schemaVersion":2,"mediaType":"application/vnd.oci.image.index.v1+json","manifests":[]}`
|
||||
if err := os.WriteFile(filepath.Join(dir, "index.json"), []byte(minimal), 0o644); err != nil {
|
||||
t.Fatalf("writeIndexJSON: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateStoreExists(t *testing.T) {
|
||||
t.Run("valid store", func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
writeIndexJSON(t, s.Root)
|
||||
if err := validateStoreExists(s); err != nil {
|
||||
t.Errorf("validateStoreExists on valid store: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("missing index.json", func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
err := validateStoreExists(s)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for missing index.json, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "no store found") {
|
||||
t.Errorf("expected 'no store found' in error, got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nonexistent directory", func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
// Point the layout root at a path that does not exist.
|
||||
s.Root = filepath.Join(t.TempDir(), "does-not-exist", "nested")
|
||||
err := validateStoreExists(s)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for nonexistent dir, got nil")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestDefaultRegistryConfig(t *testing.T) {
|
||||
rootDir := t.TempDir()
|
||||
o := &flags.ServeRegistryOpts{
|
||||
Port: consts.DefaultRegistryPort,
|
||||
RootDir: rootDir,
|
||||
}
|
||||
rso := defaultRootOpts(rootDir)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
cfg := DefaultRegistryConfig(o, rso, ro)
|
||||
if cfg == nil {
|
||||
t.Fatal("DefaultRegistryConfig returned nil")
|
||||
}
|
||||
|
||||
// Port
|
||||
wantAddr := ":5000"
|
||||
if cfg.HTTP.Addr != wantAddr {
|
||||
t.Errorf("HTTP.Addr = %q, want %q", cfg.HTTP.Addr, wantAddr)
|
||||
}
|
||||
|
||||
// No TLS by default.
|
||||
if cfg.HTTP.TLS.Certificate != "" || cfg.HTTP.TLS.Key != "" {
|
||||
t.Errorf("expected no TLS cert/key by default, got cert=%q key=%q",
|
||||
cfg.HTTP.TLS.Certificate, cfg.HTTP.TLS.Key)
|
||||
}
|
||||
|
||||
// Log level matches ro.LogLevel.
|
||||
if string(cfg.Log.Level) != ro.LogLevel {
|
||||
t.Errorf("Log.Level = %q, want %q", cfg.Log.Level, ro.LogLevel)
|
||||
}
|
||||
|
||||
// Storage rootdirectory.
|
||||
fsParams := cfg.Storage["filesystem"]
|
||||
if fsParams == nil {
|
||||
t.Fatal("storage.filesystem not set")
|
||||
}
|
||||
if fsParams["rootdirectory"] != rootDir {
|
||||
t.Errorf("storage.filesystem.rootdirectory = %v, want %q", fsParams["rootdirectory"], rootDir)
|
||||
}
|
||||
|
||||
// URL allow rules.
|
||||
if len(cfg.Validation.Manifests.URLs.Allow) == 0 {
|
||||
t.Error("Validation.Manifests.URLs.Allow is empty, want at least one rule")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultRegistryConfig_WithTLS(t *testing.T) {
|
||||
rootDir := t.TempDir()
|
||||
o := &flags.ServeRegistryOpts{
|
||||
Port: consts.DefaultRegistryPort,
|
||||
RootDir: rootDir,
|
||||
TLSCert: "/path/to/cert.pem",
|
||||
TLSKey: "/path/to/key.pem",
|
||||
}
|
||||
rso := defaultRootOpts(rootDir)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
cfg := DefaultRegistryConfig(o, rso, ro)
|
||||
if cfg.HTTP.TLS.Certificate != o.TLSCert {
|
||||
t.Errorf("TLS.Certificate = %q, want %q", cfg.HTTP.TLS.Certificate, o.TLSCert)
|
||||
}
|
||||
if cfg.HTTP.TLS.Key != o.TLSKey {
|
||||
t.Errorf("TLS.Key = %q, want %q", cfg.HTTP.TLS.Key, o.TLSKey)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadConfig_ValidFile(t *testing.T) {
|
||||
// Write a minimal valid distribution registry config.
|
||||
cfg := `
|
||||
version: 0.1
|
||||
log:
|
||||
level: info
|
||||
storage:
|
||||
filesystem:
|
||||
rootdirectory: /tmp/registry
|
||||
cache:
|
||||
blobdescriptor: inmemory
|
||||
http:
|
||||
addr: :5000
|
||||
headers:
|
||||
X-Content-Type-Options: [nosniff]
|
||||
`
|
||||
f, err := os.CreateTemp(t.TempDir(), "registry-config-*.yaml")
|
||||
if err != nil {
|
||||
t.Fatalf("create temp file: %v", err)
|
||||
}
|
||||
if _, err := f.WriteString(cfg); err != nil {
|
||||
t.Fatalf("write config: %v", err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
got, err := loadConfig(f.Name())
|
||||
if err != nil {
|
||||
t.Fatalf("loadConfig: %v", err)
|
||||
}
|
||||
if got == nil {
|
||||
t.Fatal("loadConfig returned nil config")
|
||||
}
|
||||
if got.HTTP.Addr != ":5000" {
|
||||
t.Errorf("HTTP.Addr = %q, want %q", got.HTTP.Addr, ":5000")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadConfig_InvalidFile(t *testing.T) {
|
||||
_, err := loadConfig("/nonexistent/path/to/config.yaml")
|
||||
if err == nil {
|
||||
t.Fatal("expected error for nonexistent config file, got nil")
|
||||
}
|
||||
}
|
||||
@@ -5,96 +5,136 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/store"
|
||||
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
tchart "github.com/rancherfederal/hauler/pkg/collection/chart"
|
||||
"github.com/rancherfederal/hauler/pkg/collection/imagetxt"
|
||||
"github.com/rancherfederal/hauler/pkg/collection/k3s"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
"github.com/rancherfederal/hauler/pkg/content"
|
||||
"github.com/rancherfederal/hauler/pkg/cosign"
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content"
|
||||
"hauler.dev/go/hauler/pkg/cosign"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/reference"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
type SyncOpts struct {
|
||||
*RootOpts
|
||||
ContentFiles []string
|
||||
Key string
|
||||
Products []string
|
||||
}
|
||||
|
||||
func (o *SyncOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringSliceVarP(&o.ContentFiles, "files", "f", []string{}, "Path to content files")
|
||||
f.StringVarP(&o.Key, "key", "k", "", "(Optional) Path to the key for signature verification")
|
||||
f.StringSliceVar(&o.Products, "products", []string{}, "Used for RGS Carbide customers to supply a product and version and Hauler will retrieve the images. i.e. '--product rancher=v2.7.6'")
|
||||
}
|
||||
|
||||
func SyncCmd(ctx context.Context, o *SyncOpts, s *store.Layout) error {
|
||||
func SyncCmd(ctx context.Context, o *flags.SyncOpts, s *store.Layout, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
// Start from an empty store (contents are cached elsewhere)
|
||||
l.Debugf("flushing content store")
|
||||
if err := s.Flush(ctx); err != nil {
|
||||
tempOverride := rso.TempOverride
|
||||
|
||||
if tempOverride == "" {
|
||||
tempOverride = os.Getenv(consts.HaulerTempDir)
|
||||
}
|
||||
|
||||
tempDir, err := os.MkdirTemp(tempOverride, consts.DefaultHaulerTempDirName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
// if passed products, check for a remote manifest to retrieve and use.
|
||||
for _, product := range o.Products {
|
||||
l.Infof("processing content file for product: '%s'", product)
|
||||
parts := strings.Split(product, "=")
|
||||
l.Debugf("using temporary directory at [%s]", tempDir)
|
||||
|
||||
// if passed products, check for a remote manifest to retrieve and use
|
||||
for _, productName := range o.Products {
|
||||
l.Infof("processing product manifest for [%s] to store [%s]", productName, o.StoreDir)
|
||||
parts := strings.Split(productName, "=")
|
||||
tag := strings.ReplaceAll(parts[1], "+", "-")
|
||||
manifestLoc := fmt.Sprintf("%s/hauler/%s-manifest.yaml:%s", consts.CarbideRegistry, parts[0], tag)
|
||||
l.Infof("retrieving product manifest from: '%s'", manifestLoc)
|
||||
img := v1alpha1.Image{
|
||||
|
||||
ProductRegistry := o.ProductRegistry // cli flag
|
||||
// if no cli flag use CarbideRegistry.
|
||||
if o.ProductRegistry == "" {
|
||||
ProductRegistry = consts.CarbideRegistry
|
||||
}
|
||||
|
||||
manifestLoc := fmt.Sprintf("%s/hauler/%s-manifest.yaml:%s", ProductRegistry, parts[0], tag)
|
||||
l.Infof("fetching product manifest from [%s]", manifestLoc)
|
||||
img := v1.Image{
|
||||
Name: manifestLoc,
|
||||
}
|
||||
err := storeImage(ctx, s, img)
|
||||
err := storeImage(ctx, s, img, o.Platform, rso, ro, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ExtractCmd(ctx, &ExtractOpts{RootOpts: o.RootOpts}, s, fmt.Sprintf("hauler/%s-manifest.yaml:%s", parts[0],tag))
|
||||
err = ExtractCmd(ctx, &flags.ExtractOpts{StoreRootOpts: o.StoreRootOpts}, s, fmt.Sprintf("hauler/%s-manifest.yaml:%s", parts[0], tag))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filename := fmt.Sprintf("%s-manifest.yaml", parts[0])
|
||||
fileName := fmt.Sprintf("%s-manifest.yaml", parts[0])
|
||||
|
||||
fi, err := os.Open(filename)
|
||||
fi, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = processContent(ctx, fi, o, s)
|
||||
defer fi.Close()
|
||||
err = processContent(ctx, fi, o, s, rso, ro)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Infof("processing completed successfully")
|
||||
}
|
||||
|
||||
// if passed a local manifest, process it
|
||||
for _, filename := range o.ContentFiles {
|
||||
l.Debugf("processing content file: '%s'", filename)
|
||||
fi, err := os.Open(filename)
|
||||
// If passed a local manifest, process it
|
||||
for _, fileName := range o.FileName {
|
||||
l.Infof("processing manifest [%s] to store [%s]", fileName, o.StoreDir)
|
||||
|
||||
haulPath := fileName
|
||||
if strings.HasPrefix(haulPath, "http://") || strings.HasPrefix(haulPath, "https://") {
|
||||
l.Debugf("detected remote manifest... starting download... [%s]", haulPath)
|
||||
|
||||
h := getter.NewHttp()
|
||||
parsedURL, err := url.Parse(haulPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rc, err := h.Open(ctx, parsedURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
fileName := h.Name(parsedURL)
|
||||
if fileName == "" {
|
||||
fileName = filepath.Base(parsedURL.Path)
|
||||
}
|
||||
haulPath = filepath.Join(tempDir, fileName)
|
||||
|
||||
out, err := os.Create(haulPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer out.Close()
|
||||
|
||||
if _, err = io.Copy(out, rc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
fi, err := os.Open(haulPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = processContent(ctx, fi, o, s)
|
||||
defer fi.Close()
|
||||
|
||||
err = processContent(ctx, fi, o, s, rso, ro)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l.Infof("processing completed successfully")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func processContent(ctx context.Context, fi *os.File, o *SyncOpts, s *store.Layout) error {
|
||||
func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *store.Layout, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
reader := yaml.NewYAMLReader(bufio.NewReader(fi))
|
||||
@@ -108,136 +148,217 @@ func processContent(ctx context.Context, fi *os.File, o *SyncOpts, s *store.Layo
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
docs = append(docs, raw)
|
||||
}
|
||||
|
||||
for _, doc := range docs {
|
||||
obj, err := content.Load(doc)
|
||||
if err != nil {
|
||||
l.Debugf("skipping sync of unknown content")
|
||||
l.Warnf("skipping syncing due to %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
l.Infof("syncing [%s] to store", obj.GroupVersionKind().String())
|
||||
gvk := obj.GroupVersionKind()
|
||||
l.Infof("syncing content [%s] with [kind=%s] to store [%s]", gvk.GroupVersion(), gvk.Kind, o.StoreDir)
|
||||
|
||||
// TODO: Should type switch instead...
|
||||
switch obj.GroupVersionKind().Kind {
|
||||
case v1alpha1.FilesContentKind:
|
||||
var cfg v1alpha1.Files
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
switch gvk.Kind {
|
||||
|
||||
for _, f := range cfg.Spec.Files {
|
||||
err := storeFile(ctx, s, f)
|
||||
if err != nil {
|
||||
case consts.FilesContentKind:
|
||||
switch gvk.Version {
|
||||
case "v1":
|
||||
var cfg v1.Files
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case v1alpha1.ImagesContentKind:
|
||||
var cfg v1alpha1.Images
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, i := range cfg.Spec.Images {
|
||||
|
||||
// Check if the user provided a key.
|
||||
if o.Key != "" || i.Key != "" {
|
||||
key := o.Key
|
||||
if i.Key != "" {
|
||||
key, err = homedir.Expand(i.Key)
|
||||
for _, f := range cfg.Spec.Files {
|
||||
if err := storeFile(ctx, s, f); err != nil {
|
||||
return err
|
||||
}
|
||||
l.Debugf("key for image [%s]", key)
|
||||
|
||||
// verify signature using the provided key.
|
||||
err := cosign.VerifySignature(ctx, s, key, i.Name)
|
||||
if err != nil {
|
||||
l.Errorf("signature verification failed for image [%s]. ** hauler will skip adding this image to the store **:\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
|
||||
case consts.ImagesContentKind:
|
||||
switch gvk.Version {
|
||||
case "v1":
|
||||
var cfg v1.Images
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a := cfg.GetAnnotations()
|
||||
for _, i := range cfg.Spec.Images {
|
||||
|
||||
if a[consts.ImageAnnotationRegistry] != "" || o.Registry != "" {
|
||||
newRef, _ := reference.Parse(i.Name)
|
||||
newReg := o.Registry
|
||||
if o.Registry == "" && a[consts.ImageAnnotationRegistry] != "" {
|
||||
newReg = a[consts.ImageAnnotationRegistry]
|
||||
}
|
||||
if newRef.Context().RegistryStr() == "" {
|
||||
newRef, err = reference.Relocate(i.Name, newReg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
i.Name = newRef.Name()
|
||||
}
|
||||
|
||||
hasAnnotationIdentityOptions := a[consts.ImageAnnotationCertIdentityRegexp] != "" || a[consts.ImageAnnotationCertIdentity] != ""
|
||||
hasCliIdentityOptions := o.CertIdentityRegexp != "" || o.CertIdentity != ""
|
||||
hasImageIdentityOptions := i.CertIdentityRegexp != "" || i.CertIdentity != ""
|
||||
|
||||
needsKeylessVerificaton := hasAnnotationIdentityOptions || hasCliIdentityOptions || hasImageIdentityOptions
|
||||
needsPubKeyVerification := a[consts.ImageAnnotationKey] != "" || o.Key != "" || i.Key != ""
|
||||
if needsPubKeyVerification {
|
||||
key := o.Key
|
||||
if o.Key == "" && a[consts.ImageAnnotationKey] != "" {
|
||||
key, err = homedir.Expand(a[consts.ImageAnnotationKey])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if i.Key != "" {
|
||||
key, err = homedir.Expand(i.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.Debugf("key for image [%s]", key)
|
||||
|
||||
tlog := o.Tlog
|
||||
if !o.Tlog && a[consts.ImageAnnotationTlog] == "true" {
|
||||
tlog = true
|
||||
}
|
||||
if i.Tlog {
|
||||
tlog = i.Tlog
|
||||
}
|
||||
l.Debugf("transparency log for verification [%b]", tlog)
|
||||
|
||||
if err := cosign.VerifySignature(ctx, key, tlog, i.Name, rso, ro); err != nil {
|
||||
l.Errorf("signature verification failed for image [%s]... skipping...\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("signature verified for image [%s]", i.Name)
|
||||
} else if needsKeylessVerificaton { //Keyless signature verification
|
||||
certIdentityRegexp := o.CertIdentityRegexp
|
||||
if o.CertIdentityRegexp == "" && a[consts.ImageAnnotationCertIdentityRegexp] != "" {
|
||||
certIdentityRegexp = a[consts.ImageAnnotationCertIdentityRegexp]
|
||||
}
|
||||
if i.CertIdentityRegexp != "" {
|
||||
certIdentityRegexp = i.CertIdentityRegexp
|
||||
}
|
||||
l.Debugf("certIdentityRegexp for image [%s]", certIdentityRegexp)
|
||||
|
||||
certIdentity := o.CertIdentity
|
||||
if o.CertIdentity == "" && a[consts.ImageAnnotationCertIdentity] != "" {
|
||||
certIdentity = a[consts.ImageAnnotationCertIdentity]
|
||||
}
|
||||
if i.CertIdentity != "" {
|
||||
certIdentity = i.CertIdentity
|
||||
}
|
||||
l.Debugf("certIdentity for image [%s]", certIdentity)
|
||||
|
||||
certOidcIssuer := o.CertOidcIssuer
|
||||
if o.CertOidcIssuer == "" && a[consts.ImageAnnotationCertOidcIssuer] != "" {
|
||||
certOidcIssuer = a[consts.ImageAnnotationCertOidcIssuer]
|
||||
}
|
||||
if i.CertOidcIssuer != "" {
|
||||
certOidcIssuer = i.CertOidcIssuer
|
||||
}
|
||||
l.Debugf("certOidcIssuer for image [%s]", certOidcIssuer)
|
||||
|
||||
certOidcIssuerRegexp := o.CertOidcIssuerRegexp
|
||||
if o.CertOidcIssuerRegexp == "" && a[consts.ImageAnnotationCertOidcIssuerRegexp] != "" {
|
||||
certOidcIssuerRegexp = a[consts.ImageAnnotationCertOidcIssuerRegexp]
|
||||
}
|
||||
if i.CertOidcIssuerRegexp != "" {
|
||||
certOidcIssuerRegexp = i.CertOidcIssuerRegexp
|
||||
}
|
||||
l.Debugf("certOidcIssuerRegexp for image [%s]", certOidcIssuerRegexp)
|
||||
|
||||
certGithubWorkflowRepository := o.CertGithubWorkflowRepository
|
||||
if o.CertGithubWorkflowRepository == "" && a[consts.ImageAnnotationCertGithubWorkflowRepository] != "" {
|
||||
certGithubWorkflowRepository = a[consts.ImageAnnotationCertGithubWorkflowRepository]
|
||||
}
|
||||
if i.CertGithubWorkflowRepository != "" {
|
||||
certGithubWorkflowRepository = i.CertGithubWorkflowRepository
|
||||
}
|
||||
l.Debugf("certGithubWorkflowRepository for image [%s]", certGithubWorkflowRepository)
|
||||
|
||||
// Keyless (Fulcio) certs expire after ~10 min; tlog is always
|
||||
// required to prove the cert was valid at signing time.
|
||||
if err := cosign.VerifyKeylessSignature(ctx, certIdentity, certIdentityRegexp, certOidcIssuer, certOidcIssuerRegexp, certGithubWorkflowRepository, i.Name, rso, ro); err != nil {
|
||||
l.Errorf("signature verification failed for image [%s]... skipping...\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("keyless signature verified for image [%s]", i.Name)
|
||||
}
|
||||
platform := o.Platform
|
||||
if o.Platform == "" && a[consts.ImageAnnotationPlatform] != "" {
|
||||
platform = a[consts.ImageAnnotationPlatform]
|
||||
}
|
||||
if i.Platform != "" {
|
||||
platform = i.Platform
|
||||
}
|
||||
|
||||
rewrite := ""
|
||||
if i.Rewrite != "" {
|
||||
rewrite = i.Rewrite
|
||||
}
|
||||
|
||||
if err := storeImage(ctx, s, i, platform, rso, ro, rewrite); err != nil {
|
||||
return err
|
||||
}
|
||||
l.Infof("signature verified for image [%s]", i.Name)
|
||||
}
|
||||
|
||||
err = storeImage(ctx, s, i)
|
||||
if err != nil {
|
||||
s.CopyAll(ctx, s.OCI, nil)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
|
||||
case consts.ChartsContentKind:
|
||||
switch gvk.Version {
|
||||
case "v1":
|
||||
var cfg v1.Charts
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case v1alpha1.ChartsContentKind:
|
||||
var cfg v1alpha1.Charts
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, ch := range cfg.Spec.Charts {
|
||||
// TODO: Provide a way to configure syncs
|
||||
err := storeChart(ctx, s, ch, &action.ChartPathOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case v1alpha1.K3sCollectionKind:
|
||||
var cfg v1alpha1.K3s
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
k, err := k3s.NewK3s(cfg.Spec.Version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := s.AddOCICollection(ctx, k); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case v1alpha1.ChartsCollectionKind:
|
||||
var cfg v1alpha1.ThickCharts
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, cfg := range cfg.Spec.Charts {
|
||||
tc, err := tchart.NewThickChart(cfg, &action.ChartPathOptions{
|
||||
RepoURL: cfg.RepoURL,
|
||||
Version: cfg.Version,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
registry := o.Registry
|
||||
if registry == "" {
|
||||
annotation := cfg.GetAnnotations()
|
||||
if annotation != nil {
|
||||
registry = annotation[consts.ImageAnnotationRegistry]
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := s.AddOCICollection(ctx, tc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case v1alpha1.ImageTxtsContentKind:
|
||||
var cfg v1alpha1.ImageTxts
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, cfgIt := range cfg.Spec.ImageTxts {
|
||||
it, err := imagetxt.New(cfgIt.Ref,
|
||||
imagetxt.WithIncludeSources(cfgIt.Sources.Include...),
|
||||
imagetxt.WithExcludeSources(cfgIt.Sources.Exclude...),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("convert ImageTxt %s: %v", cfg.Name, err)
|
||||
for i, ch := range cfg.Spec.Charts {
|
||||
if err := storeChart(ctx, s, ch,
|
||||
&flags.AddChartOpts{
|
||||
ChartOpts: &action.ChartPathOptions{
|
||||
RepoURL: ch.RepoURL,
|
||||
Version: ch.Version,
|
||||
},
|
||||
AddImages: ch.AddImages,
|
||||
AddDependencies: ch.AddDependencies,
|
||||
Registry: registry,
|
||||
Platform: o.Platform,
|
||||
},
|
||||
rso, ro,
|
||||
cfg.Spec.Charts[i].Rewrite,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := s.AddOCICollection(ctx, it); err != nil {
|
||||
return fmt.Errorf("add ImageTxt %s to store: %v", cfg.Name, err)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unrecognized content/collection type: %s", obj.GroupVersionKind().String())
|
||||
return fmt.Errorf("unsupported kind [%s]... valid kinds are [Files, Images, Charts]", gvk.Kind)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
290
cmd/hauler/cli/store/sync_test.go
Normal file
290
cmd/hauler/cli/store/sync_test.go
Normal file
@@ -0,0 +1,290 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
)
|
||||
|
||||
// writeManifestFile writes yamlContent to a temp file, seeks back to the
|
||||
// start, and registers t.Cleanup to close + remove it. Returns the open
|
||||
// *os.File, ready for processContent to read.
|
||||
func writeManifestFile(t *testing.T, yamlContent string) *os.File {
|
||||
t.Helper()
|
||||
fi, err := os.CreateTemp(t.TempDir(), "hauler-manifest-*.yaml")
|
||||
if err != nil {
|
||||
t.Fatalf("writeManifestFile CreateTemp: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { fi.Close() })
|
||||
if _, err := fi.WriteString(yamlContent); err != nil {
|
||||
t.Fatalf("writeManifestFile WriteString: %v", err)
|
||||
}
|
||||
if _, err := fi.Seek(0, io.SeekStart); err != nil {
|
||||
t.Fatalf("writeManifestFile Seek: %v", err)
|
||||
}
|
||||
return fi
|
||||
}
|
||||
|
||||
// newSyncOpts builds a SyncOpts pointing at storeDir.
|
||||
func newSyncOpts(storeDir string) *flags.SyncOpts {
|
||||
return &flags.SyncOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeDir),
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// processContent tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestProcessContent_Files_v1(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
fileURL := seedFileInHTTPServer(t, "synced.sh", "#!/bin/sh\necho hello")
|
||||
|
||||
manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Files
|
||||
metadata:
|
||||
name: test-files
|
||||
spec:
|
||||
files:
|
||||
- path: %s
|
||||
`, fileURL)
|
||||
|
||||
fi := writeManifestFile(t, manifest)
|
||||
o := newSyncOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil {
|
||||
t.Fatalf("processContent Files v1: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "synced.sh")
|
||||
}
|
||||
|
||||
func TestProcessContent_Charts_v1(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
// Use the same relative path as add_test.go: url.ParseRequestURI accepts
|
||||
// absolute Unix paths, making isUrl() return true for them. A relative
|
||||
// path correctly keeps isUrl() false so Helm sees it as a local directory.
|
||||
manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Charts
|
||||
metadata:
|
||||
name: test-charts
|
||||
spec:
|
||||
charts:
|
||||
- name: rancher-cluster-templates-0.5.2.tgz
|
||||
repoURL: %s
|
||||
`, chartTestdataDir)
|
||||
|
||||
fi := writeManifestFile(t, manifest)
|
||||
o := newSyncOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil {
|
||||
t.Fatalf("processContent Charts v1: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "rancher-cluster-templates")
|
||||
}
|
||||
|
||||
func TestProcessContent_Images_v1(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
host, _ := newLocalhostRegistry(t)
|
||||
seedImage(t, host, "myorg/myimage", "v1") // transport not needed; AddImage reads via localhost scheme
|
||||
|
||||
manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Images
|
||||
metadata:
|
||||
name: test-images
|
||||
spec:
|
||||
images:
|
||||
- name: %s/myorg/myimage:v1
|
||||
`, host)
|
||||
|
||||
fi := writeManifestFile(t, manifest)
|
||||
o := newSyncOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil {
|
||||
t.Fatalf("processContent Images v1: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "myorg/myimage")
|
||||
}
|
||||
|
||||
func TestProcessContent_UnsupportedKind(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
// A valid apiVersion with an unsupported kind passes content.Load but hits
|
||||
// the default branch of the kind switch, returning an error.
|
||||
manifest := `apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Unknown
|
||||
metadata:
|
||||
name: test
|
||||
`
|
||||
|
||||
fi := writeManifestFile(t, manifest)
|
||||
o := newSyncOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err == nil {
|
||||
t.Fatal("expected error for unsupported kind, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessContent_UnsupportedVersion(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
// An unrecognized apiVersion causes content.Load to return an error, which
|
||||
// processContent treats as a warn-and-skip — the function returns nil and
|
||||
// no artifact is added to the store.
|
||||
manifest := `apiVersion: content.hauler.cattle.io/v2
|
||||
kind: Files
|
||||
metadata:
|
||||
name: test
|
||||
spec:
|
||||
files:
|
||||
- path: /dev/null
|
||||
`
|
||||
|
||||
fi := writeManifestFile(t, manifest)
|
||||
o := newSyncOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil {
|
||||
t.Fatalf("expected nil for unrecognized apiVersion (warn-and-skip), got: %v", err)
|
||||
}
|
||||
if n := countArtifactsInStore(t, s); n != 0 {
|
||||
t.Errorf("expected 0 artifacts after skipped document, got %d", n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessContent_MultiDoc(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
fileURL := seedFileInHTTPServer(t, "multi.sh", "#!/bin/sh\necho multi")
|
||||
host, _ := newLocalhostRegistry(t)
|
||||
seedImage(t, host, "myorg/multiimage", "v1")
|
||||
|
||||
manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Files
|
||||
metadata:
|
||||
name: test-files
|
||||
spec:
|
||||
files:
|
||||
- path: %s
|
||||
---
|
||||
apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Charts
|
||||
metadata:
|
||||
name: test-charts
|
||||
spec:
|
||||
charts:
|
||||
- name: rancher-cluster-templates-0.5.2.tgz
|
||||
repoURL: %s
|
||||
---
|
||||
apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Images
|
||||
metadata:
|
||||
name: test-images
|
||||
spec:
|
||||
images:
|
||||
- name: %s/myorg/multiimage:v1
|
||||
`, fileURL, chartTestdataDir, host)
|
||||
|
||||
fi := writeManifestFile(t, manifest)
|
||||
o := newSyncOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil {
|
||||
t.Fatalf("processContent MultiDoc: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "multi.sh")
|
||||
assertArtifactInStore(t, s, "rancher-cluster-templates")
|
||||
assertArtifactInStore(t, s, "myorg/multiimage")
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// SyncCmd integration tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestSyncCmd_LocalFile(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
fileURL := seedFileInHTTPServer(t, "synced-local.sh", "#!/bin/sh\necho local")
|
||||
|
||||
manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Files
|
||||
metadata:
|
||||
name: test-sync-local
|
||||
spec:
|
||||
files:
|
||||
- path: %s
|
||||
`, fileURL)
|
||||
|
||||
// SyncCmd reads by file path, so write and close the manifest file first.
|
||||
manifestFile, err := os.CreateTemp(t.TempDir(), "hauler-sync-local-*.yaml")
|
||||
if err != nil {
|
||||
t.Fatalf("CreateTemp: %v", err)
|
||||
}
|
||||
manifestPath := manifestFile.Name()
|
||||
if _, err := manifestFile.WriteString(manifest); err != nil {
|
||||
manifestFile.Close()
|
||||
t.Fatalf("WriteString: %v", err)
|
||||
}
|
||||
manifestFile.Close()
|
||||
|
||||
o := newSyncOpts(s.Root)
|
||||
o.FileName = []string{manifestPath}
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := SyncCmd(ctx, o, s, rso, ro); err != nil {
|
||||
t.Fatalf("SyncCmd LocalFile: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "synced-local.sh")
|
||||
}
|
||||
|
||||
func TestSyncCmd_RemoteManifest(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
fileURL := seedFileInHTTPServer(t, "synced-remote.sh", "#!/bin/sh\necho remote")
|
||||
|
||||
manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Files
|
||||
metadata:
|
||||
name: test-sync-remote
|
||||
spec:
|
||||
files:
|
||||
- path: %s
|
||||
`, fileURL)
|
||||
|
||||
// Serve the manifest itself over HTTP so SyncCmd's remote-download path is exercised.
|
||||
manifestSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/yaml")
|
||||
io.WriteString(w, manifest) //nolint:errcheck
|
||||
}))
|
||||
t.Cleanup(manifestSrv.Close)
|
||||
|
||||
o := newSyncOpts(s.Root)
|
||||
o.FileName = []string{manifestSrv.URL + "/manifest.yaml"}
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := SyncCmd(ctx, o, s, rso, ro); err != nil {
|
||||
t.Fatalf("SyncCmd RemoteManifest: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "synced-remote.sh")
|
||||
}
|
||||
302
cmd/hauler/cli/store/testhelpers_test.go
Normal file
302
cmd/hauler/cli/store/testhelpers_test.go
Normal file
@@ -0,0 +1,302 @@
|
||||
package store
|
||||
|
||||
// testhelpers_test.go provides shared test helpers for cmd/hauler/cli/store tests.
|
||||
//
|
||||
// This file is in-package (package store) so tests can call unexported
|
||||
// helpers like storeImage, storeFile, rewriteReference, etc.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/registry"
|
||||
gcrv1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/empty"
|
||||
"github.com/google/go-containerregistry/pkg/v1/mutate"
|
||||
"github.com/google/go-containerregistry/pkg/v1/random"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
"github.com/google/go-containerregistry/pkg/v1/static"
|
||||
gvtypes "github.com/google/go-containerregistry/pkg/v1/types"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/rs/zerolog"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
// newTestStore creates a fresh store in a temp directory. Fatal on error.
|
||||
func newTestStore(t *testing.T) *store.Layout {
|
||||
t.Helper()
|
||||
s, err := store.NewLayout(t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("newTestStore: %v", err)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// newTestRegistry starts an in-memory OCI registry backed by httptest.
|
||||
// Returns the host (host:port) and remote.Options that route requests through
|
||||
// the server's plain-HTTP transport. The server is shut down via t.Cleanup.
|
||||
//
|
||||
// Pass the returned remoteOpts to seedImage/seedIndex and to store.AddImage
|
||||
// calls so that both sides use the same plain-HTTP transport.
|
||||
func newTestRegistry(t *testing.T) (host string, remoteOpts []remote.Option) {
|
||||
t.Helper()
|
||||
srv := httptest.NewServer(registry.New())
|
||||
t.Cleanup(srv.Close)
|
||||
host = strings.TrimPrefix(srv.URL, "http://")
|
||||
remoteOpts = []remote.Option{remote.WithTransport(srv.Client().Transport)}
|
||||
return host, remoteOpts
|
||||
}
|
||||
|
||||
// seedImage pushes a random single-platform image to the test registry.
|
||||
// repo is a bare path like "myorg/myimage"; tag is the image tag string.
|
||||
// Pass the remoteOpts from newTestRegistry so writes use the correct transport.
|
||||
func seedImage(t *testing.T, host, repo, tag string, opts ...remote.Option) gcrv1.Image {
|
||||
t.Helper()
|
||||
img, err := random.Image(512, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("seedImage random.Image: %v", err)
|
||||
}
|
||||
ref, err := name.NewTag(host+"/"+repo+":"+tag, name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("seedImage name.NewTag: %v", err)
|
||||
}
|
||||
if err := remote.Write(ref, img, opts...); err != nil {
|
||||
t.Fatalf("seedImage remote.Write: %v", err)
|
||||
}
|
||||
return img
|
||||
}
|
||||
|
||||
// seedIndex pushes a 2-platform image index (linux/amd64 + linux/arm64) to
|
||||
// the test registry. Pass the remoteOpts from newTestRegistry.
|
||||
func seedIndex(t *testing.T, host, repo, tag string, opts ...remote.Option) gcrv1.ImageIndex {
|
||||
t.Helper()
|
||||
amd64Img, err := random.Image(512, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("seedIndex random.Image amd64: %v", err)
|
||||
}
|
||||
arm64Img, err := random.Image(512, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("seedIndex random.Image arm64: %v", err)
|
||||
}
|
||||
idx := mutate.AppendManifests(
|
||||
empty.Index,
|
||||
mutate.IndexAddendum{
|
||||
Add: amd64Img,
|
||||
Descriptor: gcrv1.Descriptor{
|
||||
MediaType: gvtypes.OCIManifestSchema1,
|
||||
Platform: &gcrv1.Platform{OS: "linux", Architecture: "amd64"},
|
||||
},
|
||||
},
|
||||
mutate.IndexAddendum{
|
||||
Add: arm64Img,
|
||||
Descriptor: gcrv1.Descriptor{
|
||||
MediaType: gvtypes.OCIManifestSchema1,
|
||||
Platform: &gcrv1.Platform{OS: "linux", Architecture: "arm64"},
|
||||
},
|
||||
},
|
||||
)
|
||||
ref, err := name.NewTag(host+"/"+repo+":"+tag, name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("seedIndex name.NewTag: %v", err)
|
||||
}
|
||||
if err := remote.WriteIndex(ref, idx, opts...); err != nil {
|
||||
t.Fatalf("seedIndex remote.WriteIndex: %v", err)
|
||||
}
|
||||
return idx
|
||||
}
|
||||
|
||||
// seedFileInHTTPServer starts an httptest server serving a single file at
|
||||
// /filename with the given content. Returns the full URL. Server closed via t.Cleanup.
|
||||
func seedFileInHTTPServer(t *testing.T, filename, content string) string {
|
||||
t.Helper()
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/"+filename, func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
io.WriteString(w, content) //nolint:errcheck
|
||||
})
|
||||
srv := httptest.NewServer(mux)
|
||||
t.Cleanup(srv.Close)
|
||||
return srv.URL + "/" + filename
|
||||
}
|
||||
|
||||
// defaultRootOpts returns a StoreRootOpts pointed at storeDir with Retries=1.
|
||||
// Using Retries=1 avoids the 5-second RetriesInterval sleep in failure tests.
|
||||
func defaultRootOpts(storeDir string) *flags.StoreRootOpts {
|
||||
return &flags.StoreRootOpts{
|
||||
StoreDir: storeDir,
|
||||
Retries: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// defaultCliOpts returns CliRootOpts with error-level logging and IgnoreErrors=false.
|
||||
func defaultCliOpts() *flags.CliRootOpts {
|
||||
return &flags.CliRootOpts{
|
||||
IgnoreErrors: false,
|
||||
LogLevel: "error",
|
||||
}
|
||||
}
|
||||
|
||||
// newTestContext returns a context with a no-op zerolog logger attached so that
|
||||
// log.FromContext does not emit to stdout/stderr during tests.
|
||||
func newTestContext(t *testing.T) context.Context {
|
||||
t.Helper()
|
||||
zl := zerolog.New(io.Discard)
|
||||
return zl.WithContext(context.Background())
|
||||
}
|
||||
|
||||
// newAddChartOpts builds an AddChartOpts for loading a local .tgz chart from
|
||||
// repoURL (typically a testdata directory path) at the given version string.
|
||||
func newAddChartOpts(repoURL, version string) *flags.AddChartOpts {
|
||||
return &flags.AddChartOpts{
|
||||
ChartOpts: &action.ChartPathOptions{
|
||||
RepoURL: repoURL,
|
||||
Version: version,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// assertArtifactInStore walks the store and fails the test if no descriptor
|
||||
// has an AnnotationRefName containing refSubstring.
|
||||
func assertArtifactInStore(t *testing.T, s *store.Layout, refSubstring string) {
|
||||
t.Helper()
|
||||
found := false
|
||||
if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error {
|
||||
if strings.Contains(desc.Annotations[ocispec.AnnotationRefName], refSubstring) {
|
||||
found = true
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("assertArtifactInStore walk: %v", err)
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("no artifact with ref containing %q found in store", refSubstring)
|
||||
}
|
||||
}
|
||||
|
||||
// assertArtifactKindInStore walks the store and fails if no descriptor has an
|
||||
// AnnotationRefName containing refSubstring AND KindAnnotationName equal to kind.
|
||||
func assertArtifactKindInStore(t *testing.T, s *store.Layout, refSubstring, kind string) {
|
||||
t.Helper()
|
||||
found := false
|
||||
if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error {
|
||||
if strings.Contains(desc.Annotations[ocispec.AnnotationRefName], refSubstring) &&
|
||||
desc.Annotations[consts.KindAnnotationName] == kind {
|
||||
found = true
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("assertArtifactKindInStore walk: %v", err)
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("no artifact with ref containing %q and kind %q found in store", refSubstring, kind)
|
||||
}
|
||||
}
|
||||
|
||||
// countArtifactsInStore returns the number of descriptors in the store index.
|
||||
func countArtifactsInStore(t *testing.T, s *store.Layout) int {
|
||||
t.Helper()
|
||||
count := 0
|
||||
if err := s.OCI.Walk(func(_ string, _ ocispec.Descriptor) error {
|
||||
count++
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("countArtifactsInStore walk: %v", err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// seedCosignV2Artifacts pushes synthetic cosign v2 signature, attestation, and SBOM
|
||||
// manifests at the sha256-<hex>.sig / .att / .sbom tags derived from baseImg's digest.
|
||||
// Pass the remoteOpts from newLocalhostRegistry or newTestRegistry.
|
||||
func seedCosignV2Artifacts(t *testing.T, host, repo string, baseImg gcrv1.Image, opts ...remote.Option) {
|
||||
t.Helper()
|
||||
hash, err := baseImg.Digest()
|
||||
if err != nil {
|
||||
t.Fatalf("seedCosignV2Artifacts: get digest: %v", err)
|
||||
}
|
||||
tagPrefix := strings.ReplaceAll(hash.String(), ":", "-")
|
||||
for _, suffix := range []string{".sig", ".att", ".sbom"} {
|
||||
img, err := random.Image(64, 1)
|
||||
if err != nil {
|
||||
t.Fatalf("seedCosignV2Artifacts: random.Image (%s): %v", suffix, err)
|
||||
}
|
||||
ref, err := name.NewTag(host+"/"+repo+":"+tagPrefix+suffix, name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("seedCosignV2Artifacts: NewTag (%s): %v", suffix, err)
|
||||
}
|
||||
if err := remote.Write(ref, img, opts...); err != nil {
|
||||
t.Fatalf("seedCosignV2Artifacts: Write (%s): %v", suffix, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// seedOCI11Referrer pushes a synthetic OCI 1.1 / cosign v3 Sigstore bundle manifest
|
||||
// whose subject field points at baseImg. The in-process registry auto-registers it in
|
||||
// the referrers index so remote.Referrers returns it.
|
||||
// Pass the remoteOpts from newLocalhostRegistry or newTestRegistry.
|
||||
func seedOCI11Referrer(t *testing.T, host, repo string, baseImg gcrv1.Image, opts ...remote.Option) {
|
||||
t.Helper()
|
||||
hash, err := baseImg.Digest()
|
||||
if err != nil {
|
||||
t.Fatalf("seedOCI11Referrer: get digest: %v", err)
|
||||
}
|
||||
rawManifest, err := baseImg.RawManifest()
|
||||
if err != nil {
|
||||
t.Fatalf("seedOCI11Referrer: raw manifest: %v", err)
|
||||
}
|
||||
mt, err := baseImg.MediaType()
|
||||
if err != nil {
|
||||
t.Fatalf("seedOCI11Referrer: media type: %v", err)
|
||||
}
|
||||
baseDesc := gcrv1.Descriptor{
|
||||
MediaType: mt,
|
||||
Digest: hash,
|
||||
Size: int64(len(rawManifest)),
|
||||
}
|
||||
|
||||
bundleJSON := []byte(`{"mediaType":"application/vnd.dev.sigstore.bundle.v0.3+json"}`)
|
||||
bundleLayer := static.NewLayer(bundleJSON, gvtypes.MediaType(consts.SigstoreBundleMediaType))
|
||||
referrerImg, err := mutate.AppendLayers(empty.Image, bundleLayer)
|
||||
if err != nil {
|
||||
t.Fatalf("seedOCI11Referrer: AppendLayers: %v", err)
|
||||
}
|
||||
referrerImg = mutate.MediaType(referrerImg, gvtypes.OCIManifestSchema1)
|
||||
referrerImg = mutate.ConfigMediaType(referrerImg, gvtypes.MediaType(consts.OCIEmptyConfigMediaType))
|
||||
referrerImg = mutate.Subject(referrerImg, baseDesc).(gcrv1.Image)
|
||||
|
||||
referrerTag, err := name.NewTag(host+"/"+repo+":bundle-referrer", name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("seedOCI11Referrer: NewTag: %v", err)
|
||||
}
|
||||
if err := remote.Write(referrerTag, referrerImg, opts...); err != nil {
|
||||
t.Fatalf("seedOCI11Referrer: Write: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// assertReferrerInStore walks the store and fails if no descriptor has a kind
|
||||
// annotation with the KindAnnotationReferrers prefix and a ref containing refSubstring.
|
||||
func assertReferrerInStore(t *testing.T, s *store.Layout, refSubstring string) {
|
||||
t.Helper()
|
||||
found := false
|
||||
if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error {
|
||||
if strings.Contains(desc.Annotations[ocispec.AnnotationRefName], refSubstring) &&
|
||||
strings.HasPrefix(desc.Annotations[consts.KindAnnotationName], consts.KindAnnotationReferrers) {
|
||||
found = true
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("assertReferrerInStore walk: %v", err)
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("no OCI referrer with ref containing %q found in store", refSubstring)
|
||||
}
|
||||
}
|
||||
@@ -5,11 +5,12 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/rancherfederal/hauler/internal/version"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/internal/version"
|
||||
)
|
||||
|
||||
func addVersion(parent *cobra.Command) {
|
||||
var json bool
|
||||
func addVersion(parent *cobra.Command, ro *flags.CliRootOpts) {
|
||||
o := &flags.VersionOpts{}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "version",
|
||||
@@ -17,19 +18,24 @@ func addVersion(parent *cobra.Command) {
|
||||
Aliases: []string{"v"},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
v := version.GetVersionInfo()
|
||||
response := v.String()
|
||||
if json {
|
||||
data, err := v.JSONString()
|
||||
v.Name = cmd.Root().Name()
|
||||
v.Description = cmd.Root().Short
|
||||
v.FontName = "starwars"
|
||||
cmd.SetOut(cmd.OutOrStdout())
|
||||
|
||||
if o.JSON {
|
||||
out, err := v.JSONString()
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("unable to generate JSON from version info: %w", err)
|
||||
}
|
||||
response = data
|
||||
cmd.Println(out)
|
||||
} else {
|
||||
cmd.Println(v.String())
|
||||
}
|
||||
fmt.Print(response)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
cmd.Flags().BoolVar(&json, "json", false, "toggle output in JSON")
|
||||
o.AddFlags(cmd)
|
||||
|
||||
parent.AddCommand(cmd)
|
||||
}
|
||||
|
||||
@@ -4,8 +4,9 @@ import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/rancherfederal/hauler/cmd/hauler/cli"
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/cmd/hauler/cli"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -15,7 +16,9 @@ func main() {
|
||||
logger := log.NewLogger(os.Stdout)
|
||||
ctx = logger.WithContext(ctx)
|
||||
|
||||
if err := cli.New().ExecuteContext(ctx); err != nil {
|
||||
if err := cli.New(ctx, &flags.CliRootOpts{}).ExecuteContext(ctx); err != nil {
|
||||
logger.Errorf("%v", err)
|
||||
cancel()
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
410
go.mod
410
go.mod
@@ -1,171 +1,351 @@
|
||||
module github.com/rancherfederal/hauler
|
||||
module hauler.dev/go/hauler
|
||||
|
||||
go 1.21
|
||||
go 1.25.5
|
||||
|
||||
replace github.com/distribution/distribution/v3 => github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2
|
||||
|
||||
require (
|
||||
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be
|
||||
github.com/containerd/containerd v1.7.6
|
||||
github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2
|
||||
github.com/docker/go-metrics v0.0.1
|
||||
github.com/google/go-containerregistry v0.16.1
|
||||
github.com/gorilla/handlers v1.5.1
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/mholt/archiver/v3 v3.5.1
|
||||
github.com/containerd/containerd v1.7.29
|
||||
github.com/containerd/errdefs v1.0.0
|
||||
github.com/distribution/distribution/v3 v3.0.0
|
||||
github.com/google/go-containerregistry v0.20.7
|
||||
github.com/gorilla/handlers v1.5.2
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/mholt/archives v0.1.5
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/olekukonko/tablewriter v1.1.2
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.1.0-rc5
|
||||
github.com/opencontainers/image-spec v1.1.1
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/rs/zerolog v1.31.0
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/afero v1.10.0
|
||||
github.com/spf13/cobra v1.7.0
|
||||
golang.org/x/sync v0.4.0
|
||||
helm.sh/helm/v3 v3.13.0
|
||||
k8s.io/apimachinery v0.28.2
|
||||
k8s.io/client-go v0.28.2
|
||||
oras.land/oras-go v1.2.4
|
||||
github.com/rs/zerolog v1.34.0
|
||||
github.com/sigstore/cosign/v3 v3.0.5
|
||||
github.com/sirupsen/logrus v1.9.4
|
||||
github.com/spf13/afero v1.15.0
|
||||
github.com/spf13/cobra v1.10.2
|
||||
golang.org/x/sync v0.19.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
helm.sh/helm/v3 v3.19.0
|
||||
k8s.io/apimachinery v0.35.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/auth v0.18.1 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.9.0 // indirect
|
||||
cuelabs.dev/go/oci/ociregistry v0.0.0-20250722084951-074d06050084 // indirect
|
||||
cuelang.org/go v0.15.4 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/BurntSushi/toml v1.3.2 // indirect
|
||||
github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
|
||||
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/BurntSushi/toml v1.5.0 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.2.1 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.4.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.4 // indirect
|
||||
github.com/Microsoft/hcsshim v0.11.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/STARRY-S/zip v0.2.3 // indirect
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
|
||||
github.com/andybalholm/brotli v1.0.1 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
|
||||
github.com/ThalesIgnite/crypto11 v1.2.5 // indirect
|
||||
github.com/agnivade/levenshtein v1.2.1 // indirect
|
||||
github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect
|
||||
github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect
|
||||
github.com/alibabacloud-go/cr-20181201 v1.0.10 // indirect
|
||||
github.com/alibabacloud-go/darabonba-openapi v0.2.1 // indirect
|
||||
github.com/alibabacloud-go/debug v1.0.0 // indirect
|
||||
github.com/alibabacloud-go/endpoint-util v1.1.1 // indirect
|
||||
github.com/alibabacloud-go/openapi-util v0.1.0 // indirect
|
||||
github.com/alibabacloud-go/tea v1.2.1 // indirect
|
||||
github.com/alibabacloud-go/tea-utils v1.4.5 // indirect
|
||||
github.com/alibabacloud-go/tea-xml v1.1.3 // indirect
|
||||
github.com/aliyun/credentials-go v1.3.2 // indirect
|
||||
github.com/andybalholm/brotli v1.2.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecr v1.51.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.38.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
|
||||
github.com/aws/smithy-go v1.24.0 // indirect
|
||||
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.11.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver v3.5.1+incompatible // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/bodgit/plumbing v1.3.0 // indirect
|
||||
github.com/bodgit/sevenzip v1.6.1 // indirect
|
||||
github.com/bodgit/windows v1.0.1 // indirect
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect
|
||||
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd // indirect
|
||||
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b // indirect
|
||||
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/buildkite/agent/v3 v3.115.4 // indirect
|
||||
github.com/buildkite/go-pipeline v0.16.0 // indirect
|
||||
github.com/buildkite/interpolate v0.1.5 // indirect
|
||||
github.com/buildkite/roko v1.4.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/cli v24.0.6+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.2+incompatible // indirect
|
||||
github.com/docker/docker v24.0.6+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect
|
||||
github.com/clbanning/mxj/v2 v2.7.0 // indirect
|
||||
github.com/clipperhouse/displaywidth v0.6.0 // indirect
|
||||
github.com/clipperhouse/stringish v0.1.1 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.3.0 // indirect
|
||||
github.com/cockroachdb/apd/v3 v3.2.1 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v1.0.0-rc.2 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
|
||||
github.com/coreos/go-oidc/v3 v3.17.0 // indirect
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
|
||||
github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
|
||||
github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
|
||||
github.com/dimchansky/utfbom v1.1.1 // indirect
|
||||
github.com/docker/cli v29.2.0+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.9.4 // indirect
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 // indirect
|
||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.3 // indirect
|
||||
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
|
||||
github.com/emicklei/proto v1.14.2 // indirect
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-chi/chi/v5 v5.2.4 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
||||
github.com/go-logr/logr v1.2.4 // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-openapi/analysis v0.24.1 // indirect
|
||||
github.com/go-openapi/errors v0.22.6 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.22.4 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.4 // indirect
|
||||
github.com/go-openapi/loads v0.23.2 // indirect
|
||||
github.com/go-openapi/runtime v0.29.2 // indirect
|
||||
github.com/go-openapi/spec v0.22.3 // indirect
|
||||
github.com/go-openapi/strfmt v0.25.0 // indirect
|
||||
github.com/go-openapi/swag v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/conv v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/fileutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/jsonname v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/jsonutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/loading v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/mangling v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/netutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/stringutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/typeutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
|
||||
github.com/go-openapi/validate v0.25.1 // indirect
|
||||
github.com/go-piv/piv-go/v2 v2.4.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/golang/snappy v0.0.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/gomodule/redigo v1.8.2 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/certificate-transparency-go v1.3.2 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/go-github/v73 v73.0.0 // indirect
|
||||
github.com/google/go-querystring v1.2.0 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.17.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/gosuri/uitable v0.0.4 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.5 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/huandu/xstrings v1.4.0 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
|
||||
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/in-toto/attestation v1.1.2 // indirect
|
||||
github.com/in-toto/in-toto-golang v0.9.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jmoiron/sqlx v1.3.5 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
|
||||
github.com/jmoiron/sqlx v1.4.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.16.5 // indirect
|
||||
github.com/klauspost/pgzip v1.2.5 // indirect
|
||||
github.com/klauspost/compress v1.18.2 // indirect
|
||||
github.com/klauspost/pgzip v1.2.6 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
github.com/lestrrat-go/blackmagic v1.0.4 // indirect
|
||||
github.com/lestrrat-go/dsig v1.0.0 // indirect
|
||||
github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect
|
||||
github.com/lestrrat-go/httpcc v1.0.1 // indirect
|
||||
github.com/lestrrat-go/httprc/v3 v3.0.1 // indirect
|
||||
github.com/lestrrat-go/jwx/v3 v3.0.12 // indirect
|
||||
github.com/lestrrat-go/option v1.0.1 // indirect
|
||||
github.com/lestrrat-go/option/v2 v2.0.0 // indirect
|
||||
github.com/letsencrypt/boulder v0.20251110.0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.19 // indirect
|
||||
github.com/miekg/pkcs11 v1.1.2 // indirect
|
||||
github.com/mikelolasagasti/xz v1.0.1 // indirect
|
||||
github.com/minio/minlz v1.0.1 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/mozillazg/docker-credential-acr-helper v0.4.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/nwaples/rardecode v1.1.0 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
|
||||
github.com/nwaples/rardecode/v2 v2.2.1 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/oleiade/reflections v1.1.0 // indirect
|
||||
github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 // indirect
|
||||
github.com/olekukonko/errors v1.1.0 // indirect
|
||||
github.com/olekukonko/ll v0.1.3 // indirect
|
||||
github.com/open-policy-agent/opa v1.12.3 // indirect
|
||||
github.com/pborman/uuid v1.2.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.2 // indirect
|
||||
github.com/prometheus/client_golang v1.16.0 // indirect
|
||||
github.com/prometheus/client_model v0.4.0 // indirect
|
||||
github.com/prometheus/common v0.44.0 // indirect
|
||||
github.com/prometheus/procfs v0.10.1 // indirect
|
||||
github.com/rubenv/sql-migrate v1.5.2 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.22 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.23.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.67.5 // indirect
|
||||
github.com/prometheus/procfs v0.19.2 // indirect
|
||||
github.com/protocolbuffers/txtpbfmt v0.0.0-20251016062345-16587c79cd91 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect
|
||||
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||
github.com/rubenv/sql-migrate v1.8.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/shopspring/decimal v1.3.1 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/ulikunitz/xz v0.5.9 // indirect
|
||||
github.com/vbatts/tar-split v0.11.3 // indirect
|
||||
github.com/sagikazarmark/locafero v0.11.0 // indirect
|
||||
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
|
||||
github.com/sassoftware/relic v7.2.1+incompatible // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.10.0 // indirect
|
||||
github.com/segmentio/asm v1.2.1 // indirect
|
||||
github.com/shibumi/go-pathspec v1.3.0 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sigstore/fulcio v1.8.5 // indirect
|
||||
github.com/sigstore/protobuf-specs v0.5.0 // indirect
|
||||
github.com/sigstore/rekor v1.5.0 // indirect
|
||||
github.com/sigstore/rekor-tiles/v2 v2.2.0 // indirect
|
||||
github.com/sigstore/sigstore v1.10.4 // indirect
|
||||
github.com/sigstore/sigstore-go v1.1.4 // indirect
|
||||
github.com/sigstore/timestamp-authority/v2 v2.0.4 // indirect
|
||||
github.com/sorairolake/lzip-go v0.3.8 // indirect
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/spf13/viper v1.21.0 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
|
||||
github.com/tchap/go-patricia/v2 v2.3.3 // indirect
|
||||
github.com/thales-e-security/pool v0.0.2 // indirect
|
||||
github.com/theupdateframework/go-tuf v0.7.0 // indirect
|
||||
github.com/theupdateframework/go-tuf/v2 v2.4.1 // indirect
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
|
||||
github.com/tjfoc/gmsm v1.4.1 // indirect
|
||||
github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect
|
||||
github.com/transparency-dev/merkle v0.0.2 // indirect
|
||||
github.com/ulikunitz/xz v0.5.15 // indirect
|
||||
github.com/valyala/fastjson v1.6.4 // indirect
|
||||
github.com/vbatts/tar-split v0.12.2 // indirect
|
||||
github.com/vektah/gqlparser/v2 v2.5.31 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
github.com/yashtewari/glob-intersection v0.2.0 // indirect
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 // indirect
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 // indirect
|
||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f // indirect
|
||||
go.opentelemetry.io/otel v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.16.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.16.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
golang.org/x/crypto v0.14.0 // indirect
|
||||
golang.org/x/net v0.17.0 // indirect
|
||||
golang.org/x/oauth2 v0.8.0 // indirect
|
||||
golang.org/x/sys v0.13.0 // indirect
|
||||
golang.org/x/term v0.13.0 // indirect
|
||||
golang.org/x/text v0.13.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
|
||||
google.golang.org/grpc v1.54.0 // indirect
|
||||
google.golang.org/protobuf v1.30.0 // indirect
|
||||
gitlab.com/gitlab-org/api/client-go v1.25.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.17.6 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
|
||||
go.opentelemetry.io/otel v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.40.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.1 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
|
||||
golang.org/x/crypto v0.47.0 // indirect
|
||||
golang.org/x/mod v0.32.0 // indirect
|
||||
golang.org/x/net v0.49.0 // indirect
|
||||
golang.org/x/oauth2 v0.35.0 // indirect
|
||||
golang.org/x/sys v0.40.0 // indirect
|
||||
golang.org/x/term v0.39.0 // indirect
|
||||
golang.org/x/text v0.33.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
google.golang.org/api v0.267.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect
|
||||
google.golang.org/grpc v1.78.0 // indirect
|
||||
google.golang.org/protobuf v1.36.11 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/api v0.28.2 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.28.2 // indirect
|
||||
k8s.io/apiserver v0.28.2 // indirect
|
||||
k8s.io/cli-runtime v0.28.2 // indirect
|
||||
k8s.io/component-base v0.28.2 // indirect
|
||||
k8s.io/klog/v2 v2.100.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
|
||||
k8s.io/kubectl v0.28.2 // indirect
|
||||
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
k8s.io/api v0.35.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.34.0 // indirect
|
||||
k8s.io/apiserver v0.34.0 // indirect
|
||||
k8s.io/cli-runtime v0.34.0 // indirect
|
||||
k8s.io/client-go v0.35.1 // indirect
|
||||
k8s.io/component-base v0.34.0 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
|
||||
k8s.io/kubectl v0.34.0 // indirect
|
||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
|
||||
oras.land/oras-go/v2 v2.6.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.20.1 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/release-utils v0.12.3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||
)
|
||||
|
||||
223
install.sh
Executable file
223
install.sh
Executable file
@@ -0,0 +1,223 @@
|
||||
#!/bin/bash

# Usage:
# - curl -sfL... | ENV_VAR=... bash
# - ENV_VAR=... ./install.sh
#
# Install Usage:
# Install Latest Release
# - curl -sfL https://get.hauler.dev | bash
# - ./install.sh
#
# Install Specific Release
# - curl -sfL https://get.hauler.dev | HAULER_VERSION=1.0.0 bash
# - HAULER_VERSION=1.0.0 ./install.sh
#
# Set Install Directory
# - curl -sfL https://get.hauler.dev | HAULER_INSTALL_DIR=/usr/local/bin bash
# - HAULER_INSTALL_DIR=/usr/local/bin ./install.sh
#
# Set Hauler Directory
# - curl -sfL https://get.hauler.dev | HAULER_DIR=$HOME/.hauler bash
# - HAULER_DIR=$HOME/.hauler ./install.sh
#
# Debug Usage:
# - curl -sfL https://get.hauler.dev | HAULER_DEBUG=true bash
# - HAULER_DEBUG=true ./install.sh
#
# Uninstall Usage:
# - curl -sfL https://get.hauler.dev | HAULER_UNINSTALL=true bash
# - HAULER_UNINSTALL=true ./install.sh
#
# Documentation:
# - https://hauler.dev
# - https://github.com/hauler-dev/hauler

# set functions for logging
function verbose {
    echo "$1"
}

function info {
    echo && echo "[INFO] Hauler: $1"
}

function warn {
    echo && echo "[WARN] Hauler: $1"
}

function fatal {
    echo && echo "[ERROR] Hauler: $1"
    exit 1
}

# debug hauler from argument or environment variable
if [ "${HAULER_DEBUG}" = "true" ]; then
    set -x
fi

# start hauler preflight checks
info "Starting Preflight Checks..."

# check for required packages and dependencies
for cmd in echo curl grep sed rm mkdir awk openssl tar install source; do
    if ! command -v "$cmd" &> /dev/null; then
        fatal "$cmd is required to install Hauler"
    fi
done

# set install directory from argument or environment variable
HAULER_INSTALL_DIR=${HAULER_INSTALL_DIR:-/usr/local/bin}

# set hauler directory from argument or environment variable
# NOTE(fix): this default must be resolved BEFORE the uninstall branch below.
# Previously it was only set after uninstall, so HAULER_UNINSTALL=true with an
# unset HAULER_DIR ran `rm -rf ""` (which fails) instead of removing $HOME/.hauler
HAULER_DIR=${HAULER_DIR:-$HOME/.hauler}

# ensure install directory exists and/or create it
if [ ! -d "${HAULER_INSTALL_DIR}" ]; then
    mkdir -p "${HAULER_INSTALL_DIR}" || fatal "Failed to Create Install Directory: ${HAULER_INSTALL_DIR}"
fi

# ensure install directory is writable (by user or root privileges)
if [ ! -w "${HAULER_INSTALL_DIR}" ]; then
    if [ "$(id -u)" -ne 0 ]; then
        fatal "Root privileges are required to install Hauler to Directory: ${HAULER_INSTALL_DIR}"
    fi
fi

# uninstall hauler from argument or environment variable
if [ "${HAULER_UNINSTALL}" = "true" ]; then
    # remove the hauler binary
    rm -rf "${HAULER_INSTALL_DIR}/hauler" || fatal "Failed to Remove Hauler from ${HAULER_INSTALL_DIR}"

    # remove the hauler directory
    rm -rf "${HAULER_DIR}" || fatal "Failed to Remove Hauler Directory: ${HAULER_DIR}"

    info "Successfully Uninstalled Hauler" && echo
    exit 0
fi

# set version environment variable
if [ -z "${HAULER_VERSION}" ]; then
    # attempt to retrieve the latest version from GitHub
    HAULER_VERSION=$(curl -sI https://github.com/hauler-dev/hauler/releases/latest | grep -i location | sed -e 's#.*tag/v##' -e 's/^[[:space:]]*//g' -e 's/[[:space:]]*$//g')

    # exit if the version could not be detected
    if [ -z "${HAULER_VERSION}" ]; then
        fatal "HAULER_VERSION is unable to be detected and/or retrieved from GitHub. Please set: HAULER_VERSION"
    fi
fi

# detect the operating system
PLATFORM=$(uname -s | tr '[:upper:]' '[:lower:]')
case $PLATFORM in
    linux)
        PLATFORM="linux"
        ;;
    darwin)
        PLATFORM="darwin"
        ;;
    *)
        fatal "Unsupported Platform: ${PLATFORM}"
        ;;
esac

# detect the architecture
ARCH=$(uname -m)
case $ARCH in
    x86_64 | x86-32 | x64 | x32 | amd64)
        ARCH="amd64"
        ;;
    aarch64 | arm64)
        ARCH="arm64"
        ;;
    *)
        fatal "Unsupported Architecture: ${ARCH}"
        ;;
esac

# start hauler installation
info "Starting Installation..."

# display the version, platform, and architecture
verbose "- Version: v${HAULER_VERSION}"
verbose "- Platform: ${PLATFORM}"
verbose "- Architecture: ${ARCH}"
verbose "- Install Directory: ${HAULER_INSTALL_DIR}"
verbose "- Hauler Directory: ${HAULER_DIR}"

# ensure hauler directory exists and/or create it
if [ ! -d "${HAULER_DIR}" ]; then
    mkdir -p "${HAULER_DIR}" || fatal "Failed to Create Hauler Directory: ${HAULER_DIR}"
fi

# ensure hauler directory is writable (by user or root privileges)
chmod -R 777 "${HAULER_DIR}" || fatal "Failed to Update Permissions of Hauler Directory: ${HAULER_DIR}"

# change to hauler directory
cd "${HAULER_DIR}" || fatal "Failed to Change Directory to Hauler Directory: ${HAULER_DIR}"

# start hauler artifacts download
info "Starting Download..."

# download the checksum file
if ! curl -sfOL "https://github.com/hauler-dev/hauler/releases/download/v${HAULER_VERSION}/hauler_${HAULER_VERSION}_checksums.txt"; then
    fatal "Failed to Download: hauler_${HAULER_VERSION}_checksums.txt"
fi

# download the archive file
if ! curl -sfOL "https://github.com/hauler-dev/hauler/releases/download/v${HAULER_VERSION}/hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz"; then
    fatal "Failed to Download: hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz"
fi

# start hauler checksum verification
info "Starting Checksum Verification..."

# verify the Hauler checksum
EXPECTED_CHECKSUM=$(awk -v HAULER_VERSION="${HAULER_VERSION}" -v PLATFORM="${PLATFORM}" -v ARCH="${ARCH}" '$2 == "hauler_"HAULER_VERSION"_"PLATFORM"_"ARCH".tar.gz" {print $1}' "hauler_${HAULER_VERSION}_checksums.txt")
DETERMINED_CHECKSUM=$(openssl dgst -sha256 "hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz" | awk '{print $2}')

if [ -z "${EXPECTED_CHECKSUM}" ]; then
    fatal "Failed to Locate Checksum: hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz"
elif [ "${DETERMINED_CHECKSUM}" = "${EXPECTED_CHECKSUM}" ]; then
    verbose "- Expected Checksum: ${EXPECTED_CHECKSUM}"
    verbose "- Determined Checksum: ${DETERMINED_CHECKSUM}"
    verbose "- Successfully Verified Checksum: hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz"
else
    verbose "- Expected: ${EXPECTED_CHECKSUM}"
    verbose "- Determined: ${DETERMINED_CHECKSUM}"
    fatal "Failed Checksum Verification: hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz"
fi

# uncompress the hauler archive
tar -xzf "hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz" || fatal "Failed to Extract: hauler_${HAULER_VERSION}_${PLATFORM}_${ARCH}.tar.gz"

# install the hauler binary
install -m 755 hauler "${HAULER_INSTALL_DIR}" || fatal "Failed to Install Hauler: ${HAULER_INSTALL_DIR}"

# add hauler to the path
if [[ ":$PATH:" != *":${HAULER_INSTALL_DIR}:"* ]]; then
    if [ -f "$HOME/.bashrc" ]; then
        echo "export PATH=\$PATH:${HAULER_INSTALL_DIR}" >> "$HOME/.bashrc"
        source "$HOME/.bashrc"
    elif [ -f "$HOME/.bash_profile" ]; then
        echo "export PATH=\$PATH:${HAULER_INSTALL_DIR}" >> "$HOME/.bash_profile"
        source "$HOME/.bash_profile"
    elif [ -f "$HOME/.zshrc" ]; then
        echo "export PATH=\$PATH:${HAULER_INSTALL_DIR}" >> "$HOME/.zshrc"
        source "$HOME/.zshrc"
    elif [ -f "$HOME/.profile" ]; then
        echo "export PATH=\$PATH:${HAULER_INSTALL_DIR}" >> "$HOME/.profile"
        source "$HOME/.profile"
    else
        warn "Failed to add ${HAULER_INSTALL_DIR} to PATH: Unsupported Shell"
    fi
fi

# display success message
info "Successfully Installed Hauler at ${HAULER_INSTALL_DIR}/hauler"

# display availability message
info "Hauler v${HAULER_VERSION} is now available for use!"

# display hauler docs message
verbose "- Documentation: https://hauler.dev" && echo
|
||||
81
internal/flags/add.go
Normal file
81
internal/flags/add.go
Normal file
@@ -0,0 +1,81 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
)
|
||||
|
||||
type AddImageOpts struct {
|
||||
*StoreRootOpts
|
||||
Name string
|
||||
Key string
|
||||
CertOidcIssuer string
|
||||
CertOidcIssuerRegexp string
|
||||
CertIdentity string
|
||||
CertIdentityRegexp string
|
||||
CertGithubWorkflowRepository string
|
||||
Tlog bool
|
||||
Platform string
|
||||
Rewrite string
|
||||
}
|
||||
|
||||
func (o *AddImageOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
f.StringVarP(&o.Key, "key", "k", "", "(Optional) Location of public key to use for signature verification")
|
||||
f.StringVar(&o.CertIdentity, "certificate-identity", "", "(Optional) Cosign certificate-identity (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertIdentityRegexp, "certificate-identity-regexp", "", "(Optional) Cosign certificate-identity-regexp (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertOidcIssuer, "certificate-oidc-issuer", "", "(Optional) Cosign option to validate oidc issuer")
|
||||
f.StringVar(&o.CertOidcIssuerRegexp, "certificate-oidc-issuer-regexp", "", "(Optional) Cosign option to validate oidc issuer with regex")
|
||||
f.StringVar(&o.CertGithubWorkflowRepository, "certificate-github-workflow-repository", "", "(Optional) Cosign certificate-github-workflow-repository option")
|
||||
f.BoolVar(&o.Tlog, "use-tlog-verify", false, "(Optional) Enable transparency log verification for key-based signature verification (keyless/OIDC verification always uses the tlog)")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform of the image... i.e. linux/amd64 (defaults to all)")
|
||||
f.StringVar(&o.Rewrite, "rewrite", "", "(EXPERIMENTAL & Optional) Rewrite artifact path to specified string")
|
||||
}
|
||||
|
||||
type AddFileOpts struct {
|
||||
*StoreRootOpts
|
||||
Name string
|
||||
}
|
||||
|
||||
func (o *AddFileOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
f.StringVarP(&o.Name, "name", "n", "", "(Optional) Rewrite the name of the file")
|
||||
}
|
||||
|
||||
type AddChartOpts struct {
|
||||
*StoreRootOpts
|
||||
|
||||
ChartOpts *action.ChartPathOptions
|
||||
Rewrite string
|
||||
AddDependencies bool
|
||||
AddImages bool
|
||||
HelmValues string
|
||||
Platform string
|
||||
Registry string
|
||||
KubeVersion string
|
||||
}
|
||||
|
||||
func (o *AddChartOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVar(&o.ChartOpts.RepoURL, "repo", "", "Location of the chart (https:// | http:// | oci://)")
|
||||
f.StringVar(&o.ChartOpts.Version, "version", "", "(Optional) Specify the version of the chart (v1.0.0 | 2.0.0 | ^2.0.0)")
|
||||
f.BoolVar(&o.ChartOpts.Verify, "verify", false, "(Optional) Verify the chart before fetching it")
|
||||
f.StringVar(&o.ChartOpts.Username, "username", "", "(Optional) Username to use for authentication")
|
||||
f.StringVar(&o.ChartOpts.Password, "password", "", "(Optional) Password to use for authentication")
|
||||
f.StringVar(&o.ChartOpts.CertFile, "cert-file", "", "(Optional) Location of the TLS Certificate to use for client authentication")
|
||||
f.StringVar(&o.ChartOpts.KeyFile, "key-file", "", "(Optional) Location of the TLS Key to use for client authentication")
|
||||
f.BoolVar(&o.ChartOpts.InsecureSkipTLSverify, "insecure-skip-tls-verify", false, "(Optional) Skip TLS certificate verification")
|
||||
f.StringVar(&o.ChartOpts.CaFile, "ca-file", "", "(Optional) Location of CA Bundle to enable certification verification")
|
||||
f.StringVar(&o.Rewrite, "rewrite", "", "(EXPERIMENTAL & Optional) Rewrite artifact path to specified string")
|
||||
|
||||
cmd.MarkFlagsRequiredTogether("username", "password")
|
||||
cmd.MarkFlagsRequiredTogether("cert-file", "key-file", "ca-file")
|
||||
|
||||
cmd.Flags().BoolVar(&o.AddDependencies, "add-dependencies", false, "(EXPERIMENTAL & Optional) Fetch dependent helm charts")
|
||||
f.BoolVar(&o.AddImages, "add-images", false, "(EXPERIMENTAL & Optional) Fetch images referenced in helm charts")
|
||||
f.StringVar(&o.HelmValues, "values", "", "(EXPERIMENTAL & Optional) Specify helm chart values when fetching images")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform of the image, e.g. linux/amd64")
|
||||
f.StringVarP(&o.Registry, "registry", "g", "", "(Optional) Specify the registry of the image for images that do not alredy define one")
|
||||
f.StringVar(&o.KubeVersion, "kube-version", "v1.34.1", "(EXPERIMENTAL & Optional) Override the kubernetes version for helm template rendering")
|
||||
}
|
||||
17
internal/flags/cli.go
Normal file
17
internal/flags/cli.go
Normal file
@@ -0,0 +1,17 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
type CliRootOpts struct {
|
||||
LogLevel string
|
||||
HaulerDir string
|
||||
IgnoreErrors bool
|
||||
}
|
||||
|
||||
func AddRootFlags(cmd *cobra.Command, ro *CliRootOpts) {
|
||||
pf := cmd.PersistentFlags()
|
||||
|
||||
pf.StringVarP(&ro.LogLevel, "log-level", "l", "info", "Set the logging level (i.e. info, debug, warn)")
|
||||
pf.StringVarP(&ro.HaulerDir, "haulerdir", "d", "", "Set the location of the hauler directory (default $HOME/.hauler)")
|
||||
pf.BoolVar(&ro.IgnoreErrors, "ignore-errors", false, "Ignore/Bypass errors (i.e. warn on error) (defaults false)")
|
||||
}
|
||||
38
internal/flags/copy.go
Normal file
38
internal/flags/copy.go
Normal file
@@ -0,0 +1,38 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
type CopyOpts struct {
|
||||
*StoreRootOpts
|
||||
|
||||
Username string
|
||||
Password string
|
||||
Insecure bool
|
||||
PlainHTTP bool
|
||||
Only string
|
||||
}
|
||||
|
||||
func (o *CopyOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.Username, "username", "u", "", "(Deprecated) Please use 'hauler login'")
|
||||
f.StringVarP(&o.Password, "password", "p", "", "(Deprecated) Please use 'hauler login'")
|
||||
f.BoolVar(&o.Insecure, "insecure", false, "(Optional) Allow insecure connections")
|
||||
f.BoolVar(&o.PlainHTTP, "plain-http", false, "(Optional) Allow plain HTTP connections")
|
||||
f.StringVarP(&o.Only, "only", "o", "", "(Optional) Custom string array to only copy specific 'image' items")
|
||||
|
||||
cmd.MarkFlagsRequiredTogether("username", "password")
|
||||
|
||||
if err := f.MarkDeprecated("username", "please use 'hauler login'"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := f.MarkDeprecated("password", "please use 'hauler login'"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := f.MarkHidden("username"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := f.MarkHidden("password"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
14
internal/flags/extract.go
Normal file
14
internal/flags/extract.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
type ExtractOpts struct {
|
||||
*StoreRootOpts
|
||||
DestinationDir string
|
||||
}
|
||||
|
||||
func (o *ExtractOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.DestinationDir, "output", "o", "", "(Optional) Set the directory to output (defaults to current directory)")
|
||||
}
|
||||
22
internal/flags/info.go
Normal file
22
internal/flags/info.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
type InfoOpts struct {
|
||||
*StoreRootOpts
|
||||
|
||||
OutputFormat string
|
||||
TypeFilter string
|
||||
SizeUnit string
|
||||
ListRepos bool
|
||||
ShowDigests bool
|
||||
}
|
||||
|
||||
func (o *InfoOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.OutputFormat, "output", "o", "table", "(Optional) Specify the output format (table | json)")
|
||||
f.StringVar(&o.TypeFilter, "type", "all", "(Optional) Filter on content type (image | chart | file | sigs | atts | sbom | referrer)")
|
||||
f.BoolVar(&o.ListRepos, "list-repos", false, "(Optional) List all repository names")
|
||||
f.BoolVar(&o.ShowDigests, "digests", false, "(Optional) Show digests of each artifact in the output table")
|
||||
}
|
||||
17
internal/flags/load.go
Normal file
17
internal/flags/load.go
Normal file
@@ -0,0 +1,17 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type LoadOpts struct {
|
||||
*StoreRootOpts
|
||||
FileName []string
|
||||
}
|
||||
|
||||
func (o *LoadOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringSliceVarP(&o.FileName, "filename", "f", []string{consts.DefaultHaulerArchiveName}, "(Optional) Specify the name of inputted haul(s)")
|
||||
}
|
||||
11
internal/flags/remove.go
Normal file
11
internal/flags/remove.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
type RemoveOpts struct {
|
||||
Force bool // skip remove confirmation
|
||||
}
|
||||
|
||||
func (o *RemoveOpts) AddFlags(cmd *cobra.Command) {
|
||||
cmd.Flags().BoolVarP(&o.Force, "force", "f", false, "(Optional) Remove artifact(s) without confirmation")
|
||||
}
|
||||
22
internal/flags/save.go
Normal file
22
internal/flags/save.go
Normal file
@@ -0,0 +1,22 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type SaveOpts struct {
|
||||
*StoreRootOpts
|
||||
FileName string
|
||||
Platform string
|
||||
ContainerdCompatibility bool
|
||||
}
|
||||
|
||||
func (o *SaveOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.FileName, "filename", "f", consts.DefaultHaulerArchiveName, "(Optional) Specify the name of outputted haul")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform for runtime imports... i.e. linux/amd64 (unspecified implies all)")
|
||||
f.BoolVar(&o.ContainerdCompatibility, "containerd", false, "(Optional) Enable import compatibility with containerd... removes oci-layout from the haul")
|
||||
|
||||
}
|
||||
56
internal/flags/serve.go
Normal file
56
internal/flags/serve.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type ServeRegistryOpts struct {
|
||||
*StoreRootOpts
|
||||
|
||||
Port int
|
||||
RootDir string
|
||||
ConfigFile string
|
||||
ReadOnly bool
|
||||
|
||||
TLSCert string
|
||||
TLSKey string
|
||||
}
|
||||
|
||||
func (o *ServeRegistryOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.IntVarP(&o.Port, "port", "p", consts.DefaultRegistryPort, "(Optional) Set the port to use for incoming connections")
|
||||
f.StringVar(&o.RootDir, "directory", consts.DefaultRegistryRootDir, "(Optional) Directory to use for backend. Defaults to $PWD/registry")
|
||||
f.StringVarP(&o.ConfigFile, "config", "c", "", "(Optional) Location of config file (overrides all flags)")
|
||||
f.BoolVar(&o.ReadOnly, "readonly", true, "(Optional) Run the registry as readonly")
|
||||
|
||||
f.StringVar(&o.TLSCert, "tls-cert", "", "(Optional) Location of the TLS Certificate to use for server authenication")
|
||||
f.StringVar(&o.TLSKey, "tls-key", "", "(Optional) Location of the TLS Key to use for server authenication")
|
||||
|
||||
cmd.MarkFlagsRequiredTogether("tls-cert", "tls-key")
|
||||
}
|
||||
|
||||
type ServeFilesOpts struct {
|
||||
*StoreRootOpts
|
||||
|
||||
Port int
|
||||
Timeout int
|
||||
RootDir string
|
||||
|
||||
TLSCert string
|
||||
TLSKey string
|
||||
}
|
||||
|
||||
func (o *ServeFilesOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.IntVarP(&o.Port, "port", "p", consts.DefaultFileserverPort, "(Optional) Set the port to use for incoming connections")
|
||||
f.IntVar(&o.Timeout, "timeout", consts.DefaultFileserverTimeout, "(Optional) Timeout duration for HTTP Requests in seconds for both reads/writes")
|
||||
f.StringVar(&o.RootDir, "directory", consts.DefaultFileserverRootDir, "(Optional) Directory to use for backend. Defaults to $PWD/fileserver")
|
||||
|
||||
f.StringVar(&o.TLSCert, "tls-cert", "", "(Optional) Location of the TLS Certificate to use for server authenication")
|
||||
f.StringVar(&o.TLSKey, "tls-key", "", "(Optional) Location of the TLS Key to use for server authenication")
|
||||
|
||||
cmd.MarkFlagsRequiredTogether("tls-cert", "tls-key")
|
||||
}
|
||||
63
internal/flags/store.go
Normal file
63
internal/flags/store.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
type StoreRootOpts struct {
|
||||
StoreDir string
|
||||
Retries int
|
||||
TempOverride string
|
||||
}
|
||||
|
||||
func (o *StoreRootOpts) AddFlags(cmd *cobra.Command) {
|
||||
pf := cmd.PersistentFlags()
|
||||
pf.StringVarP(&o.StoreDir, "store", "s", "", "Set the directory to use for the content store")
|
||||
pf.IntVarP(&o.Retries, "retries", "r", consts.DefaultRetries, "Set the number of retries for operations")
|
||||
pf.StringVarP(&o.TempOverride, "tempdir", "t", "", "(Optional) Override the default temporary directory determined by the OS")
|
||||
}
|
||||
|
||||
func (o *StoreRootOpts) Store(ctx context.Context) (*store.Layout, error) {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
storeDir := o.StoreDir
|
||||
|
||||
if storeDir == "" {
|
||||
storeDir = os.Getenv(consts.HaulerStoreDir)
|
||||
}
|
||||
|
||||
if storeDir == "" {
|
||||
storeDir = consts.DefaultStoreName
|
||||
}
|
||||
|
||||
abs, err := filepath.Abs(storeDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o.StoreDir = abs
|
||||
|
||||
l.Debugf("using store at [%s]", abs)
|
||||
|
||||
if _, err := os.Stat(abs); errors.Is(err, os.ErrNotExist) {
|
||||
if err := os.MkdirAll(abs, os.ModePerm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s, err := store.NewLayout(abs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
41
internal/flags/sync.go
Normal file
41
internal/flags/sync.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package flags
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type SyncOpts struct {
|
||||
*StoreRootOpts
|
||||
FileName []string
|
||||
Key string
|
||||
CertOidcIssuer string
|
||||
CertOidcIssuerRegexp string
|
||||
CertIdentity string
|
||||
CertIdentityRegexp string
|
||||
CertGithubWorkflowRepository string
|
||||
Products []string
|
||||
Platform string
|
||||
Registry string
|
||||
ProductRegistry string
|
||||
Tlog bool
|
||||
Rewrite string
|
||||
}
|
||||
|
||||
func (o *SyncOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringSliceVarP(&o.FileName, "filename", "f", []string{consts.DefaultHaulerManifestName}, "Specify the name of manifest(s) to sync")
|
||||
f.StringVarP(&o.Key, "key", "k", "", "(Optional) Location of public key to use for signature verification")
|
||||
f.StringVar(&o.CertIdentity, "certificate-identity", "", "(Optional) Cosign certificate-identity (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertIdentityRegexp, "certificate-identity-regexp", "", "(Optional) Cosign certificate-identity-regexp (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertOidcIssuer, "certificate-oidc-issuer", "", "(Optional) Cosign option to validate oidc issuer")
|
||||
f.StringVar(&o.CertOidcIssuerRegexp, "certificate-oidc-issuer-regexp", "", "(Optional) Cosign option to validate oidc issuer with regex")
|
||||
f.StringVar(&o.CertGithubWorkflowRepository, "certificate-github-workflow-repository", "", "(Optional) Cosign certificate-github-workflow-repository option")
|
||||
f.StringSliceVar(&o.Products, "products", []string{}, "(Optional) Specify the product name to fetch collections from the product registry i.e. rancher=v2.10.1,rke2=v1.31.5+rke2r1")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform of the image... i.e linux/amd64 (defaults to all)")
|
||||
f.StringVarP(&o.Registry, "registry", "g", "", "(Optional) Specify the registry of the image for images that do not alredy define one")
|
||||
f.StringVarP(&o.ProductRegistry, "product-registry", "c", "", "(Optional) Specify the product registry. Defaults to RGS Carbide Registry (rgcrprod.azurecr.us)")
|
||||
f.BoolVar(&o.Tlog, "use-tlog-verify", false, "(Optional) Allow transparency log verification (defaults to false)")
|
||||
f.StringVar(&o.Rewrite, "rewrite", "", "(EXPERIMENTAL & Optional) Rewrite artifact path to specified string")
|
||||
}
|
||||
12
internal/flags/version.go
Normal file
12
internal/flags/version.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
type VersionOpts struct {
|
||||
JSON bool
|
||||
}
|
||||
|
||||
func (o *VersionOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
f.BoolVar(&o.JSON, "json", false, "Set the output format to JSON")
|
||||
}
|
||||
@@ -2,7 +2,8 @@ package mapper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
@@ -11,17 +12,21 @@ import (
|
||||
"github.com/containerd/containerd/remotes"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"oras.land/oras-go/pkg/content"
|
||||
"hauler.dev/go/hauler/pkg/content"
|
||||
)
|
||||
|
||||
// NewMapperFileStore creates a new file store that uses mapper functions for each detected descriptor.
|
||||
// This extends content.File, and differs in that it allows much more functionality into how each descriptor is written.
|
||||
func NewMapperFileStore(root string, mapper map[string]Fn) *store {
|
||||
fs := content.NewFile(root)
|
||||
return &store{
|
||||
File: fs,
|
||||
mapper: mapper,
|
||||
//
|
||||
// This extends content.OCI, and differs in that it allows much more functionality into how each descriptor is written.
|
||||
func NewMapperFileStore(root string, mapper map[string]Fn) (*store, error) {
|
||||
fs, err := content.NewOCI(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &store{
|
||||
OCI: fs,
|
||||
mapper: mapper,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *store) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
|
||||
@@ -34,7 +39,7 @@ func (s *store) Pusher(ctx context.Context, ref string) (remotes.Pusher, error)
|
||||
hash = parts[1]
|
||||
}
|
||||
return &pusher{
|
||||
store: s.File,
|
||||
store: s.OCI,
|
||||
tag: tag,
|
||||
ref: hash,
|
||||
mapper: s.mapper,
|
||||
@@ -42,43 +47,76 @@ func (s *store) Pusher(ctx context.Context, ref string) (remotes.Pusher, error)
|
||||
}
|
||||
|
||||
type store struct {
|
||||
*content.File
|
||||
*content.OCI
|
||||
mapper map[string]Fn
|
||||
}
|
||||
|
||||
func (s *pusher) Push(ctx context.Context, desc ocispec.Descriptor) (ccontent.Writer, error) {
|
||||
// TODO: This is suuuuuper ugly... redo this when oras v2 is out
|
||||
// For manifests and indexes (which have AnnotationRefName), discard them
|
||||
// They're metadata and don't need to be extracted
|
||||
if _, ok := content.ResolveName(desc); ok {
|
||||
p, err := s.store.Pusher(ctx, s.ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p.Push(ctx, desc)
|
||||
// Discard manifests/indexes, they're just metadata
|
||||
return content.NewIoContentWriter(&nopCloser{io.Discard}, content.WithOutputHash(desc.Digest.String())), nil
|
||||
}
|
||||
|
||||
// If no custom mapper found, fall back to content.File mapper
|
||||
if _, ok := s.mapper[desc.MediaType]; !ok {
|
||||
return content.NewIoContentWriter(ioutil.Discard, content.WithOutputHash(desc.Digest)), nil
|
||||
// Check if this descriptor has a mapper for its media type
|
||||
mapperFn, hasMapper := s.mapper[desc.MediaType]
|
||||
if !hasMapper {
|
||||
// Fall back to catch-all sentinel, then discard
|
||||
mapperFn, hasMapper = s.mapper[DefaultCatchAll]
|
||||
}
|
||||
if !hasMapper {
|
||||
// No mapper for this media type, discard it (config blobs, etc.)
|
||||
return content.NewIoContentWriter(&nopCloser{io.Discard}, content.WithOutputHash(desc.Digest.String())), nil
|
||||
}
|
||||
|
||||
filename, err := s.mapper[desc.MediaType](desc)
|
||||
// Get the filename from the mapper function.
|
||||
// An empty filename means the mapper explicitly declined this descriptor (e.g. a
|
||||
// config blob that has no title annotation); treat it the same as no mapper.
|
||||
filename, err := mapperFn(desc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fullFileName := filepath.Join(s.store.ResolvePath(""), filename)
|
||||
// TODO: Don't rewrite everytime, we can check the digest
|
||||
f, err := os.OpenFile(fullFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "pushing file")
|
||||
if filename == "" {
|
||||
return content.NewIoContentWriter(&nopCloser{io.Discard}, content.WithOutputHash(desc.Digest.String())), nil
|
||||
}
|
||||
|
||||
w := content.NewIoContentWriter(f, content.WithInputHash(desc.Digest), content.WithOutputHash(desc.Digest))
|
||||
// Get the destination directory and create the full path.
|
||||
// Use absolute paths so the traversal check works even when destDir is relative (e.g. ".").
|
||||
destDir, err := filepath.Abs(s.store.ResolvePath(""))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "resolving destination dir")
|
||||
}
|
||||
fullFileName := filepath.Join(destDir, filename)
|
||||
|
||||
// Guard against path traversal (e.g. filename containing "../")
|
||||
if !strings.HasPrefix(fullFileName, destDir+string(filepath.Separator)) {
|
||||
return nil, fmt.Errorf("path_traversal_disallowed: %q resolves outside destination dir", filename)
|
||||
}
|
||||
|
||||
// Create parent directories (e.g. when filename is "subdir/file.txt")
|
||||
if err := os.MkdirAll(filepath.Dir(fullFileName), 0755); err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("creating directory for %s", fullFileName))
|
||||
}
|
||||
|
||||
// Create the file
|
||||
f, err := os.OpenFile(fullFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("creating file %s", fullFileName))
|
||||
}
|
||||
|
||||
w := content.NewIoContentWriter(f, content.WithOutputHash(desc.Digest.String()))
|
||||
return w, nil
|
||||
}
|
||||
|
||||
type nopCloser struct {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
func (*nopCloser) Close() error { return nil }
|
||||
|
||||
type pusher struct {
|
||||
store *content.File
|
||||
store *content.OCI
|
||||
tag string
|
||||
ref string
|
||||
mapper map[string]Fn
|
||||
|
||||
349
internal/mapper/mapper_test.go
Normal file
349
internal/mapper/mapper_test.go
Normal file
@@ -0,0 +1,349 @@
|
||||
package mapper
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
func TestFromManifest_DockerImage(t *testing.T) {
|
||||
manifest := ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{
|
||||
MediaType: consts.DockerConfigJSON,
|
||||
},
|
||||
}
|
||||
|
||||
target, err := FromManifest(manifest, t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if target == nil {
|
||||
t.Fatal("expected non-nil Target")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromManifest_HelmChart(t *testing.T) {
|
||||
manifest := ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{
|
||||
MediaType: consts.ChartConfigMediaType,
|
||||
},
|
||||
}
|
||||
|
||||
target, err := FromManifest(manifest, t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if target == nil {
|
||||
t.Fatal("expected non-nil Target")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromManifest_File(t *testing.T) {
|
||||
manifest := ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{
|
||||
MediaType: consts.FileLocalConfigMediaType,
|
||||
},
|
||||
}
|
||||
|
||||
target, err := FromManifest(manifest, t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if target == nil {
|
||||
t.Fatal("expected non-nil Target")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromManifest_OciImageConfigWithTitleAnnotation(t *testing.T) {
|
||||
// OCI artifacts distributed as "fake images" (e.g. rke2-binary) use the standard
|
||||
// OCI image config type but set AnnotationTitle on their layers. FromManifest must
|
||||
// dispatch to Files() (not Images()) so the title is used as the output filename.
|
||||
manifest := ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{
|
||||
MediaType: ocispec.MediaTypeImageConfig,
|
||||
},
|
||||
Layers: []ocispec.Descriptor{
|
||||
{
|
||||
MediaType: consts.OCILayer,
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: "rke2.linux-amd64",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
target, err := FromManifest(manifest, t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
s, ok := target.(*store)
|
||||
if !ok {
|
||||
t.Fatal("expected target to be *store")
|
||||
}
|
||||
if _, exists := s.mapper[consts.OCILayer]; !exists {
|
||||
t.Fatal("expected Files() mapper (OCILayer key) for OCI image config with title annotation")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromManifest_FileLayerFallback(t *testing.T) {
|
||||
manifest := ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{
|
||||
MediaType: "application/vnd.unknown.config.v1+json",
|
||||
},
|
||||
Layers: []ocispec.Descriptor{
|
||||
{
|
||||
MediaType: consts.FileLayerMediaType,
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: "somefile.txt",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
target, err := FromManifest(manifest, t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if target == nil {
|
||||
t.Fatal("expected non-nil Target")
|
||||
}
|
||||
|
||||
// Verify the returned store uses the Files() mapper by checking that the
|
||||
// mapper contains the FileLayerMediaType key.
|
||||
s, ok := target.(*store)
|
||||
if !ok {
|
||||
t.Fatal("expected target to be *store")
|
||||
}
|
||||
if s.mapper == nil {
|
||||
t.Fatal("expected non-nil mapper for file layer fallback")
|
||||
}
|
||||
if _, exists := s.mapper[consts.FileLayerMediaType]; !exists {
|
||||
t.Fatal("expected mapper to contain consts.FileLayerMediaType key")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromManifest_UnknownNoTitle(t *testing.T) {
|
||||
manifest := ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{
|
||||
MediaType: "application/vnd.unknown.config.v1+json",
|
||||
},
|
||||
Layers: []ocispec.Descriptor{
|
||||
{
|
||||
MediaType: "application/vnd.unknown.layer",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
target, err := FromManifest(manifest, t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if target == nil {
|
||||
t.Fatal("expected non-nil Target")
|
||||
}
|
||||
|
||||
// Unknown artifacts must use the Default catch-all mapper so blobs are not silently discarded
|
||||
s, ok := target.(*store)
|
||||
if !ok {
|
||||
t.Fatal("expected target to be *store")
|
||||
}
|
||||
if _, exists := s.mapper[DefaultCatchAll]; !exists {
|
||||
t.Fatal("expected default catch-all mapper for unknown artifact type")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFiles_CatchAll_WithTitle(t *testing.T) {
|
||||
// OCI artifacts with custom layer media types (e.g. rke2-binary) must be
|
||||
// extracted by the Files() catch-all when they carry AnnotationTitle.
|
||||
mappers := Files()
|
||||
|
||||
fn, ok := mappers[DefaultCatchAll]
|
||||
if !ok {
|
||||
t.Fatal("Files() must contain a DefaultCatchAll entry")
|
||||
}
|
||||
|
||||
d := digest.Digest("sha256:" + strings.Repeat("b", 64))
|
||||
desc := ocispec.Descriptor{
|
||||
MediaType: "application/vnd.rancher.rke2.binary",
|
||||
Digest: d,
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: "rke2.linux-amd64",
|
||||
},
|
||||
}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if result != "rke2.linux-amd64" {
|
||||
t.Errorf("expected %q, got %q", "rke2.linux-amd64", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFiles_CatchAll_NoTitle(t *testing.T) {
|
||||
// Blobs without AnnotationTitle (e.g. config blobs) must be discarded by the
|
||||
// Files() catch-all (empty filename = discard signal for Push).
|
||||
mappers := Files()
|
||||
|
||||
fn, ok := mappers[DefaultCatchAll]
|
||||
if !ok {
|
||||
t.Fatal("Files() must contain a DefaultCatchAll entry")
|
||||
}
|
||||
|
||||
d := digest.Digest("sha256:" + strings.Repeat("c", 64))
|
||||
desc := ocispec.Descriptor{
|
||||
MediaType: "application/vnd.oci.image.config.v1+json",
|
||||
Digest: d,
|
||||
}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if result != "" {
|
||||
t.Errorf("expected empty string (discard) for config blob, got %q", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestImages_MapperFn(t *testing.T) {
|
||||
mappers := Images()
|
||||
|
||||
fn, ok := mappers[consts.DockerLayer]
|
||||
if !ok {
|
||||
t.Fatalf("expected mapper for %s", consts.DockerLayer)
|
||||
}
|
||||
|
||||
d := digest.Digest("sha256:abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890")
|
||||
desc := ocispec.Descriptor{
|
||||
Digest: d,
|
||||
}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
|
||||
expected := "sha256:abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890.tar.gz"
|
||||
if result != expected {
|
||||
t.Fatalf("expected %q, got %q", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestImages_ConfigMapperFn(t *testing.T) {
|
||||
mappers := Images()
|
||||
|
||||
fn, ok := mappers[consts.DockerConfigJSON]
|
||||
if !ok {
|
||||
t.Fatalf("expected mapper for %s", consts.DockerConfigJSON)
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{}
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
|
||||
if result != consts.ImageConfigFile {
|
||||
t.Fatalf("expected %q, got %q", consts.ImageConfigFile, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChart_MapperFn_WithTitle(t *testing.T) {
|
||||
mappers := Chart()
|
||||
|
||||
fn, ok := mappers[consts.ChartLayerMediaType]
|
||||
if !ok {
|
||||
t.Fatalf("expected mapper for %s", consts.ChartLayerMediaType)
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: "mychart-1.0.0.tgz",
|
||||
},
|
||||
}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
|
||||
if result != "mychart-1.0.0.tgz" {
|
||||
t.Fatalf("expected %q, got %q", "mychart-1.0.0.tgz", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChart_MapperFn_NoTitle(t *testing.T) {
|
||||
mappers := Chart()
|
||||
|
||||
fn, ok := mappers[consts.ChartLayerMediaType]
|
||||
if !ok {
|
||||
t.Fatalf("expected mapper for %s", consts.ChartLayerMediaType)
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
|
||||
if result != "chart.tar.gz" {
|
||||
t.Fatalf("expected %q, got %q", "chart.tar.gz", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFiles_MapperFn_WithTitle(t *testing.T) {
|
||||
mappers := Files()
|
||||
|
||||
fn, ok := mappers[consts.FileLayerMediaType]
|
||||
if !ok {
|
||||
t.Fatalf("expected mapper for %s", consts.FileLayerMediaType)
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: "install.sh",
|
||||
},
|
||||
}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
|
||||
if result != "install.sh" {
|
||||
t.Fatalf("expected %q, got %q", "install.sh", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFiles_MapperFn_NoTitle(t *testing.T) {
|
||||
mappers := Files()
|
||||
|
||||
fn, ok := mappers[consts.FileLayerMediaType]
|
||||
if !ok {
|
||||
t.Fatalf("expected mapper for %s", consts.FileLayerMediaType)
|
||||
}
|
||||
|
||||
d := digest.Digest("sha256:" + strings.Repeat("a", 64))
|
||||
desc := ocispec.Descriptor{
|
||||
Digest: d,
|
||||
}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(result, ".file") {
|
||||
t.Fatalf("expected result to end with .file, got %q", result)
|
||||
}
|
||||
|
||||
expected := "sha256:" + strings.Repeat("a", 64) + ".file"
|
||||
if result != expected {
|
||||
t.Fatalf("expected %q, got %q", expected, result)
|
||||
}
|
||||
}
|
||||
@@ -4,39 +4,51 @@ import (
|
||||
"fmt"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"oras.land/oras-go/pkg/target"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content"
|
||||
)
|
||||
|
||||
type Fn func(desc ocispec.Descriptor) (string, error)
|
||||
|
||||
// FromManifest will return the appropriate content store given a reference and source type adequate for storing the results on disk
|
||||
func FromManifest(manifest ocispec.Manifest, root string) (target.Target, error) {
|
||||
// TODO: Don't rely solely on config mediatype
|
||||
func FromManifest(manifest ocispec.Manifest, root string) (content.Target, error) {
|
||||
// First, switch on config mediatype to identify known types.
|
||||
switch manifest.Config.MediaType {
|
||||
case consts.DockerConfigJSON, consts.OCIManifestSchema1:
|
||||
s := NewMapperFileStore(root, Images())
|
||||
defer s.Close()
|
||||
return s, nil
|
||||
|
||||
case consts.ChartLayerMediaType, consts.ChartConfigMediaType:
|
||||
s := NewMapperFileStore(root, Chart())
|
||||
defer s.Close()
|
||||
return s, nil
|
||||
return NewMapperFileStore(root, Chart())
|
||||
|
||||
default:
|
||||
s := NewMapperFileStore(root, nil)
|
||||
defer s.Close()
|
||||
return s, nil
|
||||
case consts.FileLocalConfigMediaType, consts.FileDirectoryConfigMediaType, consts.FileHttpConfigMediaType:
|
||||
return NewMapperFileStore(root, Files())
|
||||
|
||||
case consts.DockerConfigJSON, ocispec.MediaTypeImageConfig:
|
||||
// Standard OCI/Docker image config. OCI artifacts that distribute files
|
||||
// (e.g. rke2-binary) reuse this config type but set AnnotationTitle on their
|
||||
// layers. When title annotations are present prefer Files() so the title is
|
||||
// used as the output filename; otherwise treat as a container image.
|
||||
for _, layer := range manifest.Layers {
|
||||
if _, ok := layer.Annotations[ocispec.AnnotationTitle]; ok {
|
||||
return NewMapperFileStore(root, Files())
|
||||
}
|
||||
}
|
||||
return NewMapperFileStore(root, Images())
|
||||
}
|
||||
|
||||
// Unknown config type: title annotation indicates a file artifact; otherwise use
|
||||
// a catch-all mapper that writes blobs by digest.
|
||||
for _, layer := range manifest.Layers {
|
||||
if _, ok := layer.Annotations[ocispec.AnnotationTitle]; ok {
|
||||
return NewMapperFileStore(root, Files())
|
||||
}
|
||||
}
|
||||
return NewMapperFileStore(root, Default())
|
||||
}
|
||||
|
||||
func Images() map[string]Fn {
|
||||
m := make(map[string]Fn)
|
||||
|
||||
manifestMapperFn := Fn(func(desc ocispec.Descriptor) (string, error) {
|
||||
return "manifest.json", nil
|
||||
return consts.ImageManifestFile, nil
|
||||
})
|
||||
|
||||
for _, l := range []string{consts.DockerManifestSchema2, consts.DockerManifestListSchema2, consts.OCIManifestSchema1} {
|
||||
@@ -52,7 +64,7 @@ func Images() map[string]Fn {
|
||||
}
|
||||
|
||||
configMapperFn := Fn(func(desc ocispec.Descriptor) (string, error) {
|
||||
return "config.json", nil
|
||||
return consts.ImageConfigFile, nil
|
||||
})
|
||||
|
||||
for _, l := range []string{consts.DockerConfigJSON} {
|
||||
@@ -81,3 +93,52 @@ func Chart() map[string]Fn {
|
||||
m[consts.ProvLayerMediaType] = provMapperFn
|
||||
return m
|
||||
}
|
||||
|
||||
// DefaultCatchAll is the sentinel key used in a mapper map to match any media type
|
||||
// not explicitly registered. Push checks for this key as a fallback.
|
||||
const DefaultCatchAll = ""
|
||||
|
||||
// Default returns a catch-all mapper that extracts any layer blob using its title
|
||||
// annotation as the filename, falling back to a digest-based name. Used when the
|
||||
// manifest config media type is not a known hauler type.
|
||||
func Default() map[string]Fn {
|
||||
m := make(map[string]Fn)
|
||||
m[DefaultCatchAll] = Fn(func(desc ocispec.Descriptor) (string, error) {
|
||||
if title, ok := desc.Annotations[ocispec.AnnotationTitle]; ok {
|
||||
return title, nil
|
||||
}
|
||||
return fmt.Sprintf("%s.bin", desc.Digest.String()), nil
|
||||
})
|
||||
return m
|
||||
}
|
||||
|
||||
func Files() map[string]Fn {
|
||||
m := make(map[string]Fn)
|
||||
|
||||
fileMapperFn := Fn(func(desc ocispec.Descriptor) (string, error) {
|
||||
// Use the title annotation to determine the filename
|
||||
if title, ok := desc.Annotations[ocispec.AnnotationTitle]; ok {
|
||||
return title, nil
|
||||
}
|
||||
// Fallback to digest-based filename if no title
|
||||
return fmt.Sprintf("%s.file", desc.Digest.String()), nil
|
||||
})
|
||||
|
||||
// Match the media type that's actually used in the manifest
|
||||
// (set by getter.LayerFrom in pkg/getter/getter.go)
|
||||
m[consts.FileLayerMediaType] = fileMapperFn
|
||||
m[consts.OCILayer] = fileMapperFn // Also handle standard OCI layers that have title annotation
|
||||
m["application/vnd.oci.image.layer.v1.tar"] = fileMapperFn // And the tar variant
|
||||
|
||||
// Catch-all for OCI artifacts that use custom layer media types (e.g. rke2-binary).
|
||||
// Write the blob if it carries an AnnotationTitle; silently discard everything else
|
||||
// (config blobs, metadata) by returning an empty filename.
|
||||
m[DefaultCatchAll] = Fn(func(desc ocispec.Descriptor) (string, error) {
|
||||
if title, ok := desc.Annotations[ocispec.AnnotationTitle]; ok {
|
||||
return title, nil
|
||||
}
|
||||
return "", nil // No title → discard (config blob or unrecognised metadata)
|
||||
})
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
@@ -9,33 +9,32 @@ import (
|
||||
|
||||
"github.com/gorilla/handlers"
|
||||
"github.com/gorilla/mux"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
type FileConfig struct {
|
||||
Root string
|
||||
Host string
|
||||
Port int
|
||||
}
|
||||
|
||||
// NewFile returns a fileserver
|
||||
// TODO: Better configs
|
||||
func NewFile(ctx context.Context, cfg FileConfig) (Server, error) {
|
||||
func NewFile(ctx context.Context, cfg flags.ServeFilesOpts) (Server, error) {
|
||||
r := mux.NewRouter()
|
||||
r.Handle("/", handlers.LoggingHandler(os.Stdout, http.FileServer(http.Dir(cfg.Root))))
|
||||
|
||||
if cfg.Root == "" {
|
||||
cfg.Root = "."
|
||||
r.PathPrefix("/").Handler(handlers.LoggingHandler(os.Stdout, http.StripPrefix("/", http.FileServer(http.Dir(cfg.RootDir)))))
|
||||
if cfg.RootDir == "" {
|
||||
cfg.RootDir = "."
|
||||
}
|
||||
|
||||
if cfg.Port == 0 {
|
||||
cfg.Port = 8080
|
||||
cfg.Port = consts.DefaultFileserverPort
|
||||
}
|
||||
|
||||
if cfg.Timeout == 0 {
|
||||
cfg.Timeout = consts.DefaultFileserverTimeout
|
||||
}
|
||||
|
||||
srv := &http.Server{
|
||||
Handler: r,
|
||||
Addr: fmt.Sprintf(":%d", cfg.Port),
|
||||
WriteTimeout: 15 * time.Second,
|
||||
ReadTimeout: 15 * time.Second,
|
||||
WriteTimeout: time.Duration(cfg.Timeout) * time.Second,
|
||||
ReadTimeout: time.Duration(cfg.Timeout) * time.Second,
|
||||
}
|
||||
|
||||
return srv, nil
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
"github.com/distribution/distribution/v3/configuration"
|
||||
"github.com/distribution/distribution/v3/registry"
|
||||
"github.com/distribution/distribution/v3/registry/handlers"
|
||||
"github.com/docker/go-metrics"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -22,14 +21,6 @@ func NewRegistry(ctx context.Context, cfg *configuration.Configuration) (*regist
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cfg.HTTP.Debug.Prometheus.Enabled {
|
||||
path := cfg.HTTP.Debug.Prometheus.Path
|
||||
if path == "" {
|
||||
path = "/metrics"
|
||||
}
|
||||
http.Handle(path, metrics.Handler())
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
@@ -45,6 +36,9 @@ func NewTempRegistry(ctx context.Context, root string) *tmpRegistryServer {
|
||||
"filesystem": configuration.Parameters{"rootdirectory": root},
|
||||
},
|
||||
}
|
||||
|
||||
cfg.Validation.Manifests.URLs.Allow = []string{".+"}
|
||||
|
||||
cfg.Log.Level = "error"
|
||||
cfg.HTTP.Headers = http.Header{
|
||||
"X-Content-Type-Options": []string{"nosniff"},
|
||||
|
||||
@@ -2,4 +2,5 @@ package server
|
||||
|
||||
type Server interface {
|
||||
ListenAndServe() error
|
||||
ListenAndServeTLS(string, string) error
|
||||
}
|
||||
|
||||
90
internal/server/server_test.go
Normal file
90
internal/server/server_test.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
// Register the filesystem storage driver for the distribution registry.
|
||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
)
|
||||
|
||||
func TestNewTempRegistry_StartStop(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
srv := NewTempRegistry(ctx, t.TempDir())
|
||||
|
||||
// Start the httptest server directly to avoid the Start() method's
|
||||
// retry logic which only accepts HTTP 200, while /v2 returns 401
|
||||
// from the distribution registry.
|
||||
srv.Server.Start()
|
||||
t.Cleanup(func() { srv.Stop() })
|
||||
|
||||
resp, err := http.Get(srv.Server.URL + "/v2")
|
||||
if err != nil {
|
||||
t.Fatalf("expected GET /v2 to succeed, got error: %v", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
|
||||
t.Fatalf("expected status 200 or 401, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Stop and verify unreachable.
|
||||
srv.Stop()
|
||||
|
||||
_, err = http.Get(srv.Server.URL + "/v2")
|
||||
if err == nil {
|
||||
t.Fatal("expected error after stopping server, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewTempRegistry_Registry(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
srv := NewTempRegistry(ctx, t.TempDir())
|
||||
|
||||
srv.Server.Start()
|
||||
t.Cleanup(func() { srv.Stop() })
|
||||
|
||||
host := srv.Registry()
|
||||
if host == "" {
|
||||
t.Fatal("expected non-empty registry host")
|
||||
}
|
||||
if strings.Contains(host, "http://") {
|
||||
t.Fatalf("registry host should not contain protocol prefix, got %q", host)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewFile_Configuration(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
opts := flags.ServeFilesOpts{
|
||||
RootDir: t.TempDir(),
|
||||
Port: 0,
|
||||
Timeout: 0,
|
||||
}
|
||||
|
||||
srv, err := NewFile(ctx, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if srv == nil {
|
||||
t.Fatal("expected non-nil server")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewFile_DefaultPort(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
opts := flags.ServeFilesOpts{
|
||||
RootDir: t.TempDir(),
|
||||
}
|
||||
|
||||
srv, err := NewFile(ctx, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if srv == nil {
|
||||
t.Fatal("expected non-nil server")
|
||||
}
|
||||
}
|
||||
@@ -28,10 +28,9 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/common-nighthawk/go-figure"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
const unknown = "unknown"
|
||||
|
||||
// Base version information.
|
||||
//
|
||||
// This is the fallback data used when version information from git is not
|
||||
@@ -41,19 +40,19 @@ var (
|
||||
// branch should be tagged using the correct versioning strategy.
|
||||
gitVersion = "devel"
|
||||
// SHA1 from git, output of $(git rev-parse HEAD)
|
||||
gitCommit = unknown
|
||||
gitCommit = consts.Unknown
|
||||
// State of git tree, either "clean" or "dirty"
|
||||
gitTreeState = unknown
|
||||
gitTreeState = consts.Unknown
|
||||
// Build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
buildDate = unknown
|
||||
buildDate = consts.Unknown
|
||||
// flag to print the ascii name banner
|
||||
asciiName = "true"
|
||||
// goVersion is the used golang version.
|
||||
goVersion = unknown
|
||||
goVersion = consts.Unknown
|
||||
// compiler is the used golang compiler.
|
||||
compiler = unknown
|
||||
compiler = consts.Unknown
|
||||
// platform is the used os/arch identifier.
|
||||
platform = unknown
|
||||
platform = consts.Unknown
|
||||
|
||||
once sync.Once
|
||||
info = Info{}
|
||||
@@ -84,7 +83,7 @@ func getBuildInfo() *debug.BuildInfo {
|
||||
|
||||
func getGitVersion(bi *debug.BuildInfo) string {
|
||||
if bi == nil {
|
||||
return unknown
|
||||
return consts.Unknown
|
||||
}
|
||||
|
||||
// TODO: remove this when the issue https://github.com/golang/go/issues/29228 is fixed
|
||||
@@ -107,28 +106,28 @@ func getDirty(bi *debug.BuildInfo) string {
|
||||
if modified == "false" {
|
||||
return "clean"
|
||||
}
|
||||
return unknown
|
||||
return consts.Unknown
|
||||
}
|
||||
|
||||
func getBuildDate(bi *debug.BuildInfo) string {
|
||||
buildTime := getKey(bi, "vcs.time")
|
||||
t, err := time.Parse("2006-01-02T15:04:05Z", buildTime)
|
||||
if err != nil {
|
||||
return unknown
|
||||
return consts.Unknown
|
||||
}
|
||||
return t.Format("2006-01-02T15:04:05")
|
||||
}
|
||||
|
||||
func getKey(bi *debug.BuildInfo, key string) string {
|
||||
if bi == nil {
|
||||
return unknown
|
||||
return consts.Unknown
|
||||
}
|
||||
for _, iter := range bi.Settings {
|
||||
if iter.Key == key {
|
||||
return iter.Value
|
||||
}
|
||||
}
|
||||
return unknown
|
||||
return consts.Unknown
|
||||
}
|
||||
|
||||
// GetVersionInfo represents known information on how this binary was built.
|
||||
@@ -136,27 +135,27 @@ func GetVersionInfo() Info {
|
||||
once.Do(func() {
|
||||
buildInfo := getBuildInfo()
|
||||
gitVersion = getGitVersion(buildInfo)
|
||||
if gitCommit == unknown {
|
||||
if gitCommit == consts.Unknown {
|
||||
gitCommit = getCommit(buildInfo)
|
||||
}
|
||||
|
||||
if gitTreeState == unknown {
|
||||
if gitTreeState == consts.Unknown {
|
||||
gitTreeState = getDirty(buildInfo)
|
||||
}
|
||||
|
||||
if buildDate == unknown {
|
||||
if buildDate == consts.Unknown {
|
||||
buildDate = getBuildDate(buildInfo)
|
||||
}
|
||||
|
||||
if goVersion == unknown {
|
||||
if goVersion == consts.Unknown {
|
||||
goVersion = runtime.Version()
|
||||
}
|
||||
|
||||
if compiler == unknown {
|
||||
if compiler == consts.Unknown {
|
||||
compiler = runtime.Compiler
|
||||
}
|
||||
|
||||
if platform == unknown {
|
||||
if platform == consts.Unknown {
|
||||
platform = fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)
|
||||
}
|
||||
|
||||
@@ -226,4 +225,4 @@ func (i *Info) CheckFontName(fontName string) bool {
|
||||
|
||||
fmt.Fprintln(os.Stderr, "font not valid, using default")
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
26
pkg/apis/hauler.cattle.io/v1/chart.go
Normal file
26
pkg/apis/hauler.cattle.io/v1/chart.go
Normal file
@@ -0,0 +1,26 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type Charts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ChartSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ChartSpec struct {
|
||||
Charts []Chart `json:"charts,omitempty"`
|
||||
}
|
||||
|
||||
type Chart struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
RepoURL string `json:"repoURL,omitempty"`
|
||||
Version string `json:"version,omitempty"`
|
||||
Rewrite string `json:"rewrite,omitempty"`
|
||||
|
||||
AddImages bool `json:"add-images,omitempty"`
|
||||
AddDependencies bool `json:"add-dependencies,omitempty"`
|
||||
}
|
||||
@@ -1,13 +1,9 @@
|
||||
package v1alpha1
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
DriverContentKind = "Driver"
|
||||
)
|
||||
|
||||
type Driver struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
@@ -1,11 +1,9 @@
|
||||
package v1alpha1
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const FilesContentKind = "Files"
|
||||
|
||||
type Files struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
12
pkg/apis/hauler.cattle.io/v1/groupversion_info.go
Normal file
12
pkg/apis/hauler.cattle.io/v1/groupversion_info.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
var (
|
||||
ContentGroupVersion = schema.GroupVersion{Group: consts.ContentGroup, Version: "v1"}
|
||||
CollectionGroupVersion = schema.GroupVersion{Group: consts.CollectionGroup, Version: "v1"}
|
||||
)
|
||||
41
pkg/apis/hauler.cattle.io/v1/image.go
Normal file
41
pkg/apis/hauler.cattle.io/v1/image.go
Normal file
@@ -0,0 +1,41 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type Images struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ImageSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ImageSpec struct {
|
||||
Images []Image `json:"images,omitempty"`
|
||||
}
|
||||
|
||||
type Image struct {
|
||||
// Name is the full location for the image, can be referenced by tags or digests
|
||||
Name string `json:"name"`
|
||||
|
||||
// Path is the path to the cosign public key used for verifying image signatures
|
||||
//Key string `json:"key,omitempty"`
|
||||
Key string `json:"key"`
|
||||
|
||||
// Path is the path to the cosign public key used for verifying image signatures
|
||||
//Tlog string `json:"use-tlog-verify,omitempty"`
|
||||
Tlog bool `json:"use-tlog-verify"`
|
||||
|
||||
// cosign keyless validation options
|
||||
CertIdentity string `json:"certificate-identity"`
|
||||
CertIdentityRegexp string `json:"certificate-identity-regexp"`
|
||||
CertOidcIssuer string `json:"certificate-oidc-issuer"`
|
||||
CertOidcIssuerRegexp string `json:"certificate-oidc-issuer-regexp"`
|
||||
CertGithubWorkflowRepository string `json:"certificate-github-workflow-repository"`
|
||||
|
||||
// Platform of the image to be pulled. If not specified, all platforms will be pulled.
|
||||
//Platform string `json:"key,omitempty"`
|
||||
Platform string `json:"platform"`
|
||||
Rewrite string `json:"rewrite"`
|
||||
}
|
||||
@@ -1,47 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
ChartsContentKind = "Charts"
|
||||
ChartsCollectionKind = "ThickCharts"
|
||||
)
|
||||
|
||||
type Charts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ChartSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ChartSpec struct {
|
||||
Charts []Chart `json:"charts,omitempty"`
|
||||
}
|
||||
|
||||
type Chart struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
RepoURL string `json:"repoURL,omitempty"`
|
||||
Version string `json:"version,omitempty"`
|
||||
}
|
||||
|
||||
type ThickCharts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ThickChartSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ThickChartSpec struct {
|
||||
Charts []ThickChart `json:"charts,omitempty"`
|
||||
}
|
||||
|
||||
type ThickChart struct {
|
||||
Chart `json:",inline,omitempty"`
|
||||
ExtraImages []ChartImage `json:"extraImages,omitempty"`
|
||||
}
|
||||
|
||||
type ChartImage struct {
|
||||
Reference string `json:"ref"`
|
||||
}
|
||||
@@ -1,18 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
const (
|
||||
Version = "v1alpha1"
|
||||
ContentGroup = "content.hauler.cattle.io"
|
||||
CollectionGroup = "collection.hauler.cattle.io"
|
||||
)
|
||||
|
||||
var (
|
||||
ContentGroupVersion = schema.GroupVersion{Group: ContentGroup, Version: Version}
|
||||
// SchemeBuilder = &scheme.Builder{GroupVersion: ContentGroupVersion}
|
||||
|
||||
CollectionGroupVersion = schema.GroupVersion{Group: CollectionGroup, Version: Version}
|
||||
)
|
||||
@@ -1,27 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const ImagesContentKind = "Images"
|
||||
|
||||
type Images struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ImageSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ImageSpec struct {
|
||||
Images []Image `json:"images,omitempty"`
|
||||
}
|
||||
|
||||
type Image struct {
|
||||
// Name is the full location for the image, can be referenced by tags or digests
|
||||
Name string `json:"name"`
|
||||
|
||||
// Path is the path to the cosign public key used for verifying image signatures
|
||||
//Key string `json:"key,omitempty"`
|
||||
Key string `json:"key"`
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
ImageTxtsContentKind = "ImageTxts"
|
||||
)
|
||||
|
||||
type ImageTxts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ImageTxtsSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ImageTxtsSpec struct {
|
||||
ImageTxts []ImageTxt `json:"imageTxts,omitempty"`
|
||||
}
|
||||
|
||||
type ImageTxt struct {
|
||||
Ref string `json:"ref,omitempty"`
|
||||
Sources ImageTxtSources `json:"sources,omitempty"`
|
||||
}
|
||||
|
||||
type ImageTxtSources struct {
|
||||
Include []string `json:"include,omitempty"`
|
||||
Exclude []string `json:"exclude,omitempty"`
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
const K3sCollectionKind = "K3s"
|
||||
|
||||
type K3s struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec K3sSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type K3sSpec struct {
|
||||
Version string `json:"version"`
|
||||
Arch string `json:"arch"`
|
||||
}
|
||||
104
pkg/archives/archiver.go
Normal file
104
pkg/archives/archiver.go
Normal file
@@ -0,0 +1,104 @@
|
||||
package archives
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/mholt/archives"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
)
|
||||
|
||||
// CompressionMap maps a compression name (file-extension style) to its
// archives implementation.
var CompressionMap = map[string]archives.Compression{
	"gz":  archives.Gz{},
	"bz2": archives.Bz2{},
	"xz":  archives.Xz{},
	"zst": archives.Zstd{},
	"lz4": archives.Lz4{},
	"br":  archives.Brotli{},
}

// ArchivalMap maps an archive format name to its archives implementation.
var ArchivalMap = map[string]archives.Archival{
	"tar": archives.Tar{},
	"zip": archives.Zip{},
}
|
||||
|
||||
// isExist reports whether path exists on disk. A stat failure other than
// "not exist" (e.g. a permission error) is treated as existing, matching the
// original os.IsNotExist-based behavior.
func isExist(path string) bool {
	_, statErr := os.Stat(path)
	// errors.Is is the modern replacement for os.IsNotExist and also
	// matches wrapped errors.
	return !errors.Is(statErr, os.ErrNotExist)
}
|
||||
|
||||
// archives the files in a directory
|
||||
// dir: the directory to Archive
|
||||
// outfile: the output file
|
||||
// compression: the compression to use (gzip, bzip2, etc.)
|
||||
// archival: the archival to use (tar, zip, etc.)
|
||||
func Archive(ctx context.Context, dir, outfile string, compression archives.Compression, archival archives.Archival) error {
|
||||
l := log.FromContext(ctx)
|
||||
l.Debugf("starting the archival process for [%s]", dir)
|
||||
|
||||
// remove outfile
|
||||
l.Debugf("removing existing output file: [%s]", outfile)
|
||||
if err := os.RemoveAll(outfile); err != nil {
|
||||
errMsg := fmt.Errorf("failed to remove existing output file [%s]: %w", outfile, err)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
|
||||
if !isExist(dir) {
|
||||
errMsg := fmt.Errorf("directory [%s] does not exist, cannot proceed with archival", dir)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
|
||||
// map files on disk to their paths in the archive
|
||||
l.Debugf("mapping files in directory [%s]", dir)
|
||||
archiveDirName := filepath.Base(filepath.Clean(dir))
|
||||
if dir == "." {
|
||||
archiveDirName = ""
|
||||
}
|
||||
files, err := archives.FilesFromDisk(context.Background(), nil, map[string]string{
|
||||
dir: archiveDirName,
|
||||
})
|
||||
if err != nil {
|
||||
errMsg := fmt.Errorf("error mapping files from directory [%s]: %w", dir, err)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
l.Debugf("successfully mapped files for directory [%s]", dir)
|
||||
|
||||
// create the output file we'll write to
|
||||
l.Debugf("creating output file [%s]", outfile)
|
||||
outf, err := os.Create(outfile)
|
||||
if err != nil {
|
||||
errMsg := fmt.Errorf("error creating output file [%s]: %w", outfile, err)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
defer func() {
|
||||
l.Debugf("closing output file [%s]", outfile)
|
||||
outf.Close()
|
||||
}()
|
||||
|
||||
// define the archive format
|
||||
l.Debugf("defining the archive format: [%T]/[%T]", archival, compression)
|
||||
format := archives.CompressedArchive{
|
||||
Compression: compression,
|
||||
Archival: archival,
|
||||
}
|
||||
|
||||
// create the archive
|
||||
l.Debugf("starting archive for [%s]", outfile)
|
||||
err = format.Archive(context.Background(), outf, files)
|
||||
if err != nil {
|
||||
errMsg := fmt.Errorf("error during archive creation for output file [%s]: %w", outfile, err)
|
||||
l.Debugf(errMsg.Error())
|
||||
return errMsg
|
||||
}
|
||||
l.Debugf("archive created successfully [%s]", outfile)
|
||||
return nil
|
||||
}
|
||||
164
pkg/archives/archives_test.go
Normal file
164
pkg/archives/archives_test.go
Normal file
@@ -0,0 +1,164 @@
|
||||
package archives
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/mholt/archives"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
// testContext returns a context carrying a zerolog logger that discards all
// output, so the code under test can log without polluting test output.
func testContext(t *testing.T) context.Context {
	t.Helper()
	l := zerolog.New(io.Discard)
	return l.WithContext(context.Background())
}
|
||||
|
||||
// TestArchive_RoundTrip archives a small directory tree, unarchives it, and
// verifies every file round-trips with identical content.
func TestArchive_RoundTrip(t *testing.T) {
	ctx := testContext(t)

	// build a small source tree with nested files
	srcDir := t.TempDir()
	files := map[string]string{
		"file1.txt":         "hello world",
		"subdir/file2.txt":  "nested content",
		"subdir/file3.json": `{"key":"value"}`,
	}
	for relPath, content := range files {
		full := filepath.Join(srcDir, relPath)
		if err := os.MkdirAll(filepath.Dir(full), 0o755); err != nil {
			t.Fatalf("create parent dir for %s: %v", relPath, err)
		}
		if err := os.WriteFile(full, []byte(content), 0o644); err != nil {
			t.Fatalf("write %s: %v", relPath, err)
		}
	}

	outFile := filepath.Join(t.TempDir(), "test.tar.zst")
	if err := Archive(ctx, srcDir, outFile, archives.Zstd{}, archives.Tar{}); err != nil {
		t.Fatalf("Archive() error: %v", err)
	}

	// sanity-check the archive file itself
	info, err := os.Stat(outFile)
	if err != nil {
		t.Fatalf("archive file missing: %v", err)
	}
	if info.Size() == 0 {
		t.Fatal("archive file is empty")
	}

	dstDir := t.TempDir()
	if err := Unarchive(ctx, outFile, dstDir); err != nil {
		t.Fatalf("Unarchive() error: %v", err)
	}

	// Archive maps files under the source directory's base name.
	baseName := filepath.Base(srcDir)
	for relPath, expectedContent := range files {
		full := filepath.Join(dstDir, baseName, relPath)
		data, err := os.ReadFile(full)
		if err != nil {
			t.Errorf("read extracted file %s: %v", relPath, err)
			continue
		}
		if string(data) != expectedContent {
			t.Errorf("content mismatch for %s: got %q, want %q", relPath, string(data), expectedContent)
		}
	}
}
|
||||
|
||||
// TestArchive_NonExistentDir verifies Archive fails fast when the source
// directory does not exist.
func TestArchive_NonExistentDir(t *testing.T) {
	ctx := testContext(t)
	nonExistent := filepath.Join(t.TempDir(), "does-not-exist")
	outFile := filepath.Join(t.TempDir(), "out.tar.zst")
	if err := Archive(ctx, nonExistent, outFile, archives.Zstd{}, archives.Tar{}); err == nil {
		t.Fatal("Archive() should return an error for a non-existent source directory")
	}
}
|
||||
|
||||
// TestUnarchive_ExistingHaul extracts a checked-in haul archive fixture and
// verifies it contains a valid index.json; the test is skipped when the
// fixture is absent.
func TestUnarchive_ExistingHaul(t *testing.T) {
	ctx := testContext(t)

	// testdata/ is two levels up from pkg/archives/
	haulPath := filepath.Join("..", "..", "testdata", "haul.tar.zst")
	if _, err := os.Stat(haulPath); err != nil {
		t.Skipf("testdata/haul.tar.zst not found at %s: %v", haulPath, err)
	}

	dstDir := t.TempDir()
	if err := Unarchive(ctx, haulPath, dstDir); err != nil {
		t.Fatalf("Unarchive() error: %v", err)
	}

	// locate index.json anywhere in the extracted tree
	var indexPath string
	if err := filepath.Walk(dstDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.Name() == "index.json" {
			indexPath = path
		}
		return nil
	}); err != nil {
		t.Fatalf("walk extracted dir: %v", err)
	}
	if indexPath == "" {
		t.Fatal("index.json not found in extracted haul archive")
	}

	data, err := os.ReadFile(indexPath)
	if err != nil {
		t.Fatalf("read index.json: %v", err)
	}
	if !json.Valid(data) {
		t.Fatal("index.json is not valid JSON")
	}
}
|
||||
|
||||
// TestSecurePath checks that securePath anchors relative paths under the
// base directory and sanitizes (rather than rejects) traversal attempts.
func TestSecurePath(t *testing.T) {
	basePath := "/tmp/extract"

	tests := []struct {
		name         string
		relativePath string
		wantResult   string
	}{
		{
			name:         "normal relative path",
			relativePath: "subdir/file.txt",
			wantResult:   "/tmp/extract/subdir/file.txt",
		},
		{
			name:         "simple filename",
			relativePath: "readme.txt",
			wantResult:   "/tmp/extract/readme.txt",
		},
		// Path traversal attempts are sanitized (not rejected): "/../../../etc/passwd"
		// cleans to "/etc/passwd", strips leading "/" → "etc/passwd", joined → base/etc/passwd.
		{
			name:         "path traversal is sanitized to safe path",
			relativePath: "../../../etc/passwd",
			wantResult:   "/tmp/extract/etc/passwd",
		},
		{
			name:         "deeply nested traversal is sanitized",
			relativePath: "a/b/../../../../etc/shadow",
			wantResult:   "/tmp/extract/etc/shadow",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result, err := securePath(basePath, tt.relativePath)
			if err != nil {
				t.Fatalf("securePath(%q, %q) unexpected error: %v", basePath, tt.relativePath, err)
			}
			if result != tt.wantResult {
				t.Errorf("securePath(%q, %q) = %q, want %q", basePath, tt.relativePath, result, tt.wantResult)
			}
		})
	}
}
|
||||
158
pkg/archives/unarchiver.go
Normal file
158
pkg/archives/unarchiver.go
Normal file
@@ -0,0 +1,158 @@
|
||||
package archives
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/mholt/archives"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
)
|
||||
|
||||
const (
	dirPermissions  = 0o700 // default mode for directories created during extraction
	filePermissions = 0o600 // default file mode (extracted files use the mode recorded in the archive)
)
|
||||
|
||||
// securePath joins relativePath onto basePath while guaranteeing the result
// cannot escape basePath. Traversal segments ("..") are sanitized away rather
// than rejected: the path is rooted at "/", cleaned (which collapses any
// ".." against the root), and made relative again before joining.
func securePath(basePath, relativePath string) (string, error) {
	cleaned := filepath.Clean("/" + relativePath)
	cleaned = strings.TrimPrefix(cleaned, string(os.PathSeparator))

	dstPath := filepath.Join(basePath, cleaned)

	// Defense in depth: confirm the joined result still sits under basePath.
	prefix := filepath.Clean(basePath) + string(os.PathSeparator)
	if !strings.HasPrefix(filepath.Clean(dstPath)+string(os.PathSeparator), prefix) {
		return "", fmt.Errorf("illegal file path: %s", dstPath)
	}
	return dstPath, nil
}
|
||||
|
||||
// createDirWithPermissions creates path (and any missing parents) with mode.
// Note: os.MkdirAll applies mode only to directories it actually creates;
// pre-existing directories keep their current permissions.
func createDirWithPermissions(ctx context.Context, path string, mode os.FileMode) error {
	l := log.FromContext(ctx)
	l.Debugf("creating directory [%s]", path)
	if err := os.MkdirAll(path, mode); err != nil {
		return fmt.Errorf("failed to mkdir: %w", err)
	}
	return nil
}
|
||||
|
||||
// sets permissions to a file or directory
|
||||
func setPermissions(path string, mode os.FileMode) error {
|
||||
if err := os.Chmod(path, mode); err != nil {
|
||||
return fmt.Errorf("failed to chmod: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleFile extracts a single archive entry into dst: directories are
// created with the mode recorded in the archive, symlinks/hardlinks are
// skipped, and regular files are copied with their archived mode. Entry
// names are sanitized via securePath so they cannot escape dst. Read-only
// parent directories are temporarily made writable for the duration of the
// write.
func handleFile(ctx context.Context, f archives.FileInfo, dst string) error {
	l := log.FromContext(ctx)
	l.Debugf("handling file [%s]", f.NameInArchive)

	// validate and construct the destination path
	dstPath, pathErr := securePath(dst, f.NameInArchive)
	if pathErr != nil {
		return pathErr
	}

	// ensure the parent directory exists
	parentDir := filepath.Dir(dstPath)
	if dirErr := createDirWithPermissions(ctx, parentDir, dirPermissions); dirErr != nil {
		return dirErr
	}

	// handle directories
	if f.IsDir() {
		// create the directory with permissions from the archive
		if dirErr := createDirWithPermissions(ctx, dstPath, f.Mode()); dirErr != nil {
			return fmt.Errorf("failed to create directory: %w", dirErr)
		}
		l.Debugf("successfully created directory [%s]", dstPath)
		return nil
	}

	// ignore symlinks (or hardlinks)
	if f.LinkTarget != "" {
		l.Debugf("skipping symlink [%s] to [%s]", dstPath, f.LinkTarget)
		return nil
	}

	// check and handle parent directory permissions
	originalMode, statErr := os.Stat(parentDir)
	if statErr != nil {
		return fmt.Errorf("failed to stat parent directory: %w", statErr)
	}

	// if parent directory is read only, temporarily make it writable
	// (owner-write bit 0o200), restoring the original mode on return
	if originalMode.Mode().Perm()&0o200 == 0 {
		l.Debugf("parent directory is read only... temporarily making it writable [%s]", parentDir)
		if chmodErr := os.Chmod(parentDir, originalMode.Mode()|0o200); chmodErr != nil {
			return fmt.Errorf("failed to chmod parent directory: %w", chmodErr)
		}
		defer func() {
			// restore the original permissions after writing
			if chmodErr := os.Chmod(parentDir, originalMode.Mode()); chmodErr != nil {
				l.Debugf("failed to restore original permissions for [%s]: %v", parentDir, chmodErr)
			}
		}()
	}

	// handle regular files
	reader, openErr := f.Open()
	if openErr != nil {
		return fmt.Errorf("failed to open file: %w", openErr)
	}
	defer reader.Close()

	dstFile, createErr := os.OpenFile(dstPath, os.O_CREATE|os.O_WRONLY, f.Mode())
	if createErr != nil {
		return fmt.Errorf("failed to create file: %w", createErr)
	}
	defer dstFile.Close()

	if _, copyErr := io.Copy(dstFile, reader); copyErr != nil {
		return fmt.Errorf("failed to copy: %w", copyErr)
	}
	l.Debugf("successfully extracted file [%s]", dstPath)
	return nil
}
|
||||
|
||||
// unarchives a tarball to a directory, symlinks, and hardlinks are ignored
|
||||
func Unarchive(ctx context.Context, tarball, dst string) error {
|
||||
l := log.FromContext(ctx)
|
||||
l.Debugf("unarchiving temporary archive [%s] to temporary store [%s]", tarball, dst)
|
||||
archiveFile, openErr := os.Open(tarball)
|
||||
if openErr != nil {
|
||||
return fmt.Errorf("failed to open tarball %s: %w", tarball, openErr)
|
||||
}
|
||||
defer archiveFile.Close()
|
||||
|
||||
format, input, identifyErr := archives.Identify(context.Background(), tarball, archiveFile)
|
||||
if identifyErr != nil {
|
||||
return fmt.Errorf("failed to identify format: %w", identifyErr)
|
||||
}
|
||||
|
||||
extractor, ok := format.(archives.Extractor)
|
||||
if !ok {
|
||||
return fmt.Errorf("unsupported format for extraction")
|
||||
}
|
||||
|
||||
if dirErr := createDirWithPermissions(ctx, dst, dirPermissions); dirErr != nil {
|
||||
return fmt.Errorf("failed to create destination directory: %w", dirErr)
|
||||
}
|
||||
|
||||
handler := func(ctx context.Context, f archives.FileInfo) error {
|
||||
return handleFile(ctx, f, dst)
|
||||
}
|
||||
|
||||
if extractErr := extractor.Extract(context.Background(), input, handler); extractErr != nil {
|
||||
return fmt.Errorf("failed to extract: %w", extractErr)
|
||||
}
|
||||
|
||||
l.Infof("unarchiving completed successfully")
|
||||
return nil
|
||||
}
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
var _ partial.Describable = (*marshallableConfig)(nil)
|
||||
|
||||
@@ -6,10 +6,11 @@ import (
|
||||
gv1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
gtypes "github.com/google/go-containerregistry/pkg/v1/types"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
)
|
||||
|
||||
// interface guard
|
||||
@@ -90,6 +91,13 @@ func (f *File) compute() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Manually preserve the Title annotation from the layer
|
||||
// The layer was created with this annotation in getter.LayerFrom
|
||||
if layer.Annotations == nil {
|
||||
layer.Annotations = make(map[string]string)
|
||||
}
|
||||
layer.Annotations[ocispec.AnnotationTitle] = f.client.Name(f.Path)
|
||||
|
||||
cfg := f.client.Config(f.Path)
|
||||
if cfg == nil {
|
||||
cfg = f.client.Config(f.Path)
|
||||
|
||||
@@ -13,9 +13,9 @@ import (
|
||||
|
||||
"github.com/spf13/afero"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/file"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
)
|
||||
|
||||
var (
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
package file
|
||||
|
||||
import (
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
)
|
||||
|
||||
type Option func(*File)
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
package image
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
gname "github.com/google/go-containerregistry/pkg/name"
|
||||
gv1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
)
|
||||
|
||||
var _ artifacts.OCI = (*Image)(nil)
|
||||
@@ -51,3 +52,29 @@ func NewImage(name string, opts ...remote.Option) (*Image, error) {
|
||||
Image: img,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func IsMultiArchImage(name string, opts ...remote.Option) (bool, error) {
|
||||
ref, err := gname.ParseReference(name)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("parsing reference %q: %v", name, err)
|
||||
}
|
||||
|
||||
defaultOpts := []remote.Option{
|
||||
remote.WithAuthFromKeychain(authn.DefaultKeychain),
|
||||
}
|
||||
opts = append(opts, defaultOpts...)
|
||||
|
||||
desc, err := remote.Get(ref, opts...)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("getting image %q: %v", name, err)
|
||||
}
|
||||
|
||||
_, err = desc.ImageIndex()
|
||||
if err != nil {
|
||||
// if the descriptor could not be converted to an image index... it's not a multi-arch image
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// if the descriptor could be converted to an image index... it's a multi-arch image
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"github.com/google/go-containerregistry/pkg/v1/static"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
var _ artifacts.OCI = (*Memory)(nil)
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/opencontainers/go-digest"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/memory"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/memory"
|
||||
)
|
||||
|
||||
func TestMemory_Layers(t *testing.T) {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
package memory
|
||||
|
||||
import "github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
import "hauler.dev/go/hauler/pkg/artifacts"
|
||||
|
||||
type Option func(*Memory)
|
||||
|
||||
|
||||
@@ -3,8 +3,9 @@ package artifacts
|
||||
import "github.com/google/go-containerregistry/pkg/v1"
|
||||
|
||||
// OCI is the bare minimum we need to represent an artifact in an oci layout
|
||||
// At a high level, it is not constrained by an Image's config, manifests, and layer ordinality
|
||||
// This specific implementation fully encapsulates v1.Layer's within a more generic form
|
||||
//
|
||||
// At a high level, it is not constrained by an Image's config, manifests, and layer ordinality
|
||||
// This specific implementation fully encapsulates v1.Layer's within a more generic form
|
||||
type OCI interface {
|
||||
MediaType() string
|
||||
|
||||
|
||||
@@ -1,107 +0,0 @@
|
||||
package chart
|
||||
|
||||
import (
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/image"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
"github.com/rancherfederal/hauler/pkg/content/chart"
|
||||
"github.com/rancherfederal/hauler/pkg/reference"
|
||||
)
|
||||
|
||||
// interface guard
var _ artifacts.OCICollection = (*tchart)(nil)

// tchart is a thick chart that includes all the dependent images as well as the chart itself
type tchart struct {
	chart  *chart.Chart
	config v1alpha1.ThickChart

	computed bool                     // guards compute() so contents are resolved only once
	contents map[string]artifacts.OCI // resolved artifacts keyed by reference
}

// NewThickChart resolves the chart named in cfg and returns a collection
// that lazily includes the chart plus its dependent and extra images.
func NewThickChart(cfg v1alpha1.ThickChart, opts *action.ChartPathOptions) (artifacts.OCICollection, error) {
	o, err := chart.NewChart(cfg.Chart.Name, opts)
	if err != nil {
		return nil, err
	}

	return &tchart{
		chart:    o,
		config:   cfg,
		contents: make(map[string]artifacts.OCI),
	}, nil
}
|
||||
|
||||
// Contents returns every OCI artifact in the collection, computing the set
// on first call.
func (c *tchart) Contents() (map[string]artifacts.OCI, error) {
	if err := c.compute(); err != nil {
		return nil, err
	}
	return c.contents, nil
}

// compute populates c.contents with the dependent images, the chart itself,
// and any configured extra images; it is a no-op after the first success.
func (c *tchart) compute() error {
	if c.computed {
		return nil
	}

	if err := c.dependentImages(); err != nil {
		return err
	}
	if err := c.chartContents(); err != nil {
		return err
	}
	if err := c.extraImages(); err != nil {
		return err
	}

	c.computed = true
	return nil
}
|
||||
|
||||
// chartContents adds the chart itself to the collection, keyed by a tagged
// reference built from the chart name and version.
func (c *tchart) chartContents() error {
	ch, err := c.chart.Load()
	if err != nil {
		return err
	}

	ref, err := reference.NewTagged(ch.Name(), ch.Metadata.Version)
	if err != nil {
		return err
	}
	c.contents[ref.Name()] = c.chart
	return nil
}

// dependentImages renders the chart and adds every image discovered in the
// rendered manifests to the collection.
func (c *tchart) dependentImages() error {
	ch, err := c.chart.Load()
	if err != nil {
		return err
	}

	imgs, err := ImagesInChart(ch)
	if err != nil {
		return err
	}

	for _, img := range imgs.Spec.Images {
		i, err := image.NewImage(img.Name)
		if err != nil {
			return err
		}
		c.contents[img.Name] = i
	}
	return nil
}

// extraImages adds the explicitly configured extra images to the collection.
func (c *tchart) extraImages() error {
	for _, img := range c.config.ExtraImages {
		i, err := image.NewImage(img.Reference)
		if err != nil {
			return err
		}
		c.contents[img.Reference] = i
	}
	return nil
}
|
||||
@@ -1,129 +0,0 @@
|
||||
package chart
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
helmchart "helm.sh/helm/v3/pkg/chart"
|
||||
"helm.sh/helm/v3/pkg/chartutil"
|
||||
"helm.sh/helm/v3/pkg/kube/fake"
|
||||
"helm.sh/helm/v3/pkg/storage"
|
||||
"helm.sh/helm/v3/pkg/storage/driver"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"k8s.io/client-go/util/jsonpath"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
)
|
||||
|
||||
// defaultKnownImagePaths are the JSONPath expressions searched in rendered
// manifests to discover container image references (best-effort detection).
var defaultKnownImagePaths = []string{
	// Deployments & DaemonSets
	"{.spec.template.spec.initContainers[*].image}",
	"{.spec.template.spec.containers[*].image}",

	// Pods
	"{.spec.initContainers[*].image}",
	"{.spec.containers[*].image}",
}
|
||||
|
||||
// ImagesInChart will render a chart and identify all dependent images from it
// by scanning each rendered YAML document with the known image JSONPaths.
func ImagesInChart(c *helmchart.Chart) (v1alpha1.Images, error) {
	docs, err := template(c)
	if err != nil {
		return v1alpha1.Images{}, err
	}

	var images []v1alpha1.Image
	// walk the rendered output one YAML document at a time
	reader := yaml.NewYAMLReader(bufio.NewReader(strings.NewReader(docs)))
	for {
		raw, err := reader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return v1alpha1.Images{}, err
		}

		found := find(raw, defaultKnownImagePaths...)
		for _, f := range found {
			images = append(images, v1alpha1.Image{Name: f})
		}
	}

	ims := v1alpha1.Images{
		Spec: v1alpha1.ImageSpec{
			Images: images,
		},
	}
	return ims, nil
}
|
||||
|
||||
// template dry-run renders the chart entirely client-side (in-memory release
// storage, printing kube client, no cluster access) and returns the combined
// manifest text.
func template(c *helmchart.Chart) (string, error) {
	s := storage.Init(driver.NewMemory())

	templateCfg := &action.Configuration{
		RESTClientGetter: nil,
		Releases:         s,
		KubeClient:       &fake.PrintingKubeClient{Out: io.Discard},
		Capabilities:     chartutil.DefaultCapabilities,
		Log:              func(format string, v ...interface{}) {},
	}

	// TODO: Do we need values if we're claiming this is best effort image detection?
	// Justification being: if users are relying on us to get images from their values, they could just add images to the []ImagesInChart spec of the Store api
	vals := make(map[string]interface{})

	client := action.NewInstall(templateCfg)
	client.ReleaseName = "dry"
	client.DryRun = true
	client.Replace = true
	client.ClientOnly = true
	client.IncludeCRDs = true

	release, err := client.Run(c, vals)
	if err != nil {
		return "", err
	}

	return release.Manifest, nil
}
|
||||
|
||||
// find unmarshals a YAML document and returns every value matched by the
// given JSONPath expressions. Invalid YAML yields nil, and paths that fail
// to parse or execute are silently skipped — this is deliberate best-effort
// detection, not strict validation.
func find(data []byte, paths ...string) []string {
	var (
		pathMatches []string
		obj         interface{}
	)

	if err := yaml.Unmarshal(data, &obj); err != nil {
		return nil
	}
	j := jsonpath.New("")
	j.AllowMissingKeys(true)

	for _, p := range paths {
		r, err := parseJSONPath(obj, j, p)
		if err != nil {
			continue
		}

		pathMatches = append(pathMatches, r...)
	}
	return pathMatches
}
|
||||
|
||||
// parseJSONPath executes a single JSONPath template against data and returns
// the results as a slice of strings.
func parseJSONPath(data interface{}, parser *jsonpath.JSONPath, template string) ([]string, error) {
	buf := new(bytes.Buffer)
	if err := parser.Parse(template); err != nil {
		return nil, err
	}

	if err := parser.Execute(buf, data); err != nil {
		return nil, err
	}

	// jsonpath writes multiple matches space-separated; split them back apart
	f := func(s rune) bool { return s == ' ' }
	r := strings.FieldsFunc(buf.String(), f)
	return r, nil
}
|
||||
@@ -1,232 +0,0 @@
|
||||
package imagetxt
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/rancherfederal/hauler/pkg/log"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
artifact "github.com/rancherfederal/hauler/pkg/artifacts"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/file/getter"
|
||||
"github.com/rancherfederal/hauler/pkg/artifacts/image"
|
||||
)
|
||||
|
||||
// ImageTxt is a collection sourced from an images.txt-style file: each line
// holds an image reference and optional comma-separated source labels.
type ImageTxt struct {
	Ref            string          // location of the images.txt file
	IncludeSources map[string]bool // if non-empty, only entries tagged with these sources are pulled
	ExcludeSources map[string]bool // entries tagged with these sources are skipped (ignored when IncludeSources is set)

	lock     *sync.Mutex // serializes compute() via Contents()
	client   *getter.Client
	computed bool
	contents map[string]artifact.OCI
}

// interface guard
var _ artifact.OCICollection = (*ImageTxt)(nil)
|
||||
|
||||
// Option mutates an ImageTxt during construction via New.
type Option interface {
	Apply(*ImageTxt) error
}

type withIncludeSources []string

func (o withIncludeSources) Apply(it *ImageTxt) error {
	if it.IncludeSources == nil {
		it.IncludeSources = make(map[string]bool)
	}
	for _, s := range o {
		it.IncludeSources[s] = true
	}
	return nil
}

// WithIncludeSources restricts pulling to entries tagged with any of the
// given source labels.
func WithIncludeSources(include ...string) Option {
	return withIncludeSources(include)
}

type withExcludeSources []string

func (o withExcludeSources) Apply(it *ImageTxt) error {
	if it.ExcludeSources == nil {
		it.ExcludeSources = make(map[string]bool)
	}
	for _, s := range o {
		it.ExcludeSources[s] = true
	}
	return nil
}

// WithExcludeSources skips entries tagged with any of the given source
// labels.
func WithExcludeSources(exclude ...string) Option {
	return withExcludeSources(exclude)
}
|
||||
|
||||
// New builds an ImageTxt for ref and applies the given options in order.
func New(ref string, opts ...Option) (*ImageTxt, error) {
	it := &ImageTxt{
		Ref: ref,

		client: getter.NewClient(getter.ClientOptions{}),
		lock:   &sync.Mutex{},
	}

	for i, o := range opts {
		if err := o.Apply(it); err != nil {
			return nil, fmt.Errorf("invalid option %d: %v", i, err)
		}
	}

	return it, nil
}
|
||||
|
||||
// Contents resolves the collection once (under the lock) and returns the
// artifacts keyed by image reference.
func (it *ImageTxt) Contents() (map[string]artifact.OCI, error) {
	it.lock.Lock()
	defer it.lock.Unlock()
	if !it.computed {
		if err := it.compute(); err != nil {
			return nil, fmt.Errorf("compute OCI layout: %v", err)
		}
		it.computed = true
	}
	return it.contents, nil
}
|
||||
|
||||
// compute fetches and parses the images.txt file, applies the
// include/exclude source filters, and populates it.contents with one image
// artifact per selected entry. Callers must hold it.lock (see Contents).
func (it *ImageTxt) compute() error {
	// TODO - pass in logger from context
	l := log.NewLogger(os.Stdout)

	it.contents = make(map[string]artifact.OCI)

	ctx := context.TODO()

	rc, err := it.client.ContentFrom(ctx, it.Ref)
	if err != nil {
		return fmt.Errorf("fetch image.txt ref %s: %w", it.Ref, err)
	}
	defer rc.Close()

	entries, err := splitImagesTxt(rc)
	if err != nil {
		return fmt.Errorf("parse image.txt ref %s: %v", it.Ref, err)
	}

	// collect every source label seen across all entries
	foundSources := make(map[string]bool)
	for _, e := range entries {
		for s := range e.Sources {
			foundSources[s] = true
		}
	}

	var pullAll bool
	targetSources := make(map[string]bool)

	// pull everything when the file has no source labels at all, or when the
	// caller supplied no filters; otherwise build the target source set
	if len(foundSources) == 0 || (len(it.IncludeSources) == 0 && len(it.ExcludeSources) == 0) {
		// pull all found images
		pullAll = true

		if len(foundSources) == 0 {
			l.Infof("image txt file appears to have no sources; pulling all found images")
			if len(it.IncludeSources) != 0 || len(it.ExcludeSources) != 0 {
				l.Warnf("ImageTxt provided include or exclude sources; ignoring")
			}
		} else if len(it.IncludeSources) == 0 && len(it.ExcludeSources) == 0 {
			l.Infof("image-sources txt file not filtered; pulling all found images")
		}
	} else {
		// determine sources to pull; include takes precedence over exclude
		if len(it.IncludeSources) != 0 && len(it.ExcludeSources) != 0 {
			l.Warnf("ImageTxt provided include and exclude sources; using only include sources")
		}

		if len(it.IncludeSources) != 0 {
			targetSources = it.IncludeSources
		} else {
			// start from every found source, then drop the excluded ones
			for s := range foundSources {
				targetSources[s] = true
			}
			for s := range it.ExcludeSources {
				delete(targetSources, s)
			}
		}
		var targetSourcesArr []string
		for s := range targetSources {
			targetSourcesArr = append(targetSourcesArr, s)
		}
		l.Infof("pulling images covering sources %s", strings.Join(targetSourcesArr, ", "))
	}

	// pull each entry that is unfiltered or matches at least one target source
	for _, e := range entries {
		var matchesSourceFilter bool
		if pullAll {
			l.Infof("pulling image %s", e.Reference)
		} else {
			for s := range e.Sources {
				if targetSources[s] {
					matchesSourceFilter = true
					l.Infof("pulling image %s (matched source %s)", e.Reference, s)
					break
				}
			}
		}

		if pullAll || matchesSourceFilter {
			curImage, err := image.NewImage(e.Reference.String())
			if err != nil {
				return fmt.Errorf("pull image %s: %v", e.Reference, err)
			}
			it.contents[e.Reference.String()] = curImage
		}
	}

	return nil
}
|
||||
|
||||
// imageTxtEntry is one parsed line of an images.txt file: an image reference
// plus the set of source labels attached to it.
type imageTxtEntry struct {
	Reference name.Reference
	Sources   map[string]bool
}

// splitImagesTxt parses images.txt content: one image reference per line,
// optionally followed by a single space and comma-separated source labels.
// Blank lines and lines starting with "#" are skipped; lines with more than
// two space-separated fields are rejected.
func splitImagesTxt(r io.Reader) ([]imageTxtEntry, error) {
	var entries []imageTxtEntry
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		curEntry := imageTxtEntry{
			Sources: make(map[string]bool),
		}

		lineContent := scanner.Text()
		if lineContent == "" || strings.HasPrefix(lineContent, "#") {
			// skip past empty and commented lines
			continue
		}
		splitContent := strings.Split(lineContent, " ")
		if len(splitContent) > 2 {
			return nil, fmt.Errorf(
				"invalid image.txt format: must contain only an image reference and sources separated by space; invalid line: %q",
				lineContent)
		}

		curRef, err := name.ParseReference(splitContent[0])
		if err != nil {
			return nil, fmt.Errorf("invalid reference %s: %v", splitContent[0], err)
		}
		curEntry.Reference = curRef

		if len(splitContent) == 2 {
			for _, source := range strings.Split(splitContent[1], ",") {
				curEntry.Sources[source] = true
			}
		}

		entries = append(entries, curEntry)
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("scan contents: %v", err)
	}

	return entries, nil
}
|
||||
@@ -1,209 +0,0 @@
|
||||
package imagetxt
|
||||
|
||||
import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
	"os"
	"sort"
	"strings"
	"testing"

	"github.com/rancherfederal/hauler/pkg/artifacts"
	"github.com/rancherfederal/hauler/pkg/artifacts/image"
)
|
||||
|
||||
// Sentinel errors returned by checkImages; callers compare with errors.Is.
var (
	// ErrRefNotFound indicates an expected ref was absent from contents.
	ErrRefNotFound = errors.New("ref not found")
	// ErrRefNotImage indicates a ref resolved to a non-image artifact.
	ErrRefNotImage = errors.New("ref is not image")
	// ErrExtraRefsFound indicates contents held refs beyond those expected.
	ErrExtraRefsFound = errors.New("extra refs found in contents")
)
|
||||
|
||||
var (
	// testServer serves the fixture files under ./testdata/http/ for the
	// duration of the test binary; started in setup, stopped in teardown.
	testServer *httptest.Server
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
setup()
|
||||
code := m.Run()
|
||||
teardown()
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
func setup() {
|
||||
dir := http.Dir("./testdata/http/")
|
||||
h := http.FileServer(dir)
|
||||
testServer = httptest.NewServer(h)
|
||||
}
|
||||
|
||||
func teardown() {
|
||||
if testServer != nil {
|
||||
testServer.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// failKind identifies the stage of collection construction at which a
// test case expects an error to occur.
type failKind string

const (
	// failKindNew marks failures expected from New.
	failKindNew = failKind("New")
	// failKindContents marks failures expected from Contents.
	failKindContents = failKind("Contents")
)
|
||||
|
||||
func checkError(checkedFailKind failKind) func(*testing.T, error, bool, failKind) {
|
||||
return func(cet *testing.T, err error, testShouldFail bool, testFailKind failKind) {
|
||||
if err != nil {
|
||||
// if error should not have happened at all OR error should have happened
|
||||
// at a different point, test failed
|
||||
if !testShouldFail || testFailKind != checkedFailKind {
|
||||
cet.Fatalf("unexpected error at %s: %v", checkedFailKind, err)
|
||||
}
|
||||
// test should fail at this point, test passed
|
||||
return
|
||||
}
|
||||
// if no error occurred but error should have happened at this point, test
|
||||
// failed
|
||||
if testShouldFail && testFailKind == checkedFailKind {
|
||||
cet.Fatalf("unexpected nil error at %s", checkedFailKind)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestImageTxtCollection runs table-driven tests over New and Contents,
// covering HTTP and local-file refs, the plain and source-annotated txt
// formats, and include/exclude source filtering. Fixture files live under
// ./testdata and are served via the package-level testServer.
func TestImageTxtCollection(t *testing.T) {
	// testEntry describes one scenario: the ref to load, optional source
	// filters, the images expected in the resulting contents, and whether
	// (and at which stage) the scenario should fail.
	type testEntry struct {
		Name           string
		Ref            string
		IncludeSources []string
		ExcludeSources []string
		ExpectedImages []string
		ShouldFail     bool
		FailKind       failKind
	}
	tt := []testEntry{
		{
			Name: "http ref basic",
			Ref:  fmt.Sprintf("%s/images-http.txt", testServer.URL),
			ExpectedImages: []string{
				"busybox",
				"nginx:1.19",
				"rancher/hyperkube:v1.21.7-rancher1",
				"docker.io/rancher/klipper-lb:v0.3.4",
				"quay.io/jetstack/cert-manager-controller:v1.6.1",
			},
		},
		{
			// Source-annotated file with no filters: everything is pulled.
			Name: "http ref sources format pull all",
			Ref:  fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
			ExpectedImages: []string{
				"busybox",
				"nginx:1.19",
				"rancher/hyperkube:v1.21.7-rancher1",
				"docker.io/rancher/klipper-lb:v0.3.4",
				"quay.io/jetstack/cert-manager-controller:v1.6.1",
			},
		},
		{
			Name: "http ref sources format include sources A",
			Ref:  fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
			IncludeSources: []string{
				"core", "rke",
			},
			ExpectedImages: []string{
				"busybox",
				"nginx:1.19",
				"rancher/hyperkube:v1.21.7-rancher1",
			},
		},
		{
			Name: "http ref sources format include sources B",
			Ref:  fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
			IncludeSources: []string{
				"nginx", "rancher", "cert-manager",
			},
			ExpectedImages: []string{
				"nginx:1.19",
				"rancher/hyperkube:v1.21.7-rancher1",
				"docker.io/rancher/klipper-lb:v0.3.4",
				"quay.io/jetstack/cert-manager-controller:v1.6.1",
			},
		},
		{
			Name: "http ref sources format exclude sources A",
			Ref:  fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
			ExcludeSources: []string{
				"cert-manager",
			},
			ExpectedImages: []string{
				"busybox",
				"nginx:1.19",
				"rancher/hyperkube:v1.21.7-rancher1",
				"docker.io/rancher/klipper-lb:v0.3.4",
			},
		},
		{
			Name: "http ref sources format exclude sources B",
			Ref:  fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
			ExcludeSources: []string{
				"core",
			},
			ExpectedImages: []string{
				"nginx:1.19",
				"rancher/hyperkube:v1.21.7-rancher1",
				"docker.io/rancher/klipper-lb:v0.3.4",
				"quay.io/jetstack/cert-manager-controller:v1.6.1",
			},
		},
		{
			// Same content as the basic HTTP case but loaded from disk.
			Name: "local file ref",
			Ref:  "./testdata/images-file.txt",
			ExpectedImages: []string{
				"busybox",
				"nginx:1.19",
				"rancher/hyperkube:v1.21.7-rancher1",
				"docker.io/rancher/klipper-lb:v0.3.4",
				"quay.io/jetstack/cert-manager-controller:v1.6.1",
			},
		},
	}

	// Stage-bound assertion helpers (see checkError).
	checkErrorNew := checkError(failKindNew)
	checkErrorContents := checkError(failKindContents)

	for _, curTest := range tt {
		t.Run(curTest.Name, func(innerT *testing.T) {
			curImageTxt, err := New(curTest.Ref,
				WithIncludeSources(curTest.IncludeSources...),
				WithExcludeSources(curTest.ExcludeSources...),
			)
			checkErrorNew(innerT, err, curTest.ShouldFail, curTest.FailKind)

			ociContents, err := curImageTxt.Contents()
			checkErrorContents(innerT, err, curTest.ShouldFail, curTest.FailKind)

			// Contents must match the expected image set exactly.
			if err := checkImages(ociContents, curTest.ExpectedImages); err != nil {
				innerT.Fatal(err)
			}
		})
	}
}
|
||||
|
||||
func checkImages(content map[string]artifacts.OCI, refs []string) error {
|
||||
contentCopy := make(map[string]artifacts.OCI, len(content))
|
||||
for k, v := range content {
|
||||
contentCopy[k] = v
|
||||
}
|
||||
for _, ref := range refs {
|
||||
target, ok := content[ref]
|
||||
if !ok {
|
||||
return fmt.Errorf("ref %s: %w", ref, ErrRefNotFound)
|
||||
}
|
||||
if _, ok := target.(*image.Image); !ok {
|
||||
return fmt.Errorf("got underlying type %T: %w", target, ErrRefNotImage)
|
||||
}
|
||||
delete(contentCopy, ref)
|
||||
}
|
||||
|
||||
if len(contentCopy) != 0 {
|
||||
return ErrExtraRefsFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user