mirror of
https://github.com/hauler-dev/hauler.git
synced 2026-03-16 16:41:11 +00:00
Compare commits
61 Commits
v1.2.2
...
v2.0.0-dev
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
47479b1fa2 | ||
|
|
bbde34690f | ||
|
|
0dd1896191 | ||
|
|
cc6123918f | ||
|
|
995e538412 | ||
|
|
e2a59508af | ||
|
|
26b11d5abc | ||
|
|
a4b16c723d | ||
|
|
666d220d6c | ||
|
|
4ed7504264 | ||
|
|
e255eda007 | ||
|
|
16f47999b1 | ||
|
|
4c68654424 | ||
|
|
8ecd87d944 | ||
|
|
a355898171 | ||
|
|
3440b1a641 | ||
|
|
9081ac257b | ||
|
|
a01895bfff | ||
|
|
e8a5f82b7d | ||
|
|
dffcb8254c | ||
|
|
4a2b7b13a7 | ||
|
|
cf22fa8551 | ||
|
|
28432fc057 | ||
|
|
ac7d82b55f | ||
|
|
ded947d609 | ||
|
|
ff3cece87f | ||
|
|
c54065f316 | ||
|
|
382dea42a5 | ||
|
|
3c073688f3 | ||
|
|
96bab7b81f | ||
|
|
5ea9b29b8f | ||
|
|
15867e84ad | ||
|
|
c5da018450 | ||
|
|
5edc8802ee | ||
|
|
a3d62b204f | ||
|
|
d85a1b0775 | ||
|
|
ea10bc0256 | ||
|
|
1aea670588 | ||
|
|
f1a632a207 | ||
|
|
802e062f47 | ||
|
|
d227e1f18f | ||
|
|
33a9bb3f78 | ||
|
|
344c008607 | ||
|
|
09a149dab6 | ||
|
|
f7f1e2db8f | ||
|
|
0fafca87f9 | ||
|
|
38e676e934 | ||
|
|
369c85bab9 | ||
|
|
acbd1f1b6a | ||
|
|
3e44c53b75 | ||
|
|
062bb3ff2c | ||
|
|
c8b4e80371 | ||
|
|
d86957bf20 | ||
|
|
4a6fc8cec2 | ||
|
|
e089c31879 | ||
|
|
b7b599e6ed | ||
|
|
ea53002f3a | ||
|
|
4d0f779ae6 | ||
|
|
4d0b407452 | ||
|
|
3b96a95a94 | ||
|
|
f9a188259f |
35
.github/workflows/pages.yaml
vendored
35
.github/workflows/pages.yaml
vendored
@@ -1,40 +1,51 @@
|
||||
# Simple workflow for deploying static content to GitHub Pages
|
||||
name: 📋
|
||||
name: Pages Workflow
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
workflow_dispatch:
|
||||
|
||||
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
|
||||
permissions:
|
||||
contents: read
|
||||
pages: write
|
||||
id-token: write
|
||||
|
||||
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
|
||||
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
|
||||
concurrency:
|
||||
group: "pages"
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
# Single deploy job since we're just deploying
|
||||
deploy:
|
||||
deploy-pages:
|
||||
name: Deploy GitHub Pages
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
environment:
|
||||
name: github-pages
|
||||
url: ${{ steps.deployment.outputs.page_url }}
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Clean Up Actions Tools Cache
|
||||
run: rm -rf /opt/hostedtoolcache
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Configure Git
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
- name: Setup Pages
|
||||
uses: actions/configure-pages@v5
|
||||
- name: Upload artifact
|
||||
uses: actions/upload-pages-artifact@v3
|
||||
|
||||
- name: Upload Pages Artifacts
|
||||
uses: actions/upload-pages-artifact@v4
|
||||
with:
|
||||
path: './static'
|
||||
|
||||
- name: Deploy to GitHub Pages
|
||||
id: deployment
|
||||
uses: actions/deploy-pages@v4
|
||||
|
||||
8
.github/workflows/release.yaml
vendored
8
.github/workflows/release.yaml
vendored
@@ -11,9 +11,13 @@ jobs:
|
||||
name: GoReleaser Job
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
|
||||
steps:
|
||||
- name: Clean Up Actions Tools Cache
|
||||
run: rm -rf /opt/hostedtoolcache
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -23,7 +27,7 @@ jobs:
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
- name: Set Up Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
check-latest: true
|
||||
|
||||
41
.github/workflows/testdata.yaml
vendored
Normal file
41
.github/workflows/testdata.yaml
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
name: Refresh Hauler Testdata
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
refresh-testdata:
|
||||
name: Refresh Hauler Testdata
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
|
||||
steps:
|
||||
- name: Clean Up Actions Tools Cache
|
||||
run: rm -rf /opt/hostedtoolcache
|
||||
|
||||
- name: Checkout Repository
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Fetch Hauler Binary
|
||||
run: curl -sfL https://get.hauler.dev | bash
|
||||
|
||||
- name: Login to GitHub Container Registry and Docker Hub Container Registry
|
||||
run: |
|
||||
hauler login ghcr.io --username ${{ github.repository_owner }} --password ${{ secrets.GITHUB_TOKEN }}
|
||||
hauler login docker.io --username ${{ secrets.DOCKERHUB_USERNAME }} --password ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Process Images for Tests
|
||||
run: |
|
||||
hauler store add image nginx:1.25-alpine
|
||||
hauler store add image nginx:1.26-alpine
|
||||
hauler store add image busybox
|
||||
hauler store add image busybox:stable
|
||||
hauler store add image gcr.io/distroless/base
|
||||
hauler store add image gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
|
||||
|
||||
- name: Push Store Contents to Hauler-Dev GitHub Container Registry
|
||||
run: |
|
||||
hauler store copy registry://ghcr.io/${{ github.repository_owner }}
|
||||
|
||||
- name: Verify Hauler Store Contents
|
||||
run: hauler store info
|
||||
183
.github/workflows/tests.yaml
vendored
183
.github/workflows/tests.yaml
vendored
@@ -14,9 +14,13 @@ jobs:
|
||||
name: Unit Tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
|
||||
steps:
|
||||
- name: Clean Up Actions Tools Cache
|
||||
run: rm -rf /opt/hostedtoolcache
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -26,7 +30,7 @@ jobs:
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
- name: Set Up Go
|
||||
uses: actions/setup-go@v5
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
check-latest: true
|
||||
@@ -47,13 +51,13 @@ jobs:
|
||||
make build-all
|
||||
|
||||
- name: Upload Hauler Binaries
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: hauler-binaries
|
||||
path: dist/*
|
||||
|
||||
- name: Upload Coverage Report
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: coverage-report
|
||||
path: coverage.out
|
||||
@@ -63,9 +67,13 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [unit-tests]
|
||||
timeout-minutes: 30
|
||||
|
||||
steps:
|
||||
- name: Clean Up Actions Tools Cache
|
||||
run: rm -rf /opt/hostedtoolcache
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -81,7 +89,7 @@ jobs:
|
||||
sudo apt-get install -y tree
|
||||
|
||||
- name: Download Artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
uses: actions/download-artifact@v6
|
||||
with:
|
||||
name: hauler-binaries
|
||||
path: dist
|
||||
@@ -114,12 +122,8 @@ jobs:
|
||||
- name: Verify - hauler login
|
||||
run: |
|
||||
hauler login --help
|
||||
hauler login docker.io --username bob --password haulin
|
||||
echo "hauler" | hauler login docker.io -u bob --password-stdin
|
||||
|
||||
- name: Remove Hauler Store Credentials
|
||||
run: |
|
||||
rm -rf ~/.docker/config.json
|
||||
hauler login docker.io --username ${{ secrets.DOCKERHUB_USERNAME }} --password ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
echo ${{ secrets.GITHUB_TOKEN }} | hauler login ghcr.io --username ${{ github.repository_owner }} --password-stdin
|
||||
|
||||
- name: Verify - hauler store
|
||||
run: |
|
||||
@@ -150,6 +154,21 @@ jobs:
|
||||
# verify via the hauler store contents
|
||||
hauler store info
|
||||
|
||||
- name: Verify - hauler store add chart --rewrite
|
||||
run: |
|
||||
# add chart with rewrite flag
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.4 --rewrite custom-path/rancher:2.8.4
|
||||
# verify new ref in store
|
||||
hauler store info | grep 'custom-path/rancher:2.8.4'
|
||||
# confrim leading slash trimmed from rewrite
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.4 --rewrite /custom-path/rancher:2.8.4
|
||||
# verify no leading slash
|
||||
! hauler store info | grep '/custom-path/rancher:2.8.4'
|
||||
# confirm old tag used if not specified
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.4 --rewrite /custom-path/rancher
|
||||
# confirm tag
|
||||
hauler store info | grep '2.8.4'
|
||||
|
||||
- name: Verify - hauler store add file
|
||||
run: |
|
||||
hauler store add file --help
|
||||
@@ -166,14 +185,33 @@ jobs:
|
||||
run: |
|
||||
hauler store add image --help
|
||||
# verify via image reference
|
||||
hauler store add image busybox
|
||||
hauler store add image ghcr.io/hauler-dev/library/busybox
|
||||
# verify via image reference with version and platform
|
||||
hauler store add image busybox:stable --platform linux/amd64
|
||||
hauler store add image ghcr.io/hauler-dev/library/busybox:stable --platform linux/amd64
|
||||
# verify via image reference with full reference
|
||||
hauler store add image gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
|
||||
hauler store add image ghcr.io/hauler-dev/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
|
||||
# verify via the hauler store contents
|
||||
hauler store info
|
||||
|
||||
- name: Verify - hauler store add image --rewrite
|
||||
run: |
|
||||
# add image with rewrite flag
|
||||
hauler store add image ghcr.io/hauler-dev/library/busybox --rewrite custom-registry.io/custom-path/busybox:latest
|
||||
# verify new ref in store
|
||||
hauler store info | grep 'custom-registry.io/custom-path/busybox:latest'
|
||||
# confrim leading slash trimmed from rewrite
|
||||
hauler store add image ghcr.io/hauler-dev/library/busybox --rewrite /custom-path/busybox:latest
|
||||
# verify no leading slash
|
||||
! hauler store info | grep '/custom-path/busybox:latest'
|
||||
# confirm old tag used if not specified
|
||||
hauler store add image ghcr.io/hauler-dev/library/busybox:stable --rewrite /custom-path/busybox
|
||||
# confirm tag
|
||||
hauler store info | grep ':stable'
|
||||
# confirm old registry used if not specified
|
||||
hauler store add image ghcr.io/hauler-dev/library/nginx:1.25-alpine --rewrite custom-path/nginx
|
||||
# verify existing registry associated with rewritten image in store
|
||||
hauler store info | grep 'ghcr.io/custom-path/nginx'
|
||||
|
||||
- name: Verify - hauler store copy
|
||||
run: |
|
||||
hauler store copy --help
|
||||
@@ -225,6 +263,8 @@ jobs:
|
||||
hauler store load
|
||||
# verify via load with multiple files
|
||||
hauler store load --filename haul.tar.zst --filename store.tar.zst
|
||||
# confirm store contents
|
||||
tar -xOf store.tar.zst index.json
|
||||
# verify via load with filename and temp directory
|
||||
hauler store load --filename store.tar.zst --tempdir /opt
|
||||
# verify via load with filename and platform (amd64)
|
||||
@@ -257,8 +297,6 @@ jobs:
|
||||
- name: Verify - hauler store sync
|
||||
run: |
|
||||
hauler store sync --help
|
||||
# download local helm repository
|
||||
curl -sfOL https://github.com/rancherfederal/rancher-cluster-templates/releases/download/rancher-cluster-templates-0.5.2/rancher-cluster-templates-0.5.2.tgz
|
||||
# verify via sync
|
||||
hauler store sync --filename testdata/hauler-manifest-pipeline.yaml
|
||||
# verify via sync with multiple files
|
||||
@@ -320,6 +358,105 @@ jobs:
|
||||
# verify fileserver directory structure
|
||||
tree -hC fileserver
|
||||
|
||||
- name: Verify - hauler store remove (image)
|
||||
run: |
|
||||
hauler store remove --help
|
||||
# add test images
|
||||
hauler store add image ghcr.io/hauler-dev/library/nginx:1.25-alpine
|
||||
hauler store add image ghcr.io/hauler-dev/library/nginx:1.26-alpine
|
||||
# confirm artifacts
|
||||
hauler store info | grep 'nginx:1.25'
|
||||
hauler store info | grep 'nginx:1.26'
|
||||
# count blobs before delete
|
||||
BLOBS_BEFORE=$(find store/blobs/sha256 -type f | wc -l | xargs)
|
||||
echo "blobs before deletion: $BLOBS_BEFORE"
|
||||
# delete one artifact
|
||||
hauler store remove nginx:1.25 --force
|
||||
# verify artifact removed
|
||||
! hauler store info | grep -q "nginx:1.25"
|
||||
# non-deleted artifact exists
|
||||
hauler store info | grep -q "nginx:1.26"
|
||||
# count blobs after delete
|
||||
BLOBS_AFTER=$(find store/blobs/sha256 -type f | wc -l | xargs)
|
||||
echo "blobs after deletion: $BLOBS_AFTER"
|
||||
# verify only unreferenced blobs removed
|
||||
if [ "$BLOBS_AFTER" -ge "$BLOBS_BEFORE" ]; then
|
||||
echo "ERROR: No blobs were cleaned up"
|
||||
exit 1
|
||||
fi
|
||||
if [ "$BLOBS_AFTER" -eq 0 ]; then
|
||||
echo "ERROR: All blobs deleted (shared layers removed)"
|
||||
exit 1
|
||||
fi
|
||||
# verify remaining image not missing layers
|
||||
hauler store extract ghcr.io/hauler-dev/library/nginx:1.26-alpine
|
||||
|
||||
- name: Verify - hauler store remove (chart)
|
||||
run: |
|
||||
hauler store remove --help
|
||||
# add test images
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.4
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable --version 2.8.5
|
||||
# confirm artifacts
|
||||
hauler store info | grep '2.8.4'
|
||||
hauler store info | grep '2.8.5'
|
||||
# count blobs before delete
|
||||
BLOBS_BEFORE=$(find store/blobs/sha256 -type f | wc -l | xargs)
|
||||
echo "blobs before deletion: $BLOBS_BEFORE"
|
||||
# delete one artifact
|
||||
hauler store remove 2.8.4 --force
|
||||
# verify artifact removed
|
||||
! hauler store info | grep -q "2.8.4"
|
||||
# non-deleted artifact exists
|
||||
hauler store info | grep -q "2.8.5"
|
||||
# count blobs after delete
|
||||
BLOBS_AFTER=$(find store/blobs/sha256 -type f | wc -l | xargs)
|
||||
echo "blobs after deletion: $BLOBS_AFTER"
|
||||
# verify only unreferenced blobs removed
|
||||
if [ "$BLOBS_AFTER" -ge "$BLOBS_BEFORE" ]; then
|
||||
echo "ERROR: No blobs were cleaned up"
|
||||
exit 1
|
||||
fi
|
||||
if [ "$BLOBS_AFTER" -eq 0 ]; then
|
||||
echo "ERROR: All blobs deleted (shared layers removed)"
|
||||
exit 1
|
||||
fi
|
||||
# verify remaining image not missing layers
|
||||
hauler store extract hauler/rancher:2.8.5
|
||||
|
||||
- name: Verify - hauler store remove (file)
|
||||
run: |
|
||||
hauler store remove --help
|
||||
# add test images
|
||||
hauler store add file https://get.hauler.dev
|
||||
hauler store add file https://get.rke2.io/install.sh
|
||||
# confirm artifacts
|
||||
hauler store info | grep 'get.hauler.dev'
|
||||
hauler store info | grep 'install.sh'
|
||||
# count blobs before delete
|
||||
BLOBS_BEFORE=$(find store/blobs/sha256 -type f | wc -l | xargs)
|
||||
echo "blobs before deletion: $BLOBS_BEFORE"
|
||||
# delete one artifact
|
||||
hauler store remove get.hauler.dev --force
|
||||
# verify artifact removed
|
||||
! hauler store info | grep -q "get.hauler.dev"
|
||||
# non-deleted artifact exists
|
||||
hauler store info | grep -q "install.sh"
|
||||
# count blobs after delete
|
||||
BLOBS_AFTER=$(find store/blobs/sha256 -type f | wc -l | xargs)
|
||||
echo "blobs after deletion: $BLOBS_AFTER"
|
||||
# verify only unreferenced blobs removed
|
||||
if [ "$BLOBS_AFTER" -ge "$BLOBS_BEFORE" ]; then
|
||||
echo "ERROR: No blobs were cleaned up"
|
||||
exit 1
|
||||
fi
|
||||
if [ "$BLOBS_AFTER" -eq 0 ]; then
|
||||
echo "ERROR: All blobs deleted (shared layers removed)"
|
||||
exit 1
|
||||
fi
|
||||
# verify remaining image not missing layers
|
||||
hauler store extract hauler/install.sh:latest
|
||||
|
||||
- name: Create Hauler Report
|
||||
run: |
|
||||
hauler version >> hauler-report.txt
|
||||
@@ -331,7 +468,17 @@ jobs:
|
||||
hauler store info
|
||||
|
||||
- name: Upload Hauler Report
|
||||
uses: actions/upload-artifact@v4
|
||||
uses: actions/upload-artifact@v6
|
||||
with:
|
||||
name: hauler-report
|
||||
path: hauler-report.txt
|
||||
|
||||
- name: Verify - hauler logout
|
||||
run: |
|
||||
hauler logout --help
|
||||
hauler logout docker.io
|
||||
hauler logout ghcr.io
|
||||
|
||||
- name: Remove Hauler Store Credentials
|
||||
run: |
|
||||
rm -rf ~/.docker/config.json
|
||||
|
||||
113
.goreleaser.yaml
113
.goreleaser.yaml
@@ -15,7 +15,6 @@ release:
|
||||
|
||||
env:
|
||||
- vpkg=hauler.dev/go/hauler/internal/version
|
||||
- cosign_version=v2.2.3+carbide.3
|
||||
|
||||
builds:
|
||||
- dir: ./cmd/hauler/.
|
||||
@@ -39,83 +38,53 @@ changelog:
|
||||
disable: false
|
||||
use: git
|
||||
|
||||
brews:
|
||||
homebrew_casks:
|
||||
- name: hauler
|
||||
repository:
|
||||
owner: hauler-dev
|
||||
name: homebrew-tap
|
||||
token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}"
|
||||
directory: Formula
|
||||
description: "Hauler CLI"
|
||||
description: "Hauler: Airgap Swiss Army Knife"
|
||||
|
||||
dockers:
|
||||
- id: hauler-amd64
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
use: buildx
|
||||
dockers_v2:
|
||||
- id: hauler
|
||||
dockerfile: Dockerfile
|
||||
build_flag_templates:
|
||||
- "--platform=linux/amd64"
|
||||
flags:
|
||||
- "--target=release"
|
||||
image_templates:
|
||||
- "docker.io/hauler/hauler-amd64:{{ .Version }}"
|
||||
- "ghcr.io/hauler-dev/hauler-amd64:{{ .Version }}"
|
||||
- id: hauler-arm64
|
||||
goos: linux
|
||||
goarch: arm64
|
||||
use: buildx
|
||||
dockerfile: Dockerfile
|
||||
build_flag_templates:
|
||||
- "--platform=linux/arm64"
|
||||
- "--target=release"
|
||||
image_templates:
|
||||
- "docker.io/hauler/hauler-arm64:{{ .Version }}"
|
||||
- "ghcr.io/hauler-dev/hauler-arm64:{{ .Version }}"
|
||||
- id: hauler-debug-amd64
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
use: buildx
|
||||
dockerfile: Dockerfile
|
||||
build_flag_templates:
|
||||
- "--platform=linux/amd64"
|
||||
- "--target=debug"
|
||||
image_templates:
|
||||
- "docker.io/hauler/hauler-debug-amd64:{{ .Version }}"
|
||||
- "ghcr.io/hauler-dev/hauler-debug-amd64:{{ .Version }}"
|
||||
- id: hauler-debug-arm64
|
||||
goos: linux
|
||||
goarch: arm64
|
||||
use: buildx
|
||||
dockerfile: Dockerfile
|
||||
build_flag_templates:
|
||||
- "--platform=linux/arm64"
|
||||
- "--target=debug"
|
||||
image_templates:
|
||||
- "docker.io/hauler/hauler-debug-arm64:{{ .Version }}"
|
||||
- "ghcr.io/hauler-dev/hauler-debug-arm64:{{ .Version }}"
|
||||
images:
|
||||
- docker.io/hauler/hauler
|
||||
- ghcr.io/hauler-dev/hauler
|
||||
tags:
|
||||
- "{{ .Version }}"
|
||||
platforms:
|
||||
- linux/amd64
|
||||
- linux/arm64
|
||||
labels:
|
||||
"classification": "UNCLASSIFIED"
|
||||
"org.opencontainers.image.created": "{{.Date}}"
|
||||
"org.opencontainers.image.description": "Hauler: Airgap Swiss Army Knife"
|
||||
"org.opencontainers.image.name": "{{.ProjectName}}-debug"
|
||||
"org.opencontainers.image.revision": "{{.FullCommit}}"
|
||||
"org.opencontainers.image.source": "{{.GitURL}}"
|
||||
"org.opencontainers.image.version": "{{.Version}}"
|
||||
|
||||
docker_manifests:
|
||||
- id: hauler-docker
|
||||
use: docker
|
||||
name_template: "docker.io/hauler/hauler:{{ .Version }}"
|
||||
image_templates:
|
||||
- "docker.io/hauler/hauler-amd64:{{ .Version }}"
|
||||
- "docker.io/hauler/hauler-arm64:{{ .Version }}"
|
||||
- id: hauler-ghcr
|
||||
use: docker
|
||||
name_template: "ghcr.io/hauler-dev/hauler:{{ .Version }}"
|
||||
image_templates:
|
||||
- "ghcr.io/hauler-dev/hauler-amd64:{{ .Version }}"
|
||||
- "ghcr.io/hauler-dev/hauler-arm64:{{ .Version }}"
|
||||
- id: hauler-debug-docker
|
||||
use: docker
|
||||
name_template: "docker.io/hauler/hauler-debug:{{ .Version }}"
|
||||
image_templates:
|
||||
- "docker.io/hauler/hauler-debug-amd64:{{ .Version }}"
|
||||
- "docker.io/hauler/hauler-debug-arm64:{{ .Version }}"
|
||||
- id: hauler-debug-ghcr
|
||||
use: docker
|
||||
name_template: "ghcr.io/hauler-dev/hauler-debug:{{ .Version }}"
|
||||
image_templates:
|
||||
- "ghcr.io/hauler-dev/hauler-debug-amd64:{{ .Version }}"
|
||||
- "ghcr.io/hauler-dev/hauler-debug-arm64:{{ .Version }}"
|
||||
- id: hauler-debug
|
||||
dockerfile: Dockerfile
|
||||
flags:
|
||||
- "--target=debug"
|
||||
images:
|
||||
- docker.io/hauler/hauler-debug
|
||||
- ghcr.io/hauler-dev/hauler-debug
|
||||
tags:
|
||||
- "{{ .Version }}"
|
||||
platforms:
|
||||
- linux/amd64
|
||||
- linux/arm64
|
||||
labels:
|
||||
"classification": "UNCLASSIFIED"
|
||||
"org.opencontainers.image.created": "{{.Date}}"
|
||||
"org.opencontainers.image.description": "Hauler: Airgap Swiss Army Knife"
|
||||
"org.opencontainers.image.name": "{{.ProjectName}}-debug"
|
||||
"org.opencontainers.image.revision": "{{.FullCommit}}"
|
||||
"org.opencontainers.image.source": "{{.GitURL}}"
|
||||
"org.opencontainers.image.version": "{{.Version}}"
|
||||
|
||||
155
DEVELOPMENT.md
Normal file
155
DEVELOPMENT.md
Normal file
@@ -0,0 +1,155 @@
|
||||
# Development Guide
|
||||
|
||||
This document covers how to build `hauler` locally and how the project's branching strategy works.
|
||||
|
||||
It's intended for contributors making code changes or maintainers managing releases.
|
||||
|
||||
---
|
||||
|
||||
## Local Build
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- **Git** - version control of the repository
|
||||
- **Go** — check `go.mod` for the minimum required version
|
||||
- **Make** - optional... for common commands used for builds
|
||||
- **Docker** - optional... for container image builds
|
||||
|
||||
### Clone the Repository
|
||||
|
||||
```bash
|
||||
git clone https://github.com/hauler-dev/hauler.git
|
||||
cd hauler
|
||||
```
|
||||
|
||||
### Build the Binary
|
||||
|
||||
Using `make`...
|
||||
|
||||
```bash
|
||||
# run this command from the project root
|
||||
make build
|
||||
|
||||
# the compiled binary will be output to a directory structure and you can run it directly...
|
||||
./dist/hauler_linux_amd64_v1/hauler
|
||||
./dist/hauler_linux_arm64_v8.0/hauler
|
||||
./dist/hauler_darwin_amd64_v1/hauler
|
||||
./dist/hauler_darwin_arm64_v8.0/hauler
|
||||
./dist/hauler_windows_amd64_v1/hauler.exe
|
||||
./dist/hauler_windows_arm64_v8.0/hauler.exe
|
||||
```
|
||||
|
||||
Using `go`...
|
||||
|
||||
```bash
|
||||
# run this command from the project root
|
||||
go build -o hauler ./cmd/hauler
|
||||
|
||||
# the compiled binary will be output to the project root and you can run it directly...
|
||||
./hauler version
|
||||
```
|
||||
|
||||
### Run Tests
|
||||
|
||||
Using `make`...
|
||||
|
||||
```bash
|
||||
make test
|
||||
```
|
||||
|
||||
Using `go`...
|
||||
|
||||
```bash
|
||||
go test ./...
|
||||
```
|
||||
|
||||
### Useful Tips
|
||||
|
||||
- The `--store` flag defaults to `./store` in the current working directory during local testing, so running `./hauler store add ...` from the project root is safe and self-contained. Use `rm -rf store` in the working directory to clear.
|
||||
- Set `--log-level debug` when developing to get verbose output.
|
||||
|
||||
---
|
||||
|
||||
## Branching Strategy
|
||||
|
||||
Hauler uses a **main-first, release branch** model. All development flows through `main` and `release/x.x` branches are maintained for each minor version to support patching older release lines in parallel.
|
||||
|
||||
### Branch Structure
|
||||
|
||||
```
|
||||
main ← source of truth, all development targets here
|
||||
release/1.3 ← 1.3.x patch line
|
||||
release/1.4 ← 1.4.x patch line
|
||||
```
|
||||
|
||||
Release tags (`v1.4.1`, `v1.3.2`, etc.) are always cut from the corresponding `release/X.Y` branch, never directly from `main`.
|
||||
|
||||
### Where to Target Your Changes
|
||||
|
||||
All pull requests should target `main` by default and maintainers are responsible for cherry picking fixes onto release branches as part of the patch release process.
|
||||
|
||||
| Change Type | Target branch |
|
||||
| :---------: | :-----------: |
|
||||
| New features | `main` |
|
||||
| Bug fixes | `main` |
|
||||
| Security patches | `main` (expedited backport to affected branches) |
|
||||
| Release-specific fix (see below) | `release/X.Y` directly |
|
||||
|
||||
### Creating a New Release Branch
|
||||
|
||||
When `main` is ready to ship a new minor version, a release branch is cut:
|
||||
|
||||
```bash
|
||||
git checkout main
|
||||
git pull origin main
|
||||
git checkout -b release/1.4
|
||||
git push origin release/1.4
|
||||
```
|
||||
|
||||
The first release is then tagged from that branch:
|
||||
|
||||
```bash
|
||||
git tag v1.4.0
|
||||
git push origin v1.4.0
|
||||
```
|
||||
|
||||
Development on `main` immediately continues toward the next minor.
|
||||
|
||||
### Backporting a Fix to a Release Branch
|
||||
|
||||
When a bug fix merged to `main` also needs to apply to an active release line, cherry-pick the commit onto the release branch and open a PR targeting it:
|
||||
|
||||
```bash
|
||||
git checkout release/1.3
|
||||
git pull origin release/1.3
|
||||
git checkout -b backport/fix-description-to-1.3
|
||||
git cherry-pick <commit-sha>
|
||||
git push origin backport/fix-description-to-1.3
|
||||
```
|
||||
|
||||
Open a PR targeting `release/1.3` and reference the original PR in the description. If the cherry-pick doesn't apply cleanly, resolve conflicts and note them in the PR.
|
||||
|
||||
### Fixes That Only Apply to an Older Release Line
|
||||
|
||||
Sometimes a bug exists in an older release but the relevant code has been removed or significantly changed in `main` — making a forward-port unnecessary or nonsensical. In these cases, it's acceptable to open a PR directly against the affected `release/X.Y` branch.
|
||||
|
||||
When doing this, the PR description must explain:
|
||||
|
||||
- Which versions are affected
|
||||
- Why the fix does not apply to `main` or newer release lines (e.g., "this code path was removed in 1.4 when X was refactored")
|
||||
|
||||
This keeps the history auditable and prevents future contributors from wondering why the fix never made it forward.
|
||||
|
||||
### Summary
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────► main (next minor)
|
||||
│
|
||||
│ cherry-pick / backport PRs
|
||||
│ ─────────────────────────► release/1.4 (v1.4.0, v1.4.1 ...)
|
||||
│
|
||||
│ ─────────────────────────► release/1.3 (v1.3.0, v1.3.1 ...)
|
||||
│
|
||||
│ direct fix (older-only bug)
|
||||
│ ─────────────────────────► release/1.2 (critical fixes only)
|
||||
```
|
||||
@@ -1,8 +1,9 @@
|
||||
# builder stage
|
||||
FROM registry.suse.com/bci/bci-base:15.5 AS builder
|
||||
FROM registry.suse.com/bci/bci-base:15.7 AS builder
|
||||
ARG TARGETPLATFORM
|
||||
|
||||
# fetched from goreleaser build proccess
|
||||
COPY hauler /hauler
|
||||
# fetched from goreleaser build process
|
||||
COPY $TARGETPLATFORM/hauler /hauler
|
||||
|
||||
RUN echo "hauler:x:1001:1001::/home/hauler:" > /etc/passwd \
|
||||
&& echo "hauler:x:1001:hauler" > /etc/group \
|
||||
@@ -39,4 +40,4 @@ COPY --from=builder --chown=hauler:hauler /hauler /usr/local/bin/hauler
|
||||
RUN apk --no-cache add curl
|
||||
|
||||
USER hauler
|
||||
WORKDIR /home/hauler
|
||||
WORKDIR /home/hauler
|
||||
|
||||
28
README.md
28
README.md
@@ -12,23 +12,34 @@ For more information, please review the **[Hauler Documentation](https://hauler.
|
||||
|
||||
## Recent Changes
|
||||
|
||||
### In Hauler v1.2.0...
|
||||
### In Hauler v2.0.0...
|
||||
|
||||
- Removed support for `apiVersion` of `v1alpha` and removed the automated conversion functionality to `v1`.
|
||||
- Please note that notices have been provided in this `README`, the `docs`, and in `cli` warnings since Hauler `v1.2.x`.
|
||||
|
||||
### In Hauler v1.4.0...
|
||||
|
||||
- Added a notice to `hauler store sync --products/--product-registry` to warn users the default registry will be updated in a future release.
|
||||
- Users will see logging notices when using the `--products/--product-registry` such as...
|
||||
- `!!! WARNING !!! [--products] will be updating its default registry in a future release...`
|
||||
- `!!! WARNING !!! [--product-registry] will be updating its default registry in a future release...`
|
||||
|
||||
### From older releases...
|
||||
|
||||
- Upgraded the `apiVersion` to `v1` from `v1alpha1`
|
||||
- Users are able to use `v1` and `v1alpha1`, but `v1alpha1` is now deprecated and will be removed in a future release. We will update the community when we fully deprecate and remove the functionality of `v1alpha1`
|
||||
- Users will see logging notices when using the old `apiVersion` such as...
|
||||
- `!!! DEPRECATION WARNING !!! apiVersion [v1alpha1] will be removed in a future release !!! DEPRECATION WARNING !!!`
|
||||
---
|
||||
- Updated the behavior of `hauler store load` to default to loading a `haul` with the name of `haul.tar.zst` and requires the flag of `--filename/-f` to load a `haul` with a different name
|
||||
- Users can load multiple `hauls` by specifying multiple flags of `--filename/-f`
|
||||
- updated command usage: `hauler store load --filename hauling-hauls.tar.zst`
|
||||
- previous command usage (do not use): `hauler store load hauling-hauls.tar.zst`
|
||||
|
||||
---
|
||||
|
||||
- Updated the behavior of `hauler store sync` to default to syncing a `manifest` with the name of `hauler-manifest.yaml` and requires the flag of `--filename/-f` to sync a `manifest` with a different name
|
||||
- Users can sync multiple `manifests` by specifying multiple flags of `--filename/-f`
|
||||
- updated command usage: `hauler store sync --filename hauling-hauls-manifest.yaml`
|
||||
- previous command usage (do not use): `hauler store sync --files hauling-hauls-manifest.yaml`
|
||||
|
||||
---
|
||||
|
||||
Please review the documentation for any additional [Known Limits, Issues, and Notices](https://docs.hauler.dev/docs/known-limits)!
|
||||
|
||||
## Installation
|
||||
@@ -57,7 +68,6 @@ brew install hauler
|
||||
## Acknowledgements
|
||||
|
||||
`Hauler` wouldn't be possible without the open-source community, but there are a few projects that stand out:
|
||||
|
||||
- [oras cli](https://github.com/oras-project/oras)
|
||||
- [cosign](https://github.com/sigstore/cosign)
|
||||
- [containerd](https://github.com/containerd/containerd)
|
||||
- [go-containerregistry](https://github.com/google/go-containerregistry)
|
||||
- [cosign](https://github.com/sigstore/cosign)
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
|
||||
cranecmd "github.com/google/go-containerregistry/cmd/crane/cmd"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
@@ -14,12 +15,20 @@ func New(ctx context.Context, ro *flags.CliRootOpts) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "hauler",
|
||||
Short: "Airgap Swiss Army Knife",
|
||||
Example: " View the Docs: https://docs.hauler.dev\n Environment Variables: " + consts.HaulerDir + " | " + consts.HaulerTempDir + " | " + consts.HaulerStoreDir + " | " + consts.HaulerIgnoreErrors,
|
||||
Example: " View the Docs: https://docs.hauler.dev\n Environment Variables: " + consts.HaulerDir + " | " + consts.HaulerTempDir + " | " + consts.HaulerStoreDir + " | " + consts.HaulerIgnoreErrors + "\n Warnings: Hauler commands and flags marked with (EXPERIMENTAL) are not yet stable and may change in the future.",
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
l := log.FromContext(ctx)
|
||||
l.SetLevel(ro.LogLevel)
|
||||
l.Debugf("running cli command [%s]", cmd.CommandPath())
|
||||
|
||||
// Suppress WARN-level messages from containerd and other
|
||||
// libraries that use the global logrus logger.
|
||||
if ro.LogLevel == "debug" {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
} else {
|
||||
logrus.SetLevel(logrus.ErrorLevel)
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
@@ -30,6 +39,7 @@ func New(ctx context.Context, ro *flags.CliRootOpts) *cobra.Command {
|
||||
flags.AddRootFlags(cmd, ro)
|
||||
|
||||
cmd.AddCommand(cranecmd.NewCmdAuthLogin("hauler"))
|
||||
cmd.AddCommand(cranecmd.NewCmdAuthLogout("hauler"))
|
||||
addStore(cmd, ro)
|
||||
addVersion(cmd, ro)
|
||||
addCompletion(cmd, ro)
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
|
||||
"hauler.dev/go/hauler/cmd/hauler/cli/store"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
)
|
||||
|
||||
func addStore(parent *cobra.Command, ro *flags.CliRootOpts) {
|
||||
@@ -32,6 +33,7 @@ func addStore(parent *cobra.Command, ro *flags.CliRootOpts) {
|
||||
addStoreInfo(rso, ro),
|
||||
addStoreCopy(rso, ro),
|
||||
addStoreAdd(rso, ro),
|
||||
addStoreRemove(rso, ro),
|
||||
)
|
||||
|
||||
parent.AddCommand(cmd)
|
||||
@@ -69,9 +71,16 @@ func addStoreSync(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
|
||||
Short: "Sync content to the content store",
|
||||
Args: cobra.ExactArgs(0),
|
||||
PreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
// Check if the products flag was passed
|
||||
// warn if products or product-registry flag is used by the user
|
||||
if cmd.Flags().Changed("products") {
|
||||
log.FromContext(cmd.Context()).Warnf("!!! WARNING !!! [--products] will be updating its default registry in a future release.")
|
||||
}
|
||||
if cmd.Flags().Changed("product-registry") {
|
||||
log.FromContext(cmd.Context()).Warnf("!!! WARNING !!! [--product-registry] will be updating its default registry in a future release.")
|
||||
}
|
||||
// check if the products flag was passed
|
||||
if len(o.Products) > 0 {
|
||||
// Only clear the default if the user did NOT explicitly set --filename
|
||||
// only clear the default if the user did not explicitly set it
|
||||
if !cmd.Flags().Changed("filename") {
|
||||
o.FileName = []string{}
|
||||
}
|
||||
@@ -207,7 +216,7 @@ func addStoreSave(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman
|
||||
func addStoreInfo(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.InfoOpts{StoreRootOpts: rso}
|
||||
|
||||
var allowedValues = []string{"image", "chart", "file", "sigs", "atts", "sbom", "all"}
|
||||
var allowedValues = []string{"image", "chart", "file", "sigs", "atts", "sbom", "referrer", "all"}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "info",
|
||||
@@ -327,7 +336,10 @@ hauler store add image gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef9
|
||||
|
||||
# fetch image with full image reference, specific platform, and signature verification
|
||||
curl -sfOL https://raw.githubusercontent.com/rancherfederal/carbide-releases/main/carbide-key.pub
|
||||
hauler store add image rgcrprod.azurecr.us/rancher/rke2-runtime:v1.31.5-rke2r1 --platform linux/amd64 --key carbide-key.pub`,
|
||||
hauler store add image rgcrprod.azurecr.us/rancher/rke2-runtime:v1.31.5-rke2r1 --platform linux/amd64 --key carbide-key.pub
|
||||
|
||||
# fetch image and rewrite path
|
||||
hauler store add image busybox --rewrite custom-path/busybox:latest`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
@@ -367,7 +379,10 @@ hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --version 1.2
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable
|
||||
|
||||
# fetch remote helm chart with specific version
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/latest --version 2.10.1`,
|
||||
hauler store add chart rancher --repo https://releases.rancher.com/server-charts/latest --version 2.10.1
|
||||
|
||||
# fetch remote helm chart and rewrite path
|
||||
hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --rewrite custom-path/hauler-chart:latest`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
@@ -377,7 +392,49 @@ hauler store add chart rancher --repo https://releases.rancher.com/server-charts
|
||||
return err
|
||||
}
|
||||
|
||||
return store.AddChartCmd(ctx, o, s, args[0])
|
||||
return store.AddChartCmd(ctx, o, s, args[0], rso, ro)
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func addStoreRemove(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command {
|
||||
o := &flags.RemoveOpts{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "remove <artifact-ref>",
|
||||
Short: "(EXPERIMENTAL) Remove an artifact from the content store",
|
||||
Example: `# remove an image using full store reference
|
||||
hauler store info
|
||||
hauler store remove index.docker.io/library/busybox:stable
|
||||
|
||||
# remove a chart using full store reference
|
||||
hauler store info
|
||||
hauler store remove hauler/rancher:2.8.4
|
||||
|
||||
# remove a file using full store reference
|
||||
hauler store info
|
||||
hauler store remove hauler/rke2-install.sh
|
||||
|
||||
# remove any artifact with the latest tag
|
||||
hauler store remove :latest
|
||||
|
||||
# remove any artifact with 'busybox' in the reference
|
||||
hauler store remove busybox
|
||||
|
||||
# force remove without verification
|
||||
hauler store remove busybox:latest --force`,
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
|
||||
s, err := rso.Store(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return store.RemoveCmd(ctx, o, s, args[0])
|
||||
},
|
||||
}
|
||||
o.AddFlags(cmd)
|
||||
|
||||
@@ -2,10 +2,19 @@ package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
helmchart "helm.sh/helm/v3/pkg/chart"
|
||||
"helm.sh/helm/v3/pkg/chartutil"
|
||||
"helm.sh/helm/v3/pkg/engine"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
@@ -16,6 +25,7 @@ import (
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/reference"
|
||||
"hauler.dev/go/hauler/pkg/retry"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
@@ -43,7 +53,7 @@ func storeFile(ctx context.Context, s *store.Layout, fi v1.File) error {
|
||||
}
|
||||
|
||||
l.Infof("adding file [%s] to the store as [%s]", fi.Path, ref.Name())
|
||||
_, err = s.AddOCI(ctx, f, ref.Name())
|
||||
_, err = s.AddArtifact(ctx, f, ref.Name())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -57,23 +67,35 @@ func AddImageCmd(ctx context.Context, o *flags.AddImageOpts, s *store.Layout, re
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
cfg := v1.Image{
|
||||
Name: reference,
|
||||
Name: reference,
|
||||
Rewrite: o.Rewrite,
|
||||
}
|
||||
|
||||
// Check if the user provided a key.
|
||||
if o.Key != "" {
|
||||
// verify signature using the provided key.
|
||||
err := cosign.VerifySignature(ctx, s, o.Key, o.Tlog, cfg.Name, rso, ro)
|
||||
err := cosign.VerifySignature(ctx, o.Key, o.Tlog, cfg.Name, rso, ro)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Infof("signature verified for image [%s]", cfg.Name)
|
||||
} else if o.CertIdentityRegexp != "" || o.CertIdentity != "" {
|
||||
// verify signature using keyless details.
|
||||
// Keyless (Fulcio) certificates expire after ~10 minutes, so the transparency
|
||||
// log is always required to prove the cert was valid at signing time — ignore
|
||||
// --use-tlog-verify for this path and always check tlog.
|
||||
l.Infof("verifying keyless signature for [%s]", cfg.Name)
|
||||
err := cosign.VerifyKeylessSignature(ctx, o.CertIdentity, o.CertIdentityRegexp, o.CertOidcIssuer, o.CertOidcIssuerRegexp, o.CertGithubWorkflowRepository, cfg.Name, rso, ro)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Infof("keyless signature verified for image [%s]", cfg.Name)
|
||||
}
|
||||
|
||||
return storeImage(ctx, s, cfg, o.Platform, rso, ro)
|
||||
return storeImage(ctx, s, cfg, o.Platform, rso, ro, o.Rewrite)
|
||||
}
|
||||
|
||||
func storeImage(ctx context.Context, s *store.Layout, i v1.Image, platform string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
func storeImage(ctx context.Context, s *store.Layout, i v1.Image, platform string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts, rewrite string) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
if !ro.IgnoreErrors {
|
||||
@@ -96,7 +118,10 @@ func storeImage(ctx context.Context, s *store.Layout, i v1.Image, platform strin
|
||||
}
|
||||
}
|
||||
|
||||
err = cosign.SaveImage(ctx, s, r.Name(), platform, rso, ro)
|
||||
// fetch image along with any associated signatures and attestations
|
||||
err = retry.Operation(ctx, rso, ro, func() error {
|
||||
return s.AddImage(ctx, r.Name(), platform)
|
||||
})
|
||||
if err != nil {
|
||||
if ro.IgnoreErrors {
|
||||
l.Warnf("unable to add image [%s] to store: %v... skipping...", r.Name(), err)
|
||||
@@ -107,30 +132,235 @@ func storeImage(ctx context.Context, s *store.Layout, i v1.Image, platform strin
|
||||
}
|
||||
}
|
||||
|
||||
if rewrite != "" {
|
||||
rewrite = strings.TrimPrefix(rewrite, "/")
|
||||
if !strings.Contains(rewrite, ":") {
|
||||
if tag, ok := r.(name.Tag); ok {
|
||||
rewrite = rewrite + ":" + tag.TagStr()
|
||||
} else {
|
||||
return fmt.Errorf("cannot rewrite digest reference [%s] without an explicit tag in the rewrite", r.Name())
|
||||
}
|
||||
}
|
||||
// rename image name in store
|
||||
newRef, err := name.ParseReference(rewrite)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to parse rewrite name [%s]: %w", rewrite, err)
|
||||
}
|
||||
if err := rewriteReference(ctx, s, r, newRef); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
l.Infof("successfully added image [%s]", r.Name())
|
||||
return nil
|
||||
}
|
||||
|
||||
func AddChartCmd(ctx context.Context, o *flags.AddChartOpts, s *store.Layout, chartName string) error {
|
||||
func rewriteReference(ctx context.Context, s *store.Layout, oldRef name.Reference, newRef name.Reference) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
if err := s.OCI.LoadIndex(); err != nil {
|
||||
return fmt.Errorf("failed to load index: %w", err)
|
||||
}
|
||||
|
||||
//TODO: improve string manipulation
|
||||
oldRefContext := oldRef.Context()
|
||||
newRefContext := newRef.Context()
|
||||
oldRepo := oldRefContext.RepositoryStr()
|
||||
newRepo := newRefContext.RepositoryStr()
|
||||
|
||||
oldTag := oldRef.Identifier()
|
||||
if tag, ok := oldRef.(name.Tag); ok {
|
||||
oldTag = tag.TagStr()
|
||||
}
|
||||
newTag := newRef.Identifier()
|
||||
if tag, ok := newRef.(name.Tag); ok {
|
||||
newTag = tag.TagStr()
|
||||
}
|
||||
|
||||
// ContainerdImageNameKey stores annotationRef.Name() verbatim, which includes the
|
||||
// "index.docker.io" prefix for docker.io images. Do not strip "index." here or the
|
||||
// comparison will never match images stored by writeImage/writeIndex.
|
||||
oldRegistry := oldRefContext.RegistryStr()
|
||||
newRegistry := newRefContext.RegistryStr()
|
||||
// If user omitted a registry in the rewrite string, go-containerregistry defaults to
|
||||
// index.docker.io. Preserve the original registry when the source is non-docker.
|
||||
if newRegistry == "index.docker.io" && oldRegistry != "index.docker.io" {
|
||||
newRegistry = oldRegistry
|
||||
}
|
||||
oldTotal := oldRepo + ":" + oldTag
|
||||
newTotal := newRepo + ":" + newTag
|
||||
oldTotalReg := oldRegistry + "/" + oldTotal
|
||||
newTotalReg := newRegistry + "/" + newTotal
|
||||
|
||||
l.Infof("rewriting [%s] to [%s]", oldTotalReg, newTotalReg)
|
||||
|
||||
//find and update reference
|
||||
found := false
|
||||
if err := s.OCI.Walk(func(k string, d ocispec.Descriptor) error {
|
||||
if d.Annotations[ocispec.AnnotationRefName] == oldTotal && d.Annotations[consts.ContainerdImageNameKey] == oldTotalReg {
|
||||
d.Annotations[ocispec.AnnotationRefName] = newTotal
|
||||
d.Annotations[consts.ContainerdImageNameKey] = newTotalReg
|
||||
found = true
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !found {
|
||||
return fmt.Errorf("could not find image [%s] in store", oldRef.Name())
|
||||
}
|
||||
|
||||
return s.OCI.SaveIndex()
|
||||
|
||||
}
|
||||
|
||||
func AddChartCmd(ctx context.Context, o *flags.AddChartOpts, s *store.Layout, chartName string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
cfg := v1.Chart{
|
||||
Name: chartName,
|
||||
RepoURL: o.ChartOpts.RepoURL,
|
||||
Version: o.ChartOpts.Version,
|
||||
}
|
||||
|
||||
return storeChart(ctx, s, cfg, o.ChartOpts)
|
||||
rewrite := ""
|
||||
if o.Rewrite != "" {
|
||||
rewrite = o.Rewrite
|
||||
}
|
||||
return storeChart(ctx, s, cfg, o, rso, ro, rewrite)
|
||||
}
|
||||
|
||||
func storeChart(ctx context.Context, s *store.Layout, cfg v1.Chart, opts *action.ChartPathOptions) error {
|
||||
// unexported type for the context key to avoid collisions
|
||||
type isSubchartKey struct{}
|
||||
|
||||
// imageregex parses image references starting with "image:" and with optional spaces or optional quotes
|
||||
var imageRegex = regexp.MustCompile(`(?m)^[ \t]*image:[ \t]*['"]?([^\s'"#]+)`)
|
||||
|
||||
// helmAnnotatedImage parses images references from helm chart annotations
|
||||
type helmAnnotatedImage struct {
|
||||
Image string `yaml:"image"`
|
||||
Name string `yaml:"name,omitempty"`
|
||||
}
|
||||
|
||||
// imagesFromChartAnnotations parses image references from helm chart annotations
|
||||
func imagesFromChartAnnotations(c *helmchart.Chart) ([]string, error) {
|
||||
if c == nil || c.Metadata == nil || c.Metadata.Annotations == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// support multiple annotations
|
||||
keys := []string{
|
||||
"helm.sh/images",
|
||||
"images",
|
||||
}
|
||||
|
||||
var out []string
|
||||
for _, k := range keys {
|
||||
raw, ok := c.Metadata.Annotations[k]
|
||||
if !ok || strings.TrimSpace(raw) == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
var items []helmAnnotatedImage
|
||||
if err := yaml.Unmarshal([]byte(raw), &items); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse helm chart annotation %q: %w", k, err)
|
||||
}
|
||||
|
||||
for _, it := range items {
|
||||
img := strings.TrimSpace(it.Image)
|
||||
if img == "" {
|
||||
continue
|
||||
}
|
||||
img = strings.TrimPrefix(img, "/")
|
||||
out = append(out, img)
|
||||
}
|
||||
}
|
||||
|
||||
slices.Sort(out)
|
||||
out = slices.Compact(out)
|
||||
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// imagesFromImagesLock parses image references from images lock files in the chart directory
|
||||
func imagesFromImagesLock(chartDir string) ([]string, error) {
|
||||
var out []string
|
||||
|
||||
for _, name := range []string{
|
||||
"images.lock",
|
||||
"images-lock.yaml",
|
||||
"images.lock.yaml",
|
||||
".images.lock.yaml",
|
||||
} {
|
||||
p := filepath.Join(chartDir, name)
|
||||
b, err := os.ReadFile(p)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
matches := imageRegex.FindAllSubmatch(b, -1)
|
||||
for _, m := range matches {
|
||||
if len(m) > 1 {
|
||||
out = append(out, string(m[1]))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(out) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
for i := range out {
|
||||
out[i] = strings.TrimPrefix(out[i], "/")
|
||||
}
|
||||
slices.Sort(out)
|
||||
out = slices.Compact(out)
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func applyDefaultRegistry(img string, defaultRegistry string) (string, error) {
|
||||
img = strings.TrimSpace(strings.TrimPrefix(img, "/"))
|
||||
if img == "" || defaultRegistry == "" {
|
||||
return img, nil
|
||||
}
|
||||
|
||||
ref, err := reference.Parse(img)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if ref.Context().RegistryStr() != "" {
|
||||
return img, nil
|
||||
}
|
||||
|
||||
newRef, err := reference.Relocate(img, defaultRegistry)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return newRef.Name(), nil
|
||||
}
|
||||
|
||||
func storeChart(ctx context.Context, s *store.Layout, cfg v1.Chart, opts *flags.AddChartOpts, rso *flags.StoreRootOpts, ro *flags.CliRootOpts, rewrite string) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
l.Infof("adding chart [%s] to the store", cfg.Name)
|
||||
// subchart logging prefix
|
||||
isSubchart := ctx.Value(isSubchartKey{}) == true
|
||||
prefix := ""
|
||||
if isSubchart {
|
||||
prefix = " ↳ "
|
||||
}
|
||||
|
||||
// TODO: This shouldn't be necessary
|
||||
opts.RepoURL = cfg.RepoURL
|
||||
opts.Version = cfg.Version
|
||||
// normalize chart name for logging
|
||||
displayName := cfg.Name
|
||||
if strings.Contains(cfg.Name, string(os.PathSeparator)) {
|
||||
displayName = filepath.Base(cfg.Name)
|
||||
}
|
||||
l.Infof("%sadding chart [%s] to the store", prefix, displayName)
|
||||
|
||||
chrt, err := chart.NewChart(cfg.Name, opts)
|
||||
opts.ChartOpts.RepoURL = cfg.RepoURL
|
||||
opts.ChartOpts.Version = cfg.Version
|
||||
|
||||
chrt, err := chart.NewChart(cfg.Name, opts.ChartOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -144,11 +374,267 @@ func storeChart(ctx context.Context, s *store.Layout, cfg v1.Chart, opts *action
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = s.AddOCI(ctx, chrt, ref.Name())
|
||||
if err != nil {
|
||||
|
||||
if _, err := s.AddArtifact(ctx, chrt, ref.Name()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.OCI.SaveIndex(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l.Infof("successfully added chart [%s]", ref.Name())
|
||||
l.Infof("%ssuccessfully added chart [%s:%s]", prefix, c.Name(), c.Metadata.Version)
|
||||
|
||||
tempOverride := rso.TempOverride
|
||||
if tempOverride == "" {
|
||||
tempOverride = os.Getenv(consts.HaulerTempDir)
|
||||
}
|
||||
tempDir, err := os.MkdirTemp(tempOverride, consts.DefaultHaulerTempDirName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create temp dir: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
chartPath := chrt.Path()
|
||||
if strings.HasSuffix(chartPath, ".tgz") {
|
||||
l.Debugf("%sextracting chart archive [%s]", prefix, filepath.Base(chartPath))
|
||||
if err := chartutil.ExpandFile(tempDir, chartPath); err != nil {
|
||||
return fmt.Errorf("failed to extract chart: %w", err)
|
||||
}
|
||||
|
||||
// expanded chart should be in a directory matching the chart name
|
||||
expectedChartDir := filepath.Join(tempDir, c.Name())
|
||||
if _, err := os.Stat(expectedChartDir); err != nil {
|
||||
return fmt.Errorf("chart archive did not expand into expected directory '%s': %w", c.Name(), err)
|
||||
}
|
||||
chartPath = expectedChartDir
|
||||
}
|
||||
|
||||
// add-images
|
||||
if opts.AddImages {
|
||||
userValues := chartutil.Values{}
|
||||
if opts.HelmValues != "" {
|
||||
userValues, err = chartutil.ReadValuesFile(opts.HelmValues)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read helm values file [%s]: %w", opts.HelmValues, err)
|
||||
}
|
||||
}
|
||||
|
||||
// set helm default capabilities
|
||||
caps := chartutil.DefaultCapabilities.Copy()
|
||||
|
||||
// only parse and override if provided kube version
|
||||
if opts.KubeVersion != "" {
|
||||
kubeVersion, err := chartutil.ParseKubeVersion(opts.KubeVersion)
|
||||
if err != nil {
|
||||
l.Warnf("%sinvalid kube-version [%s], using default kubernetes version", prefix, opts.KubeVersion)
|
||||
} else {
|
||||
caps.KubeVersion = *kubeVersion
|
||||
}
|
||||
}
|
||||
|
||||
values, err := chartutil.ToRenderValues(c, userValues, chartutil.ReleaseOptions{Namespace: "hauler"}, caps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// helper for normalization and deduping slices
|
||||
normalizeUniq := func(in []string) []string {
|
||||
if len(in) == 0 {
|
||||
return nil
|
||||
}
|
||||
for i := range in {
|
||||
in[i] = strings.TrimPrefix(in[i], "/")
|
||||
}
|
||||
slices.Sort(in)
|
||||
return slices.Compact(in)
|
||||
}
|
||||
|
||||
// Collect images by method so we can debug counts
|
||||
var (
|
||||
templateImages []string
|
||||
annotationImages []string
|
||||
lockImages []string
|
||||
)
|
||||
|
||||
// parse helm chart templates and values for images
|
||||
rendered, err := engine.Render(c, values)
|
||||
if err != nil {
|
||||
// charts may fail due to values so still try helm chart annotations and lock
|
||||
l.Warnf("%sfailed to render chart [%s]: %v", prefix, c.Name(), err)
|
||||
rendered = map[string]string{}
|
||||
}
|
||||
|
||||
for _, manifest := range rendered {
|
||||
matches := imageRegex.FindAllStringSubmatch(manifest, -1)
|
||||
for _, match := range matches {
|
||||
if len(match) > 1 {
|
||||
templateImages = append(templateImages, match[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parse helm chart annotations for images
|
||||
annotationImages, err = imagesFromChartAnnotations(c)
|
||||
if err != nil {
|
||||
l.Warnf("%sfailed to parse helm chart annotation for [%s:%s]: %v", prefix, c.Name(), c.Metadata.Version, err)
|
||||
annotationImages = nil
|
||||
}
|
||||
|
||||
// parse images lock files for images
|
||||
lockImages, err = imagesFromImagesLock(chartPath)
|
||||
if err != nil {
|
||||
l.Warnf("%sfailed to parse images lock: %v", prefix, err)
|
||||
lockImages = nil
|
||||
}
|
||||
|
||||
// normalization and deduping the slices
|
||||
templateImages = normalizeUniq(templateImages)
|
||||
annotationImages = normalizeUniq(annotationImages)
|
||||
lockImages = normalizeUniq(lockImages)
|
||||
|
||||
// merge all sources then final dedupe
|
||||
images := append(append(templateImages, annotationImages...), lockImages...)
|
||||
images = normalizeUniq(images)
|
||||
|
||||
l.Debugf("%simage references identified for helm template: [%d] image(s)", prefix, len(templateImages))
|
||||
|
||||
l.Debugf("%simage references identified for helm chart annotations: [%d] image(s)", prefix, len(annotationImages))
|
||||
|
||||
l.Debugf("%simage references identified for helm image lock file: [%d] image(s)", prefix, len(lockImages))
|
||||
l.Debugf("%ssuccessfully parsed and deduped image references: [%d] image(s)", prefix, len(images))
|
||||
|
||||
l.Debugf("%ssuccessfully parsed image references %v", prefix, images)
|
||||
|
||||
if len(images) > 0 {
|
||||
l.Infof("%s ↳ identified [%d] image(s) in [%s:%s]", prefix, len(images), c.Name(), c.Metadata.Version)
|
||||
}
|
||||
|
||||
for _, image := range images {
|
||||
image, err := applyDefaultRegistry(image, opts.Registry)
|
||||
if err != nil {
|
||||
if ro.IgnoreErrors {
|
||||
l.Warnf("%s ↳ unable to apply registry to image [%s]: %v... skipping...", prefix, image, err)
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("unable to apply registry to image [%s]: %w", image, err)
|
||||
}
|
||||
|
||||
imgCfg := v1.Image{Name: image}
|
||||
if err := storeImage(ctx, s, imgCfg, opts.Platform, rso, ro, ""); err != nil {
|
||||
if ro.IgnoreErrors {
|
||||
l.Warnf("%s ↳ failed to store image [%s]: %v... skipping...", prefix, image, err)
|
||||
continue
|
||||
}
|
||||
return fmt.Errorf("failed to store image [%s]: %w", image, err)
|
||||
}
|
||||
if err := s.OCI.LoadIndex(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.OCI.SaveIndex(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// add-dependencies
|
||||
if opts.AddDependencies && len(c.Metadata.Dependencies) > 0 {
|
||||
for _, dep := range c.Metadata.Dependencies {
|
||||
l.Infof("%sadding dependent chart [%s:%s]", prefix, dep.Name, dep.Version)
|
||||
|
||||
depOpts := *opts
|
||||
depOpts.AddDependencies = true
|
||||
depOpts.AddImages = true
|
||||
subCtx := context.WithValue(ctx, isSubchartKey{}, true)
|
||||
|
||||
var depCfg v1.Chart
|
||||
var err error
|
||||
|
||||
if strings.HasPrefix(dep.Repository, "file://") || dep.Repository == "" {
|
||||
subchartPath := filepath.Join(chartPath, "charts", dep.Name)
|
||||
|
||||
depCfg = v1.Chart{Name: subchartPath, RepoURL: "", Version: ""}
|
||||
depOpts.ChartOpts.RepoURL = ""
|
||||
depOpts.ChartOpts.Version = ""
|
||||
|
||||
err = storeChart(subCtx, s, depCfg, &depOpts, rso, ro, "")
|
||||
} else {
|
||||
depCfg = v1.Chart{Name: dep.Name, RepoURL: dep.Repository, Version: dep.Version}
|
||||
depOpts.ChartOpts.RepoURL = dep.Repository
|
||||
depOpts.ChartOpts.Version = dep.Version
|
||||
|
||||
err = storeChart(subCtx, s, depCfg, &depOpts, rso, ro, "")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if ro.IgnoreErrors {
|
||||
l.Warnf("%s ↳ failed to add dependent chart [%s]: %v... skipping...", prefix, dep.Name, err)
|
||||
} else {
|
||||
l.Errorf("%s ↳ failed to add dependent chart [%s]: %v", prefix, dep.Name, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// chart rewrite functionality
|
||||
if rewrite != "" {
|
||||
rewrite = strings.TrimPrefix(rewrite, "/")
|
||||
newRef, err := name.ParseReference(rewrite)
|
||||
if err != nil {
|
||||
// error... don't continue with a bad reference
|
||||
return fmt.Errorf("unable to parse rewrite name [%s]: %w", rewrite, err)
|
||||
}
|
||||
|
||||
// if rewrite omits a tag... keep the existing tag
|
||||
oldTag := ref.Identifier()
|
||||
if tag, ok := ref.(name.Tag); ok {
|
||||
oldTag = tag.TagStr()
|
||||
}
|
||||
if !strings.Contains(rewrite, ":") {
|
||||
rewrite = strings.Join([]string{rewrite, oldTag}, ":")
|
||||
newRef, err = name.ParseReference(rewrite)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to parse rewrite name [%s]: %w", rewrite, err)
|
||||
}
|
||||
}
|
||||
|
||||
// rename chart name in store
|
||||
if err := s.OCI.LoadIndex(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
oldRefContext := ref.Context()
|
||||
newRefContext := newRef.Context()
|
||||
|
||||
oldRepo := oldRefContext.RepositoryStr()
|
||||
newRepo := newRefContext.RepositoryStr()
|
||||
newTag := newRef.Identifier()
|
||||
if tag, ok := newRef.(name.Tag); ok {
|
||||
newTag = tag.TagStr()
|
||||
}
|
||||
|
||||
oldTotal := oldRepo + ":" + oldTag
|
||||
newTotal := newRepo + ":" + newTag
|
||||
|
||||
found := false
|
||||
if err := s.OCI.Walk(func(k string, d ocispec.Descriptor) error {
|
||||
if d.Annotations[ocispec.AnnotationRefName] == oldTotal {
|
||||
d.Annotations[ocispec.AnnotationRefName] = newTotal
|
||||
found = true
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !found {
|
||||
return fmt.Errorf("could not find chart [%s] in store", ref.Name())
|
||||
}
|
||||
|
||||
if err := s.OCI.SaveIndex(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
561
cmd/hauler/cli/store/add_test.go
Normal file
561
cmd/hauler/cli/store/add_test.go
Normal file
@@ -0,0 +1,561 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/registry"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
helmchart "helm.sh/helm/v3/pkg/chart"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
// newLocalhostRegistry creates an in-memory OCI registry server listening on
|
||||
// localhost (rather than 127.0.0.1) so go-containerregistry's Scheme() method
|
||||
// automatically selects plain HTTP for "localhost:PORT/…" refs. This is
|
||||
// required for tests that exercise storeImage, which calls s.AddImage without
|
||||
// any custom transport options.
|
||||
func newLocalhostRegistry(t *testing.T) (host string, remoteOpts []remote.Option) {
|
||||
t.Helper()
|
||||
l, err := net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
t.Fatalf("newLocalhostRegistry listen: %v", err)
|
||||
}
|
||||
srv := httptest.NewUnstartedServer(registry.New())
|
||||
srv.Listener = l
|
||||
srv.Start()
|
||||
t.Cleanup(srv.Close)
|
||||
host = strings.TrimPrefix(srv.URL, "http://")
|
||||
remoteOpts = []remote.Option{remote.WithTransport(srv.Client().Transport)}
|
||||
return host, remoteOpts
|
||||
}
|
||||
|
||||
// chartTestdataDir is the relative path from cmd/hauler/cli/store/ to the
|
||||
// top-level testdata directory, matching the convention in chart_test.go.
|
||||
// It must remain relative so that url.ParseRequestURI rejects it (an absolute
|
||||
// path would be mistakenly treated as a URL by chart.NewChart's isUrl check).
|
||||
const chartTestdataDir = "../../../../testdata"
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Unit tests — unexported helpers
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestImagesFromChartAnnotations(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
chart *helmchart.Chart
|
||||
want []string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "nil chart returns nil",
|
||||
chart: nil,
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "no annotations returns nil",
|
||||
chart: &helmchart.Chart{Metadata: &helmchart.Metadata{}},
|
||||
want: nil,
|
||||
},
|
||||
{
|
||||
name: "helm.sh/images annotation returns sorted refs",
|
||||
chart: &helmchart.Chart{
|
||||
Metadata: &helmchart.Metadata{
|
||||
Annotations: map[string]string{
|
||||
"helm.sh/images": "- image: nginx:1.24\n- image: alpine:3.18\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{"alpine:3.18", "nginx:1.24"},
|
||||
},
|
||||
{
|
||||
name: "both annotations with overlap returns deduped union",
|
||||
chart: &helmchart.Chart{
|
||||
Metadata: &helmchart.Metadata{
|
||||
Annotations: map[string]string{
|
||||
"helm.sh/images": "- image: nginx:1.24\n- image: alpine:3.18\n",
|
||||
"images": "- image: nginx:1.24\n- image: busybox:latest\n",
|
||||
},
|
||||
},
|
||||
},
|
||||
want: []string{"alpine:3.18", "busybox:latest", "nginx:1.24"},
|
||||
},
|
||||
{
|
||||
name: "malformed YAML returns error",
|
||||
chart: &helmchart.Chart{
|
||||
Metadata: &helmchart.Metadata{
|
||||
Annotations: map[string]string{
|
||||
// Unclosed flow sequence → YAML syntax error.
|
||||
"helm.sh/images": "- image: [unclosed bracket",
|
||||
},
|
||||
},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got, err := imagesFromChartAnnotations(tc.chart)
|
||||
if (err != nil) != tc.wantErr {
|
||||
t.Fatalf("error = %v, wantErr %v", err, tc.wantErr)
|
||||
}
|
||||
if !reflect.DeepEqual(got, tc.want) {
|
||||
t.Errorf("got %v, want %v", got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestImagesFromImagesLock(t *testing.T) {
|
||||
writeFile := func(dir, fname, content string) {
|
||||
t.Helper()
|
||||
if err := os.WriteFile(filepath.Join(dir, fname), []byte(content), 0o644); err != nil {
|
||||
t.Fatalf("write %s: %v", fname, err)
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("images.lock with image lines returns sorted refs", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeFile(dir, "images.lock", "image: rancher/rancher:v2.9\nimage: nginx:1.24\n")
|
||||
got, err := imagesFromImagesLock(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
want := []string{"nginx:1.24", "rancher/rancher:v2.9"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("images-lock.yaml returns refs", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeFile(dir, "images-lock.yaml", "image: alpine:3.18\n")
|
||||
got, err := imagesFromImagesLock(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
want := []string{"alpine:3.18"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("empty dir returns nil", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
got, err := imagesFromImagesLock(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if got != nil {
|
||||
t.Errorf("expected nil, got %v", got)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("multiple lock files merged and deduped", func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
writeFile(dir, "images.lock", "image: nginx:1.24\nimage: alpine:3.18\n")
|
||||
writeFile(dir, "images-lock.yaml", "image: nginx:1.24\nimage: busybox:latest\n")
|
||||
got, err := imagesFromImagesLock(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
want := []string{"alpine:3.18", "busybox:latest", "nginx:1.24"}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestApplyDefaultRegistry(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
img string
|
||||
registry string
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "empty img returns empty",
|
||||
img: "",
|
||||
registry: "myregistry.io",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "empty registry returns img unchanged",
|
||||
img: "rancher/rancher:v2.9",
|
||||
registry: "",
|
||||
want: "rancher/rancher:v2.9",
|
||||
},
|
||||
{
|
||||
name: "img without registry gets registry prepended",
|
||||
img: "rancher/rancher:v2.9",
|
||||
registry: "myregistry.io",
|
||||
want: "myregistry.io/rancher/rancher:v2.9",
|
||||
},
|
||||
{
|
||||
name: "img with existing registry unchanged",
|
||||
img: "ghcr.io/rancher/rancher:v2.9",
|
||||
registry: "myregistry.io",
|
||||
want: "ghcr.io/rancher/rancher:v2.9",
|
||||
},
|
||||
{
|
||||
name: "invalid ref with spaces returns error",
|
||||
img: "invalid ref with spaces",
|
||||
registry: "myregistry.io",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got, err := applyDefaultRegistry(tc.img, tc.registry)
|
||||
if (err != nil) != tc.wantErr {
|
||||
t.Fatalf("error = %v, wantErr %v", err, tc.wantErr)
|
||||
}
|
||||
if !tc.wantErr && got != tc.want {
|
||||
t.Errorf("got %q, want %q", got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRewriteReference(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
t.Run("valid rewrite updates store annotations", func(t *testing.T) {
|
||||
host, rOpts := newTestRegistry(t)
|
||||
seedImage(t, host, "src/repo", "v1", rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, host+"/src/repo:v1", "", rOpts...); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
oldRef, err := name.NewTag(host+"/src/repo:v1", name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("parse oldRef: %v", err)
|
||||
}
|
||||
newRef, err := name.NewTag(host+"/dst/repo:v2", name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("parse newRef: %v", err)
|
||||
}
|
||||
|
||||
if err := rewriteReference(ctx, s, oldRef, newRef); err != nil {
|
||||
t.Fatalf("rewriteReference: %v", err)
|
||||
}
|
||||
|
||||
assertArtifactInStore(t, s, "dst/repo:v2")
|
||||
})
|
||||
|
||||
t.Run("old ref not found returns error", func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
oldRef, _ := name.NewTag("docker.io/missing/repo:v1")
|
||||
newRef, _ := name.NewTag("docker.io/new/repo:v2")
|
||||
|
||||
err := rewriteReference(ctx, s, oldRef, newRef)
|
||||
if err == nil {
|
||||
t.Fatal("expected error, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "could not find") {
|
||||
t.Errorf("expected 'could not find' in error, got: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Integration tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestStoreFile(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
t.Run("local file stored successfully", func(t *testing.T) {
|
||||
tmp, err := os.CreateTemp(t.TempDir(), "testfile-*.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmp.WriteString("hello hauler") //nolint:errcheck
|
||||
tmp.Close()
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := storeFile(ctx, s, v1.File{Path: tmp.Name()}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, filepath.Base(tmp.Name()))
|
||||
})
|
||||
|
||||
t.Run("HTTP URL stored under basename", func(t *testing.T) {
|
||||
url := seedFileInHTTPServer(t, "script.sh", "#!/bin/sh\necho ok")
|
||||
s := newTestStore(t)
|
||||
if err := storeFile(ctx, s, v1.File{Path: url}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "script.sh")
|
||||
})
|
||||
|
||||
t.Run("name override changes stored ref", func(t *testing.T) {
|
||||
tmp, err := os.CreateTemp(t.TempDir(), "orig-*.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmp.Close()
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := storeFile(ctx, s, v1.File{Path: tmp.Name(), Name: "custom.sh"}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "custom.sh")
|
||||
})
|
||||
|
||||
t.Run("nonexistent local path returns error", func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
err := storeFile(ctx, s, v1.File{Path: "/nonexistent/path/missing-file.txt"})
|
||||
if err == nil {
|
||||
t.Fatal("expected error for nonexistent path, got nil")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestAddFileCmd(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
tmp, err := os.CreateTemp(t.TempDir(), "rawfile-*.txt")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
tmp.WriteString("raw content") //nolint:errcheck
|
||||
tmp.Close()
|
||||
|
||||
o := &flags.AddFileOpts{Name: "renamed.txt"}
|
||||
if err := AddFileCmd(ctx, o, s, tmp.Name()); err != nil {
|
||||
t.Fatalf("AddFileCmd: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "renamed.txt")
|
||||
}
|
||||
|
||||
func TestStoreImage(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
seedImage(t, host, "test/repo", "v1", rOpts...)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
imageName string
|
||||
ignoreErrors bool
|
||||
wantErr bool
|
||||
wantInStore string
|
||||
}{
|
||||
{
|
||||
name: "valid image tag stored",
|
||||
imageName: host + "/test/repo:v1",
|
||||
wantInStore: "test/repo:v1",
|
||||
},
|
||||
{
|
||||
name: "invalid ref string returns error",
|
||||
imageName: "INVALID IMAGE REF !! ##",
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "nonexistent image with IgnoreErrors returns nil",
|
||||
imageName: host + "/nonexistent/image:missing",
|
||||
ignoreErrors: true,
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "nonexistent image without IgnoreErrors returns error",
|
||||
imageName: host + "/nonexistent/image:missing",
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
ro.IgnoreErrors = tc.ignoreErrors
|
||||
|
||||
err := storeImage(ctx, s, v1.Image{Name: tc.imageName}, "", rso, ro, "")
|
||||
if (err != nil) != tc.wantErr {
|
||||
t.Fatalf("error = %v, wantErr %v", err, tc.wantErr)
|
||||
}
|
||||
if tc.wantInStore != "" {
|
||||
assertArtifactInStore(t, s, tc.wantInStore)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestStoreImage_Rewrite(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
|
||||
t.Run("explicit rewrite tag changes ref", func(t *testing.T) {
|
||||
seedImage(t, host, "src/repo", "v1", rOpts...)
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
err := storeImage(ctx, s, v1.Image{Name: host + "/src/repo:v1"}, "", rso, ro, "newrepo/img:v2")
|
||||
if err != nil {
|
||||
t.Fatalf("storeImage with rewrite: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "newrepo/img:v2")
|
||||
})
|
||||
|
||||
t.Run("rewrite without tag inherits source tag", func(t *testing.T) {
|
||||
seedImage(t, host, "src/repo", "v3", rOpts...)
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
err := storeImage(ctx, s, v1.Image{Name: host + "/src/repo:v3"}, "", rso, ro, "newrepo/img")
|
||||
if err != nil {
|
||||
t.Fatalf("storeImage with tagless rewrite: %v", err)
|
||||
}
|
||||
// tag is inherited from source ("v3")
|
||||
assertArtifactInStore(t, s, "newrepo/img:v3")
|
||||
})
|
||||
|
||||
t.Run("rewrite without tag on digest source ref returns error", func(t *testing.T) {
|
||||
img := seedImage(t, host, "src/repo", "digest-src", rOpts...)
|
||||
h, err := img.Digest()
|
||||
if err != nil {
|
||||
t.Fatalf("img.Digest: %v", err)
|
||||
}
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
digestRef := host + "/src/repo@" + h.String()
|
||||
err = storeImage(ctx, s, v1.Image{Name: digestRef}, "", rso, ro, "newrepo/img")
|
||||
if err == nil {
|
||||
t.Fatal("expected error for digest ref rewrite without explicit tag, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "cannot rewrite digest reference") {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestStoreImage_MultiArch(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
seedIndex(t, host, "test/multiarch", "v1", rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := storeImage(ctx, s, v1.Image{Name: host + "/test/multiarch:v1"}, "", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage multi-arch index: %v", err)
|
||||
}
|
||||
// Full index (both platforms) must be stored as an index, not a single image.
|
||||
assertArtifactKindInStore(t, s, "test/multiarch:v1", consts.KindAnnotationIndex)
|
||||
}
|
||||
|
||||
func TestStoreImage_PlatformFilter(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
seedIndex(t, host, "test/multiarch", "v2", rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := storeImage(ctx, s, v1.Image{Name: host + "/test/multiarch:v2"}, "linux/amd64", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage with platform filter: %v", err)
|
||||
}
|
||||
// Platform filter resolves a single manifest from the index → stored as a single image.
|
||||
assertArtifactKindInStore(t, s, "test/multiarch:v2", consts.KindAnnotationImage)
|
||||
}
|
||||
|
||||
func TestStoreImage_CosignV2Artifacts(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
|
||||
img := seedImage(t, host, "test/signed", "v1", rOpts...)
|
||||
seedCosignV2Artifacts(t, host, "test/signed", img, rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := storeImage(ctx, s, v1.Image{Name: host + "/test/signed:v1"}, "", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage: %v", err)
|
||||
}
|
||||
assertArtifactKindInStore(t, s, "test/signed:v1", consts.KindAnnotationSigs)
|
||||
assertArtifactKindInStore(t, s, "test/signed:v1", consts.KindAnnotationAtts)
|
||||
assertArtifactKindInStore(t, s, "test/signed:v1", consts.KindAnnotationSboms)
|
||||
}
|
||||
|
||||
func TestStoreImage_CosignV3Referrer(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
|
||||
img := seedImage(t, host, "test/image", "v1", rOpts...)
|
||||
seedOCI11Referrer(t, host, "test/image", img, rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := storeImage(ctx, s, v1.Image{Name: host + "/test/image:v1"}, "", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage: %v", err)
|
||||
}
|
||||
assertReferrerInStore(t, s, "test/image:v1")
|
||||
}
|
||||
|
||||
func TestAddChartCmd_LocalTgz(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
o := newAddChartOpts(chartTestdataDir, "")
|
||||
if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil {
|
||||
t.Fatalf("AddChartCmd: %v", err)
|
||||
}
|
||||
// Hauler stores all artifacts (files, charts) via store.AddArtifact, which
|
||||
// unconditionally sets KindAnnotationName = KindAnnotationImage (see
|
||||
// pkg/store/store.go). There is no separate "chart" kind — charts are
|
||||
// wrapped in an OCI image manifest and tagged with KindAnnotationImage.
|
||||
assertArtifactKindInStore(t, s, "rancher-cluster-templates", consts.KindAnnotationImage)
|
||||
}
|
||||
|
||||
func TestAddChartCmd_WithFileDep(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
o := newAddChartOpts(chartTestdataDir, "")
|
||||
if err := AddChartCmd(ctx, o, s, "chart-with-file-dependency-chart-1.0.0.tgz", rso, ro); err != nil {
|
||||
t.Fatalf("AddChartCmd: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "chart-with-file-dependency-chart")
|
||||
}
|
||||
|
||||
func TestStoreChart_Rewrite(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
o := newAddChartOpts(chartTestdataDir, "")
|
||||
o.Rewrite = "myorg/custom-chart"
|
||||
|
||||
if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil {
|
||||
t.Fatalf("AddChartCmd with rewrite: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "myorg/custom-chart")
|
||||
}
|
||||
@@ -2,42 +2,274 @@ package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"oras.land/oras-go/pkg/content"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/errdefs"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/cosign"
|
||||
"hauler.dev/go/hauler/internal/mapper"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/retry"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
func CopyCmd(ctx context.Context, o *flags.CopyOpts, s *store.Layout, targetRef string, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
if o.Username != "" || o.Password != "" {
|
||||
return fmt.Errorf("--username/--password have been deprecated, please use 'hauler login'")
|
||||
}
|
||||
|
||||
if !s.IndexExists() {
|
||||
return fmt.Errorf("store index not found: run 'hauler store add/sync/load' first")
|
||||
}
|
||||
|
||||
components := strings.SplitN(targetRef, "://", 2)
|
||||
switch components[0] {
|
||||
case "dir":
|
||||
l.Debugf("identified directory target reference of [%s]", components[1])
|
||||
fs := content.NewFile(components[1])
|
||||
defer fs.Close()
|
||||
|
||||
_, err := s.CopyAll(ctx, fs, nil)
|
||||
// Create destination directory if it doesn't exist
|
||||
if err := os.MkdirAll(components[1], 0755); err != nil {
|
||||
return fmt.Errorf("failed to create destination directory: %w", err)
|
||||
}
|
||||
|
||||
// For directory targets, extract files and charts (not images)
|
||||
err := s.Walk(func(reference string, desc ocispec.Descriptor) error {
|
||||
// Skip cosign sig/att/sbom artifacts — they're registry-only metadata,
|
||||
// not extractable as files or charts.
|
||||
kind := desc.Annotations[consts.KindAnnotationName]
|
||||
switch kind {
|
||||
case consts.KindAnnotationSigs, consts.KindAnnotationAtts, consts.KindAnnotationSboms:
|
||||
l.Debugf("skipping cosign artifact [%s] for directory target", reference)
|
||||
return nil
|
||||
}
|
||||
if strings.HasPrefix(kind, consts.KindAnnotationReferrers) {
|
||||
l.Debugf("skipping OCI referrer [%s] for directory target", reference)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle different media types
|
||||
switch desc.MediaType {
|
||||
case ocispec.MediaTypeImageIndex, consts.DockerManifestListSchema2:
|
||||
// Multi-platform index - process each child manifest
|
||||
rc, err := s.Fetch(ctx, desc)
|
||||
if err != nil {
|
||||
l.Warnf("failed to fetch index [%s]: %v", reference, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
var index ocispec.Index
|
||||
if err := json.NewDecoder(rc).Decode(&index); err != nil {
|
||||
if cerr := rc.Close(); cerr != nil {
|
||||
l.Warnf("failed to close index reader for [%s]: %v", reference, cerr)
|
||||
}
|
||||
l.Warnf("failed to decode index for [%s]: %v", reference, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close rc immediately after decoding - we're done reading from it
|
||||
if cerr := rc.Close(); cerr != nil {
|
||||
l.Warnf("failed to close index reader for [%s]: %v", reference, cerr)
|
||||
}
|
||||
|
||||
// Process each manifest in the index
|
||||
for _, manifestDesc := range index.Manifests {
|
||||
manifestRC, err := s.Fetch(ctx, manifestDesc)
|
||||
if err != nil {
|
||||
l.Warnf("failed to fetch child manifest: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
var m ocispec.Manifest
|
||||
if err := json.NewDecoder(manifestRC).Decode(&m); err != nil {
|
||||
manifestRC.Close()
|
||||
l.Warnf("failed to decode child manifest: %v", err)
|
||||
continue
|
||||
}
|
||||
manifestRC.Close()
|
||||
|
||||
// Skip images - only extract files and charts
|
||||
if m.Config.MediaType == consts.DockerConfigJSON ||
|
||||
m.Config.MediaType == ocispec.MediaTypeImageConfig {
|
||||
l.Debugf("skipping image manifest in index [%s]", reference)
|
||||
continue
|
||||
}
|
||||
|
||||
// Create mapper and extract
|
||||
mapperStore, err := mapper.FromManifest(m, components[1])
|
||||
if err != nil {
|
||||
l.Warnf("failed to create mapper for child: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Note: We can't call s.Copy with manifestDesc because it's not in the nameMap
|
||||
// Instead, we need to manually push through the mapper
|
||||
if err := extractManifestContent(ctx, s, manifestDesc, m, mapperStore); err != nil {
|
||||
l.Warnf("failed to extract child: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
l.Debugf("extracted child manifest from index [%s]", reference)
|
||||
}
|
||||
|
||||
case ocispec.MediaTypeImageManifest, consts.DockerManifestSchema2:
|
||||
// Single-platform manifest
|
||||
rc, err := s.Fetch(ctx, desc)
|
||||
if err != nil {
|
||||
l.Warnf("failed to fetch [%s]: %v", reference, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
var m ocispec.Manifest
|
||||
if err := json.NewDecoder(rc).Decode(&m); err != nil {
|
||||
rc.Close()
|
||||
l.Warnf("failed to decode manifest for [%s]: %v", reference, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Skip images - only extract files and charts for directory targets
|
||||
if m.Config.MediaType == consts.DockerConfigJSON ||
|
||||
m.Config.MediaType == ocispec.MediaTypeImageConfig {
|
||||
rc.Close()
|
||||
l.Debugf("skipping image [%s] for directory target", reference)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create a mapper store based on the manifest type
|
||||
mapperStore, err := mapper.FromManifest(m, components[1])
|
||||
if err != nil {
|
||||
rc.Close()
|
||||
l.Warnf("failed to create mapper for [%s]: %v", reference, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy/extract the content
|
||||
_, err = s.Copy(ctx, reference, mapperStore, "")
|
||||
if err != nil {
|
||||
rc.Close()
|
||||
l.Warnf("failed to extract [%s]: %v", reference, err)
|
||||
return nil
|
||||
}
|
||||
rc.Close()
|
||||
|
||||
l.Debugf("extracted [%s] to directory", reference)
|
||||
|
||||
default:
|
||||
l.Debugf("skipping unsupported media type [%s] for [%s]", desc.MediaType, reference)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case "registry":
|
||||
l.Debugf("identified registry target reference of [%s]", components[1])
|
||||
ropts := content.RegistryOptions{
|
||||
Username: o.Username,
|
||||
Password: o.Password,
|
||||
Insecure: o.Insecure,
|
||||
registryOpts := content.RegistryOptions{
|
||||
PlainHTTP: o.PlainHTTP,
|
||||
Insecure: o.Insecure,
|
||||
}
|
||||
|
||||
err := cosign.LoadImages(ctx, s, components[1], ropts, ro)
|
||||
// Pre-build a map from base ref → image manifest digest so that sig/att/sbom
|
||||
// descriptors (which store the base image ref, not the cosign tag) can be routed
|
||||
// to the correct destination tag using the cosign tag convention.
|
||||
refDigest := make(map[string]string)
|
||||
if err := s.Walk(func(_ string, desc ocispec.Descriptor) error {
|
||||
kind := desc.Annotations[consts.KindAnnotationName]
|
||||
if kind == consts.KindAnnotationImage || kind == consts.KindAnnotationIndex {
|
||||
if baseRef := desc.Annotations[ocispec.AnnotationRefName]; baseRef != "" {
|
||||
refDigest[baseRef] = desc.Digest.String()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sigExts := map[string]string{
|
||||
consts.KindAnnotationSigs: ".sig",
|
||||
consts.KindAnnotationAtts: ".att",
|
||||
consts.KindAnnotationSboms: ".sbom",
|
||||
}
|
||||
|
||||
var fatalErr error
|
||||
err := s.Walk(func(reference string, desc ocispec.Descriptor) error {
|
||||
if fatalErr != nil {
|
||||
return nil
|
||||
}
|
||||
baseRef := desc.Annotations[ocispec.AnnotationRefName]
|
||||
if baseRef == "" {
|
||||
return nil
|
||||
}
|
||||
if o.Only != "" && !strings.Contains(baseRef, o.Only) {
|
||||
l.Debugf("skipping [%s] (not matching --only filter)", baseRef)
|
||||
return nil
|
||||
}
|
||||
|
||||
// For sig/att/sbom descriptors, derive the cosign tag from the parent
|
||||
// image's manifest digest rather than using AnnotationRefName directly.
|
||||
destRef := baseRef
|
||||
kind := desc.Annotations[consts.KindAnnotationName]
|
||||
if ext, isSigKind := sigExts[kind]; isSigKind {
|
||||
if imgDigest, ok := refDigest[baseRef]; ok {
|
||||
digestTag := strings.ReplaceAll(imgDigest, ":", "-")
|
||||
repo := baseRef
|
||||
if colon := strings.LastIndex(baseRef, ":"); colon != -1 {
|
||||
repo = baseRef[:colon]
|
||||
}
|
||||
destRef = repo + ":" + digestTag + ext
|
||||
}
|
||||
} else if strings.HasPrefix(kind, consts.KindAnnotationReferrers) {
|
||||
// OCI 1.1 referrer (cosign v3 new-bundle-format): push by manifest digest so
|
||||
// the target registry wires it up via the OCI Referrers API (subject field).
|
||||
// For registries that don't support the Referrers API natively, the manifest
|
||||
// is still pushed intact; the subject linkage depends on registry support.
|
||||
repo := baseRef
|
||||
if colon := strings.LastIndex(baseRef, ":"); colon != -1 {
|
||||
repo = baseRef[:colon]
|
||||
}
|
||||
destRef = repo + "@" + desc.Digest.String()
|
||||
}
|
||||
|
||||
toRef, err := content.RewriteRefToRegistry(destRef, components[1])
|
||||
if err != nil {
|
||||
l.Warnf("failed to rewrite ref [%s]: %v", baseRef, err)
|
||||
return nil
|
||||
}
|
||||
l.Infof("%s", destRef)
|
||||
// A fresh target per artifact gives each push its own in-memory status
|
||||
// tracker. Containerd's tracker keys blobs by digest only (not repo),
|
||||
// so a shared tracker would mark shared blobs as "already exists" after
|
||||
// the first image, skipping the per-repository blob link creation that
|
||||
// Docker Distribution requires for manifest validation.
|
||||
target := content.NewRegistryTarget(components[1], registryOpts)
|
||||
var pushed ocispec.Descriptor
|
||||
if err := retry.Operation(ctx, o.StoreRootOpts, ro, func() error {
|
||||
var copyErr error
|
||||
pushed, copyErr = s.Copy(ctx, reference, target, toRef)
|
||||
return copyErr
|
||||
}); err != nil {
|
||||
if !ro.IgnoreErrors {
|
||||
fatalErr = err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
l.Infof("%s: digest: %s size: %d", toRef, pushed.Digest, pushed.Size)
|
||||
return nil
|
||||
})
|
||||
if fatalErr != nil {
|
||||
return fatalErr
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -49,3 +281,73 @@ func CopyCmd(ctx context.Context, o *flags.CopyOpts, s *store.Layout, targetRef
|
||||
l.Infof("copied artifacts to [%s]", components[1])
|
||||
return nil
|
||||
}
|
||||
|
||||
// extractManifestContent extracts a manifest's layers through a mapper target
|
||||
// This is used for child manifests in indexes that aren't in the store's nameMap
|
||||
func extractManifestContent(ctx context.Context, s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, target content.Target) error {
|
||||
// Get a pusher from the target
|
||||
pusher, err := target.Pusher(ctx, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get pusher: %w", err)
|
||||
}
|
||||
|
||||
// Copy config blob
|
||||
if err := copyBlobDescriptor(ctx, s, m.Config, pusher); err != nil {
|
||||
return fmt.Errorf("failed to copy config: %w", err)
|
||||
}
|
||||
|
||||
// Copy each layer blob
|
||||
for _, layer := range m.Layers {
|
||||
if err := copyBlobDescriptor(ctx, s, layer, pusher); err != nil {
|
||||
return fmt.Errorf("failed to copy layer: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Copy the manifest itself
|
||||
if err := copyBlobDescriptor(ctx, s, desc, pusher); err != nil {
|
||||
return fmt.Errorf("failed to copy manifest: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyBlobDescriptor copies a single descriptor blob from the store to a pusher
|
||||
func copyBlobDescriptor(ctx context.Context, s *store.Layout, desc ocispec.Descriptor, pusher remotes.Pusher) (err error) {
|
||||
// Fetch the content from the store
|
||||
rc, err := s.OCI.Fetch(ctx, desc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch blob: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := rc.Close(); closeErr != nil && err == nil {
|
||||
err = fmt.Errorf("failed to close reader: %w", closeErr)
|
||||
}
|
||||
}()
|
||||
|
||||
// Get a writer from the pusher
|
||||
writer, err := pusher.Push(ctx, desc)
|
||||
if err != nil {
|
||||
if errdefs.IsAlreadyExists(err) {
|
||||
return nil // content already present on remote
|
||||
}
|
||||
return fmt.Errorf("failed to push: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := writer.Close(); closeErr != nil && err == nil {
|
||||
err = fmt.Errorf("failed to close writer: %w", closeErr)
|
||||
}
|
||||
}()
|
||||
|
||||
// Copy the content
|
||||
n, err := io.Copy(writer, rc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to copy content: %w", err)
|
||||
}
|
||||
|
||||
// Commit the written content
|
||||
if err := writer.Commit(ctx, n, desc.Digest); err != nil {
|
||||
return fmt.Errorf("failed to commit: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
338
cmd/hauler/cli/store/copy_test.go
Normal file
338
cmd/hauler/cli/store/copy_test.go
Normal file
@@ -0,0 +1,338 @@
|
||||
package store
|
||||
|
||||
// copy_test.go covers CopyCmd for both registry:// and dir:// targets.
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
)
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Error / guard tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestCopyCmd_EmptyStoreFails verifies that CopyCmd returns an error when the
|
||||
// store has no index.json on disk (i.e. nothing has been added yet).
|
||||
func TestCopyCmd_EmptyStoreFails(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t) // freshly created — index.json not yet on disk
|
||||
|
||||
o := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)}
|
||||
err := CopyCmd(ctx, o, s, "registry://127.0.0.1:5000", defaultCliOpts())
|
||||
if err == nil {
|
||||
t.Fatal("expected error for empty store, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "store index not found") {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopyCmd_DeprecatedCredentials verifies that passing Username returns the
|
||||
// deprecation error before any other check.
|
||||
func TestCopyCmd_DeprecatedCredentials(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
o := &flags.CopyOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
Username: "user",
|
||||
Password: "pass",
|
||||
}
|
||||
err := CopyCmd(ctx, o, s, "registry://127.0.0.1:5000", defaultCliOpts())
|
||||
if err == nil {
|
||||
t.Fatal("expected deprecation error, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "deprecated") {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopyCmd_UnknownProtocol verifies that an unrecognized scheme returns an
|
||||
// error containing "detecting protocol".
|
||||
func TestCopyCmd_UnknownProtocol(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
// Write index.json so IndexExists() passes.
|
||||
if err := s.SaveIndex(); err != nil {
|
||||
t.Fatalf("SaveIndex: %v", err)
|
||||
}
|
||||
|
||||
o := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)}
|
||||
err := CopyCmd(ctx, o, s, "ftp://somehost/path", defaultCliOpts())
|
||||
if err == nil {
|
||||
t.Fatal("expected error for unknown protocol, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "detecting protocol") {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Registry copy tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestCopyCmd_Registry seeds a store with a single image, copies it to an
// in-memory target registry, and verifies the image is reachable there.
func TestCopyCmd_Registry(t *testing.T) {
	ctx := newTestContext(t)

	// Source registry holds the image that will be pulled into the store.
	srcHost, _ := newLocalhostRegistry(t)
	seedImage(t, srcHost, "test/copy", "v1")

	s := newTestStore(t)
	rso := defaultRootOpts(s.Root)
	ro := defaultCliOpts()
	if err := storeImage(ctx, s, v1.Image{Name: srcHost + "/test/copy:v1"}, "", rso, ro, ""); err != nil {
		t.Fatalf("storeImage: %v", err)
	}

	// Copy the store contents to a second (target) registry over plain HTTP.
	dstHost, dstOpts := newTestRegistry(t)
	o := &flags.CopyOpts{
		StoreRootOpts: defaultRootOpts(s.Root),
		PlainHTTP:     true,
	}
	if err := CopyCmd(ctx, o, s, "registry://"+dstHost, ro); err != nil {
		t.Fatalf("CopyCmd registry: %v", err)
	}

	// Verify the image is reachable in the target registry.
	dstRef, err := name.NewTag(dstHost+"/test/copy:v1", name.Insecure)
	if err != nil {
		t.Fatalf("name.NewTag: %v", err)
	}
	if _, err := remote.Get(dstRef, dstOpts...); err != nil {
		t.Errorf("image not found in target registry after copy: %v", err)
	}
}
|
||||
|
||||
// TestCopyCmd_Registry_OnlyFilter seeds two images in distinct repos, copies
// with --only=repo1, and asserts only repo1 reaches the target.
func TestCopyCmd_Registry_OnlyFilter(t *testing.T) {
	ctx := newTestContext(t)

	// Two distinct repositories in the source registry.
	srcHost, _ := newLocalhostRegistry(t)
	seedImage(t, srcHost, "myorg/repo1", "v1")
	seedImage(t, srcHost, "myorg/repo2", "v1")

	s := newTestStore(t)
	rso := defaultRootOpts(s.Root)
	ro := defaultCliOpts()
	for _, repo := range []string{"myorg/repo1:v1", "myorg/repo2:v1"} {
		if err := storeImage(ctx, s, v1.Image{Name: srcHost + "/" + repo}, "", rso, ro, ""); err != nil {
			t.Fatalf("storeImage %s: %v", repo, err)
		}
	}

	// Copy with the Only filter set so only matching refs are pushed.
	dstHost, dstOpts := newTestRegistry(t)
	o := &flags.CopyOpts{
		StoreRootOpts: defaultRootOpts(s.Root),
		PlainHTTP:     true,
		Only:          "repo1",
	}
	if err := CopyCmd(ctx, o, s, "registry://"+dstHost, ro); err != nil {
		t.Fatalf("CopyCmd with --only: %v", err)
	}

	// repo1 must be in target.
	ref1, err := name.NewTag(dstHost+"/myorg/repo1:v1", name.Insecure)
	if err != nil {
		t.Fatalf("name.NewTag repo1: %v", err)
	}
	if _, err := remote.Get(ref1, dstOpts...); err != nil {
		t.Errorf("repo1 should be in target registry but was not found: %v", err)
	}

	// repo2 must NOT be in target.
	ref2, err := name.NewTag(dstHost+"/myorg/repo2:v1", name.Insecure)
	if err != nil {
		t.Fatalf("name.NewTag repo2: %v", err)
	}
	if _, err := remote.Get(ref2, dstOpts...); err == nil {
		t.Error("repo2 should NOT be in target registry after --only=repo1, but was found")
	}
}
|
||||
|
||||
// TestCopyCmd_Registry_SigTagDerivation seeds a base image along with cosign
// v2 signature artifacts, adds everything to the store via AddImage (which
// auto-discovers the .sig/.att/.sbom tags), then copies to a target registry
// and verifies the sig arrives at the expected sha256-<hex>.sig tag.
func TestCopyCmd_Registry_SigTagDerivation(t *testing.T) {
	ctx := newTestContext(t)

	srcHost, _ := newLocalhostRegistry(t)
	srcImg := seedImage(t, srcHost, "test/signed", "v1")
	seedCosignV2Artifacts(t, srcHost, "test/signed", srcImg)

	// AddImage discovers and stores the .sig/.att/.sbom tags automatically.
	s := newTestStore(t)
	if err := s.AddImage(ctx, srcHost+"/test/signed:v1", ""); err != nil {
		t.Fatalf("AddImage: %v", err)
	}

	dstHost, dstOpts := newTestRegistry(t)
	o := &flags.CopyOpts{
		StoreRootOpts: defaultRootOpts(s.Root),
		PlainHTTP:     true,
	}
	if err := CopyCmd(ctx, o, s, "registry://"+dstHost, defaultCliOpts()); err != nil {
		t.Fatalf("CopyCmd: %v", err)
	}

	// Compute the expected cosign sig tag from the image's manifest digest:
	// "sha256:<hex>" becomes "sha256-<hex>.sig".
	hash, err := srcImg.Digest()
	if err != nil {
		t.Fatalf("srcImg.Digest: %v", err)
	}
	sigTag := strings.ReplaceAll(hash.String(), ":", "-") + ".sig"

	sigRef, err := name.NewTag(dstHost+"/test/signed:"+sigTag, name.Insecure)
	if err != nil {
		t.Fatalf("name.NewTag sigRef: %v", err)
	}
	if _, err := remote.Get(sigRef, dstOpts...); err != nil {
		t.Errorf("sig not found at expected tag %s in target registry: %v", sigTag, err)
	}
}
|
||||
|
||||
// TestCopyCmd_Registry_IgnoreErrors verifies that a push failure to a
// non-listening address is swallowed when IgnoreErrors is set.
func TestCopyCmd_Registry_IgnoreErrors(t *testing.T) {
	ctx := newTestContext(t)

	srcHost, _ := newLocalhostRegistry(t)
	seedImage(t, srcHost, "test/ignore", "v1")

	s := newTestStore(t)
	rso := defaultRootOpts(s.Root)
	ro := defaultCliOpts()
	if err := storeImage(ctx, s, v1.Image{Name: srcHost + "/test/ignore:v1"}, "", rso, ro, ""); err != nil {
		t.Fatalf("storeImage: %v", err)
	}

	// localhost:1 is a port that is never listening.
	o := &flags.CopyOpts{
		StoreRootOpts: defaultRootOpts(s.Root),
		PlainHTTP:     true,
	}
	// IgnoreErrors must turn the connection failure into a non-fatal outcome.
	roIgnore := defaultCliOpts()
	roIgnore.IgnoreErrors = true
	if err := CopyCmd(ctx, o, s, "registry://localhost:1", roIgnore); err != nil {
		t.Errorf("expected no error with IgnoreErrors=true, got: %v", err)
	}
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Directory copy tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestCopyCmd_Dir_Files copies a file artifact to a directory target and
// verifies the file appears under its original basename.
func TestCopyCmd_Dir_Files(t *testing.T) {
	ctx := newTestContext(t)

	content := "hello from hauler file"
	url := seedFileInHTTPServer(t, "data.txt", content)

	s := newTestStore(t)
	if err := storeFile(ctx, s, v1.File{Path: url}); err != nil {
		t.Fatalf("storeFile: %v", err)
	}

	destDir := t.TempDir()
	o := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)}
	if err := CopyCmd(ctx, o, s, "dir://"+destDir, defaultCliOpts()); err != nil {
		t.Fatalf("CopyCmd dir: %v", err)
	}

	// The copied file must keep its original basename and byte content.
	outPath := filepath.Join(destDir, "data.txt")
	data, err := os.ReadFile(outPath)
	if err != nil {
		t.Fatalf("file not found in destDir after dir copy: %v", err)
	}
	if string(data) != content {
		t.Errorf("file content mismatch: got %q, want %q", string(data), content)
	}
}
|
||||
|
||||
// TestCopyCmd_Dir_SkipsImages verifies that container images are not extracted
// when copying to a directory target.
func TestCopyCmd_Dir_SkipsImages(t *testing.T) {
	ctx := newTestContext(t)

	srcHost, _ := newLocalhostRegistry(t)
	seedImage(t, srcHost, "test/imgskip", "v1")

	s := newTestStore(t)
	rso := defaultRootOpts(s.Root)
	ro := defaultCliOpts()
	if err := storeImage(ctx, s, v1.Image{Name: srcHost + "/test/imgskip:v1"}, "", rso, ro, ""); err != nil {
		t.Fatalf("storeImage: %v", err)
	}

	destDir := t.TempDir()
	o := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)}
	if err := CopyCmd(ctx, o, s, "dir://"+destDir, ro); err != nil {
		t.Fatalf("CopyCmd dir: %v", err)
	}

	// The store holds only a container image, so the directory target must
	// end up empty.
	entries, err := os.ReadDir(destDir)
	if err != nil {
		t.Fatalf("ReadDir: %v", err)
	}
	if len(entries) != 0 {
		names := make([]string, len(entries))
		for i, e := range entries {
			names[i] = e.Name()
		}
		t.Errorf("expected empty destDir for image-only store, found: %s", strings.Join(names, ", "))
	}
}
|
||||
|
||||
// TestCopyCmd_Dir_Charts copies a local Helm chart artifact to a directory
// target and verifies a .tgz file is present.
func TestCopyCmd_Dir_Charts(t *testing.T) {
	ctx := newTestContext(t)

	s := newTestStore(t)
	rso := defaultRootOpts(s.Root)
	ro := defaultCliOpts()

	// Add a chart from the shared testdata directory.
	o := newAddChartOpts(chartTestdataDir, "")
	if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil {
		t.Fatalf("AddChartCmd: %v", err)
	}

	destDir := t.TempDir()
	copyOpts := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)}
	if err := CopyCmd(ctx, copyOpts, s, "dir://"+destDir, ro); err != nil {
		t.Fatalf("CopyCmd dir charts: %v", err)
	}

	entries, err := os.ReadDir(destDir)
	if err != nil {
		t.Fatalf("ReadDir: %v", err)
	}

	// The chart must land as a tarball; the exact basename is not asserted.
	var found bool
	for _, e := range entries {
		if strings.HasSuffix(e.Name(), ".tgz") || strings.HasSuffix(e.Name(), ".tar.gz") {
			found = true
			break
		}
	}
	if !found {
		names := make([]string, len(entries))
		for i, e := range entries {
			names[i] = e.Name()
		}
		t.Errorf("no .tgz found in destDir after chart copy; found: %v", names)
	}
}
|
||||
@@ -10,11 +10,79 @@ import (
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/internal/mapper"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/reference"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
// isIndexMediaType returns true for OCI and Docker manifest list media types.
|
||||
func isIndexMediaType(mt string) bool {
|
||||
return mt == ocispec.MediaTypeImageIndex || mt == consts.DockerManifestListSchema2
|
||||
}
|
||||
|
||||
// firstLeafManifest walks a (potentially nested) OCI index and returns the
// decoded manifest of the first non-index child. It prefers non-index children
// at each level; if all children are indexes it descends into the first one.
// Returns an error if any nested index or manifest cannot be decoded.
func firstLeafManifest(ctx context.Context, s *store.Layout, idx ocispec.Index) (ocispec.Manifest, error) {
	for {
		if len(idx.Manifests) == 0 {
			return ocispec.Manifest{}, fmt.Errorf("image index has no child manifests")
		}

		// Prefer the first non-index child; fall back to the first child (an index) if all are indexes.
		desc := idx.Manifests[0]
		for _, d := range idx.Manifests {
			if !isIndexMediaType(d.MediaType) {
				desc = d
				break
			}
		}

		rc, err := s.Fetch(ctx, desc)
		if err != nil {
			return ocispec.Manifest{}, err
		}

		if isIndexMediaType(desc.MediaType) {
			// Nested index: decode it, then loop again one level deeper.
			var nested ocispec.Index
			err = json.NewDecoder(rc).Decode(&nested)
			rc.Close() // close before the error check so the reader never leaks
			if err != nil {
				return ocispec.Manifest{}, fmt.Errorf("decoding nested index: %w", err)
			}
			idx = nested
			continue
		}

		// Leaf manifest: decode and return it.
		var m ocispec.Manifest
		err = json.NewDecoder(rc).Decode(&m)
		rc.Close() // close before the error check so the reader never leaks
		if err != nil {
			return ocispec.Manifest{}, fmt.Errorf("decoding child manifest: %w", err)
		}
		return m, nil
	}
}
|
||||
|
||||
// isContainerImageManifest returns true when the manifest describes a real
|
||||
// container image — i.e. an OCI/Docker image config with no AnnotationTitle on
|
||||
// any layer. File artifacts distributed as OCI images always carry AnnotationTitle
|
||||
// on their layers, so they are NOT considered container images by this check.
|
||||
func isContainerImageManifest(m ocispec.Manifest) bool {
|
||||
switch m.Config.MediaType {
|
||||
case consts.DockerConfigJSON, ocispec.MediaTypeImageConfig:
|
||||
for _, layer := range m.Layers {
|
||||
if _, ok := layer.Annotations[ocispec.AnnotationTitle]; ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func ExtractCmd(ctx context.Context, o *flags.ExtractOpts, s *store.Layout, ref string) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
@@ -39,9 +107,36 @@ func ExtractCmd(ctx context.Context, o *flags.ExtractOpts, s *store.Layout, ref
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
// For image indexes, decoding the index JSON as ocispec.Manifest produces
|
||||
// an empty Config.MediaType and nil Layers — causing FromManifest to fall
|
||||
// back to Default() mapper, which writes config blobs as sha256:<digest>.bin.
|
||||
// Instead, peek at the first child manifest to get real config/layer info.
|
||||
var m ocispec.Manifest
|
||||
if err := json.NewDecoder(rc).Decode(&m); err != nil {
|
||||
return err
|
||||
if desc.MediaType == ocispec.MediaTypeImageIndex || desc.MediaType == consts.DockerManifestListSchema2 {
|
||||
var idx ocispec.Index
|
||||
if err := json.NewDecoder(rc).Decode(&idx); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(idx.Manifests) == 0 {
|
||||
l.Warnf("skipping [%s]: image index has no child manifests", reference)
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
m, err = firstLeafManifest(ctx, s, idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := json.NewDecoder(rc).Decode(&m); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Container images (no AnnotationTitle on any layer) are not extractable
|
||||
// to disk in a meaningful way — use `hauler store copy` to push to a registry.
|
||||
if isContainerImageManifest(m) {
|
||||
l.Warnf("skipping [%s]: container images cannot be extracted (use `hauler store copy` to push to a registry)", reference)
|
||||
return nil
|
||||
}
|
||||
|
||||
mapperStore, err := mapper.FromManifest(m, o.DestinationDir)
|
||||
|
||||
556
cmd/hauler/cli/store/extract_test.go
Normal file
556
cmd/hauler/cli/store/extract_test.go
Normal file
@@ -0,0 +1,556 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
gcrv1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/empty"
|
||||
"github.com/google/go-containerregistry/pkg/v1/mutate"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
"github.com/google/go-containerregistry/pkg/v1/static"
|
||||
gvtypes "github.com/google/go-containerregistry/pkg/v1/types"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
// chartTestdataDir is defined in add_test.go as "../../../../testdata".
|
||||
|
||||
// TestExtractCmd_File stores a file artifact and extracts it, verifying the
// file lands in the destination directory under its original name with its
// original content.
func TestExtractCmd_File(t *testing.T) {
	ctx := newTestContext(t)
	s := newTestStore(t)

	fileContent := "hello extract test"
	url := seedFileInHTTPServer(t, "extract-me.txt", fileContent)
	if err := storeFile(ctx, s, v1.File{Path: url}); err != nil {
		t.Fatalf("storeFile: %v", err)
	}

	// reference.Parse("extract-me.txt") normalises to "hauler/extract-me.txt:latest"
	// (DefaultNamespace = "hauler", DefaultTag = "latest"). ExtractCmd builds
	// repo = RepositoryStr() + ":" + Identifier() = "hauler/extract-me.txt:latest"
	// and uses strings.Contains against the stored ref — which matches exactly.
	ref := "hauler/extract-me.txt:latest"

	destDir := t.TempDir()
	eo := &flags.ExtractOpts{
		StoreRootOpts:  defaultRootOpts(s.Root),
		DestinationDir: destDir,
	}

	if err := ExtractCmd(ctx, eo, s, ref); err != nil {
		t.Fatalf("ExtractCmd: %v", err)
	}

	// The file mapper writes the layer using its AnnotationTitle ("extract-me.txt").
	outPath := filepath.Join(destDir, "extract-me.txt")
	data, err := os.ReadFile(outPath)
	if err != nil {
		t.Fatalf("expected extracted file at %s: %v", outPath, err)
	}
	if string(data) != fileContent {
		t.Errorf("content mismatch: got %q, want %q", string(data), fileContent)
	}
}
|
||||
|
||||
// TestExtractCmd_Chart adds a local Helm chart to the store, extracts it, and
// verifies a chart tarball appears in the destination directory.
func TestExtractCmd_Chart(t *testing.T) {
	ctx := newTestContext(t)
	s := newTestStore(t)
	rso := defaultRootOpts(s.Root)
	ro := defaultCliOpts()

	o := newAddChartOpts(chartTestdataDir, "")
	if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil {
		t.Fatalf("AddChartCmd: %v", err)
	}

	// Chart stored as "hauler/rancher-cluster-templates:0.5.2".
	ref := "hauler/rancher-cluster-templates:0.5.2"

	destDir := t.TempDir()
	eo := &flags.ExtractOpts{
		StoreRootOpts:  defaultRootOpts(s.Root),
		DestinationDir: destDir,
	}

	if err := ExtractCmd(ctx, eo, s, ref); err != nil {
		t.Fatalf("ExtractCmd: %v", err)
	}

	// The chart mapper writes the chart layer as a .tgz (using AnnotationTitle,
	// or "chart.tar.gz" as fallback).
	entries, err := os.ReadDir(destDir)
	if err != nil {
		t.Fatalf("ReadDir: %v", err)
	}

	found := false
	for _, e := range entries {
		if strings.HasSuffix(e.Name(), ".tgz") || strings.HasSuffix(e.Name(), ".tar.gz") {
			found = true
			break
		}
	}
	if !found {
		names := make([]string, len(entries))
		for i, e := range entries {
			names[i] = e.Name()
		}
		t.Errorf("expected a .tgz or .tar.gz in destDir, got: %v", names)
	}
}
|
||||
|
||||
func TestExtractCmd_NotFound(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
eo := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
DestinationDir: t.TempDir(),
|
||||
}
|
||||
|
||||
err := ExtractCmd(ctx, eo, s, "hauler/nonexistent:v99")
|
||||
if err == nil {
|
||||
t.Fatal("expected error for nonexistent ref, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "not found in store") {
|
||||
t.Errorf("expected 'not found in store' in error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestExtractCmd_OciArtifactKindImage verifies that a file artifact pulled via
// AddImage (and therefore labelled kind=KindAnnotationImage) still extracts as
// a file, because dispatch keys off the manifest's Config.MediaType.
func TestExtractCmd_OciArtifactKindImage(t *testing.T) {
	// OCI artifacts pulled from a registry via AddImage() are always labelled
	// kind=KindAnnotationImage regardless of their actual content type (file,
	// chart, etc.). ExtractCmd must still dispatch via the manifest's
	// Config.MediaType — not the kind annotation — so extraction works correctly.
	ctx := newTestContext(t)

	// newLocalhostRegistry is required: s.AddImage uses authn.DefaultKeychain and
	// go-containerregistry auto-selects plain HTTP only for "localhost:" hosts.
	host, rOpts := newLocalhostRegistry(t)

	// Build a synthetic OCI file artifact:
	//   config.MediaType = FileLocalConfigMediaType (triggers Files() mapper)
	//   layer.MediaType  = FileLayerMediaType
	//   layer annotation AnnotationTitle = "oci-pulled-file.txt"
	fileContent := []byte("oci file content from registry")
	fileLayer := static.NewLayer(fileContent, gvtypes.MediaType(consts.FileLayerMediaType))
	img, err := mutate.Append(empty.Image, mutate.Addendum{
		Layer: fileLayer,
		Annotations: map[string]string{
			ocispec.AnnotationTitle: "oci-pulled-file.txt",
		},
	})
	if err != nil {
		t.Fatalf("mutate.Append: %v", err)
	}
	img = mutate.MediaType(img, gvtypes.OCIManifestSchema1)
	img = mutate.ConfigMediaType(img, gvtypes.MediaType(consts.FileLocalConfigMediaType))

	ref := host + "/oci-artifacts/myfile:v1"
	tag, err := name.NewTag(ref, name.Insecure)
	if err != nil {
		t.Fatalf("name.NewTag: %v", err)
	}
	if err := remote.Write(tag, img, rOpts...); err != nil {
		t.Fatalf("remote.Write: %v", err)
	}

	// Pull into a fresh store — AddImage sets kind=KindAnnotationImage on all manifests.
	s := newTestStore(t)
	if err := s.AddImage(ctx, ref, "", rOpts...); err != nil {
		t.Fatalf("AddImage: %v", err)
	}

	// ExtractCmd receives the short ref (no registry prefix) as stored in AnnotationRefName.
	// reference.Parse("oci-artifacts/myfile:v1") → repo "oci-artifacts/myfile:v1" matches.
	destDir := t.TempDir()
	eo := &flags.ExtractOpts{
		StoreRootOpts:  defaultRootOpts(s.Root),
		DestinationDir: destDir,
	}
	if err := ExtractCmd(ctx, eo, s, "oci-artifacts/myfile:v1"); err != nil {
		t.Fatalf("ExtractCmd: %v", err)
	}

	// Files() mapper uses AnnotationTitle → "oci-pulled-file.txt".
	outPath := filepath.Join(destDir, "oci-pulled-file.txt")
	data, err := os.ReadFile(outPath)
	if err != nil {
		t.Fatalf("expected extracted file at %s: %v", outPath, err)
	}
	if string(data) != string(fileContent) {
		t.Errorf("content mismatch: got %q, want %q", string(data), string(fileContent))
	}
}
|
||||
|
||||
// TestExtractCmd_OciImageIndex_NoBinFiles is a regression test: extracting an
// OCI image index of titled binary layers must produce only the named files,
// never sha256:<digest>.bin metadata files.
func TestExtractCmd_OciImageIndex_NoBinFiles(t *testing.T) {
	// Regression test: extracting an OCI image index whose platform manifests
	// carry binary layers with AnnotationTitle must yield only the named binary
	// files — no sha256:<digest>.bin metadata files.
	// Before the fix, decoding the index as an ocispec.Manifest produced an
	// empty Config.MediaType, causing FromManifest to select Default() mapper
	// which wrote config blobs and child manifests as sha256:<digest>.bin.
	ctx := newTestContext(t)
	host, rOpts := newLocalhostRegistry(t)

	// Helper: a single-layer OCI image whose layer carries AnnotationTitle.
	buildPlatformImg := func(content []byte, title string) gcrv1.Image {
		layer := static.NewLayer(content, gvtypes.OCILayer)
		img, err := mutate.Append(empty.Image, mutate.Addendum{
			Layer: layer,
			Annotations: map[string]string{
				ocispec.AnnotationTitle: title,
			},
		})
		if err != nil {
			t.Fatalf("mutate.Append: %v", err)
		}
		img = mutate.MediaType(img, gvtypes.OCIManifestSchema1)
		img = mutate.ConfigMediaType(img, gvtypes.MediaType(ocispec.MediaTypeImageConfig))
		return img
	}

	amd64Img := buildPlatformImg([]byte("amd64 binary content"), "mybinary.linux-amd64")
	arm64Img := buildPlatformImg([]byte("arm64 binary content"), "mybinary.linux-arm64")

	// Multi-arch index containing both platform manifests.
	idx := mutate.AppendManifests(empty.Index,
		mutate.IndexAddendum{
			Add: amd64Img,
			Descriptor: gcrv1.Descriptor{
				MediaType: gvtypes.OCIManifestSchema1,
				Platform:  &gcrv1.Platform{OS: "linux", Architecture: "amd64"},
			},
		},
		mutate.IndexAddendum{
			Add: arm64Img,
			Descriptor: gcrv1.Descriptor{
				MediaType: gvtypes.OCIManifestSchema1,
				Platform:  &gcrv1.Platform{OS: "linux", Architecture: "arm64"},
			},
		},
	)

	ref := host + "/binaries/mybinary:v1"
	tag, err := name.NewTag(ref, name.Insecure)
	if err != nil {
		t.Fatalf("name.NewTag: %v", err)
	}
	if err := remote.WriteIndex(tag, idx, rOpts...); err != nil {
		t.Fatalf("remote.WriteIndex: %v", err)
	}

	s := newTestStore(t)
	if err := s.AddImage(ctx, ref, "", rOpts...); err != nil {
		t.Fatalf("AddImage: %v", err)
	}

	destDir := t.TempDir()
	eo := &flags.ExtractOpts{
		StoreRootOpts:  defaultRootOpts(s.Root),
		DestinationDir: destDir,
	}
	if err := ExtractCmd(ctx, eo, s, "binaries/mybinary:v1"); err != nil {
		t.Fatalf("ExtractCmd: %v", err)
	}

	entries, err := os.ReadDir(destDir)
	if err != nil {
		t.Fatalf("ReadDir: %v", err)
	}
	var names []string
	for _, e := range entries {
		names = append(names, e.Name())
	}

	// No sha256: digest-named files should be extracted
	for _, n := range names {
		if strings.HasPrefix(n, "sha256:") {
			t.Errorf("unexpected digest-named file %q extracted (all files: %v)", n, names)
		}
	}

	// Both platform binaries must be present
	for _, want := range []string{"mybinary.linux-amd64", "mybinary.linux-arm64"} {
		found := false
		for _, n := range names {
			if n == want {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("expected binary %q not found; got: %v", want, names)
		}
	}
}
|
||||
|
||||
// TestExtractCmd_NestedImageIndex_NoBinFiles is a regression test for nested
// indexes: an outer index whose only children are inner indexes must still
// extract the named binaries, never sha256:<digest>.bin metadata files.
func TestExtractCmd_NestedImageIndex_NoBinFiles(t *testing.T) {
	// Regression test: extracting a nested OCI image index (outer index whose only
	// children are inner indexes, which in turn contain the platform manifests) must
	// yield only the named binary files — no sha256:<digest>.bin metadata files.
	// firstLeafManifest must descend through the outer index into the inner index to
	// find a leaf manifest so that FromManifest selects the correct Files() mapper.
	ctx := newTestContext(t)
	host, rOpts := newLocalhostRegistry(t)

	// Helper: a single-layer OCI image whose layer carries AnnotationTitle.
	buildPlatformImg := func(content []byte, title string) gcrv1.Image {
		layer := static.NewLayer(content, gvtypes.OCILayer)
		img, err := mutate.Append(empty.Image, mutate.Addendum{
			Layer: layer,
			Annotations: map[string]string{
				ocispec.AnnotationTitle: title,
			},
		})
		if err != nil {
			t.Fatalf("mutate.Append: %v", err)
		}
		img = mutate.MediaType(img, gvtypes.OCIManifestSchema1)
		img = mutate.ConfigMediaType(img, gvtypes.MediaType(ocispec.MediaTypeImageConfig))
		return img
	}

	amd64Img := buildPlatformImg([]byte("amd64 binary content"), "mybinary.linux-amd64")
	arm64Img := buildPlatformImg([]byte("arm64 binary content"), "mybinary.linux-arm64")

	// Inner index contains the leaf platform manifests.
	innerIdx := mutate.AppendManifests(empty.Index,
		mutate.IndexAddendum{
			Add: amd64Img,
			Descriptor: gcrv1.Descriptor{
				MediaType: gvtypes.OCIManifestSchema1,
				Platform:  &gcrv1.Platform{OS: "linux", Architecture: "amd64"},
			},
		},
		mutate.IndexAddendum{
			Add: arm64Img,
			Descriptor: gcrv1.Descriptor{
				MediaType: gvtypes.OCIManifestSchema1,
				Platform:  &gcrv1.Platform{OS: "linux", Architecture: "arm64"},
			},
		},
	)

	// Outer index contains only the inner index — all children are indexes.
	outerIdx := mutate.AppendManifests(empty.Index,
		mutate.IndexAddendum{
			Add: innerIdx,
			Descriptor: gcrv1.Descriptor{
				MediaType: gvtypes.OCIImageIndex,
			},
		},
	)

	ref := host + "/binaries/nested:v1"
	tag, err := name.NewTag(ref, name.Insecure)
	if err != nil {
		t.Fatalf("name.NewTag: %v", err)
	}
	if err := remote.WriteIndex(tag, outerIdx, rOpts...); err != nil {
		t.Fatalf("remote.WriteIndex: %v", err)
	}

	s := newTestStore(t)
	if err := s.AddImage(ctx, ref, "", rOpts...); err != nil {
		t.Fatalf("AddImage: %v", err)
	}

	destDir := t.TempDir()
	eo := &flags.ExtractOpts{
		StoreRootOpts:  defaultRootOpts(s.Root),
		DestinationDir: destDir,
	}
	if err := ExtractCmd(ctx, eo, s, "binaries/nested:v1"); err != nil {
		t.Fatalf("ExtractCmd: %v", err)
	}

	entries, err := os.ReadDir(destDir)
	if err != nil {
		t.Fatalf("ReadDir: %v", err)
	}
	var names []string
	for _, e := range entries {
		names = append(names, e.Name())
	}

	// No sha256: digest-named files should be extracted.
	for _, n := range names {
		if strings.HasPrefix(n, "sha256:") {
			t.Errorf("unexpected digest-named file %q extracted (all files: %v)", n, names)
		}
	}

	// Both platform binaries must be present.
	for _, want := range []string{"mybinary.linux-amd64", "mybinary.linux-arm64"} {
		found := false
		for _, n := range names {
			if n == want {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("expected binary %q not found; got: %v", want, names)
		}
	}
}
|
||||
|
||||
// TestExtractCmd_ContainerImage_Skipped verifies that a real container image
// is skipped by ExtractCmd without error and without writing any files.
func TestExtractCmd_ContainerImage_Skipped(t *testing.T) {
	// A real container image (no AnnotationTitle on any layer) should be skipped
	// without error and without writing any files to the destination directory.
	ctx := newTestContext(t)
	host, rOpts := newLocalhostRegistry(t)

	// Layer deliberately has no AnnotationTitle — this marks it as a container
	// image rather than a file artifact.
	layer := static.NewLayer([]byte("layer content"), gvtypes.OCILayer)
	img, err := mutate.Append(empty.Image, mutate.Addendum{Layer: layer})
	if err != nil {
		t.Fatalf("mutate.Append: %v", err)
	}
	img = mutate.MediaType(img, gvtypes.OCIManifestSchema1)
	img = mutate.ConfigMediaType(img, gvtypes.MediaType(ocispec.MediaTypeImageConfig))

	ref := host + "/myapp/myimage:v1"
	tag, err := name.NewTag(ref, name.Insecure)
	if err != nil {
		t.Fatalf("name.NewTag: %v", err)
	}
	if err := remote.Write(tag, img, rOpts...); err != nil {
		t.Fatalf("remote.Write: %v", err)
	}

	s := newTestStore(t)
	if err := s.AddImage(ctx, ref, "", rOpts...); err != nil {
		t.Fatalf("AddImage: %v", err)
	}

	destDir := t.TempDir()
	eo := &flags.ExtractOpts{
		StoreRootOpts:  defaultRootOpts(s.Root),
		DestinationDir: destDir,
	}
	if err := ExtractCmd(ctx, eo, s, "myapp/myimage:v1"); err != nil {
		t.Fatalf("ExtractCmd: %v", err)
	}

	// The destination must stay empty: the image was skipped, not extracted.
	entries, err := os.ReadDir(destDir)
	if err != nil {
		t.Fatalf("ReadDir: %v", err)
	}
	if len(entries) != 0 {
		names := make([]string, len(entries))
		for i, e := range entries {
			names[i] = e.Name()
		}
		t.Errorf("expected no files extracted for container image, got: %v", names)
	}
}
|
||||
|
||||
func TestExtractCmd_ContainerImageIndex_Skipped(t *testing.T) {
|
||||
// A real multi-arch container image index (no AnnotationTitle on any layer)
|
||||
// should be skipped without error and without writing any files.
|
||||
ctx := newTestContext(t)
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
|
||||
buildPlatformImg := func(content []byte) gcrv1.Image {
|
||||
layer := static.NewLayer(content, gvtypes.OCILayer)
|
||||
img, err := mutate.Append(empty.Image, mutate.Addendum{Layer: layer})
|
||||
if err != nil {
|
||||
t.Fatalf("mutate.Append: %v", err)
|
||||
}
|
||||
img = mutate.MediaType(img, gvtypes.OCIManifestSchema1)
|
||||
img = mutate.ConfigMediaType(img, gvtypes.MediaType(ocispec.MediaTypeImageConfig))
|
||||
return img
|
||||
}
|
||||
|
||||
idx := mutate.AppendManifests(empty.Index,
|
||||
mutate.IndexAddendum{
|
||||
Add: buildPlatformImg([]byte("amd64 content")),
|
||||
Descriptor: gcrv1.Descriptor{
|
||||
MediaType: gvtypes.OCIManifestSchema1,
|
||||
Platform: &gcrv1.Platform{OS: "linux", Architecture: "amd64"},
|
||||
},
|
||||
},
|
||||
mutate.IndexAddendum{
|
||||
Add: buildPlatformImg([]byte("arm64 content")),
|
||||
Descriptor: gcrv1.Descriptor{
|
||||
MediaType: gvtypes.OCIManifestSchema1,
|
||||
Platform: &gcrv1.Platform{OS: "linux", Architecture: "arm64"},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
ref := host + "/myapp/multiarch:v1"
|
||||
tag, err := name.NewTag(ref, name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("name.NewTag: %v", err)
|
||||
}
|
||||
if err := remote.WriteIndex(tag, idx, rOpts...); err != nil {
|
||||
t.Fatalf("remote.WriteIndex: %v", err)
|
||||
}
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, ref, "", rOpts...); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
destDir := t.TempDir()
|
||||
eo := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
DestinationDir: destDir,
|
||||
}
|
||||
if err := ExtractCmd(ctx, eo, s, "myapp/multiarch:v1"); err != nil {
|
||||
t.Fatalf("ExtractCmd: %v", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadDir: %v", err)
|
||||
}
|
||||
if len(entries) != 0 {
|
||||
names := make([]string, len(entries))
|
||||
for i, e := range entries {
|
||||
names[i] = e.Name()
|
||||
}
|
||||
t.Errorf("expected no files extracted for container image index, got: %v", names)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractCmd_SubstringMatch(t *testing.T) {
|
||||
// reference.Parse applies DefaultTag ("latest") when no tag is given, so
|
||||
// Parse("hauler/extract-sub.txt") and Parse("hauler/extract-sub.txt:latest")
|
||||
// produce the same repo string "hauler/extract-sub.txt:latest".
|
||||
// This means a no-tag ref substring-matches a stored "hauler/...:latest" key.
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
fileContent := "substring match content"
|
||||
url := seedFileInHTTPServer(t, "extract-sub.txt", fileContent)
|
||||
if err := storeFile(ctx, s, v1.File{Path: url}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
|
||||
destDir := t.TempDir()
|
||||
eo := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
DestinationDir: destDir,
|
||||
}
|
||||
|
||||
// No explicit tag — Parse adds ":latest" as default, which still matches.
|
||||
if err := ExtractCmd(ctx, eo, s, "hauler/extract-sub.txt"); err != nil {
|
||||
t.Fatalf("ExtractCmd with no-tag ref: %v", err)
|
||||
}
|
||||
|
||||
outPath := filepath.Join(destDir, "extract-sub.txt")
|
||||
data, err := os.ReadFile(outPath)
|
||||
if err != nil {
|
||||
t.Fatalf("expected extracted file at %s: %v", outPath, err)
|
||||
}
|
||||
if string(data) != fileContent {
|
||||
t.Errorf("content mismatch: got %q, want %q", string(data), fileContent)
|
||||
}
|
||||
}
|
||||
@@ -6,8 +6,10 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/olekukonko/tablewriter"
|
||||
"github.com/olekukonko/tablewriter/tw"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
@@ -47,12 +49,20 @@ func InfoCmd(ctx context.Context, o *flags.InfoOpts, s *store.Layout) error {
|
||||
return err
|
||||
}
|
||||
|
||||
i := newItem(s, desc, internalManifest, fmt.Sprintf("%s/%s", internalDesc.Platform.OS, internalDesc.Platform.Architecture), o)
|
||||
i := newItemWithDigest(
|
||||
s,
|
||||
internalDesc.Digest.String(),
|
||||
desc,
|
||||
internalManifest,
|
||||
fmt.Sprintf("%s/%s", internalDesc.Platform.OS, internalDesc.Platform.Architecture),
|
||||
o,
|
||||
)
|
||||
var emptyItem item
|
||||
if i != emptyItem {
|
||||
items = append(items, i)
|
||||
}
|
||||
}
|
||||
|
||||
// handle "non" multi-arch images
|
||||
} else if desc.MediaType == consts.DockerManifestSchema2 || desc.MediaType == consts.OCIManifestSchema1 {
|
||||
var m ocispec.Manifest
|
||||
@@ -66,14 +76,15 @@ func InfoCmd(ctx context.Context, o *flags.InfoOpts, s *store.Layout) error {
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
// Unmarshal the OCI image content
|
||||
// unmarshal the oci image content
|
||||
var internalManifest ocispec.Image
|
||||
if err := json.NewDecoder(rc).Decode(&internalManifest); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if internalManifest.Architecture != "" {
|
||||
i := newItem(s, desc, m, fmt.Sprintf("%s/%s", internalManifest.OS, internalManifest.Architecture), o)
|
||||
i := newItem(s, desc, m,
|
||||
fmt.Sprintf("%s/%s", internalManifest.OS, internalManifest.Architecture), o)
|
||||
var emptyItem item
|
||||
if i != emptyItem {
|
||||
items = append(items, i)
|
||||
@@ -85,7 +96,8 @@ func InfoCmd(ctx context.Context, o *flags.InfoOpts, s *store.Layout) error {
|
||||
items = append(items, i)
|
||||
}
|
||||
}
|
||||
// handle the rest
|
||||
|
||||
// handle everything else (charts, files, sigs, etc.)
|
||||
} else {
|
||||
var m ocispec.Manifest
|
||||
if err := json.NewDecoder(rc).Decode(&m); err != nil {
|
||||
@@ -118,13 +130,15 @@ func InfoCmd(ctx context.Context, o *flags.InfoOpts, s *store.Layout) error {
|
||||
msg = buildJson(items...)
|
||||
fmt.Println(msg)
|
||||
default:
|
||||
buildTable(items...)
|
||||
if err := buildTable(o.ShowDigests, items...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildListRepos(items ...item) {
|
||||
// Create map to track unique repository names
|
||||
// create map to track unique repository names
|
||||
repos := make(map[string]bool)
|
||||
|
||||
for _, i := range items {
|
||||
@@ -141,37 +155,86 @@ func buildListRepos(items ...item) {
|
||||
repos[repoName] = true
|
||||
}
|
||||
|
||||
// Collect and print unique repository names
|
||||
// collect and print unique repository names
|
||||
for repoName := range repos {
|
||||
fmt.Println(repoName)
|
||||
}
|
||||
}
|
||||
|
||||
func buildTable(items ...item) {
|
||||
// Create a table for the results
|
||||
table := tablewriter.NewWriter(os.Stdout)
|
||||
table.SetHeader([]string{"Reference", "Type", "Platform", "# Layers", "Size"})
|
||||
table.SetHeaderAlignment(tablewriter.ALIGN_LEFT)
|
||||
table.SetRowLine(false)
|
||||
table.SetAutoMergeCellsByColumnIndex([]int{0})
|
||||
func buildTable(showDigests bool, items ...item) error {
|
||||
table := tablewriter.NewTable(os.Stdout)
|
||||
table.Configure(func(cfg *tablewriter.Config) {
|
||||
cfg.Header.Alignment.Global = tw.AlignLeft
|
||||
cfg.Row.Merging.Mode = tw.MergeVertical
|
||||
cfg.Row.Merging.ByColumnIndex = tw.NewBoolMapper(0)
|
||||
})
|
||||
|
||||
if showDigests {
|
||||
table.Header("Reference", "Type", "Platform", "Digest", "# Layers", "Size")
|
||||
} else {
|
||||
table.Header("Reference", "Type", "Platform", "# Layers", "Size")
|
||||
}
|
||||
|
||||
totalSize := int64(0)
|
||||
|
||||
for _, i := range items {
|
||||
if i.Type != "" {
|
||||
row := []string{
|
||||
i.Reference,
|
||||
if i.Type == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
ref := truncateReference(i.Reference)
|
||||
var row []string
|
||||
|
||||
if showDigests {
|
||||
digest := i.Digest
|
||||
if digest == "" {
|
||||
digest = "-"
|
||||
}
|
||||
row = []string{
|
||||
ref,
|
||||
i.Type,
|
||||
i.Platform,
|
||||
digest,
|
||||
fmt.Sprintf("%d", i.Layers),
|
||||
byteCountSI(i.Size),
|
||||
}
|
||||
} else {
|
||||
row = []string{
|
||||
ref,
|
||||
i.Type,
|
||||
i.Platform,
|
||||
fmt.Sprintf("%d", i.Layers),
|
||||
byteCountSI(i.Size),
|
||||
}
|
||||
totalSize += i.Size
|
||||
table.Append(row)
|
||||
}
|
||||
|
||||
totalSize += i.Size
|
||||
if err := table.Append(row); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
table.SetFooter([]string{"", "", "", "Total", byteCountSI(totalSize)})
|
||||
|
||||
table.Render()
|
||||
// align total column based on digest visibility
|
||||
if showDigests {
|
||||
table.Footer("", "", "", "", "Total", byteCountSI(totalSize))
|
||||
} else {
|
||||
table.Footer("", "", "", "Total", byteCountSI(totalSize))
|
||||
}
|
||||
|
||||
return table.Render()
|
||||
}
|
||||
|
||||
// truncateReference shortens the digest of a reference
|
||||
func truncateReference(ref string) string {
|
||||
const prefix = "@sha256:"
|
||||
idx := strings.Index(ref, prefix)
|
||||
if idx == -1 {
|
||||
return ref
|
||||
}
|
||||
if len(ref) > idx+len(prefix)+12 {
|
||||
return ref[:idx+len(prefix)+12] + "…"
|
||||
}
|
||||
return ref
|
||||
}
|
||||
|
||||
func buildJson(item ...item) string {
|
||||
@@ -186,6 +249,7 @@ type item struct {
|
||||
Reference string
|
||||
Type string
|
||||
Platform string
|
||||
Digest string
|
||||
Layers int
|
||||
Size int64
|
||||
}
|
||||
@@ -210,6 +274,13 @@ func (a byReferenceAndArch) Less(i, j int) bool {
|
||||
return a[i].Reference < a[j].Reference
|
||||
}
|
||||
|
||||
// overrides the digest with a specific per platform digest
|
||||
func newItemWithDigest(s *store.Layout, digestStr string, desc ocispec.Descriptor, m ocispec.Manifest, plat string, o *flags.InfoOpts) item {
|
||||
item := newItem(s, desc, m, plat, o)
|
||||
item.Digest = digestStr
|
||||
return item
|
||||
}
|
||||
|
||||
func newItem(s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, plat string, o *flags.InfoOpts) item {
|
||||
var size int64 = 0
|
||||
for _, l := range m.Layers {
|
||||
@@ -229,13 +300,15 @@ func newItem(s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, plat
|
||||
ctype = "image"
|
||||
}
|
||||
|
||||
switch desc.Annotations["kind"] {
|
||||
case "dev.cosignproject.cosign/sigs":
|
||||
switch {
|
||||
case desc.Annotations[consts.KindAnnotationName] == consts.KindAnnotationSigs:
|
||||
ctype = "sigs"
|
||||
case "dev.cosignproject.cosign/atts":
|
||||
case desc.Annotations[consts.KindAnnotationName] == consts.KindAnnotationAtts:
|
||||
ctype = "atts"
|
||||
case "dev.cosignproject.cosign/sboms":
|
||||
case desc.Annotations[consts.KindAnnotationName] == consts.KindAnnotationSboms:
|
||||
ctype = "sbom"
|
||||
case strings.HasPrefix(desc.Annotations[consts.KindAnnotationName], consts.KindAnnotationReferrers):
|
||||
ctype = "referrer"
|
||||
}
|
||||
|
||||
refName := desc.Annotations["io.containerd.image.name"]
|
||||
@@ -255,6 +328,7 @@ func newItem(s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, plat
|
||||
Reference: ref.Name(),
|
||||
Type: ctype,
|
||||
Platform: plat,
|
||||
Digest: desc.Digest.String(),
|
||||
Layers: len(m.Layers),
|
||||
Size: size,
|
||||
}
|
||||
|
||||
238
cmd/hauler/cli/store/info_test.go
Normal file
238
cmd/hauler/cli/store/info_test.go
Normal file
@@ -0,0 +1,238 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
func TestByteCountSI(t *testing.T) {
|
||||
tests := []struct {
|
||||
input int64
|
||||
want string
|
||||
}{
|
||||
{0, "0 B"},
|
||||
{999, "999 B"},
|
||||
{1000, "1.0 kB"},
|
||||
{1500000, "1.5 MB"},
|
||||
{1000000000, "1.0 GB"},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
got := byteCountSI(tc.input)
|
||||
if got != tc.want {
|
||||
t.Errorf("byteCountSI(%d) = %q, want %q", tc.input, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTruncateReference(t *testing.T) {
|
||||
longDigest := "sha256:abcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcd"
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
want string
|
||||
}{
|
||||
{"tag ref unchanged", "nginx:latest", "nginx:latest"},
|
||||
{"long digest truncated", "nginx@" + longDigest, "nginx@sha256:abcdefabcdef\u2026"},
|
||||
{"short digest not truncated", "nginx@sha256:abcdef", "nginx@sha256:abcdef"},
|
||||
{"no digest unchanged", "myrepo/myimage:v1", "myrepo/myimage:v1"},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := truncateReference(tc.input)
|
||||
if got != tc.want {
|
||||
t.Errorf("truncateReference(%q) = %q, want %q", tc.input, got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildJson(t *testing.T) {
|
||||
items := []item{
|
||||
{Reference: "myrepo/myimage:v1", Type: "image", Platform: "linux/amd64", Size: 1024, Layers: 2},
|
||||
{Reference: "myrepo/mychart:v1", Type: "chart", Platform: "-", Size: 512, Layers: 1},
|
||||
}
|
||||
out := buildJson(items...)
|
||||
if out == "" {
|
||||
t.Fatal("buildJson returned empty string")
|
||||
}
|
||||
var got []item
|
||||
if err := json.Unmarshal([]byte(out), &got); err != nil {
|
||||
t.Fatalf("buildJson output is not valid JSON: %v\noutput: %s", err, out)
|
||||
}
|
||||
if len(got) != len(items) {
|
||||
t.Fatalf("buildJson: got %d items, want %d", len(got), len(items))
|
||||
}
|
||||
for i, want := range items {
|
||||
if got[i].Reference != want.Reference {
|
||||
t.Errorf("item[%d].Reference = %q, want %q", i, got[i].Reference, want.Reference)
|
||||
}
|
||||
if got[i].Type != want.Type {
|
||||
t.Errorf("item[%d].Type = %q, want %q", i, got[i].Type, want.Type)
|
||||
}
|
||||
if got[i].Size != want.Size {
|
||||
t.Errorf("item[%d].Size = %d, want %d", i, got[i].Size, want.Size)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewItem(t *testing.T) {
|
||||
// newItem uses s only for its signature; it does not dereference s in practice.
|
||||
// We pass nil to keep tests dependency-free.
|
||||
const validRef = "myrepo/myimage:latest"
|
||||
|
||||
makeDesc := func(kindAnnotation string) ocispec.Descriptor {
|
||||
desc := ocispec.Descriptor{
|
||||
Annotations: map[string]string{
|
||||
"io.containerd.image.name": validRef,
|
||||
},
|
||||
}
|
||||
if kindAnnotation != "" {
|
||||
desc.Annotations[consts.KindAnnotationName] = kindAnnotation
|
||||
}
|
||||
return desc
|
||||
}
|
||||
makeManifest := func(configMediaType string) ocispec.Manifest {
|
||||
return ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{MediaType: configMediaType},
|
||||
}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
configMedia string
|
||||
kindAnnotation string
|
||||
typeFilter string
|
||||
wantType string
|
||||
wantEmpty bool
|
||||
}{
|
||||
{
|
||||
name: "DockerConfigJSON → image",
|
||||
configMedia: consts.DockerConfigJSON,
|
||||
typeFilter: "all",
|
||||
wantType: "image",
|
||||
},
|
||||
{
|
||||
name: "ChartConfigMediaType → chart",
|
||||
configMedia: consts.ChartConfigMediaType,
|
||||
typeFilter: "all",
|
||||
wantType: "chart",
|
||||
},
|
||||
{
|
||||
name: "FileLocalConfigMediaType → file",
|
||||
configMedia: consts.FileLocalConfigMediaType,
|
||||
typeFilter: "all",
|
||||
wantType: "file",
|
||||
},
|
||||
{
|
||||
name: "KindAnnotationSigs → sigs",
|
||||
configMedia: consts.DockerConfigJSON,
|
||||
kindAnnotation: consts.KindAnnotationSigs,
|
||||
typeFilter: "all",
|
||||
wantType: "sigs",
|
||||
},
|
||||
{
|
||||
name: "KindAnnotationAtts → atts",
|
||||
configMedia: consts.DockerConfigJSON,
|
||||
kindAnnotation: consts.KindAnnotationAtts,
|
||||
typeFilter: "all",
|
||||
wantType: "atts",
|
||||
},
|
||||
{
|
||||
name: "KindAnnotationReferrers prefix → referrer",
|
||||
configMedia: consts.DockerConfigJSON,
|
||||
kindAnnotation: consts.KindAnnotationReferrers + "/abc123",
|
||||
typeFilter: "all",
|
||||
wantType: "referrer",
|
||||
},
|
||||
{
|
||||
name: "TypeFilter:image with chart → empty item",
|
||||
configMedia: consts.ChartConfigMediaType,
|
||||
typeFilter: "image",
|
||||
wantEmpty: true,
|
||||
},
|
||||
{
|
||||
name: "TypeFilter:file with image → empty item",
|
||||
configMedia: consts.DockerConfigJSON,
|
||||
typeFilter: "file",
|
||||
wantEmpty: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
desc := makeDesc(tc.kindAnnotation)
|
||||
m := makeManifest(tc.configMedia)
|
||||
o := &flags.InfoOpts{TypeFilter: tc.typeFilter}
|
||||
|
||||
got := newItem(nil, desc, m, "linux/amd64", o)
|
||||
var empty item
|
||||
if tc.wantEmpty {
|
||||
if got != empty {
|
||||
t.Errorf("expected empty item, got %+v", got)
|
||||
}
|
||||
return
|
||||
}
|
||||
if got == empty {
|
||||
t.Fatalf("got empty item, want type %q", tc.wantType)
|
||||
}
|
||||
if got.Type != tc.wantType {
|
||||
t.Errorf("got type %q, want %q", got.Type, tc.wantType)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInfoCmd(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
// Seed a file artifact using a local temp file.
|
||||
tmpFile := t.TempDir() + "/hello.txt"
|
||||
if err := os.WriteFile(tmpFile, []byte("hello hauler"), 0o644); err != nil {
|
||||
t.Fatalf("write tmpFile: %v", err)
|
||||
}
|
||||
fi := v1.File{Path: tmpFile}
|
||||
if err := storeFile(ctx, s, fi); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
|
||||
baseOpts := func(typeFilter, format string) *flags.InfoOpts {
|
||||
return &flags.InfoOpts{
|
||||
StoreRootOpts: defaultRootOpts(s.Root),
|
||||
OutputFormat: format,
|
||||
TypeFilter: typeFilter,
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("TypeFilter:all json", func(t *testing.T) {
|
||||
if err := InfoCmd(ctx, baseOpts("all", "json"), s); err != nil {
|
||||
t.Errorf("InfoCmd(all, json): %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("TypeFilter:file json", func(t *testing.T) {
|
||||
if err := InfoCmd(ctx, baseOpts("file", "json"), s); err != nil {
|
||||
t.Errorf("InfoCmd(file, json): %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("TypeFilter:image json", func(t *testing.T) {
|
||||
// Store has only a file artifact; image filter returns no items (no error).
|
||||
if err := InfoCmd(ctx, baseOpts("image", "json"), s); err != nil {
|
||||
t.Errorf("InfoCmd(image, json): %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("TypeFilter:all table", func(t *testing.T) {
|
||||
if err := InfoCmd(ctx, baseOpts("all", "table"), s); err != nil {
|
||||
t.Errorf("InfoCmd(all, table): %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
309
cmd/hauler/cli/store/lifecycle_test.go
Normal file
309
cmd/hauler/cli/store/lifecycle_test.go
Normal file
@@ -0,0 +1,309 @@
|
||||
package store
|
||||
|
||||
// lifecycle_test.go covers the end-to-end add->save->load->copy/extract lifecycle
|
||||
// for file, image, and chart artifact types.
|
||||
//
|
||||
// Do NOT use t.Parallel() -- SaveCmd calls os.Chdir(storeDir).
|
||||
// Always use absolute paths for StoreDir and FileName.
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
// TestLifecycle_FileArtifact_AddSaveLoadCopy exercises the full lifecycle for a
|
||||
// file artifact: seed HTTP server -> storeFile -> SaveCmd -> LoadCmd -> CopyCmd dir://.
|
||||
func TestLifecycle_FileArtifact_AddSaveLoadCopy(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
// Step 1: Seed an HTTP file server with known content.
|
||||
fileContent := "lifecycle file artifact content"
|
||||
url := seedFileInHTTPServer(t, "lifecycle.txt", fileContent)
|
||||
|
||||
// Step 2: storeFile into store A.
|
||||
storeA := newTestStore(t)
|
||||
if err := storeFile(ctx, storeA, v1.File{Path: url}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, storeA, "lifecycle.txt")
|
||||
|
||||
// Flush index.json so SaveCmd can read it from disk.
|
||||
if err := storeA.SaveIndex(); err != nil {
|
||||
t.Fatalf("SaveIndex: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: SaveCmd -> archive (absolute paths required).
|
||||
archivePath := filepath.Join(t.TempDir(), "lifecycle-file.tar.zst")
|
||||
saveOpts := newSaveOpts(storeA.Root, archivePath)
|
||||
if err := SaveCmd(ctx, saveOpts, defaultRootOpts(storeA.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd: %v", err)
|
||||
}
|
||||
|
||||
fi, err := os.Stat(archivePath)
|
||||
if err != nil {
|
||||
t.Fatalf("archive stat: %v", err)
|
||||
}
|
||||
if fi.Size() == 0 {
|
||||
t.Fatal("archive is empty")
|
||||
}
|
||||
|
||||
// Step 4: LoadCmd -> store B.
|
||||
storeBDir := t.TempDir()
|
||||
loadOpts := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeBDir),
|
||||
FileName: []string{archivePath},
|
||||
}
|
||||
if err := LoadCmd(ctx, loadOpts, defaultRootOpts(storeBDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd: %v", err)
|
||||
}
|
||||
|
||||
storeB, err := store.NewLayout(storeBDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout(storeB): %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, storeB, "lifecycle.txt")
|
||||
|
||||
// Step 5: CopyCmd dir:// -> extract file to destDir.
|
||||
extractDir := t.TempDir()
|
||||
copyOpts := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(storeB.Root)}
|
||||
if err := CopyCmd(ctx, copyOpts, storeB, "dir://"+extractDir, defaultCliOpts()); err != nil {
|
||||
t.Fatalf("CopyCmd dir: %v", err)
|
||||
}
|
||||
|
||||
// Step 6: Assert file content matches original.
|
||||
outPath := filepath.Join(extractDir, "lifecycle.txt")
|
||||
data, err := os.ReadFile(outPath)
|
||||
if err != nil {
|
||||
t.Fatalf("expected extracted file at %s: %v", outPath, err)
|
||||
}
|
||||
if string(data) != fileContent {
|
||||
t.Errorf("file content mismatch: got %q, want %q", string(data), fileContent)
|
||||
}
|
||||
}
|
||||
|
||||
// TestLifecycle_Image_AddSaveLoadCopyRegistry exercises the full lifecycle for
|
||||
// a container image: seed registry 1 -> storeImage -> SaveCmd -> LoadCmd ->
|
||||
// CopyCmd registry:// -> verify in registry 2.
|
||||
func TestLifecycle_Image_AddSaveLoadCopyRegistry(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
// Step 1: Seed image into in-memory registry 1.
|
||||
srcHost, srcOpts := newLocalhostRegistry(t)
|
||||
srcImg := seedImage(t, srcHost, "lifecycle/app", "v1", srcOpts...)
|
||||
|
||||
srcDigest, err := srcImg.Digest()
|
||||
if err != nil {
|
||||
t.Fatalf("srcImg.Digest: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: storeImage into store A.
|
||||
storeA := newTestStore(t)
|
||||
rso := defaultRootOpts(storeA.Root)
|
||||
ro := defaultCliOpts()
|
||||
if err := storeImage(ctx, storeA, v1.Image{Name: srcHost + "/lifecycle/app:v1"}, "", rso, ro, ""); err != nil {
|
||||
t.Fatalf("storeImage: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, storeA, "lifecycle/app:v1")
|
||||
|
||||
// Flush index.json for SaveCmd.
|
||||
if err := storeA.SaveIndex(); err != nil {
|
||||
t.Fatalf("SaveIndex: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: SaveCmd -> archive.
|
||||
archivePath := filepath.Join(t.TempDir(), "lifecycle-image.tar.zst")
|
||||
saveOpts := newSaveOpts(storeA.Root, archivePath)
|
||||
if err := SaveCmd(ctx, saveOpts, defaultRootOpts(storeA.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd: %v", err)
|
||||
}
|
||||
|
||||
// Step 4: LoadCmd -> store B.
|
||||
storeBDir := t.TempDir()
|
||||
loadOpts := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeBDir),
|
||||
FileName: []string{archivePath},
|
||||
}
|
||||
if err := LoadCmd(ctx, loadOpts, defaultRootOpts(storeBDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd: %v", err)
|
||||
}
|
||||
|
||||
storeB, err := store.NewLayout(storeBDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout(storeB): %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, storeB, "lifecycle/app:v1")
|
||||
|
||||
// Step 5: CopyCmd registry:// -> in-memory registry 2.
|
||||
dstHost, dstOpts := newTestRegistry(t)
|
||||
copyOpts := &flags.CopyOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeB.Root),
|
||||
PlainHTTP: true,
|
||||
}
|
||||
if err := CopyCmd(ctx, copyOpts, storeB, "registry://"+dstHost, defaultCliOpts()); err != nil {
|
||||
t.Fatalf("CopyCmd registry: %v", err)
|
||||
}
|
||||
|
||||
// Step 6: Pull from registry 2 and compare digest to original.
|
||||
dstRef, err := name.NewTag(dstHost+"/lifecycle/app:v1", name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("name.NewTag: %v", err)
|
||||
}
|
||||
desc, err := remote.Get(dstRef, dstOpts...)
|
||||
if err != nil {
|
||||
t.Fatalf("image not found in target registry: %v", err)
|
||||
}
|
||||
if desc.Digest != srcDigest {
|
||||
t.Errorf("digest mismatch: got %s, want %s", desc.Digest, srcDigest)
|
||||
}
|
||||
}
|
||||
|
||||
// TestLifecycle_Chart_AddSaveLoadExtract exercises the full lifecycle for a
|
||||
// Helm chart: AddChartCmd -> SaveCmd -> LoadCmd -> ExtractCmd -> .tgz in destDir.
|
||||
func TestLifecycle_Chart_AddSaveLoadExtract(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
// Step 1: AddChartCmd with local testdata chart into store A.
|
||||
storeA := newTestStore(t)
|
||||
rso := defaultRootOpts(storeA.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
chartOpts := newAddChartOpts(chartTestdataDir, "")
|
||||
if err := AddChartCmd(ctx, chartOpts, storeA, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil {
|
||||
t.Fatalf("AddChartCmd: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, storeA, "rancher-cluster-templates")
|
||||
|
||||
// Flush index.json for SaveCmd.
|
||||
if err := storeA.SaveIndex(); err != nil {
|
||||
t.Fatalf("SaveIndex: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: SaveCmd -> archive.
|
||||
archivePath := filepath.Join(t.TempDir(), "lifecycle-chart.tar.zst")
|
||||
saveOpts := newSaveOpts(storeA.Root, archivePath)
|
||||
if err := SaveCmd(ctx, saveOpts, defaultRootOpts(storeA.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: LoadCmd -> new store.
|
||||
storeBDir := t.TempDir()
|
||||
loadOpts := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeBDir),
|
||||
FileName: []string{archivePath},
|
||||
}
|
||||
if err := LoadCmd(ctx, loadOpts, defaultRootOpts(storeBDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd: %v", err)
|
||||
}
|
||||
|
||||
storeB, err := store.NewLayout(storeBDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout(storeB): %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, storeB, "rancher-cluster-templates")
|
||||
|
||||
// Step 4: ExtractCmd -> .tgz in destDir.
|
||||
destDir := t.TempDir()
|
||||
extractOpts := &flags.ExtractOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeB.Root),
|
||||
DestinationDir: destDir,
|
||||
}
|
||||
if err := ExtractCmd(ctx, extractOpts, storeB, "hauler/rancher-cluster-templates:0.5.2"); err != nil {
|
||||
t.Fatalf("ExtractCmd: %v", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadDir: %v", err)
|
||||
}
|
||||
|
||||
found := false
|
||||
for _, e := range entries {
|
||||
if strings.HasSuffix(e.Name(), ".tgz") || strings.HasSuffix(e.Name(), ".tar.gz") {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
names := make([]string, len(entries))
|
||||
for i, e := range entries {
|
||||
names[i] = e.Name()
|
||||
}
|
||||
t.Errorf("expected a .tgz or .tar.gz in destDir, got: %v", names)
|
||||
}
|
||||
}
|
||||
|
||||
// TestLifecycle_Remove_ThenSave verifies that removing one artifact from a store
|
||||
// with two file artifacts, then saving/loading, results in only the retained
|
||||
// artifact being present.
|
||||
func TestLifecycle_Remove_ThenSave(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
// Step 1: Add two file artifacts.
|
||||
url1 := seedFileInHTTPServer(t, "keep-me.txt", "content to keep")
|
||||
url2 := seedFileInHTTPServer(t, "remove-me.txt", "content to remove")
|
||||
|
||||
storeA := newTestStore(t)
|
||||
if err := storeFile(ctx, storeA, v1.File{Path: url1}); err != nil {
|
||||
t.Fatalf("storeFile keep-me: %v", err)
|
||||
}
|
||||
if err := storeFile(ctx, storeA, v1.File{Path: url2}); err != nil {
|
||||
t.Fatalf("storeFile remove-me: %v", err)
|
||||
}
|
||||
|
||||
if n := countArtifactsInStore(t, storeA); n != 2 {
|
||||
t.Fatalf("expected 2 artifacts after adding both files, got %d", n)
|
||||
}
|
||||
|
||||
// Step 2: RemoveCmd(Force:true) on the "remove-me" artifact.
|
||||
if err := RemoveCmd(ctx, &flags.RemoveOpts{Force: true}, storeA, "remove-me"); err != nil {
|
||||
t.Fatalf("RemoveCmd: %v", err)
|
||||
}
|
||||
|
||||
if n := countArtifactsInStore(t, storeA); n != 1 {
|
||||
t.Fatalf("expected 1 artifact after removal, got %d", n)
|
||||
}
|
||||
assertArtifactInStore(t, storeA, "keep-me.txt")
|
||||
|
||||
// Flush index.json for SaveCmd. RemoveCmd calls OCI.SaveIndex() internally
|
||||
// (via Layout.Remove), but call it again for safety.
|
||||
if err := storeA.SaveIndex(); err != nil {
|
||||
t.Fatalf("SaveIndex: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: SaveCmd -> archive.
|
||||
archivePath := filepath.Join(t.TempDir(), "lifecycle-remove.tar.zst")
|
||||
saveOpts := newSaveOpts(storeA.Root, archivePath)
|
||||
if err := SaveCmd(ctx, saveOpts, defaultRootOpts(storeA.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd: %v", err)
|
||||
}
|
||||
|
||||
// Step 4: LoadCmd -> new store.
|
||||
storeBDir := t.TempDir()
|
||||
loadOpts := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeBDir),
|
||||
FileName: []string{archivePath},
|
||||
}
|
||||
if err := LoadCmd(ctx, loadOpts, defaultRootOpts(storeBDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd: %v", err)
|
||||
}
|
||||
|
||||
storeB, err := store.NewLayout(storeBDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout(storeB): %v", err)
|
||||
}
|
||||
|
||||
// Step 5: Assert only the retained artifact is present.
|
||||
if n := countArtifactsInStore(t, storeB); n != 1 {
|
||||
t.Errorf("expected 1 artifact in loaded store, got %d", n)
|
||||
}
|
||||
assertArtifactInStore(t, storeB, "keep-me.txt")
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
@@ -15,13 +16,15 @@ import (
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// extracts the contents of an archived oci layout to an existing oci layout
|
||||
func LoadCmd(ctx context.Context, o *flags.LoadOpts, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
tempOverride := o.TempOverride
|
||||
tempOverride := rso.TempOverride
|
||||
|
||||
if tempOverride == "" {
|
||||
tempOverride = os.Getenv(consts.HaulerTempDir)
|
||||
@@ -41,6 +44,7 @@ func LoadCmd(ctx context.Context, o *flags.LoadOpts, rso *flags.StoreRootOpts, r
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
clearDir(tempDir)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -85,6 +89,42 @@ func unarchiveLayoutTo(ctx context.Context, haulPath string, dest string, tempDi
|
||||
return err
|
||||
}
|
||||
|
||||
// ensure the incoming index.json has the correct annotations.
|
||||
data, err := os.ReadFile(tempDir + "/index.json")
|
||||
if err != nil {
|
||||
return (err)
|
||||
}
|
||||
|
||||
var idx ocispec.Index
|
||||
if err := json.Unmarshal(data, &idx); err != nil {
|
||||
return (err)
|
||||
}
|
||||
|
||||
for i := range idx.Manifests {
|
||||
if idx.Manifests[i].Annotations == nil {
|
||||
idx.Manifests[i].Annotations = make(map[string]string)
|
||||
}
|
||||
if _, exists := idx.Manifests[i].Annotations[consts.KindAnnotationName]; !exists {
|
||||
idx.Manifests[i].Annotations[consts.KindAnnotationName] = consts.KindAnnotationImage
|
||||
}
|
||||
if ref, ok := idx.Manifests[i].Annotations[consts.ContainerdImageNameKey]; ok {
|
||||
if slash := strings.Index(ref, "/"); slash != -1 {
|
||||
ref = ref[slash+1:]
|
||||
}
|
||||
if idx.Manifests[i].Annotations[consts.ImageRefKey] != ref {
|
||||
idx.Manifests[i].Annotations[consts.ImageRefKey] = ref
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
out, err := json.MarshalIndent(idx, "", " ")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.WriteFile(tempDir+"/index.json", out, 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s, err := store.NewLayout(tempDir)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -98,3 +138,19 @@ func unarchiveLayoutTo(ctx context.Context, haulPath string, dest string, tempDi
|
||||
_, err = s.CopyAll(ctx, ts, nil)
|
||||
return err
|
||||
}
|
||||
|
||||
func clearDir(path string) error {
|
||||
entries, err := os.ReadDir(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
err = os.RemoveAll(filepath.Join(path, entry.Name()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
323
cmd/hauler/cli/store/load_test.go
Normal file
323
cmd/hauler/cli/store/load_test.go
Normal file
@@ -0,0 +1,323 @@
|
||||
package store
|
||||
|
||||
// load_test.go covers unarchiveLayoutTo, LoadCmd, and clearDir.
|
||||
//
|
||||
// Do NOT call t.Parallel() on tests that invoke createRootLevelArchive —
|
||||
// that helper uses the mholt/archives library directly to avoid os.Chdir,
|
||||
// so it is safe for concurrent use, but the tests themselves exercise
|
||||
// unarchiveLayoutTo which is already sequential.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
mholtarchives "github.com/mholt/archives"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/archives"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
// testHaulArchive is the relative path from cmd/hauler/cli/store/ to the
|
||||
// bundled test haul archive produced by the project's own CI/test setup.
|
||||
const testHaulArchive = "../../../../testdata/haul.tar.zst"
|
||||
|
||||
// createRootLevelArchive creates a tar.zst archive from dir with files placed
|
||||
// at the archive root (no directory prefix). This matches the layout produced
|
||||
// by SaveCmd, which uses os.Chdir + Archive(".", ...) to achieve the same
|
||||
// effect. Using mholt/archives directly avoids the os.Chdir side-effect.
|
||||
func createRootLevelArchive(dir, outfile string) error {
|
||||
// A trailing path separator tells mholt/archives to enumerate the
|
||||
// directory's *contents* only — files land at archive root with no prefix.
|
||||
// Without the trailing slash, an empty value uses filepath.Base(dir) as
|
||||
// the archive subdirectory name instead of placing files at root.
|
||||
files, err := mholtarchives.FilesFromDisk(context.Background(), nil, map[string]string{
|
||||
dir + string(filepath.Separator): "",
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, err := os.Create(outfile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
format := mholtarchives.CompressedArchive{
|
||||
Compression: mholtarchives.Zstd{},
|
||||
Archival: mholtarchives.Tar{},
|
||||
}
|
||||
return format.Archive(context.Background(), f, files)
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// TestUnarchiveLayoutTo
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestUnarchiveLayoutTo verifies that unarchiveLayoutTo correctly extracts a
|
||||
// haul archive into a destination OCI layout, backfills missing annotations,
|
||||
// and propagates the ContainerdImageNameKey → ImageRefKey mapping.
|
||||
func TestUnarchiveLayoutTo(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
destDir := t.TempDir()
|
||||
tempDir := t.TempDir()
|
||||
|
||||
if err := unarchiveLayoutTo(ctx, testHaulArchive, destDir, tempDir); err != nil {
|
||||
t.Fatalf("unarchiveLayoutTo: %v", err)
|
||||
}
|
||||
|
||||
s, err := store.NewLayout(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout(destDir): %v", err)
|
||||
}
|
||||
|
||||
if count := countArtifactsInStore(t, s); count == 0 {
|
||||
t.Fatal("expected at least one descriptor in dest store after unarchiveLayoutTo")
|
||||
}
|
||||
|
||||
// Every top-level descriptor must carry KindAnnotationName.
|
||||
// Descriptors that were loaded with ContainerdImageNameKey must also have
|
||||
// ImageRefKey set (the backfill logic in unarchiveLayoutTo ensures this).
|
||||
if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error {
|
||||
if desc.Annotations[consts.KindAnnotationName] == "" {
|
||||
t.Errorf("descriptor %s missing KindAnnotationName", desc.Digest)
|
||||
}
|
||||
if _, hasContainerd := desc.Annotations[consts.ContainerdImageNameKey]; hasContainerd {
|
||||
if desc.Annotations[consts.ImageRefKey] == "" {
|
||||
t.Errorf("descriptor %s has %s but missing %s",
|
||||
desc.Digest, consts.ContainerdImageNameKey, consts.ImageRefKey)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Walk: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// TestLoadCmd_LocalFile
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestLoadCmd_LocalFile verifies that LoadCmd loads one or more local haul
|
||||
// archives into the destination store.
|
||||
func TestLoadCmd_LocalFile(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
t.Run("single archive", func(t *testing.T) {
|
||||
destDir := t.TempDir()
|
||||
o := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(destDir),
|
||||
FileName: []string{testHaulArchive},
|
||||
}
|
||||
if err := LoadCmd(ctx, o, defaultRootOpts(destDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd: %v", err)
|
||||
}
|
||||
s, err := store.NewLayout(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout: %v", err)
|
||||
}
|
||||
if countArtifactsInStore(t, s) == 0 {
|
||||
t.Error("expected artifacts in store after LoadCmd")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("two archives", func(t *testing.T) {
|
||||
// Loading the same archive twice must be idempotent: duplicate blobs are
|
||||
// silently discarded by the OCI pusher. The descriptor count after two
|
||||
// loads must equal the count after a single load.
|
||||
singleDir := t.TempDir()
|
||||
singleOpts := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(singleDir),
|
||||
FileName: []string{testHaulArchive},
|
||||
}
|
||||
if err := LoadCmd(ctx, singleOpts, defaultRootOpts(singleDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd single: %v", err)
|
||||
}
|
||||
singleStore, err := store.NewLayout(singleDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout single: %v", err)
|
||||
}
|
||||
singleCount := countArtifactsInStore(t, singleStore)
|
||||
|
||||
doubleDir := t.TempDir()
|
||||
doubleOpts := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(doubleDir),
|
||||
FileName: []string{testHaulArchive, testHaulArchive},
|
||||
}
|
||||
if err := LoadCmd(ctx, doubleOpts, defaultRootOpts(doubleDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd double: %v", err)
|
||||
}
|
||||
doubleStore, err := store.NewLayout(doubleDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout double: %v", err)
|
||||
}
|
||||
doubleCount := countArtifactsInStore(t, doubleStore)
|
||||
|
||||
if doubleCount != singleCount {
|
||||
t.Errorf("loading the same archive twice: got %d descriptors, want %d (same as single load)",
|
||||
doubleCount, singleCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// TestLoadCmd_RemoteArchive
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestLoadCmd_RemoteArchive verifies that LoadCmd can fetch and load a haul
|
||||
// archive served over HTTP.
|
||||
func TestLoadCmd_RemoteArchive(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
archiveData, err := os.ReadFile(testHaulArchive)
|
||||
if err != nil {
|
||||
t.Fatalf("read test archive: %v", err)
|
||||
}
|
||||
|
||||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Write(archiveData) //nolint:errcheck
|
||||
}))
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
destDir := t.TempDir()
|
||||
remoteURL := srv.URL + "/haul.tar.zst"
|
||||
|
||||
o := &flags.LoadOpts{
|
||||
StoreRootOpts: defaultRootOpts(destDir),
|
||||
FileName: []string{remoteURL},
|
||||
}
|
||||
|
||||
if err := LoadCmd(ctx, o, defaultRootOpts(destDir), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("LoadCmd remote: %v", err)
|
||||
}
|
||||
|
||||
s, err := store.NewLayout(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout: %v", err)
|
||||
}
|
||||
if countArtifactsInStore(t, s) == 0 {
|
||||
t.Error("expected artifacts in store after remote LoadCmd")
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// TestUnarchiveLayoutTo_AnnotationBackfill
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestUnarchiveLayoutTo_AnnotationBackfill crafts a haul archive whose
|
||||
// index.json entries are missing KindAnnotationName, then verifies that
|
||||
// unarchiveLayoutTo backfills every entry with KindAnnotationImage.
|
||||
func TestUnarchiveLayoutTo_AnnotationBackfill(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
// Step 1: Extract the real test archive to obtain a valid OCI layout on disk.
|
||||
extractDir := t.TempDir()
|
||||
if err := archives.Unarchive(ctx, testHaulArchive, extractDir); err != nil {
|
||||
t.Fatalf("Unarchive: %v", err)
|
||||
}
|
||||
|
||||
// Step 2: Read index.json and strip KindAnnotationName from every descriptor.
|
||||
indexPath := filepath.Join(extractDir, "index.json")
|
||||
data, err := os.ReadFile(indexPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read index.json: %v", err)
|
||||
}
|
||||
|
||||
var idx ocispec.Index
|
||||
if err := json.Unmarshal(data, &idx); err != nil {
|
||||
t.Fatalf("unmarshal index.json: %v", err)
|
||||
}
|
||||
if len(idx.Manifests) == 0 {
|
||||
t.Skip("testdata/haul.tar.zst has no top-level manifests — cannot test backfill")
|
||||
}
|
||||
for i := range idx.Manifests {
|
||||
delete(idx.Manifests[i].Annotations, consts.KindAnnotationName)
|
||||
}
|
||||
|
||||
out, err := json.MarshalIndent(idx, "", " ")
|
||||
if err != nil {
|
||||
t.Fatalf("marshal stripped index.json: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(indexPath, out, 0644); err != nil {
|
||||
t.Fatalf("write stripped index.json: %v", err)
|
||||
}
|
||||
|
||||
// Step 3: Re-archive with files at the archive root (no subdir prefix) so
|
||||
// the layout matches what unarchiveLayoutTo expects after extraction.
|
||||
strippedArchive := filepath.Join(t.TempDir(), "stripped.tar.zst")
|
||||
if err := createRootLevelArchive(extractDir, strippedArchive); err != nil {
|
||||
t.Fatalf("createRootLevelArchive: %v", err)
|
||||
}
|
||||
|
||||
// Step 4: Load the stripped archive.
|
||||
destDir := t.TempDir()
|
||||
tempDir := t.TempDir()
|
||||
if err := unarchiveLayoutTo(ctx, strippedArchive, destDir, tempDir); err != nil {
|
||||
t.Fatalf("unarchiveLayoutTo stripped: %v", err)
|
||||
}
|
||||
|
||||
// Step 5: Every descriptor in the dest store must now have
|
||||
// KindAnnotationName set to KindAnnotationImage (the backfill default).
|
||||
s, err := store.NewLayout(destDir)
|
||||
if err != nil {
|
||||
t.Fatalf("store.NewLayout(destDir): %v", err)
|
||||
}
|
||||
|
||||
if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error {
|
||||
kind := desc.Annotations[consts.KindAnnotationName]
|
||||
if kind == "" {
|
||||
t.Errorf("descriptor %s missing KindAnnotationName after backfill", desc.Digest)
|
||||
} else if kind != consts.KindAnnotationImage {
|
||||
t.Errorf("descriptor %s: expected backfilled kind=%q, got %q",
|
||||
desc.Digest, consts.KindAnnotationImage, kind)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Walk: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// TestClearDir
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
// TestClearDir verifies that clearDir removes all entries from a directory
|
||||
// without removing the directory itself.
|
||||
func TestClearDir(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
for _, name := range []string{"a.txt", "b.txt"} {
|
||||
if err := os.WriteFile(filepath.Join(dir, name), []byte(name), 0644); err != nil {
|
||||
t.Fatalf("write %s: %v", name, err)
|
||||
}
|
||||
}
|
||||
if err := os.Mkdir(filepath.Join(dir, "subdir"), 0755); err != nil {
|
||||
t.Fatalf("mkdir subdir: %v", err)
|
||||
}
|
||||
|
||||
if err := clearDir(dir); err != nil {
|
||||
t.Fatalf("clearDir: %v", err)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadDir after clearDir: %v", err)
|
||||
}
|
||||
if len(entries) != 0 {
|
||||
names := make([]string, len(entries))
|
||||
for i, e := range entries {
|
||||
names[i] = e.Name()
|
||||
}
|
||||
t.Errorf("clearDir: expected empty dir, found: %s", strings.Join(names, ", "))
|
||||
}
|
||||
}
|
||||
122
cmd/hauler/cli/store/remove.go
Normal file
122
cmd/hauler/cli/store/remove.go
Normal file
@@ -0,0 +1,122 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
func formatReference(ref string) string {
|
||||
tagIdx := strings.LastIndex(ref, ":")
|
||||
if tagIdx == -1 {
|
||||
return ref
|
||||
}
|
||||
|
||||
dashIdx := strings.Index(ref[tagIdx+1:], "-")
|
||||
if dashIdx == -1 {
|
||||
return ref
|
||||
}
|
||||
|
||||
dashIdx = tagIdx + 1 + dashIdx
|
||||
|
||||
base := ref[:dashIdx]
|
||||
suffix := ref[dashIdx+1:]
|
||||
|
||||
if base == "" || suffix == "" {
|
||||
return ref
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s [%s]", base, suffix)
|
||||
}
|
||||
|
||||
func RemoveCmd(ctx context.Context, o *flags.RemoveOpts, s *store.Layout, ref string) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
// collect matching artifacts
|
||||
type match struct {
|
||||
reference string
|
||||
desc ocispec.Descriptor
|
||||
}
|
||||
var matches []match
|
||||
|
||||
if err := s.Walk(func(reference string, desc ocispec.Descriptor) error {
|
||||
if !strings.Contains(reference, ref) {
|
||||
return nil
|
||||
}
|
||||
|
||||
matches = append(matches, match{
|
||||
reference: reference,
|
||||
desc: desc,
|
||||
})
|
||||
|
||||
return nil // continue walking
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(matches) == 0 {
|
||||
return fmt.Errorf("reference [%s] not found in store (use `hauler store info` to list store contents)", ref)
|
||||
}
|
||||
|
||||
if len(matches) >= 1 {
|
||||
l.Infof("found %d matching references:", len(matches))
|
||||
for _, m := range matches {
|
||||
l.Infof(" - [%s]", formatReference(m.reference))
|
||||
}
|
||||
}
|
||||
|
||||
if !o.Force {
|
||||
fmt.Printf(" ↳ are you sure you want to remove [%d] artifact(s) from the store? (yes/no) ", len(matches))
|
||||
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
|
||||
line, err := reader.ReadString('\n')
|
||||
if err != nil && !errors.Is(err, io.EOF) {
|
||||
return fmt.Errorf("failed to read response: [%w]... please answer 'yes' or 'no'", err)
|
||||
}
|
||||
|
||||
response := strings.ToLower(strings.TrimSpace(line))
|
||||
|
||||
switch response {
|
||||
case "yes", "y":
|
||||
l.Infof("starting to remove artifacts from store...")
|
||||
case "no", "n":
|
||||
l.Infof("successfully cancelled removal of artifacts from store")
|
||||
return nil
|
||||
case "":
|
||||
return fmt.Errorf("failed to read response... please answer 'yes' or 'no'")
|
||||
default:
|
||||
return fmt.Errorf("invalid response [%s]... please answer 'yes' or 'no'", response)
|
||||
}
|
||||
}
|
||||
|
||||
// remove artifact(s)
|
||||
for _, m := range matches {
|
||||
if err := s.RemoveArtifact(ctx, m.reference, m.desc); err != nil {
|
||||
return fmt.Errorf("failed to remove artifact [%s]: %w", formatReference(m.reference), err)
|
||||
}
|
||||
|
||||
l.Infof("successfully removed [%s] of type [%s] with digest [%s]", formatReference(m.reference), m.desc.MediaType, m.desc.Digest.String())
|
||||
}
|
||||
|
||||
// clean up unreferenced blobs
|
||||
l.Infof("cleaning up all unreferenced blobs...")
|
||||
removedCount, removedSize, err := s.CleanUp(ctx)
|
||||
if err != nil {
|
||||
l.Warnf("garbage collection failed: [%v]", err)
|
||||
} else if removedCount > 0 {
|
||||
l.Infof("successfully removed [%d] unreferenced blobs [freed %d bytes]", removedCount, removedSize)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
155
cmd/hauler/cli/store/remove_test.go
Normal file
155
cmd/hauler/cli/store/remove_test.go
Normal file
@@ -0,0 +1,155 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
)
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Unit tests — formatReference
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestFormatReference(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
ref string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "empty string returns empty",
|
||||
ref: "",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "no colon returns unchanged",
|
||||
ref: "nocolon",
|
||||
want: "nocolon",
|
||||
},
|
||||
{
|
||||
name: "tag without dash returns unchanged",
|
||||
ref: "rancher/rancher:v2.8.5",
|
||||
want: "rancher/rancher:v2.8.5",
|
||||
},
|
||||
{
|
||||
name: "cosign sig tag splits at first dash after last colon",
|
||||
ref: "repo:sha256-abc123.sig",
|
||||
want: "repo:sha256 [abc123.sig]",
|
||||
},
|
||||
{
|
||||
name: "cosign att tag format",
|
||||
ref: "myrepo:sha256-deadbeef.att",
|
||||
want: "myrepo:sha256 [deadbeef.att]",
|
||||
},
|
||||
{
|
||||
name: "cosign sbom tag format",
|
||||
ref: "myrepo:sha256-deadbeef.sbom",
|
||||
want: "myrepo:sha256 [deadbeef.sbom]",
|
||||
},
|
||||
{
|
||||
name: "tag is only a dash returns unchanged (empty suffix)",
|
||||
ref: "repo:-",
|
||||
want: "repo:-",
|
||||
},
|
||||
{
|
||||
name: "multiple colons uses last one",
|
||||
ref: "host:5000/repo:sha256-abc.sig",
|
||||
want: "host:5000/repo:sha256 [abc.sig]",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
got := formatReference(tc.ref)
|
||||
if got != tc.want {
|
||||
t.Errorf("formatReference(%q) = %q, want %q", tc.ref, got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Integration tests — RemoveCmd
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestRemoveCmd_Force(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
url := seedFileInHTTPServer(t, "removeme.txt", "file-to-remove")
|
||||
if err := storeFile(ctx, s, v1.File{Path: url}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
|
||||
if n := countArtifactsInStore(t, s); n == 0 {
|
||||
t.Fatal("expected at least 1 artifact after storeFile, got 0")
|
||||
}
|
||||
|
||||
// Confirm the artifact ref contains "removeme".
|
||||
var ref string
|
||||
if err := s.Walk(func(reference string, _ ocispec.Descriptor) error {
|
||||
if strings.Contains(reference, "removeme") {
|
||||
ref = reference
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("walk to find ref: %v", err)
|
||||
}
|
||||
if ref == "" {
|
||||
t.Fatal("could not find stored artifact reference containing 'removeme'")
|
||||
}
|
||||
|
||||
if err := RemoveCmd(ctx, &flags.RemoveOpts{Force: true}, s, "removeme"); err != nil {
|
||||
t.Fatalf("RemoveCmd: %v", err)
|
||||
}
|
||||
|
||||
if n := countArtifactsInStore(t, s); n != 0 {
|
||||
t.Errorf("expected 0 artifacts after removal, got %d", n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveCmd_NotFound(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
err := RemoveCmd(ctx, &flags.RemoveOpts{Force: true}, s, "nonexistent-ref")
|
||||
if err == nil {
|
||||
t.Fatal("expected error for non-existent ref, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "not found") {
|
||||
t.Errorf("expected error containing 'not found', got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveCmd_Force_MultipleMatches(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
// Seed two file artifacts whose names share the substring "testfile".
|
||||
url1 := seedFileInHTTPServer(t, "testfile-alpha.txt", "content-alpha")
|
||||
url2 := seedFileInHTTPServer(t, "testfile-beta.txt", "content-beta")
|
||||
|
||||
if err := storeFile(ctx, s, v1.File{Path: url1}); err != nil {
|
||||
t.Fatalf("storeFile alpha: %v", err)
|
||||
}
|
||||
if err := storeFile(ctx, s, v1.File{Path: url2}); err != nil {
|
||||
t.Fatalf("storeFile beta: %v", err)
|
||||
}
|
||||
|
||||
if n := countArtifactsInStore(t, s); n < 2 {
|
||||
t.Fatalf("expected at least 2 artifacts, got %d", n)
|
||||
}
|
||||
|
||||
// Remove using a substring that matches both.
|
||||
if err := RemoveCmd(ctx, &flags.RemoveOpts{Force: true}, s, "testfile"); err != nil {
|
||||
t.Fatalf("RemoveCmd: %v", err)
|
||||
}
|
||||
|
||||
if n := countArtifactsInStore(t, s); n != 0 {
|
||||
t.Errorf("expected 0 artifacts after removal of both, got %d", n)
|
||||
}
|
||||
}
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
"github.com/google/go-containerregistry/pkg/v1/layout"
|
||||
"github.com/google/go-containerregistry/pkg/v1/tarball"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
imagev1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/archives"
|
||||
@@ -31,8 +31,7 @@ func SaveCmd(ctx context.Context, o *flags.SaveOpts, rso *flags.StoreRootOpts, r
|
||||
compressionMap := archives.CompressionMap
|
||||
archivalMap := archives.ArchivalMap
|
||||
|
||||
// TODO: Support more formats?
|
||||
// Select the correct compression and archival type based on user input
|
||||
// select the compression and archival type based parsed filename extension
|
||||
compression := compressionMap["zst"]
|
||||
archival := archivalMap["tar"]
|
||||
|
||||
@@ -55,6 +54,18 @@ func SaveCmd(ctx context.Context, o *flags.SaveOpts, rso *flags.StoreRootOpts, r
|
||||
return err
|
||||
}
|
||||
|
||||
// strip out the oci-layout file from the haul
|
||||
// required for containerd to be able to interpret the haul correctly for all mediatypes and artifactypes
|
||||
if o.ContainerdCompatibility {
|
||||
if err := os.Remove(filepath.Join(".", ocispec.ImageLayoutFile)); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
l.Warnf("compatibility warning... containerd... removing 'oci-layout' file to support containerd importing of images")
|
||||
}
|
||||
}
|
||||
|
||||
// create the archive
|
||||
err = archives.Archive(ctx, ".", absOutputfile, compression, archival)
|
||||
if err != nil {
|
||||
@@ -105,58 +116,68 @@ func writeExportsManifest(ctx context.Context, dir string, platformStr string) e
|
||||
l.Debugf("descriptor [%s] <<< SKIPPING ARTIFACT [%q]", desc.Digest.String(), desc.ArtifactType)
|
||||
continue
|
||||
}
|
||||
if desc.Annotations != nil {
|
||||
// we only care about images that cosign has added to the layout index
|
||||
if kind, hasKind := desc.Annotations[consts.KindAnnotationName]; hasKind {
|
||||
if refName, hasRefName := desc.Annotations["io.containerd.image.name"]; hasRefName {
|
||||
// branch on image (aka image manifest) or image index
|
||||
switch kind {
|
||||
case consts.KindAnnotationImage:
|
||||
if err := x.record(ctx, idx, desc, refName); err != nil {
|
||||
return err
|
||||
}
|
||||
case consts.KindAnnotationIndex:
|
||||
l.Debugf("index [%s]: digest=[%s]... type=[%s]... size=[%d]", refName, desc.Digest.String(), desc.MediaType, desc.Size)
|
||||
// The kind annotation is the only reliable way to distinguish container images from
|
||||
// cosign signatures/attestations/SBOMs: those are stored as standard Docker/OCI
|
||||
// manifests (same media type as real images) so media type alone is insufficient.
|
||||
kind := desc.Annotations[consts.KindAnnotationName]
|
||||
if kind != consts.KindAnnotationImage && kind != consts.KindAnnotationIndex {
|
||||
l.Debugf("descriptor [%s] <<< SKIPPING KIND [%q]", desc.Digest.String(), kind)
|
||||
continue
|
||||
}
|
||||
|
||||
// when no platform is provided, warn the user of potential mismatch on import
|
||||
if platform.String() == "" {
|
||||
l.Warnf("specify an export platform to prevent potential platform mismatch on import of index [%s]", refName)
|
||||
}
|
||||
refName, hasRefName := desc.Annotations[consts.ContainerdImageNameKey]
|
||||
if !hasRefName {
|
||||
l.Debugf("descriptor [%s] <<< SKIPPING (no containerd image name)", desc.Digest.String())
|
||||
continue
|
||||
}
|
||||
|
||||
iix, err := idx.ImageIndex(desc.Digest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ixm, err := iix.IndexManifest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ixd := range ixm.Manifests {
|
||||
if ixd.MediaType.IsImage() {
|
||||
// check if platform is provided, if so, skip anything that doesn't match
|
||||
if platform.String() != "" {
|
||||
if ixd.Platform.Architecture != platform.Architecture || ixd.Platform.OS != platform.OS {
|
||||
l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: does not match the supplied platform... skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Use the descriptor's actual media type to discriminate single-image manifests
|
||||
// from multi-arch indexes, rather than relying on the kind string for this.
|
||||
switch {
|
||||
case desc.MediaType.IsImage():
|
||||
if err := x.record(ctx, idx, desc, refName); err != nil {
|
||||
return err
|
||||
}
|
||||
case desc.MediaType.IsIndex():
|
||||
l.Debugf("index [%s]: digest=[%s]... type=[%s]... size=[%d]", refName, desc.Digest.String(), desc.MediaType, desc.Size)
|
||||
|
||||
// skip 'unknown' platforms... docker hates
|
||||
if ixd.Platform.Architecture == "unknown" && ixd.Platform.OS == "unknown" {
|
||||
l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: matches unknown platform... skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture)
|
||||
continue
|
||||
}
|
||||
// when no platform is inputted... warn the user of potential mismatch on import for docker
|
||||
// required for docker to be able to interpret and load the image correctly
|
||||
if platform.String() == "" {
|
||||
l.Warnf("compatibility warning... docker... specify platform to prevent potential mismatch on import of index [%s]", refName)
|
||||
}
|
||||
|
||||
if err := x.record(ctx, iix, ixd, refName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
iix, err := idx.ImageIndex(desc.Digest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ixm, err := iix.IndexManifest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ixd := range ixm.Manifests {
|
||||
if ixd.MediaType.IsImage() {
|
||||
if platform.String() != "" {
|
||||
if ixd.Platform.Architecture != platform.Architecture || ixd.Platform.OS != platform.OS {
|
||||
l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: does not match the supplied platform... skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture)
|
||||
continue
|
||||
}
|
||||
default:
|
||||
l.Debugf("descriptor [%s] <<< SKIPPING KIND [%q]", desc.Digest.String(), kind)
|
||||
}
|
||||
|
||||
// skip any platforms of 'unknown/unknown'... docker hates
|
||||
// required for docker to be able to interpret and load the image correctly
|
||||
if ixd.Platform.Architecture == "unknown" && ixd.Platform.OS == "unknown" {
|
||||
l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: matches unknown platform... skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := x.record(ctx, iix, ixd, refName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
l.Debugf("descriptor [%s] <<< SKIPPING media type [%q]", desc.Digest.String(), desc.MediaType)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -187,6 +208,17 @@ func (x *exports) record(ctx context.Context, index libv1.ImageIndex, desc libv1
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify this is a real container image by inspecting its manifest config media type.
|
||||
// Non-image OCI artifacts (Helm charts, files, cosign sigs) use distinct config types.
|
||||
manifest, err := image.Manifest()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if manifest.Config.MediaType != types.DockerConfigJSON && manifest.Config.MediaType != types.OCIConfigJSON {
|
||||
l.Debugf("descriptor [%s] <<< SKIPPING NON-IMAGE config media type [%q]", desc.Digest.String(), manifest.Config.MediaType)
|
||||
return nil
|
||||
}
|
||||
|
||||
config, err := image.ConfigName()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -197,7 +229,7 @@ func (x *exports) record(ctx context.Context, index libv1.ImageIndex, desc libv1
|
||||
// record one export record per digest
|
||||
x.digests = append(x.digests, digest)
|
||||
xd = tarball.Descriptor{
|
||||
Config: path.Join(imagev1.ImageBlobsDir, config.Algorithm, config.Hex),
|
||||
Config: path.Join(ocispec.ImageBlobsDir, config.Algorithm, config.Hex),
|
||||
RepoTags: []string{},
|
||||
Layers: []string{},
|
||||
}
|
||||
@@ -211,7 +243,7 @@ func (x *exports) record(ctx context.Context, index libv1.ImageIndex, desc libv1
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
xd.Layers = append(xd.Layers[:], path.Join(imagev1.ImageBlobsDir, xl.Algorithm, xl.Hex))
|
||||
xd.Layers = append(xd.Layers[:], path.Join(ocispec.ImageBlobsDir, xl.Algorithm, xl.Hex))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
223
cmd/hauler/cli/store/save_test.go
Normal file
223
cmd/hauler/cli/store/save_test.go
Normal file
@@ -0,0 +1,223 @@
|
||||
package store
|
||||
|
||||
// save_test.go covers writeExportsManifest and SaveCmd.
|
||||
//
|
||||
// IMPORTANT: SaveCmd calls os.Chdir(storeDir) and defers os.Chdir back. Do
|
||||
// NOT call t.Parallel() on any SaveCmd test, and always use absolute paths for
|
||||
// StoreDir and FileName so they remain valid after the chdir.
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/archives"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
// manifestEntry mirrors tarball.Descriptor for asserting manifest.json contents.
|
||||
type manifestEntry struct {
|
||||
Config string `json:"Config"`
|
||||
RepoTags []string `json:"RepoTags"`
|
||||
Layers []string `json:"Layers"`
|
||||
}
|
||||
|
||||
// readManifestJSON reads and unmarshals manifest.json from the given OCI layout dir.
|
||||
func readManifestJSON(t *testing.T, dir string) []manifestEntry {
|
||||
t.Helper()
|
||||
data, err := os.ReadFile(filepath.Join(dir, consts.ImageManifestFile))
|
||||
if err != nil {
|
||||
t.Fatalf("readManifestJSON: %v", err)
|
||||
}
|
||||
var entries []manifestEntry
|
||||
if err := json.Unmarshal(data, &entries); err != nil {
|
||||
t.Fatalf("readManifestJSON unmarshal: %v", err)
|
||||
}
|
||||
return entries
|
||||
}
|
||||
|
||||
// newSaveOpts builds a SaveOpts pointing at storeDir with an absolute archive path.
|
||||
func newSaveOpts(storeDir, archivePath string) *flags.SaveOpts {
|
||||
return &flags.SaveOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeDir),
|
||||
FileName: archivePath,
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// writeExportsManifest unit tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestWriteExportsManifest(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
t.Run("no platform filter includes all platforms", func(t *testing.T) {
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
seedIndex(t, host, "test/multiarch", "v1", rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, host+"/test/multiarch:v1", ""); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
if err := writeExportsManifest(ctx, s.Root, ""); err != nil {
|
||||
t.Fatalf("writeExportsManifest: %v", err)
|
||||
}
|
||||
|
||||
entries := readManifestJSON(t, s.Root)
|
||||
if len(entries) < 2 {
|
||||
t.Errorf("expected >=2 entries (all platforms), got %d", len(entries))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("linux/amd64 filter yields single entry", func(t *testing.T) {
|
||||
host, rOpts := newLocalhostRegistry(t)
|
||||
seedIndex(t, host, "test/multiarch", "v2", rOpts...)
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, host+"/test/multiarch:v2", ""); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
if err := writeExportsManifest(ctx, s.Root, "linux/amd64"); err != nil {
|
||||
t.Fatalf("writeExportsManifest: %v", err)
|
||||
}
|
||||
|
||||
entries := readManifestJSON(t, s.Root)
|
||||
if len(entries) != 1 {
|
||||
t.Errorf("expected 1 entry for linux/amd64, got %d", len(entries))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("chart artifact excluded via config media type check", func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
o := newAddChartOpts(chartTestdataDir, "")
|
||||
if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil {
|
||||
t.Fatalf("AddChartCmd: %v", err)
|
||||
}
|
||||
|
||||
if err := writeExportsManifest(ctx, s.Root, ""); err != nil {
|
||||
t.Fatalf("writeExportsManifest: %v", err)
|
||||
}
|
||||
|
||||
entries := readManifestJSON(t, s.Root)
|
||||
if len(entries) != 0 {
|
||||
t.Errorf("expected 0 entries (chart excluded from manifest.json), got %d", len(entries))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestWriteExportsManifest_SkipsNonImages(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
|
||||
url := seedFileInHTTPServer(t, "skip.sh", "#!/bin/sh\necho skip")
|
||||
s := newTestStore(t)
|
||||
if err := storeFile(ctx, s, v1.File{Path: url}); err != nil {
|
||||
t.Fatalf("storeFile: %v", err)
|
||||
}
|
||||
|
||||
if err := writeExportsManifest(ctx, s.Root, ""); err != nil {
|
||||
t.Fatalf("writeExportsManifest: %v", err)
|
||||
}
|
||||
|
||||
entries := readManifestJSON(t, s.Root)
|
||||
if len(entries) != 0 {
|
||||
t.Errorf("expected 0 entries for file-only store, got %d", len(entries))
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// SaveCmd integration tests
|
||||
// Do NOT use t.Parallel() — SaveCmd calls os.Chdir.
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestSaveCmd(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, _ := newLocalhostRegistry(t)
|
||||
seedImage(t, host, "test/save", "v1")
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, host+"/test/save:v1", ""); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
// FileName must be absolute so it remains valid after SaveCmd's os.Chdir.
|
||||
archivePath := filepath.Join(t.TempDir(), "haul.tar.zst")
|
||||
o := newSaveOpts(s.Root, archivePath)
|
||||
|
||||
if err := SaveCmd(ctx, o, defaultRootOpts(s.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd: %v", err)
|
||||
}
|
||||
|
||||
fi, err := os.Stat(archivePath)
|
||||
if err != nil {
|
||||
t.Fatalf("archive stat: %v", err)
|
||||
}
|
||||
if fi.Size() == 0 {
|
||||
t.Fatal("archive is empty")
|
||||
}
|
||||
|
||||
// Validate it is a well-formed zst archive by unarchiving it.
|
||||
destDir := t.TempDir()
|
||||
if err := archives.Unarchive(ctx, archivePath, destDir); err != nil {
|
||||
t.Fatalf("Unarchive: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSaveCmd_ContainerdCompatibility(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
host, _ := newLocalhostRegistry(t)
|
||||
seedImage(t, host, "test/containerd-compat", "v1")
|
||||
|
||||
s := newTestStore(t)
|
||||
if err := s.AddImage(ctx, host+"/test/containerd-compat:v1", ""); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
archivePath := filepath.Join(t.TempDir(), "haul-compat.tar.zst")
|
||||
o := newSaveOpts(s.Root, archivePath)
|
||||
o.ContainerdCompatibility = true
|
||||
|
||||
if err := SaveCmd(ctx, o, defaultRootOpts(s.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd ContainerdCompatibility: %v", err)
|
||||
}
|
||||
|
||||
destDir := t.TempDir()
|
||||
if err := archives.Unarchive(ctx, archivePath, destDir); err != nil {
|
||||
t.Fatalf("Unarchive: %v", err)
|
||||
}
|
||||
|
||||
// oci-layout must be absent from the extracted archive.
|
||||
ociLayoutPath := filepath.Join(destDir, "oci-layout")
|
||||
if _, err := os.Stat(ociLayoutPath); !os.IsNotExist(err) {
|
||||
t.Errorf("expected oci-layout to be absent in containerd-compatible archive, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSaveCmd_EmptyStore(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
// SaveCmd uses layout.FromPath which stats index.json — it must exist on
|
||||
// disk. A fresh store holds the index only in memory; SaveIndex flushes it.
|
||||
if err := s.SaveIndex(); err != nil {
|
||||
t.Fatalf("SaveIndex: %v", err)
|
||||
}
|
||||
|
||||
archivePath := filepath.Join(t.TempDir(), "haul-empty.tar.zst")
|
||||
o := newSaveOpts(s.Root, archivePath)
|
||||
|
||||
if err := SaveCmd(ctx, o, defaultRootOpts(s.Root), defaultCliOpts()); err != nil {
|
||||
t.Fatalf("SaveCmd empty store: %v", err)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(archivePath); err != nil {
|
||||
t.Fatalf("archive not created for empty store: %v", err)
|
||||
}
|
||||
}
|
||||
@@ -2,9 +2,12 @@ package store
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/distribution/distribution/v3/configuration"
|
||||
@@ -21,6 +24,38 @@ import (
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
func validateStoreExists(s *store.Layout) error {
|
||||
indexPath := filepath.Join(s.Root, "index.json")
|
||||
|
||||
_, err := os.Stat(indexPath)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf(
|
||||
"no store found at [%s]\n ↳ does the hauler store exist? (verify with `hauler store info`)",
|
||||
s.Root,
|
||||
)
|
||||
}
|
||||
|
||||
return fmt.Errorf(
|
||||
"unable to access store at [%s]: %w",
|
||||
s.Root,
|
||||
err,
|
||||
)
|
||||
}
|
||||
|
||||
func loadConfig(filename string) (*configuration.Configuration, error) {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return configuration.Parse(f)
|
||||
}
|
||||
|
||||
func DefaultRegistryConfig(o *flags.ServeRegistryOpts, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *configuration.Configuration {
|
||||
cfg := &configuration.Configuration{
|
||||
Version: "0.1",
|
||||
@@ -53,12 +88,16 @@ func ServeRegistryCmd(ctx context.Context, o *flags.ServeRegistryOpts, s *store.
|
||||
l := log.FromContext(ctx)
|
||||
ctx = dcontext.WithVersion(ctx, version.Version)
|
||||
|
||||
if err := validateStoreExists(s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tr := server.NewTempRegistry(ctx, o.RootDir)
|
||||
if err := tr.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := &flags.CopyOpts{}
|
||||
opts := &flags.CopyOpts{StoreRootOpts: rso, PlainHTTP: true}
|
||||
if err := CopyCmd(ctx, opts, s, "registry://"+tr.Registry(), ro); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -101,7 +140,11 @@ func ServeFilesCmd(ctx context.Context, o *flags.ServeFilesOpts, s *store.Layout
|
||||
l := log.FromContext(ctx)
|
||||
ctx = dcontext.WithVersion(ctx, version.Version)
|
||||
|
||||
opts := &flags.CopyOpts{}
|
||||
if err := validateStoreExists(s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := &flags.CopyOpts{StoreRootOpts: &flags.StoreRootOpts{}}
|
||||
if err := CopyCmd(ctx, opts, s, "dir://"+o.RootDir, ro); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -125,12 +168,3 @@ func ServeFilesCmd(ctx context.Context, o *flags.ServeFilesOpts, s *store.Layout
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func loadConfig(filename string) (*configuration.Configuration, error) {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return configuration.Parse(f)
|
||||
}
|
||||
|
||||
164
cmd/hauler/cli/store/serve_test.go
Normal file
164
cmd/hauler/cli/store/serve_test.go
Normal file
@@ -0,0 +1,164 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
// writeIndexJSON writes a minimal valid OCI index.json to dir so that
|
||||
// validateStoreExists can find it. NewLayout only writes index.json on
|
||||
// SaveIndex, which is triggered by adding content — so tests that need a
|
||||
// "valid store on disk" must create the file themselves.
|
||||
func writeIndexJSON(t *testing.T, dir string) {
|
||||
t.Helper()
|
||||
const minimal = `{"schemaVersion":2,"mediaType":"application/vnd.oci.image.index.v1+json","manifests":[]}`
|
||||
if err := os.WriteFile(filepath.Join(dir, "index.json"), []byte(minimal), 0o644); err != nil {
|
||||
t.Fatalf("writeIndexJSON: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateStoreExists(t *testing.T) {
|
||||
t.Run("valid store", func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
writeIndexJSON(t, s.Root)
|
||||
if err := validateStoreExists(s); err != nil {
|
||||
t.Errorf("validateStoreExists on valid store: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("missing index.json", func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
err := validateStoreExists(s)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for missing index.json, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "no store found") {
|
||||
t.Errorf("expected 'no store found' in error, got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("nonexistent directory", func(t *testing.T) {
|
||||
s := newTestStore(t)
|
||||
// Point the layout root at a path that does not exist.
|
||||
s.Root = filepath.Join(t.TempDir(), "does-not-exist", "nested")
|
||||
err := validateStoreExists(s)
|
||||
if err == nil {
|
||||
t.Fatal("expected error for nonexistent dir, got nil")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestDefaultRegistryConfig(t *testing.T) {
|
||||
rootDir := t.TempDir()
|
||||
o := &flags.ServeRegistryOpts{
|
||||
Port: consts.DefaultRegistryPort,
|
||||
RootDir: rootDir,
|
||||
}
|
||||
rso := defaultRootOpts(rootDir)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
cfg := DefaultRegistryConfig(o, rso, ro)
|
||||
if cfg == nil {
|
||||
t.Fatal("DefaultRegistryConfig returned nil")
|
||||
}
|
||||
|
||||
// Port
|
||||
wantAddr := ":5000"
|
||||
if cfg.HTTP.Addr != wantAddr {
|
||||
t.Errorf("HTTP.Addr = %q, want %q", cfg.HTTP.Addr, wantAddr)
|
||||
}
|
||||
|
||||
// No TLS by default.
|
||||
if cfg.HTTP.TLS.Certificate != "" || cfg.HTTP.TLS.Key != "" {
|
||||
t.Errorf("expected no TLS cert/key by default, got cert=%q key=%q",
|
||||
cfg.HTTP.TLS.Certificate, cfg.HTTP.TLS.Key)
|
||||
}
|
||||
|
||||
// Log level matches ro.LogLevel.
|
||||
if string(cfg.Log.Level) != ro.LogLevel {
|
||||
t.Errorf("Log.Level = %q, want %q", cfg.Log.Level, ro.LogLevel)
|
||||
}
|
||||
|
||||
// Storage rootdirectory.
|
||||
fsParams := cfg.Storage["filesystem"]
|
||||
if fsParams == nil {
|
||||
t.Fatal("storage.filesystem not set")
|
||||
}
|
||||
if fsParams["rootdirectory"] != rootDir {
|
||||
t.Errorf("storage.filesystem.rootdirectory = %v, want %q", fsParams["rootdirectory"], rootDir)
|
||||
}
|
||||
|
||||
// URL allow rules.
|
||||
if len(cfg.Validation.Manifests.URLs.Allow) == 0 {
|
||||
t.Error("Validation.Manifests.URLs.Allow is empty, want at least one rule")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDefaultRegistryConfig_WithTLS(t *testing.T) {
|
||||
rootDir := t.TempDir()
|
||||
o := &flags.ServeRegistryOpts{
|
||||
Port: consts.DefaultRegistryPort,
|
||||
RootDir: rootDir,
|
||||
TLSCert: "/path/to/cert.pem",
|
||||
TLSKey: "/path/to/key.pem",
|
||||
}
|
||||
rso := defaultRootOpts(rootDir)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
cfg := DefaultRegistryConfig(o, rso, ro)
|
||||
if cfg.HTTP.TLS.Certificate != o.TLSCert {
|
||||
t.Errorf("TLS.Certificate = %q, want %q", cfg.HTTP.TLS.Certificate, o.TLSCert)
|
||||
}
|
||||
if cfg.HTTP.TLS.Key != o.TLSKey {
|
||||
t.Errorf("TLS.Key = %q, want %q", cfg.HTTP.TLS.Key, o.TLSKey)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadConfig_ValidFile(t *testing.T) {
|
||||
// Write a minimal valid distribution registry config.
|
||||
cfg := `
|
||||
version: 0.1
|
||||
log:
|
||||
level: info
|
||||
storage:
|
||||
filesystem:
|
||||
rootdirectory: /tmp/registry
|
||||
cache:
|
||||
blobdescriptor: inmemory
|
||||
http:
|
||||
addr: :5000
|
||||
headers:
|
||||
X-Content-Type-Options: [nosniff]
|
||||
`
|
||||
f, err := os.CreateTemp(t.TempDir(), "registry-config-*.yaml")
|
||||
if err != nil {
|
||||
t.Fatalf("create temp file: %v", err)
|
||||
}
|
||||
if _, err := f.WriteString(cfg); err != nil {
|
||||
t.Fatalf("write config: %v", err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
got, err := loadConfig(f.Name())
|
||||
if err != nil {
|
||||
t.Fatalf("loadConfig: %v", err)
|
||||
}
|
||||
if got == nil {
|
||||
t.Fatal("loadConfig returned nil config")
|
||||
}
|
||||
if got.HTTP.Addr != ":5000" {
|
||||
t.Errorf("HTTP.Addr = %q, want %q", got.HTTP.Addr, ":5000")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadConfig_InvalidFile(t *testing.T) {
|
||||
_, err := loadConfig("/nonexistent/path/to/config.yaml")
|
||||
if err == nil {
|
||||
t.Fatal("expected error for nonexistent config file, got nil")
|
||||
}
|
||||
}
|
||||
@@ -15,11 +15,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
convert "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/convert"
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
v1alpha1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
tchart "hauler.dev/go/hauler/pkg/collection/chart"
|
||||
"hauler.dev/go/hauler/pkg/collection/imagetxt"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content"
|
||||
"hauler.dev/go/hauler/pkg/cosign"
|
||||
@@ -32,7 +28,7 @@ import (
|
||||
func SyncCmd(ctx context.Context, o *flags.SyncOpts, s *store.Layout, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
tempOverride := o.TempOverride
|
||||
tempOverride := rso.TempOverride
|
||||
|
||||
if tempOverride == "" {
|
||||
tempOverride = os.Getenv(consts.HaulerTempDir)
|
||||
@@ -63,7 +59,7 @@ func SyncCmd(ctx context.Context, o *flags.SyncOpts, s *store.Layout, rso *flags
|
||||
img := v1.Image{
|
||||
Name: manifestLoc,
|
||||
}
|
||||
err := storeImage(ctx, s, img, o.Platform, rso, ro)
|
||||
err := storeImage(ctx, s, img, o.Platform, rso, ro, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -77,6 +73,7 @@ func SyncCmd(ctx context.Context, o *flags.SyncOpts, s *store.Layout, rso *flags
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fi.Close()
|
||||
err = processContent(ctx, fi, o, s, rso, ro)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -168,23 +165,6 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor
|
||||
|
||||
case consts.FilesContentKind:
|
||||
switch gvk.Version {
|
||||
case "v1alpha1":
|
||||
l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)
|
||||
|
||||
var alphaCfg v1alpha1.Files
|
||||
if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
var v1Cfg v1.Files
|
||||
if err := convert.ConvertFiles(&alphaCfg, &v1Cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, f := range v1Cfg.Spec.Files {
|
||||
if err := storeFile(ctx, s, f); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case "v1":
|
||||
var cfg v1.Files
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
@@ -197,87 +177,11 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
|
||||
case consts.ImagesContentKind:
|
||||
switch gvk.Version {
|
||||
case "v1alpha1":
|
||||
l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)
|
||||
|
||||
var alphaCfg v1alpha1.Images
|
||||
if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
var v1Cfg v1.Images
|
||||
if err := convert.ConvertImages(&alphaCfg, &v1Cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a := v1Cfg.GetAnnotations()
|
||||
for _, i := range v1Cfg.Spec.Images {
|
||||
|
||||
if a[consts.ImageAnnotationRegistry] != "" || o.Registry != "" {
|
||||
newRef, _ := reference.Parse(i.Name)
|
||||
newReg := o.Registry
|
||||
if o.Registry == "" && a[consts.ImageAnnotationRegistry] != "" {
|
||||
newReg = a[consts.ImageAnnotationRegistry]
|
||||
}
|
||||
if newRef.Context().RegistryStr() == "" {
|
||||
newRef, err = reference.Relocate(i.Name, newReg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
i.Name = newRef.Name()
|
||||
}
|
||||
|
||||
if a[consts.ImageAnnotationKey] != "" || o.Key != "" || i.Key != "" {
|
||||
key := o.Key
|
||||
if o.Key == "" && a[consts.ImageAnnotationKey] != "" {
|
||||
key, err = homedir.Expand(a[consts.ImageAnnotationKey])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if i.Key != "" {
|
||||
key, err = homedir.Expand(i.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
l.Debugf("key for image [%s]", key)
|
||||
|
||||
tlog := o.Tlog
|
||||
if !o.Tlog && a[consts.ImageAnnotationTlog] == "true" {
|
||||
tlog = true
|
||||
}
|
||||
if i.Tlog {
|
||||
tlog = i.Tlog
|
||||
}
|
||||
l.Debugf("transparency log for verification [%b]", tlog)
|
||||
|
||||
if err := cosign.VerifySignature(ctx, s, key, tlog, i.Name, rso, ro); err != nil {
|
||||
l.Errorf("signature verification failed for image [%s]... skipping...\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("signature verified for image [%s]", i.Name)
|
||||
}
|
||||
|
||||
platform := o.Platform
|
||||
if o.Platform == "" && a[consts.ImageAnnotationPlatform] != "" {
|
||||
platform = a[consts.ImageAnnotationPlatform]
|
||||
}
|
||||
if i.Platform != "" {
|
||||
platform = i.Platform
|
||||
}
|
||||
|
||||
if err := storeImage(ctx, s, i, platform, rso, ro); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
s.CopyAll(ctx, s.OCI, nil)
|
||||
|
||||
case "v1":
|
||||
var cfg v1.Images
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
@@ -302,7 +206,13 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor
|
||||
i.Name = newRef.Name()
|
||||
}
|
||||
|
||||
if a[consts.ImageAnnotationKey] != "" || o.Key != "" || i.Key != "" {
|
||||
hasAnnotationIdentityOptions := a[consts.ImageAnnotationCertIdentityRegexp] != "" || a[consts.ImageAnnotationCertIdentity] != ""
|
||||
hasCliIdentityOptions := o.CertIdentityRegexp != "" || o.CertIdentity != ""
|
||||
hasImageIdentityOptions := i.CertIdentityRegexp != "" || i.CertIdentity != ""
|
||||
|
||||
needsKeylessVerificaton := hasAnnotationIdentityOptions || hasCliIdentityOptions || hasImageIdentityOptions
|
||||
needsPubKeyVerification := a[consts.ImageAnnotationKey] != "" || o.Key != "" || i.Key != ""
|
||||
if needsPubKeyVerification {
|
||||
key := o.Key
|
||||
if o.Key == "" && a[consts.ImageAnnotationKey] != "" {
|
||||
key, err = homedir.Expand(a[consts.ImageAnnotationKey])
|
||||
@@ -327,13 +237,65 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor
|
||||
}
|
||||
l.Debugf("transparency log for verification [%b]", tlog)
|
||||
|
||||
if err := cosign.VerifySignature(ctx, s, key, tlog, i.Name, rso, ro); err != nil {
|
||||
if err := cosign.VerifySignature(ctx, key, tlog, i.Name, rso, ro); err != nil {
|
||||
l.Errorf("signature verification failed for image [%s]... skipping...\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("signature verified for image [%s]", i.Name)
|
||||
}
|
||||
} else if needsKeylessVerificaton { //Keyless signature verification
|
||||
certIdentityRegexp := o.CertIdentityRegexp
|
||||
if o.CertIdentityRegexp == "" && a[consts.ImageAnnotationCertIdentityRegexp] != "" {
|
||||
certIdentityRegexp = a[consts.ImageAnnotationCertIdentityRegexp]
|
||||
}
|
||||
if i.CertIdentityRegexp != "" {
|
||||
certIdentityRegexp = i.CertIdentityRegexp
|
||||
}
|
||||
l.Debugf("certIdentityRegexp for image [%s]", certIdentityRegexp)
|
||||
|
||||
certIdentity := o.CertIdentity
|
||||
if o.CertIdentity == "" && a[consts.ImageAnnotationCertIdentity] != "" {
|
||||
certIdentity = a[consts.ImageAnnotationCertIdentity]
|
||||
}
|
||||
if i.CertIdentity != "" {
|
||||
certIdentity = i.CertIdentity
|
||||
}
|
||||
l.Debugf("certIdentity for image [%s]", certIdentity)
|
||||
|
||||
certOidcIssuer := o.CertOidcIssuer
|
||||
if o.CertOidcIssuer == "" && a[consts.ImageAnnotationCertOidcIssuer] != "" {
|
||||
certOidcIssuer = a[consts.ImageAnnotationCertOidcIssuer]
|
||||
}
|
||||
if i.CertOidcIssuer != "" {
|
||||
certOidcIssuer = i.CertOidcIssuer
|
||||
}
|
||||
l.Debugf("certOidcIssuer for image [%s]", certOidcIssuer)
|
||||
|
||||
certOidcIssuerRegexp := o.CertOidcIssuerRegexp
|
||||
if o.CertOidcIssuerRegexp == "" && a[consts.ImageAnnotationCertOidcIssuerRegexp] != "" {
|
||||
certOidcIssuerRegexp = a[consts.ImageAnnotationCertOidcIssuerRegexp]
|
||||
}
|
||||
if i.CertOidcIssuerRegexp != "" {
|
||||
certOidcIssuerRegexp = i.CertOidcIssuerRegexp
|
||||
}
|
||||
l.Debugf("certOidcIssuerRegexp for image [%s]", certOidcIssuerRegexp)
|
||||
|
||||
certGithubWorkflowRepository := o.CertGithubWorkflowRepository
|
||||
if o.CertGithubWorkflowRepository == "" && a[consts.ImageAnnotationCertGithubWorkflowRepository] != "" {
|
||||
certGithubWorkflowRepository = a[consts.ImageAnnotationCertGithubWorkflowRepository]
|
||||
}
|
||||
if i.CertGithubWorkflowRepository != "" {
|
||||
certGithubWorkflowRepository = i.CertGithubWorkflowRepository
|
||||
}
|
||||
l.Debugf("certGithubWorkflowRepository for image [%s]", certGithubWorkflowRepository)
|
||||
|
||||
// Keyless (Fulcio) certs expire after ~10 min; tlog is always
|
||||
// required to prove the cert was valid at signing time.
|
||||
if err := cosign.VerifyKeylessSignature(ctx, certIdentity, certIdentityRegexp, certOidcIssuer, certOidcIssuerRegexp, certGithubWorkflowRepository, i.Name, rso, ro); err != nil {
|
||||
l.Errorf("signature verification failed for image [%s]... skipping...\n%v", i.Name, err)
|
||||
continue
|
||||
}
|
||||
l.Infof("keyless signature verified for image [%s]", i.Name)
|
||||
}
|
||||
platform := o.Platform
|
||||
if o.Platform == "" && a[consts.ImageAnnotationPlatform] != "" {
|
||||
platform = a[consts.ImageAnnotationPlatform]
|
||||
@@ -342,148 +304,61 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor
|
||||
platform = i.Platform
|
||||
}
|
||||
|
||||
if err := storeImage(ctx, s, i, platform, rso, ro); err != nil {
|
||||
rewrite := ""
|
||||
if i.Rewrite != "" {
|
||||
rewrite = i.Rewrite
|
||||
}
|
||||
|
||||
if err := storeImage(ctx, s, i, platform, rso, ro, rewrite); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
s.CopyAll(ctx, s.OCI, nil)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
|
||||
case consts.ChartsContentKind:
|
||||
switch gvk.Version {
|
||||
case "v1alpha1":
|
||||
l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)
|
||||
|
||||
var alphaCfg v1alpha1.Charts
|
||||
if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
var v1Cfg v1.Charts
|
||||
if err := convert.ConvertCharts(&alphaCfg, &v1Cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ch := range v1Cfg.Spec.Charts {
|
||||
if err := storeChart(ctx, s, ch, &action.ChartPathOptions{}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case "v1":
|
||||
var cfg v1.Charts
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ch := range cfg.Spec.Charts {
|
||||
if err := storeChart(ctx, s, ch, &action.ChartPathOptions{}); err != nil {
|
||||
registry := o.Registry
|
||||
if registry == "" {
|
||||
annotation := cfg.GetAnnotations()
|
||||
if annotation != nil {
|
||||
registry = annotation[consts.ImageAnnotationRegistry]
|
||||
}
|
||||
}
|
||||
|
||||
for i, ch := range cfg.Spec.Charts {
|
||||
if err := storeChart(ctx, s, ch,
|
||||
&flags.AddChartOpts{
|
||||
ChartOpts: &action.ChartPathOptions{
|
||||
RepoURL: ch.RepoURL,
|
||||
Version: ch.Version,
|
||||
},
|
||||
AddImages: ch.AddImages,
|
||||
AddDependencies: ch.AddDependencies,
|
||||
Registry: registry,
|
||||
Platform: o.Platform,
|
||||
},
|
||||
rso, ro,
|
||||
cfg.Spec.Charts[i].Rewrite,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
|
||||
case consts.ChartsCollectionKind:
|
||||
switch gvk.Version {
|
||||
case "v1alpha1":
|
||||
l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)
|
||||
|
||||
var alphaCfg v1alpha1.ThickCharts
|
||||
if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
var v1Cfg v1.ThickCharts
|
||||
if err := convert.ConvertThickCharts(&alphaCfg, &v1Cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, chObj := range v1Cfg.Spec.Charts {
|
||||
tc, err := tchart.NewThickChart(chObj, &action.ChartPathOptions{
|
||||
RepoURL: chObj.RepoURL,
|
||||
Version: chObj.Version,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := s.AddOCICollection(ctx, tc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case "v1":
|
||||
var cfg v1.ThickCharts
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, chObj := range cfg.Spec.Charts {
|
||||
tc, err := tchart.NewThickChart(chObj, &action.ChartPathOptions{
|
||||
RepoURL: chObj.RepoURL,
|
||||
Version: chObj.Version,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := s.AddOCICollection(ctx, tc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
|
||||
case consts.ImageTxtsContentKind:
|
||||
switch gvk.Version {
|
||||
case "v1alpha1":
|
||||
l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release !!! DEPRECATION WARNING !!!", gvk.Version)
|
||||
|
||||
var alphaCfg v1alpha1.ImageTxts
|
||||
if err := yaml.Unmarshal(doc, &alphaCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
var v1Cfg v1.ImageTxts
|
||||
if err := convert.ConvertImageTxts(&alphaCfg, &v1Cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, cfgIt := range v1Cfg.Spec.ImageTxts {
|
||||
it, err := imagetxt.New(cfgIt.Ref,
|
||||
imagetxt.WithIncludeSources(cfgIt.Sources.Include...),
|
||||
imagetxt.WithExcludeSources(cfgIt.Sources.Exclude...),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("convert ImageTxt %s: %v", v1Cfg.Name, err)
|
||||
}
|
||||
if _, err := s.AddOCICollection(ctx, it); err != nil {
|
||||
return fmt.Errorf("add ImageTxt %s to store: %v", v1Cfg.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
case "v1":
|
||||
var cfg v1.ImageTxts
|
||||
if err := yaml.Unmarshal(doc, &cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, cfgIt := range cfg.Spec.ImageTxts {
|
||||
it, err := imagetxt.New(cfgIt.Ref,
|
||||
imagetxt.WithIncludeSources(cfgIt.Sources.Include...),
|
||||
imagetxt.WithExcludeSources(cfgIt.Sources.Exclude...),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("convert ImageTxt %s: %v", cfg.Name, err)
|
||||
}
|
||||
if _, err := s.AddOCICollection(ctx, it); err != nil {
|
||||
return fmt.Errorf("add ImageTxt %s to store: %v", cfg.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind)
|
||||
return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1]", gvk.Version, gvk.Kind)
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported kind [%s]... valid kinds are [Files, Images, Charts, ThickCharts, ImageTxts]", gvk.Kind)
|
||||
return fmt.Errorf("unsupported kind [%s]... valid kinds are [Files, Images, Charts]", gvk.Kind)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
290
cmd/hauler/cli/store/sync_test.go
Normal file
290
cmd/hauler/cli/store/sync_test.go
Normal file
@@ -0,0 +1,290 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
)
|
||||
|
||||
// writeManifestFile writes yamlContent to a temp file, seeks back to the
|
||||
// start, and registers t.Cleanup to close + remove it. Returns the open
|
||||
// *os.File, ready for processContent to read.
|
||||
func writeManifestFile(t *testing.T, yamlContent string) *os.File {
|
||||
t.Helper()
|
||||
fi, err := os.CreateTemp(t.TempDir(), "hauler-manifest-*.yaml")
|
||||
if err != nil {
|
||||
t.Fatalf("writeManifestFile CreateTemp: %v", err)
|
||||
}
|
||||
t.Cleanup(func() { fi.Close() })
|
||||
if _, err := fi.WriteString(yamlContent); err != nil {
|
||||
t.Fatalf("writeManifestFile WriteString: %v", err)
|
||||
}
|
||||
if _, err := fi.Seek(0, io.SeekStart); err != nil {
|
||||
t.Fatalf("writeManifestFile Seek: %v", err)
|
||||
}
|
||||
return fi
|
||||
}
|
||||
|
||||
// newSyncOpts builds a SyncOpts pointing at storeDir.
|
||||
func newSyncOpts(storeDir string) *flags.SyncOpts {
|
||||
return &flags.SyncOpts{
|
||||
StoreRootOpts: defaultRootOpts(storeDir),
|
||||
}
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// processContent tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestProcessContent_Files_v1(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
fileURL := seedFileInHTTPServer(t, "synced.sh", "#!/bin/sh\necho hello")
|
||||
|
||||
manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Files
|
||||
metadata:
|
||||
name: test-files
|
||||
spec:
|
||||
files:
|
||||
- path: %s
|
||||
`, fileURL)
|
||||
|
||||
fi := writeManifestFile(t, manifest)
|
||||
o := newSyncOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil {
|
||||
t.Fatalf("processContent Files v1: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "synced.sh")
|
||||
}
|
||||
|
||||
func TestProcessContent_Charts_v1(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
// Use the same relative path as add_test.go: url.ParseRequestURI accepts
|
||||
// absolute Unix paths, making isUrl() return true for them. A relative
|
||||
// path correctly keeps isUrl() false so Helm sees it as a local directory.
|
||||
manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Charts
|
||||
metadata:
|
||||
name: test-charts
|
||||
spec:
|
||||
charts:
|
||||
- name: rancher-cluster-templates-0.5.2.tgz
|
||||
repoURL: %s
|
||||
`, chartTestdataDir)
|
||||
|
||||
fi := writeManifestFile(t, manifest)
|
||||
o := newSyncOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil {
|
||||
t.Fatalf("processContent Charts v1: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "rancher-cluster-templates")
|
||||
}
|
||||
|
||||
func TestProcessContent_Images_v1(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
host, _ := newLocalhostRegistry(t)
|
||||
seedImage(t, host, "myorg/myimage", "v1") // transport not needed; AddImage reads via localhost scheme
|
||||
|
||||
manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Images
|
||||
metadata:
|
||||
name: test-images
|
||||
spec:
|
||||
images:
|
||||
- name: %s/myorg/myimage:v1
|
||||
`, host)
|
||||
|
||||
fi := writeManifestFile(t, manifest)
|
||||
o := newSyncOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil {
|
||||
t.Fatalf("processContent Images v1: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "myorg/myimage")
|
||||
}
|
||||
|
||||
func TestProcessContent_UnsupportedKind(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
// A valid apiVersion with an unsupported kind passes content.Load but hits
|
||||
// the default branch of the kind switch, returning an error.
|
||||
manifest := `apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Unknown
|
||||
metadata:
|
||||
name: test
|
||||
`
|
||||
|
||||
fi := writeManifestFile(t, manifest)
|
||||
o := newSyncOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err == nil {
|
||||
t.Fatal("expected error for unsupported kind, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessContent_UnsupportedVersion(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
// An unrecognized apiVersion causes content.Load to return an error, which
|
||||
// processContent treats as a warn-and-skip — the function returns nil and
|
||||
// no artifact is added to the store.
|
||||
manifest := `apiVersion: content.hauler.cattle.io/v2
|
||||
kind: Files
|
||||
metadata:
|
||||
name: test
|
||||
spec:
|
||||
files:
|
||||
- path: /dev/null
|
||||
`
|
||||
|
||||
fi := writeManifestFile(t, manifest)
|
||||
o := newSyncOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil {
|
||||
t.Fatalf("expected nil for unrecognized apiVersion (warn-and-skip), got: %v", err)
|
||||
}
|
||||
if n := countArtifactsInStore(t, s); n != 0 {
|
||||
t.Errorf("expected 0 artifacts after skipped document, got %d", n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessContent_MultiDoc(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
fileURL := seedFileInHTTPServer(t, "multi.sh", "#!/bin/sh\necho multi")
|
||||
host, _ := newLocalhostRegistry(t)
|
||||
seedImage(t, host, "myorg/multiimage", "v1")
|
||||
|
||||
manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Files
|
||||
metadata:
|
||||
name: test-files
|
||||
spec:
|
||||
files:
|
||||
- path: %s
|
||||
---
|
||||
apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Charts
|
||||
metadata:
|
||||
name: test-charts
|
||||
spec:
|
||||
charts:
|
||||
- name: rancher-cluster-templates-0.5.2.tgz
|
||||
repoURL: %s
|
||||
---
|
||||
apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Images
|
||||
metadata:
|
||||
name: test-images
|
||||
spec:
|
||||
images:
|
||||
- name: %s/myorg/multiimage:v1
|
||||
`, fileURL, chartTestdataDir, host)
|
||||
|
||||
fi := writeManifestFile(t, manifest)
|
||||
o := newSyncOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil {
|
||||
t.Fatalf("processContent MultiDoc: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "multi.sh")
|
||||
assertArtifactInStore(t, s, "rancher-cluster-templates")
|
||||
assertArtifactInStore(t, s, "myorg/multiimage")
|
||||
}
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// SyncCmd integration tests
|
||||
// --------------------------------------------------------------------------
|
||||
|
||||
func TestSyncCmd_LocalFile(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
fileURL := seedFileInHTTPServer(t, "synced-local.sh", "#!/bin/sh\necho local")
|
||||
|
||||
manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Files
|
||||
metadata:
|
||||
name: test-sync-local
|
||||
spec:
|
||||
files:
|
||||
- path: %s
|
||||
`, fileURL)
|
||||
|
||||
// SyncCmd reads by file path, so write and close the manifest file first.
|
||||
manifestFile, err := os.CreateTemp(t.TempDir(), "hauler-sync-local-*.yaml")
|
||||
if err != nil {
|
||||
t.Fatalf("CreateTemp: %v", err)
|
||||
}
|
||||
manifestPath := manifestFile.Name()
|
||||
if _, err := manifestFile.WriteString(manifest); err != nil {
|
||||
manifestFile.Close()
|
||||
t.Fatalf("WriteString: %v", err)
|
||||
}
|
||||
manifestFile.Close()
|
||||
|
||||
o := newSyncOpts(s.Root)
|
||||
o.FileName = []string{manifestPath}
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := SyncCmd(ctx, o, s, rso, ro); err != nil {
|
||||
t.Fatalf("SyncCmd LocalFile: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "synced-local.sh")
|
||||
}
|
||||
|
||||
func TestSyncCmd_RemoteManifest(t *testing.T) {
|
||||
ctx := newTestContext(t)
|
||||
s := newTestStore(t)
|
||||
|
||||
fileURL := seedFileInHTTPServer(t, "synced-remote.sh", "#!/bin/sh\necho remote")
|
||||
|
||||
manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Files
|
||||
metadata:
|
||||
name: test-sync-remote
|
||||
spec:
|
||||
files:
|
||||
- path: %s
|
||||
`, fileURL)
|
||||
|
||||
// Serve the manifest itself over HTTP so SyncCmd's remote-download path is exercised.
|
||||
manifestSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/yaml")
|
||||
io.WriteString(w, manifest) //nolint:errcheck
|
||||
}))
|
||||
t.Cleanup(manifestSrv.Close)
|
||||
|
||||
o := newSyncOpts(s.Root)
|
||||
o.FileName = []string{manifestSrv.URL + "/manifest.yaml"}
|
||||
rso := defaultRootOpts(s.Root)
|
||||
ro := defaultCliOpts()
|
||||
|
||||
if err := SyncCmd(ctx, o, s, rso, ro); err != nil {
|
||||
t.Fatalf("SyncCmd RemoteManifest: %v", err)
|
||||
}
|
||||
assertArtifactInStore(t, s, "synced-remote.sh")
|
||||
}
|
||||
302
cmd/hauler/cli/store/testhelpers_test.go
Normal file
302
cmd/hauler/cli/store/testhelpers_test.go
Normal file
@@ -0,0 +1,302 @@
|
||||
package store
|
||||
|
||||
// testhelpers_test.go provides shared test helpers for cmd/hauler/cli/store tests.
|
||||
//
|
||||
// This file is in-package (package store) so tests can call unexported
|
||||
// helpers like storeImage, storeFile, rewriteReference, etc.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/registry"
|
||||
gcrv1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/empty"
|
||||
"github.com/google/go-containerregistry/pkg/v1/mutate"
|
||||
"github.com/google/go-containerregistry/pkg/v1/random"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
"github.com/google/go-containerregistry/pkg/v1/static"
|
||||
gvtypes "github.com/google/go-containerregistry/pkg/v1/types"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/rs/zerolog"
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
// newTestStore creates a fresh store in a temp directory. Fatal on error.
|
||||
func newTestStore(t *testing.T) *store.Layout {
|
||||
t.Helper()
|
||||
s, err := store.NewLayout(t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("newTestStore: %v", err)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// newTestRegistry starts an in-memory OCI registry backed by httptest.
|
||||
// Returns the host (host:port) and remote.Options that route requests through
|
||||
// the server's plain-HTTP transport. The server is shut down via t.Cleanup.
|
||||
//
|
||||
// Pass the returned remoteOpts to seedImage/seedIndex and to store.AddImage
|
||||
// calls so that both sides use the same plain-HTTP transport.
|
||||
func newTestRegistry(t *testing.T) (host string, remoteOpts []remote.Option) {
|
||||
t.Helper()
|
||||
srv := httptest.NewServer(registry.New())
|
||||
t.Cleanup(srv.Close)
|
||||
host = strings.TrimPrefix(srv.URL, "http://")
|
||||
remoteOpts = []remote.Option{remote.WithTransport(srv.Client().Transport)}
|
||||
return host, remoteOpts
|
||||
}
|
||||
|
||||
// seedImage pushes a random single-platform image to the test registry.
|
||||
// repo is a bare path like "myorg/myimage"; tag is the image tag string.
|
||||
// Pass the remoteOpts from newTestRegistry so writes use the correct transport.
|
||||
func seedImage(t *testing.T, host, repo, tag string, opts ...remote.Option) gcrv1.Image {
|
||||
t.Helper()
|
||||
img, err := random.Image(512, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("seedImage random.Image: %v", err)
|
||||
}
|
||||
ref, err := name.NewTag(host+"/"+repo+":"+tag, name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("seedImage name.NewTag: %v", err)
|
||||
}
|
||||
if err := remote.Write(ref, img, opts...); err != nil {
|
||||
t.Fatalf("seedImage remote.Write: %v", err)
|
||||
}
|
||||
return img
|
||||
}
|
||||
|
||||
// seedIndex pushes a 2-platform image index (linux/amd64 + linux/arm64) to
|
||||
// the test registry. Pass the remoteOpts from newTestRegistry.
|
||||
func seedIndex(t *testing.T, host, repo, tag string, opts ...remote.Option) gcrv1.ImageIndex {
|
||||
t.Helper()
|
||||
amd64Img, err := random.Image(512, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("seedIndex random.Image amd64: %v", err)
|
||||
}
|
||||
arm64Img, err := random.Image(512, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("seedIndex random.Image arm64: %v", err)
|
||||
}
|
||||
idx := mutate.AppendManifests(
|
||||
empty.Index,
|
||||
mutate.IndexAddendum{
|
||||
Add: amd64Img,
|
||||
Descriptor: gcrv1.Descriptor{
|
||||
MediaType: gvtypes.OCIManifestSchema1,
|
||||
Platform: &gcrv1.Platform{OS: "linux", Architecture: "amd64"},
|
||||
},
|
||||
},
|
||||
mutate.IndexAddendum{
|
||||
Add: arm64Img,
|
||||
Descriptor: gcrv1.Descriptor{
|
||||
MediaType: gvtypes.OCIManifestSchema1,
|
||||
Platform: &gcrv1.Platform{OS: "linux", Architecture: "arm64"},
|
||||
},
|
||||
},
|
||||
)
|
||||
ref, err := name.NewTag(host+"/"+repo+":"+tag, name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("seedIndex name.NewTag: %v", err)
|
||||
}
|
||||
if err := remote.WriteIndex(ref, idx, opts...); err != nil {
|
||||
t.Fatalf("seedIndex remote.WriteIndex: %v", err)
|
||||
}
|
||||
return idx
|
||||
}
|
||||
|
||||
// seedFileInHTTPServer starts an httptest server serving a single file at
|
||||
// /filename with the given content. Returns the full URL. Server closed via t.Cleanup.
|
||||
func seedFileInHTTPServer(t *testing.T, filename, content string) string {
|
||||
t.Helper()
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/"+filename, func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
io.WriteString(w, content) //nolint:errcheck
|
||||
})
|
||||
srv := httptest.NewServer(mux)
|
||||
t.Cleanup(srv.Close)
|
||||
return srv.URL + "/" + filename
|
||||
}
|
||||
|
||||
// defaultRootOpts returns a StoreRootOpts pointed at storeDir with Retries=1.
|
||||
// Using Retries=1 avoids the 5-second RetriesInterval sleep in failure tests.
|
||||
func defaultRootOpts(storeDir string) *flags.StoreRootOpts {
|
||||
return &flags.StoreRootOpts{
|
||||
StoreDir: storeDir,
|
||||
Retries: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// defaultCliOpts returns CliRootOpts with error-level logging and IgnoreErrors=false.
|
||||
func defaultCliOpts() *flags.CliRootOpts {
|
||||
return &flags.CliRootOpts{
|
||||
IgnoreErrors: false,
|
||||
LogLevel: "error",
|
||||
}
|
||||
}
|
||||
|
||||
// newTestContext returns a context with a no-op zerolog logger attached so that
|
||||
// log.FromContext does not emit to stdout/stderr during tests.
|
||||
func newTestContext(t *testing.T) context.Context {
|
||||
t.Helper()
|
||||
zl := zerolog.New(io.Discard)
|
||||
return zl.WithContext(context.Background())
|
||||
}
|
||||
|
||||
// newAddChartOpts builds an AddChartOpts for loading a local .tgz chart from
|
||||
// repoURL (typically a testdata directory path) at the given version string.
|
||||
func newAddChartOpts(repoURL, version string) *flags.AddChartOpts {
|
||||
return &flags.AddChartOpts{
|
||||
ChartOpts: &action.ChartPathOptions{
|
||||
RepoURL: repoURL,
|
||||
Version: version,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// assertArtifactInStore walks the store and fails the test if no descriptor
|
||||
// has an AnnotationRefName containing refSubstring.
|
||||
func assertArtifactInStore(t *testing.T, s *store.Layout, refSubstring string) {
|
||||
t.Helper()
|
||||
found := false
|
||||
if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error {
|
||||
if strings.Contains(desc.Annotations[ocispec.AnnotationRefName], refSubstring) {
|
||||
found = true
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("assertArtifactInStore walk: %v", err)
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("no artifact with ref containing %q found in store", refSubstring)
|
||||
}
|
||||
}
|
||||
|
||||
// assertArtifactKindInStore walks the store and fails if no descriptor has an
|
||||
// AnnotationRefName containing refSubstring AND KindAnnotationName equal to kind.
|
||||
func assertArtifactKindInStore(t *testing.T, s *store.Layout, refSubstring, kind string) {
|
||||
t.Helper()
|
||||
found := false
|
||||
if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error {
|
||||
if strings.Contains(desc.Annotations[ocispec.AnnotationRefName], refSubstring) &&
|
||||
desc.Annotations[consts.KindAnnotationName] == kind {
|
||||
found = true
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("assertArtifactKindInStore walk: %v", err)
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("no artifact with ref containing %q and kind %q found in store", refSubstring, kind)
|
||||
}
|
||||
}
|
||||
|
||||
// countArtifactsInStore returns the number of descriptors in the store index.
|
||||
func countArtifactsInStore(t *testing.T, s *store.Layout) int {
|
||||
t.Helper()
|
||||
count := 0
|
||||
if err := s.OCI.Walk(func(_ string, _ ocispec.Descriptor) error {
|
||||
count++
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("countArtifactsInStore walk: %v", err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// seedCosignV2Artifacts pushes synthetic cosign v2 signature, attestation, and SBOM
|
||||
// manifests at the sha256-<hex>.sig / .att / .sbom tags derived from baseImg's digest.
|
||||
// Pass the remoteOpts from newLocalhostRegistry or newTestRegistry.
|
||||
func seedCosignV2Artifacts(t *testing.T, host, repo string, baseImg gcrv1.Image, opts ...remote.Option) {
|
||||
t.Helper()
|
||||
hash, err := baseImg.Digest()
|
||||
if err != nil {
|
||||
t.Fatalf("seedCosignV2Artifacts: get digest: %v", err)
|
||||
}
|
||||
tagPrefix := strings.ReplaceAll(hash.String(), ":", "-")
|
||||
for _, suffix := range []string{".sig", ".att", ".sbom"} {
|
||||
img, err := random.Image(64, 1)
|
||||
if err != nil {
|
||||
t.Fatalf("seedCosignV2Artifacts: random.Image (%s): %v", suffix, err)
|
||||
}
|
||||
ref, err := name.NewTag(host+"/"+repo+":"+tagPrefix+suffix, name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("seedCosignV2Artifacts: NewTag (%s): %v", suffix, err)
|
||||
}
|
||||
if err := remote.Write(ref, img, opts...); err != nil {
|
||||
t.Fatalf("seedCosignV2Artifacts: Write (%s): %v", suffix, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// seedOCI11Referrer pushes a synthetic OCI 1.1 / cosign v3 Sigstore bundle manifest
|
||||
// whose subject field points at baseImg. The in-process registry auto-registers it in
|
||||
// the referrers index so remote.Referrers returns it.
|
||||
// Pass the remoteOpts from newLocalhostRegistry or newTestRegistry.
|
||||
func seedOCI11Referrer(t *testing.T, host, repo string, baseImg gcrv1.Image, opts ...remote.Option) {
|
||||
t.Helper()
|
||||
hash, err := baseImg.Digest()
|
||||
if err != nil {
|
||||
t.Fatalf("seedOCI11Referrer: get digest: %v", err)
|
||||
}
|
||||
rawManifest, err := baseImg.RawManifest()
|
||||
if err != nil {
|
||||
t.Fatalf("seedOCI11Referrer: raw manifest: %v", err)
|
||||
}
|
||||
mt, err := baseImg.MediaType()
|
||||
if err != nil {
|
||||
t.Fatalf("seedOCI11Referrer: media type: %v", err)
|
||||
}
|
||||
baseDesc := gcrv1.Descriptor{
|
||||
MediaType: mt,
|
||||
Digest: hash,
|
||||
Size: int64(len(rawManifest)),
|
||||
}
|
||||
|
||||
bundleJSON := []byte(`{"mediaType":"application/vnd.dev.sigstore.bundle.v0.3+json"}`)
|
||||
bundleLayer := static.NewLayer(bundleJSON, gvtypes.MediaType(consts.SigstoreBundleMediaType))
|
||||
referrerImg, err := mutate.AppendLayers(empty.Image, bundleLayer)
|
||||
if err != nil {
|
||||
t.Fatalf("seedOCI11Referrer: AppendLayers: %v", err)
|
||||
}
|
||||
referrerImg = mutate.MediaType(referrerImg, gvtypes.OCIManifestSchema1)
|
||||
referrerImg = mutate.ConfigMediaType(referrerImg, gvtypes.MediaType(consts.OCIEmptyConfigMediaType))
|
||||
referrerImg = mutate.Subject(referrerImg, baseDesc).(gcrv1.Image)
|
||||
|
||||
referrerTag, err := name.NewTag(host+"/"+repo+":bundle-referrer", name.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("seedOCI11Referrer: NewTag: %v", err)
|
||||
}
|
||||
if err := remote.Write(referrerTag, referrerImg, opts...); err != nil {
|
||||
t.Fatalf("seedOCI11Referrer: Write: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// assertReferrerInStore walks the store and fails if no descriptor has a kind
|
||||
// annotation with the KindAnnotationReferrers prefix and a ref containing refSubstring.
|
||||
func assertReferrerInStore(t *testing.T, s *store.Layout, refSubstring string) {
|
||||
t.Helper()
|
||||
found := false
|
||||
if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error {
|
||||
if strings.Contains(desc.Annotations[ocispec.AnnotationRefName], refSubstring) &&
|
||||
strings.HasPrefix(desc.Annotations[consts.KindAnnotationName], consts.KindAnnotationReferrers) {
|
||||
found = true
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("assertReferrerInStore walk: %v", err)
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("no OCI referrer with ref containing %q found in store", refSubstring)
|
||||
}
|
||||
}
|
||||
409
go.mod
409
go.mod
@@ -1,49 +1,45 @@
|
||||
module hauler.dev/go/hauler
|
||||
|
||||
go 1.23.0
|
||||
go 1.25.5
|
||||
|
||||
toolchain go1.23.5
|
||||
|
||||
replace github.com/sigstore/cosign/v2 => github.com/hauler-dev/cosign/v2 v2.4.2-0.20250126162449-3b34bda542a5
|
||||
replace github.com/distribution/distribution/v3 => github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2
|
||||
|
||||
require (
|
||||
github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be
|
||||
github.com/containerd/containerd v1.7.27
|
||||
github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2
|
||||
github.com/google/go-containerregistry v0.20.2
|
||||
github.com/gorilla/handlers v1.5.1
|
||||
github.com/containerd/containerd v1.7.29
|
||||
github.com/containerd/errdefs v1.0.0
|
||||
github.com/distribution/distribution/v3 v3.0.0
|
||||
github.com/google/go-containerregistry v0.20.7
|
||||
github.com/gorilla/handlers v1.5.2
|
||||
github.com/gorilla/mux v1.8.1
|
||||
github.com/mholt/archives v0.1.0
|
||||
github.com/mholt/archives v0.1.5
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/olekukonko/tablewriter v1.1.2
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.1.0
|
||||
github.com/opencontainers/image-spec v1.1.1
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/rs/zerolog v1.31.0
|
||||
github.com/sigstore/cosign/v2 v2.4.1
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/afero v1.11.0
|
||||
github.com/spf13/cobra v1.8.1
|
||||
golang.org/x/sync v0.11.0
|
||||
github.com/rs/zerolog v1.34.0
|
||||
github.com/sigstore/cosign/v3 v3.0.5
|
||||
github.com/sirupsen/logrus v1.9.4
|
||||
github.com/spf13/afero v1.15.0
|
||||
github.com/spf13/cobra v1.10.2
|
||||
golang.org/x/sync v0.19.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
helm.sh/helm/v3 v3.17.0
|
||||
k8s.io/apimachinery v0.32.1
|
||||
k8s.io/client-go v0.32.1
|
||||
oras.land/oras-go v1.2.5
|
||||
helm.sh/helm/v3 v3.19.0
|
||||
k8s.io/apimachinery v0.35.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/auth v0.9.3 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.6.0 // indirect
|
||||
cuelabs.dev/go/oci/ociregistry v0.0.0-20240404174027-a39bec0462d2 // indirect
|
||||
cuelang.org/go v0.9.2 // indirect
|
||||
cloud.google.com/go/auth v0.18.1 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.9.0 // indirect
|
||||
cuelabs.dev/go/oci/ociregistry v0.0.0-20250722084951-074d06050084 // indirect
|
||||
cuelang.org/go v0.15.4 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
filippo.io/edwards25519 v1.1.0 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
|
||||
github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
|
||||
@@ -52,19 +48,17 @@ require (
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/BurntSushi/toml v1.4.0 // indirect
|
||||
github.com/BurntSushi/toml v1.5.0 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.4.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.4 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/OneOfOne/xxhash v1.2.8 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c // indirect
|
||||
github.com/STARRY-S/zip v0.2.1 // indirect
|
||||
github.com/STARRY-S/zip v0.2.3 // indirect
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
|
||||
github.com/ThalesIgnite/crypto11 v1.2.5 // indirect
|
||||
github.com/agnivade/levenshtein v1.1.1 // indirect
|
||||
github.com/agnivade/levenshtein v1.2.1 // indirect
|
||||
github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 // indirect
|
||||
github.com/alibabacloud-go/cr-20160607 v1.0.1 // indirect
|
||||
github.com/alibabacloud-go/cr-20181201 v1.0.10 // indirect
|
||||
@@ -76,265 +70,282 @@ require (
|
||||
github.com/alibabacloud-go/tea-utils v1.4.5 // indirect
|
||||
github.com/alibabacloud-go/tea-xml v1.1.3 // indirect
|
||||
github.com/aliyun/credentials-go v1.3.2 // indirect
|
||||
github.com/andybalholm/brotli v1.1.1 // indirect
|
||||
github.com/andybalholm/brotli v1.2.0 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.30.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.33 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.32 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecr v1.20.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 // indirect
|
||||
github.com/aws/smithy-go v1.20.4 // indirect
|
||||
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecr v1.51.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.38.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect
|
||||
github.com/aws/smithy-go v1.24.0 // indirect
|
||||
github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.11.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/blang/semver v3.5.1+incompatible // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/bodgit/plumbing v1.3.0 // indirect
|
||||
github.com/bodgit/sevenzip v1.6.0 // indirect
|
||||
github.com/bodgit/sevenzip v1.6.1 // indirect
|
||||
github.com/bodgit/windows v1.0.1 // indirect
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 // indirect
|
||||
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd // indirect
|
||||
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b // indirect
|
||||
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 // indirect
|
||||
github.com/buildkite/agent/v3 v3.81.0 // indirect
|
||||
github.com/buildkite/go-pipeline v0.13.1 // indirect
|
||||
github.com/buildkite/interpolate v0.1.3 // indirect
|
||||
github.com/buildkite/roko v1.2.0 // indirect
|
||||
github.com/buildkite/agent/v3 v3.115.4 // indirect
|
||||
github.com/buildkite/go-pipeline v0.16.0 // indirect
|
||||
github.com/buildkite/interpolate v0.1.5 // indirect
|
||||
github.com/buildkite/roko v1.4.0 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect
|
||||
github.com/chzyer/readline v1.5.1 // indirect
|
||||
github.com/clbanning/mxj/v2 v2.7.0 // indirect
|
||||
github.com/cloudflare/circl v1.3.7 // indirect
|
||||
github.com/clipperhouse/displaywidth v0.6.0 // indirect
|
||||
github.com/clipperhouse/stringish v0.1.1 // indirect
|
||||
github.com/clipperhouse/uax29/v2 v2.3.0 // indirect
|
||||
github.com/cockroachdb/apd/v3 v3.2.1 // indirect
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
|
||||
github.com/coreos/go-oidc/v3 v3.11.0 // indirect
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.3.6 // indirect
|
||||
github.com/containerd/platforms v1.0.0-rc.2 // indirect
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
|
||||
github.com/coreos/go-oidc/v3 v3.17.0 // indirect
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20241213102144-19d51d7fe467 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
|
||||
github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
|
||||
github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
|
||||
github.com/dimchansky/utfbom v1.1.1 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/cli v27.5.0+incompatible // indirect
|
||||
github.com/docker/cli v29.2.0+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker v27.5.0+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.2 // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/docker/docker-credential-helpers v0.9.4 // indirect
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 // indirect
|
||||
github.com/dsnet/compress v0.0.2-0.20230904184137-39efe44ab707 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/emicklei/proto v1.12.1 // indirect
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.13.0 // indirect
|
||||
github.com/emicklei/proto v1.14.2 // indirect
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/fatih/color v1.16.0 // indirect
|
||||
github.com/fatih/color v1.18.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/fsnotify/fsnotify v1.7.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/go-chi/chi v4.1.2+incompatible // indirect
|
||||
github.com/fsnotify/fsnotify v1.9.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
|
||||
github.com/go-chi/chi/v5 v5.2.4 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
||||
github.com/go-ini/ini v1.67.0 // indirect
|
||||
github.com/go-jose/go-jose/v3 v3.0.4 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/analysis v0.23.0 // indirect
|
||||
github.com/go-openapi/errors v0.22.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.0 // indirect
|
||||
github.com/go-openapi/loads v0.22.0 // indirect
|
||||
github.com/go-openapi/runtime v0.28.0 // indirect
|
||||
github.com/go-openapi/spec v0.21.0 // indirect
|
||||
github.com/go-openapi/strfmt v0.23.0 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-openapi/validate v0.24.0 // indirect
|
||||
github.com/go-piv/piv-go v1.11.0 // indirect
|
||||
github.com/go-openapi/analysis v0.24.1 // indirect
|
||||
github.com/go-openapi/errors v0.22.6 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.22.4 // indirect
|
||||
github.com/go-openapi/jsonreference v0.21.4 // indirect
|
||||
github.com/go-openapi/loads v0.23.2 // indirect
|
||||
github.com/go-openapi/runtime v0.29.2 // indirect
|
||||
github.com/go-openapi/spec v0.22.3 // indirect
|
||||
github.com/go-openapi/strfmt v0.25.0 // indirect
|
||||
github.com/go-openapi/swag v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/conv v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/fileutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/jsonname v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/jsonutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/loading v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/mangling v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/netutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/stringutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/typeutils v0.25.4 // indirect
|
||||
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
|
||||
github.com/go-openapi/validate v0.25.1 // indirect
|
||||
github.com/go-piv/piv-go/v2 v2.4.0 // indirect
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/goccy/go-json v0.10.5 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/gomodule/redigo v1.8.2 // indirect
|
||||
github.com/google/btree v1.1.2 // indirect
|
||||
github.com/google/certificate-transparency-go v1.2.1 // indirect
|
||||
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/go-github/v55 v55.0.0 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/s2a-go v0.1.8 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/certificate-transparency-go v1.3.2 // indirect
|
||||
github.com/google/gnostic-models v0.7.0 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/go-github/v73 v73.0.0 // indirect
|
||||
github.com/google/go-querystring v1.2.0 // indirect
|
||||
github.com/google/s2a-go v0.1.9 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.3 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.17.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
|
||||
github.com/gosuri/uitable v0.0.4 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.5 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.7 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.8 // indirect
|
||||
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
|
||||
github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/in-toto/attestation v1.1.0 // indirect
|
||||
github.com/in-toto/attestation v1.1.2 // indirect
|
||||
github.com/in-toto/in-toto-golang v0.9.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jmoiron/sqlx v1.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/klauspost/compress v1.18.2 // indirect
|
||||
github.com/klauspost/pgzip v1.2.6 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect
|
||||
github.com/lestrrat-go/blackmagic v1.0.4 // indirect
|
||||
github.com/lestrrat-go/dsig v1.0.0 // indirect
|
||||
github.com/lestrrat-go/dsig-secp256k1 v1.0.0 // indirect
|
||||
github.com/lestrrat-go/httpcc v1.0.1 // indirect
|
||||
github.com/lestrrat-go/httprc/v3 v3.0.1 // indirect
|
||||
github.com/lestrrat-go/jwx/v3 v3.0.12 // indirect
|
||||
github.com/lestrrat-go/option v1.0.1 // indirect
|
||||
github.com/lestrrat-go/option/v2 v2.0.0 // indirect
|
||||
github.com/letsencrypt/boulder v0.20251110.0 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/manifoldco/promptui v0.9.0 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-colorable v0.1.14 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.19 // indirect
|
||||
github.com/miekg/pkcs11 v1.1.2 // indirect
|
||||
github.com/mikelolasagasti/xz v1.0.1 // indirect
|
||||
github.com/minio/minlz v1.0.1 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/mozillazg/docker-credential-acr-helper v0.4.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
|
||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.4.0.20241112120701-034e449c6e78 // indirect
|
||||
github.com/nwaples/rardecode/v2 v2.2.1 // indirect
|
||||
github.com/oklog/ulid v1.3.1 // indirect
|
||||
github.com/oleiade/reflections v1.1.0 // indirect
|
||||
github.com/open-policy-agent/opa v0.68.0 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 // indirect
|
||||
github.com/olekukonko/errors v1.1.0 // indirect
|
||||
github.com/olekukonko/ll v0.1.3 // indirect
|
||||
github.com/open-policy-agent/opa v1.12.3 // indirect
|
||||
github.com/pborman/uuid v1.2.1 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.21 // indirect
|
||||
github.com/prometheus/client_golang v1.20.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rogpeppe/go-internal v1.13.1 // indirect
|
||||
github.com/rubenv/sql-migrate v1.7.1 // indirect
|
||||
github.com/pierrec/lz4/v4 v4.1.22 // indirect
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.23.2 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.67.5 // indirect
|
||||
github.com/prometheus/procfs v0.19.2 // indirect
|
||||
github.com/protocolbuffers/txtpbfmt v0.0.0-20251016062345-16587c79cd91 // indirect
|
||||
github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect
|
||||
github.com/rogpeppe/go-internal v1.14.1 // indirect
|
||||
github.com/rubenv/sql-migrate v1.8.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.4.0 // indirect
|
||||
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
|
||||
github.com/sagikazarmark/locafero v0.11.0 // indirect
|
||||
github.com/santhosh-tekuri/jsonschema/v6 v6.0.2 // indirect
|
||||
github.com/sassoftware/relic v7.2.1+incompatible // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
|
||||
github.com/segmentio/ksuid v1.0.4 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.10.0 // indirect
|
||||
github.com/segmentio/asm v1.2.1 // indirect
|
||||
github.com/shibumi/go-pathspec v1.3.0 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sigstore/fulcio v1.6.3 // indirect
|
||||
github.com/sigstore/protobuf-specs v0.3.2 // indirect
|
||||
github.com/sigstore/rekor v1.3.6 // indirect
|
||||
github.com/sigstore/sigstore v1.8.9 // indirect
|
||||
github.com/sigstore/sigstore-go v0.6.1 // indirect
|
||||
github.com/sigstore/timestamp-authority v1.2.2 // indirect
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
|
||||
github.com/sorairolake/lzip-go v0.3.5 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/spf13/cast v1.7.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/viper v1.19.0 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.3.0 // indirect
|
||||
github.com/sigstore/fulcio v1.8.5 // indirect
|
||||
github.com/sigstore/protobuf-specs v0.5.0 // indirect
|
||||
github.com/sigstore/rekor v1.5.0 // indirect
|
||||
github.com/sigstore/rekor-tiles/v2 v2.2.0 // indirect
|
||||
github.com/sigstore/sigstore v1.10.4 // indirect
|
||||
github.com/sigstore/sigstore-go v1.1.4 // indirect
|
||||
github.com/sigstore/timestamp-authority/v2 v2.0.4 // indirect
|
||||
github.com/sorairolake/lzip-go v0.3.8 // indirect
|
||||
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect
|
||||
github.com/spf13/cast v1.10.0 // indirect
|
||||
github.com/spf13/pflag v1.0.10 // indirect
|
||||
github.com/spf13/viper v1.21.0 // indirect
|
||||
github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect
|
||||
github.com/subosito/gotenv v1.6.0 // indirect
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
|
||||
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
|
||||
github.com/tchap/go-patricia/v2 v2.3.3 // indirect
|
||||
github.com/thales-e-security/pool v0.0.2 // indirect
|
||||
github.com/therootcompany/xz v1.0.1 // indirect
|
||||
github.com/theupdateframework/go-tuf v0.7.0 // indirect
|
||||
github.com/theupdateframework/go-tuf/v2 v2.0.1 // indirect
|
||||
github.com/theupdateframework/go-tuf/v2 v2.4.1 // indirect
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
|
||||
github.com/tjfoc/gmsm v1.4.1 // indirect
|
||||
github.com/transparency-dev/formats v0.0.0-20251017110053-404c0d5b696c // indirect
|
||||
github.com/transparency-dev/merkle v0.0.2 // indirect
|
||||
github.com/ulikunitz/xz v0.5.12 // indirect
|
||||
github.com/vbatts/tar-split v0.11.6 // indirect
|
||||
github.com/withfig/autocomplete-tools/integrations/cobra v1.2.1 // indirect
|
||||
github.com/ulikunitz/xz v0.5.15 // indirect
|
||||
github.com/valyala/fastjson v1.6.4 // indirect
|
||||
github.com/vbatts/tar-split v0.12.2 // indirect
|
||||
github.com/vektah/gqlparser/v2 v2.5.31 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xanzy/go-gitlab v0.109.0 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
github.com/yashtewari/glob-intersection v0.2.0 // indirect
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 // indirect
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 // indirect
|
||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f // indirect
|
||||
github.com/zeebo/errs v1.3.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.14.0 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.33.0 // indirect
|
||||
go.step.sm/crypto v0.51.2 // indirect
|
||||
gitlab.com/gitlab-org/api/client-go v1.25.0 // indirect
|
||||
go.mongodb.org/mongo-driver v1.17.6 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
|
||||
go.opentelemetry.io/otel v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.40.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.40.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
go.uber.org/zap v1.27.1 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
go.yaml.in/yaml/v3 v3.0.4 // indirect
|
||||
go4.org v0.0.0-20230225012048-214862532bf5 // indirect
|
||||
golang.org/x/crypto v0.35.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
|
||||
golang.org/x/mod v0.22.0 // indirect
|
||||
golang.org/x/net v0.36.0 // indirect
|
||||
golang.org/x/oauth2 v0.25.0 // indirect
|
||||
golang.org/x/sys v0.30.0 // indirect
|
||||
golang.org/x/term v0.29.0 // indirect
|
||||
golang.org/x/text v0.22.0 // indirect
|
||||
golang.org/x/time v0.7.0 // indirect
|
||||
google.golang.org/api v0.196.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
|
||||
google.golang.org/grpc v1.66.0 // indirect
|
||||
google.golang.org/protobuf v1.36.3 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
golang.org/x/crypto v0.47.0 // indirect
|
||||
golang.org/x/mod v0.32.0 // indirect
|
||||
golang.org/x/net v0.49.0 // indirect
|
||||
golang.org/x/oauth2 v0.35.0 // indirect
|
||||
golang.org/x/sys v0.40.0 // indirect
|
||||
golang.org/x/term v0.39.0 // indirect
|
||||
golang.org/x/text v0.33.0 // indirect
|
||||
golang.org/x/time v0.14.0 // indirect
|
||||
google.golang.org/api v0.267.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect
|
||||
google.golang.org/grpc v1.78.0 // indirect
|
||||
google.golang.org/protobuf v1.36.11 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/ini.v1 v1.67.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/api v0.32.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.32.0 // indirect
|
||||
k8s.io/apiserver v0.32.0 // indirect
|
||||
k8s.io/cli-runtime v0.32.0 // indirect
|
||||
k8s.io/component-base v0.32.0 // indirect
|
||||
k8s.io/api v0.35.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.34.0 // indirect
|
||||
k8s.io/apiserver v0.34.0 // indirect
|
||||
k8s.io/cli-runtime v0.34.0 // indirect
|
||||
k8s.io/client-go v0.35.1 // indirect
|
||||
k8s.io/component-base v0.34.0 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
|
||||
k8s.io/kubectl v0.32.0 // indirect
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.18.0 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect
|
||||
sigs.k8s.io/release-utils v0.8.4 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
|
||||
k8s.io/kubectl v0.34.0 // indirect
|
||||
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
|
||||
oras.land/oras-go/v2 v2.6.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.20.1 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/release-utils v0.12.3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
|
||||
sigs.k8s.io/yaml v1.6.0 // indirect
|
||||
)
|
||||
|
||||
@@ -7,17 +7,29 @@ import (
|
||||
|
||||
type AddImageOpts struct {
|
||||
*StoreRootOpts
|
||||
Name string
|
||||
Key string
|
||||
Tlog bool
|
||||
Platform string
|
||||
Name string
|
||||
Key string
|
||||
CertOidcIssuer string
|
||||
CertOidcIssuerRegexp string
|
||||
CertIdentity string
|
||||
CertIdentityRegexp string
|
||||
CertGithubWorkflowRepository string
|
||||
Tlog bool
|
||||
Platform string
|
||||
Rewrite string
|
||||
}
|
||||
|
||||
func (o *AddImageOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
f.StringVarP(&o.Key, "key", "k", "", "(Optional) Location of public key to use for signature verification")
|
||||
f.BoolVarP(&o.Tlog, "use-tlog-verify", "v", false, "(Optional) Allow transparency log verification. (defaults to false)")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specifiy the platform of the image... i.e. linux/amd64 (defaults to all)")
|
||||
f.StringVar(&o.CertIdentity, "certificate-identity", "", "(Optional) Cosign certificate-identity (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertIdentityRegexp, "certificate-identity-regexp", "", "(Optional) Cosign certificate-identity-regexp (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertOidcIssuer, "certificate-oidc-issuer", "", "(Optional) Cosign option to validate oidc issuer")
|
||||
f.StringVar(&o.CertOidcIssuerRegexp, "certificate-oidc-issuer-regexp", "", "(Optional) Cosign option to validate oidc issuer with regex")
|
||||
f.StringVar(&o.CertGithubWorkflowRepository, "certificate-github-workflow-repository", "", "(Optional) Cosign certificate-github-workflow-repository option")
|
||||
f.BoolVar(&o.Tlog, "use-tlog-verify", false, "(Optional) Enable transparency log verification for key-based signature verification (keyless/OIDC verification always uses the tlog)")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform of the image... i.e. linux/amd64 (defaults to all)")
|
||||
f.StringVar(&o.Rewrite, "rewrite", "", "(EXPERIMENTAL & Optional) Rewrite artifact path to specified string")
|
||||
}
|
||||
|
||||
type AddFileOpts struct {
|
||||
@@ -33,19 +45,37 @@ func (o *AddFileOpts) AddFlags(cmd *cobra.Command) {
|
||||
type AddChartOpts struct {
|
||||
*StoreRootOpts
|
||||
|
||||
ChartOpts *action.ChartPathOptions
|
||||
ChartOpts *action.ChartPathOptions
|
||||
Rewrite string
|
||||
AddDependencies bool
|
||||
AddImages bool
|
||||
HelmValues string
|
||||
Platform string
|
||||
Registry string
|
||||
KubeVersion string
|
||||
}
|
||||
|
||||
func (o *AddChartOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVar(&o.ChartOpts.RepoURL, "repo", "", "Location of the chart (https:// | http:// | oci://)")
|
||||
f.StringVar(&o.ChartOpts.Version, "version", "", "(Optional) Specifiy the version of the chart (v1.0.0 | 2.0.0 | ^2.0.0)")
|
||||
f.StringVar(&o.ChartOpts.Version, "version", "", "(Optional) Specify the version of the chart (v1.0.0 | 2.0.0 | ^2.0.0)")
|
||||
f.BoolVar(&o.ChartOpts.Verify, "verify", false, "(Optional) Verify the chart before fetching it")
|
||||
f.StringVar(&o.ChartOpts.Username, "username", "", "(Optional) Username to use for authentication")
|
||||
f.StringVar(&o.ChartOpts.Password, "password", "", "(Optional) Password to use for authentication")
|
||||
f.StringVar(&o.ChartOpts.CertFile, "cert-file", "", "(Optional) Location of the TLS Certificate to use for client authenication")
|
||||
f.StringVar(&o.ChartOpts.KeyFile, "key-file", "", "(Optional) Location of the TLS Key to use for client authenication")
|
||||
f.StringVar(&o.ChartOpts.CertFile, "cert-file", "", "(Optional) Location of the TLS Certificate to use for client authentication")
|
||||
f.StringVar(&o.ChartOpts.KeyFile, "key-file", "", "(Optional) Location of the TLS Key to use for client authentication")
|
||||
f.BoolVar(&o.ChartOpts.InsecureSkipTLSverify, "insecure-skip-tls-verify", false, "(Optional) Skip TLS certificate verification")
|
||||
f.StringVar(&o.ChartOpts.CaFile, "ca-file", "", "(Optional) Location of CA Bundle to enable certification verification")
|
||||
f.StringVar(&o.Rewrite, "rewrite", "", "(EXPERIMENTAL & Optional) Rewrite artifact path to specified string")
|
||||
|
||||
cmd.MarkFlagsRequiredTogether("username", "password")
|
||||
cmd.MarkFlagsRequiredTogether("cert-file", "key-file", "ca-file")
|
||||
|
||||
cmd.Flags().BoolVar(&o.AddDependencies, "add-dependencies", false, "(EXPERIMENTAL & Optional) Fetch dependent helm charts")
|
||||
f.BoolVar(&o.AddImages, "add-images", false, "(EXPERIMENTAL & Optional) Fetch images referenced in helm charts")
|
||||
f.StringVar(&o.HelmValues, "values", "", "(EXPERIMENTAL & Optional) Specify helm chart values when fetching images")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform of the image, e.g. linux/amd64")
|
||||
f.StringVarP(&o.Registry, "registry", "g", "", "(Optional) Specify the registry of the image for images that do not alredy define one")
|
||||
f.StringVar(&o.KubeVersion, "kube-version", "v1.34.1", "(EXPERIMENTAL & Optional) Override the kubernetes version for helm template rendering")
|
||||
}
|
||||
|
||||
@@ -9,13 +9,30 @@ type CopyOpts struct {
|
||||
Password string
|
||||
Insecure bool
|
||||
PlainHTTP bool
|
||||
Only string
|
||||
}
|
||||
|
||||
func (o *CopyOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.Username, "username", "u", "", "(Optional) Username to use for authentication")
|
||||
f.StringVarP(&o.Password, "password", "p", "", "(Optional) Password to use for authentication")
|
||||
f.StringVarP(&o.Username, "username", "u", "", "(Deprecated) Please use 'hauler login'")
|
||||
f.StringVarP(&o.Password, "password", "p", "", "(Deprecated) Please use 'hauler login'")
|
||||
f.BoolVar(&o.Insecure, "insecure", false, "(Optional) Allow insecure connections")
|
||||
f.BoolVar(&o.PlainHTTP, "plain-http", false, "(Optional) Allow plain HTTP connections")
|
||||
f.StringVarP(&o.Only, "only", "o", "", "(Optional) Custom string array to only copy specific 'image' items")
|
||||
|
||||
cmd.MarkFlagsRequiredTogether("username", "password")
|
||||
|
||||
if err := f.MarkDeprecated("username", "please use 'hauler login'"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := f.MarkDeprecated("password", "please use 'hauler login'"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := f.MarkHidden("username"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := f.MarkHidden("password"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,12 +9,14 @@ type InfoOpts struct {
|
||||
TypeFilter string
|
||||
SizeUnit string
|
||||
ListRepos bool
|
||||
ShowDigests bool
|
||||
}
|
||||
|
||||
func (o *InfoOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.StringVarP(&o.OutputFormat, "output", "o", "table", "(Optional) Specify the output format (table | json)")
|
||||
f.StringVarP(&o.TypeFilter, "type", "t", "all", "(Optional) Filter on content type (image | chart | file | sigs | atts | sbom)")
|
||||
f.StringVar(&o.TypeFilter, "type", "all", "(Optional) Filter on content type (image | chart | file | sigs | atts | sbom | referrer)")
|
||||
f.BoolVar(&o.ListRepos, "list-repos", false, "(Optional) List all repository names")
|
||||
f.BoolVar(&o.ShowDigests, "digests", false, "(Optional) Show digests of each artifact in the output table")
|
||||
}
|
||||
|
||||
@@ -7,15 +7,11 @@ import (
|
||||
|
||||
type LoadOpts struct {
|
||||
*StoreRootOpts
|
||||
FileName []string
|
||||
TempOverride string
|
||||
FileName []string
|
||||
}
|
||||
|
||||
func (o *LoadOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
// On Unix systems, the default is $TMPDIR if non-empty, else /tmp
|
||||
// On Windows, the default is GetTempPath, returning the first value from %TMP%, %TEMP%, %USERPROFILE%, or Windows directory
|
||||
f.StringSliceVarP(&o.FileName, "filename", "f", []string{consts.DefaultHaulerArchiveName}, "(Optional) Specify the name of inputted haul(s)")
|
||||
f.StringVarP(&o.TempOverride, "tempdir", "t", "", "(Optional) Override the default temporary directiory determined by the OS")
|
||||
}
|
||||
|
||||
11
internal/flags/remove.go
Normal file
11
internal/flags/remove.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package flags
|
||||
|
||||
import "github.com/spf13/cobra"
|
||||
|
||||
type RemoveOpts struct {
|
||||
Force bool // skip remove confirmation
|
||||
}
|
||||
|
||||
func (o *RemoveOpts) AddFlags(cmd *cobra.Command) {
|
||||
cmd.Flags().BoolVarP(&o.Force, "force", "f", false, "(Optional) Remove artifact(s) without confirmation")
|
||||
}
|
||||
@@ -7,8 +7,9 @@ import (
|
||||
|
||||
type SaveOpts struct {
|
||||
*StoreRootOpts
|
||||
FileName string
|
||||
Platform string
|
||||
FileName string
|
||||
Platform string
|
||||
ContainerdCompatibility bool
|
||||
}
|
||||
|
||||
func (o *SaveOpts) AddFlags(cmd *cobra.Command) {
|
||||
@@ -16,4 +17,6 @@ func (o *SaveOpts) AddFlags(cmd *cobra.Command) {
|
||||
|
||||
f.StringVarP(&o.FileName, "filename", "f", consts.DefaultHaulerArchiveName, "(Optional) Specify the name of outputted haul")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform for runtime imports... i.e. linux/amd64 (unspecified implies all)")
|
||||
f.BoolVar(&o.ContainerdCompatibility, "containerd", false, "(Optional) Enable import compatibility with containerd... removes oci-layout from the haul")
|
||||
|
||||
}
|
||||
|
||||
@@ -46,7 +46,7 @@ func (o *ServeFilesOpts) AddFlags(cmd *cobra.Command) {
|
||||
f := cmd.Flags()
|
||||
|
||||
f.IntVarP(&o.Port, "port", "p", consts.DefaultFileserverPort, "(Optional) Set the port to use for incoming connections")
|
||||
f.IntVarP(&o.Timeout, "timeout", "t", consts.DefaultFileserverTimeout, "(Optional) Timeout duration for HTTP Requests in seconds for both reads/writes")
|
||||
f.IntVar(&o.Timeout, "timeout", consts.DefaultFileserverTimeout, "(Optional) Timeout duration for HTTP Requests in seconds for both reads/writes")
|
||||
f.StringVar(&o.RootDir, "directory", consts.DefaultFileserverRootDir, "(Optional) Directory to use for backend. Defaults to $PWD/fileserver")
|
||||
|
||||
f.StringVar(&o.TLSCert, "tls-cert", "", "(Optional) Location of the TLS Certificate to use for server authenication")
|
||||
|
||||
@@ -13,14 +13,16 @@ import (
|
||||
)
|
||||
|
||||
type StoreRootOpts struct {
|
||||
StoreDir string
|
||||
Retries int
|
||||
StoreDir string
|
||||
Retries int
|
||||
TempOverride string
|
||||
}
|
||||
|
||||
func (o *StoreRootOpts) AddFlags(cmd *cobra.Command) {
|
||||
pf := cmd.PersistentFlags()
|
||||
pf.StringVarP(&o.StoreDir, "store", "s", "", "Set the directory to use for the content store")
|
||||
pf.IntVarP(&o.Retries, "retries", "r", consts.DefaultRetries, "Set the number of retries for operations")
|
||||
pf.StringVarP(&o.TempOverride, "tempdir", "t", "", "(Optional) Override the default temporary directory determined by the OS")
|
||||
}
|
||||
|
||||
func (o *StoreRootOpts) Store(ctx context.Context) (*store.Layout, error) {
|
||||
|
||||
@@ -7,14 +7,19 @@ import (
|
||||
|
||||
type SyncOpts struct {
|
||||
*StoreRootOpts
|
||||
FileName []string
|
||||
Key string
|
||||
Products []string
|
||||
Platform string
|
||||
Registry string
|
||||
ProductRegistry string
|
||||
TempOverride string
|
||||
Tlog bool
|
||||
FileName []string
|
||||
Key string
|
||||
CertOidcIssuer string
|
||||
CertOidcIssuerRegexp string
|
||||
CertIdentity string
|
||||
CertIdentityRegexp string
|
||||
CertGithubWorkflowRepository string
|
||||
Products []string
|
||||
Platform string
|
||||
Registry string
|
||||
ProductRegistry string
|
||||
Tlog bool
|
||||
Rewrite string
|
||||
}
|
||||
|
||||
func (o *SyncOpts) AddFlags(cmd *cobra.Command) {
|
||||
@@ -22,10 +27,15 @@ func (o *SyncOpts) AddFlags(cmd *cobra.Command) {
|
||||
|
||||
f.StringSliceVarP(&o.FileName, "filename", "f", []string{consts.DefaultHaulerManifestName}, "Specify the name of manifest(s) to sync")
|
||||
f.StringVarP(&o.Key, "key", "k", "", "(Optional) Location of public key to use for signature verification")
|
||||
f.StringVar(&o.CertIdentity, "certificate-identity", "", "(Optional) Cosign certificate-identity (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertIdentityRegexp, "certificate-identity-regexp", "", "(Optional) Cosign certificate-identity-regexp (either --certificate-identity or --certificate-identity-regexp required for keyless verification)")
|
||||
f.StringVar(&o.CertOidcIssuer, "certificate-oidc-issuer", "", "(Optional) Cosign option to validate oidc issuer")
|
||||
f.StringVar(&o.CertOidcIssuerRegexp, "certificate-oidc-issuer-regexp", "", "(Optional) Cosign option to validate oidc issuer with regex")
|
||||
f.StringVar(&o.CertGithubWorkflowRepository, "certificate-github-workflow-repository", "", "(Optional) Cosign certificate-github-workflow-repository option")
|
||||
f.StringSliceVar(&o.Products, "products", []string{}, "(Optional) Specify the product name to fetch collections from the product registry i.e. rancher=v2.10.1,rke2=v1.31.5+rke2r1")
|
||||
f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform of the image... i.e linux/amd64 (defaults to all)")
|
||||
f.StringVarP(&o.Registry, "registry", "g", "", "(Optional) Specify the registry of the image for images that do not alredy define one")
|
||||
f.StringVarP(&o.ProductRegistry, "product-registry", "c", "", "(Optional) Specify the product registry. Defaults to RGS Carbide Registry (rgcrprod.azurecr.us)")
|
||||
f.StringVarP(&o.TempOverride, "tempdir", "t", "", "(Optional) Override the default temporary directiory determined by the OS")
|
||||
f.BoolVarP(&o.Tlog, "use-tlog-verify", "v", false, "(Optional) Allow transparency log verification. (defaults to false)")
|
||||
f.BoolVar(&o.Tlog, "use-tlog-verify", false, "(Optional) Allow transparency log verification (defaults to false)")
|
||||
f.StringVar(&o.Rewrite, "rewrite", "", "(EXPERIMENTAL & Optional) Rewrite artifact path to specified string")
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package mapper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -11,18 +12,21 @@ import (
|
||||
"github.com/containerd/containerd/remotes"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"oras.land/oras-go/pkg/content"
|
||||
"hauler.dev/go/hauler/pkg/content"
|
||||
)
|
||||
|
||||
// NewMapperFileStore creates a new file store that uses mapper functions for each detected descriptor.
|
||||
//
|
||||
// This extends content.File, and differs in that it allows much more functionality into how each descriptor is written.
|
||||
func NewMapperFileStore(root string, mapper map[string]Fn) *store {
|
||||
fs := content.NewFile(root)
|
||||
return &store{
|
||||
File: fs,
|
||||
mapper: mapper,
|
||||
// This extends content.OCI, and differs in that it allows much more functionality into how each descriptor is written.
|
||||
func NewMapperFileStore(root string, mapper map[string]Fn) (*store, error) {
|
||||
fs, err := content.NewOCI(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &store{
|
||||
OCI: fs,
|
||||
mapper: mapper,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *store) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
|
||||
@@ -35,7 +39,7 @@ func (s *store) Pusher(ctx context.Context, ref string) (remotes.Pusher, error)
|
||||
hash = parts[1]
|
||||
}
|
||||
return &pusher{
|
||||
store: s.File,
|
||||
store: s.OCI,
|
||||
tag: tag,
|
||||
ref: hash,
|
||||
mapper: s.mapper,
|
||||
@@ -43,43 +47,76 @@ func (s *store) Pusher(ctx context.Context, ref string) (remotes.Pusher, error)
|
||||
}
|
||||
|
||||
type store struct {
|
||||
*content.File
|
||||
*content.OCI
|
||||
mapper map[string]Fn
|
||||
}
|
||||
|
||||
func (s *pusher) Push(ctx context.Context, desc ocispec.Descriptor) (ccontent.Writer, error) {
|
||||
// TODO: This is suuuuuper ugly... redo this when oras v2 is out
|
||||
// For manifests and indexes (which have AnnotationRefName), discard them
|
||||
// They're metadata and don't need to be extracted
|
||||
if _, ok := content.ResolveName(desc); ok {
|
||||
p, err := s.store.Pusher(ctx, s.ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p.Push(ctx, desc)
|
||||
// Discard manifests/indexes, they're just metadata
|
||||
return content.NewIoContentWriter(&nopCloser{io.Discard}, content.WithOutputHash(desc.Digest.String())), nil
|
||||
}
|
||||
|
||||
// If no custom mapper found, fall back to content.File mapper
|
||||
if _, ok := s.mapper[desc.MediaType]; !ok {
|
||||
return content.NewIoContentWriter(io.Discard, content.WithOutputHash(desc.Digest)), nil
|
||||
// Check if this descriptor has a mapper for its media type
|
||||
mapperFn, hasMapper := s.mapper[desc.MediaType]
|
||||
if !hasMapper {
|
||||
// Fall back to catch-all sentinel, then discard
|
||||
mapperFn, hasMapper = s.mapper[DefaultCatchAll]
|
||||
}
|
||||
if !hasMapper {
|
||||
// No mapper for this media type, discard it (config blobs, etc.)
|
||||
return content.NewIoContentWriter(&nopCloser{io.Discard}, content.WithOutputHash(desc.Digest.String())), nil
|
||||
}
|
||||
|
||||
filename, err := s.mapper[desc.MediaType](desc)
|
||||
// Get the filename from the mapper function.
|
||||
// An empty filename means the mapper explicitly declined this descriptor (e.g. a
|
||||
// config blob that has no title annotation); treat it the same as no mapper.
|
||||
filename, err := mapperFn(desc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fullFileName := filepath.Join(s.store.ResolvePath(""), filename)
|
||||
// TODO: Don't rewrite everytime, we can check the digest
|
||||
f, err := os.OpenFile(fullFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "pushing file")
|
||||
if filename == "" {
|
||||
return content.NewIoContentWriter(&nopCloser{io.Discard}, content.WithOutputHash(desc.Digest.String())), nil
|
||||
}
|
||||
|
||||
w := content.NewIoContentWriter(f, content.WithInputHash(desc.Digest), content.WithOutputHash(desc.Digest))
|
||||
// Get the destination directory and create the full path.
|
||||
// Use absolute paths so the traversal check works even when destDir is relative (e.g. ".").
|
||||
destDir, err := filepath.Abs(s.store.ResolvePath(""))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "resolving destination dir")
|
||||
}
|
||||
fullFileName := filepath.Join(destDir, filename)
|
||||
|
||||
// Guard against path traversal (e.g. filename containing "../")
|
||||
if !strings.HasPrefix(fullFileName, destDir+string(filepath.Separator)) {
|
||||
return nil, fmt.Errorf("path_traversal_disallowed: %q resolves outside destination dir", filename)
|
||||
}
|
||||
|
||||
// Create parent directories (e.g. when filename is "subdir/file.txt")
|
||||
if err := os.MkdirAll(filepath.Dir(fullFileName), 0755); err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("creating directory for %s", fullFileName))
|
||||
}
|
||||
|
||||
// Create the file
|
||||
f, err := os.OpenFile(fullFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, fmt.Sprintf("creating file %s", fullFileName))
|
||||
}
|
||||
|
||||
w := content.NewIoContentWriter(f, content.WithOutputHash(desc.Digest.String()))
|
||||
return w, nil
|
||||
}
|
||||
|
||||
type nopCloser struct {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
func (*nopCloser) Close() error { return nil }
|
||||
|
||||
type pusher struct {
|
||||
store *content.File
|
||||
store *content.OCI
|
||||
tag string
|
||||
ref string
|
||||
mapper map[string]Fn
|
||||
|
||||
349
internal/mapper/mapper_test.go
Normal file
349
internal/mapper/mapper_test.go
Normal file
@@ -0,0 +1,349 @@
|
||||
package mapper
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
func TestFromManifest_DockerImage(t *testing.T) {
|
||||
manifest := ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{
|
||||
MediaType: consts.DockerConfigJSON,
|
||||
},
|
||||
}
|
||||
|
||||
target, err := FromManifest(manifest, t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if target == nil {
|
||||
t.Fatal("expected non-nil Target")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromManifest_HelmChart(t *testing.T) {
|
||||
manifest := ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{
|
||||
MediaType: consts.ChartConfigMediaType,
|
||||
},
|
||||
}
|
||||
|
||||
target, err := FromManifest(manifest, t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if target == nil {
|
||||
t.Fatal("expected non-nil Target")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromManifest_File(t *testing.T) {
|
||||
manifest := ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{
|
||||
MediaType: consts.FileLocalConfigMediaType,
|
||||
},
|
||||
}
|
||||
|
||||
target, err := FromManifest(manifest, t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if target == nil {
|
||||
t.Fatal("expected non-nil Target")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromManifest_OciImageConfigWithTitleAnnotation(t *testing.T) {
|
||||
// OCI artifacts distributed as "fake images" (e.g. rke2-binary) use the standard
|
||||
// OCI image config type but set AnnotationTitle on their layers. FromManifest must
|
||||
// dispatch to Files() (not Images()) so the title is used as the output filename.
|
||||
manifest := ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{
|
||||
MediaType: ocispec.MediaTypeImageConfig,
|
||||
},
|
||||
Layers: []ocispec.Descriptor{
|
||||
{
|
||||
MediaType: consts.OCILayer,
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: "rke2.linux-amd64",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
target, err := FromManifest(manifest, t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
s, ok := target.(*store)
|
||||
if !ok {
|
||||
t.Fatal("expected target to be *store")
|
||||
}
|
||||
if _, exists := s.mapper[consts.OCILayer]; !exists {
|
||||
t.Fatal("expected Files() mapper (OCILayer key) for OCI image config with title annotation")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromManifest_FileLayerFallback(t *testing.T) {
|
||||
manifest := ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{
|
||||
MediaType: "application/vnd.unknown.config.v1+json",
|
||||
},
|
||||
Layers: []ocispec.Descriptor{
|
||||
{
|
||||
MediaType: consts.FileLayerMediaType,
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: "somefile.txt",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
target, err := FromManifest(manifest, t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if target == nil {
|
||||
t.Fatal("expected non-nil Target")
|
||||
}
|
||||
|
||||
// Verify the returned store uses the Files() mapper by checking that the
|
||||
// mapper contains the FileLayerMediaType key.
|
||||
s, ok := target.(*store)
|
||||
if !ok {
|
||||
t.Fatal("expected target to be *store")
|
||||
}
|
||||
if s.mapper == nil {
|
||||
t.Fatal("expected non-nil mapper for file layer fallback")
|
||||
}
|
||||
if _, exists := s.mapper[consts.FileLayerMediaType]; !exists {
|
||||
t.Fatal("expected mapper to contain consts.FileLayerMediaType key")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFromManifest_UnknownNoTitle(t *testing.T) {
|
||||
manifest := ocispec.Manifest{
|
||||
Config: ocispec.Descriptor{
|
||||
MediaType: "application/vnd.unknown.config.v1+json",
|
||||
},
|
||||
Layers: []ocispec.Descriptor{
|
||||
{
|
||||
MediaType: "application/vnd.unknown.layer",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
target, err := FromManifest(manifest, t.TempDir())
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if target == nil {
|
||||
t.Fatal("expected non-nil Target")
|
||||
}
|
||||
|
||||
// Unknown artifacts must use the Default catch-all mapper so blobs are not silently discarded
|
||||
s, ok := target.(*store)
|
||||
if !ok {
|
||||
t.Fatal("expected target to be *store")
|
||||
}
|
||||
if _, exists := s.mapper[DefaultCatchAll]; !exists {
|
||||
t.Fatal("expected default catch-all mapper for unknown artifact type")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFiles_CatchAll_WithTitle(t *testing.T) {
|
||||
// OCI artifacts with custom layer media types (e.g. rke2-binary) must be
|
||||
// extracted by the Files() catch-all when they carry AnnotationTitle.
|
||||
mappers := Files()
|
||||
|
||||
fn, ok := mappers[DefaultCatchAll]
|
||||
if !ok {
|
||||
t.Fatal("Files() must contain a DefaultCatchAll entry")
|
||||
}
|
||||
|
||||
d := digest.Digest("sha256:" + strings.Repeat("b", 64))
|
||||
desc := ocispec.Descriptor{
|
||||
MediaType: "application/vnd.rancher.rke2.binary",
|
||||
Digest: d,
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: "rke2.linux-amd64",
|
||||
},
|
||||
}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if result != "rke2.linux-amd64" {
|
||||
t.Errorf("expected %q, got %q", "rke2.linux-amd64", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFiles_CatchAll_NoTitle(t *testing.T) {
|
||||
// Blobs without AnnotationTitle (e.g. config blobs) must be discarded by the
|
||||
// Files() catch-all (empty filename = discard signal for Push).
|
||||
mappers := Files()
|
||||
|
||||
fn, ok := mappers[DefaultCatchAll]
|
||||
if !ok {
|
||||
t.Fatal("Files() must contain a DefaultCatchAll entry")
|
||||
}
|
||||
|
||||
d := digest.Digest("sha256:" + strings.Repeat("c", 64))
|
||||
desc := ocispec.Descriptor{
|
||||
MediaType: "application/vnd.oci.image.config.v1+json",
|
||||
Digest: d,
|
||||
}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if result != "" {
|
||||
t.Errorf("expected empty string (discard) for config blob, got %q", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestImages_MapperFn(t *testing.T) {
|
||||
mappers := Images()
|
||||
|
||||
fn, ok := mappers[consts.DockerLayer]
|
||||
if !ok {
|
||||
t.Fatalf("expected mapper for %s", consts.DockerLayer)
|
||||
}
|
||||
|
||||
d := digest.Digest("sha256:abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890")
|
||||
desc := ocispec.Descriptor{
|
||||
Digest: d,
|
||||
}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
|
||||
expected := "sha256:abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890.tar.gz"
|
||||
if result != expected {
|
||||
t.Fatalf("expected %q, got %q", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestImages_ConfigMapperFn(t *testing.T) {
|
||||
mappers := Images()
|
||||
|
||||
fn, ok := mappers[consts.DockerConfigJSON]
|
||||
if !ok {
|
||||
t.Fatalf("expected mapper for %s", consts.DockerConfigJSON)
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{}
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
|
||||
if result != consts.ImageConfigFile {
|
||||
t.Fatalf("expected %q, got %q", consts.ImageConfigFile, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChart_MapperFn_WithTitle(t *testing.T) {
|
||||
mappers := Chart()
|
||||
|
||||
fn, ok := mappers[consts.ChartLayerMediaType]
|
||||
if !ok {
|
||||
t.Fatalf("expected mapper for %s", consts.ChartLayerMediaType)
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: "mychart-1.0.0.tgz",
|
||||
},
|
||||
}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
|
||||
if result != "mychart-1.0.0.tgz" {
|
||||
t.Fatalf("expected %q, got %q", "mychart-1.0.0.tgz", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestChart_MapperFn_NoTitle(t *testing.T) {
|
||||
mappers := Chart()
|
||||
|
||||
fn, ok := mappers[consts.ChartLayerMediaType]
|
||||
if !ok {
|
||||
t.Fatalf("expected mapper for %s", consts.ChartLayerMediaType)
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
|
||||
if result != "chart.tar.gz" {
|
||||
t.Fatalf("expected %q, got %q", "chart.tar.gz", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFiles_MapperFn_WithTitle(t *testing.T) {
|
||||
mappers := Files()
|
||||
|
||||
fn, ok := mappers[consts.FileLayerMediaType]
|
||||
if !ok {
|
||||
t.Fatalf("expected mapper for %s", consts.FileLayerMediaType)
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{
|
||||
Annotations: map[string]string{
|
||||
ocispec.AnnotationTitle: "install.sh",
|
||||
},
|
||||
}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
|
||||
if result != "install.sh" {
|
||||
t.Fatalf("expected %q, got %q", "install.sh", result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFiles_MapperFn_NoTitle(t *testing.T) {
|
||||
mappers := Files()
|
||||
|
||||
fn, ok := mappers[consts.FileLayerMediaType]
|
||||
if !ok {
|
||||
t.Fatalf("expected mapper for %s", consts.FileLayerMediaType)
|
||||
}
|
||||
|
||||
d := digest.Digest("sha256:" + strings.Repeat("a", 64))
|
||||
desc := ocispec.Descriptor{
|
||||
Digest: d,
|
||||
}
|
||||
|
||||
result, err := fn(desc)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(result, ".file") {
|
||||
t.Fatalf("expected result to end with .file, got %q", result)
|
||||
}
|
||||
|
||||
expected := "sha256:" + strings.Repeat("a", 64) + ".file"
|
||||
if result != expected {
|
||||
t.Fatalf("expected %q, got %q", expected, result)
|
||||
}
|
||||
}
|
||||
@@ -4,32 +4,44 @@ import (
|
||||
"fmt"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"oras.land/oras-go/pkg/target"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content"
|
||||
)
|
||||
|
||||
type Fn func(desc ocispec.Descriptor) (string, error)
|
||||
|
||||
// FromManifest will return the appropriate content store given a reference and source type adequate for storing the results on disk
|
||||
func FromManifest(manifest ocispec.Manifest, root string) (target.Target, error) {
|
||||
// TODO: Don't rely solely on config mediatype
|
||||
func FromManifest(manifest ocispec.Manifest, root string) (content.Target, error) {
|
||||
// First, switch on config mediatype to identify known types.
|
||||
switch manifest.Config.MediaType {
|
||||
case consts.DockerConfigJSON, consts.OCIManifestSchema1:
|
||||
s := NewMapperFileStore(root, Images())
|
||||
defer s.Close()
|
||||
return s, nil
|
||||
|
||||
case consts.ChartLayerMediaType, consts.ChartConfigMediaType:
|
||||
s := NewMapperFileStore(root, Chart())
|
||||
defer s.Close()
|
||||
return s, nil
|
||||
return NewMapperFileStore(root, Chart())
|
||||
|
||||
default:
|
||||
s := NewMapperFileStore(root, nil)
|
||||
defer s.Close()
|
||||
return s, nil
|
||||
case consts.FileLocalConfigMediaType, consts.FileDirectoryConfigMediaType, consts.FileHttpConfigMediaType:
|
||||
return NewMapperFileStore(root, Files())
|
||||
|
||||
case consts.DockerConfigJSON, ocispec.MediaTypeImageConfig:
|
||||
// Standard OCI/Docker image config. OCI artifacts that distribute files
|
||||
// (e.g. rke2-binary) reuse this config type but set AnnotationTitle on their
|
||||
// layers. When title annotations are present prefer Files() so the title is
|
||||
// used as the output filename; otherwise treat as a container image.
|
||||
for _, layer := range manifest.Layers {
|
||||
if _, ok := layer.Annotations[ocispec.AnnotationTitle]; ok {
|
||||
return NewMapperFileStore(root, Files())
|
||||
}
|
||||
}
|
||||
return NewMapperFileStore(root, Images())
|
||||
}
|
||||
|
||||
// Unknown config type: title annotation indicates a file artifact; otherwise use
|
||||
// a catch-all mapper that writes blobs by digest.
|
||||
for _, layer := range manifest.Layers {
|
||||
if _, ok := layer.Annotations[ocispec.AnnotationTitle]; ok {
|
||||
return NewMapperFileStore(root, Files())
|
||||
}
|
||||
}
|
||||
return NewMapperFileStore(root, Default())
|
||||
}
|
||||
|
||||
func Images() map[string]Fn {
|
||||
@@ -81,3 +93,52 @@ func Chart() map[string]Fn {
|
||||
m[consts.ProvLayerMediaType] = provMapperFn
|
||||
return m
|
||||
}
|
||||
|
||||
// DefaultCatchAll is the sentinel key used in a mapper map to match any media type
|
||||
// not explicitly registered. Push checks for this key as a fallback.
|
||||
const DefaultCatchAll = ""
|
||||
|
||||
// Default returns a catch-all mapper that extracts any layer blob using its title
|
||||
// annotation as the filename, falling back to a digest-based name. Used when the
|
||||
// manifest config media type is not a known hauler type.
|
||||
func Default() map[string]Fn {
|
||||
m := make(map[string]Fn)
|
||||
m[DefaultCatchAll] = Fn(func(desc ocispec.Descriptor) (string, error) {
|
||||
if title, ok := desc.Annotations[ocispec.AnnotationTitle]; ok {
|
||||
return title, nil
|
||||
}
|
||||
return fmt.Sprintf("%s.bin", desc.Digest.String()), nil
|
||||
})
|
||||
return m
|
||||
}
|
||||
|
||||
func Files() map[string]Fn {
|
||||
m := make(map[string]Fn)
|
||||
|
||||
fileMapperFn := Fn(func(desc ocispec.Descriptor) (string, error) {
|
||||
// Use the title annotation to determine the filename
|
||||
if title, ok := desc.Annotations[ocispec.AnnotationTitle]; ok {
|
||||
return title, nil
|
||||
}
|
||||
// Fallback to digest-based filename if no title
|
||||
return fmt.Sprintf("%s.file", desc.Digest.String()), nil
|
||||
})
|
||||
|
||||
// Match the media type that's actually used in the manifest
|
||||
// (set by getter.LayerFrom in pkg/getter/getter.go)
|
||||
m[consts.FileLayerMediaType] = fileMapperFn
|
||||
m[consts.OCILayer] = fileMapperFn // Also handle standard OCI layers that have title annotation
|
||||
m["application/vnd.oci.image.layer.v1.tar"] = fileMapperFn // And the tar variant
|
||||
|
||||
// Catch-all for OCI artifacts that use custom layer media types (e.g. rke2-binary).
|
||||
// Write the blob if it carries an AnnotationTitle; silently discard everything else
|
||||
// (config blobs, metadata) by returning an empty filename.
|
||||
m[DefaultCatchAll] = Fn(func(desc ocispec.Descriptor) (string, error) {
|
||||
if title, ok := desc.Annotations[ocispec.AnnotationTitle]; ok {
|
||||
return title, nil
|
||||
}
|
||||
return "", nil // No title → discard (config blob or unrecognised metadata)
|
||||
})
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
90
internal/server/server_test.go
Normal file
90
internal/server/server_test.go
Normal file
@@ -0,0 +1,90 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
// Register the filesystem storage driver for the distribution registry.
|
||||
_ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
)
|
||||
|
||||
func TestNewTempRegistry_StartStop(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
srv := NewTempRegistry(ctx, t.TempDir())
|
||||
|
||||
// Start the httptest server directly to avoid the Start() method's
|
||||
// retry logic which only accepts HTTP 200, while /v2 returns 401
|
||||
// from the distribution registry.
|
||||
srv.Server.Start()
|
||||
t.Cleanup(func() { srv.Stop() })
|
||||
|
||||
resp, err := http.Get(srv.Server.URL + "/v2")
|
||||
if err != nil {
|
||||
t.Fatalf("expected GET /v2 to succeed, got error: %v", err)
|
||||
}
|
||||
resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
|
||||
t.Fatalf("expected status 200 or 401, got %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Stop and verify unreachable.
|
||||
srv.Stop()
|
||||
|
||||
_, err = http.Get(srv.Server.URL + "/v2")
|
||||
if err == nil {
|
||||
t.Fatal("expected error after stopping server, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewTempRegistry_Registry(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
srv := NewTempRegistry(ctx, t.TempDir())
|
||||
|
||||
srv.Server.Start()
|
||||
t.Cleanup(func() { srv.Stop() })
|
||||
|
||||
host := srv.Registry()
|
||||
if host == "" {
|
||||
t.Fatal("expected non-empty registry host")
|
||||
}
|
||||
if strings.Contains(host, "http://") {
|
||||
t.Fatalf("registry host should not contain protocol prefix, got %q", host)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewFile_Configuration(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
opts := flags.ServeFilesOpts{
|
||||
RootDir: t.TempDir(),
|
||||
Port: 0,
|
||||
Timeout: 0,
|
||||
}
|
||||
|
||||
srv, err := NewFile(ctx, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if srv == nil {
|
||||
t.Fatal("expected non-nil server")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewFile_DefaultPort(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
opts := flags.ServeFilesOpts{
|
||||
RootDir: t.TempDir(),
|
||||
}
|
||||
|
||||
srv, err := NewFile(ctx, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if srv == nil {
|
||||
t.Fatal("expected non-nil server")
|
||||
}
|
||||
}
|
||||
@@ -1,121 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
v1alpha1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
)
|
||||
|
||||
// converts v1alpha1.Files -> v1.Files
|
||||
func ConvertFiles(in *v1alpha1.Files, out *v1.Files) error {
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Spec.Files = make([]v1.File, len(in.Spec.Files))
|
||||
for i := range in.Spec.Files {
|
||||
out.Spec.Files[i].Name = in.Spec.Files[i].Name
|
||||
out.Spec.Files[i].Path = in.Spec.Files[i].Path
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// converts v1alpha1.Images -> v1.Images
|
||||
func ConvertImages(in *v1alpha1.Images, out *v1.Images) error {
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Spec.Images = make([]v1.Image, len(in.Spec.Images))
|
||||
for i := range in.Spec.Images {
|
||||
out.Spec.Images[i].Name = in.Spec.Images[i].Name
|
||||
out.Spec.Images[i].Platform = in.Spec.Images[i].Platform
|
||||
out.Spec.Images[i].Key = in.Spec.Images[i].Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// converts v1alpha1.Charts -> v1.Charts
|
||||
func ConvertCharts(in *v1alpha1.Charts, out *v1.Charts) error {
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Spec.Charts = make([]v1.Chart, len(in.Spec.Charts))
|
||||
for i := range in.Spec.Charts {
|
||||
out.Spec.Charts[i].Name = in.Spec.Charts[i].Name
|
||||
out.Spec.Charts[i].RepoURL = in.Spec.Charts[i].RepoURL
|
||||
out.Spec.Charts[i].Version = in.Spec.Charts[i].Version
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// converts v1alpha1.ThickCharts -> v1.ThickCharts
|
||||
func ConvertThickCharts(in *v1alpha1.ThickCharts, out *v1.ThickCharts) error {
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Spec.Charts = make([]v1.ThickChart, len(in.Spec.Charts))
|
||||
for i := range in.Spec.Charts {
|
||||
out.Spec.Charts[i].Chart.Name = in.Spec.Charts[i].Chart.Name
|
||||
out.Spec.Charts[i].Chart.RepoURL = in.Spec.Charts[i].Chart.RepoURL
|
||||
out.Spec.Charts[i].Chart.Version = in.Spec.Charts[i].Chart.Version
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// converts v1alpha1.ImageTxts -> v1.ImageTxts
|
||||
func ConvertImageTxts(in *v1alpha1.ImageTxts, out *v1.ImageTxts) error {
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Spec.ImageTxts = make([]v1.ImageTxt, len(in.Spec.ImageTxts))
|
||||
for i := range in.Spec.ImageTxts {
|
||||
out.Spec.ImageTxts[i].Ref = in.Spec.ImageTxts[i].Ref
|
||||
out.Spec.ImageTxts[i].Sources.Include = append(
|
||||
out.Spec.ImageTxts[i].Sources.Include,
|
||||
in.Spec.ImageTxts[i].Sources.Include...,
|
||||
)
|
||||
out.Spec.ImageTxts[i].Sources.Exclude = append(
|
||||
out.Spec.ImageTxts[i].Sources.Exclude,
|
||||
in.Spec.ImageTxts[i].Sources.Exclude...,
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// convert v1alpha1 object to v1 object
|
||||
func ConvertObject(in interface{}) (interface{}, error) {
|
||||
switch src := in.(type) {
|
||||
|
||||
case *v1alpha1.Files:
|
||||
dst := &v1.Files{}
|
||||
if err := ConvertFiles(src, dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dst, nil
|
||||
|
||||
case *v1alpha1.Images:
|
||||
dst := &v1.Images{}
|
||||
if err := ConvertImages(src, dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dst, nil
|
||||
|
||||
case *v1alpha1.Charts:
|
||||
dst := &v1.Charts{}
|
||||
if err := ConvertCharts(src, dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dst, nil
|
||||
|
||||
case *v1alpha1.ThickCharts:
|
||||
dst := &v1.ThickCharts{}
|
||||
if err := ConvertThickCharts(src, dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dst, nil
|
||||
|
||||
case *v1alpha1.ImageTxts:
|
||||
dst := &v1.ImageTxts{}
|
||||
if err := ConvertImageTxts(src, dst); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unsupported object type [%T]", in)
|
||||
}
|
||||
@@ -19,24 +19,8 @@ type Chart struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
RepoURL string `json:"repoURL,omitempty"`
|
||||
Version string `json:"version,omitempty"`
|
||||
}
|
||||
Rewrite string `json:"rewrite,omitempty"`
|
||||
|
||||
type ThickCharts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ThickChartSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ThickChartSpec struct {
|
||||
Charts []ThickChart `json:"charts,omitempty"`
|
||||
}
|
||||
|
||||
type ThickChart struct {
|
||||
Chart `json:",inline,omitempty"`
|
||||
ExtraImages []ChartImage `json:"extraImages,omitempty"`
|
||||
}
|
||||
|
||||
type ChartImage struct {
|
||||
Reference string `json:"ref"`
|
||||
AddImages bool `json:"add-images,omitempty"`
|
||||
AddDependencies bool `json:"add-dependencies,omitempty"`
|
||||
}
|
||||
|
||||
@@ -27,7 +27,15 @@ type Image struct {
|
||||
//Tlog string `json:"use-tlog-verify,omitempty"`
|
||||
Tlog bool `json:"use-tlog-verify"`
|
||||
|
||||
// cosign keyless validation options
|
||||
CertIdentity string `json:"certificate-identity"`
|
||||
CertIdentityRegexp string `json:"certificate-identity-regexp"`
|
||||
CertOidcIssuer string `json:"certificate-oidc-issuer"`
|
||||
CertOidcIssuerRegexp string `json:"certificate-oidc-issuer-regexp"`
|
||||
CertGithubWorkflowRepository string `json:"certificate-github-workflow-repository"`
|
||||
|
||||
// Platform of the image to be pulled. If not specified, all platforms will be pulled.
|
||||
//Platform string `json:"key,omitempty"`
|
||||
Platform string `json:"platform"`
|
||||
Rewrite string `json:"rewrite"`
|
||||
}
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type ImageTxts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ImageTxtsSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ImageTxtsSpec struct {
|
||||
ImageTxts []ImageTxt `json:"imageTxts,omitempty"`
|
||||
}
|
||||
|
||||
type ImageTxt struct {
|
||||
Ref string `json:"ref,omitempty"`
|
||||
Sources ImageTxtSources `json:"sources,omitempty"`
|
||||
}
|
||||
|
||||
type ImageTxtSources struct {
|
||||
Include []string `json:"include,omitempty"`
|
||||
Exclude []string `json:"exclude,omitempty"`
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type Charts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ChartSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ChartSpec struct {
|
||||
Charts []Chart `json:"charts,omitempty"`
|
||||
}
|
||||
|
||||
type Chart struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
RepoURL string `json:"repoURL,omitempty"`
|
||||
Version string `json:"version,omitempty"`
|
||||
}
|
||||
|
||||
type ThickCharts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ThickChartSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ThickChartSpec struct {
|
||||
Charts []ThickChart `json:"charts,omitempty"`
|
||||
}
|
||||
|
||||
type ThickChart struct {
|
||||
Chart `json:",inline,omitempty"`
|
||||
ExtraImages []ChartImage `json:"extraImages,omitempty"`
|
||||
}
|
||||
|
||||
type ChartImage struct {
|
||||
Reference string `json:"ref"`
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type Driver struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec DriverSpec `json:"spec"`
|
||||
}
|
||||
|
||||
type DriverSpec struct {
|
||||
Type string `json:"type"`
|
||||
Version string `json:"version"`
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type Files struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec FileSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type FileSpec struct {
|
||||
Files []File `json:"files,omitempty"`
|
||||
}
|
||||
|
||||
type File struct {
|
||||
// Path is the path to the file contents, can be a local or remote path
|
||||
Path string `json:"path"`
|
||||
|
||||
// Name is an optional field specifying the name of the file when specified,
|
||||
// it will override any dynamic name discovery from Path
|
||||
Name string `json:"name,omitempty"`
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
var (
|
||||
ContentGroupVersion = schema.GroupVersion{Group: consts.ContentGroup, Version: "v1alpha1"}
|
||||
CollectionGroupVersion = schema.GroupVersion{Group: consts.CollectionGroup, Version: "v1alpha1"}
|
||||
)
|
||||
@@ -1,33 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type Images struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ImageSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ImageSpec struct {
|
||||
Images []Image `json:"images,omitempty"`
|
||||
}
|
||||
|
||||
type Image struct {
|
||||
// Name is the full location for the image, can be referenced by tags or digests
|
||||
Name string `json:"name"`
|
||||
|
||||
// Path is the path to the cosign public key used for verifying image signatures
|
||||
//Key string `json:"key,omitempty"`
|
||||
Key string `json:"key"`
|
||||
|
||||
// Path is the path to the cosign public key used for verifying image signatures
|
||||
//Tlog string `json:"use-tlog-verify,omitempty"`
|
||||
Tlog bool `json:"use-tlog-verify"`
|
||||
|
||||
// Platform of the image to be pulled. If not specified, all platforms will be pulled.
|
||||
//Platform string `json:"key,omitempty"`
|
||||
Platform string `json:"platform"`
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type ImageTxts struct {
|
||||
*metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
|
||||
Spec ImageTxtsSpec `json:"spec,omitempty"`
|
||||
}
|
||||
|
||||
type ImageTxtsSpec struct {
|
||||
ImageTxts []ImageTxt `json:"imageTxts,omitempty"`
|
||||
}
|
||||
|
||||
type ImageTxt struct {
|
||||
Ref string `json:"ref,omitempty"`
|
||||
Sources ImageTxtSources `json:"sources,omitempty"`
|
||||
}
|
||||
|
||||
type ImageTxtSources struct {
|
||||
Include []string `json:"include,omitempty"`
|
||||
Exclude []string `json:"exclude,omitempty"`
|
||||
}
|
||||
164
pkg/archives/archives_test.go
Normal file
164
pkg/archives/archives_test.go
Normal file
@@ -0,0 +1,164 @@
|
||||
package archives
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/mholt/archives"
|
||||
"github.com/rs/zerolog"
|
||||
)
|
||||
|
||||
func testContext(t *testing.T) context.Context {
|
||||
t.Helper()
|
||||
l := zerolog.New(io.Discard)
|
||||
return l.WithContext(context.Background())
|
||||
}
|
||||
|
||||
func TestArchive_RoundTrip(t *testing.T) {
|
||||
ctx := testContext(t)
|
||||
|
||||
srcDir := t.TempDir()
|
||||
files := map[string]string{
|
||||
"file1.txt": "hello world",
|
||||
"subdir/file2.txt": "nested content",
|
||||
"subdir/file3.json": `{"key":"value"}`,
|
||||
}
|
||||
for relPath, content := range files {
|
||||
full := filepath.Join(srcDir, relPath)
|
||||
if err := os.MkdirAll(filepath.Dir(full), 0o755); err != nil {
|
||||
t.Fatalf("create parent dir for %s: %v", relPath, err)
|
||||
}
|
||||
if err := os.WriteFile(full, []byte(content), 0o644); err != nil {
|
||||
t.Fatalf("write %s: %v", relPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
outFile := filepath.Join(t.TempDir(), "test.tar.zst")
|
||||
if err := Archive(ctx, srcDir, outFile, archives.Zstd{}, archives.Tar{}); err != nil {
|
||||
t.Fatalf("Archive() error: %v", err)
|
||||
}
|
||||
|
||||
info, err := os.Stat(outFile)
|
||||
if err != nil {
|
||||
t.Fatalf("archive file missing: %v", err)
|
||||
}
|
||||
if info.Size() == 0 {
|
||||
t.Fatal("archive file is empty")
|
||||
}
|
||||
|
||||
dstDir := t.TempDir()
|
||||
if err := Unarchive(ctx, outFile, dstDir); err != nil {
|
||||
t.Fatalf("Unarchive() error: %v", err)
|
||||
}
|
||||
|
||||
// Archive maps files under the source directory's base name.
|
||||
baseName := filepath.Base(srcDir)
|
||||
for relPath, expectedContent := range files {
|
||||
full := filepath.Join(dstDir, baseName, relPath)
|
||||
data, err := os.ReadFile(full)
|
||||
if err != nil {
|
||||
t.Errorf("read extracted file %s: %v", relPath, err)
|
||||
continue
|
||||
}
|
||||
if string(data) != expectedContent {
|
||||
t.Errorf("content mismatch for %s: got %q, want %q", relPath, string(data), expectedContent)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestArchive_NonExistentDir(t *testing.T) {
|
||||
ctx := testContext(t)
|
||||
nonExistent := filepath.Join(t.TempDir(), "does-not-exist")
|
||||
outFile := filepath.Join(t.TempDir(), "out.tar.zst")
|
||||
if err := Archive(ctx, nonExistent, outFile, archives.Zstd{}, archives.Tar{}); err == nil {
|
||||
t.Fatal("Archive() should return an error for a non-existent source directory")
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnarchive_ExistingHaul(t *testing.T) {
|
||||
ctx := testContext(t)
|
||||
|
||||
// testdata/ is two levels up from pkg/archives/
|
||||
haulPath := filepath.Join("..", "..", "testdata", "haul.tar.zst")
|
||||
if _, err := os.Stat(haulPath); err != nil {
|
||||
t.Skipf("testdata/haul.tar.zst not found at %s: %v", haulPath, err)
|
||||
}
|
||||
|
||||
dstDir := t.TempDir()
|
||||
if err := Unarchive(ctx, haulPath, dstDir); err != nil {
|
||||
t.Fatalf("Unarchive() error: %v", err)
|
||||
}
|
||||
|
||||
var indexPath string
|
||||
if err := filepath.Walk(dstDir, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.Name() == "index.json" {
|
||||
indexPath = path
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("walk extracted dir: %v", err)
|
||||
}
|
||||
if indexPath == "" {
|
||||
t.Fatal("index.json not found in extracted haul archive")
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(indexPath)
|
||||
if err != nil {
|
||||
t.Fatalf("read index.json: %v", err)
|
||||
}
|
||||
if !json.Valid(data) {
|
||||
t.Fatal("index.json is not valid JSON")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSecurePath(t *testing.T) {
|
||||
basePath := "/tmp/extract"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
relativePath string
|
||||
wantResult string
|
||||
}{
|
||||
{
|
||||
name: "normal relative path",
|
||||
relativePath: "subdir/file.txt",
|
||||
wantResult: "/tmp/extract/subdir/file.txt",
|
||||
},
|
||||
{
|
||||
name: "simple filename",
|
||||
relativePath: "readme.txt",
|
||||
wantResult: "/tmp/extract/readme.txt",
|
||||
},
|
||||
// Path traversal attempts are sanitized (not rejected): "/../../../etc/passwd"
|
||||
// cleans to "/etc/passwd", strips leading "/" → "etc/passwd", joined → base/etc/passwd.
|
||||
{
|
||||
name: "path traversal is sanitized to safe path",
|
||||
relativePath: "../../../etc/passwd",
|
||||
wantResult: "/tmp/extract/etc/passwd",
|
||||
},
|
||||
{
|
||||
name: "deeply nested traversal is sanitized",
|
||||
relativePath: "a/b/../../../../etc/shadow",
|
||||
wantResult: "/tmp/extract/etc/shadow",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := securePath(basePath, tt.relativePath)
|
||||
if err != nil {
|
||||
t.Fatalf("securePath(%q, %q) unexpected error: %v", basePath, tt.relativePath, err)
|
||||
}
|
||||
if result != tt.wantResult {
|
||||
t.Errorf("securePath(%q, %q) = %q, want %q", basePath, tt.relativePath, result, tt.wantResult)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
gv1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/partial"
|
||||
gtypes "github.com/google/go-containerregistry/pkg/v1/types"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
@@ -90,6 +91,13 @@ func (f *File) compute() error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Manually preserve the Title annotation from the layer
|
||||
// The layer was created with this annotation in getter.LayerFrom
|
||||
if layer.Annotations == nil {
|
||||
layer.Annotations = make(map[string]string)
|
||||
}
|
||||
layer.Annotations[ocispec.AnnotationTitle] = f.client.Name(f.Path)
|
||||
|
||||
cfg := f.client.Config(f.Path)
|
||||
if cfg == nil {
|
||||
cfg = f.client.Config(f.Path)
|
||||
|
||||
@@ -71,10 +71,10 @@ func IsMultiArchImage(name string, opts ...remote.Option) (bool, error) {
|
||||
|
||||
_, err = desc.ImageIndex()
|
||||
if err != nil {
|
||||
// If the descriptor could not be converted to an image index, it's not a multi-arch image
|
||||
// if the descriptor could not be converted to an image index... it's not a multi-arch image
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// If the descriptor could be converted to an image index, it's a multi-arch image
|
||||
// if the descriptor could be converted to an image index... it's a multi-arch image
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -1,107 +0,0 @@
|
||||
package chart
|
||||
|
||||
import (
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/image"
|
||||
"hauler.dev/go/hauler/pkg/content/chart"
|
||||
"hauler.dev/go/hauler/pkg/reference"
|
||||
)
|
||||
|
||||
var _ artifacts.OCICollection = (*tchart)(nil)
|
||||
|
||||
// tchart is a thick chart that includes all the dependent images as well as the chart itself
|
||||
type tchart struct {
|
||||
chart *chart.Chart
|
||||
config v1.ThickChart
|
||||
|
||||
computed bool
|
||||
contents map[string]artifacts.OCI
|
||||
}
|
||||
|
||||
func NewThickChart(cfg v1.ThickChart, opts *action.ChartPathOptions) (artifacts.OCICollection, error) {
|
||||
o, err := chart.NewChart(cfg.Chart.Name, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &tchart{
|
||||
chart: o,
|
||||
config: cfg,
|
||||
contents: make(map[string]artifacts.OCI),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *tchart) Contents() (map[string]artifacts.OCI, error) {
|
||||
if err := c.compute(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.contents, nil
|
||||
}
|
||||
|
||||
func (c *tchart) compute() error {
|
||||
if c.computed {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := c.dependentImages(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.chartContents(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.extraImages(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.computed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *tchart) chartContents() error {
|
||||
ch, err := c.chart.Load()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ref, err := reference.NewTagged(ch.Name(), ch.Metadata.Version)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.contents[ref.Name()] = c.chart
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *tchart) dependentImages() error {
|
||||
ch, err := c.chart.Load()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
imgs, err := ImagesInChart(ch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, img := range imgs.Spec.Images {
|
||||
i, err := image.NewImage(img.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.contents[img.Name] = i
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *tchart) extraImages() error {
|
||||
for _, img := range c.config.ExtraImages {
|
||||
i, err := image.NewImage(img.Reference)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.contents[img.Reference] = i
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,129 +0,0 @@
|
||||
package chart
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"helm.sh/helm/v3/pkg/action"
|
||||
helmchart "helm.sh/helm/v3/pkg/chart"
|
||||
"helm.sh/helm/v3/pkg/chartutil"
|
||||
"helm.sh/helm/v3/pkg/kube/fake"
|
||||
"helm.sh/helm/v3/pkg/storage"
|
||||
"helm.sh/helm/v3/pkg/storage/driver"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"k8s.io/client-go/util/jsonpath"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
)
|
||||
|
||||
var defaultKnownImagePaths = []string{
|
||||
// Deployments & DaemonSets
|
||||
"{.spec.template.spec.initContainers[*].image}",
|
||||
"{.spec.template.spec.containers[*].image}",
|
||||
|
||||
// Pods
|
||||
"{.spec.initContainers[*].image}",
|
||||
"{.spec.containers[*].image}",
|
||||
}
|
||||
|
||||
// ImagesInChart will render a chart and identify all dependent images from it
|
||||
func ImagesInChart(c *helmchart.Chart) (v1.Images, error) {
|
||||
docs, err := template(c)
|
||||
if err != nil {
|
||||
return v1.Images{}, err
|
||||
}
|
||||
|
||||
var images []v1.Image
|
||||
reader := yaml.NewYAMLReader(bufio.NewReader(strings.NewReader(docs)))
|
||||
for {
|
||||
raw, err := reader.Read()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return v1.Images{}, err
|
||||
}
|
||||
|
||||
found := find(raw, defaultKnownImagePaths...)
|
||||
for _, f := range found {
|
||||
images = append(images, v1.Image{Name: f})
|
||||
}
|
||||
}
|
||||
|
||||
ims := v1.Images{
|
||||
Spec: v1.ImageSpec{
|
||||
Images: images,
|
||||
},
|
||||
}
|
||||
return ims, nil
|
||||
}
|
||||
|
||||
func template(c *helmchart.Chart) (string, error) {
|
||||
s := storage.Init(driver.NewMemory())
|
||||
|
||||
templateCfg := &action.Configuration{
|
||||
RESTClientGetter: nil,
|
||||
Releases: s,
|
||||
KubeClient: &fake.PrintingKubeClient{Out: io.Discard},
|
||||
Capabilities: chartutil.DefaultCapabilities,
|
||||
Log: func(format string, v ...interface{}) {},
|
||||
}
|
||||
|
||||
// TODO: Do we need values if we're claiming this is best effort image detection?
|
||||
// Justification being: if users are relying on us to get images from their values, they could just add images to the []ImagesInChart spec of the Store api
|
||||
vals := make(map[string]interface{})
|
||||
|
||||
client := action.NewInstall(templateCfg)
|
||||
client.ReleaseName = "dry"
|
||||
client.DryRun = true
|
||||
client.Replace = true
|
||||
client.ClientOnly = true
|
||||
client.IncludeCRDs = true
|
||||
|
||||
release, err := client.Run(c, vals)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return release.Manifest, nil
|
||||
}
|
||||
|
||||
func find(data []byte, paths ...string) []string {
|
||||
var (
|
||||
pathMatches []string
|
||||
obj interface{}
|
||||
)
|
||||
|
||||
if err := yaml.Unmarshal(data, &obj); err != nil {
|
||||
return nil
|
||||
}
|
||||
j := jsonpath.New("")
|
||||
j.AllowMissingKeys(true)
|
||||
|
||||
for _, p := range paths {
|
||||
r, err := parseJSONPath(obj, j, p)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
pathMatches = append(pathMatches, r...)
|
||||
}
|
||||
return pathMatches
|
||||
}
|
||||
|
||||
func parseJSONPath(data interface{}, parser *jsonpath.JSONPath, template string) ([]string, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
if err := parser.Parse(template); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := parser.Execute(buf, data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f := func(s rune) bool { return s == ' ' }
|
||||
r := strings.FieldsFunc(buf.String(), f)
|
||||
return r, nil
|
||||
}
|
||||
@@ -1,232 +0,0 @@
|
||||
package imagetxt
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
|
||||
artifact "hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/image"
|
||||
"hauler.dev/go/hauler/pkg/getter"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
)
|
||||
|
||||
type ImageTxt struct {
|
||||
Ref string
|
||||
IncludeSources map[string]bool
|
||||
ExcludeSources map[string]bool
|
||||
|
||||
lock *sync.Mutex
|
||||
client *getter.Client
|
||||
computed bool
|
||||
contents map[string]artifact.OCI
|
||||
}
|
||||
|
||||
var _ artifact.OCICollection = (*ImageTxt)(nil)
|
||||
|
||||
type Option interface {
|
||||
Apply(*ImageTxt) error
|
||||
}
|
||||
|
||||
type withIncludeSources []string
|
||||
|
||||
func (o withIncludeSources) Apply(it *ImageTxt) error {
|
||||
if it.IncludeSources == nil {
|
||||
it.IncludeSources = make(map[string]bool)
|
||||
}
|
||||
for _, s := range o {
|
||||
it.IncludeSources[s] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func WithIncludeSources(include ...string) Option {
|
||||
return withIncludeSources(include)
|
||||
}
|
||||
|
||||
type withExcludeSources []string
|
||||
|
||||
func (o withExcludeSources) Apply(it *ImageTxt) error {
|
||||
if it.ExcludeSources == nil {
|
||||
it.ExcludeSources = make(map[string]bool)
|
||||
}
|
||||
for _, s := range o {
|
||||
it.ExcludeSources[s] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func WithExcludeSources(exclude ...string) Option {
|
||||
return withExcludeSources(exclude)
|
||||
}
|
||||
|
||||
func New(ref string, opts ...Option) (*ImageTxt, error) {
|
||||
it := &ImageTxt{
|
||||
Ref: ref,
|
||||
|
||||
client: getter.NewClient(getter.ClientOptions{}),
|
||||
lock: &sync.Mutex{},
|
||||
}
|
||||
|
||||
for i, o := range opts {
|
||||
if err := o.Apply(it); err != nil {
|
||||
return nil, fmt.Errorf("invalid option %d: %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
return it, nil
|
||||
}
|
||||
|
||||
func (it *ImageTxt) Contents() (map[string]artifact.OCI, error) {
|
||||
it.lock.Lock()
|
||||
defer it.lock.Unlock()
|
||||
if !it.computed {
|
||||
if err := it.compute(); err != nil {
|
||||
return nil, fmt.Errorf("compute OCI layout: %v", err)
|
||||
}
|
||||
it.computed = true
|
||||
}
|
||||
return it.contents, nil
|
||||
}
|
||||
|
||||
func (it *ImageTxt) compute() error {
|
||||
// TODO - pass in logger from context
|
||||
l := log.NewLogger(os.Stdout)
|
||||
|
||||
it.contents = make(map[string]artifact.OCI)
|
||||
|
||||
ctx := context.TODO()
|
||||
|
||||
rc, err := it.client.ContentFrom(ctx, it.Ref)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetch image.txt ref %s: %w", it.Ref, err)
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
entries, err := splitImagesTxt(rc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parse image.txt ref %s: %v", it.Ref, err)
|
||||
}
|
||||
|
||||
foundSources := make(map[string]bool)
|
||||
for _, e := range entries {
|
||||
for s := range e.Sources {
|
||||
foundSources[s] = true
|
||||
}
|
||||
}
|
||||
|
||||
var pullAll bool
|
||||
targetSources := make(map[string]bool)
|
||||
|
||||
if len(foundSources) == 0 || (len(it.IncludeSources) == 0 && len(it.ExcludeSources) == 0) {
|
||||
// pull all found images
|
||||
pullAll = true
|
||||
|
||||
if len(foundSources) == 0 {
|
||||
l.Infof("image txt file appears to have no sources; pulling all found images")
|
||||
if len(it.IncludeSources) != 0 || len(it.ExcludeSources) != 0 {
|
||||
l.Warnf("ImageTxt provided include or exclude sources; ignoring")
|
||||
}
|
||||
} else if len(it.IncludeSources) == 0 && len(it.ExcludeSources) == 0 {
|
||||
l.Infof("image-sources txt file not filtered; pulling all found images")
|
||||
}
|
||||
} else {
|
||||
// determine sources to pull
|
||||
if len(it.IncludeSources) != 0 && len(it.ExcludeSources) != 0 {
|
||||
l.Warnf("ImageTxt provided include and exclude sources; using only include sources")
|
||||
}
|
||||
|
||||
if len(it.IncludeSources) != 0 {
|
||||
targetSources = it.IncludeSources
|
||||
} else {
|
||||
for s := range foundSources {
|
||||
targetSources[s] = true
|
||||
}
|
||||
for s := range it.ExcludeSources {
|
||||
delete(targetSources, s)
|
||||
}
|
||||
}
|
||||
var targetSourcesArr []string
|
||||
for s := range targetSources {
|
||||
targetSourcesArr = append(targetSourcesArr, s)
|
||||
}
|
||||
l.Infof("pulling images covering sources %s", strings.Join(targetSourcesArr, ", "))
|
||||
}
|
||||
|
||||
for _, e := range entries {
|
||||
var matchesSourceFilter bool
|
||||
if pullAll {
|
||||
l.Infof("pulling image %s", e.Reference)
|
||||
} else {
|
||||
for s := range e.Sources {
|
||||
if targetSources[s] {
|
||||
matchesSourceFilter = true
|
||||
l.Infof("pulling image %s (matched source %s)", e.Reference, s)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if pullAll || matchesSourceFilter {
|
||||
curImage, err := image.NewImage(e.Reference.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("pull image %s: %v", e.Reference, err)
|
||||
}
|
||||
it.contents[e.Reference.String()] = curImage
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type imageTxtEntry struct {
|
||||
Reference name.Reference
|
||||
Sources map[string]bool
|
||||
}
|
||||
|
||||
func splitImagesTxt(r io.Reader) ([]imageTxtEntry, error) {
|
||||
var entries []imageTxtEntry
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
curEntry := imageTxtEntry{
|
||||
Sources: make(map[string]bool),
|
||||
}
|
||||
|
||||
lineContent := scanner.Text()
|
||||
if lineContent == "" || strings.HasPrefix(lineContent, "#") {
|
||||
// skip past empty and commented lines
|
||||
continue
|
||||
}
|
||||
splitContent := strings.Split(lineContent, " ")
|
||||
if len(splitContent) > 2 {
|
||||
return nil, fmt.Errorf(
|
||||
"invalid image.txt format: must contain only an image reference and sources separated by space; invalid line: %q",
|
||||
lineContent)
|
||||
}
|
||||
|
||||
curRef, err := name.ParseReference(splitContent[0])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid reference %s: %v", splitContent[0], err)
|
||||
}
|
||||
curEntry.Reference = curRef
|
||||
|
||||
if len(splitContent) == 2 {
|
||||
for _, source := range strings.Split(splitContent[1], ",") {
|
||||
curEntry.Sources[source] = true
|
||||
}
|
||||
}
|
||||
|
||||
entries = append(entries, curEntry)
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, fmt.Errorf("scan contents: %v", err)
|
||||
}
|
||||
|
||||
return entries, nil
|
||||
}
|
||||
@@ -1,209 +0,0 @@
|
||||
package imagetxt
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/image"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrRefNotFound = errors.New("ref not found")
|
||||
ErrRefNotImage = errors.New("ref is not image")
|
||||
ErrExtraRefsFound = errors.New("extra refs found in contents")
|
||||
)
|
||||
|
||||
var (
|
||||
testServer *httptest.Server
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
setup()
|
||||
code := m.Run()
|
||||
teardown()
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
func setup() {
|
||||
dir := http.Dir("./testdata/http/")
|
||||
h := http.FileServer(dir)
|
||||
testServer = httptest.NewServer(h)
|
||||
}
|
||||
|
||||
func teardown() {
|
||||
if testServer != nil {
|
||||
testServer.Close()
|
||||
}
|
||||
}
|
||||
|
||||
type failKind string
|
||||
|
||||
const (
|
||||
failKindNew = failKind("New")
|
||||
failKindContents = failKind("Contents")
|
||||
)
|
||||
|
||||
func checkError(checkedFailKind failKind) func(*testing.T, error, bool, failKind) {
|
||||
return func(cet *testing.T, err error, testShouldFail bool, testFailKind failKind) {
|
||||
if err != nil {
|
||||
// if error should not have happened at all OR error should have happened
|
||||
// at a different point, test failed
|
||||
if !testShouldFail || testFailKind != checkedFailKind {
|
||||
cet.Fatalf("unexpected error at %s: %v", checkedFailKind, err)
|
||||
}
|
||||
// test should fail at this point, test passed
|
||||
return
|
||||
}
|
||||
// if no error occurred but error should have happened at this point, test
|
||||
// failed
|
||||
if testShouldFail && testFailKind == checkedFailKind {
|
||||
cet.Fatalf("unexpected nil error at %s", checkedFailKind)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageTxtCollection(t *testing.T) {
|
||||
type testEntry struct {
|
||||
Name string
|
||||
Ref string
|
||||
IncludeSources []string
|
||||
ExcludeSources []string
|
||||
ExpectedImages []string
|
||||
ShouldFail bool
|
||||
FailKind failKind
|
||||
}
|
||||
tt := []testEntry{
|
||||
{
|
||||
Name: "http ref basic",
|
||||
Ref: fmt.Sprintf("%s/images-http.txt", testServer.URL),
|
||||
ExpectedImages: []string{
|
||||
"busybox",
|
||||
"nginx:1.19",
|
||||
"rancher/hyperkube:v1.21.7-rancher1",
|
||||
"docker.io/rancher/klipper-lb:v0.3.4",
|
||||
"quay.io/jetstack/cert-manager-controller:v1.6.1",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "http ref sources format pull all",
|
||||
Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
|
||||
ExpectedImages: []string{
|
||||
"busybox",
|
||||
"nginx:1.19",
|
||||
"rancher/hyperkube:v1.21.7-rancher1",
|
||||
"docker.io/rancher/klipper-lb:v0.3.4",
|
||||
"quay.io/jetstack/cert-manager-controller:v1.6.1",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "http ref sources format include sources A",
|
||||
Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
|
||||
IncludeSources: []string{
|
||||
"core", "rke",
|
||||
},
|
||||
ExpectedImages: []string{
|
||||
"busybox",
|
||||
"nginx:1.19",
|
||||
"rancher/hyperkube:v1.21.7-rancher1",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "http ref sources format include sources B",
|
||||
Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
|
||||
IncludeSources: []string{
|
||||
"nginx", "rancher", "cert-manager",
|
||||
},
|
||||
ExpectedImages: []string{
|
||||
"nginx:1.19",
|
||||
"rancher/hyperkube:v1.21.7-rancher1",
|
||||
"docker.io/rancher/klipper-lb:v0.3.4",
|
||||
"quay.io/jetstack/cert-manager-controller:v1.6.1",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "http ref sources format exclude sources A",
|
||||
Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
|
||||
ExcludeSources: []string{
|
||||
"cert-manager",
|
||||
},
|
||||
ExpectedImages: []string{
|
||||
"busybox",
|
||||
"nginx:1.19",
|
||||
"rancher/hyperkube:v1.21.7-rancher1",
|
||||
"docker.io/rancher/klipper-lb:v0.3.4",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "http ref sources format exclude sources B",
|
||||
Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL),
|
||||
ExcludeSources: []string{
|
||||
"core",
|
||||
},
|
||||
ExpectedImages: []string{
|
||||
"nginx:1.19",
|
||||
"rancher/hyperkube:v1.21.7-rancher1",
|
||||
"docker.io/rancher/klipper-lb:v0.3.4",
|
||||
"quay.io/jetstack/cert-manager-controller:v1.6.1",
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "local file ref",
|
||||
Ref: "./testdata/images-file.txt",
|
||||
ExpectedImages: []string{
|
||||
"busybox",
|
||||
"nginx:1.19",
|
||||
"rancher/hyperkube:v1.21.7-rancher1",
|
||||
"docker.io/rancher/klipper-lb:v0.3.4",
|
||||
"quay.io/jetstack/cert-manager-controller:v1.6.1",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
checkErrorNew := checkError(failKindNew)
|
||||
checkErrorContents := checkError(failKindContents)
|
||||
|
||||
for _, curTest := range tt {
|
||||
t.Run(curTest.Name, func(innerT *testing.T) {
|
||||
curImageTxt, err := New(curTest.Ref,
|
||||
WithIncludeSources(curTest.IncludeSources...),
|
||||
WithExcludeSources(curTest.ExcludeSources...),
|
||||
)
|
||||
checkErrorNew(innerT, err, curTest.ShouldFail, curTest.FailKind)
|
||||
|
||||
ociContents, err := curImageTxt.Contents()
|
||||
checkErrorContents(innerT, err, curTest.ShouldFail, curTest.FailKind)
|
||||
|
||||
if err := checkImages(ociContents, curTest.ExpectedImages); err != nil {
|
||||
innerT.Fatal(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func checkImages(content map[string]artifacts.OCI, refs []string) error {
|
||||
contentCopy := make(map[string]artifacts.OCI, len(content))
|
||||
for k, v := range content {
|
||||
contentCopy[k] = v
|
||||
}
|
||||
for _, ref := range refs {
|
||||
target, ok := content[ref]
|
||||
if !ok {
|
||||
return fmt.Errorf("ref %s: %w", ref, ErrRefNotFound)
|
||||
}
|
||||
if _, ok := target.(*image.Image); !ok {
|
||||
return fmt.Errorf("got underlying type %T: %w", target, ErrRefNotImage)
|
||||
}
|
||||
delete(contentCopy, ref)
|
||||
}
|
||||
|
||||
if len(contentCopy) != 0 {
|
||||
return ErrExtraRefsFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,5 +0,0 @@
|
||||
busybox
|
||||
nginx:1.19
|
||||
rancher/hyperkube:v1.21.7-rancher1
|
||||
docker.io/rancher/klipper-lb:v0.3.4
|
||||
quay.io/jetstack/cert-manager-controller:v1.6.1
|
||||
@@ -1,5 +0,0 @@
|
||||
busybox core
|
||||
nginx:1.19 core,nginx
|
||||
rancher/hyperkube:v1.21.7-rancher1 rancher,rke
|
||||
docker.io/rancher/klipper-lb:v0.3.4 rancher,k3s
|
||||
quay.io/jetstack/cert-manager-controller:v1.6.1 cert-manager
|
||||
@@ -1,5 +0,0 @@
|
||||
busybox
|
||||
nginx:1.19
|
||||
rancher/hyperkube:v1.21.7-rancher1
|
||||
docker.io/rancher/klipper-lb:v0.3.4
|
||||
quay.io/jetstack/cert-manager-controller:v1.6.1
|
||||
@@ -42,21 +42,42 @@ const (
|
||||
HaulerVendorPrefix = "vnd.hauler"
|
||||
|
||||
// annotation keys
|
||||
KindAnnotationName = "kind"
|
||||
KindAnnotationImage = "dev.cosignproject.cosign/image"
|
||||
KindAnnotationIndex = "dev.cosignproject.cosign/imageIndex"
|
||||
ContainerdImageNameKey = "io.containerd.image.name"
|
||||
KindAnnotationName = "kind"
|
||||
KindAnnotationImage = "dev.cosignproject.cosign/image"
|
||||
KindAnnotationIndex = "dev.cosignproject.cosign/imageIndex"
|
||||
KindAnnotationSigs = "dev.cosignproject.cosign/sigs"
|
||||
KindAnnotationAtts = "dev.cosignproject.cosign/atts"
|
||||
KindAnnotationSboms = "dev.cosignproject.cosign/sboms"
|
||||
// KindAnnotationReferrers is the kind prefix for OCI 1.1 referrer manifests (cosign v3
|
||||
// new-bundle-format). Each referrer gets a unique kind with the referrer manifest digest
|
||||
// appended (e.g. "dev.cosignproject.cosign/referrers/sha256hex") so multiple referrers
|
||||
// for the same base image coexist in the OCI index.
|
||||
KindAnnotationReferrers = "dev.cosignproject.cosign/referrers"
|
||||
|
||||
// Sigstore / OCI 1.1 artifact media types used by cosign v3 new-bundle-format.
|
||||
SigstoreBundleMediaType = "application/vnd.dev.sigstore.bundle.v0.3+json"
|
||||
OCIEmptyConfigMediaType = "application/vnd.oci.empty.v1+json"
|
||||
|
||||
ImageAnnotationKey = "hauler.dev/key"
|
||||
ImageAnnotationPlatform = "hauler.dev/platform"
|
||||
ImageAnnotationRegistry = "hauler.dev/registry"
|
||||
ImageAnnotationTlog = "hauler.dev/use-tlog-verify"
|
||||
ImageAnnotationRewrite = "hauler.dev/rewrite"
|
||||
ImageRefKey = "org.opencontainers.image.ref.name"
|
||||
|
||||
// cosign keyless validation options
|
||||
ImageAnnotationCertIdentity = "hauler.dev/certificate-identity"
|
||||
ImageAnnotationCertIdentityRegexp = "hauler.dev/certificate-identity-regexp"
|
||||
ImageAnnotationCertOidcIssuer = "hauler.dev/certificate-oidc-issuer"
|
||||
ImageAnnotationCertOidcIssuerRegexp = "hauler.dev/certificate-oidc-issuer-regexp"
|
||||
ImageAnnotationCertGithubWorkflowRepository = "hauler.dev/certificate-github-workflow-repository"
|
||||
|
||||
// content kinds
|
||||
ImagesContentKind = "Images"
|
||||
ChartsContentKind = "Charts"
|
||||
FilesContentKind = "Files"
|
||||
DriverContentKind = "Driver"
|
||||
ImageTxtsContentKind = "ImageTxts"
|
||||
ChartsCollectionKind = "ThickCharts"
|
||||
ImagesContentKind = "Images"
|
||||
ChartsContentKind = "Charts"
|
||||
FilesContentKind = "Files"
|
||||
// DriverContentKind = "Driver"
|
||||
|
||||
// content groups
|
||||
ContentGroup = "content.hauler.cattle.io"
|
||||
|
||||
@@ -33,14 +33,13 @@ var (
|
||||
settings = cli.New()
|
||||
)
|
||||
|
||||
// Chart implements the OCI interface for Chart API objects. API spec values are
|
||||
// stored into the Repo, Name, and Version fields.
|
||||
// chart implements the oci interface for chart api objects... api spec values are stored into the name, repo, and version fields
|
||||
type Chart struct {
|
||||
path string
|
||||
annotations map[string]string
|
||||
}
|
||||
|
||||
// NewChart is a helper method that returns NewLocalChart or NewRemoteChart depending on chart contents
|
||||
// newchart is a helper method that returns newlocalchart or newremotechart depending on chart contents
|
||||
func NewChart(name string, opts *action.ChartPathOptions) (*Chart, error) {
|
||||
chartRef := name
|
||||
actionConfig := new(action.Configuration)
|
||||
@@ -60,13 +59,31 @@ func NewChart(name string, opts *action.ChartPathOptions) (*Chart, error) {
|
||||
client.SetRegistryClient(registryClient)
|
||||
if registry.IsOCI(opts.RepoURL) {
|
||||
chartRef = opts.RepoURL + "/" + name
|
||||
} else if isUrl(opts.RepoURL) { // OCI Protocol registers as a valid URL
|
||||
} else if isUrl(opts.RepoURL) { // oci protocol registers as a valid url
|
||||
client.ChartPathOptions.RepoURL = opts.RepoURL
|
||||
} else { // Handles cases like grafana/loki
|
||||
} else { // handles cases like grafana and loki
|
||||
chartRef = opts.RepoURL + "/" + name
|
||||
}
|
||||
|
||||
// suppress helm downloader oci logs (stdout/stderr)
|
||||
oldStdout := os.Stdout
|
||||
oldStderr := os.Stderr
|
||||
rOut, wOut, _ := os.Pipe()
|
||||
rErr, wErr, _ := os.Pipe()
|
||||
os.Stdout = wOut
|
||||
os.Stderr = wErr
|
||||
|
||||
chartPath, err := client.ChartPathOptions.LocateChart(chartRef, settings)
|
||||
|
||||
wOut.Close()
|
||||
wErr.Close()
|
||||
os.Stdout = oldStdout
|
||||
os.Stderr = oldStderr
|
||||
_, _ = io.Copy(io.Discard, rOut)
|
||||
_, _ = io.Copy(io.Discard, rErr)
|
||||
rOut.Close()
|
||||
rErr.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -151,9 +168,8 @@ func (h *Chart) RawChartData() ([]byte, error) {
|
||||
return os.ReadFile(h.path)
|
||||
}
|
||||
|
||||
// chartData loads the chart contents into memory and returns a NopCloser for the contents
|
||||
//
|
||||
// Normally we avoid loading into memory, but charts sizes are strictly capped at ~1MB
|
||||
// chartdata loads the chart contents into memory and returns a NopCloser for the contents
|
||||
// normally we avoid loading into memory, but charts sizes are strictly capped at ~1MB
|
||||
func (h *Chart) chartData() (gv1.Layer, error) {
|
||||
info, err := os.Stat(h.path)
|
||||
if err != nil {
|
||||
@@ -256,14 +272,14 @@ func newDefaultRegistryClient(plainHTTP bool) (*registry.Client, error) {
|
||||
opts := []registry.ClientOption{
|
||||
registry.ClientOptDebug(settings.Debug),
|
||||
registry.ClientOptEnableCache(true),
|
||||
registry.ClientOptWriter(os.Stderr),
|
||||
registry.ClientOptWriter(io.Discard),
|
||||
registry.ClientOptCredentialsFile(settings.RegistryConfig),
|
||||
}
|
||||
if plainHTTP {
|
||||
opts = append(opts, registry.ClientOptPlainHTTP())
|
||||
}
|
||||
|
||||
// Create a new registry client
|
||||
// create a new registry client
|
||||
registryClient, err := registry.NewClient(opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -272,12 +288,21 @@ func newDefaultRegistryClient(plainHTTP bool) (*registry.Client, error) {
|
||||
}
|
||||
|
||||
func newRegistryClientWithTLS(certFile, keyFile, caFile string, insecureSkipTLSverify bool) (*registry.Client, error) {
|
||||
// Create a new registry client
|
||||
registryClient, err := registry.NewRegistryClientWithTLS(os.Stderr, certFile, keyFile, caFile, insecureSkipTLSverify,
|
||||
settings.RegistryConfig, settings.Debug,
|
||||
// create a new registry client
|
||||
registryClient, err := registry.NewRegistryClientWithTLS(
|
||||
io.Discard,
|
||||
certFile, keyFile, caFile,
|
||||
insecureSkipTLSverify,
|
||||
settings.RegistryConfig,
|
||||
settings.Debug,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return registryClient, nil
|
||||
}
|
||||
|
||||
// path returns the local filesystem path to the chart archive or directory
|
||||
func (h *Chart) Path() string {
|
||||
return h.path
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
|
||||
v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1"
|
||||
v1alpha1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1"
|
||||
)
|
||||
|
||||
func Load(data []byte) (schema.ObjectKind, error) {
|
||||
@@ -26,12 +25,10 @@ func Load(data []byte) (schema.ObjectKind, error) {
|
||||
}
|
||||
|
||||
gv := tm.GroupVersionKind().GroupVersion()
|
||||
// allow v1 and v1alpha1 content/collection
|
||||
// allow v1 content and collections
|
||||
if gv != v1.ContentGroupVersion &&
|
||||
gv != v1.CollectionGroupVersion &&
|
||||
gv != v1alpha1.ContentGroupVersion &&
|
||||
gv != v1alpha1.CollectionGroupVersion {
|
||||
return nil, fmt.Errorf("unrecognized content/collection [%s] with [kind=%s]", tm.APIVersion, tm.Kind)
|
||||
gv != v1.CollectionGroupVersion {
|
||||
return nil, fmt.Errorf("unrecognized content or collection [%s] with [kind=%s]", tm.APIVersion, tm.Kind)
|
||||
}
|
||||
|
||||
return &tm, nil
|
||||
|
||||
@@ -17,14 +17,12 @@ import (
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/opencontainers/image-spec/specs-go"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"oras.land/oras-go/pkg/content"
|
||||
"oras.land/oras-go/pkg/target"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/reference"
|
||||
)
|
||||
|
||||
var _ target.Target = (*OCI)(nil)
|
||||
var _ Target = (*OCI)(nil)
|
||||
|
||||
type OCI struct {
|
||||
root string
|
||||
@@ -76,6 +74,7 @@ func (o *OCI) LoadIndex() error {
|
||||
Versioned: specs.Versioned{
|
||||
SchemaVersion: 2,
|
||||
},
|
||||
MediaType: ocispec.MediaTypeImageIndex,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -88,15 +87,22 @@ func (o *OCI) LoadIndex() error {
|
||||
for _, desc := range o.index.Manifests {
|
||||
key, err := reference.Parse(desc.Annotations[ocispec.AnnotationRefName])
|
||||
if err != nil {
|
||||
return err
|
||||
// skip malformed entries rather than making the entire store unreadable
|
||||
continue
|
||||
}
|
||||
|
||||
// Set default kind if missing
|
||||
kind := desc.Annotations[consts.KindAnnotationName]
|
||||
if kind == "" {
|
||||
kind = consts.KindAnnotationImage
|
||||
}
|
||||
|
||||
if strings.TrimSpace(key.String()) != "--" {
|
||||
switch key.(type) {
|
||||
case name.Digest:
|
||||
o.nameMap.Store(fmt.Sprintf("%s-%s", key.Context().String(), desc.Annotations[consts.KindAnnotationName]), desc)
|
||||
o.nameMap.Store(fmt.Sprintf("%s-%s", key.Context().String(), kind), desc)
|
||||
case name.Tag:
|
||||
o.nameMap.Store(fmt.Sprintf("%s-%s", key.String(), desc.Annotations[consts.KindAnnotationName]), desc)
|
||||
o.nameMap.Store(fmt.Sprintf("%s-%s", key.String(), kind), desc)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -152,16 +158,16 @@ func (o *OCI) SaveIndex() error {
|
||||
// While the name may differ from ref, it should itself be a valid ref.
|
||||
//
|
||||
// If the resolution fails, an error will be returned.
|
||||
func (o *OCI) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
|
||||
func (o *OCI) Resolve(ctx context.Context, ref string) (ocispec.Descriptor, error) {
|
||||
if err := o.LoadIndex(); err != nil {
|
||||
return "", ocispec.Descriptor{}, err
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
d, ok := o.nameMap.Load(ref)
|
||||
if !ok {
|
||||
return "", ocispec.Descriptor{}, err
|
||||
return ocispec.Descriptor{}, fmt.Errorf("reference %s not found", ref)
|
||||
}
|
||||
desc = d.(ocispec.Descriptor)
|
||||
return ref, desc, nil
|
||||
desc := d.(ocispec.Descriptor)
|
||||
return desc, nil
|
||||
}
|
||||
|
||||
// Fetcher returns a new fetcher for the provided reference.
|
||||
@@ -229,7 +235,7 @@ func (o *OCI) Walk(fn func(reference string, desc ocispec.Descriptor) error) err
|
||||
return true
|
||||
})
|
||||
if errst != nil {
|
||||
return fmt.Errorf(strings.Join(errst, "; "))
|
||||
return fmt.Errorf("%s", strings.Join(errst, "; "))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -271,6 +277,12 @@ func (o *OCI) path(elem ...string) string {
|
||||
return filepath.Join(append(complete, elem...)...)
|
||||
}
|
||||
|
||||
// IndexExists reports whether the store's OCI layout index.json exists on disk.
|
||||
func (o *OCI) IndexExists() bool {
|
||||
_, err := os.Stat(o.path(ocispec.ImageIndexFile))
|
||||
return err == nil
|
||||
}
|
||||
|
||||
type ociPusher struct {
|
||||
oci *OCI
|
||||
ref string
|
||||
@@ -287,7 +299,13 @@ func (p *ociPusher) Push(ctx context.Context, d ocispec.Descriptor) (ccontent.Wr
|
||||
if err := p.oci.LoadIndex(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.oci.nameMap.Store(p.ref, d)
|
||||
// Use compound key format: "reference-kind"
|
||||
kind := d.Annotations[consts.KindAnnotationName]
|
||||
if kind == "" {
|
||||
kind = consts.KindAnnotationImage
|
||||
}
|
||||
key := fmt.Sprintf("%s-%s", p.ref, kind)
|
||||
p.oci.nameMap.Store(key, d)
|
||||
if err := p.oci.SaveIndex(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -301,7 +319,7 @@ func (p *ociPusher) Push(ctx context.Context, d ocispec.Descriptor) (ccontent.Wr
|
||||
|
||||
if _, err := os.Stat(blobPath); err == nil {
|
||||
// file already exists, discard (but validate digest)
|
||||
return content.NewIoContentWriter(io.Discard, content.WithOutputHash(d.Digest)), nil
|
||||
return NewIoContentWriter(nopCloser{io.Discard}, WithOutputHash(d.Digest.String())), nil
|
||||
}
|
||||
|
||||
f, err := os.Create(blobPath)
|
||||
@@ -309,6 +327,25 @@ func (p *ociPusher) Push(ctx context.Context, d ocispec.Descriptor) (ccontent.Wr
|
||||
return nil, err
|
||||
}
|
||||
|
||||
w := content.NewIoContentWriter(f, content.WithInputHash(d.Digest), content.WithOutputHash(d.Digest))
|
||||
w := NewIoContentWriter(f, WithOutputHash(d.Digest.String()))
|
||||
return w, nil
|
||||
}
|
||||
|
||||
func (o *OCI) RemoveFromIndex(ref string) {
|
||||
o.nameMap.Delete(ref)
|
||||
}
|
||||
|
||||
// ResolvePath returns the absolute path for a given relative path within the OCI root
|
||||
func (o *OCI) ResolvePath(elem string) string {
|
||||
if elem == "" {
|
||||
return o.root
|
||||
}
|
||||
return filepath.Join(o.root, elem)
|
||||
}
|
||||
|
||||
// nopCloser wraps an io.Writer to implement io.WriteCloser
|
||||
type nopCloser struct {
|
||||
io.Writer
|
||||
}
|
||||
|
||||
func (nopCloser) Close() error { return nil }
|
||||
|
||||
102
pkg/content/registry.go
Normal file
102
pkg/content/registry.go
Normal file
@@ -0,0 +1,102 @@
|
||||
package content
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/remotes"
|
||||
cdocker "github.com/containerd/containerd/remotes/docker"
|
||||
goauthn "github.com/google/go-containerregistry/pkg/authn"
|
||||
goname "github.com/google/go-containerregistry/pkg/name"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
var _ Target = (*RegistryTarget)(nil)
|
||||
|
||||
// RegistryTarget implements Target for pushing to a remote OCI registry.
|
||||
// Authentication is sourced from the local Docker credential store via go-containerregistry's
|
||||
// default keychain unless explicit credentials are provided in RegistryOptions.
|
||||
type RegistryTarget struct {
|
||||
resolver remotes.Resolver
|
||||
}
|
||||
|
||||
// NewRegistryTarget returns a RegistryTarget that pushes to host (e.g. "localhost:5000").
|
||||
func NewRegistryTarget(host string, opts RegistryOptions) *RegistryTarget {
|
||||
authorizer := cdocker.NewDockerAuthorizer(
|
||||
cdocker.WithAuthCreds(func(h string) (string, string, error) {
|
||||
if opts.Username != "" {
|
||||
return opts.Username, opts.Password, nil
|
||||
}
|
||||
// Bridge to go-containerregistry's keychain for credential lookup.
|
||||
reg, err := goname.NewRegistry(h, goname.Insecure)
|
||||
if err != nil {
|
||||
return "", "", nil
|
||||
}
|
||||
a, err := goauthn.DefaultKeychain.Resolve(reg)
|
||||
if err != nil || a == goauthn.Anonymous {
|
||||
return "", "", nil
|
||||
}
|
||||
cfg, err := a.Authorization()
|
||||
if err != nil {
|
||||
return "", "", nil
|
||||
}
|
||||
return cfg.Username, cfg.Password, nil
|
||||
}),
|
||||
)
|
||||
|
||||
hosts := func(h string) ([]cdocker.RegistryHost, error) {
|
||||
scheme := "https"
|
||||
if opts.PlainHTTP || opts.Insecure {
|
||||
scheme = "http"
|
||||
}
|
||||
return []cdocker.RegistryHost{{
|
||||
Client: http.DefaultClient,
|
||||
Authorizer: authorizer,
|
||||
Scheme: scheme,
|
||||
Host: h,
|
||||
Path: "/v2",
|
||||
Capabilities: cdocker.HostCapabilityPull | cdocker.HostCapabilityResolve | cdocker.HostCapabilityPush,
|
||||
}}, nil
|
||||
}
|
||||
|
||||
return &RegistryTarget{
|
||||
resolver: cdocker.NewResolver(cdocker.ResolverOptions{
|
||||
Hosts: hosts,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
func (t *RegistryTarget) Resolve(ctx context.Context, ref string) (ocispec.Descriptor, error) {
|
||||
_, desc, err := t.resolver.Resolve(ctx, ref)
|
||||
return desc, err
|
||||
}
|
||||
|
||||
func (t *RegistryTarget) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
|
||||
return t.resolver.Fetcher(ctx, ref)
|
||||
}
|
||||
|
||||
func (t *RegistryTarget) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
|
||||
return t.resolver.Pusher(ctx, ref)
|
||||
}
|
||||
|
||||
// RewriteRefToRegistry rewrites sourceRef to use targetRegistry as its host, preserving the
|
||||
// repository path and tag or digest. For example:
|
||||
//
|
||||
// "index.docker.io/library/nginx:latest" + "localhost:5000" → "localhost:5000/library/nginx:latest"
|
||||
func RewriteRefToRegistry(sourceRef string, targetRegistry string) (string, error) {
|
||||
ref, err := goname.ParseReference(sourceRef)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parsing reference %q: %w", sourceRef, err)
|
||||
}
|
||||
repo := strings.TrimPrefix(ref.Context().RepositoryStr(), "/")
|
||||
switch r := ref.(type) {
|
||||
case goname.Tag:
|
||||
return fmt.Sprintf("%s/%s:%s", targetRegistry, repo, r.TagStr()), nil
|
||||
case goname.Digest:
|
||||
return fmt.Sprintf("%s/%s@%s", targetRegistry, repo, r.DigestStr()), nil
|
||||
default:
|
||||
return fmt.Sprintf("%s/%s:latest", targetRegistry, repo), nil
|
||||
}
|
||||
}
|
||||
106
pkg/content/types.go
Normal file
106
pkg/content/types.go
Normal file
@@ -0,0 +1,106 @@
|
||||
package content
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
ccontent "github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// Target represents a content storage target with resolver, fetcher, and pusher capabilities
|
||||
type Target interface {
|
||||
Resolve(ctx context.Context, ref string) (ocispec.Descriptor, error)
|
||||
Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error)
|
||||
Pusher(ctx context.Context, ref string) (remotes.Pusher, error)
|
||||
}
|
||||
|
||||
// RegistryOptions holds registry configuration
|
||||
type RegistryOptions struct {
|
||||
PlainHTTP bool
|
||||
Insecure bool
|
||||
Username string
|
||||
Password string
|
||||
}
|
||||
|
||||
// ResolveName extracts the reference name from a descriptor's annotations
|
||||
func ResolveName(desc ocispec.Descriptor) (string, bool) {
|
||||
name, ok := desc.Annotations[ocispec.AnnotationRefName]
|
||||
return name, ok
|
||||
}
|
||||
|
||||
// IoContentWriter wraps an io.Writer to implement containerd's content.Writer
|
||||
type IoContentWriter struct {
|
||||
writer io.WriteCloser
|
||||
digester digest.Digester
|
||||
status ccontent.Status
|
||||
outputHash string
|
||||
}
|
||||
|
||||
// Write writes data to the underlying writer and updates the digest
|
||||
func (w *IoContentWriter) Write(p []byte) (n int, err error) {
|
||||
n, err = w.writer.Write(p)
|
||||
if n > 0 {
|
||||
w.digester.Hash().Write(p[:n])
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close closes the writer and verifies the digest if configured
|
||||
func (w *IoContentWriter) Close() error {
|
||||
if w.outputHash != "" {
|
||||
computed := w.digester.Digest().String()
|
||||
if computed != w.outputHash {
|
||||
return fmt.Errorf("digest mismatch: expected %s, got %s", w.outputHash, computed)
|
||||
}
|
||||
}
|
||||
return w.writer.Close()
|
||||
}
|
||||
|
||||
// Digest returns the current digest of written data
|
||||
func (w *IoContentWriter) Digest() digest.Digest {
|
||||
return w.digester.Digest()
|
||||
}
|
||||
|
||||
// Commit is a no-op for this implementation
|
||||
func (w *IoContentWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...ccontent.Opt) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Status returns the current status
|
||||
func (w *IoContentWriter) Status() (ccontent.Status, error) {
|
||||
return w.status, nil
|
||||
}
|
||||
|
||||
// Truncate is not supported
|
||||
func (w *IoContentWriter) Truncate(size int64) error {
|
||||
return fmt.Errorf("truncate not supported")
|
||||
}
|
||||
|
||||
type writerOption func(*IoContentWriter)
|
||||
|
||||
// WithOutputHash configures expected output hash for verification
|
||||
func WithOutputHash(hash string) writerOption {
|
||||
return func(w *IoContentWriter) {
|
||||
w.outputHash = hash
|
||||
}
|
||||
}
|
||||
|
||||
// NewIoContentWriter creates a new IoContentWriter
|
||||
func NewIoContentWriter(writer io.WriteCloser, opts ...writerOption) *IoContentWriter {
|
||||
w := &IoContentWriter{
|
||||
writer: writer,
|
||||
digester: digest.Canonical.Digester(),
|
||||
status: ccontent.Status{},
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(w)
|
||||
}
|
||||
return w
|
||||
}
|
||||
|
||||
// AnnotationUnpack is the annotation key for unpacking
|
||||
const AnnotationUnpack = "io.containerd.image.unpack"
|
||||
@@ -2,158 +2,57 @@ package cosign
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/sigstore/cosign/v2/cmd/cosign/cli"
|
||||
"github.com/sigstore/cosign/v2/cmd/cosign/cli/options"
|
||||
"github.com/sigstore/cosign/v2/cmd/cosign/cli/verify"
|
||||
|
||||
"github.com/sigstore/cosign/v3/cmd/cosign/cli/options"
|
||||
"github.com/sigstore/cosign/v3/cmd/cosign/cli/verify"
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/artifacts/image"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
"oras.land/oras-go/pkg/content"
|
||||
"os"
|
||||
"time"
|
||||
"hauler.dev/go/hauler/pkg/retry"
|
||||
)
|
||||
|
||||
// VerifyFileSignature verifies the digital signature of a file using Sigstore/Cosign.
|
||||
func VerifySignature(ctx context.Context, s *store.Layout, keyPath string, useTlog bool, ref string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
// VerifySignature verifies the digital signature of an image using Sigstore/Cosign.
|
||||
func VerifySignature(ctx context.Context, keyPath string, useTlog bool, ref string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
operation := func() error {
|
||||
v := &verify.VerifyCommand{
|
||||
KeyRef: keyPath,
|
||||
IgnoreTlog: true, // Ignore transparency log by default.
|
||||
KeyRef: keyPath,
|
||||
IgnoreTlog: true, // Ignore transparency log by default.
|
||||
NewBundleFormat: true,
|
||||
}
|
||||
|
||||
// if the user wants to use the transparency log, set the flag to false
|
||||
if useTlog {
|
||||
v.IgnoreTlog = false
|
||||
}
|
||||
|
||||
err := log.CaptureOutput(l, true, func() error {
|
||||
return log.CaptureOutput(l, true, func() error {
|
||||
return v.Exec(ctx, []string{ref})
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return RetryOperation(ctx, rso, ro, operation)
|
||||
return retry.Operation(ctx, rso, ro, operation)
|
||||
}
|
||||
|
||||
// SaveImage saves image and any signatures/attestations to the store.
|
||||
func SaveImage(ctx context.Context, s *store.Layout, ref string, platform string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
// VerifyKeylessSignature verifies an image signature using keyless/OIDC identity.
|
||||
func VerifyKeylessSignature(ctx context.Context, identity string, identityRegexp string, oidcIssuer string, oidcIssuerRegexp string, ghWorkflowRepository string, ref string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
if !ro.IgnoreErrors {
|
||||
envVar := os.Getenv(consts.HaulerIgnoreErrors)
|
||||
if envVar == "true" {
|
||||
ro.IgnoreErrors = true
|
||||
}
|
||||
}
|
||||
|
||||
operation := func() error {
|
||||
o := &options.SaveOptions{
|
||||
Directory: s.Root,
|
||||
certVerifyOptions := options.CertVerifyOptions{
|
||||
CertOidcIssuer: oidcIssuer,
|
||||
CertOidcIssuerRegexp: oidcIssuerRegexp,
|
||||
CertIdentity: identity,
|
||||
CertIdentityRegexp: identityRegexp,
|
||||
CertGithubWorkflowRepository: ghWorkflowRepository,
|
||||
}
|
||||
|
||||
// check to see if the image is multi-arch
|
||||
isMultiArch, err := image.IsMultiArchImage(ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
l.Debugf("multi-arch image [%v]", isMultiArch)
|
||||
|
||||
// Conditionally add platform.
|
||||
if platform != "" && isMultiArch {
|
||||
l.Debugf("platform for image [%s]", platform)
|
||||
o.Platform = platform
|
||||
v := &verify.VerifyCommand{
|
||||
CertVerifyOptions: certVerifyOptions,
|
||||
IgnoreTlog: false, // Use transparency log by default for keyless verification.
|
||||
CertGithubWorkflowRepository: ghWorkflowRepository,
|
||||
NewBundleFormat: true,
|
||||
}
|
||||
|
||||
err = cli.SaveCmd(ctx, *o, ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
return log.CaptureOutput(l, true, func() error {
|
||||
return v.Exec(ctx, []string{ref})
|
||||
})
|
||||
}
|
||||
|
||||
return RetryOperation(ctx, rso, ro, operation)
|
||||
}
|
||||
|
||||
// LoadImage loads store to a remote registry.
|
||||
func LoadImages(ctx context.Context, s *store.Layout, registry string, ropts content.RegistryOptions, ro *flags.CliRootOpts) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
o := &options.LoadOptions{
|
||||
Directory: s.Root,
|
||||
Registry: options.RegistryOptions{
|
||||
Name: registry,
|
||||
},
|
||||
}
|
||||
|
||||
// Conditionally add extra registry flags.
|
||||
if ropts.Insecure {
|
||||
o.Registry.AllowInsecure = true
|
||||
}
|
||||
if ropts.PlainHTTP {
|
||||
o.Registry.AllowHTTPRegistry = true
|
||||
}
|
||||
|
||||
if ropts.Username != "" {
|
||||
o.Registry.AuthConfig.Username = ropts.Username
|
||||
o.Registry.AuthConfig.Password = ropts.Password
|
||||
}
|
||||
|
||||
// execute the cosign load and capture the output in our logger
|
||||
err := log.CaptureOutput(l, false, func() error {
|
||||
return cli.LoadCmd(ctx, *o, "")
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func RetryOperation(ctx context.Context, rso *flags.StoreRootOpts, ro *flags.CliRootOpts, operation func() error) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
if !ro.IgnoreErrors {
|
||||
envVar := os.Getenv(consts.HaulerIgnoreErrors)
|
||||
if envVar == "true" {
|
||||
ro.IgnoreErrors = true
|
||||
}
|
||||
}
|
||||
|
||||
// Validate retries and fall back to a default
|
||||
retries := rso.Retries
|
||||
if retries <= 0 {
|
||||
retries = consts.DefaultRetries
|
||||
}
|
||||
|
||||
for attempt := 1; attempt <= rso.Retries; attempt++ {
|
||||
err := operation()
|
||||
if err == nil {
|
||||
// If the operation succeeds, return nil (no error)
|
||||
return nil
|
||||
}
|
||||
|
||||
if ro.IgnoreErrors {
|
||||
l.Warnf("warning (attempt %d/%d)... %v", attempt, rso.Retries, err)
|
||||
} else {
|
||||
l.Errorf("error (attempt %d/%d)... %v", attempt, rso.Retries, err)
|
||||
}
|
||||
|
||||
// If this is not the last attempt, wait before retrying
|
||||
if attempt < rso.Retries {
|
||||
time.Sleep(time.Second * consts.RetriesInterval)
|
||||
}
|
||||
}
|
||||
|
||||
// If all attempts fail, return an error
|
||||
return fmt.Errorf("operation unsuccessful after %d attempts", rso.Retries)
|
||||
return retry.Operation(ctx, rso, ro, operation)
|
||||
}
|
||||
|
||||
@@ -33,30 +33,37 @@ func (d directory) Open(ctx context.Context, u *url.URL) (io.ReadCloser, error)
|
||||
|
||||
digester := digest.Canonical.Digester()
|
||||
zw := gzip.NewWriter(io.MultiWriter(tmpfile, digester.Hash()))
|
||||
defer zw.Close()
|
||||
|
||||
tarDigester := digest.Canonical.Digester()
|
||||
if err := tarDir(d.path(u), d.Name(u), io.MultiWriter(zw, tarDigester.Hash()), false); err != nil {
|
||||
zw.Close()
|
||||
tmpfile.Close()
|
||||
os.Remove(tmpfile.Name())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := zw.Close(); err != nil {
|
||||
tmpfile.Close()
|
||||
os.Remove(tmpfile.Name())
|
||||
return nil, err
|
||||
}
|
||||
if err := tmpfile.Sync(); err != nil {
|
||||
tmpfile.Close()
|
||||
os.Remove(tmpfile.Name())
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fi, err := os.Open(tmpfile.Name())
|
||||
// Close the write handle; re-open as read-only
|
||||
tmpName := tmpfile.Name()
|
||||
tmpfile.Close()
|
||||
|
||||
fi, err := os.Open(tmpName)
|
||||
if err != nil {
|
||||
os.Remove(tmpName)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// rc := &closer{
|
||||
// t: io.TeeReader(tmpfile, fi),
|
||||
// closes: []func() error{fi.Close, tmpfile.Close, zw.Close},
|
||||
// }
|
||||
return fi, nil
|
||||
return &tempFileReadCloser{File: fi, path: tmpName}, nil
|
||||
}
|
||||
|
||||
func (d directory) Detect(u *url.URL) bool {
|
||||
@@ -144,22 +151,15 @@ func tarDir(root string, prefix string, w io.Writer, stripTimes bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type closer struct {
|
||||
t io.Reader
|
||||
closes []func() error
|
||||
// tempFileReadCloser wraps an *os.File and removes the underlying
|
||||
// temp file when closed.
|
||||
type tempFileReadCloser struct {
|
||||
*os.File
|
||||
path string
|
||||
}
|
||||
|
||||
func (c *closer) Read(p []byte) (n int, err error) {
|
||||
return c.t.Read(p)
|
||||
}
|
||||
|
||||
func (c *closer) Close() error {
|
||||
var err error
|
||||
for _, c := range c.closes {
|
||||
lastErr := c()
|
||||
if err == nil {
|
||||
err = lastErr
|
||||
}
|
||||
}
|
||||
func (t *tempFileReadCloser) Close() error {
|
||||
err := t.File.Close()
|
||||
os.Remove(t.path)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -9,10 +9,10 @@ import (
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"oras.land/oras-go/pkg/content"
|
||||
|
||||
content2 "hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/content"
|
||||
"hauler.dev/go/hauler/pkg/layer"
|
||||
)
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package getter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
@@ -24,8 +25,9 @@ func (h Http) Name(u *url.URL) string {
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
name, _ := url.PathUnescape(u.String())
|
||||
unescaped, err := url.PathUnescape(u.String())
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
@@ -40,8 +42,7 @@ func (h Http) Name(u *url.URL) string {
|
||||
_ = t
|
||||
}
|
||||
|
||||
// TODO: Not this
|
||||
return filepath.Base(name)
|
||||
return filepath.Base(unescaped)
|
||||
}
|
||||
|
||||
func (h Http) Open(ctx context.Context, u *url.URL) (io.ReadCloser, error) {
|
||||
@@ -49,6 +50,10 @@ func (h Http) Open(ctx context.Context, u *url.URL) (io.ReadCloser, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
resp.Body.Close()
|
||||
return nil, fmt.Errorf("unexpected status fetching %s: %s", u.String(), resp.Status)
|
||||
}
|
||||
return resp.Body, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -66,10 +66,11 @@ func (l *cachedLayer) create(h v1.Hash) (io.WriteCloser, error) {
|
||||
func (l *cachedLayer) Compressed() (io.ReadCloser, error) {
|
||||
f, err := l.create(l.digest)
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
return nil, err
|
||||
}
|
||||
rc, err := l.Layer.Compressed()
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
return &readcloser{
|
||||
@@ -85,6 +86,7 @@ func (l *cachedLayer) Uncompressed() (io.ReadCloser, error) {
|
||||
}
|
||||
rc, err := l.Layer.Uncompressed()
|
||||
if err != nil {
|
||||
f.Close()
|
||||
return nil, err
|
||||
}
|
||||
return &readcloser{
|
||||
|
||||
57
pkg/retry/retry.go
Normal file
57
pkg/retry/retry.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package retry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/log"
|
||||
)
|
||||
|
||||
// Operation retries the given operation according to the retry settings in rso/ro.
|
||||
func Operation(ctx context.Context, rso *flags.StoreRootOpts, ro *flags.CliRootOpts, operation func() error) error {
|
||||
l := log.FromContext(ctx)
|
||||
|
||||
if !ro.IgnoreErrors {
|
||||
if os.Getenv(consts.HaulerIgnoreErrors) == "true" {
|
||||
ro.IgnoreErrors = true
|
||||
}
|
||||
}
|
||||
|
||||
retries := rso.Retries
|
||||
if retries <= 0 {
|
||||
retries = consts.DefaultRetries
|
||||
}
|
||||
|
||||
for attempt := 1; attempt <= retries; attempt++ {
|
||||
err := operation()
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
isTlogErr := strings.HasPrefix(err.Error(), "function execution failed: no matching signatures: rekor client not provided for online verification")
|
||||
if ro.IgnoreErrors {
|
||||
if isTlogErr {
|
||||
l.Warnf("warning (attempt %d/%d)... failed tlog verification", attempt, retries)
|
||||
} else {
|
||||
l.Warnf("warning (attempt %d/%d)... %v", attempt, retries, err)
|
||||
}
|
||||
} else {
|
||||
if isTlogErr {
|
||||
l.Errorf("error (attempt %d/%d)... failed tlog verification", attempt, retries)
|
||||
} else {
|
||||
l.Errorf("error (attempt %d/%d)... %v", attempt, retries, err)
|
||||
}
|
||||
}
|
||||
|
||||
if attempt < retries {
|
||||
time.Sleep(time.Second * consts.RetriesInterval)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("operation unsuccessful after %d attempts", retries)
|
||||
}
|
||||
160
pkg/retry/retry_test.go
Normal file
160
pkg/retry/retry_test.go
Normal file
@@ -0,0 +1,160 @@
|
||||
package retry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/rs/zerolog"
|
||||
|
||||
"hauler.dev/go/hauler/internal/flags"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
)
|
||||
|
||||
func testContext() context.Context {
|
||||
l := zerolog.New(io.Discard)
|
||||
return l.WithContext(context.Background())
|
||||
}
|
||||
|
||||
func TestOperation_SucceedsFirstAttempt(t *testing.T) {
|
||||
ctx := testContext()
|
||||
rso := &flags.StoreRootOpts{Retries: 1}
|
||||
ro := &flags.CliRootOpts{}
|
||||
|
||||
callCount := 0
|
||||
err := Operation(ctx, rso, ro, func() error {
|
||||
callCount++
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if callCount != 1 {
|
||||
t.Fatalf("expected 1 call, got %d", callCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOperation_ExhaustsRetries(t *testing.T) {
|
||||
ctx := testContext()
|
||||
// Retries=1 → 1 attempt, 0 sleeps (sleep is skipped on last attempt).
|
||||
rso := &flags.StoreRootOpts{Retries: 1}
|
||||
ro := &flags.CliRootOpts{}
|
||||
|
||||
callCount := 0
|
||||
err := Operation(ctx, rso, ro, func() error {
|
||||
callCount++
|
||||
return fmt.Errorf("always fails")
|
||||
})
|
||||
|
||||
if err == nil {
|
||||
t.Fatal("expected error after exhausting retries, got nil")
|
||||
}
|
||||
if callCount != 1 {
|
||||
t.Fatalf("expected 1 call, got %d", callCount)
|
||||
}
|
||||
want := fmt.Sprintf("operation unsuccessful after %d attempts", 1)
|
||||
if err.Error() != want {
|
||||
t.Fatalf("error = %q, want %q", err.Error(), want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOperation_RetriesAndSucceeds(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping: requires one RetriesInterval sleep (5s)")
|
||||
}
|
||||
ctx := testContext()
|
||||
// Retries=2: fails on attempt 1, succeeds on attempt 2 (one 5s sleep).
|
||||
rso := &flags.StoreRootOpts{Retries: 2}
|
||||
ro := &flags.CliRootOpts{}
|
||||
|
||||
callCount := 0
|
||||
err := Operation(ctx, rso, ro, func() error {
|
||||
callCount++
|
||||
if callCount < 2 {
|
||||
return fmt.Errorf("transient error")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("expected success on retry, got: %v", err)
|
||||
}
|
||||
if callCount != 2 {
|
||||
t.Fatalf("expected 2 calls, got %d", callCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOperation_DefaultRetries(t *testing.T) {
|
||||
ctx := testContext()
|
||||
// Retries=0 → falls back to consts.DefaultRetries (3).
|
||||
// Verify happy path (success first attempt) is unaffected.
|
||||
rso := &flags.StoreRootOpts{Retries: 0}
|
||||
ro := &flags.CliRootOpts{}
|
||||
|
||||
callCount := 0
|
||||
err := Operation(ctx, rso, ro, func() error {
|
||||
callCount++
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("expected no error, got: %v", err)
|
||||
}
|
||||
if callCount != 1 {
|
||||
t.Fatalf("expected 1 call, got %d", callCount)
|
||||
}
|
||||
|
||||
// Exhausting all default retries requires (DefaultRetries-1) sleeps of 5s each.
|
||||
// Only run this sub-test in non-short mode.
|
||||
t.Run("FailAllWithDefault", func(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping: requires (DefaultRetries-1) * 5s sleeps")
|
||||
}
|
||||
rso2 := &flags.StoreRootOpts{Retries: 0}
|
||||
ro2 := &flags.CliRootOpts{}
|
||||
callCount2 := 0
|
||||
err2 := Operation(ctx, rso2, ro2, func() error {
|
||||
callCount2++
|
||||
return fmt.Errorf("fail")
|
||||
})
|
||||
if err2 == nil {
|
||||
t.Fatal("expected error, got nil")
|
||||
}
|
||||
if callCount2 != consts.DefaultRetries {
|
||||
t.Fatalf("expected %d calls (DefaultRetries), got %d", consts.DefaultRetries, callCount2)
|
||||
}
|
||||
want := fmt.Sprintf("operation unsuccessful after %d attempts", consts.DefaultRetries)
|
||||
if err2.Error() != want {
|
||||
t.Fatalf("error = %q, want %q", err2.Error(), want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestOperation_EnvVar_IgnoreErrors(t *testing.T) {
|
||||
ctx := testContext()
|
||||
// Retries=1 → 1 attempt, no sleep.
|
||||
rso := &flags.StoreRootOpts{Retries: 1}
|
||||
ro := &flags.CliRootOpts{IgnoreErrors: false}
|
||||
|
||||
t.Setenv(consts.HaulerIgnoreErrors, "true")
|
||||
|
||||
callCount := 0
|
||||
err := Operation(ctx, rso, ro, func() error {
|
||||
callCount++
|
||||
return fmt.Errorf("some error")
|
||||
})
|
||||
|
||||
// IgnoreErrors controls logging severity (WARN instead of ERR) — it does NOT
|
||||
// suppress error returns. Operation always returns an error after exhausting
|
||||
// all retries regardless of this flag (see pkg/retry/retry.go).
|
||||
if err == nil {
|
||||
t.Fatal("expected error after exhausting retries, got nil")
|
||||
}
|
||||
if !ro.IgnoreErrors {
|
||||
t.Fatal("expected ro.IgnoreErrors=true after env var override")
|
||||
}
|
||||
if callCount != 1 {
|
||||
t.Fatalf("expected 1 call, got %d", callCount)
|
||||
}
|
||||
}
|
||||
@@ -1,19 +1,26 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
gname "github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
"github.com/google/go-containerregistry/pkg/v1/static"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/rs/zerolog"
|
||||
"golang.org/x/sync/errgroup"
|
||||
"oras.land/oras-go/pkg/oras"
|
||||
"oras.land/oras-go/pkg/target"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
@@ -57,13 +64,13 @@ func NewLayout(rootdir string, opts ...Options) (*Layout, error) {
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// AddOCI adds an artifacts.OCI to the store
|
||||
// AddArtifact adds an artifacts.OCI to the store
|
||||
//
|
||||
// The method to achieve this is to save artifact.OCI to a temporary directory in an OCI layout compatible form. Once
|
||||
// saved, the entirety of the layout is copied to the store (which is just a registry). This allows us to not only use
|
||||
// strict types to define generic content, but provides a processing pipeline suitable for extensibility. In the
|
||||
// future we'll allow users to define their own content that must adhere either by artifact.OCI or simply an OCI layout.
|
||||
func (l *Layout) AddOCI(ctx context.Context, oci artifacts.OCI, ref string) (ocispec.Descriptor, error) {
|
||||
func (l *Layout) AddArtifact(ctx context.Context, oci artifacts.OCI, ref string) (ocispec.Descriptor, error) {
|
||||
if l.cache != nil {
|
||||
cached := layer.OCICache(oci, l.cache)
|
||||
oci = cached
|
||||
@@ -89,8 +96,6 @@ func (l *Layout) AddOCI(ctx context.Context, oci artifacts.OCI, ref string) (oci
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
static.NewLayer(cdata, "")
|
||||
|
||||
if err := l.writeBlobData(cdata); err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
@@ -128,8 +133,8 @@ func (l *Layout) AddOCI(ctx context.Context, oci artifacts.OCI, ref string) (oci
|
||||
return idx, l.OCI.AddIndex(idx)
|
||||
}
|
||||
|
||||
// AddOCICollection .
|
||||
func (l *Layout) AddOCICollection(ctx context.Context, collection artifacts.OCICollection) ([]ocispec.Descriptor, error) {
|
||||
// AddArtifactCollection .
|
||||
func (l *Layout) AddArtifactCollection(ctx context.Context, collection artifacts.OCICollection) ([]ocispec.Descriptor, error) {
|
||||
cnts, err := collection.Contents()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -137,7 +142,7 @@ func (l *Layout) AddOCICollection(ctx context.Context, collection artifacts.OCIC
|
||||
|
||||
var descs []ocispec.Descriptor
|
||||
for ref, oci := range cnts {
|
||||
desc, err := l.AddOCI(ctx, oci, ref)
|
||||
desc, err := l.AddArtifact(ctx, oci, ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -146,6 +151,325 @@ func (l *Layout) AddOCICollection(ctx context.Context, collection artifacts.OCIC
|
||||
return descs, nil
|
||||
}
|
||||
|
||||
// AddImage fetches a container image (or full index for multi-arch images) from a remote registry
|
||||
// and saves it to the store along with any associated signatures, attestations, and SBOMs
|
||||
// discovered via cosign's tag convention (<digest>.sig, <digest>.att, <digest>.sbom).
|
||||
// When platform is non-empty and the ref is a multi-arch index, only that platform is fetched.
|
||||
func (l *Layout) AddImage(ctx context.Context, ref string, platform string, opts ...remote.Option) error {
|
||||
allOpts := append([]remote.Option{
|
||||
remote.WithAuthFromKeychain(authn.DefaultKeychain),
|
||||
remote.WithContext(ctx),
|
||||
}, opts...)
|
||||
|
||||
parsedRef, err := gname.ParseReference(ref)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing reference %q: %w", ref, err)
|
||||
}
|
||||
|
||||
desc, err := remote.Get(parsedRef, allOpts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching descriptor for %q: %w", ref, err)
|
||||
}
|
||||
|
||||
var imageDigest v1.Hash
|
||||
|
||||
if idx, idxErr := desc.ImageIndex(); idxErr == nil && platform == "" {
|
||||
// Multi-arch image with no platform filter: save the full index.
|
||||
imageDigest, err = idx.Digest()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting index digest for %q: %w", ref, err)
|
||||
}
|
||||
if err := l.writeIndex(parsedRef, idx, consts.KindAnnotationIndex); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Single-platform image, or the caller requested a specific platform.
|
||||
imgOpts := append([]remote.Option{}, allOpts...)
|
||||
if platform != "" {
|
||||
p, err := parsePlatform(platform)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
imgOpts = append(imgOpts, remote.WithPlatform(p))
|
||||
}
|
||||
img, err := remote.Image(parsedRef, imgOpts...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("fetching image %q: %w", ref, err)
|
||||
}
|
||||
imageDigest, err = img.Digest()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting image digest for %q: %w", ref, err)
|
||||
}
|
||||
if err := l.writeImage(parsedRef, img, consts.KindAnnotationImage, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
savedDigests, err := l.saveRelatedArtifacts(ctx, parsedRef, imageDigest, allOpts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return l.saveReferrers(ctx, parsedRef, imageDigest, savedDigests, allOpts...)
|
||||
}
|
||||
|
||||
// writeImageBlobs writes all blobs for a single image (layers, config, manifest) to the store's
|
||||
// blob directory. It does not add an entry to the OCI index.
|
||||
func (l *Layout) writeImageBlobs(img v1.Image) error {
|
||||
layers, err := img.Layers()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting layers: %w", err)
|
||||
}
|
||||
var g errgroup.Group
|
||||
for _, lyr := range layers {
|
||||
lyr := lyr
|
||||
g.Go(func() error { return l.writeLayer(lyr) })
|
||||
}
|
||||
if err := g.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cfgData, err := img.RawConfigFile()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting config: %w", err)
|
||||
}
|
||||
if err := l.writeBlobData(cfgData); err != nil {
|
||||
return fmt.Errorf("writing config blob: %w", err)
|
||||
}
|
||||
|
||||
manifestData, err := img.RawManifest()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting manifest: %w", err)
|
||||
}
|
||||
return l.writeBlobData(manifestData)
|
||||
}
|
||||
|
||||
// writeImage writes all blobs for img and adds a descriptor entry to the OCI index with the
|
||||
// given annotationRef and kind. containerdName overrides the io.containerd.image.name annotation;
|
||||
// if empty it defaults to annotationRef.Name().
|
||||
func (l *Layout) writeImage(annotationRef gname.Reference, img v1.Image, kind string, containerdName string) error {
|
||||
if err := l.writeImageBlobs(img); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mt, err := img.MediaType()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting media type: %w", err)
|
||||
}
|
||||
hash, err := img.Digest()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting digest: %w", err)
|
||||
}
|
||||
d, err := digest.Parse(hash.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing digest: %w", err)
|
||||
}
|
||||
raw, err := img.RawManifest()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting raw manifest size: %w", err)
|
||||
}
|
||||
|
||||
if containerdName == "" {
|
||||
containerdName = annotationRef.Name()
|
||||
}
|
||||
desc := ocispec.Descriptor{
|
||||
MediaType: string(mt),
|
||||
Digest: d,
|
||||
Size: int64(len(raw)),
|
||||
Annotations: map[string]string{
|
||||
consts.KindAnnotationName: kind,
|
||||
ocispec.AnnotationRefName: strings.TrimPrefix(annotationRef.Name(), annotationRef.Context().RegistryStr()+"/"),
|
||||
consts.ContainerdImageNameKey: containerdName,
|
||||
},
|
||||
}
|
||||
return l.OCI.AddIndex(desc)
|
||||
}
|
||||
|
||||
// writeIndexBlobs recursively writes all child image blobs for an image index to the store's blob
|
||||
// directory. It does not write the top-level index manifest or add index entries.
|
||||
func (l *Layout) writeIndexBlobs(idx v1.ImageIndex) error {
|
||||
manifest, err := idx.IndexManifest()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting index manifest: %w", err)
|
||||
}
|
||||
|
||||
for _, childDesc := range manifest.Manifests {
|
||||
// Try as a nested index first, then fall back to a regular image.
|
||||
if childIdx, err := idx.ImageIndex(childDesc.Digest); err == nil {
|
||||
if err := l.writeIndexBlobs(childIdx); err != nil {
|
||||
return err
|
||||
}
|
||||
raw, err := childIdx.RawManifest()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting nested index manifest: %w", err)
|
||||
}
|
||||
if err := l.writeBlobData(raw); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
childImg, err := idx.Image(childDesc.Digest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting child image %v: %w", childDesc.Digest, err)
|
||||
}
|
||||
if err := l.writeImageBlobs(childImg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeIndex writes all blobs for an image index (including all child platform images) and adds
|
||||
// a descriptor entry to the OCI index with the given annotationRef and kind.
|
||||
func (l *Layout) writeIndex(annotationRef gname.Reference, idx v1.ImageIndex, kind string) error {
|
||||
if err := l.writeIndexBlobs(idx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
raw, err := idx.RawManifest()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting index manifest: %w", err)
|
||||
}
|
||||
if err := l.writeBlobData(raw); err != nil {
|
||||
return fmt.Errorf("writing index manifest blob: %w", err)
|
||||
}
|
||||
|
||||
mt, err := idx.MediaType()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting index media type: %w", err)
|
||||
}
|
||||
hash, err := idx.Digest()
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting index digest: %w", err)
|
||||
}
|
||||
d, err := digest.Parse(hash.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing index digest: %w", err)
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{
|
||||
MediaType: string(mt),
|
||||
Digest: d,
|
||||
Size: int64(len(raw)),
|
||||
Annotations: map[string]string{
|
||||
consts.KindAnnotationName: kind,
|
||||
ocispec.AnnotationRefName: strings.TrimPrefix(annotationRef.Name(), annotationRef.Context().RegistryStr()+"/"),
|
||||
consts.ContainerdImageNameKey: annotationRef.Name(),
|
||||
},
|
||||
}
|
||||
return l.OCI.AddIndex(desc)
|
||||
}
|
||||
|
||||
// saveReferrers discovers and saves OCI 1.1 referrers for the image identified by ref/hash.
|
||||
// This captures cosign v3 new-bundle-format signatures/attestations stored as OCI referrers
|
||||
// (via the subject field) rather than the legacy sha256-<hex>.sig/.att/.sbom tag convention.
|
||||
// go-containerregistry handles both the native referrers API and the tag-based fallback.
|
||||
// Missing referrers and fetch errors are logged at debug level and silently skipped.
|
||||
func (l *Layout) saveReferrers(ctx context.Context, ref gname.Reference, hash v1.Hash, alreadySaved map[string]bool, opts ...remote.Option) error {
|
||||
log := zerolog.Ctx(ctx)
|
||||
|
||||
imageDigestRef, err := gname.NewDigest(ref.Context().String() + "@" + hash.String())
|
||||
if err != nil {
|
||||
log.Debug().Err(err).Msgf("saveReferrers: could not construct digest ref for %s", ref.Name())
|
||||
return nil
|
||||
}
|
||||
|
||||
idx, err := remote.Referrers(imageDigestRef, opts...)
|
||||
if err != nil {
|
||||
// Most registries that don't support the referrers API return 404; not an error.
|
||||
log.Debug().Err(err).Msgf("no OCI referrers found for %s@%s", ref.Name(), hash)
|
||||
return nil
|
||||
}
|
||||
|
||||
idxManifest, err := idx.IndexManifest()
|
||||
if err != nil {
|
||||
log.Debug().Err(err).Msgf("saveReferrers: could not read referrers index for %s", ref.Name())
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, referrerDesc := range idxManifest.Manifests {
|
||||
digestRef, err := gname.NewDigest(ref.Context().String() + "@" + referrerDesc.Digest.String())
|
||||
if err != nil {
|
||||
log.Debug().Err(err).Msgf("saveReferrers: could not construct digest ref for referrer %s", referrerDesc.Digest)
|
||||
continue
|
||||
}
|
||||
|
||||
img, err := remote.Image(digestRef, opts...)
|
||||
if err != nil {
|
||||
log.Debug().Err(err).Msgf("saveReferrers: could not fetch referrer manifest %s", referrerDesc.Digest)
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip referrers already saved via the cosign tag convention to avoid duplicates.
|
||||
// Registries like Harbor expose the same manifest via both the .sig/.att/.sbom tags
|
||||
// and the OCI Referrers API when the manifest carries a subject field.
|
||||
if alreadySaved[referrerDesc.Digest.String()] {
|
||||
log.Debug().Msgf("saveReferrers: skipping referrer %s (already saved via tag convention)", referrerDesc.Digest)
|
||||
continue
|
||||
}
|
||||
|
||||
// Embed the referrer manifest digest in the kind annotation so that multiple
|
||||
// referrers for the same base image each get a unique entry in the OCI index.
|
||||
kind := consts.KindAnnotationReferrers + "/" + referrerDesc.Digest.Hex
|
||||
if err := l.writeImage(ref, img, kind, ""); err != nil {
|
||||
return fmt.Errorf("saving OCI referrer %s for %s: %w", referrerDesc.Digest, ref.Name(), err)
|
||||
}
|
||||
log.Debug().Msgf("saved OCI referrer %s (%s) for %s", referrerDesc.Digest, string(referrerDesc.ArtifactType), ref.Name())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// saveRelatedArtifacts discovers and saves cosign-compatible signature, attestation, and SBOM
|
||||
// artifacts for the image identified by ref/hash. Missing artifacts are silently skipped.
|
||||
// Returns the set of manifest digest strings (e.g. "sha256:abc...") that were saved, so that
|
||||
// saveReferrers can skip duplicates when a registry exposes the same manifest via both paths.
|
||||
func (l *Layout) saveRelatedArtifacts(ctx context.Context, ref gname.Reference, hash v1.Hash, opts ...remote.Option) (map[string]bool, error) {
|
||||
saved := make(map[string]bool)
|
||||
|
||||
// Cosign tag convention: "sha256:hexvalue" → "sha256-hexvalue.sig" / ".att" / ".sbom"
|
||||
tagPrefix := strings.ReplaceAll(hash.String(), ":", "-")
|
||||
|
||||
related := []struct {
|
||||
tag string
|
||||
kind string
|
||||
}{
|
||||
{tagPrefix + ".sig", consts.KindAnnotationSigs},
|
||||
{tagPrefix + ".att", consts.KindAnnotationAtts},
|
||||
{tagPrefix + ".sbom", consts.KindAnnotationSboms},
|
||||
}
|
||||
|
||||
for _, r := range related {
|
||||
artifactRef, err := gname.ParseReference(ref.Context().String() + ":" + r.tag)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
img, err := remote.Image(artifactRef, opts...)
|
||||
if err != nil {
|
||||
// Artifact doesn't exist at this registry; skip silently.
|
||||
continue
|
||||
}
|
||||
if err := l.writeImage(ref, img, r.kind, ""); err != nil {
|
||||
return saved, fmt.Errorf("saving %s for %s: %w", r.kind, ref.Name(), err)
|
||||
}
|
||||
if d, err := img.Digest(); err == nil {
|
||||
saved[d.String()] = true
|
||||
}
|
||||
}
|
||||
return saved, nil
|
||||
}
|
||||
|
||||
// parsePlatform parses a platform string in "os/arch[/variant]" format into a v1.Platform.
|
||||
func parsePlatform(s string) (v1.Platform, error) {
|
||||
parts := strings.SplitN(s, "/", 3)
|
||||
if len(parts) < 2 {
|
||||
return v1.Platform{}, fmt.Errorf("invalid platform %q: expected os/arch[/variant]", s)
|
||||
}
|
||||
p := v1.Platform{OS: parts[0], Architecture: parts[1]}
|
||||
if len(parts) == 3 {
|
||||
p.Variant = parts[2]
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// Flush is a fancy name for delete-all-the-things, in this case it's as trivial as deleting oci-layout content
|
||||
//
|
||||
// This can be a highly destructive operation if the store's directory happens to be inline with other non-store contents
|
||||
@@ -169,27 +493,217 @@ func (l *Layout) Flush(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Copy will copy a given reference to a given target.Target
|
||||
// Copy will copy a given reference to a given content.Target
|
||||
//
|
||||
// This is essentially a wrapper around oras.Copy, but locked to this content store
|
||||
func (l *Layout) Copy(ctx context.Context, ref string, to target.Target, toRef string) (ocispec.Descriptor, error) {
|
||||
return oras.Copy(ctx, l.OCI, ref, to, toRef,
|
||||
oras.WithAdditionalCachedMediaTypes(consts.DockerManifestSchema2, consts.DockerManifestListSchema2))
|
||||
// This is essentially a replacement for oras.Copy, custom implementation for content stores
|
||||
func (l *Layout) Copy(ctx context.Context, ref string, to content.Target, toRef string) (ocispec.Descriptor, error) {
|
||||
// Resolve the source descriptor
|
||||
desc, err := l.OCI.Resolve(ctx, ref)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, fmt.Errorf("failed to resolve reference: %w", err)
|
||||
}
|
||||
|
||||
// Get fetcher and pusher
|
||||
fetcher, err := l.OCI.Fetcher(ctx, ref)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, fmt.Errorf("failed to get fetcher: %w", err)
|
||||
}
|
||||
|
||||
pusher, err := to.Pusher(ctx, toRef)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, fmt.Errorf("failed to get pusher: %w", err)
|
||||
}
|
||||
|
||||
// Recursively copy the descriptor graph (matches oras.Copy behavior)
|
||||
if err := l.copyDescriptorGraph(ctx, desc, fetcher, pusher); err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
return desc, nil
|
||||
}
|
||||
|
||||
// CopyAll performs bulk copy operations on the stores oci layout to a provided target.Target
|
||||
func (l *Layout) CopyAll(ctx context.Context, to target.Target, toMapper func(string) (string, error)) ([]ocispec.Descriptor, error) {
|
||||
// copyDescriptorGraph recursively copies a descriptor and all its referenced content
|
||||
// This matches the behavior of oras.Copy by walking the entire descriptor graph
|
||||
func (l *Layout) copyDescriptorGraph(ctx context.Context, desc ocispec.Descriptor, fetcher remotes.Fetcher, pusher remotes.Pusher) (err error) {
|
||||
switch desc.MediaType {
|
||||
case ocispec.MediaTypeImageManifest, consts.DockerManifestSchema2:
|
||||
// Fetch and parse the manifest
|
||||
rc, err := fetcher.Fetch(ctx, desc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch manifest: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := rc.Close(); closeErr != nil && err == nil {
|
||||
err = fmt.Errorf("failed to close manifest reader: %w", closeErr)
|
||||
}
|
||||
}()
|
||||
|
||||
data, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read manifest: %w", err)
|
||||
}
|
||||
|
||||
var manifest ocispec.Manifest
|
||||
if err := json.Unmarshal(data, &manifest); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal manifest: %w", err)
|
||||
}
|
||||
|
||||
// Copy config blob
|
||||
if err := l.copyDescriptor(ctx, manifest.Config, fetcher, pusher); err != nil {
|
||||
return fmt.Errorf("failed to copy config: %w", err)
|
||||
}
|
||||
|
||||
// Copy all layer blobs
|
||||
for _, layer := range manifest.Layers {
|
||||
if err := l.copyDescriptor(ctx, layer, fetcher, pusher); err != nil {
|
||||
return fmt.Errorf("failed to copy layer: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Push the manifest itself using the already-fetched data to avoid double-fetching
|
||||
if err := l.pushData(ctx, desc, data, pusher); err != nil {
|
||||
return fmt.Errorf("failed to push manifest: %w", err)
|
||||
}
|
||||
|
||||
case ocispec.MediaTypeImageIndex, consts.DockerManifestListSchema2:
|
||||
// Fetch and parse the index
|
||||
rc, err := fetcher.Fetch(ctx, desc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch index: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := rc.Close(); closeErr != nil && err == nil {
|
||||
err = fmt.Errorf("failed to close index reader: %w", closeErr)
|
||||
}
|
||||
}()
|
||||
|
||||
data, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read index: %w", err)
|
||||
}
|
||||
|
||||
var index ocispec.Index
|
||||
if err := json.Unmarshal(data, &index); err != nil {
|
||||
return fmt.Errorf("failed to unmarshal index: %w", err)
|
||||
}
|
||||
|
||||
// Recursively copy each child (could be manifest or nested index)
|
||||
for _, child := range index.Manifests {
|
||||
if err := l.copyDescriptorGraph(ctx, child, fetcher, pusher); err != nil {
|
||||
return fmt.Errorf("failed to copy child: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Push the index itself using the already-fetched data to avoid double-fetching
|
||||
if err := l.pushData(ctx, desc, data, pusher); err != nil {
|
||||
return fmt.Errorf("failed to push index: %w", err)
|
||||
}
|
||||
|
||||
default:
|
||||
// For other types (config blobs, layers, etc.), just copy the blob
|
||||
if err := l.copyDescriptor(ctx, desc, fetcher, pusher); err != nil {
|
||||
return fmt.Errorf("failed to copy descriptor: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyDescriptor copies a single descriptor from source to target
|
||||
func (l *Layout) copyDescriptor(ctx context.Context, desc ocispec.Descriptor, fetcher remotes.Fetcher, pusher remotes.Pusher) (err error) {
|
||||
// Fetch the content
|
||||
rc, err := fetcher.Fetch(ctx, desc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := rc.Close(); closeErr != nil && err == nil {
|
||||
err = fmt.Errorf("failed to close reader: %w", closeErr)
|
||||
}
|
||||
}()
|
||||
|
||||
// Get a writer from the pusher
|
||||
writer, err := pusher.Push(ctx, desc)
|
||||
if err != nil {
|
||||
if errdefs.IsAlreadyExists(err) {
|
||||
zerolog.Ctx(ctx).Debug().Msgf("existing blob: %s", desc.Digest)
|
||||
return nil // content already present on remote
|
||||
}
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := writer.Close(); closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
// Copy the content
|
||||
n, err := io.Copy(writer, rc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit the written content with the expected digest
|
||||
if err := writer.Commit(ctx, n, desc.Digest); err != nil {
|
||||
return err
|
||||
}
|
||||
zerolog.Ctx(ctx).Debug().Msgf("pushed blob: %s", desc.Digest)
|
||||
return nil
|
||||
}
|
||||
|
||||
// pushData pushes already-fetched data to the pusher without re-fetching.
|
||||
// This is used when we've already read the data for parsing and want to avoid double-fetching.
|
||||
func (l *Layout) pushData(ctx context.Context, desc ocispec.Descriptor, data []byte, pusher remotes.Pusher) (err error) {
|
||||
// Get a writer from the pusher
|
||||
writer, err := pusher.Push(ctx, desc)
|
||||
if err != nil {
|
||||
if errdefs.IsAlreadyExists(err) {
|
||||
return nil // content already present on remote
|
||||
}
|
||||
return fmt.Errorf("failed to get writer: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := writer.Close(); closeErr != nil && err == nil {
|
||||
err = fmt.Errorf("failed to close writer: %w", closeErr)
|
||||
}
|
||||
}()
|
||||
|
||||
// Write the data using io.Copy to handle short writes properly
|
||||
n, err := io.Copy(writer, bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write data: %w", err)
|
||||
}
|
||||
|
||||
// Commit the written content with the expected digest
|
||||
return writer.Commit(ctx, n, desc.Digest)
|
||||
}
|
||||
|
||||
// CopyAll performs bulk copy operations on the stores oci layout to a provided target
|
||||
func (l *Layout) CopyAll(ctx context.Context, to content.Target, toMapper func(string) (string, error)) ([]ocispec.Descriptor, error) {
|
||||
var descs []ocispec.Descriptor
|
||||
err := l.OCI.Walk(func(reference string, desc ocispec.Descriptor) error {
|
||||
toRef := ""
|
||||
// Use the clean reference from annotations (without -kind suffix) as the base
|
||||
// The reference parameter from Walk is the nameMap key with format "ref-kind",
|
||||
// but we need the clean ref for the destination to avoid double-appending kind
|
||||
baseRef := desc.Annotations[ocispec.AnnotationRefName]
|
||||
if baseRef == "" {
|
||||
return fmt.Errorf("descriptor %s missing required annotation %q", reference, ocispec.AnnotationRefName)
|
||||
}
|
||||
toRef := baseRef
|
||||
if toMapper != nil {
|
||||
tr, err := toMapper(reference)
|
||||
tr, err := toMapper(baseRef)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
toRef = tr
|
||||
}
|
||||
|
||||
// Append the digest to help the target pusher identify the root descriptor
|
||||
// Format: "reference@digest" allows the pusher to update its index.json
|
||||
if desc.Digest.Validate() == nil {
|
||||
toRef = fmt.Sprintf("%s@%s", toRef, desc.Digest)
|
||||
}
|
||||
|
||||
desc, err := l.Copy(ctx, reference, to, toRef)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -235,11 +749,6 @@ func (l *Layout) writeLayer(layer v1.Layer) error {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := layer.Compressed()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dir := filepath.Join(l.Root, ocispec.ImageBlobsDir, d.Algorithm)
|
||||
if err := os.MkdirAll(dir, os.ModePerm); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
@@ -251,12 +760,153 @@ func (l *Layout) writeLayer(layer v1.Layer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
r, err := layer.Compressed()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
w, err := os.Create(blobPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer w.Close()
|
||||
|
||||
_, err = io.Copy(w, r)
|
||||
return err
|
||||
_, copyErr := io.Copy(w, r)
|
||||
if closeErr := w.Close(); closeErr != nil && copyErr == nil {
|
||||
copyErr = closeErr
|
||||
}
|
||||
|
||||
// Remove a partially-written or corrupt blob on any failure so retries
|
||||
// can attempt a fresh download rather than skipping the file.
|
||||
if copyErr != nil {
|
||||
os.Remove(blobPath)
|
||||
}
|
||||
|
||||
return copyErr
|
||||
}
|
||||
|
||||
// Remove artifact reference from the store
|
||||
func (l *Layout) RemoveArtifact(ctx context.Context, reference string, desc ocispec.Descriptor) error {
|
||||
if err := l.OCI.LoadIndex(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l.OCI.RemoveFromIndex(reference)
|
||||
return l.OCI.SaveIndex()
|
||||
}
|
||||
|
||||
func (l *Layout) CleanUp(ctx context.Context) (int, int64, error) {
|
||||
referencedDigests := make(map[string]bool)
|
||||
|
||||
if err := l.OCI.LoadIndex(); err != nil {
|
||||
return 0, 0, fmt.Errorf("failed to load index: %w", err)
|
||||
}
|
||||
|
||||
var processManifest func(desc ocispec.Descriptor) error
|
||||
processManifest = func(desc ocispec.Descriptor) (err error) {
|
||||
if desc.Digest.Validate() != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// mark digest as referenced by existing artifact
|
||||
referencedDigests[desc.Digest.Hex()] = true
|
||||
|
||||
// fetch and parse manifests for layer digests
|
||||
rc, err := l.OCI.Fetch(ctx, desc)
|
||||
if err != nil {
|
||||
return nil // skip if can't be read
|
||||
}
|
||||
defer func() {
|
||||
if closeErr := rc.Close(); closeErr != nil && err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
}()
|
||||
|
||||
var manifest struct {
|
||||
Config struct {
|
||||
Digest digest.Digest `json:"digest"`
|
||||
} `json:"config"`
|
||||
Layers []struct {
|
||||
digest.Digest `json:"digest"`
|
||||
} `json:"layers"`
|
||||
Manifests []struct {
|
||||
Digest digest.Digest `json:"digest"`
|
||||
MediaType string `json:"mediaType"`
|
||||
Size int64 `json:"size"`
|
||||
} `json:"manifests"`
|
||||
}
|
||||
|
||||
if err := json.NewDecoder(rc).Decode(&manifest); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// handle image manifest
|
||||
if manifest.Config.Digest.Validate() == nil {
|
||||
referencedDigests[manifest.Config.Digest.Hex()] = true
|
||||
}
|
||||
|
||||
for _, layer := range manifest.Layers {
|
||||
if layer.Digest.Validate() == nil {
|
||||
referencedDigests[layer.Digest.Hex()] = true
|
||||
}
|
||||
}
|
||||
|
||||
// handle manifest list
|
||||
for _, m := range manifest.Manifests {
|
||||
if m.Digest.Validate() == nil {
|
||||
// mark manifest
|
||||
referencedDigests[m.Digest.Hex()] = true
|
||||
// process manifest for layers
|
||||
manifestDesc := ocispec.Descriptor{
|
||||
MediaType: m.MediaType,
|
||||
Digest: m.Digest,
|
||||
Size: m.Size,
|
||||
}
|
||||
processManifest(manifestDesc) // calls helper func on manifests in list
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// walk through artifacts
|
||||
if err := l.OCI.Walk(func(reference string, desc ocispec.Descriptor) error {
|
||||
return processManifest(desc)
|
||||
}); err != nil {
|
||||
return 0, 0, fmt.Errorf("failed to walk artifacts: %w", err)
|
||||
}
|
||||
|
||||
// read all entries
|
||||
blobsPath := filepath.Join(l.Root, ocispec.ImageBlobsDir, digest.Canonical.String())
|
||||
entries, err := os.ReadDir(blobsPath)
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("failed to read blobs directory: %w", err)
|
||||
}
|
||||
|
||||
// track count and size of deletions
|
||||
deletedCount := 0
|
||||
var deletedSize int64
|
||||
|
||||
// scan blobs
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
digest := entry.Name()
|
||||
|
||||
if !referencedDigests[digest] {
|
||||
blobPath := filepath.Join(blobsPath, digest)
|
||||
if info, err := entry.Info(); err == nil {
|
||||
deletedSize += info.Size()
|
||||
}
|
||||
|
||||
if err := os.Remove(blobPath); err != nil {
|
||||
return deletedCount, deletedSize, fmt.Errorf("failed to remove blob %s: %w", digest, err)
|
||||
}
|
||||
deletedCount++
|
||||
}
|
||||
}
|
||||
|
||||
return deletedCount, deletedSize, nil
|
||||
}
|
||||
|
||||
@@ -1,14 +1,32 @@
|
||||
package store_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
ccontent "github.com/containerd/containerd/content"
|
||||
gname "github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/registry"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/empty"
|
||||
"github.com/google/go-containerregistry/pkg/v1/mutate"
|
||||
"github.com/google/go-containerregistry/pkg/v1/random"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
"github.com/google/go-containerregistry/pkg/v1/static"
|
||||
"github.com/google/go-containerregistry/pkg/v1/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"hauler.dev/go/hauler/pkg/artifacts"
|
||||
"hauler.dev/go/hauler/pkg/consts"
|
||||
"hauler.dev/go/hauler/pkg/store"
|
||||
)
|
||||
|
||||
@@ -17,7 +35,7 @@ var (
|
||||
root string
|
||||
)
|
||||
|
||||
func TestLayout_AddOCI(t *testing.T) {
|
||||
func TestLayout_AddArtifact(t *testing.T) {
|
||||
teardown := setup(t)
|
||||
defer teardown()
|
||||
|
||||
@@ -46,16 +64,16 @@ func TestLayout_AddOCI(t *testing.T) {
|
||||
}
|
||||
moci := genArtifact(t, tt.args.ref)
|
||||
|
||||
got, err := s.AddOCI(ctx, moci, tt.args.ref)
|
||||
got, err := s.AddArtifact(ctx, moci, tt.args.ref)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("AddOCI() error = %v, wantErr %v", err, tt.wantErr)
|
||||
t.Errorf("AddArtifact() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
_ = got
|
||||
|
||||
_, err = s.AddOCI(ctx, moci, tt.args.ref)
|
||||
_, err = s.AddArtifact(ctx, moci, tt.args.ref)
|
||||
if err != nil {
|
||||
t.Errorf("AddOCI() error = %v, wantErr %v", err, tt.wantErr)
|
||||
t.Errorf("AddArtifact() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
})
|
||||
@@ -103,3 +121,669 @@ func genArtifact(t *testing.T, ref string) artifacts.OCI {
|
||||
img,
|
||||
}
|
||||
}
|
||||
|
||||
// Mock fetcher/pusher for testing
|
||||
type mockFetcher struct {
|
||||
blobs map[digest.Digest][]byte
|
||||
}
|
||||
|
||||
func newMockFetcher() *mockFetcher {
|
||||
return &mockFetcher{
|
||||
blobs: make(map[digest.Digest][]byte),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *mockFetcher) addBlob(data []byte) ocispec.Descriptor {
|
||||
dgst := digest.FromBytes(data)
|
||||
m.blobs[dgst] = data
|
||||
return ocispec.Descriptor{
|
||||
MediaType: "application/octet-stream",
|
||||
Digest: dgst,
|
||||
Size: int64(len(data)),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *mockFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
|
||||
data, ok := m.blobs[desc.Digest]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("blob not found: %s", desc.Digest)
|
||||
}
|
||||
return io.NopCloser(bytes.NewReader(data)), nil
|
||||
}
|
||||
|
||||
type mockPusher struct {
|
||||
blobs map[digest.Digest][]byte
|
||||
}
|
||||
|
||||
func newMockPusher() *mockPusher {
|
||||
return &mockPusher{
|
||||
blobs: make(map[digest.Digest][]byte),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *mockPusher) Push(ctx context.Context, desc ocispec.Descriptor) (ccontent.Writer, error) {
|
||||
return &mockWriter{
|
||||
pusher: m,
|
||||
desc: desc,
|
||||
buf: &bytes.Buffer{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
type mockWriter struct {
|
||||
pusher *mockPusher
|
||||
desc ocispec.Descriptor
|
||||
buf *bytes.Buffer
|
||||
closed bool
|
||||
}
|
||||
|
||||
func (m *mockWriter) Write(p []byte) (int, error) {
|
||||
if m.closed {
|
||||
return 0, fmt.Errorf("writer closed")
|
||||
}
|
||||
return m.buf.Write(p)
|
||||
}
|
||||
|
||||
func (m *mockWriter) Close() error {
|
||||
m.closed = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...ccontent.Opt) error {
|
||||
data := m.buf.Bytes()
|
||||
if int64(len(data)) != size {
|
||||
return fmt.Errorf("size mismatch: expected %d, got %d", size, len(data))
|
||||
}
|
||||
dgst := digest.FromBytes(data)
|
||||
if expected != "" && dgst != expected {
|
||||
return fmt.Errorf("digest mismatch: expected %s, got %s", expected, dgst)
|
||||
}
|
||||
m.pusher.blobs[dgst] = data
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockWriter) Digest() digest.Digest {
|
||||
return digest.FromBytes(m.buf.Bytes())
|
||||
}
|
||||
|
||||
func (m *mockWriter) Status() (ccontent.Status, error) {
|
||||
return ccontent.Status{}, nil
|
||||
}
|
||||
|
||||
func (m *mockWriter) Truncate(size int64) error {
|
||||
return fmt.Errorf("truncate not supported")
|
||||
}
|
||||
|
||||
// blobPath returns the expected filesystem path for a blob in an OCI layout store.
|
||||
func blobPath(root string, d digest.Digest) string {
|
||||
return filepath.Join(root, "blobs", d.Algorithm().String(), d.Encoded())
|
||||
}
|
||||
|
||||
// findRefKey walks the store's index and returns the nameMap key for the first
|
||||
// descriptor whose AnnotationRefName matches ref.
|
||||
func findRefKey(t *testing.T, s *store.Layout, ref string) string {
|
||||
t.Helper()
|
||||
var key string
|
||||
_ = s.OCI.Walk(func(reference string, desc ocispec.Descriptor) error {
|
||||
if desc.Annotations[ocispec.AnnotationRefName] == ref && key == "" {
|
||||
key = reference
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if key == "" {
|
||||
t.Fatalf("reference %q not found in store", ref)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
// findRefKeyByKind walks the store's index and returns the nameMap key for the
|
||||
// descriptor whose AnnotationRefName matches ref and whose kind annotation matches kind.
|
||||
func findRefKeyByKind(t *testing.T, s *store.Layout, ref, kind string) string {
|
||||
t.Helper()
|
||||
var key string
|
||||
_ = s.OCI.Walk(func(reference string, desc ocispec.Descriptor) error {
|
||||
if desc.Annotations[ocispec.AnnotationRefName] == ref &&
|
||||
desc.Annotations[consts.KindAnnotationName] == kind {
|
||||
key = reference
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if key == "" {
|
||||
t.Fatalf("reference %q with kind %q not found in store", ref, kind)
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
// readManifestBlob reads and parses an OCI manifest from the store's blob directory.
|
||||
func readManifestBlob(t *testing.T, root string, d digest.Digest) ocispec.Manifest {
|
||||
t.Helper()
|
||||
data, err := os.ReadFile(blobPath(root, d))
|
||||
if err != nil {
|
||||
t.Fatalf("read manifest blob %s: %v", d, err)
|
||||
}
|
||||
var m ocispec.Manifest
|
||||
if err := json.Unmarshal(data, &m); err != nil {
|
||||
t.Fatalf("unmarshal manifest: %v", err)
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// TestCopyDescriptor verifies that copyDescriptor (exercised via Copy) transfers
|
||||
// each individual blob — config and every layer — into the destination store's blob
|
||||
// directory, and that a second Copy of the same content succeeds gracefully when
|
||||
// blobs are already present (AlreadyExists path).
|
||||
func TestCopyDescriptor(t *testing.T) {
|
||||
teardown := setup(t)
|
||||
defer teardown()
|
||||
|
||||
srcRoot := t.TempDir()
|
||||
src, err := store.NewLayout(srcRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ref := "test/blob:v1"
|
||||
// genArtifact creates random.Image(1024, 3): 1 config blob + 3 layer blobs.
|
||||
manifestDesc, err := src.AddArtifact(ctx, genArtifact(t, ref), ref)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := src.OCI.SaveIndex(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
refKey := findRefKey(t, src, ref)
|
||||
manifest := readManifestBlob(t, srcRoot, manifestDesc.Digest)
|
||||
|
||||
dstRoot := t.TempDir()
|
||||
dst, err := store.NewLayout(dstRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// First copy: should transfer all individual blobs via copyDescriptor.
|
||||
gotDesc, err := src.Copy(ctx, refKey, dst.OCI, "test/blob:dst")
|
||||
if err != nil {
|
||||
t.Fatalf("Copy failed: %v", err)
|
||||
}
|
||||
if gotDesc.Digest != manifestDesc.Digest {
|
||||
t.Errorf("returned descriptor digest mismatch: got %s, want %s", gotDesc.Digest, manifestDesc.Digest)
|
||||
}
|
||||
|
||||
// Verify the config blob is present in the destination.
|
||||
if _, err := os.Stat(blobPath(dstRoot, manifest.Config.Digest)); err != nil {
|
||||
t.Errorf("config blob missing in dest: %v", err)
|
||||
}
|
||||
|
||||
// Verify every layer blob is present in the destination.
|
||||
for i, layer := range manifest.Layers {
|
||||
if _, err := os.Stat(blobPath(dstRoot, layer.Digest)); err != nil {
|
||||
t.Errorf("layer[%d] blob missing in dest: %v", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify the manifest blob itself was pushed.
|
||||
if _, err := os.Stat(blobPath(dstRoot, manifestDesc.Digest)); err != nil {
|
||||
t.Errorf("manifest blob missing in dest: %v", err)
|
||||
}
|
||||
|
||||
// Second copy: blobs already exist — AlreadyExists must be handled without error.
|
||||
gotDesc2, err := src.Copy(ctx, refKey, dst.OCI, "test/blob:dst2")
|
||||
if err != nil {
|
||||
t.Fatalf("second Copy failed (AlreadyExists should be a no-op): %v", err)
|
||||
}
|
||||
if gotDesc2.Digest != manifestDesc.Digest {
|
||||
t.Errorf("second Copy digest mismatch: got %s, want %s", gotDesc2.Digest, manifestDesc.Digest)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopyDescriptorGraph_Manifest verifies that copyDescriptorGraph reconstructs a
|
||||
// complete manifest in the destination (config digest and each layer digest match the
|
||||
// source), and returns an error when a required blob is absent from the source.
|
||||
func TestCopyDescriptorGraph_Manifest(t *testing.T) {
|
||||
teardown := setup(t)
|
||||
defer teardown()
|
||||
|
||||
srcRoot := t.TempDir()
|
||||
src, err := store.NewLayout(srcRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ref := "test/manifest:v1"
|
||||
manifestDesc, err := src.AddArtifact(ctx, genArtifact(t, ref), ref)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := src.OCI.SaveIndex(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
refKey := findRefKey(t, src, ref)
|
||||
srcManifest := readManifestBlob(t, srcRoot, manifestDesc.Digest)
|
||||
|
||||
// --- Happy path: all blobs present, manifest structure preserved ---
|
||||
dstRoot := t.TempDir()
|
||||
dst, err := store.NewLayout(dstRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
gotDesc, err := src.Copy(ctx, refKey, dst.OCI, "test/manifest:dst")
|
||||
if err != nil {
|
||||
t.Fatalf("Copy failed: %v", err)
|
||||
}
|
||||
|
||||
// Parse the manifest from the destination and compare structure with source.
|
||||
dstManifest := readManifestBlob(t, dstRoot, gotDesc.Digest)
|
||||
if dstManifest.Config.Digest != srcManifest.Config.Digest {
|
||||
t.Errorf("config digest mismatch: got %s, want %s",
|
||||
dstManifest.Config.Digest, srcManifest.Config.Digest)
|
||||
}
|
||||
if len(dstManifest.Layers) != len(srcManifest.Layers) {
|
||||
t.Fatalf("layer count mismatch: dst=%d src=%d",
|
||||
len(dstManifest.Layers), len(srcManifest.Layers))
|
||||
}
|
||||
for i, l := range srcManifest.Layers {
|
||||
if dstManifest.Layers[i].Digest != l.Digest {
|
||||
t.Errorf("layer[%d] digest mismatch: got %s, want %s",
|
||||
i, dstManifest.Layers[i].Digest, l.Digest)
|
||||
}
|
||||
}
|
||||
|
||||
// --- Error path: delete a layer blob from source, expect Copy to fail ---
|
||||
if len(srcManifest.Layers) == 0 {
|
||||
t.Skip("artifact has no layers; skipping missing-blob error path")
|
||||
}
|
||||
if err := os.Remove(blobPath(srcRoot, srcManifest.Layers[0].Digest)); err != nil {
|
||||
t.Fatalf("could not remove layer blob to simulate corruption: %v", err)
|
||||
}
|
||||
|
||||
dst2Root := t.TempDir()
|
||||
dst2, err := store.NewLayout(dst2Root)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
_, err = src.Copy(ctx, refKey, dst2.OCI, "test/manifest:missing-blob")
|
||||
if err == nil {
|
||||
t.Error("expected Copy to fail when a source layer blob is missing, but it succeeded")
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopyDescriptorGraph_Index verifies that copyDescriptorGraph handles an OCI
|
||||
// image index (multi-platform) by recursively copying all child manifests and their
|
||||
// blobs into the destination store, and that the index blob itself is present.
|
||||
func TestCopyDescriptorGraph_Index(t *testing.T) {
|
||||
teardown := setup(t)
|
||||
defer teardown()
|
||||
|
||||
// Start an in-process OCI registry.
|
||||
srv := httptest.NewServer(registry.New())
|
||||
t.Cleanup(srv.Close)
|
||||
host := strings.TrimPrefix(srv.URL, "http://")
|
||||
remoteOpts := []remote.Option{remote.WithTransport(srv.Client().Transport)}
|
||||
|
||||
// Build a 2-platform image index.
|
||||
img1, err := random.Image(512, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("random image (amd64): %v", err)
|
||||
}
|
||||
img2, err := random.Image(512, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("random image (arm64): %v", err)
|
||||
}
|
||||
idx := mutate.AppendManifests(
|
||||
empty.Index,
|
||||
mutate.IndexAddendum{
|
||||
Add: img1,
|
||||
Descriptor: v1.Descriptor{
|
||||
MediaType: types.OCIManifestSchema1,
|
||||
Platform: &v1.Platform{OS: "linux", Architecture: "amd64"},
|
||||
},
|
||||
},
|
||||
mutate.IndexAddendum{
|
||||
Add: img2,
|
||||
Descriptor: v1.Descriptor{
|
||||
MediaType: types.OCIManifestSchema1,
|
||||
Platform: &v1.Platform{OS: "linux", Architecture: "arm64"},
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
idxTag, err := gname.NewTag(host+"/test/multiarch:v1", gname.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("new tag: %v", err)
|
||||
}
|
||||
if err := remote.WriteIndex(idxTag, idx, remoteOpts...); err != nil {
|
||||
t.Fatalf("push index: %v", err)
|
||||
}
|
||||
|
||||
// Pull the index into a hauler store via AddImage.
|
||||
srcRoot := t.TempDir()
|
||||
src, err := store.NewLayout(srcRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := src.AddImage(ctx, idxTag.Name(), "", remoteOpts...); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
if err := src.OCI.SaveIndex(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Locate the index descriptor (kind=imageIndex) in the source store.
|
||||
refKey := findRefKeyByKind(t, src, "test/multiarch:v1", consts.KindAnnotationIndex)
|
||||
|
||||
// Copy the entire index graph to a fresh destination store.
|
||||
dstRoot := t.TempDir()
|
||||
dst, err := store.NewLayout(dstRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
gotDesc, err := src.Copy(ctx, refKey, dst.OCI, "test/multiarch:copied")
|
||||
if err != nil {
|
||||
t.Fatalf("Copy of image index failed: %v", err)
|
||||
}
|
||||
|
||||
// The index blob itself must be present in the destination.
|
||||
if _, err := os.Stat(blobPath(dstRoot, gotDesc.Digest)); err != nil {
|
||||
t.Errorf("index manifest blob missing in dest: %v", err)
|
||||
}
|
||||
|
||||
// Parse the index from the source and verify every child manifest blob landed
|
||||
// in the destination (exercising recursive copyDescriptorGraph for each child).
|
||||
var ociIdx ocispec.Index
|
||||
if err := json.Unmarshal(mustReadFile(t, blobPath(srcRoot, gotDesc.Digest)), &ociIdx); err != nil {
|
||||
t.Fatalf("unmarshal index: %v", err)
|
||||
}
|
||||
if len(ociIdx.Manifests) < 2 {
|
||||
t.Fatalf("expected ≥2 child manifests in index, got %d", len(ociIdx.Manifests))
|
||||
}
|
||||
for i, child := range ociIdx.Manifests {
|
||||
if _, err := os.Stat(blobPath(dstRoot, child.Digest)); err != nil {
|
||||
t.Errorf("child manifest[%d] (platform=%v) blob missing in dest: %v",
|
||||
i, child.Platform, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// mustReadFile reads a file and fails the test on error.
|
||||
func mustReadFile(t *testing.T, path string) []byte {
|
||||
t.Helper()
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
t.Fatalf("read %s: %v", path, err)
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
// TestCopy_Integration tests the full Copy workflow including copyDescriptorGraph
|
||||
func TestCopy_Integration(t *testing.T) {
|
||||
teardown := setup(t)
|
||||
defer teardown()
|
||||
|
||||
// Create source store
|
||||
sourceRoot, err := os.MkdirTemp("", "hauler-source")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(sourceRoot)
|
||||
|
||||
sourceStore, err := store.NewLayout(sourceRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Add an artifact to source
|
||||
ref := "test/image:v1"
|
||||
artifact := genArtifact(t, ref)
|
||||
_, err = sourceStore.AddArtifact(ctx, artifact, ref)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Save the index to persist the reference
|
||||
if err := sourceStore.OCI.SaveIndex(); err != nil {
|
||||
t.Fatalf("Failed to save index: %v", err)
|
||||
}
|
||||
|
||||
// Find the actual reference key in the nameMap (includes kind suffix)
|
||||
var sourceRefKey string
|
||||
err = sourceStore.OCI.Walk(func(reference string, desc ocispec.Descriptor) error {
|
||||
if desc.Annotations[ocispec.AnnotationRefName] == ref {
|
||||
sourceRefKey = reference
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to walk source store: %v", err)
|
||||
}
|
||||
if sourceRefKey == "" {
|
||||
t.Fatal("Failed to find reference in source store")
|
||||
}
|
||||
|
||||
// Create destination store
|
||||
destRoot, err := os.MkdirTemp("", "hauler-dest")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(destRoot)
|
||||
|
||||
destStore, err := store.NewLayout(destRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Copy from source to destination
|
||||
destRef := "test/image:copied"
|
||||
desc, err := sourceStore.Copy(ctx, sourceRefKey, destStore.OCI, destRef)
|
||||
if err != nil {
|
||||
t.Fatalf("Copy failed: %v", err)
|
||||
}
|
||||
|
||||
// Copy doesn't automatically add to destination index for generic targets
|
||||
// For OCI stores, we need to add the descriptor manually with the reference
|
||||
desc.Annotations = map[string]string{
|
||||
ocispec.AnnotationRefName: destRef,
|
||||
consts.KindAnnotationName: consts.KindAnnotationImage,
|
||||
}
|
||||
if err := destStore.OCI.AddIndex(desc); err != nil {
|
||||
t.Fatalf("Failed to add descriptor to destination index: %v", err)
|
||||
}
|
||||
|
||||
// Verify the descriptor was copied
|
||||
if desc.Digest == "" {
|
||||
t.Error("Expected non-empty digest")
|
||||
}
|
||||
|
||||
// Find the copied reference in destination
|
||||
var foundInDest bool
|
||||
var destDesc ocispec.Descriptor
|
||||
err = destStore.OCI.Walk(func(reference string, d ocispec.Descriptor) error {
|
||||
if d.Digest == desc.Digest {
|
||||
foundInDest = true
|
||||
destDesc = d
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to walk destination store: %v", err)
|
||||
}
|
||||
|
||||
if !foundInDest {
|
||||
t.Error("Copied descriptor not found in destination store")
|
||||
}
|
||||
|
||||
if destDesc.Digest != desc.Digest {
|
||||
t.Errorf("Digest mismatch: got %s, want %s", destDesc.Digest, desc.Digest)
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopy_ErrorHandling tests error cases
|
||||
func TestCopy_ErrorHandling(t *testing.T) {
|
||||
teardown := setup(t)
|
||||
defer teardown()
|
||||
|
||||
sourceRoot, err := os.MkdirTemp("", "hauler-source")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(sourceRoot)
|
||||
|
||||
sourceStore, err := store.NewLayout(sourceRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
destRoot, err := os.MkdirTemp("", "hauler-dest")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(destRoot)
|
||||
|
||||
destStore, err := store.NewLayout(destRoot)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Test copying non-existent reference
|
||||
_, err = sourceStore.Copy(ctx, "nonexistent:tag", destStore.OCI, "dest:tag")
|
||||
if err == nil {
|
||||
t.Error("Expected error when copying non-existent reference")
|
||||
}
|
||||
}
|
||||
|
||||
// TestCopy_DockerFormats tests copying Docker manifest formats
|
||||
func TestCopy_DockerFormats(t *testing.T) {
|
||||
// This test verifies that Docker format media types are recognized
|
||||
// The actual copying is tested in the integration test
|
||||
if consts.DockerManifestSchema2 == "" {
|
||||
t.Error("DockerManifestSchema2 constant should not be empty")
|
||||
}
|
||||
t.Skip("Docker format copying is tested via integration tests")
|
||||
}
|
||||
|
||||
// TestCopy_MultiPlatform tests copying multi-platform images with manifest lists
|
||||
func TestCopy_MultiPlatform(t *testing.T) {
|
||||
teardown := setup(t)
|
||||
defer teardown()
|
||||
|
||||
sourceRoot, err := os.MkdirTemp("", "hauler-source")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(sourceRoot)
|
||||
|
||||
// This test would require creating a multi-platform image
|
||||
// which is more complex - marking as future enhancement
|
||||
t.Skip("Multi-platform image test requires additional setup")
|
||||
}
|
||||
|
||||
// TestAddImage_OCI11Referrers verifies that AddImage captures OCI 1.1 referrers
|
||||
// (cosign v3 new-bundle-format) stored via the subject field rather than the legacy
|
||||
// sha256-<hex>.sig/.att/.sbom tag convention.
|
||||
//
|
||||
// The test:
|
||||
// 1. Starts an in-process OCI 1.1–capable registry (go-containerregistry/pkg/registry)
|
||||
// 2. Pushes a random base image to it
|
||||
// 3. Builds a synthetic cosign v3-style Sigstore bundle referrer manifest (with a
|
||||
// "subject" field pointing at the base image) and pushes it so the registry
|
||||
// registers it in the referrers index automatically
|
||||
// 4. Calls store.AddImage and then walks the OCI layout to confirm that a
|
||||
// KindAnnotationReferrers-prefixed entry was saved
|
||||
func TestAddImage_OCI11Referrers(t *testing.T) {
|
||||
// 1. Start an in-process OCI 1.1 registry.
|
||||
srv := httptest.NewServer(registry.New())
|
||||
t.Cleanup(srv.Close)
|
||||
host := strings.TrimPrefix(srv.URL, "http://")
|
||||
|
||||
remoteOpts := []remote.Option{
|
||||
remote.WithTransport(srv.Client().Transport),
|
||||
}
|
||||
|
||||
// 2. Push a random base image.
|
||||
baseTag, err := gname.NewTag(host+"/test/image:v1", gname.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("new tag: %v", err)
|
||||
}
|
||||
baseImg, err := random.Image(512, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("random image: %v", err)
|
||||
}
|
||||
if err := remote.Write(baseTag, baseImg, remoteOpts...); err != nil {
|
||||
t.Fatalf("push base image: %v", err)
|
||||
}
|
||||
|
||||
// Build the v1.Descriptor for the base image so we can set it as the referrer subject.
|
||||
baseHash, err := baseImg.Digest()
|
||||
if err != nil {
|
||||
t.Fatalf("base image digest: %v", err)
|
||||
}
|
||||
baseRawManifest, err := baseImg.RawManifest()
|
||||
if err != nil {
|
||||
t.Fatalf("base image raw manifest: %v", err)
|
||||
}
|
||||
baseMT, err := baseImg.MediaType()
|
||||
if err != nil {
|
||||
t.Fatalf("base image media type: %v", err)
|
||||
}
|
||||
baseDesc := v1.Descriptor{
|
||||
MediaType: baseMT,
|
||||
Digest: baseHash,
|
||||
Size: int64(len(baseRawManifest)),
|
||||
}
|
||||
|
||||
// 3. Build a synthetic cosign v3 Sigstore bundle referrer.
|
||||
//
|
||||
// Real cosign new-bundle-format: artifactType=application/vnd.dev.sigstore.bundle.v0.3+json,
|
||||
// config.mediaType=application/vnd.oci.empty.v1+json, single layer containing the bundle JSON,
|
||||
// and a "subject" field pointing at the base image digest.
|
||||
bundleJSON := []byte(`{"mediaType":"application/vnd.dev.sigstore.bundle.v0.3+json",` +
|
||||
`"verificationMaterial":{},"messageSignature":{"messageDigest":` +
|
||||
`{"algorithm":"SHA2_256","digest":"AAAA"},"signature":"AAAA"}}`)
|
||||
bundleLayer := static.NewLayer(bundleJSON, types.MediaType(consts.SigstoreBundleMediaType))
|
||||
|
||||
referrerImg, err := mutate.AppendLayers(empty.Image, bundleLayer)
|
||||
if err != nil {
|
||||
t.Fatalf("append bundle layer: %v", err)
|
||||
}
|
||||
referrerImg = mutate.MediaType(referrerImg, types.OCIManifestSchema1)
|
||||
referrerImg = mutate.ConfigMediaType(referrerImg, types.MediaType(consts.OCIEmptyConfigMediaType))
|
||||
referrerImg = mutate.Subject(referrerImg, baseDesc).(v1.Image)
|
||||
|
||||
// Push the referrer under an arbitrary tag; the in-process registry auto-wires the
|
||||
// subject field and makes the manifest discoverable via GET /v2/.../referrers/<digest>.
|
||||
referrerTag, err := gname.NewTag(host+"/test/image:bundle-referrer", gname.Insecure)
|
||||
if err != nil {
|
||||
t.Fatalf("referrer tag: %v", err)
|
||||
}
|
||||
if err := remote.Write(referrerTag, referrerImg, remoteOpts...); err != nil {
|
||||
t.Fatalf("push referrer: %v", err)
|
||||
}
|
||||
|
||||
// 4. Let hauler add the base image (which should also fetch its OCI referrers).
|
||||
storeRoot := t.TempDir()
|
||||
s, err := store.NewLayout(storeRoot)
|
||||
if err != nil {
|
||||
t.Fatalf("new layout: %v", err)
|
||||
}
|
||||
if err := s.AddImage(context.Background(), baseTag.Name(), "", remoteOpts...); err != nil {
|
||||
t.Fatalf("AddImage: %v", err)
|
||||
}
|
||||
|
||||
// 5. Walk the store and verify that at least one referrer entry was captured.
|
||||
var referrerCount int
|
||||
if err := s.Walk(func(_ string, desc ocispec.Descriptor) error {
|
||||
if strings.HasPrefix(desc.Annotations[consts.KindAnnotationName], consts.KindAnnotationReferrers) {
|
||||
referrerCount++
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
t.Fatalf("Walk: %v", err)
|
||||
}
|
||||
|
||||
if referrerCount == 0 {
|
||||
t.Fatal("expected at least one OCI referrer entry in the store, got none")
|
||||
}
|
||||
t.Logf("captured %d OCI referrer(s) for %s", referrerCount, baseTag.Name())
|
||||
}
|
||||
|
||||
BIN
testdata/chart-with-file-dependency-chart-1.0.0.tgz
vendored
Normal file
BIN
testdata/chart-with-file-dependency-chart-1.0.0.tgz
vendored
Normal file
Binary file not shown.
65
testdata/hauler-manifest-pipeline.yaml
vendored
65
testdata/hauler-manifest-pipeline.yaml
vendored
@@ -1,14 +1,18 @@
|
||||
# v1 manifests
|
||||
# hauler manifests
|
||||
# api version of v1
|
||||
apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Images
|
||||
metadata:
|
||||
name: hauler-content-images-example
|
||||
spec:
|
||||
images:
|
||||
- name: busybox
|
||||
- name: busybox:stable
|
||||
- name: ghcr.io/hauler-dev/library/busybox
|
||||
- name: ghcr.io/hauler-dev/library/busybox:stable
|
||||
platform: linux/amd64
|
||||
- name: gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
|
||||
- name: ghcr.io/kubewarden/audit-scanner:v1.30.0-rc1
|
||||
certificate-identity-regexp: https://github.com/kubewarden/audit-scanner/.github/workflows/release.yml@refs/tags/v1.30.0-rc1
|
||||
certificate-oidc-issuer: https://token.actions.githubusercontent.com
|
||||
---
|
||||
apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Charts
|
||||
@@ -33,7 +37,10 @@ spec:
|
||||
repoURL: oci://ghcr.io/hauler-dev
|
||||
version: 1.0.4
|
||||
- name: rancher-cluster-templates-0.5.2.tgz
|
||||
repoURL: .
|
||||
repoURL: testdata
|
||||
- name: chart-with-file-dependency-chart-1.0.0.tgz
|
||||
repoURL: testdata
|
||||
add-dependencies: true
|
||||
---
|
||||
apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Files
|
||||
@@ -47,53 +54,3 @@ spec:
|
||||
- path: testdata/hauler-manifest.yaml
|
||||
- path: testdata/hauler-manifest.yaml
|
||||
name: hauler-manifest-local.yaml
|
||||
---
|
||||
# v1alpha1 manifests
|
||||
apiVersion: content.hauler.cattle.io/v1alpha1
|
||||
kind: Images
|
||||
metadata:
|
||||
name: hauler-content-images-example
|
||||
spec:
|
||||
images:
|
||||
- name: busybox
|
||||
- name: busybox:stable
|
||||
platform: linux/amd64
|
||||
- name: gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
|
||||
---
|
||||
apiVersion: content.hauler.cattle.io/v1alpha1
|
||||
kind: Charts
|
||||
metadata:
|
||||
name: hauler-content-charts-example
|
||||
spec:
|
||||
charts:
|
||||
- name: rancher
|
||||
repoURL: https://releases.rancher.com/server-charts/stable
|
||||
- name: rancher
|
||||
repoURL: https://releases.rancher.com/server-charts/stable
|
||||
version: 2.8.4
|
||||
- name: rancher
|
||||
repoURL: https://releases.rancher.com/server-charts/stable
|
||||
version: 2.8.3
|
||||
- name: hauler-helm
|
||||
repoURL: oci://ghcr.io/hauler-dev
|
||||
- name: hauler-helm
|
||||
repoURL: oci://ghcr.io/hauler-dev
|
||||
version: 1.0.6
|
||||
- name: hauler-helm
|
||||
repoURL: oci://ghcr.io/hauler-dev
|
||||
version: 1.0.4
|
||||
- name: rancher-cluster-templates-0.5.2.tgz
|
||||
repoURL: .
|
||||
---
|
||||
apiVersion: content.hauler.cattle.io/v1alpha1
|
||||
kind: Files
|
||||
metadata:
|
||||
name: hauler-content-files-example
|
||||
spec:
|
||||
files:
|
||||
- path: https://get.rke2.io/install.sh
|
||||
- path: https://get.rke2.io/install.sh
|
||||
name: rke2-install.sh
|
||||
- path: testdata/hauler-manifest.yaml
|
||||
- path: testdata/hauler-manifest.yaml
|
||||
name: hauler-manifest-local.yaml
|
||||
|
||||
44
testdata/hauler-manifest.yaml
vendored
44
testdata/hauler-manifest.yaml
vendored
@@ -1,14 +1,18 @@
|
||||
# v1 manifests
|
||||
# hauler manifest
|
||||
# api version of v1
|
||||
apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Images
|
||||
metadata:
|
||||
name: hauler-content-images-example
|
||||
spec:
|
||||
images:
|
||||
- name: busybox
|
||||
- name: busybox:stable
|
||||
- name: ghcr.io/hauler-dev/library/busybox
|
||||
- name: ghcr.io/hauler-dev/library/busybox:stable
|
||||
platform: linux/amd64
|
||||
- name: gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
|
||||
- name: ghcr.io/kubewarden/audit-scanner:v1.30.0-rc1
|
||||
certificate-identity-regexp: https://github.com/kubewarden/audit-scanner/.github/workflows/release.yml@refs/tags/v1.30.0-rc1
|
||||
certificate-oidc-issuer: https://token.actions.githubusercontent.com
|
||||
---
|
||||
apiVersion: content.hauler.cattle.io/v1
|
||||
kind: Charts
|
||||
@@ -31,37 +35,3 @@ spec:
|
||||
- path: https://get.rke2.io
|
||||
name: install.sh
|
||||
- path: testdata/hauler-manifest.yaml
|
||||
---
|
||||
# v1alpha1 manifests
|
||||
apiVersion: content.hauler.cattle.io/v1alpha1
|
||||
kind: Images
|
||||
metadata:
|
||||
name: hauler-content-images-example
|
||||
spec:
|
||||
images:
|
||||
- name: busybox
|
||||
- name: busybox:stable
|
||||
platform: linux/amd64
|
||||
- name: gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5
|
||||
---
|
||||
apiVersion: content.hauler.cattle.io/v1alpha1
|
||||
kind: Charts
|
||||
metadata:
|
||||
name: hauler-content-charts-example
|
||||
spec:
|
||||
charts:
|
||||
- name: rancher
|
||||
repoURL: https://releases.rancher.com/server-charts/stable
|
||||
version: 2.8.5
|
||||
- name: hauler-helm
|
||||
repoURL: oci://ghcr.io/hauler-dev
|
||||
---
|
||||
apiVersion: content.hauler.cattle.io/v1alpha1
|
||||
kind: Files
|
||||
metadata:
|
||||
name: hauler-content-files-example
|
||||
spec:
|
||||
files:
|
||||
- path: https://get.rke2.io
|
||||
name: install.sh
|
||||
- path: testdata/hauler-manifest.yaml
|
||||
|
||||
Reference in New Issue
Block a user