diff --git a/.github/workflows/cherrypick.yml b/.github/workflows/cherrypick.yml new file mode 100644 index 0000000..3752241 --- /dev/null +++ b/.github/workflows/cherrypick.yml @@ -0,0 +1,298 @@ +name: Cherry-pick to release branch + +on: + issue_comment: + types: [created] + pull_request: + types: [closed] + +permissions: + contents: write + pull-requests: write + issues: write + +jobs: + # ────────────────────────────────────────────────────────────── + # Trigger 1: /cherrypick-X.Y comment on a PR + # - If already merged → run cherry-pick immediately + # - If not yet merged → add label, cherry-pick will run on merge + # ────────────────────────────────────────────────────────────── + handle-comment: + if: > + github.event_name == 'issue_comment' && + github.event.issue.pull_request && + startsWith(github.event.comment.body, '/cherrypick-') + runs-on: ubuntu-latest + steps: + - name: Check commenter permissions + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + COMMENTER: ${{ github.event.comment.user.login }} + run: | + PERMISSION=$(gh api repos/${{ github.repository }}/collaborators/${COMMENTER}/permission \ + --jq '.permission') + echo "Permission level for $COMMENTER: $PERMISSION" + if [[ "$PERMISSION" != "admin" && "$PERMISSION" != "maintain" && "$PERMISSION" != "write" ]]; then + echo "::warning::User $COMMENTER does not have write access, ignoring cherry-pick request" + exit 1 + fi + + - name: Parse version from comment + id: parse + env: + COMMENT_BODY: ${{ github.event.comment.body }} + run: | + VERSION=$(echo "$COMMENT_BODY" | head -1 | grep -oP '(?<=/cherrypick-)\d+\.\d+') + if [ -z "$VERSION" ]; then + echo "::error::Could not parse version from comment" + exit 1 + fi + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + echo "target_branch=release/$VERSION" >> "$GITHUB_OUTPUT" + echo "label=cherrypick/$VERSION" >> "$GITHUB_OUTPUT" + + - name: React to comment + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + gh api repos/${{ github.repository 
}}/issues/comments/${{ github.event.comment.id }}/reactions \ + -f content='+1' + + - name: Check if PR is merged + id: check + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + PR_JSON=$(gh api repos/${{ github.repository }}/pulls/${{ github.event.issue.number }}) + MERGED=$(echo "$PR_JSON" | jq -r '.merged') + echo "merged=$MERGED" >> "$GITHUB_OUTPUT" + echo "pr_title=$(echo "$PR_JSON" | jq -r '.title')" >> "$GITHUB_OUTPUT" + echo "base_sha=$(echo "$PR_JSON" | jq -r '.base.sha')" >> "$GITHUB_OUTPUT" + echo "head_sha=$(echo "$PR_JSON" | jq -r '.head.sha')" >> "$GITHUB_OUTPUT" + + - name: Add cherry-pick label + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + LABEL: ${{ steps.parse.outputs.label }} + PR_NUMBER: ${{ github.event.issue.number }} + run: | + gh api repos/${{ github.repository }}/labels \ + -f name="$LABEL" -f color="fbca04" -f description="Queued for cherry-pick" 2>/dev/null || true + gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/labels \ + -f "labels[]=$LABEL" + + - name: Notify if queued (not yet merged) + if: steps.check.outputs.merged != 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + LABEL: ${{ steps.parse.outputs.label }} + TARGET_BRANCH: ${{ steps.parse.outputs.target_branch }} + PR_NUMBER: ${{ github.event.issue.number }} + run: | + gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/comments \ + -f body="🏷️ Labeled \`$LABEL\` — backport to \`$TARGET_BRANCH\` will be created automatically when this PR is merged." + + - name: Checkout repository + if: steps.check.outputs.merged == 'true' + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Verify target branch exists + if: steps.check.outputs.merged == 'true' + env: + TARGET_BRANCH: ${{ steps.parse.outputs.target_branch }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ github.event.issue.number }} + run: | + if ! 
git ls-remote --exit-code --heads origin "$TARGET_BRANCH" > /dev/null 2>&1; then + gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/comments \ + -f body="❌ Cannot cherry-pick: branch \`$TARGET_BRANCH\` does not exist." + exit 1 + fi + + - name: Apply PR diff and push + if: steps.check.outputs.merged == 'true' + id: apply + env: + TARGET_BRANCH: ${{ steps.parse.outputs.target_branch }} + PR_NUMBER: ${{ github.event.issue.number }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + BACKPORT_BRANCH="backport/${PR_NUMBER}-to-${TARGET_BRANCH//\//-}" + echo "backport_branch=$BACKPORT_BRANCH" >> "$GITHUB_OUTPUT" + + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + + git checkout "$TARGET_BRANCH" + git checkout -b "$BACKPORT_BRANCH" + + # Download the PR's patch from GitHub (pure diff of the PR's changes) + gh api repos/${{ github.repository }}/pulls/${PR_NUMBER} \ + -H "Accept: application/vnd.github.v3.patch" > /tmp/pr.patch + + # Apply the patch + HAS_CONFLICTS="false" + CONFLICTED_FILES="" + + if git apply --check /tmp/pr.patch 2>/dev/null; then + # Clean apply + git apply /tmp/pr.patch + git add -A + git commit -m "Backport PR #${PR_NUMBER} to ${TARGET_BRANCH}" + elif git apply --3way /tmp/pr.patch; then + # Applied with 3-way merge (auto-resolved) + git add -A + git commit -m "Backport PR #${PR_NUMBER} to ${TARGET_BRANCH}" || true + else + # Has real conflicts — apply what we can + HAS_CONFLICTS="true" + CONFLICTED_FILES=$(git diff --name-only --diff-filter=U | tr '\n' ',' | sed 's/,$//') + # Take the incoming version for conflicted files + git diff --name-only --diff-filter=U | while read -r file; do + git checkout --theirs -- "$file" + done + git add -A + git commit -m "Backport PR #${PR_NUMBER} to ${TARGET_BRANCH} (conflicts)" || true + fi + + echo "has_conflicts=$HAS_CONFLICTS" >> "$GITHUB_OUTPUT" + echo "conflicted_files=$CONFLICTED_FILES" >> "$GITHUB_OUTPUT" + + git push origin 
"$BACKPORT_BRANCH" + + - name: Create backport PR + if: steps.check.outputs.merged == 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + TARGET_BRANCH: ${{ steps.parse.outputs.target_branch }} + VERSION: ${{ steps.parse.outputs.version }} + PR_TITLE: ${{ steps.check.outputs.pr_title }} + PR_NUMBER: ${{ github.event.issue.number }} + BACKPORT_BRANCH: ${{ steps.apply.outputs.backport_branch }} + run: | + TITLE="[${VERSION}] ${PR_TITLE}" + BODY="Backport of #${PR_NUMBER} to \`${TARGET_BRANCH}\`." + + PR_URL=$(gh pr create \ + --base "$TARGET_BRANCH" \ + --head "$BACKPORT_BRANCH" \ + --title "$TITLE" \ + --body "$BODY") + + gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/comments \ + -f body="✅ Backport PR created: ${PR_URL}" + + # ────────────────────────────────────────────────────────────── + # Trigger 2: PR merged → process any queued cherrypick/* labels + # ────────────────────────────────────────────────────────────── + handle-merge: + if: > + github.event_name == 'pull_request' && + github.event.pull_request.merged == true + runs-on: ubuntu-latest + steps: + - name: Collect cherry-pick labels + id: labels + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ github.event.pull_request.number }} + run: | + LABELS=$(gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/labels \ + --jq '[.[] | select(.name | startswith("cherrypick/")) | .name] | join(",")') + + if [ -z "$LABELS" ]; then + echo "No cherrypick labels found, nothing to do." 
+ echo "has_labels=false" >> "$GITHUB_OUTPUT" + else + echo "Found labels: $LABELS" + echo "has_labels=true" >> "$GITHUB_OUTPUT" + echo "labels=$LABELS" >> "$GITHUB_OUTPUT" + fi + + - name: Checkout repository + if: steps.labels.outputs.has_labels == 'true' + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download PR patch + if: steps.labels.outputs.has_labels == 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ github.event.pull_request.number }} + run: | + gh api repos/${{ github.repository }}/pulls/${PR_NUMBER} \ + -H "Accept: application/vnd.github.v3.patch" > /tmp/pr.patch + + - name: Process each cherry-pick label + if: steps.labels.outputs.has_labels == 'true' + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + LABELS: ${{ steps.labels.outputs.labels }} + PR_NUMBER: ${{ github.event.pull_request.number }} + PR_TITLE: ${{ github.event.pull_request.title }} + run: | + git config user.name "github-actions[bot]" + git config user.email "github-actions[bot]@users.noreply.github.com" + + IFS=',' read -ra LABEL_ARRAY <<< "$LABELS" + for LABEL in "${LABEL_ARRAY[@]}"; do + VERSION="${LABEL#cherrypick/}" + TARGET_BRANCH="release/$VERSION" + + echo "=== Processing backport to $TARGET_BRANCH ===" + + # Verify target branch exists + if ! git ls-remote --exit-code --heads origin "$TARGET_BRANCH" > /dev/null 2>&1; then + gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/comments \ + -f body="❌ Cannot cherry-pick to \`$TARGET_BRANCH\`: branch does not exist." 
+ continue + fi + + BACKPORT_BRANCH="backport/${PR_NUMBER}-to-${TARGET_BRANCH//\//-}" + + git checkout "$TARGET_BRANCH" + git checkout -b "$BACKPORT_BRANCH" + + # Apply the patch + HAS_CONFLICTS="false" + CONFLICTED_FILES="" + + if git apply --check /tmp/pr.patch 2>/dev/null; then + git apply /tmp/pr.patch + git add -A + git commit -m "Backport PR #${PR_NUMBER} to ${TARGET_BRANCH}" + elif git apply --3way /tmp/pr.patch; then + git add -A + git commit -m "Backport PR #${PR_NUMBER} to ${TARGET_BRANCH}" || true + else + HAS_CONFLICTS="true" + CONFLICTED_FILES=$(git diff --name-only --diff-filter=U | tr '\n' ',' | sed 's/,$//') + git diff --name-only --diff-filter=U | while read -r file; do + git checkout --theirs -- "$file" + done + git add -A + git commit -m "Backport PR #${PR_NUMBER} to ${TARGET_BRANCH} (conflicts)" || true + fi + + git push origin "$BACKPORT_BRANCH" + + # Build PR title and body + TITLE="[${VERSION}] ${PR_TITLE}" + BODY="Backport of #${PR_NUMBER} to \`${TARGET_BRANCH}\`." 
+ + PR_URL=$(gh pr create \ + --base "$TARGET_BRANCH" \ + --head "$BACKPORT_BRANCH" \ + --title "$TITLE" \ + --body "$BODY") + + gh api repos/${{ github.repository }}/issues/${PR_NUMBER}/comments \ + -f body="✅ Backport PR to \`$TARGET_BRANCH\` created: ${PR_URL}" + + # Clean up for next iteration + git checkout "$TARGET_BRANCH" + git branch -D "$BACKPORT_BRANCH" + done diff --git a/.github/workflows/pages.yaml b/.github/workflows/pages.yaml index 7b36333..bdef9c7 100644 --- a/.github/workflows/pages.yaml +++ b/.github/workflows/pages.yaml @@ -29,7 +29,7 @@ jobs: run: rm -rf /opt/hostedtoolcache - name: Checkout - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6 with: fetch-depth: 0 @@ -42,7 +42,7 @@ jobs: uses: actions/configure-pages@v5 - name: Upload Pages Artifacts - uses: actions/upload-pages-artifact@v3 + uses: actions/upload-pages-artifact@v4 with: path: './static' diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 7f521d7..8377aae 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -17,7 +17,7 @@ jobs: run: rm -rf /opt/hostedtoolcache - name: Checkout - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6 with: fetch-depth: 0 @@ -27,33 +27,33 @@ jobs: git config user.email "github-actions[bot]@users.noreply.github.com" - name: Set Up Go - uses: actions/setup-go@v6.1.0 + uses: actions/setup-go@v6 with: go-version-file: go.mod check-latest: true - name: Set Up QEMU - uses: docker/setup-qemu-action@v3.7.0 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3.11.1 + uses: docker/setup-buildx-action@v3 - name: Authenticate to GitHub Container Registry - uses: docker/login-action@v3.6.0 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} - name: Authenticate to DockerHub Container Registry - uses: docker/login-action@v3.6.0 + uses: docker/login-action@v3 
with: registry: docker.io username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v6.4.0 + uses: goreleaser/goreleaser-action@v6 with: distribution: goreleaser version: "~> v2" diff --git a/.github/workflows/testdata.yaml b/.github/workflows/testdata.yaml index e11f0f7..5854cd7 100644 --- a/.github/workflows/testdata.yaml +++ b/.github/workflows/testdata.yaml @@ -14,7 +14,7 @@ jobs: run: rm -rf /opt/hostedtoolcache - name: Checkout Repository - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6 - name: Fetch Hauler Binary run: curl -sfL https://get.hauler.dev | bash diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 1e8c1e0..2f09d55 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -20,7 +20,7 @@ jobs: run: rm -rf /opt/hostedtoolcache - name: Checkout - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6 with: fetch-depth: 0 @@ -30,13 +30,13 @@ jobs: git config user.email "github-actions[bot]@users.noreply.github.com" - name: Set Up Go - uses: actions/setup-go@v6.1.0 + uses: actions/setup-go@v6 with: go-version-file: go.mod check-latest: true - name: Install Go Releaser - uses: goreleaser/goreleaser-action@v6.4.0 + uses: goreleaser/goreleaser-action@v6 with: install-only: true @@ -51,13 +51,13 @@ jobs: make build-all - name: Upload Hauler Binaries - uses: actions/upload-artifact@v4.6.2 + uses: actions/upload-artifact@v6 with: name: hauler-binaries path: dist/* - name: Upload Coverage Report - uses: actions/upload-artifact@v4.6.2 + uses: actions/upload-artifact@v6 with: name: coverage-report path: coverage.out @@ -73,7 +73,7 @@ jobs: run: rm -rf /opt/hostedtoolcache - name: Checkout - uses: actions/checkout@v6.0.1 + uses: actions/checkout@v6 with: fetch-depth: 0 @@ -89,7 +89,7 @@ jobs: sudo apt-get install -y tree - name: Download Artifacts - uses: actions/download-artifact@v6.0.0 + uses: 
actions/download-artifact@v6 with: name: hauler-binaries path: dist @@ -250,6 +250,13 @@ jobs: hauler store save --filename store.tar.zst # verify via save with filename and platform (amd64) hauler store save --filename store-amd64.tar.zst --platform linux/amd64 + # verify via save with chunk-size (splits into haul-chunked_0.tar.zst, haul-chunked_1.tar.zst, ...) + hauler store save --filename haul-chunked.tar.zst --chunk-size 50M + # verify chunk files exist and original is removed + ls haul-chunked_*.tar.zst + ! test -f haul-chunked.tar.zst + # verify at least two chunks were produced + [ $(ls haul-chunked_*.tar.zst | wc -l) -ge 2 ] - name: Remove Hauler Store Contents run: | @@ -269,6 +276,14 @@ jobs: hauler store load --filename store.tar.zst --tempdir /opt # verify via load with filename and platform (amd64) hauler store load --filename store-amd64.tar.zst + # verify via load from chunks using explicit first chunk + rm -rf store + hauler store load --filename haul-chunked_0.tar.zst + hauler store info + # verify via load from chunks using base filename (auto-detect) + rm -rf store + hauler store load --filename haul-chunked.tar.zst + hauler store info - name: Verify Hauler Store Contents run: | @@ -291,7 +306,7 @@ jobs: - name: Remove Hauler Store Contents run: | - rm -rf store haul.tar.zst store.tar.zst store-amd64.tar.zst + rm -rf store haul.tar.zst store.tar.zst store-amd64.tar.zst haul-chunked_*.tar.zst hauler store info - name: Verify - hauler store sync @@ -303,6 +318,17 @@ jobs: hauler store sync --filename testdata/hauler-manifest-pipeline.yaml --filename testdata/hauler-manifest.yaml # need more tests here + - name: Verify - hauler store sync (image list) + run: | + # verify via local image list file + hauler store sync --image-txt testdata/images.txt + # verify via multiple image list files + hauler store sync --image-txt testdata/images.txt --image-txt testdata/images.txt + # verify via remote image list file + hauler store sync --image-txt 
https://raw.githubusercontent.com/hauler-dev/hauler/main/testdata/images.txt + # confirm images are present in the store + hauler store info | grep 'busybox' + - name: Verify - hauler store serve run: | hauler store serve --help @@ -468,7 +494,7 @@ jobs: hauler store info - name: Upload Hauler Report - uses: actions/upload-artifact@v4.6.2 + uses: actions/upload-artifact@v6 with: name: hauler-report path: hauler-report.txt diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 0859d49..a0ac032 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -15,7 +15,6 @@ release: env: - vpkg=hauler.dev/go/hauler/internal/version - - cosign_version=v2.2.3+carbide.3 builds: - dir: ./cmd/hauler/. @@ -88,4 +87,4 @@ dockers_v2: "org.opencontainers.image.name": "{{.ProjectName}}-debug" "org.opencontainers.image.revision": "{{.FullCommit}}" "org.opencontainers.image.source": "{{.GitURL}}" - "org.opencontainers.image.version": "{{.Version}}" \ No newline at end of file + "org.opencontainers.image.version": "{{.Version}}" diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md new file mode 100644 index 0000000..dde27a7 --- /dev/null +++ b/DEVELOPMENT.md @@ -0,0 +1,155 @@ +# Development Guide + +This document covers how to build `hauler` locally and how the project's branching strategy works. + +It's intended for contributors making code changes or maintainers managing releases. + +--- + +## Local Build + +### Prerequisites + +- **Git** - version control of the repository +- **Go** — check `go.mod` for the minimum required version +- **Make** - optional... for common commands used for builds +- **Docker** - optional... for container image builds + +### Clone the Repository + +```bash +git clone https://github.com/hauler-dev/hauler.git +cd hauler +``` + +### Build the Binary + +Using `make`... + +```bash +# run this command from the project root +make build + +# the compiled binary will be output to a directory structure and you can run it directly... 
+./dist/hauler_linux_amd64_v1/hauler +./dist/hauler_linux_arm64_v8.0/hauler +./dist/hauler_darwin_amd64_v1/hauler +./dist/hauler_darwin_arm64_v8.0/hauler +./dist/hauler_windows_amd64_v1/hauler.exe +./dist/hauler_windows_arm64_v8.0/hauler.exe +``` + +Using `go`... + +```bash +# run this command from the project root +go build -o hauler ./cmd/hauler + +# the compiled binary will be output to the project root and you can run it directly... +./hauler version +``` + +### Run Tests + +Using `make`... + +```bash +make test +``` + +Using `go`... + +```bash +go test ./... +``` + +### Useful Tips + +- The `--store` flag defaults to `./store` in the current working directory during local testing, so running `./hauler store add ...` from the project root is safe and self-contained. Use `rm -rf store` in the working directory to clear. +- Set `--log-level debug` when developing to get verbose output. + +--- + +## Branching Strategy + +Hauler uses a **main-first, release branch** model. All development flows through `main` and `release/x.x` branches are maintained for each minor version to support patching older release lines in parallel. + +### Branch Structure + +``` +main ← source of truth, all development targets here +release/1.3 ← 1.3.x patch line +release/1.4 ← 1.4.x patch line +``` + +Release tags (`v1.4.1`, `v1.3.2`, etc.) are always cut from the corresponding `release/X.Y` branch, never directly from `main`. + +### Where to Target Your Changes + +All pull requests should target `main` by default and maintainers are responsible for cherry picking fixes onto release branches as part of the patch release process. 
+ +| Change Type | Target branch | +| :---------: | :-----------: | +| New features | `main` | +| Bug fixes | `main` | +| Security patches | `main` (expedited backport to affected branches) | +| Release-specific fix (see below) | `release/X.Y` directly | + +### Creating a New Release Branch + +When `main` is ready to ship a new minor version, a release branch is cut: + +```bash +git checkout main +git pull origin main +git checkout -b release/1.4 +git push origin release/1.4 +``` + +The first release is then tagged from that branch: + +```bash +git tag v1.4.0 +git push origin v1.4.0 +``` + +Development on `main` immediately continues toward the next minor. + +### Backporting a Fix to a Release Branch + +When a bug fix merged to `main` also needs to apply to an active release line, cherry-pick the commit onto the release branch and open a PR targeting it: + +```bash +git checkout release/1.3 +git pull origin release/1.3 +git checkout -b backport/fix-description-to-1.3 +git cherry-pick <commit-sha> +git push origin backport/fix-description-to-1.3 +``` + +Open a PR targeting `release/1.3` and reference the original PR in the description. If the cherry-pick doesn't apply cleanly, resolve conflicts and note them in the PR. + +### Fixes That Only Apply to an Older Release Line + +Sometimes a bug exists in an older release but the relevant code has been removed or significantly changed in `main` — making a forward-port unnecessary or nonsensical. In these cases, it's acceptable to open a PR directly against the affected `release/X.Y` branch. + +When doing this, the PR description must explain: + +- Which versions are affected +- Why the fix does not apply to `main` or newer release lines (e.g., "this code path was removed in 1.4 when X was refactored") + +This keeps the history auditable and prevents future contributors from wondering why the fix never made it forward. 
+ +### Summary + +``` + ┌─────────────────────────────────────────► main (next minor) + │ + │ cherry-pick / backport PRs + │ ─────────────────────────► release/1.4 (v1.4.0, v1.4.1 ...) + │ + │ ─────────────────────────► release/1.3 (v1.3.0, v1.3.1 ...) + │ + │ direct fix (older-only bug) + │ ─────────────────────────► release/1.2 (critical fixes only) +``` diff --git a/README.md b/README.md index 23dc051..37da585 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,11 @@ For more information, please review the **[Hauler Documentation](https://hauler. ## Recent Changes +### In Hauler v2.0.0... + +- Removed support for `apiVersion` of `v1alpha` and removed the automated conversion functionality to `v1`. + - Please note that notices have been provided in this `README`, the `docs`, and in `cli` warnings since Hauler `v1.2.x`. + ### In Hauler v1.4.0... - Added a notice to `hauler store sync --products/--product-registry` to warn users the default registry will be updated in a future release. @@ -19,23 +24,22 @@ For more information, please review the **[Hauler Documentation](https://hauler. - `!!! WARNING !!! [--products] will be updating its default registry in a future release...` - `!!! WARNING !!! [--product-registry] will be updating its default registry in a future release...` -### In Hauler v1.2.0... +### From older releases... -- Upgraded the `apiVersion` to `v1` from `v1alpha1` - - Users are able to use `v1` and `v1alpha1`, but `v1alpha1` is now deprecated and will be removed in a future release. We will update the community when we fully deprecate and remove the functionality of `v1alpha1` - - Users will see logging notices when using the old `apiVersion` such as... - - `!!! DEPRECATION WARNING !!! 
apiVersion [v1alpha1] will be removed in a future release...` ---- - Updated the behavior of `hauler store load` to default to loading a `haul` with the name of `haul.tar.zst` and requires the flag of `--filename/-f` to load a `haul` with a different name - Users can load multiple `hauls` by specifying multiple flags of `--filename/-f` - updated command usage: `hauler store load --filename hauling-hauls.tar.zst` - previous command usage (do not use): `hauler store load hauling-hauls.tar.zst` + --- + - Updated the behavior of `hauler store sync` to default to syncing a `manifest` with the name of `hauler-manifest.yaml` and requires the flag of `--filename/-f` to sync a `manifest` with a different name - Users can sync multiple `manifests` by specifying multiple flags of `--filename/-f` - updated command usage: `hauler store sync --filename hauling-hauls-manifest.yaml` - previous command usage (do not use): `hauler store sync --files hauling-hauls-manifest.yaml` + --- + Please review the documentation for any additional [Known Limits, Issues, and Notices](https://docs.hauler.dev/docs/known-limits)! 
## Installation @@ -64,7 +68,6 @@ brew install hauler ## Acknowledgements `Hauler` wouldn't be possible without the open-source community, but there are a few projects that stand out: - -- [oras cli](https://github.com/oras-project/oras) -- [cosign](https://github.com/sigstore/cosign) +- [containerd](https://github.com/containerd/containerd) - [go-containerregistry](https://github.com/google/go-containerregistry) +- [cosign](https://github.com/sigstore/cosign) \ No newline at end of file diff --git a/cmd/hauler/cli/cli.go b/cmd/hauler/cli/cli.go index 3cb68b8..ff64527 100644 --- a/cmd/hauler/cli/cli.go +++ b/cmd/hauler/cli/cli.go @@ -4,6 +4,7 @@ import ( "context" cranecmd "github.com/google/go-containerregistry/cmd/crane/cmd" + "github.com/sirupsen/logrus" "github.com/spf13/cobra" "hauler.dev/go/hauler/internal/flags" "hauler.dev/go/hauler/pkg/consts" @@ -20,6 +21,14 @@ func New(ctx context.Context, ro *flags.CliRootOpts) *cobra.Command { l.SetLevel(ro.LogLevel) l.Debugf("running cli command [%s]", cmd.CommandPath()) + // Suppress WARN-level messages from containerd and other + // libraries that use the global logrus logger. 
+ if ro.LogLevel == "debug" { + logrus.SetLevel(logrus.DebugLevel) + } else { + logrus.SetLevel(logrus.ErrorLevel) + } + return nil }, RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/hauler/cli/store.go b/cmd/hauler/cli/store.go index 089ec79..ccc03f0 100644 --- a/cmd/hauler/cli/store.go +++ b/cmd/hauler/cli/store.go @@ -216,7 +216,7 @@ func addStoreSave(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman func addStoreInfo(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Command { o := &flags.InfoOpts{StoreRootOpts: rso} - var allowedValues = []string{"image", "chart", "file", "sigs", "atts", "sbom", "all"} + var allowedValues = []string{"image", "chart", "file", "sigs", "atts", "sbom", "referrer", "all"} cmd := &cobra.Command{ Use: "info", @@ -250,7 +250,10 @@ func addStoreCopy(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comman cmd := &cobra.Command{ Use: "copy", Short: "Copy all store content to another location", - Args: cobra.ExactArgs(1), + Example: ` # supported copy target prefixes + registry:// | reg:// | oci:// - Pushes the store to an OCI registry + directory:// | dir:// - Extracts the store to a directory`, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -291,14 +294,14 @@ func addStoreAddFile(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Com cmd := &cobra.Command{ Use: "file", Short: "Add a file to the store", - Example: `# fetch local file -hauler store add file file.txt + Example: ` # fetch local file + hauler store add file file.txt -# fetch remote file -hauler store add file https://get.rke2.io/install.sh + # fetch remote file + hauler store add file https://get.rke2.io/install.sh -# fetch remote file and assign new name -hauler store add file https://get.hauler.dev --name hauler-install.sh`, + # fetch remote file and assign new name + hauler store add file https://get.hauler.dev --name hauler-install.sh`, Args: 
cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -322,24 +325,24 @@ func addStoreAddImage(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Co cmd := &cobra.Command{ Use: "image", Short: "Add a image to the store", - Example: `# fetch image -hauler store add image busybox + Example: ` # fetch image + hauler store add image busybox -# fetch image with repository and tag -hauler store add image library/busybox:stable + # fetch image with repository and tag + hauler store add image library/busybox:stable -# fetch image with full image reference and specific platform -hauler store add image ghcr.io/hauler-dev/hauler-debug:v1.2.0 --platform linux/amd64 + # fetch image with full image reference and specific platform + hauler store add image ghcr.io/hauler-dev/hauler-debug:v1.2.0 --platform linux/amd64 -# fetch image with full image reference via digest -hauler store add image gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5 + # fetch image with full image reference via digest + hauler store add image gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5 -# fetch image with full image reference, specific platform, and signature verification -curl -sfOL https://raw.githubusercontent.com/rancherfederal/carbide-releases/main/carbide-key.pub -hauler store add image rgcrprod.azurecr.us/rancher/rke2-runtime:v1.31.5-rke2r1 --platform linux/amd64 --key carbide-key.pub + # fetch image with full image reference, specific platform, and signature verification + curl -sfOL https://raw.githubusercontent.com/rancherfederal/carbide-releases/main/carbide-key.pub + hauler store add image rgcrprod.azurecr.us/rancher/rke2-runtime:v1.31.5-rke2r1 --platform linux/amd64 --key carbide-key.pub -# fetch image and rewrite path -hauler store add image busybox --rewrite custom-path/busybox:latest`, + # fetch image and rewrite path + hauler store add image 
busybox --rewrite custom-path/busybox:latest`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -363,26 +366,26 @@ func addStoreAddChart(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Co cmd := &cobra.Command{ Use: "chart", Short: "Add a helm chart to the store", - Example: `# fetch local helm chart -hauler store add chart path/to/chart/directory --repo . + Example: ` # fetch local helm chart + hauler store add chart path/to/chart/directory --repo . -# fetch local compressed helm chart -hauler store add chart path/to/chart.tar.gz --repo . + # fetch local compressed helm chart + hauler store add chart path/to/chart.tar.gz --repo . -# fetch remote oci helm chart -hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev + # fetch remote oci helm chart + hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev -# fetch remote oci helm chart with version -hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --version 1.2.0 + # fetch remote oci helm chart with version + hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --version 1.2.0 -# fetch remote helm chart -hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable + # fetch remote helm chart + hauler store add chart rancher --repo https://releases.rancher.com/server-charts/stable -# fetch remote helm chart with specific version -hauler store add chart rancher --repo https://releases.rancher.com/server-charts/latest --version 2.10.1 + # fetch remote helm chart with specific version + hauler store add chart rancher --repo https://releases.rancher.com/server-charts/latest --version 2.10.1 -# fetch remote helm chart and rewrite path -hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --rewrite custom-path/hauler-chart:latest`, + # fetch remote helm chart and rewrite path + hauler store add chart hauler-helm --repo oci://ghcr.io/hauler-dev --rewrite 
custom-path/hauler-chart:latest`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() @@ -405,26 +408,26 @@ func addStoreRemove(rso *flags.StoreRootOpts, ro *flags.CliRootOpts) *cobra.Comm cmd := &cobra.Command{ Use: "remove ", Short: "(EXPERIMENTAL) Remove an artifact from the content store", - Example: `# remove an image using full store reference -hauler store info -hauler store remove index.docker.io/library/busybox:stable + Example: ` # remove an image using full store reference + hauler store info + hauler store remove index.docker.io/library/busybox:stable -# remove a chart using full store reference -hauler store info -hauler store remove hauler/rancher:2.8.4 + # remove a chart using full store reference + hauler store info + hauler store remove hauler/rancher:2.8.4 -# remove a file using full store reference -hauler store info -hauler store remove hauler/rke2-install.sh + # remove a file using full store reference + hauler store info + hauler store remove hauler/rke2-install.sh -# remove any artifact with the latest tag -hauler store remove :latest + # remove any artifact with the latest tag + hauler store remove :latest -# remove any artifact with 'busybox' in the reference -hauler store remove busybox + # remove any artifact with 'busybox' in the reference + hauler store remove busybox -# force remove without verification -hauler store remove busybox:latest --force`, + # force remove without verification + hauler store remove busybox:latest --force`, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() diff --git a/cmd/hauler/cli/store/add.go b/cmd/hauler/cli/store/add.go index f5432c5..9f92288 100644 --- a/cmd/hauler/cli/store/add.go +++ b/cmd/hauler/cli/store/add.go @@ -25,6 +25,7 @@ import ( "hauler.dev/go/hauler/pkg/getter" "hauler.dev/go/hauler/pkg/log" "hauler.dev/go/hauler/pkg/reference" + "hauler.dev/go/hauler/pkg/retry" 
"hauler.dev/go/hauler/pkg/store" ) @@ -52,7 +53,7 @@ func storeFile(ctx context.Context, s *store.Layout, fi v1.File) error { } l.Infof("adding file [%s] to the store as [%s]", fi.Path, ref.Name()) - _, err = s.AddOCI(ctx, f, ref.Name()) + _, err = s.AddArtifact(ctx, f, ref.Name()) if err != nil { return err } @@ -73,25 +74,28 @@ func AddImageCmd(ctx context.Context, o *flags.AddImageOpts, s *store.Layout, re // Check if the user provided a key. if o.Key != "" { // verify signature using the provided key. - err := cosign.VerifySignature(ctx, s, o.Key, o.Tlog, cfg.Name, rso, ro) + err := cosign.VerifySignature(ctx, o.Key, o.Tlog, cfg.Name, rso, ro) if err != nil { return err } l.Infof("signature verified for image [%s]", cfg.Name) } else if o.CertIdentityRegexp != "" || o.CertIdentity != "" { - // verify signature using keyless details + // verify signature using keyless details. + // Keyless (Fulcio) certificates expire after ~10 minutes, so the transparency + // log is always required to prove the cert was valid at signing time — ignore + // --use-tlog-verify for this path and always check tlog. 
l.Infof("verifying keyless signature for [%s]", cfg.Name) - err := cosign.VerifyKeylessSignature(ctx, s, o.CertIdentity, o.CertIdentityRegexp, o.CertOidcIssuer, o.CertOidcIssuerRegexp, o.CertGithubWorkflowRepository, o.Tlog, cfg.Name, rso, ro) + err := cosign.VerifyKeylessSignature(ctx, o.CertIdentity, o.CertIdentityRegexp, o.CertOidcIssuer, o.CertOidcIssuerRegexp, o.CertGithubWorkflowRepository, cfg.Name, rso, ro) if err != nil { return err } l.Infof("keyless signature verified for image [%s]", cfg.Name) } - return storeImage(ctx, s, cfg, o.Platform, rso, ro, o.Rewrite) + return storeImage(ctx, s, cfg, o.Platform, o.ExcludeExtras, rso, ro, o.Rewrite) } -func storeImage(ctx context.Context, s *store.Layout, i v1.Image, platform string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts, rewrite string) error { +func storeImage(ctx context.Context, s *store.Layout, i v1.Image, platform string, excludeExtras bool, rso *flags.StoreRootOpts, ro *flags.CliRootOpts, rewrite string) error { l := log.FromContext(ctx) if !ro.IgnoreErrors { @@ -114,8 +118,10 @@ func storeImage(ctx context.Context, s *store.Layout, i v1.Image, platform strin } } - // copy and sig verification - err = cosign.SaveImage(ctx, s, r.Name(), platform, rso, ro) + // fetch image along with any associated signatures and attestations + err = retry.Operation(ctx, rso, ro, func() error { + return s.AddImage(ctx, r.Name(), platform, excludeExtras) + }) if err != nil { if ro.IgnoreErrors { l.Warnf("unable to add image [%s] to store: %v... 
skipping...", r.Name(), err) @@ -127,39 +133,61 @@ func storeImage(ctx context.Context, s *store.Layout, i v1.Image, platform strin } if rewrite != "" { + rawRewrite := rewrite rewrite = strings.TrimPrefix(rewrite, "/") if !strings.Contains(rewrite, ":") { - rewrite = strings.Join([]string{rewrite, r.(name.Tag).TagStr()}, ":") + if tag, ok := r.(name.Tag); ok { + rewrite = rewrite + ":" + tag.TagStr() + } else { + return fmt.Errorf("cannot rewrite digest reference [%s] without an explicit tag in the rewrite", r.Name()) + } } // rename image name in store newRef, err := name.ParseReference(rewrite) if err != nil { - l.Errorf("unable to parse rewrite name: %w", err) + return fmt.Errorf("unable to parse rewrite name [%s]: %w", rewrite, err) + } + if err := rewriteReference(ctx, s, r, newRef, rawRewrite); err != nil { + return err } - rewriteReference(ctx, s, r, newRef) } l.Infof("successfully added image [%s]", r.Name()) return nil } -func rewriteReference(ctx context.Context, s *store.Layout, oldRef name.Reference, newRef name.Reference) error { +func rewriteReference(ctx context.Context, s *store.Layout, oldRef name.Reference, newRef name.Reference, rawRewrite string) error { l := log.FromContext(ctx) - s.OCI.LoadIndex() + if err := s.OCI.LoadIndex(); err != nil { + return fmt.Errorf("failed to load index: %w", err) + } //TODO: improve string manipulation oldRefContext := oldRef.Context() newRefContext := newRef.Context() oldRepo := oldRefContext.RepositoryStr() newRepo := newRefContext.RepositoryStr() - oldTag := oldRef.(name.Tag).TagStr() - newTag := newRef.(name.Tag).TagStr() - oldRegistry := strings.TrimPrefix(oldRefContext.RegistryStr(), "index.") - newRegistry := strings.TrimPrefix(newRefContext.RegistryStr(), "index.") - // If new registry not set in rewrite, keep old registry instead of defaulting to docker.io - if newRegistry == "docker.io" && oldRegistry != "docker.io" { + + oldTag := oldRef.Identifier() + if tag, ok := oldRef.(name.Tag); ok { + oldTag = 
tag.TagStr() + } + newTag := newRef.Identifier() + if tag, ok := newRef.(name.Tag); ok { + newTag = tag.TagStr() + } + + // ContainerdImageNameKey stores annotationRef.Name() verbatim, which includes the + // "index.docker.io" prefix for docker.io images. Do not strip "index." here or the + // comparison will never match images stored by writeImage/writeIndex. + oldRegistry := oldRefContext.RegistryStr() + newRegistry := newRefContext.RegistryStr() + // If user omitted a registry in the rewrite string, go-containerregistry defaults to + // index.docker.io. Preserve the original registry when the source is non-docker. + if newRegistry == "index.docker.io" && !strings.HasPrefix(rawRewrite, "docker.io") && !strings.HasPrefix(rawRewrite, "index.docker.io") { newRegistry = oldRegistry + newRepo = strings.TrimPrefix(newRepo, "library/") //if rewrite has library/ prefix in path it is stripped off unless registry specified in rewrite } oldTotal := oldRepo + ":" + oldTag newTotal := newRepo + ":" + newTag @@ -349,7 +377,7 @@ func storeChart(ctx context.Context, s *store.Layout, cfg v1.Chart, opts *flags. return err } - if _, err := s.AddOCI(ctx, chrt, ref.Name()); err != nil { + if _, err := s.AddArtifact(ctx, chrt, ref.Name()); err != nil { return err } if err := s.OCI.SaveIndex(); err != nil { @@ -494,14 +522,16 @@ func storeChart(ctx context.Context, s *store.Layout, cfg v1.Chart, opts *flags. } imgCfg := v1.Image{Name: image} - if err := storeImage(ctx, s, imgCfg, opts.Platform, rso, ro, ""); err != nil { + if err := storeImage(ctx, s, imgCfg, opts.Platform, opts.ExcludeExtras, rso, ro, ""); err != nil { if ro.IgnoreErrors { l.Warnf("%s ↳ failed to store image [%s]: %v... 
skipping...", prefix, image, err) continue } return fmt.Errorf("failed to store image [%s]: %w", image, err) } - s.OCI.LoadIndex() + if err := s.OCI.LoadIndex(); err != nil { + return err + } if err := s.OCI.SaveIndex(); err != nil { return err } @@ -521,7 +551,7 @@ func storeChart(ctx context.Context, s *store.Layout, cfg v1.Chart, opts *flags. var depCfg v1.Chart var err error - if strings.HasPrefix(dep.Repository, "file://") { + if strings.HasPrefix(dep.Repository, "file://") || dep.Repository == "" { subchartPath := filepath.Join(chartPath, "charts", dep.Name) depCfg = v1.Chart{Name: subchartPath, RepoURL: "", Version: ""} @@ -558,7 +588,10 @@ func storeChart(ctx context.Context, s *store.Layout, cfg v1.Chart, opts *flags. } // if rewrite omits a tag... keep the existing tag - oldTag := ref.(name.Tag).TagStr() + oldTag := ref.Identifier() + if tag, ok := ref.(name.Tag); ok { + oldTag = tag.TagStr() + } if !strings.Contains(rewrite, ":") { rewrite = strings.Join([]string{rewrite, oldTag}, ":") newRef, err = name.ParseReference(rewrite) @@ -568,14 +601,19 @@ func storeChart(ctx context.Context, s *store.Layout, cfg v1.Chart, opts *flags. 
} // rename chart name in store - s.OCI.LoadIndex() + if err := s.OCI.LoadIndex(); err != nil { + return err + } oldRefContext := ref.Context() newRefContext := newRef.Context() oldRepo := oldRefContext.RepositoryStr() newRepo := newRefContext.RepositoryStr() - newTag := newRef.(name.Tag).TagStr() + newTag := newRef.Identifier() + if tag, ok := newRef.(name.Tag); ok { + newTag = tag.TagStr() + } oldTotal := oldRepo + ":" + oldTag newTotal := newRepo + ":" + newTag diff --git a/cmd/hauler/cli/store/add_test.go b/cmd/hauler/cli/store/add_test.go new file mode 100644 index 0000000..14b2a82 --- /dev/null +++ b/cmd/hauler/cli/store/add_test.go @@ -0,0 +1,857 @@ +package store + +import ( + "net" + "net/http/httptest" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/registry" + "github.com/google/go-containerregistry/pkg/v1/remote" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + helmchart "helm.sh/helm/v3/pkg/chart" + "helm.sh/helm/v3/pkg/chartutil" + + "hauler.dev/go/hauler/internal/flags" + v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1" + "hauler.dev/go/hauler/pkg/consts" +) + +// newLocalhostRegistry creates an in-memory OCI registry server listening on +// localhost (rather than 127.0.0.1) so go-containerregistry's Scheme() method +// automatically selects plain HTTP for "localhost:PORT/…" refs. This is +// required for tests that exercise storeImage, which calls s.AddImage without +// any custom transport options. 
+func newLocalhostRegistry(t *testing.T) (host string, remoteOpts []remote.Option) { + t.Helper() + l, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("newLocalhostRegistry listen: %v", err) + } + srv := httptest.NewUnstartedServer(registry.New()) + srv.Listener = l + srv.Start() + t.Cleanup(srv.Close) + host = strings.TrimPrefix(srv.URL, "http://") + remoteOpts = []remote.Option{remote.WithTransport(srv.Client().Transport)} + return host, remoteOpts +} + +// chartTestdataDir is the relative path from cmd/hauler/cli/store/ to the +// top-level testdata directory, matching the convention in chart_test.go. +// It must remain relative so that url.ParseRequestURI rejects it (an absolute +// path would be mistakenly treated as a URL by chart.NewChart's isUrl check). +const chartTestdataDir = "../../../../testdata" + +// -------------------------------------------------------------------------- +// Unit tests — unexported helpers +// -------------------------------------------------------------------------- + +func TestImagesFromChartAnnotations(t *testing.T) { + tests := []struct { + name string + chart *helmchart.Chart + want []string + wantErr bool + }{ + { + name: "nil chart returns nil", + chart: nil, + want: nil, + }, + { + name: "no annotations returns nil", + chart: &helmchart.Chart{Metadata: &helmchart.Metadata{}}, + want: nil, + }, + { + name: "helm.sh/images annotation returns sorted refs", + chart: &helmchart.Chart{ + Metadata: &helmchart.Metadata{ + Annotations: map[string]string{ + "helm.sh/images": "- image: nginx:1.24\n- image: alpine:3.18\n", + }, + }, + }, + want: []string{"alpine:3.18", "nginx:1.24"}, + }, + { + name: "both annotations with overlap returns deduped union", + chart: &helmchart.Chart{ + Metadata: &helmchart.Metadata{ + Annotations: map[string]string{ + "helm.sh/images": "- image: nginx:1.24\n- image: alpine:3.18\n", + "images": "- image: nginx:1.24\n- image: busybox:latest\n", + }, + }, + }, + want: 
[]string{"alpine:3.18", "busybox:latest", "nginx:1.24"}, + }, + { + name: "malformed YAML returns error", + chart: &helmchart.Chart{ + Metadata: &helmchart.Metadata{ + Annotations: map[string]string{ + // Unclosed flow sequence → YAML syntax error. + "helm.sh/images": "- image: [unclosed bracket", + }, + }, + }, + wantErr: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got, err := imagesFromChartAnnotations(tc.chart) + if (err != nil) != tc.wantErr { + t.Fatalf("error = %v, wantErr %v", err, tc.wantErr) + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("got %v, want %v", got, tc.want) + } + }) + } +} + +func TestImagesFromImagesLock(t *testing.T) { + writeFile := func(dir, fname, content string) { + t.Helper() + if err := os.WriteFile(filepath.Join(dir, fname), []byte(content), 0o644); err != nil { + t.Fatalf("write %s: %v", fname, err) + } + } + + t.Run("images.lock with image lines returns sorted refs", func(t *testing.T) { + dir := t.TempDir() + writeFile(dir, "images.lock", "image: rancher/rancher:v2.9\nimage: nginx:1.24\n") + got, err := imagesFromImagesLock(dir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + want := []string{"nginx:1.24", "rancher/rancher:v2.9"} + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } + }) + + t.Run("images-lock.yaml returns refs", func(t *testing.T) { + dir := t.TempDir() + writeFile(dir, "images-lock.yaml", "image: alpine:3.18\n") + got, err := imagesFromImagesLock(dir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + want := []string{"alpine:3.18"} + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } + }) + + t.Run("empty dir returns nil", func(t *testing.T) { + dir := t.TempDir() + got, err := imagesFromImagesLock(dir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != nil { + t.Errorf("expected nil, got %v", got) + } + }) + + t.Run("multiple lock files merged and deduped", 
func(t *testing.T) { + dir := t.TempDir() + writeFile(dir, "images.lock", "image: nginx:1.24\nimage: alpine:3.18\n") + writeFile(dir, "images-lock.yaml", "image: nginx:1.24\nimage: busybox:latest\n") + got, err := imagesFromImagesLock(dir) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + want := []string{"alpine:3.18", "busybox:latest", "nginx:1.24"} + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } + }) +} + +func TestApplyDefaultRegistry(t *testing.T) { + tests := []struct { + name string + img string + registry string + want string + wantErr bool + }{ + { + name: "empty img returns empty", + img: "", + registry: "myregistry.io", + want: "", + }, + { + name: "empty registry returns img unchanged", + img: "rancher/rancher:v2.9", + registry: "", + want: "rancher/rancher:v2.9", + }, + { + name: "img without registry gets registry prepended", + img: "rancher/rancher:v2.9", + registry: "myregistry.io", + want: "myregistry.io/rancher/rancher:v2.9", + }, + { + name: "img with existing registry unchanged", + img: "ghcr.io/rancher/rancher:v2.9", + registry: "myregistry.io", + want: "ghcr.io/rancher/rancher:v2.9", + }, + { + name: "invalid ref with spaces returns error", + img: "invalid ref with spaces", + registry: "myregistry.io", + wantErr: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got, err := applyDefaultRegistry(tc.img, tc.registry) + if (err != nil) != tc.wantErr { + t.Fatalf("error = %v, wantErr %v", err, tc.wantErr) + } + if !tc.wantErr && got != tc.want { + t.Errorf("got %q, want %q", got, tc.want) + } + }) + } +} + +func TestRewriteReference(t *testing.T) { + ctx := newTestContext(t) + + t.Run("valid rewrite updates store annotations", func(t *testing.T) { + host, rOpts := newTestRegistry(t) + seedImage(t, host, "src/repo", "v1", rOpts...) 
+ + s := newTestStore(t) + if err := s.AddImage(ctx, host+"/src/repo:v1", "", false, rOpts...); err != nil { + t.Fatalf("AddImage: %v", err) + } + + oldRef, err := name.NewTag(host+"/src/repo:v1", name.Insecure) + if err != nil { + t.Fatalf("parse oldRef: %v", err) + } + newRef, err := name.NewTag(host+"/dst/repo:v2", name.Insecure) + if err != nil { + t.Fatalf("parse newRef: %v", err) + } + + rawRewrite := newRef.String() + + if err := rewriteReference(ctx, s, oldRef, newRef, rawRewrite); err != nil { + t.Fatalf("rewriteReference: %v", err) + } + + assertArtifactInStore(t, s, "dst/repo:v2") + }) + + t.Run("old ref not found returns error", func(t *testing.T) { + s := newTestStore(t) + oldRef, _ := name.NewTag("docker.io/missing/repo:v1") + newRef, _ := name.NewTag("docker.io/new/repo:v2") + rawRewrite := newRef.String() + + err := rewriteReference(ctx, s, oldRef, newRef, rawRewrite) + if err == nil { + t.Fatal("expected error, got nil") + } + if !strings.Contains(err.Error(), "could not find") { + t.Errorf("expected 'could not find' in error, got: %v", err) + } + }) + + // Tests for the registry-preservation / library/-stripping logic (lines 188-191). + // go-containerregistry normalises bare single-name Docker Hub refs (e.g. "nginx:latest") + // to "index.docker.io/library/nginx:latest". When the rewrite string omits a registry, + // rewriteReference must (a) preserve the source registry and (b) strip the injected + // "library/" prefix so that the stored ref looks like "nginx:v2", not "library/nginx:v2". 
+ + t.Run("path-only rewrite strips library/ prefix from docker hub official image", func(t *testing.T) { + s := newTestStore(t) + seedStoreDescriptor(t, s, map[string]string{ + ocispec.AnnotationRefName: "library/nginx:latest", + consts.ContainerdImageNameKey: "index.docker.io/library/nginx:latest", + }) + + oldRef, _ := name.NewTag("nginx:latest") // → index.docker.io/library/nginx:latest + newRef, _ := name.NewTag("nginx:v2") // → index.docker.io/library/nginx:v2 + rawRewrite := "nginx:v2" + + if err := rewriteReference(ctx, s, oldRef, newRef, rawRewrite); err != nil { + t.Fatalf("rewriteReference: %v", err) + } + // library/ must be stripped; registry stays index.docker.io + assertAnnotationsInStore(t, s, "nginx:v2", "index.docker.io/nginx:v2") + }) + + t.Run("explicit docker.io rewrite preserves library/ prefix", func(t *testing.T) { + s := newTestStore(t) + seedStoreDescriptor(t, s, map[string]string{ + ocispec.AnnotationRefName: "library/nginx:latest", + consts.ContainerdImageNameKey: "index.docker.io/library/nginx:latest", + }) + + oldRef, _ := name.NewTag("nginx:latest") + newRef, _ := name.NewTag("docker.io/nginx:v2") // → index.docker.io/library/nginx:v2 + rawRewrite := "docker.io/nginx:v2" + + if err := rewriteReference(ctx, s, oldRef, newRef, rawRewrite); err != nil { + t.Fatalf("rewriteReference: %v", err) + } + // rawRewrite starts with "docker.io" → condition must NOT fire → library/ preserved + assertAnnotationsInStore(t, s, "library/nginx:v2", "index.docker.io/library/nginx:v2") + }) + + t.Run("explicit index.docker.io rewrite preserves library/ prefix", func(t *testing.T) { + s := newTestStore(t) + seedStoreDescriptor(t, s, map[string]string{ + ocispec.AnnotationRefName: "library/nginx:latest", + consts.ContainerdImageNameKey: "index.docker.io/library/nginx:latest", + }) + + oldRef, _ := name.NewTag("nginx:latest") + newRef, _ := name.NewTag("index.docker.io/nginx:v2") // → index.docker.io/library/nginx:v2 + rawRewrite := 
"index.docker.io/nginx:v2" + + if err := rewriteReference(ctx, s, oldRef, newRef, rawRewrite); err != nil { + t.Fatalf("rewriteReference: %v", err) + } + // rawRewrite starts with "index.docker.io" → condition must NOT fire → library/ preserved + assertAnnotationsInStore(t, s, "library/nginx:v2", "index.docker.io/library/nginx:v2") + }) + + t.Run("non-docker source with path-only rewrite preserves original registry", func(t *testing.T) { + host, rOpts := newTestRegistry(t) + seedImage(t, host, "src/repo", "v1", rOpts...) + + s := newTestStore(t) + if err := s.AddImage(ctx, host+"/src/repo:v1", "", false, rOpts...); err != nil { + t.Fatalf("AddImage: %v", err) + } + + oldRef, _ := name.NewTag(host+"/src/repo:v1", name.Insecure) + newRef, _ := name.NewTag("newrepo/img:v2") // defaults to index.docker.io + rawRewrite := "newrepo/img:v2" + + if err := rewriteReference(ctx, s, oldRef, newRef, rawRewrite); err != nil { + t.Fatalf("rewriteReference: %v", err) + } + // condition fires → registry reverts to host, no library/ to strip + assertAnnotationsInStore(t, s, "newrepo/img:v2", host+"/newrepo/img:v2") + }) +} + +// -------------------------------------------------------------------------- +// Integration tests +// -------------------------------------------------------------------------- + +func TestStoreFile(t *testing.T) { + ctx := newTestContext(t) + + t.Run("local file stored successfully", func(t *testing.T) { + tmp, err := os.CreateTemp(t.TempDir(), "testfile-*.txt") + if err != nil { + t.Fatal(err) + } + tmp.WriteString("hello hauler") //nolint:errcheck + tmp.Close() + + s := newTestStore(t) + if err := storeFile(ctx, s, v1.File{Path: tmp.Name()}); err != nil { + t.Fatalf("storeFile: %v", err) + } + assertArtifactInStore(t, s, filepath.Base(tmp.Name())) + }) + + t.Run("HTTP URL stored under basename", func(t *testing.T) { + url := seedFileInHTTPServer(t, "script.sh", "#!/bin/sh\necho ok") + s := newTestStore(t) + if err := storeFile(ctx, s, v1.File{Path: url}); 
err != nil { + t.Fatalf("storeFile: %v", err) + } + assertArtifactInStore(t, s, "script.sh") + }) + + t.Run("name override changes stored ref", func(t *testing.T) { + tmp, err := os.CreateTemp(t.TempDir(), "orig-*.txt") + if err != nil { + t.Fatal(err) + } + tmp.Close() + + s := newTestStore(t) + if err := storeFile(ctx, s, v1.File{Path: tmp.Name(), Name: "custom.sh"}); err != nil { + t.Fatalf("storeFile: %v", err) + } + assertArtifactInStore(t, s, "custom.sh") + }) + + t.Run("nonexistent local path returns error", func(t *testing.T) { + s := newTestStore(t) + err := storeFile(ctx, s, v1.File{Path: "/nonexistent/path/missing-file.txt"}) + if err == nil { + t.Fatal("expected error for nonexistent path, got nil") + } + }) +} + +func TestAddFileCmd(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + tmp, err := os.CreateTemp(t.TempDir(), "rawfile-*.txt") + if err != nil { + t.Fatal(err) + } + tmp.WriteString("raw content") //nolint:errcheck + tmp.Close() + + o := &flags.AddFileOpts{Name: "renamed.txt"} + if err := AddFileCmd(ctx, o, s, tmp.Name()); err != nil { + t.Fatalf("AddFileCmd: %v", err) + } + assertArtifactInStore(t, s, "renamed.txt") +} + +func TestStoreImage(t *testing.T) { + ctx := newTestContext(t) + host, rOpts := newLocalhostRegistry(t) + seedImage(t, host, "test/repo", "v1", rOpts...) + + tests := []struct { + name string + imageName string + ignoreErrors bool + wantErr bool + wantInStore string + }{ + { + name: "valid image tag stored", + imageName: host + "/test/repo:v1", + wantInStore: "test/repo:v1", + }, + { + name: "invalid ref string returns error", + imageName: "INVALID IMAGE REF !! 
##", + wantErr: true, + }, + { + name: "nonexistent image with IgnoreErrors returns nil", + imageName: host + "/nonexistent/image:missing", + ignoreErrors: true, + wantErr: false, + }, + { + name: "nonexistent image without IgnoreErrors returns error", + imageName: host + "/nonexistent/image:missing", + wantErr: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + ro.IgnoreErrors = tc.ignoreErrors + + err := storeImage(ctx, s, v1.Image{Name: tc.imageName}, "", false, rso, ro, "") + if (err != nil) != tc.wantErr { + t.Fatalf("error = %v, wantErr %v", err, tc.wantErr) + } + if tc.wantInStore != "" { + assertArtifactInStore(t, s, tc.wantInStore) + } + }) + } +} + +func TestStoreImage_Rewrite(t *testing.T) { + ctx := newTestContext(t) + host, rOpts := newLocalhostRegistry(t) + + t.Run("explicit rewrite tag changes ref", func(t *testing.T) { + seedImage(t, host, "src/repo", "v1", rOpts...) + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + err := storeImage(ctx, s, v1.Image{Name: host + "/src/repo:v1"}, "", false, rso, ro, "newrepo/img:v2") + if err != nil { + t.Fatalf("storeImage with rewrite: %v", err) + } + assertArtifactInStore(t, s, "newrepo/img:v2") + }) + + t.Run("rewrite without tag inherits source tag", func(t *testing.T) { + seedImage(t, host, "src/repo", "v3", rOpts...) + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + err := storeImage(ctx, s, v1.Image{Name: host + "/src/repo:v3"}, "", false, rso, ro, "newrepo/img") + if err != nil { + t.Fatalf("storeImage with tagless rewrite: %v", err) + } + // tag is inherited from source ("v3") + assertArtifactInStore(t, s, "newrepo/img:v3") + }) + + t.Run("rewrite without tag on digest source ref returns error", func(t *testing.T) { + img := seedImage(t, host, "src/repo", "digest-src", rOpts...) 
+ h, err := img.Digest() + if err != nil { + t.Fatalf("img.Digest: %v", err) + } + + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + digestRef := host + "/src/repo@" + h.String() + err = storeImage(ctx, s, v1.Image{Name: digestRef}, "", false, rso, ro, "newrepo/img") + if err == nil { + t.Fatal("expected error for digest ref rewrite without explicit tag, got nil") + } + if !strings.Contains(err.Error(), "cannot rewrite digest reference") { + t.Errorf("unexpected error: %v", err) + } + }) +} + +func TestStoreImage_MultiArch(t *testing.T) { + ctx := newTestContext(t) + host, rOpts := newLocalhostRegistry(t) + seedIndex(t, host, "test/multiarch", "v1", rOpts...) + + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + if err := storeImage(ctx, s, v1.Image{Name: host + "/test/multiarch:v1"}, "", false, rso, ro, ""); err != nil { + t.Fatalf("storeImage multi-arch index: %v", err) + } + // Full index (both platforms) must be stored as an index, not a single image. + assertArtifactKindInStore(t, s, "test/multiarch:v1", consts.KindAnnotationIndex) +} + +func TestStoreImage_PlatformFilter(t *testing.T) { + ctx := newTestContext(t) + host, rOpts := newLocalhostRegistry(t) + seedIndex(t, host, "test/multiarch", "v2", rOpts...) + + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + if err := storeImage(ctx, s, v1.Image{Name: host + "/test/multiarch:v2"}, "linux/amd64", false, rso, ro, ""); err != nil { + t.Fatalf("storeImage with platform filter: %v", err) + } + // Platform filter resolves a single manifest from the index → stored as a single image. + assertArtifactKindInStore(t, s, "test/multiarch:v2", consts.KindAnnotationImage) +} + +func TestStoreImage_CosignV2Artifacts(t *testing.T) { + ctx := newTestContext(t) + host, rOpts := newLocalhostRegistry(t) + + img := seedImage(t, host, "test/signed", "v1", rOpts...) + seedCosignV2Artifacts(t, host, "test/signed", img, rOpts...) 
+ + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + if err := storeImage(ctx, s, v1.Image{Name: host + "/test/signed:v1"}, "", false, rso, ro, ""); err != nil { + t.Fatalf("storeImage: %v", err) + } + assertArtifactKindInStore(t, s, "test/signed:v1", consts.KindAnnotationSigs) + assertArtifactKindInStore(t, s, "test/signed:v1", consts.KindAnnotationAtts) + assertArtifactKindInStore(t, s, "test/signed:v1", consts.KindAnnotationSboms) +} + +func TestStoreImage_CosignV3Referrer(t *testing.T) { + ctx := newTestContext(t) + host, rOpts := newLocalhostRegistry(t) + + img := seedImage(t, host, "test/image", "v1", rOpts...) + seedOCI11Referrer(t, host, "test/image", img, rOpts...) + + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + if err := storeImage(ctx, s, v1.Image{Name: host + "/test/image:v1"}, "", false, rso, ro, ""); err != nil { + t.Fatalf("storeImage: %v", err) + } + assertReferrerInStore(t, s, "test/image:v1") +} + +func TestStoreImage_ExcludeExtras(t *testing.T) { + ctx := newTestContext(t) + + t.Run("cosign v2 artifacts excluded when excludeExtras=true", func(t *testing.T) { + host, rOpts := newLocalhostRegistry(t) + + img := seedImage(t, host, "test/signed", "v1", rOpts...) + seedCosignV2Artifacts(t, host, "test/signed", img, rOpts...) + + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + if err := storeImage(ctx, s, v1.Image{Name: host + "/test/signed:v1"}, "", true, rso, ro, ""); err != nil { + t.Fatalf("storeImage with excludeExtras: %v", err) + } + + // Only the primary image must be present — no sigs, atts, or sboms. + count := countArtifactsInStore(t, s) + if count != 1 { + t.Errorf("expected 1 artifact in store, got %d", count) + } + assertArtifactKindInStore(t, s, "test/signed:v1", consts.KindAnnotationImage) + + // Verify no sig/att/sbom kind annotations are present. 
+ for _, kind := range []string{consts.KindAnnotationSigs, consts.KindAnnotationAtts, consts.KindAnnotationSboms} { + found := false + if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error { + if desc.Annotations[consts.KindAnnotationName] == kind { + found = true + } + return nil + }); err != nil { + t.Fatalf("walk: %v", err) + } + if found { + t.Errorf("unexpected artifact with kind %q found in store", kind) + } + } + }) + + t.Run("OCI 1.1 referrers excluded when excludeExtras=true", func(t *testing.T) { + host, rOpts := newLocalhostRegistry(t) + + img := seedImage(t, host, "test/image", "v1", rOpts...) + seedOCI11Referrer(t, host, "test/image", img, rOpts...) + + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + if err := storeImage(ctx, s, v1.Image{Name: host + "/test/image:v1"}, "", true, rso, ro, ""); err != nil { + t.Fatalf("storeImage with excludeExtras: %v", err) + } + + // Only the primary image must be present — no referrers. + count := countArtifactsInStore(t, s) + if count != 1 { + t.Errorf("expected 1 artifact in store, got %d", count) + } + + // Verify no referrer kind annotations are present. + found := false + if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error { + if strings.HasPrefix(desc.Annotations[consts.KindAnnotationName], consts.KindAnnotationReferrers) { + found = true + } + return nil + }); err != nil { + t.Fatalf("walk: %v", err) + } + if found { + t.Errorf("unexpected OCI referrer found in store when excludeExtras=true") + } + }) + + t.Run("cosign v2 artifacts included when excludeExtras=false", func(t *testing.T) { + host, rOpts := newLocalhostRegistry(t) + + img := seedImage(t, host, "test/signed", "v2", rOpts...) + seedCosignV2Artifacts(t, host, "test/signed", img, rOpts...) 
+ + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + if err := storeImage(ctx, s, v1.Image{Name: host + "/test/signed:v2"}, "", false, rso, ro, ""); err != nil { + t.Fatalf("storeImage without excludeExtras: %v", err) + } + + // All four artifacts (image + sig + att + sbom) must be present. + assertArtifactKindInStore(t, s, "test/signed:v2", consts.KindAnnotationSigs) + assertArtifactKindInStore(t, s, "test/signed:v2", consts.KindAnnotationAtts) + assertArtifactKindInStore(t, s, "test/signed:v2", consts.KindAnnotationSboms) + }) +} + +func TestAddChartCmd_LocalTgz(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + o := newAddChartOpts(chartTestdataDir, "") + if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil { + t.Fatalf("AddChartCmd: %v", err) + } + // Hauler stores all artifacts (files, charts) via store.AddArtifact, which + // unconditionally sets KindAnnotationName = KindAnnotationImage (see + // pkg/store/store.go). There is no separate "chart" kind — charts are + // wrapped in an OCI image manifest and tagged with KindAnnotationImage. 
+ assertArtifactKindInStore(t, s, "rancher-cluster-templates", consts.KindAnnotationImage) +} + +func TestAddChartCmd_WithFileDep(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + o := newAddChartOpts(chartTestdataDir, "") + if err := AddChartCmd(ctx, o, s, "chart-with-file-dependency-chart-1.0.0.tgz", rso, ro); err != nil { + t.Fatalf("AddChartCmd: %v", err) + } + assertArtifactInStore(t, s, "chart-with-file-dependency-chart") +} + +func TestStoreChart_Rewrite(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + o := newAddChartOpts(chartTestdataDir, "") + o.Rewrite = "myorg/custom-chart" + + if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil { + t.Fatalf("AddChartCmd with rewrite: %v", err) + } + assertArtifactInStore(t, s, "myorg/custom-chart") +} + +// seedChartWithImages builds a minimal Helm chart whose helm.sh/images +// annotation lists the given image refs and saves it as a .tgz into dir. +// Returns the path to the saved .tgz file. +func seedChartWithImages(t *testing.T, dir string, images []string) string { + t.Helper() + + // Build a helm.sh/images YAML list from the image refs. + var sb strings.Builder + for _, img := range images { + sb.WriteString("- image: ") + sb.WriteString(img) + sb.WriteString("\n") + } + + c := &helmchart.Chart{ + Metadata: &helmchart.Metadata{ + APIVersion: "v2", + Name: "test-chart", + Version: "0.1.0", + Annotations: map[string]string{ + "helm.sh/images": sb.String(), + }, + }, + } + + saved, err := chartutil.Save(c, dir) + if err != nil { + t.Fatalf("seedChartWithImages: chartutil.Save: %v", err) + } + return saved +} + +func TestStoreChart_AddImages_ExcludeExtras(t *testing.T) { + ctx := newTestContext(t) + host, rOpts := newLocalhostRegistry(t) + + // Seed an image with cosign v2 artifacts (sig + att + sbom). 
+ img := seedImage(t, host, "test/chart-image", "v1", rOpts...) + seedCosignV2Artifacts(t, host, "test/chart-image", img, rOpts...) + + // Build a minimal chart whose helm.sh/images annotation references the image. + chartDir := t.TempDir() + imageRef := host + "/test/chart-image:v1" + tgzPath := seedChartWithImages(t, chartDir, []string{imageRef}) + + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + t.Run("excludeExtras=true suppresses sigs/atts/sboms for chart-discovered images", func(t *testing.T) { + o := &flags.AddChartOpts{ + ChartOpts: newAddChartOpts("", "").ChartOpts, + AddImages: true, + ExcludeExtras: true, + } + if err := storeChart(ctx, s, v1.Chart{Name: tgzPath}, o, rso, ro, ""); err != nil { + t.Fatalf("storeChart with ExcludeExtras: %v", err) + } + + // The chart itself is stored as an OCI image artifact. + assertArtifactInStore(t, s, "test-chart") + // The discovered image is stored (bare, no extras). + assertArtifactInStore(t, s, "test/chart-image:v1") + + // No sig / att / sbom entries must be present. + for _, kind := range []string{consts.KindAnnotationSigs, consts.KindAnnotationAtts, consts.KindAnnotationSboms} { + found := false + if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error { + if desc.Annotations[consts.KindAnnotationName] == kind { + found = true + } + return nil + }); err != nil { + t.Fatalf("walk: %v", err) + } + if found { + t.Errorf("unexpected artifact with kind %q found in store when ExcludeExtras=true", kind) + } + } + }) +} + +func TestStoreChart_AddImages_IncludeExtras(t *testing.T) { + ctx := newTestContext(t) + host, rOpts := newLocalhostRegistry(t) + + // Seed an image with cosign v2 artifacts. + img := seedImage(t, host, "test/chart-image", "v2", rOpts...) + seedCosignV2Artifacts(t, host, "test/chart-image", img, rOpts...) 
+ + chartDir := t.TempDir() + imageRef := host + "/test/chart-image:v2" + tgzPath := seedChartWithImages(t, chartDir, []string{imageRef}) + + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + t.Run("excludeExtras=false includes sigs/atts/sboms for chart-discovered images", func(t *testing.T) { + o := &flags.AddChartOpts{ + ChartOpts: newAddChartOpts("", "").ChartOpts, + AddImages: true, + ExcludeExtras: false, + } + if err := storeChart(ctx, s, v1.Chart{Name: tgzPath}, o, rso, ro, ""); err != nil { + t.Fatalf("storeChart without ExcludeExtras: %v", err) + } + + assertArtifactKindInStore(t, s, "test/chart-image:v2", consts.KindAnnotationSigs) + assertArtifactKindInStore(t, s, "test/chart-image:v2", consts.KindAnnotationAtts) + assertArtifactKindInStore(t, s, "test/chart-image:v2", consts.KindAnnotationSboms) + }) +} diff --git a/cmd/hauler/cli/store/copy.go b/cmd/hauler/cli/store/copy.go index 0b80a98..e99868a 100644 --- a/cmd/hauler/cli/store/copy.go +++ b/cmd/hauler/cli/store/copy.go @@ -2,14 +2,22 @@ package store import ( "context" + "encoding/json" "fmt" + "io" + "os" "strings" - "oras.land/oras-go/pkg/content" + "github.com/containerd/containerd/remotes" + "github.com/containerd/errdefs" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "hauler.dev/go/hauler/internal/flags" - "hauler.dev/go/hauler/pkg/cosign" + "hauler.dev/go/hauler/internal/mapper" + "hauler.dev/go/hauler/pkg/consts" + "hauler.dev/go/hauler/pkg/content" "hauler.dev/go/hauler/pkg/log" + "hauler.dev/go/hauler/pkg/retry" "hauler.dev/go/hauler/pkg/store" ) @@ -20,26 +28,248 @@ func CopyCmd(ctx context.Context, o *flags.CopyOpts, s *store.Layout, targetRef return fmt.Errorf("--username/--password have been deprecated, please use 'hauler login'") } + if !s.IndexExists() { + return fmt.Errorf("store index not found: run 'hauler store add/sync/load' first") + } + components := strings.SplitN(targetRef, "://", 2) switch components[0] { - case "dir": - 
l.Debugf("identified directory target reference of [%s]", components[1]) - fs := content.NewFile(components[1]) - defer fs.Close() + case "directory", "dir": + l.Debugf("identified [directory] target reference of [%s]", components[1]) - _, err := s.CopyAll(ctx, fs, nil) + // Create destination directory if it doesn't exist + if err := os.MkdirAll(components[1], 0755); err != nil { + return fmt.Errorf("failed to create destination directory: %w", err) + } + + // For directory targets, extract files and charts (not images) + err := s.Walk(func(reference string, desc ocispec.Descriptor) error { + // Skip cosign sig/att/sbom artifacts — they're registry-only metadata, + // not extractable as files or charts. + kind := desc.Annotations[consts.KindAnnotationName] + switch kind { + case consts.KindAnnotationSigs, consts.KindAnnotationAtts, consts.KindAnnotationSboms: + l.Debugf("skipping cosign artifact [%s] for directory target", reference) + return nil + } + if strings.HasPrefix(kind, consts.KindAnnotationReferrers) { + l.Debugf("skipping OCI referrer [%s] for directory target", reference) + return nil + } + + // Handle different media types + switch desc.MediaType { + case ocispec.MediaTypeImageIndex, consts.DockerManifestListSchema2: + // Multi-platform index - process each child manifest + rc, err := s.Fetch(ctx, desc) + if err != nil { + l.Warnf("failed to fetch index [%s]: %v", reference, err) + return nil + } + + var index ocispec.Index + if err := json.NewDecoder(rc).Decode(&index); err != nil { + if cerr := rc.Close(); cerr != nil { + l.Warnf("failed to close index reader for [%s]: %v", reference, cerr) + } + l.Warnf("failed to decode index for [%s]: %v", reference, err) + return nil + } + + // Close rc immediately after decoding - we're done reading from it + if cerr := rc.Close(); cerr != nil { + l.Warnf("failed to close index reader for [%s]: %v", reference, cerr) + } + + // Process each manifest in the index + for _, manifestDesc := range index.Manifests { + 
manifestRC, err := s.Fetch(ctx, manifestDesc) + if err != nil { + l.Warnf("failed to fetch child manifest: %v", err) + continue + } + + var m ocispec.Manifest + if err := json.NewDecoder(manifestRC).Decode(&m); err != nil { + manifestRC.Close() + l.Warnf("failed to decode child manifest: %v", err) + continue + } + manifestRC.Close() + + // Skip images - only extract files and charts + if m.Config.MediaType == consts.DockerConfigJSON || + m.Config.MediaType == ocispec.MediaTypeImageConfig { + l.Debugf("skipping image manifest in index [%s]", reference) + continue + } + + // Create mapper and extract + mapperStore, err := mapper.FromManifest(m, components[1]) + if err != nil { + l.Warnf("failed to create mapper for child: %v", err) + continue + } + + // Note: We can't call s.Copy with manifestDesc because it's not in the nameMap + // Instead, we need to manually push through the mapper + if err := extractManifestContent(ctx, s, manifestDesc, m, mapperStore); err != nil { + l.Warnf("failed to extract child: %v", err) + continue + } + + l.Debugf("extracted child manifest from index [%s]", reference) + } + + case ocispec.MediaTypeImageManifest, consts.DockerManifestSchema2: + // Single-platform manifest + rc, err := s.Fetch(ctx, desc) + if err != nil { + l.Warnf("failed to fetch [%s]: %v", reference, err) + return nil + } + + var m ocispec.Manifest + if err := json.NewDecoder(rc).Decode(&m); err != nil { + rc.Close() + l.Warnf("failed to decode manifest for [%s]: %v", reference, err) + return nil + } + + // Skip images - only extract files and charts for directory targets + if m.Config.MediaType == consts.DockerConfigJSON || + m.Config.MediaType == ocispec.MediaTypeImageConfig { + rc.Close() + l.Debugf("skipping image [%s] for directory target", reference) + return nil + } + + // Create a mapper store based on the manifest type + mapperStore, err := mapper.FromManifest(m, components[1]) + if err != nil { + rc.Close() + l.Warnf("failed to create mapper for [%s]: %v", 
reference, err) + return nil + } + + // Copy/extract the content + _, err = s.Copy(ctx, reference, mapperStore, "") + if err != nil { + rc.Close() + l.Warnf("failed to extract [%s]: %v", reference, err) + return nil + } + rc.Close() + + l.Debugf("extracted [%s] to directory", reference) + + default: + l.Debugf("skipping unsupported media type [%s] for [%s]", desc.MediaType, reference) + } + + return nil + }) if err != nil { return err } - case "registry": - l.Debugf("identified registry target reference of [%s]", components[1]) - ropts := content.RegistryOptions{ - Insecure: o.Insecure, + case "registry", "reg", "oci": + l.Debugf("identified [registry] target reference of [%s]", components[1]) + registryOpts := content.RegistryOptions{ PlainHTTP: o.PlainHTTP, + Insecure: o.Insecure, } - err := cosign.LoadImages(ctx, s, components[1], o.Only, ropts, ro) + // Pre-build a map from base ref → image manifest digest so that sig/att/sbom + // descriptors (which store the base image ref, not the cosign tag) can be routed + // to the correct destination tag using the cosign tag convention. 
+ refDigest := make(map[string]string) + if err := s.Walk(func(_ string, desc ocispec.Descriptor) error { + kind := desc.Annotations[consts.KindAnnotationName] + if kind == consts.KindAnnotationImage || kind == consts.KindAnnotationIndex { + if baseRef := desc.Annotations[ocispec.AnnotationRefName]; baseRef != "" { + refDigest[baseRef] = desc.Digest.String() + } + } + return nil + }); err != nil { + return err + } + + sigExts := map[string]string{ + consts.KindAnnotationSigs: ".sig", + consts.KindAnnotationAtts: ".att", + consts.KindAnnotationSboms: ".sbom", + } + + var fatalErr error + err := s.Walk(func(reference string, desc ocispec.Descriptor) error { + if fatalErr != nil { + return nil + } + baseRef := desc.Annotations[ocispec.AnnotationRefName] + if baseRef == "" { + return nil + } + if o.Only != "" && !strings.Contains(baseRef, o.Only) { + l.Debugf("skipping [%s] (not matching --only filter)", baseRef) + return nil + } + + // For sig/att/sbom descriptors, derive the cosign tag from the parent + // image's manifest digest rather than using AnnotationRefName directly. + destRef := baseRef + kind := desc.Annotations[consts.KindAnnotationName] + if ext, isSigKind := sigExts[kind]; isSigKind { + if imgDigest, ok := refDigest[baseRef]; ok { + digestTag := strings.ReplaceAll(imgDigest, ":", "-") + repo := baseRef + if colon := strings.LastIndex(baseRef, ":"); colon != -1 { + repo = baseRef[:colon] + } + destRef = repo + ":" + digestTag + ext + } + } else if strings.HasPrefix(kind, consts.KindAnnotationReferrers) { + // OCI 1.1 referrer (cosign v3 new-bundle-format): push by manifest digest so + // the target registry wires it up via the OCI Referrers API (subject field). + // For registries that don't support the Referrers API natively, the manifest + // is still pushed intact; the subject linkage depends on registry support. 
+ repo := baseRef + if colon := strings.LastIndex(baseRef, ":"); colon != -1 { + repo = baseRef[:colon] + } + destRef = repo + "@" + desc.Digest.String() + } + + toRef, err := content.RewriteRefToRegistry(destRef, components[1]) + if err != nil { + l.Warnf("failed to rewrite ref [%s]: %v", baseRef, err) + return nil + } + l.Infof("%s", destRef) + // A fresh target per artifact gives each push its own in-memory status + // tracker. Containerd's tracker keys blobs by digest only (not repo), + // so a shared tracker would mark shared blobs as "already exists" after + // the first image, skipping the per-repository blob link creation that + // Docker Distribution requires for manifest validation. + target := content.NewRegistryTarget(components[1], registryOpts) + var pushed ocispec.Descriptor + if err := retry.Operation(ctx, o.StoreRootOpts, ro, func() error { + var copyErr error + pushed, copyErr = s.Copy(ctx, reference, target, toRef) + return copyErr + }); err != nil { + if !ro.IgnoreErrors { + fatalErr = err + } + return nil + } + l.Infof("%s: digest: %s size: %d", toRef, pushed.Digest, pushed.Size) + return nil + }) + if fatalErr != nil { + return fatalErr + } if err != nil { return err } @@ -51,3 +281,73 @@ func CopyCmd(ctx context.Context, o *flags.CopyOpts, s *store.Layout, targetRef l.Infof("copied artifacts to [%s]", components[1]) return nil } + +// extractManifestContent extracts a manifest's layers through a mapper target +// This is used for child manifests in indexes that aren't in the store's nameMap +func extractManifestContent(ctx context.Context, s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, target content.Target) error { + // Get a pusher from the target + pusher, err := target.Pusher(ctx, "") + if err != nil { + return fmt.Errorf("failed to get pusher: %w", err) + } + + // Copy config blob + if err := copyBlobDescriptor(ctx, s, m.Config, pusher); err != nil { + return fmt.Errorf("failed to copy config: %w", err) + } + + // Copy 
each layer blob + for _, layer := range m.Layers { + if err := copyBlobDescriptor(ctx, s, layer, pusher); err != nil { + return fmt.Errorf("failed to copy layer: %w", err) + } + } + + // Copy the manifest itself + if err := copyBlobDescriptor(ctx, s, desc, pusher); err != nil { + return fmt.Errorf("failed to copy manifest: %w", err) + } + + return nil +} + +// copyBlobDescriptor copies a single descriptor blob from the store to a pusher +func copyBlobDescriptor(ctx context.Context, s *store.Layout, desc ocispec.Descriptor, pusher remotes.Pusher) (err error) { + // Fetch the content from the store + rc, err := s.OCI.Fetch(ctx, desc) + if err != nil { + return fmt.Errorf("failed to fetch blob: %w", err) + } + defer func() { + if closeErr := rc.Close(); closeErr != nil && err == nil { + err = fmt.Errorf("failed to close reader: %w", closeErr) + } + }() + + // Get a writer from the pusher + writer, err := pusher.Push(ctx, desc) + if err != nil { + if errdefs.IsAlreadyExists(err) { + return nil // content already present on remote + } + return fmt.Errorf("failed to push: %w", err) + } + defer func() { + if closeErr := writer.Close(); closeErr != nil && err == nil { + err = fmt.Errorf("failed to close writer: %w", closeErr) + } + }() + + // Copy the content + n, err := io.Copy(writer, rc) + if err != nil { + return fmt.Errorf("failed to copy content: %w", err) + } + + // Commit the written content + if err := writer.Commit(ctx, n, desc.Digest); err != nil { + return fmt.Errorf("failed to commit: %w", err) + } + + return nil +} diff --git a/cmd/hauler/cli/store/copy_test.go b/cmd/hauler/cli/store/copy_test.go new file mode 100644 index 0000000..28b41f5 --- /dev/null +++ b/cmd/hauler/cli/store/copy_test.go @@ -0,0 +1,338 @@ +package store + +// copy_test.go covers CopyCmd for both registry:// and dir:// targets. 
+ +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + + "hauler.dev/go/hauler/internal/flags" + v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1" +) + +// -------------------------------------------------------------------------- +// Error / guard tests +// -------------------------------------------------------------------------- + +// TestCopyCmd_EmptyStoreFails verifies that CopyCmd returns an error when the +// store has no index.json on disk (i.e. nothing has been added yet). +func TestCopyCmd_EmptyStoreFails(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) // freshly created — index.json not yet on disk + + o := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)} + err := CopyCmd(ctx, o, s, "registry://127.0.0.1:5000", defaultCliOpts()) + if err == nil { + t.Fatal("expected error for empty store, got nil") + } + if !strings.Contains(err.Error(), "store index not found") { + t.Errorf("unexpected error: %v", err) + } +} + +// TestCopyCmd_DeprecatedCredentials verifies that passing Username returns the +// deprecation error before any other check. +func TestCopyCmd_DeprecatedCredentials(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + o := &flags.CopyOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + Username: "user", + Password: "pass", + } + err := CopyCmd(ctx, o, s, "registry://127.0.0.1:5000", defaultCliOpts()) + if err == nil { + t.Fatal("expected deprecation error, got nil") + } + if !strings.Contains(err.Error(), "deprecated") { + t.Errorf("unexpected error: %v", err) + } +} + +// TestCopyCmd_UnknownProtocol verifies that an unrecognized scheme returns an +// error containing "detecting protocol". +func TestCopyCmd_UnknownProtocol(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + // Write index.json so IndexExists() passes. 
+ if err := s.SaveIndex(); err != nil { + t.Fatalf("SaveIndex: %v", err) + } + + o := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)} + err := CopyCmd(ctx, o, s, "ftp://somehost/path", defaultCliOpts()) + if err == nil { + t.Fatal("expected error for unknown protocol, got nil") + } + if !strings.Contains(err.Error(), "detecting protocol") { + t.Errorf("unexpected error: %v", err) + } +} + +// -------------------------------------------------------------------------- +// Registry copy tests +// -------------------------------------------------------------------------- + +// TestCopyCmd_Registry seeds a store with a single image, copies it to an +// in-memory target registry, and verifies the image is reachable there. +func TestCopyCmd_Registry(t *testing.T) { + ctx := newTestContext(t) + + srcHost, _ := newLocalhostRegistry(t) + seedImage(t, srcHost, "test/copy", "v1") + + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + if err := storeImage(ctx, s, v1.Image{Name: srcHost + "/test/copy:v1"}, "", false, rso, ro, ""); err != nil { + t.Fatalf("storeImage: %v", err) + } + + dstHost, dstOpts := newTestRegistry(t) + o := &flags.CopyOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + PlainHTTP: true, + } + if err := CopyCmd(ctx, o, s, "registry://"+dstHost, ro); err != nil { + t.Fatalf("CopyCmd registry: %v", err) + } + + // Verify the image is reachable in the target registry. + dstRef, err := name.NewTag(dstHost+"/test/copy:v1", name.Insecure) + if err != nil { + t.Fatalf("name.NewTag: %v", err) + } + if _, err := remote.Get(dstRef, dstOpts...); err != nil { + t.Errorf("image not found in target registry after copy: %v", err) + } +} + +// TestCopyCmd_Registry_OnlyFilter seeds two images in distinct repos, copies +// with --only=repo1, and asserts only repo1 reaches the target. 
+func TestCopyCmd_Registry_OnlyFilter(t *testing.T) { + ctx := newTestContext(t) + + srcHost, _ := newLocalhostRegistry(t) + seedImage(t, srcHost, "myorg/repo1", "v1") + seedImage(t, srcHost, "myorg/repo2", "v1") + + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + for _, repo := range []string{"myorg/repo1:v1", "myorg/repo2:v1"} { + if err := storeImage(ctx, s, v1.Image{Name: srcHost + "/" + repo}, "", false, rso, ro, ""); err != nil { + t.Fatalf("storeImage %s: %v", repo, err) + } + } + + dstHost, dstOpts := newTestRegistry(t) + o := &flags.CopyOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + PlainHTTP: true, + Only: "repo1", + } + if err := CopyCmd(ctx, o, s, "registry://"+dstHost, ro); err != nil { + t.Fatalf("CopyCmd with --only: %v", err) + } + + // repo1 must be in target. + ref1, err := name.NewTag(dstHost+"/myorg/repo1:v1", name.Insecure) + if err != nil { + t.Fatalf("name.NewTag repo1: %v", err) + } + if _, err := remote.Get(ref1, dstOpts...); err != nil { + t.Errorf("repo1 should be in target registry but was not found: %v", err) + } + + // repo2 must NOT be in target. + ref2, err := name.NewTag(dstHost+"/myorg/repo2:v1", name.Insecure) + if err != nil { + t.Fatalf("name.NewTag repo2: %v", err) + } + if _, err := remote.Get(ref2, dstOpts...); err == nil { + t.Error("repo2 should NOT be in target registry after --only=repo1, but was found") + } +} + +// TestCopyCmd_Registry_SigTagDerivation seeds a base image along with cosign +// v2 signature artifacts, adds everything to the store via AddImage (which +// auto-discovers the .sig/.att/.sbom tags), then copies to a target registry +// and verifies the sig arrives at the expected sha256-.sig tag. 
+func TestCopyCmd_Registry_SigTagDerivation(t *testing.T) { + ctx := newTestContext(t) + + srcHost, _ := newLocalhostRegistry(t) + srcImg := seedImage(t, srcHost, "test/signed", "v1") + seedCosignV2Artifacts(t, srcHost, "test/signed", srcImg) + + // AddImage discovers and stores the .sig/.att/.sbom tags automatically. + s := newTestStore(t) + if err := s.AddImage(ctx, srcHost+"/test/signed:v1", "", false); err != nil { + t.Fatalf("AddImage: %v", err) + } + + dstHost, dstOpts := newTestRegistry(t) + o := &flags.CopyOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + PlainHTTP: true, + } + if err := CopyCmd(ctx, o, s, "registry://"+dstHost, defaultCliOpts()); err != nil { + t.Fatalf("CopyCmd: %v", err) + } + + // Compute the expected cosign sig tag from the image's manifest digest. + hash, err := srcImg.Digest() + if err != nil { + t.Fatalf("srcImg.Digest: %v", err) + } + sigTag := strings.ReplaceAll(hash.String(), ":", "-") + ".sig" + + sigRef, err := name.NewTag(dstHost+"/test/signed:"+sigTag, name.Insecure) + if err != nil { + t.Fatalf("name.NewTag sigRef: %v", err) + } + if _, err := remote.Get(sigRef, dstOpts...); err != nil { + t.Errorf("sig not found at expected tag %s in target registry: %v", sigTag, err) + } +} + +// TestCopyCmd_Registry_IgnoreErrors verifies that a push failure to a +// non-listening address is swallowed when IgnoreErrors is set. +func TestCopyCmd_Registry_IgnoreErrors(t *testing.T) { + ctx := newTestContext(t) + + srcHost, _ := newLocalhostRegistry(t) + seedImage(t, srcHost, "test/ignore", "v1") + + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + if err := storeImage(ctx, s, v1.Image{Name: srcHost + "/test/ignore:v1"}, "", false, rso, ro, ""); err != nil { + t.Fatalf("storeImage: %v", err) + } + + // localhost:1 is a port that is never listening. 
+ o := &flags.CopyOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + PlainHTTP: true, + } + roIgnore := defaultCliOpts() + roIgnore.IgnoreErrors = true + if err := CopyCmd(ctx, o, s, "registry://localhost:1", roIgnore); err != nil { + t.Errorf("expected no error with IgnoreErrors=true, got: %v", err) + } +} + +// -------------------------------------------------------------------------- +// Directory copy tests +// -------------------------------------------------------------------------- + +// TestCopyCmd_Dir_Files copies a file artifact to a directory target and +// verifies the file appears under its original basename. +func TestCopyCmd_Dir_Files(t *testing.T) { + ctx := newTestContext(t) + + content := "hello from hauler file" + url := seedFileInHTTPServer(t, "data.txt", content) + + s := newTestStore(t) + if err := storeFile(ctx, s, v1.File{Path: url}); err != nil { + t.Fatalf("storeFile: %v", err) + } + + destDir := t.TempDir() + o := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)} + if err := CopyCmd(ctx, o, s, "dir://"+destDir, defaultCliOpts()); err != nil { + t.Fatalf("CopyCmd dir: %v", err) + } + + outPath := filepath.Join(destDir, "data.txt") + data, err := os.ReadFile(outPath) + if err != nil { + t.Fatalf("file not found in destDir after dir copy: %v", err) + } + if string(data) != content { + t.Errorf("file content mismatch: got %q, want %q", string(data), content) + } +} + +// TestCopyCmd_Dir_SkipsImages verifies that container images are not extracted +// when copying to a directory target. 
+func TestCopyCmd_Dir_SkipsImages(t *testing.T) { + ctx := newTestContext(t) + + srcHost, _ := newLocalhostRegistry(t) + seedImage(t, srcHost, "test/imgskip", "v1") + + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + if err := storeImage(ctx, s, v1.Image{Name: srcHost + "/test/imgskip:v1"}, "", false, rso, ro, ""); err != nil { + t.Fatalf("storeImage: %v", err) + } + + destDir := t.TempDir() + o := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)} + if err := CopyCmd(ctx, o, s, "dir://"+destDir, ro); err != nil { + t.Fatalf("CopyCmd dir: %v", err) + } + + entries, err := os.ReadDir(destDir) + if err != nil { + t.Fatalf("ReadDir: %v", err) + } + if len(entries) != 0 { + names := make([]string, len(entries)) + for i, e := range entries { + names[i] = e.Name() + } + t.Errorf("expected empty destDir for image-only store, found: %s", strings.Join(names, ", ")) + } +} + +// TestCopyCmd_Dir_Charts copies a local Helm chart artifact to a directory +// target and verifies a .tgz file is present. 
+func TestCopyCmd_Dir_Charts(t *testing.T) { + ctx := newTestContext(t) + + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + o := newAddChartOpts(chartTestdataDir, "") + if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil { + t.Fatalf("AddChartCmd: %v", err) + } + + destDir := t.TempDir() + copyOpts := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(s.Root)} + if err := CopyCmd(ctx, copyOpts, s, "dir://"+destDir, ro); err != nil { + t.Fatalf("CopyCmd dir charts: %v", err) + } + + entries, err := os.ReadDir(destDir) + if err != nil { + t.Fatalf("ReadDir: %v", err) + } + + var found bool + for _, e := range entries { + if strings.HasSuffix(e.Name(), ".tgz") || strings.HasSuffix(e.Name(), ".tar.gz") { + found = true + break + } + } + if !found { + names := make([]string, len(entries)) + for i, e := range entries { + names[i] = e.Name() + } + t.Errorf("no .tgz found in destDir after chart copy; found: %v", names) + } +} diff --git a/cmd/hauler/cli/store/extract.go b/cmd/hauler/cli/store/extract.go index ade07c2..a1c79b4 100644 --- a/cmd/hauler/cli/store/extract.go +++ b/cmd/hauler/cli/store/extract.go @@ -10,11 +10,79 @@ import ( "hauler.dev/go/hauler/internal/flags" "hauler.dev/go/hauler/internal/mapper" + "hauler.dev/go/hauler/pkg/consts" "hauler.dev/go/hauler/pkg/log" "hauler.dev/go/hauler/pkg/reference" "hauler.dev/go/hauler/pkg/store" ) +// isIndexMediaType returns true for OCI and Docker manifest list media types. +func isIndexMediaType(mt string) bool { + return mt == ocispec.MediaTypeImageIndex || mt == consts.DockerManifestListSchema2 +} + +// firstLeafManifest walks a (potentially nested) OCI index and returns the +// decoded manifest of the first non-index child. It prefers non-index children +// at each level; if all children are indexes it descends into the first one. +// Returns an error if any nested index or manifest cannot be decoded. 
+func firstLeafManifest(ctx context.Context, s *store.Layout, idx ocispec.Index) (ocispec.Manifest, error) { + for { + if len(idx.Manifests) == 0 { + return ocispec.Manifest{}, fmt.Errorf("image index has no child manifests") + } + + // Prefer the first non-index child; fall back to the first child (an index) if all are indexes. + desc := idx.Manifests[0] + for _, d := range idx.Manifests { + if !isIndexMediaType(d.MediaType) { + desc = d + break + } + } + + rc, err := s.Fetch(ctx, desc) + if err != nil { + return ocispec.Manifest{}, err + } + + if isIndexMediaType(desc.MediaType) { + var nested ocispec.Index + err = json.NewDecoder(rc).Decode(&nested) + rc.Close() + if err != nil { + return ocispec.Manifest{}, fmt.Errorf("decoding nested index: %w", err) + } + idx = nested + continue + } + + var m ocispec.Manifest + err = json.NewDecoder(rc).Decode(&m) + rc.Close() + if err != nil { + return ocispec.Manifest{}, fmt.Errorf("decoding child manifest: %w", err) + } + return m, nil + } +} + +// isContainerImageManifest returns true when the manifest describes a real +// container image — i.e. an OCI/Docker image config with no AnnotationTitle on +// any layer. File artifacts distributed as OCI images always carry AnnotationTitle +// on their layers, so they are NOT considered container images by this check. 
+func isContainerImageManifest(m ocispec.Manifest) bool { + switch m.Config.MediaType { + case consts.DockerConfigJSON, ocispec.MediaTypeImageConfig: + for _, layer := range m.Layers { + if _, ok := layer.Annotations[ocispec.AnnotationTitle]; ok { + return false + } + } + return true + } + return false +} + func ExtractCmd(ctx context.Context, o *flags.ExtractOpts, s *store.Layout, ref string) error { l := log.FromContext(ctx) @@ -39,9 +107,36 @@ func ExtractCmd(ctx context.Context, o *flags.ExtractOpts, s *store.Layout, ref } defer rc.Close() + // For image indexes, decoding the index JSON as ocispec.Manifest produces + // an empty Config.MediaType and nil Layers — causing FromManifest to fall + // back to Default() mapper, which writes config blobs as sha256:.bin. + // Instead, peek at the first child manifest to get real config/layer info. var m ocispec.Manifest - if err := json.NewDecoder(rc).Decode(&m); err != nil { - return err + if desc.MediaType == ocispec.MediaTypeImageIndex || desc.MediaType == consts.DockerManifestListSchema2 { + var idx ocispec.Index + if err := json.NewDecoder(rc).Decode(&idx); err != nil { + return err + } + if len(idx.Manifests) == 0 { + l.Warnf("skipping [%s]: image index has no child manifests", reference) + return nil + } + var err error + m, err = firstLeafManifest(ctx, s, idx) + if err != nil { + return err + } + } else { + if err := json.NewDecoder(rc).Decode(&m); err != nil { + return err + } + } + + // Container images (no AnnotationTitle on any layer) are not extractable + // to disk in a meaningful way — use `hauler store copy` to push to a registry. 
+ if isContainerImageManifest(m) { + l.Warnf("skipping [%s]: container images cannot be extracted (use `hauler store copy` to push to a registry)", reference) + return nil } mapperStore, err := mapper.FromManifest(m, o.DestinationDir) diff --git a/cmd/hauler/cli/store/extract_test.go b/cmd/hauler/cli/store/extract_test.go new file mode 100644 index 0000000..6d951e0 --- /dev/null +++ b/cmd/hauler/cli/store/extract_test.go @@ -0,0 +1,556 @@ +package store + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-containerregistry/pkg/name" + gcrv1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/go-containerregistry/pkg/v1/static" + gvtypes "github.com/google/go-containerregistry/pkg/v1/types" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "hauler.dev/go/hauler/internal/flags" + v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1" + "hauler.dev/go/hauler/pkg/consts" +) + +// chartTestdataDir is defined in add_test.go as "../../../../testdata". + +func TestExtractCmd_File(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + fileContent := "hello extract test" + url := seedFileInHTTPServer(t, "extract-me.txt", fileContent) + if err := storeFile(ctx, s, v1.File{Path: url}); err != nil { + t.Fatalf("storeFile: %v", err) + } + + // reference.Parse("extract-me.txt") normalises to "hauler/extract-me.txt:latest" + // (DefaultNamespace = "hauler", DefaultTag = "latest"). ExtractCmd builds + // repo = RepositoryStr() + ":" + Identifier() = "hauler/extract-me.txt:latest" + // and uses strings.Contains against the stored ref — which matches exactly. 
+ ref := "hauler/extract-me.txt:latest" + + destDir := t.TempDir() + eo := &flags.ExtractOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + DestinationDir: destDir, + } + + if err := ExtractCmd(ctx, eo, s, ref); err != nil { + t.Fatalf("ExtractCmd: %v", err) + } + + // The file mapper writes the layer using its AnnotationTitle ("extract-me.txt"). + outPath := filepath.Join(destDir, "extract-me.txt") + data, err := os.ReadFile(outPath) + if err != nil { + t.Fatalf("expected extracted file at %s: %v", outPath, err) + } + if string(data) != fileContent { + t.Errorf("content mismatch: got %q, want %q", string(data), fileContent) + } +} + +func TestExtractCmd_Chart(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + o := newAddChartOpts(chartTestdataDir, "") + if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil { + t.Fatalf("AddChartCmd: %v", err) + } + + // Chart stored as "hauler/rancher-cluster-templates:0.5.2". + ref := "hauler/rancher-cluster-templates:0.5.2" + + destDir := t.TempDir() + eo := &flags.ExtractOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + DestinationDir: destDir, + } + + if err := ExtractCmd(ctx, eo, s, ref); err != nil { + t.Fatalf("ExtractCmd: %v", err) + } + + // The chart mapper writes the chart layer as a .tgz (using AnnotationTitle, + // or "chart.tar.gz" as fallback). 
+ entries, err := os.ReadDir(destDir) + if err != nil { + t.Fatalf("ReadDir: %v", err) + } + + found := false + for _, e := range entries { + if strings.HasSuffix(e.Name(), ".tgz") || strings.HasSuffix(e.Name(), ".tar.gz") { + found = true + break + } + } + if !found { + names := make([]string, len(entries)) + for i, e := range entries { + names[i] = e.Name() + } + t.Errorf("expected a .tgz or .tar.gz in destDir, got: %v", names) + } +} + +func TestExtractCmd_NotFound(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + eo := &flags.ExtractOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + DestinationDir: t.TempDir(), + } + + err := ExtractCmd(ctx, eo, s, "hauler/nonexistent:v99") + if err == nil { + t.Fatal("expected error for nonexistent ref, got nil") + } + if !strings.Contains(err.Error(), "not found in store") { + t.Errorf("expected 'not found in store' in error, got: %v", err) + } +} + +func TestExtractCmd_OciArtifactKindImage(t *testing.T) { + // OCI artifacts pulled from a registry via AddImage() are always labelled + // kind=KindAnnotationImage regardless of their actual content type (file, + // chart, etc.). ExtractCmd must still dispatch via the manifest's + // Config.MediaType — not the kind annotation — so extraction works correctly. + ctx := newTestContext(t) + + // newLocalhostRegistry is required: s.AddImage uses authn.DefaultKeychain and + // go-containerregistry auto-selects plain HTTP only for "localhost:" hosts. 
+ host, rOpts := newLocalhostRegistry(t) + + // Build a synthetic OCI file artifact: + // config.MediaType = FileLocalConfigMediaType (triggers Files() mapper) + // layer.MediaType = FileLayerMediaType + // layer annotation AnnotationTitle = "oci-pulled-file.txt" + fileContent := []byte("oci file content from registry") + fileLayer := static.NewLayer(fileContent, gvtypes.MediaType(consts.FileLayerMediaType)) + img, err := mutate.Append(empty.Image, mutate.Addendum{ + Layer: fileLayer, + Annotations: map[string]string{ + ocispec.AnnotationTitle: "oci-pulled-file.txt", + }, + }) + if err != nil { + t.Fatalf("mutate.Append: %v", err) + } + img = mutate.MediaType(img, gvtypes.OCIManifestSchema1) + img = mutate.ConfigMediaType(img, gvtypes.MediaType(consts.FileLocalConfigMediaType)) + + ref := host + "/oci-artifacts/myfile:v1" + tag, err := name.NewTag(ref, name.Insecure) + if err != nil { + t.Fatalf("name.NewTag: %v", err) + } + if err := remote.Write(tag, img, rOpts...); err != nil { + t.Fatalf("remote.Write: %v", err) + } + + // Pull into a fresh store — AddImage sets kind=KindAnnotationImage on all manifests. + s := newTestStore(t) + if err := s.AddImage(ctx, ref, "", false, rOpts...); err != nil { + t.Fatalf("AddImage: %v", err) + } + + // ExtractCmd receives the short ref (no registry prefix) as stored in AnnotationRefName. + // reference.Parse("oci-artifacts/myfile:v1") → repo "oci-artifacts/myfile:v1" matches. + destDir := t.TempDir() + eo := &flags.ExtractOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + DestinationDir: destDir, + } + if err := ExtractCmd(ctx, eo, s, "oci-artifacts/myfile:v1"); err != nil { + t.Fatalf("ExtractCmd: %v", err) + } + + // Files() mapper uses AnnotationTitle → "oci-pulled-file.txt". 
+ outPath := filepath.Join(destDir, "oci-pulled-file.txt") + data, err := os.ReadFile(outPath) + if err != nil { + t.Fatalf("expected extracted file at %s: %v", outPath, err) + } + if string(data) != string(fileContent) { + t.Errorf("content mismatch: got %q, want %q", string(data), string(fileContent)) + } +} + +func TestExtractCmd_OciImageIndex_NoBinFiles(t *testing.T) { + // Regression test: extracting an OCI image index whose platform manifests + // carry binary layers with AnnotationTitle must yield only the named binary + // files — no sha256:.bin metadata files. + // Before the fix, decoding the index as an ocispec.Manifest produced an + // empty Config.MediaType, causing FromManifest to select Default() mapper + // which wrote config blobs and child manifests as sha256:.bin. + ctx := newTestContext(t) + host, rOpts := newLocalhostRegistry(t) + + buildPlatformImg := func(content []byte, title string) gcrv1.Image { + layer := static.NewLayer(content, gvtypes.OCILayer) + img, err := mutate.Append(empty.Image, mutate.Addendum{ + Layer: layer, + Annotations: map[string]string{ + ocispec.AnnotationTitle: title, + }, + }) + if err != nil { + t.Fatalf("mutate.Append: %v", err) + } + img = mutate.MediaType(img, gvtypes.OCIManifestSchema1) + img = mutate.ConfigMediaType(img, gvtypes.MediaType(ocispec.MediaTypeImageConfig)) + return img + } + + amd64Img := buildPlatformImg([]byte("amd64 binary content"), "mybinary.linux-amd64") + arm64Img := buildPlatformImg([]byte("arm64 binary content"), "mybinary.linux-arm64") + + idx := mutate.AppendManifests(empty.Index, + mutate.IndexAddendum{ + Add: amd64Img, + Descriptor: gcrv1.Descriptor{ + MediaType: gvtypes.OCIManifestSchema1, + Platform: &gcrv1.Platform{OS: "linux", Architecture: "amd64"}, + }, + }, + mutate.IndexAddendum{ + Add: arm64Img, + Descriptor: gcrv1.Descriptor{ + MediaType: gvtypes.OCIManifestSchema1, + Platform: &gcrv1.Platform{OS: "linux", Architecture: "arm64"}, + }, + }, + ) + + ref := host + 
"/binaries/mybinary:v1" + tag, err := name.NewTag(ref, name.Insecure) + if err != nil { + t.Fatalf("name.NewTag: %v", err) + } + if err := remote.WriteIndex(tag, idx, rOpts...); err != nil { + t.Fatalf("remote.WriteIndex: %v", err) + } + + s := newTestStore(t) + if err := s.AddImage(ctx, ref, "", false, rOpts...); err != nil { + t.Fatalf("AddImage: %v", err) + } + + destDir := t.TempDir() + eo := &flags.ExtractOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + DestinationDir: destDir, + } + if err := ExtractCmd(ctx, eo, s, "binaries/mybinary:v1"); err != nil { + t.Fatalf("ExtractCmd: %v", err) + } + + entries, err := os.ReadDir(destDir) + if err != nil { + t.Fatalf("ReadDir: %v", err) + } + var names []string + for _, e := range entries { + names = append(names, e.Name()) + } + + // No sha256: digest-named files should be extracted + for _, n := range names { + if strings.HasPrefix(n, "sha256:") { + t.Errorf("unexpected digest-named file %q extracted (all files: %v)", n, names) + } + } + + // Both platform binaries must be present + for _, want := range []string{"mybinary.linux-amd64", "mybinary.linux-arm64"} { + found := false + for _, n := range names { + if n == want { + found = true + break + } + } + if !found { + t.Errorf("expected binary %q not found; got: %v", want, names) + } + } +} + +func TestExtractCmd_NestedImageIndex_NoBinFiles(t *testing.T) { + // Regression test: extracting a nested OCI image index (outer index whose only + // children are inner indexes, which in turn contain the platform manifests) must + // yield only the named binary files — no sha256:.bin metadata files. + // firstLeafManifest must descend through the outer index into the inner index to + // find a leaf manifest so that FromManifest selects the correct Files() mapper. 
+ ctx := newTestContext(t) + host, rOpts := newLocalhostRegistry(t) + + buildPlatformImg := func(content []byte, title string) gcrv1.Image { + layer := static.NewLayer(content, gvtypes.OCILayer) + img, err := mutate.Append(empty.Image, mutate.Addendum{ + Layer: layer, + Annotations: map[string]string{ + ocispec.AnnotationTitle: title, + }, + }) + if err != nil { + t.Fatalf("mutate.Append: %v", err) + } + img = mutate.MediaType(img, gvtypes.OCIManifestSchema1) + img = mutate.ConfigMediaType(img, gvtypes.MediaType(ocispec.MediaTypeImageConfig)) + return img + } + + amd64Img := buildPlatformImg([]byte("amd64 binary content"), "mybinary.linux-amd64") + arm64Img := buildPlatformImg([]byte("arm64 binary content"), "mybinary.linux-arm64") + + // Inner index contains the leaf platform manifests. + innerIdx := mutate.AppendManifests(empty.Index, + mutate.IndexAddendum{ + Add: amd64Img, + Descriptor: gcrv1.Descriptor{ + MediaType: gvtypes.OCIManifestSchema1, + Platform: &gcrv1.Platform{OS: "linux", Architecture: "amd64"}, + }, + }, + mutate.IndexAddendum{ + Add: arm64Img, + Descriptor: gcrv1.Descriptor{ + MediaType: gvtypes.OCIManifestSchema1, + Platform: &gcrv1.Platform{OS: "linux", Architecture: "arm64"}, + }, + }, + ) + + // Outer index contains only the inner index — all children are indexes. 
+ outerIdx := mutate.AppendManifests(empty.Index, + mutate.IndexAddendum{ + Add: innerIdx, + Descriptor: gcrv1.Descriptor{ + MediaType: gvtypes.OCIImageIndex, + }, + }, + ) + + ref := host + "/binaries/nested:v1" + tag, err := name.NewTag(ref, name.Insecure) + if err != nil { + t.Fatalf("name.NewTag: %v", err) + } + if err := remote.WriteIndex(tag, outerIdx, rOpts...); err != nil { + t.Fatalf("remote.WriteIndex: %v", err) + } + + s := newTestStore(t) + if err := s.AddImage(ctx, ref, "", false, rOpts...); err != nil { + t.Fatalf("AddImage: %v", err) + } + + destDir := t.TempDir() + eo := &flags.ExtractOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + DestinationDir: destDir, + } + if err := ExtractCmd(ctx, eo, s, "binaries/nested:v1"); err != nil { + t.Fatalf("ExtractCmd: %v", err) + } + + entries, err := os.ReadDir(destDir) + if err != nil { + t.Fatalf("ReadDir: %v", err) + } + var names []string + for _, e := range entries { + names = append(names, e.Name()) + } + + // No sha256: digest-named files should be extracted. + for _, n := range names { + if strings.HasPrefix(n, "sha256:") { + t.Errorf("unexpected digest-named file %q extracted (all files: %v)", n, names) + } + } + + // Both platform binaries must be present. + for _, want := range []string{"mybinary.linux-amd64", "mybinary.linux-arm64"} { + found := false + for _, n := range names { + if n == want { + found = true + break + } + } + if !found { + t.Errorf("expected binary %q not found; got: %v", want, names) + } + } +} + +func TestExtractCmd_ContainerImage_Skipped(t *testing.T) { + // A real container image (no AnnotationTitle on any layer) should be skipped + // without error and without writing any files to the destination directory. 
+ ctx := newTestContext(t) + host, rOpts := newLocalhostRegistry(t) + + layer := static.NewLayer([]byte("layer content"), gvtypes.OCILayer) + img, err := mutate.Append(empty.Image, mutate.Addendum{Layer: layer}) + if err != nil { + t.Fatalf("mutate.Append: %v", err) + } + img = mutate.MediaType(img, gvtypes.OCIManifestSchema1) + img = mutate.ConfigMediaType(img, gvtypes.MediaType(ocispec.MediaTypeImageConfig)) + + ref := host + "/myapp/myimage:v1" + tag, err := name.NewTag(ref, name.Insecure) + if err != nil { + t.Fatalf("name.NewTag: %v", err) + } + if err := remote.Write(tag, img, rOpts...); err != nil { + t.Fatalf("remote.Write: %v", err) + } + + s := newTestStore(t) + if err := s.AddImage(ctx, ref, "", false, rOpts...); err != nil { + t.Fatalf("AddImage: %v", err) + } + + destDir := t.TempDir() + eo := &flags.ExtractOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + DestinationDir: destDir, + } + if err := ExtractCmd(ctx, eo, s, "myapp/myimage:v1"); err != nil { + t.Fatalf("ExtractCmd: %v", err) + } + + entries, err := os.ReadDir(destDir) + if err != nil { + t.Fatalf("ReadDir: %v", err) + } + if len(entries) != 0 { + names := make([]string, len(entries)) + for i, e := range entries { + names[i] = e.Name() + } + t.Errorf("expected no files extracted for container image, got: %v", names) + } +} + +func TestExtractCmd_ContainerImageIndex_Skipped(t *testing.T) { + // A real multi-arch container image index (no AnnotationTitle on any layer) + // should be skipped without error and without writing any files. 
+ ctx := newTestContext(t) + host, rOpts := newLocalhostRegistry(t) + + buildPlatformImg := func(content []byte) gcrv1.Image { + layer := static.NewLayer(content, gvtypes.OCILayer) + img, err := mutate.Append(empty.Image, mutate.Addendum{Layer: layer}) + if err != nil { + t.Fatalf("mutate.Append: %v", err) + } + img = mutate.MediaType(img, gvtypes.OCIManifestSchema1) + img = mutate.ConfigMediaType(img, gvtypes.MediaType(ocispec.MediaTypeImageConfig)) + return img + } + + idx := mutate.AppendManifests(empty.Index, + mutate.IndexAddendum{ + Add: buildPlatformImg([]byte("amd64 content")), + Descriptor: gcrv1.Descriptor{ + MediaType: gvtypes.OCIManifestSchema1, + Platform: &gcrv1.Platform{OS: "linux", Architecture: "amd64"}, + }, + }, + mutate.IndexAddendum{ + Add: buildPlatformImg([]byte("arm64 content")), + Descriptor: gcrv1.Descriptor{ + MediaType: gvtypes.OCIManifestSchema1, + Platform: &gcrv1.Platform{OS: "linux", Architecture: "arm64"}, + }, + }, + ) + + ref := host + "/myapp/multiarch:v1" + tag, err := name.NewTag(ref, name.Insecure) + if err != nil { + t.Fatalf("name.NewTag: %v", err) + } + if err := remote.WriteIndex(tag, idx, rOpts...); err != nil { + t.Fatalf("remote.WriteIndex: %v", err) + } + + s := newTestStore(t) + if err := s.AddImage(ctx, ref, "", false, rOpts...); err != nil { + t.Fatalf("AddImage: %v", err) + } + + destDir := t.TempDir() + eo := &flags.ExtractOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + DestinationDir: destDir, + } + if err := ExtractCmd(ctx, eo, s, "myapp/multiarch:v1"); err != nil { + t.Fatalf("ExtractCmd: %v", err) + } + + entries, err := os.ReadDir(destDir) + if err != nil { + t.Fatalf("ReadDir: %v", err) + } + if len(entries) != 0 { + names := make([]string, len(entries)) + for i, e := range entries { + names[i] = e.Name() + } + t.Errorf("expected no files extracted for container image index, got: %v", names) + } +} + +func TestExtractCmd_SubstringMatch(t *testing.T) { + // reference.Parse applies DefaultTag ("latest") when 
no tag is given, so + // Parse("hauler/extract-sub.txt") and Parse("hauler/extract-sub.txt:latest") + // produce the same repo string "hauler/extract-sub.txt:latest". + // This means a no-tag ref substring-matches a stored "hauler/...:latest" key. + ctx := newTestContext(t) + s := newTestStore(t) + + fileContent := "substring match content" + url := seedFileInHTTPServer(t, "extract-sub.txt", fileContent) + if err := storeFile(ctx, s, v1.File{Path: url}); err != nil { + t.Fatalf("storeFile: %v", err) + } + + destDir := t.TempDir() + eo := &flags.ExtractOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + DestinationDir: destDir, + } + + // No explicit tag — Parse adds ":latest" as default, which still matches. + if err := ExtractCmd(ctx, eo, s, "hauler/extract-sub.txt"); err != nil { + t.Fatalf("ExtractCmd with no-tag ref: %v", err) + } + + outPath := filepath.Join(destDir, "extract-sub.txt") + data, err := os.ReadFile(outPath) + if err != nil { + t.Fatalf("expected extracted file at %s: %v", outPath, err) + } + if string(data) != fileContent { + t.Errorf("content mismatch: got %q, want %q", string(data), fileContent) + } +} diff --git a/cmd/hauler/cli/store/info.go b/cmd/hauler/cli/store/info.go index faa2e9d..086aa4c 100644 --- a/cmd/hauler/cli/store/info.go +++ b/cmd/hauler/cli/store/info.go @@ -300,13 +300,15 @@ func newItem(s *store.Layout, desc ocispec.Descriptor, m ocispec.Manifest, plat ctype = "image" } - switch desc.Annotations["kind"] { - case "dev.cosignproject.cosign/sigs": + switch { + case desc.Annotations[consts.KindAnnotationName] == consts.KindAnnotationSigs: ctype = "sigs" - case "dev.cosignproject.cosign/atts": + case desc.Annotations[consts.KindAnnotationName] == consts.KindAnnotationAtts: ctype = "atts" - case "dev.cosignproject.cosign/sboms": + case desc.Annotations[consts.KindAnnotationName] == consts.KindAnnotationSboms: ctype = "sbom" + case strings.HasPrefix(desc.Annotations[consts.KindAnnotationName], consts.KindAnnotationReferrers): + ctype 
= "referrer" } refName := desc.Annotations["io.containerd.image.name"] diff --git a/cmd/hauler/cli/store/info_test.go b/cmd/hauler/cli/store/info_test.go new file mode 100644 index 0000000..51af7e0 --- /dev/null +++ b/cmd/hauler/cli/store/info_test.go @@ -0,0 +1,238 @@ +package store + +import ( + "encoding/json" + "os" + "testing" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "hauler.dev/go/hauler/internal/flags" + v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1" + "hauler.dev/go/hauler/pkg/consts" +) + +func TestByteCountSI(t *testing.T) { + tests := []struct { + input int64 + want string + }{ + {0, "0 B"}, + {999, "999 B"}, + {1000, "1.0 kB"}, + {1500000, "1.5 MB"}, + {1000000000, "1.0 GB"}, + } + for _, tc := range tests { + got := byteCountSI(tc.input) + if got != tc.want { + t.Errorf("byteCountSI(%d) = %q, want %q", tc.input, got, tc.want) + } + } +} + +func TestTruncateReference(t *testing.T) { + longDigest := "sha256:abcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcd" + tests := []struct { + name string + input string + want string + }{ + {"tag ref unchanged", "nginx:latest", "nginx:latest"}, + {"long digest truncated", "nginx@" + longDigest, "nginx@sha256:abcdefabcdef\u2026"}, + {"short digest not truncated", "nginx@sha256:abcdef", "nginx@sha256:abcdef"}, + {"no digest unchanged", "myrepo/myimage:v1", "myrepo/myimage:v1"}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := truncateReference(tc.input) + if got != tc.want { + t.Errorf("truncateReference(%q) = %q, want %q", tc.input, got, tc.want) + } + }) + } +} + +func TestBuildJson(t *testing.T) { + items := []item{ + {Reference: "myrepo/myimage:v1", Type: "image", Platform: "linux/amd64", Size: 1024, Layers: 2}, + {Reference: "myrepo/mychart:v1", Type: "chart", Platform: "-", Size: 512, Layers: 1}, + } + out := buildJson(items...) 
+ if out == "" { + t.Fatal("buildJson returned empty string") + } + var got []item + if err := json.Unmarshal([]byte(out), &got); err != nil { + t.Fatalf("buildJson output is not valid JSON: %v\noutput: %s", err, out) + } + if len(got) != len(items) { + t.Fatalf("buildJson: got %d items, want %d", len(got), len(items)) + } + for i, want := range items { + if got[i].Reference != want.Reference { + t.Errorf("item[%d].Reference = %q, want %q", i, got[i].Reference, want.Reference) + } + if got[i].Type != want.Type { + t.Errorf("item[%d].Type = %q, want %q", i, got[i].Type, want.Type) + } + if got[i].Size != want.Size { + t.Errorf("item[%d].Size = %d, want %d", i, got[i].Size, want.Size) + } + } +} + +func TestNewItem(t *testing.T) { + // newItem uses s only for its signature; it does not dereference s in practice. + // We pass nil to keep tests dependency-free. + const validRef = "myrepo/myimage:latest" + + makeDesc := func(kindAnnotation string) ocispec.Descriptor { + desc := ocispec.Descriptor{ + Annotations: map[string]string{ + "io.containerd.image.name": validRef, + }, + } + if kindAnnotation != "" { + desc.Annotations[consts.KindAnnotationName] = kindAnnotation + } + return desc + } + makeManifest := func(configMediaType string) ocispec.Manifest { + return ocispec.Manifest{ + Config: ocispec.Descriptor{MediaType: configMediaType}, + } + } + + tests := []struct { + name string + configMedia string + kindAnnotation string + typeFilter string + wantType string + wantEmpty bool + }{ + { + name: "DockerConfigJSON → image", + configMedia: consts.DockerConfigJSON, + typeFilter: "all", + wantType: "image", + }, + { + name: "ChartConfigMediaType → chart", + configMedia: consts.ChartConfigMediaType, + typeFilter: "all", + wantType: "chart", + }, + { + name: "FileLocalConfigMediaType → file", + configMedia: consts.FileLocalConfigMediaType, + typeFilter: "all", + wantType: "file", + }, + { + name: "KindAnnotationSigs → sigs", + configMedia: consts.DockerConfigJSON, + 
kindAnnotation: consts.KindAnnotationSigs, + typeFilter: "all", + wantType: "sigs", + }, + { + name: "KindAnnotationAtts → atts", + configMedia: consts.DockerConfigJSON, + kindAnnotation: consts.KindAnnotationAtts, + typeFilter: "all", + wantType: "atts", + }, + { + name: "KindAnnotationReferrers prefix → referrer", + configMedia: consts.DockerConfigJSON, + kindAnnotation: consts.KindAnnotationReferrers + "/abc123", + typeFilter: "all", + wantType: "referrer", + }, + { + name: "TypeFilter:image with chart → empty item", + configMedia: consts.ChartConfigMediaType, + typeFilter: "image", + wantEmpty: true, + }, + { + name: "TypeFilter:file with image → empty item", + configMedia: consts.DockerConfigJSON, + typeFilter: "file", + wantEmpty: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + desc := makeDesc(tc.kindAnnotation) + m := makeManifest(tc.configMedia) + o := &flags.InfoOpts{TypeFilter: tc.typeFilter} + + got := newItem(nil, desc, m, "linux/amd64", o) + var empty item + if tc.wantEmpty { + if got != empty { + t.Errorf("expected empty item, got %+v", got) + } + return + } + if got == empty { + t.Fatalf("got empty item, want type %q", tc.wantType) + } + if got.Type != tc.wantType { + t.Errorf("got type %q, want %q", got.Type, tc.wantType) + } + }) + } +} + +func TestInfoCmd(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + // Seed a file artifact using a local temp file. 
+ tmpFile := t.TempDir() + "/hello.txt" + if err := os.WriteFile(tmpFile, []byte("hello hauler"), 0o644); err != nil { + t.Fatalf("write tmpFile: %v", err) + } + fi := v1.File{Path: tmpFile} + if err := storeFile(ctx, s, fi); err != nil { + t.Fatalf("storeFile: %v", err) + } + + baseOpts := func(typeFilter, format string) *flags.InfoOpts { + return &flags.InfoOpts{ + StoreRootOpts: defaultRootOpts(s.Root), + OutputFormat: format, + TypeFilter: typeFilter, + } + } + + t.Run("TypeFilter:all json", func(t *testing.T) { + if err := InfoCmd(ctx, baseOpts("all", "json"), s); err != nil { + t.Errorf("InfoCmd(all, json): %v", err) + } + }) + + t.Run("TypeFilter:file json", func(t *testing.T) { + if err := InfoCmd(ctx, baseOpts("file", "json"), s); err != nil { + t.Errorf("InfoCmd(file, json): %v", err) + } + }) + + t.Run("TypeFilter:image json", func(t *testing.T) { + // Store has only a file artifact; image filter returns no items (no error). + if err := InfoCmd(ctx, baseOpts("image", "json"), s); err != nil { + t.Errorf("InfoCmd(image, json): %v", err) + } + }) + + t.Run("TypeFilter:all table", func(t *testing.T) { + if err := InfoCmd(ctx, baseOpts("all", "table"), s); err != nil { + t.Errorf("InfoCmd(all, table): %v", err) + } + }) +} diff --git a/cmd/hauler/cli/store/lifecycle_test.go b/cmd/hauler/cli/store/lifecycle_test.go new file mode 100644 index 0000000..f1f7a9e --- /dev/null +++ b/cmd/hauler/cli/store/lifecycle_test.go @@ -0,0 +1,309 @@ +package store + +// lifecycle_test.go covers the end-to-end add->save->load->copy/extract lifecycle +// for file, image, and chart artifact types. +// +// Do NOT use t.Parallel() -- SaveCmd calls os.Chdir(storeDir). +// Always use absolute paths for StoreDir and FileName. 
+ +import ( + "os" + "path/filepath" + "strings" + "testing" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/v1/remote" + + "hauler.dev/go/hauler/internal/flags" + v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1" + "hauler.dev/go/hauler/pkg/store" +) + +// TestLifecycle_FileArtifact_AddSaveLoadCopy exercises the full lifecycle for a +// file artifact: seed HTTP server -> storeFile -> SaveCmd -> LoadCmd -> CopyCmd dir://. +func TestLifecycle_FileArtifact_AddSaveLoadCopy(t *testing.T) { + ctx := newTestContext(t) + + // Step 1: Seed an HTTP file server with known content. + fileContent := "lifecycle file artifact content" + url := seedFileInHTTPServer(t, "lifecycle.txt", fileContent) + + // Step 2: storeFile into store A. + storeA := newTestStore(t) + if err := storeFile(ctx, storeA, v1.File{Path: url}); err != nil { + t.Fatalf("storeFile: %v", err) + } + assertArtifactInStore(t, storeA, "lifecycle.txt") + + // Flush index.json so SaveCmd can read it from disk. + if err := storeA.SaveIndex(); err != nil { + t.Fatalf("SaveIndex: %v", err) + } + + // Step 3: SaveCmd -> archive (absolute paths required). + archivePath := filepath.Join(t.TempDir(), "lifecycle-file.tar.zst") + saveOpts := newSaveOpts(storeA.Root, archivePath) + if err := SaveCmd(ctx, saveOpts, defaultRootOpts(storeA.Root), defaultCliOpts()); err != nil { + t.Fatalf("SaveCmd: %v", err) + } + + fi, err := os.Stat(archivePath) + if err != nil { + t.Fatalf("archive stat: %v", err) + } + if fi.Size() == 0 { + t.Fatal("archive is empty") + } + + // Step 4: LoadCmd -> store B. 
+ storeBDir := t.TempDir() + loadOpts := &flags.LoadOpts{ + StoreRootOpts: defaultRootOpts(storeBDir), + FileName: []string{archivePath}, + } + if err := LoadCmd(ctx, loadOpts, defaultRootOpts(storeBDir), defaultCliOpts()); err != nil { + t.Fatalf("LoadCmd: %v", err) + } + + storeB, err := store.NewLayout(storeBDir) + if err != nil { + t.Fatalf("store.NewLayout(storeB): %v", err) + } + assertArtifactInStore(t, storeB, "lifecycle.txt") + + // Step 5: CopyCmd dir:// -> extract file to destDir. + extractDir := t.TempDir() + copyOpts := &flags.CopyOpts{StoreRootOpts: defaultRootOpts(storeB.Root)} + if err := CopyCmd(ctx, copyOpts, storeB, "dir://"+extractDir, defaultCliOpts()); err != nil { + t.Fatalf("CopyCmd dir: %v", err) + } + + // Step 6: Assert file content matches original. + outPath := filepath.Join(extractDir, "lifecycle.txt") + data, err := os.ReadFile(outPath) + if err != nil { + t.Fatalf("expected extracted file at %s: %v", outPath, err) + } + if string(data) != fileContent { + t.Errorf("file content mismatch: got %q, want %q", string(data), fileContent) + } +} + +// TestLifecycle_Image_AddSaveLoadCopyRegistry exercises the full lifecycle for +// a container image: seed registry 1 -> storeImage -> SaveCmd -> LoadCmd -> +// CopyCmd registry:// -> verify in registry 2. +func TestLifecycle_Image_AddSaveLoadCopyRegistry(t *testing.T) { + ctx := newTestContext(t) + + // Step 1: Seed image into in-memory registry 1. + srcHost, srcOpts := newLocalhostRegistry(t) + srcImg := seedImage(t, srcHost, "lifecycle/app", "v1", srcOpts...) + + srcDigest, err := srcImg.Digest() + if err != nil { + t.Fatalf("srcImg.Digest: %v", err) + } + + // Step 2: storeImage into store A. 
+ storeA := newTestStore(t) + rso := defaultRootOpts(storeA.Root) + ro := defaultCliOpts() + if err := storeImage(ctx, storeA, v1.Image{Name: srcHost + "/lifecycle/app:v1"}, "", false, rso, ro, ""); err != nil { + t.Fatalf("storeImage: %v", err) + } + assertArtifactInStore(t, storeA, "lifecycle/app:v1") + + // Flush index.json for SaveCmd. + if err := storeA.SaveIndex(); err != nil { + t.Fatalf("SaveIndex: %v", err) + } + + // Step 3: SaveCmd -> archive. + archivePath := filepath.Join(t.TempDir(), "lifecycle-image.tar.zst") + saveOpts := newSaveOpts(storeA.Root, archivePath) + if err := SaveCmd(ctx, saveOpts, defaultRootOpts(storeA.Root), defaultCliOpts()); err != nil { + t.Fatalf("SaveCmd: %v", err) + } + + // Step 4: LoadCmd -> store B. + storeBDir := t.TempDir() + loadOpts := &flags.LoadOpts{ + StoreRootOpts: defaultRootOpts(storeBDir), + FileName: []string{archivePath}, + } + if err := LoadCmd(ctx, loadOpts, defaultRootOpts(storeBDir), defaultCliOpts()); err != nil { + t.Fatalf("LoadCmd: %v", err) + } + + storeB, err := store.NewLayout(storeBDir) + if err != nil { + t.Fatalf("store.NewLayout(storeB): %v", err) + } + assertArtifactInStore(t, storeB, "lifecycle/app:v1") + + // Step 5: CopyCmd registry:// -> in-memory registry 2. + dstHost, dstOpts := newTestRegistry(t) + copyOpts := &flags.CopyOpts{ + StoreRootOpts: defaultRootOpts(storeB.Root), + PlainHTTP: true, + } + if err := CopyCmd(ctx, copyOpts, storeB, "registry://"+dstHost, defaultCliOpts()); err != nil { + t.Fatalf("CopyCmd registry: %v", err) + } + + // Step 6: Pull from registry 2 and compare digest to original. + dstRef, err := name.NewTag(dstHost+"/lifecycle/app:v1", name.Insecure) + if err != nil { + t.Fatalf("name.NewTag: %v", err) + } + desc, err := remote.Get(dstRef, dstOpts...) 
+ if err != nil { + t.Fatalf("image not found in target registry: %v", err) + } + if desc.Digest != srcDigest { + t.Errorf("digest mismatch: got %s, want %s", desc.Digest, srcDigest) + } +} + +// TestLifecycle_Chart_AddSaveLoadExtract exercises the full lifecycle for a +// Helm chart: AddChartCmd -> SaveCmd -> LoadCmd -> ExtractCmd -> .tgz in destDir. +func TestLifecycle_Chart_AddSaveLoadExtract(t *testing.T) { + ctx := newTestContext(t) + + // Step 1: AddChartCmd with local testdata chart into store A. + storeA := newTestStore(t) + rso := defaultRootOpts(storeA.Root) + ro := defaultCliOpts() + + chartOpts := newAddChartOpts(chartTestdataDir, "") + if err := AddChartCmd(ctx, chartOpts, storeA, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil { + t.Fatalf("AddChartCmd: %v", err) + } + assertArtifactInStore(t, storeA, "rancher-cluster-templates") + + // Flush index.json for SaveCmd. + if err := storeA.SaveIndex(); err != nil { + t.Fatalf("SaveIndex: %v", err) + } + + // Step 2: SaveCmd -> archive. + archivePath := filepath.Join(t.TempDir(), "lifecycle-chart.tar.zst") + saveOpts := newSaveOpts(storeA.Root, archivePath) + if err := SaveCmd(ctx, saveOpts, defaultRootOpts(storeA.Root), defaultCliOpts()); err != nil { + t.Fatalf("SaveCmd: %v", err) + } + + // Step 3: LoadCmd -> new store. + storeBDir := t.TempDir() + loadOpts := &flags.LoadOpts{ + StoreRootOpts: defaultRootOpts(storeBDir), + FileName: []string{archivePath}, + } + if err := LoadCmd(ctx, loadOpts, defaultRootOpts(storeBDir), defaultCliOpts()); err != nil { + t.Fatalf("LoadCmd: %v", err) + } + + storeB, err := store.NewLayout(storeBDir) + if err != nil { + t.Fatalf("store.NewLayout(storeB): %v", err) + } + assertArtifactInStore(t, storeB, "rancher-cluster-templates") + + // Step 4: ExtractCmd -> .tgz in destDir. 
+ destDir := t.TempDir() + extractOpts := &flags.ExtractOpts{ + StoreRootOpts: defaultRootOpts(storeB.Root), + DestinationDir: destDir, + } + if err := ExtractCmd(ctx, extractOpts, storeB, "hauler/rancher-cluster-templates:0.5.2"); err != nil { + t.Fatalf("ExtractCmd: %v", err) + } + + entries, err := os.ReadDir(destDir) + if err != nil { + t.Fatalf("ReadDir: %v", err) + } + + found := false + for _, e := range entries { + if strings.HasSuffix(e.Name(), ".tgz") || strings.HasSuffix(e.Name(), ".tar.gz") { + found = true + break + } + } + if !found { + names := make([]string, len(entries)) + for i, e := range entries { + names[i] = e.Name() + } + t.Errorf("expected a .tgz or .tar.gz in destDir, got: %v", names) + } +} + +// TestLifecycle_Remove_ThenSave verifies that removing one artifact from a store +// with two file artifacts, then saving/loading, results in only the retained +// artifact being present. +func TestLifecycle_Remove_ThenSave(t *testing.T) { + ctx := newTestContext(t) + + // Step 1: Add two file artifacts. + url1 := seedFileInHTTPServer(t, "keep-me.txt", "content to keep") + url2 := seedFileInHTTPServer(t, "remove-me.txt", "content to remove") + + storeA := newTestStore(t) + if err := storeFile(ctx, storeA, v1.File{Path: url1}); err != nil { + t.Fatalf("storeFile keep-me: %v", err) + } + if err := storeFile(ctx, storeA, v1.File{Path: url2}); err != nil { + t.Fatalf("storeFile remove-me: %v", err) + } + + if n := countArtifactsInStore(t, storeA); n != 2 { + t.Fatalf("expected 2 artifacts after adding both files, got %d", n) + } + + // Step 2: RemoveCmd(Force:true) on the "remove-me" artifact. + if err := RemoveCmd(ctx, &flags.RemoveOpts{Force: true}, storeA, "remove-me"); err != nil { + t.Fatalf("RemoveCmd: %v", err) + } + + if n := countArtifactsInStore(t, storeA); n != 1 { + t.Fatalf("expected 1 artifact after removal, got %d", n) + } + assertArtifactInStore(t, storeA, "keep-me.txt") + + // Flush index.json for SaveCmd. 
RemoveCmd calls OCI.SaveIndex() internally + // (via Layout.Remove), but call it again for safety. + if err := storeA.SaveIndex(); err != nil { + t.Fatalf("SaveIndex: %v", err) + } + + // Step 3: SaveCmd -> archive. + archivePath := filepath.Join(t.TempDir(), "lifecycle-remove.tar.zst") + saveOpts := newSaveOpts(storeA.Root, archivePath) + if err := SaveCmd(ctx, saveOpts, defaultRootOpts(storeA.Root), defaultCliOpts()); err != nil { + t.Fatalf("SaveCmd: %v", err) + } + + // Step 4: LoadCmd -> new store. + storeBDir := t.TempDir() + loadOpts := &flags.LoadOpts{ + StoreRootOpts: defaultRootOpts(storeBDir), + FileName: []string{archivePath}, + } + if err := LoadCmd(ctx, loadOpts, defaultRootOpts(storeBDir), defaultCliOpts()); err != nil { + t.Fatalf("LoadCmd: %v", err) + } + + storeB, err := store.NewLayout(storeBDir) + if err != nil { + t.Fatalf("store.NewLayout(storeB): %v", err) + } + + // Step 5: Assert only the retained artifact is present. + if n := countArtifactsInStore(t, storeB); n != 1 { + t.Errorf("expected 1 artifact in loaded store, got %d", n) + } + assertArtifactInStore(t, storeB, "keep-me.txt") +} diff --git a/cmd/hauler/cli/store/load.go b/cmd/hauler/cli/store/load.go index ebd59de..62d1917 100644 --- a/cmd/hauler/cli/store/load.go +++ b/cmd/hauler/cli/store/load.go @@ -39,8 +39,9 @@ func LoadCmd(ctx context.Context, o *flags.LoadOpts, rso *flags.StoreRootOpts, r l.Debugf("using temporary directory at [%s]", tempDir) for _, fileName := range o.FileName { - l.Infof("loading haul [%s] to [%s]", fileName, o.StoreDir) - err := unarchiveLayoutTo(ctx, fileName, o.StoreDir, tempDir) + resolved := resolveHaulPath(fileName) + l.Infof("loading haul [%s] to [%s]", resolved, o.StoreDir) + err := unarchiveLayoutTo(ctx, resolved, o.StoreDir, tempDir) if err != nil { return err } @@ -85,6 +86,13 @@ func unarchiveLayoutTo(ctx context.Context, haulPath string, dest string, tempDi } } + // reassemble chunk files if haulPath matches the chunk naming pattern + joined, 
err := archives.JoinChunks(ctx, haulPath, tempDir) + if err != nil { + return err + } + haulPath = joined + if err := archives.Unarchive(ctx, haulPath, tempDir); err != nil { return err } @@ -107,6 +115,9 @@ func unarchiveLayoutTo(ctx context.Context, haulPath string, dest string, tempDi if _, exists := idx.Manifests[i].Annotations[consts.KindAnnotationName]; !exists { idx.Manifests[i].Annotations[consts.KindAnnotationName] = consts.KindAnnotationImage } + // Translate legacy dev.cosignproject.cosign values to dev.hauler equivalents. + kind := idx.Manifests[i].Annotations[consts.KindAnnotationName] + idx.Manifests[i].Annotations[consts.KindAnnotationName] = consts.NormalizeLegacyKind(kind) if ref, ok := idx.Manifests[i].Annotations[consts.ContainerdImageNameKey]; ok { if slash := strings.Index(ref, "/"); slash != -1 { ref = ref[slash+1:] @@ -139,6 +150,29 @@ func unarchiveLayoutTo(ctx context.Context, haulPath string, dest string, tempDi return err } +// resolveHaulPath returns path as-is if it exists or is a URL. If the file is +// not found, it globs for chunk files matching _* in the same +// directory and returns the first match so JoinChunks can reassemble them. 
+func resolveHaulPath(path string) string { + if strings.HasPrefix(path, "http://") || strings.HasPrefix(path, "https://") { + return path + } + if _, err := os.Stat(path); err == nil { + return path + } + base := path + ext := "" + for filepath.Ext(base) != "" { + ext = filepath.Ext(base) + ext + base = strings.TrimSuffix(base, filepath.Ext(base)) + } + matches, err := filepath.Glob(base + "_*" + ext) + if err != nil || len(matches) == 0 { + return path + } + return matches[0] +} + func clearDir(path string) error { entries, err := os.ReadDir(path) if err != nil { diff --git a/cmd/hauler/cli/store/load_test.go b/cmd/hauler/cli/store/load_test.go new file mode 100644 index 0000000..15d2804 --- /dev/null +++ b/cmd/hauler/cli/store/load_test.go @@ -0,0 +1,416 @@ +package store + +// load_test.go covers unarchiveLayoutTo, LoadCmd, and clearDir. +// +// Do NOT call t.Parallel() on tests that invoke createRootLevelArchive — +// that helper uses the mholt/archives library directly to avoid os.Chdir, +// so it is safe for concurrent use, but the tests themselves exercise +// unarchiveLayoutTo which is already sequential. + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strings" + "testing" + + mholtarchives "github.com/mholt/archives" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "hauler.dev/go/hauler/internal/flags" + "hauler.dev/go/hauler/pkg/archives" + "hauler.dev/go/hauler/pkg/consts" + "hauler.dev/go/hauler/pkg/store" +) + +// testHaulArchive is the relative path from cmd/hauler/cli/store/ to the +// bundled test haul archive produced by the project's own CI/test setup. +const testHaulArchive = "../../../../testdata/haul.tar.zst" + +// createRootLevelArchive creates a tar.zst archive from dir with files placed +// at the archive root (no directory prefix). This matches the layout produced +// by SaveCmd, which uses os.Chdir + Archive(".", ...) to achieve the same +// effect. 
Using mholt/archives directly avoids the os.Chdir side-effect. +func createRootLevelArchive(dir, outfile string) error { + // A trailing path separator tells mholt/archives to enumerate the + // directory's *contents* only — files land at archive root with no prefix. + // Without the trailing slash, an empty value uses filepath.Base(dir) as + // the archive subdirectory name instead of placing files at root. + files, err := mholtarchives.FilesFromDisk(context.Background(), nil, map[string]string{ + dir + string(filepath.Separator): "", + }) + if err != nil { + return err + } + + f, err := os.Create(outfile) + if err != nil { + return err + } + defer f.Close() + + format := mholtarchives.CompressedArchive{ + Compression: mholtarchives.Zstd{}, + Archival: mholtarchives.Tar{}, + } + return format.Archive(context.Background(), f, files) +} + +// -------------------------------------------------------------------------- +// TestUnarchiveLayoutTo +// -------------------------------------------------------------------------- + +// TestUnarchiveLayoutTo verifies that unarchiveLayoutTo correctly extracts a +// haul archive into a destination OCI layout, backfills missing annotations, +// and propagates the ContainerdImageNameKey → ImageRefKey mapping. +func TestUnarchiveLayoutTo(t *testing.T) { + ctx := newTestContext(t) + destDir := t.TempDir() + tempDir := t.TempDir() + + if err := unarchiveLayoutTo(ctx, testHaulArchive, destDir, tempDir); err != nil { + t.Fatalf("unarchiveLayoutTo: %v", err) + } + + s, err := store.NewLayout(destDir) + if err != nil { + t.Fatalf("store.NewLayout(destDir): %v", err) + } + + if count := countArtifactsInStore(t, s); count == 0 { + t.Fatal("expected at least one descriptor in dest store after unarchiveLayoutTo") + } + + // Every top-level descriptor must carry KindAnnotationName. + // Descriptors that were loaded with ContainerdImageNameKey must also have + // ImageRefKey set (the backfill logic in unarchiveLayoutTo ensures this). 
+ if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error { + if desc.Annotations[consts.KindAnnotationName] == "" { + t.Errorf("descriptor %s missing KindAnnotationName", desc.Digest) + } + if _, hasContainerd := desc.Annotations[consts.ContainerdImageNameKey]; hasContainerd { + if desc.Annotations[consts.ImageRefKey] == "" { + t.Errorf("descriptor %s has %s but missing %s", + desc.Digest, consts.ContainerdImageNameKey, consts.ImageRefKey) + } + } + return nil + }); err != nil { + t.Fatalf("Walk: %v", err) + } +} + +// -------------------------------------------------------------------------- +// TestLoadCmd_LocalFile +// -------------------------------------------------------------------------- + +// TestLoadCmd_LocalFile verifies that LoadCmd loads one or more local haul +// archives into the destination store. +func TestLoadCmd_LocalFile(t *testing.T) { + ctx := newTestContext(t) + + t.Run("single archive", func(t *testing.T) { + destDir := t.TempDir() + o := &flags.LoadOpts{ + StoreRootOpts: defaultRootOpts(destDir), + FileName: []string{testHaulArchive}, + } + if err := LoadCmd(ctx, o, defaultRootOpts(destDir), defaultCliOpts()); err != nil { + t.Fatalf("LoadCmd: %v", err) + } + s, err := store.NewLayout(destDir) + if err != nil { + t.Fatalf("store.NewLayout: %v", err) + } + if countArtifactsInStore(t, s) == 0 { + t.Error("expected artifacts in store after LoadCmd") + } + }) + + t.Run("two archives", func(t *testing.T) { + // Loading the same archive twice must be idempotent: duplicate blobs are + // silently discarded by the OCI pusher. The descriptor count after two + // loads must equal the count after a single load. 
+ singleDir := t.TempDir() + singleOpts := &flags.LoadOpts{ + StoreRootOpts: defaultRootOpts(singleDir), + FileName: []string{testHaulArchive}, + } + if err := LoadCmd(ctx, singleOpts, defaultRootOpts(singleDir), defaultCliOpts()); err != nil { + t.Fatalf("LoadCmd single: %v", err) + } + singleStore, err := store.NewLayout(singleDir) + if err != nil { + t.Fatalf("store.NewLayout single: %v", err) + } + singleCount := countArtifactsInStore(t, singleStore) + + doubleDir := t.TempDir() + doubleOpts := &flags.LoadOpts{ + StoreRootOpts: defaultRootOpts(doubleDir), + FileName: []string{testHaulArchive, testHaulArchive}, + } + if err := LoadCmd(ctx, doubleOpts, defaultRootOpts(doubleDir), defaultCliOpts()); err != nil { + t.Fatalf("LoadCmd double: %v", err) + } + doubleStore, err := store.NewLayout(doubleDir) + if err != nil { + t.Fatalf("store.NewLayout double: %v", err) + } + doubleCount := countArtifactsInStore(t, doubleStore) + + if doubleCount != singleCount { + t.Errorf("loading the same archive twice: got %d descriptors, want %d (same as single load)", + doubleCount, singleCount) + } + }) +} + +// -------------------------------------------------------------------------- +// TestLoadCmd_RemoteArchive +// -------------------------------------------------------------------------- + +// TestLoadCmd_RemoteArchive verifies that LoadCmd can fetch and load a haul +// archive served over HTTP. 
+func TestLoadCmd_RemoteArchive(t *testing.T) { + ctx := newTestContext(t) + + archiveData, err := os.ReadFile(testHaulArchive) + if err != nil { + t.Fatalf("read test archive: %v", err) + } + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/octet-stream") + w.Write(archiveData) //nolint:errcheck + })) + t.Cleanup(srv.Close) + + destDir := t.TempDir() + remoteURL := srv.URL + "/haul.tar.zst" + + o := &flags.LoadOpts{ + StoreRootOpts: defaultRootOpts(destDir), + FileName: []string{remoteURL}, + } + + if err := LoadCmd(ctx, o, defaultRootOpts(destDir), defaultCliOpts()); err != nil { + t.Fatalf("LoadCmd remote: %v", err) + } + + s, err := store.NewLayout(destDir) + if err != nil { + t.Fatalf("store.NewLayout: %v", err) + } + if countArtifactsInStore(t, s) == 0 { + t.Error("expected artifacts in store after remote LoadCmd") + } +} + +// -------------------------------------------------------------------------- +// TestUnarchiveLayoutTo_AnnotationBackfill +// -------------------------------------------------------------------------- + +// TestUnarchiveLayoutTo_AnnotationBackfill crafts a haul archive whose +// index.json entries are missing KindAnnotationName, then verifies that +// unarchiveLayoutTo backfills every entry with KindAnnotationImage. +func TestUnarchiveLayoutTo_AnnotationBackfill(t *testing.T) { + ctx := newTestContext(t) + + // Step 1: Extract the real test archive to obtain a valid OCI layout on disk. + extractDir := t.TempDir() + if err := archives.Unarchive(ctx, testHaulArchive, extractDir); err != nil { + t.Fatalf("Unarchive: %v", err) + } + + // Step 2: Read index.json and strip KindAnnotationName from every descriptor. 
+ indexPath := filepath.Join(extractDir, "index.json") + data, err := os.ReadFile(indexPath) + if err != nil { + t.Fatalf("read index.json: %v", err) + } + + var idx ocispec.Index + if err := json.Unmarshal(data, &idx); err != nil { + t.Fatalf("unmarshal index.json: %v", err) + } + if len(idx.Manifests) == 0 { + t.Skip("testdata/haul.tar.zst has no top-level manifests — cannot test backfill") + } + for i := range idx.Manifests { + delete(idx.Manifests[i].Annotations, consts.KindAnnotationName) + } + + out, err := json.MarshalIndent(idx, "", " ") + if err != nil { + t.Fatalf("marshal stripped index.json: %v", err) + } + if err := os.WriteFile(indexPath, out, 0644); err != nil { + t.Fatalf("write stripped index.json: %v", err) + } + + // Step 3: Re-archive with files at the archive root (no subdir prefix) so + // the layout matches what unarchiveLayoutTo expects after extraction. + strippedArchive := filepath.Join(t.TempDir(), "stripped.tar.zst") + if err := createRootLevelArchive(extractDir, strippedArchive); err != nil { + t.Fatalf("createRootLevelArchive: %v", err) + } + + // Step 4: Load the stripped archive. + destDir := t.TempDir() + tempDir := t.TempDir() + if err := unarchiveLayoutTo(ctx, strippedArchive, destDir, tempDir); err != nil { + t.Fatalf("unarchiveLayoutTo stripped: %v", err) + } + + // Step 5: Every descriptor in the dest store must now have + // KindAnnotationName set to KindAnnotationImage (the backfill default). 
+ s, err := store.NewLayout(destDir) + if err != nil { + t.Fatalf("store.NewLayout(destDir): %v", err) + } + + if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error { + kind := desc.Annotations[consts.KindAnnotationName] + if kind == "" { + t.Errorf("descriptor %s missing KindAnnotationName after backfill", desc.Digest) + } else if kind != consts.KindAnnotationImage { + t.Errorf("descriptor %s: expected backfilled kind=%q, got %q", + desc.Digest, consts.KindAnnotationImage, kind) + } + return nil + }); err != nil { + t.Fatalf("Walk: %v", err) + } +} + +// -------------------------------------------------------------------------- +// TestUnarchiveLayoutTo_LegacyKindMigration +// -------------------------------------------------------------------------- + +// TestUnarchiveLayoutTo_LegacyKindMigration crafts a haul archive whose +// index.json contains old dev.cosignproject.cosign kind values, then verifies +// that unarchiveLayoutTo translates them to dev.hauler equivalents. +func TestUnarchiveLayoutTo_LegacyKindMigration(t *testing.T) { + ctx := newTestContext(t) + + // Step 1: Extract the real test archive to obtain a valid OCI layout on disk. + extractDir := t.TempDir() + if err := archives.Unarchive(ctx, testHaulArchive, extractDir); err != nil { + t.Fatalf("Unarchive: %v", err) + } + + // Step 2: Read index.json and inject old dev.cosignproject.cosign kind values. + indexPath := filepath.Join(extractDir, "index.json") + data, err := os.ReadFile(indexPath) + if err != nil { + t.Fatalf("read index.json: %v", err) + } + + var idx ocispec.Index + if err := json.Unmarshal(data, &idx); err != nil { + t.Fatalf("unmarshal index.json: %v", err) + } + if len(idx.Manifests) == 0 { + t.Skip("testdata/haul.tar.zst has no top-level manifests — cannot test legacy kind migration") + } + + // Replace all kind annotations with old-prefix equivalents so we can verify + // that unarchiveLayoutTo normalizes them to the new dev.hauler prefix. 
+ const legacyPrefix = "dev.cosignproject.cosign" + const newPrefix = "dev.hauler" + for i := range idx.Manifests { + if idx.Manifests[i].Annotations == nil { + idx.Manifests[i].Annotations = make(map[string]string) + } + kind := idx.Manifests[i].Annotations[consts.KindAnnotationName] + if kind == "" { + kind = consts.KindAnnotationImage + } + // Rewrite dev.hauler/* → dev.cosignproject.cosign/* to simulate legacy archive. + if strings.HasPrefix(kind, newPrefix) { + kind = legacyPrefix + kind[len(newPrefix):] + } + idx.Manifests[i].Annotations[consts.KindAnnotationName] = kind + } + + out, err := json.MarshalIndent(idx, "", " ") + if err != nil { + t.Fatalf("marshal legacy index.json: %v", err) + } + if err := os.WriteFile(indexPath, out, 0644); err != nil { + t.Fatalf("write legacy index.json: %v", err) + } + + // Step 3: Re-archive with files at the archive root (no subdir prefix). + legacyArchive := filepath.Join(t.TempDir(), "legacy.tar.zst") + if err := createRootLevelArchive(extractDir, legacyArchive); err != nil { + t.Fatalf("createRootLevelArchive: %v", err) + } + + // Step 4: Load the legacy archive. + destDir := t.TempDir() + tempDir := t.TempDir() + if err := unarchiveLayoutTo(ctx, legacyArchive, destDir, tempDir); err != nil { + t.Fatalf("unarchiveLayoutTo legacy: %v", err) + } + + // Step 5: Every descriptor in the dest store must now have a dev.hauler kind. 
+ s, err := store.NewLayout(destDir) + if err != nil { + t.Fatalf("store.NewLayout(destDir): %v", err) + } + + if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error { + kind := desc.Annotations[consts.KindAnnotationName] + if strings.HasPrefix(kind, legacyPrefix) { + t.Errorf("descriptor %s still has legacy kind %q; expected dev.hauler prefix", + desc.Digest, kind) + } + if !strings.HasPrefix(kind, newPrefix) { + t.Errorf("descriptor %s has unexpected kind %q; expected dev.hauler prefix", + desc.Digest, kind) + } + return nil + }); err != nil { + t.Fatalf("Walk: %v", err) + } +} + +// -------------------------------------------------------------------------- +// TestClearDir +// -------------------------------------------------------------------------- + +// TestClearDir verifies that clearDir removes all entries from a directory +// without removing the directory itself. +func TestClearDir(t *testing.T) { + dir := t.TempDir() + + for _, name := range []string{"a.txt", "b.txt"} { + if err := os.WriteFile(filepath.Join(dir, name), []byte(name), 0644); err != nil { + t.Fatalf("write %s: %v", name, err) + } + } + if err := os.Mkdir(filepath.Join(dir, "subdir"), 0755); err != nil { + t.Fatalf("mkdir subdir: %v", err) + } + + if err := clearDir(dir); err != nil { + t.Fatalf("clearDir: %v", err) + } + + entries, err := os.ReadDir(dir) + if err != nil { + t.Fatalf("ReadDir after clearDir: %v", err) + } + if len(entries) != 0 { + names := make([]string, len(entries)) + for i, e := range entries { + names[i] = e.Name() + } + t.Errorf("clearDir: expected empty dir, found: %s", strings.Join(names, ", ")) + } +} diff --git a/cmd/hauler/cli/store/remove_test.go b/cmd/hauler/cli/store/remove_test.go new file mode 100644 index 0000000..a013e3d --- /dev/null +++ b/cmd/hauler/cli/store/remove_test.go @@ -0,0 +1,155 @@ +package store + +import ( + "strings" + "testing" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + 
"hauler.dev/go/hauler/internal/flags" + v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1" +) + +// -------------------------------------------------------------------------- +// Unit tests — formatReference +// -------------------------------------------------------------------------- + +func TestFormatReference(t *testing.T) { + tests := []struct { + name string + ref string + want string + }{ + { + name: "empty string returns empty", + ref: "", + want: "", + }, + { + name: "no colon returns unchanged", + ref: "nocolon", + want: "nocolon", + }, + { + name: "tag without dash returns unchanged", + ref: "rancher/rancher:v2.8.5", + want: "rancher/rancher:v2.8.5", + }, + { + name: "cosign sig tag splits at first dash after last colon", + ref: "repo:sha256-abc123.sig", + want: "repo:sha256 [abc123.sig]", + }, + { + name: "cosign att tag format", + ref: "myrepo:sha256-deadbeef.att", + want: "myrepo:sha256 [deadbeef.att]", + }, + { + name: "cosign sbom tag format", + ref: "myrepo:sha256-deadbeef.sbom", + want: "myrepo:sha256 [deadbeef.sbom]", + }, + { + name: "tag is only a dash returns unchanged (empty suffix)", + ref: "repo:-", + want: "repo:-", + }, + { + name: "multiple colons uses last one", + ref: "host:5000/repo:sha256-abc.sig", + want: "host:5000/repo:sha256 [abc.sig]", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := formatReference(tc.ref) + if got != tc.want { + t.Errorf("formatReference(%q) = %q, want %q", tc.ref, got, tc.want) + } + }) + } +} + +// -------------------------------------------------------------------------- +// Integration tests — RemoveCmd +// -------------------------------------------------------------------------- + +func TestRemoveCmd_Force(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + url := seedFileInHTTPServer(t, "removeme.txt", "file-to-remove") + if err := storeFile(ctx, s, v1.File{Path: url}); err != nil { + t.Fatalf("storeFile: %v", err) + } + + if n := 
countArtifactsInStore(t, s); n == 0 { + t.Fatal("expected at least 1 artifact after storeFile, got 0") + } + + // Confirm the artifact ref contains "removeme". + var ref string + if err := s.Walk(func(reference string, _ ocispec.Descriptor) error { + if strings.Contains(reference, "removeme") { + ref = reference + } + return nil + }); err != nil { + t.Fatalf("walk to find ref: %v", err) + } + if ref == "" { + t.Fatal("could not find stored artifact reference containing 'removeme'") + } + + if err := RemoveCmd(ctx, &flags.RemoveOpts{Force: true}, s, "removeme"); err != nil { + t.Fatalf("RemoveCmd: %v", err) + } + + if n := countArtifactsInStore(t, s); n != 0 { + t.Errorf("expected 0 artifacts after removal, got %d", n) + } +} + +func TestRemoveCmd_NotFound(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + err := RemoveCmd(ctx, &flags.RemoveOpts{Force: true}, s, "nonexistent-ref") + if err == nil { + t.Fatal("expected error for non-existent ref, got nil") + } + if !strings.Contains(err.Error(), "not found") { + t.Errorf("expected error containing 'not found', got: %v", err) + } +} + +func TestRemoveCmd_Force_MultipleMatches(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + // Seed two file artifacts whose names share the substring "testfile". + url1 := seedFileInHTTPServer(t, "testfile-alpha.txt", "content-alpha") + url2 := seedFileInHTTPServer(t, "testfile-beta.txt", "content-beta") + + if err := storeFile(ctx, s, v1.File{Path: url1}); err != nil { + t.Fatalf("storeFile alpha: %v", err) + } + if err := storeFile(ctx, s, v1.File{Path: url2}); err != nil { + t.Fatalf("storeFile beta: %v", err) + } + + if n := countArtifactsInStore(t, s); n < 2 { + t.Fatalf("expected at least 2 artifacts, got %d", n) + } + + // Remove using a substring that matches both. 
+ if err := RemoveCmd(ctx, &flags.RemoveOpts{Force: true}, s, "testfile"); err != nil { + t.Fatalf("RemoveCmd: %v", err) + } + + if n := countArtifactsInStore(t, s); n != 0 { + t.Errorf("expected 0 artifacts after removal of both, got %d", n) + } +} diff --git a/cmd/hauler/cli/store/save.go b/cmd/hauler/cli/store/save.go index 68150f8..9db2f39 100644 --- a/cmd/hauler/cli/store/save.go +++ b/cmd/hauler/cli/store/save.go @@ -4,10 +4,13 @@ import ( "bytes" "context" "encoding/json" + "fmt" "os" "path" "path/filepath" "slices" + "strconv" + "strings" referencev3 "github.com/distribution/distribution/v3/reference" "github.com/google/go-containerregistry/pkg/name" @@ -72,10 +75,64 @@ func SaveCmd(ctx context.Context, o *flags.SaveOpts, rso *flags.StoreRootOpts, r return err } - l.Infof("saving store [%s] to archive [%s]", o.StoreDir, o.FileName) + if o.ChunkSize != "" { + if o.ContainerdCompatibility == true { + l.Warnf("compatibility warning... stores split by chunk size must be imported using `hauler store load` to rejoin before import to containerd") + } + maxBytes, err := parseChunkSize(o.ChunkSize) + if err != nil { + return err + } + chunks, err := archives.SplitArchive(ctx, absOutputfile, maxBytes) + if err != nil { + return err + } + for _, c := range chunks { + l.Infof("saving store [%s] to chunk [%s]", o.StoreDir, filepath.Base(c)) + } + } else { + l.Infof("saving store [%s] to archive [%s]", o.StoreDir, o.FileName) + } + return nil } +// parseChunkSize parses a human-readable byte size string (e.g. "1G", "500M", "2GB") +// into a byte count. Suffixes are treated as binary units (1K = 1024). 
+func parseChunkSize(s string) (int64, error) { + units := map[string]int64{ + "K": 1 << 10, "KB": 1 << 10, + "M": 1 << 20, "MB": 1 << 20, + "G": 1 << 30, "GB": 1 << 30, + "T": 1 << 40, "TB": 1 << 40, + } + s = strings.ToUpper(strings.TrimSpace(s)) + var result int64 + matched := false + for suffix, mult := range units { + if strings.HasSuffix(s, suffix) { + n, err := strconv.ParseInt(strings.TrimSuffix(s, suffix), 10, 64) + if err != nil { + return 0, fmt.Errorf("invalid chunk size %q", s) + } + result = n * mult + matched = true + break + } + } + if !matched { + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0, fmt.Errorf("invalid chunk size %q: %w", s, err) + } + result = n + } + if result <= 0 { + return 0, fmt.Errorf("chunk size must be greater than zero, received %q", s) + } + return result, nil +} + type exports struct { digests []string records map[string]tarball.Descriptor @@ -116,59 +173,68 @@ func writeExportsManifest(ctx context.Context, dir string, platformStr string) e l.Debugf("descriptor [%s] <<< SKIPPING ARTIFACT [%q]", desc.Digest.String(), desc.ArtifactType) continue } - if desc.Annotations != nil { - // we only care about images that cosign has added to the layout index - if kind, hasKind := desc.Annotations[consts.KindAnnotationName]; hasKind { - if refName, hasRefName := desc.Annotations["io.containerd.image.name"]; hasRefName { - // branch on image (aka image manifest) or image index - switch kind { - case consts.KindAnnotationImage: - if err := x.record(ctx, idx, desc, refName); err != nil { - return err - } - case consts.KindAnnotationIndex: - l.Debugf("index [%s]: digest=[%s]... type=[%s]... size=[%d]", refName, desc.Digest.String(), desc.MediaType, desc.Size) + // The kind annotation is the only reliable way to distinguish container images from + // cosign signatures/attestations/SBOMs: those are stored as standard Docker/OCI + // manifests (same media type as real images) so media type alone is insufficient. 
+ kind := desc.Annotations[consts.KindAnnotationName] + if kind != consts.KindAnnotationImage && kind != consts.KindAnnotationIndex { + l.Debugf("descriptor [%s] <<< SKIPPING KIND [%q]", desc.Digest.String(), kind) + continue + } - // when no platform is inputted... warn the user of potential mismatch on import for docker - // required for docker to be able to interpret and load the image correctly - if platform.String() == "" { - l.Warnf("compatibility warning... docker... specify platform to prevent potential mismatch on import of index [%s]", refName) - } + refName, hasRefName := desc.Annotations[consts.ContainerdImageNameKey] + if !hasRefName { + l.Debugf("descriptor [%s] <<< SKIPPING (no containerd image name)", desc.Digest.String()) + continue + } - iix, err := idx.ImageIndex(desc.Digest) - if err != nil { - return err - } - ixm, err := iix.IndexManifest() - if err != nil { - return err - } - for _, ixd := range ixm.Manifests { - if ixd.MediaType.IsImage() { - if platform.String() != "" { - if ixd.Platform.Architecture != platform.Architecture || ixd.Platform.OS != platform.OS { - l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: does not match the supplied platform... skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture) - continue - } - } + // Use the descriptor's actual media type to discriminate single-image manifests + // from multi-arch indexes, rather than relying on the kind string for this. + switch { + case desc.MediaType.IsImage(): + if err := x.record(ctx, idx, desc, refName); err != nil { + return err + } + case desc.MediaType.IsIndex(): + l.Debugf("index [%s]: digest=[%s]... type=[%s]... size=[%d]", refName, desc.Digest.String(), desc.MediaType, desc.Size) - // skip any platforms of 'unknown/unknown'... 
docker hates - // required for docker to be able to interpret and load the image correctly - if ixd.Platform.Architecture == "unknown" && ixd.Platform.OS == "unknown" { - l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: matches unknown platform... skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture) - continue - } + // when no platform is inputted... warn the user of potential mismatch on import for docker + // required for docker to be able to interpret and load the image correctly + if platform.String() == "" { + l.Warnf("compatibility warning... docker... specify platform to prevent potential mismatch on import of index [%s]", refName) + } - if err := x.record(ctx, iix, ixd, refName); err != nil { - return err - } - } + iix, err := idx.ImageIndex(desc.Digest) + if err != nil { + return err + } + ixm, err := iix.IndexManifest() + if err != nil { + return err + } + for _, ixd := range ixm.Manifests { + if ixd.MediaType.IsImage() { + if platform.String() != "" { + if ixd.Platform.Architecture != platform.Architecture || ixd.Platform.OS != platform.OS { + l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: does not match the supplied platform... skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture) + continue } - default: - l.Debugf("descriptor [%s] <<< SKIPPING KIND [%q]", desc.Digest.String(), kind) + } + + // skip any platforms of 'unknown/unknown'... docker hates + // required for docker to be able to interpret and load the image correctly + if ixd.Platform.Architecture == "unknown" && ixd.Platform.OS == "unknown" { + l.Debugf("index [%s]: digest=[%s], platform=[%s/%s]: matches unknown platform... 
skipping...", refName, desc.Digest.String(), ixd.Platform.OS, ixd.Platform.Architecture) + continue + } + + if err := x.record(ctx, iix, ixd, refName); err != nil { + return err } } } + default: + l.Debugf("descriptor [%s] <<< SKIPPING media type [%q]", desc.Digest.String(), desc.MediaType) } } @@ -199,6 +265,17 @@ func (x *exports) record(ctx context.Context, index libv1.ImageIndex, desc libv1 return err } + // Verify this is a real container image by inspecting its manifest config media type. + // Non-image OCI artifacts (Helm charts, files, cosign sigs) use distinct config types. + manifest, err := image.Manifest() + if err != nil { + return err + } + if manifest.Config.MediaType != types.DockerConfigJSON && manifest.Config.MediaType != types.OCIConfigJSON { + l.Debugf("descriptor [%s] <<< SKIPPING NON-IMAGE config media type [%q]", desc.Digest.String(), manifest.Config.MediaType) + return nil + } + config, err := image.ConfigName() if err != nil { return err diff --git a/cmd/hauler/cli/store/save_test.go b/cmd/hauler/cli/store/save_test.go new file mode 100644 index 0000000..5902519 --- /dev/null +++ b/cmd/hauler/cli/store/save_test.go @@ -0,0 +1,319 @@ +package store + +// save_test.go covers writeExportsManifest and SaveCmd. +// +// IMPORTANT: SaveCmd calls os.Chdir(storeDir) and defers os.Chdir back. Do +// NOT call t.Parallel() on any SaveCmd test, and always use absolute paths for +// StoreDir and FileName so they remain valid after the chdir. + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + + "hauler.dev/go/hauler/internal/flags" + v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1" + "hauler.dev/go/hauler/pkg/archives" + "hauler.dev/go/hauler/pkg/consts" +) + +// manifestEntry mirrors tarball.Descriptor for asserting manifest.json contents. 
+type manifestEntry struct { + Config string `json:"Config"` + RepoTags []string `json:"RepoTags"` + Layers []string `json:"Layers"` +} + +// readManifestJSON reads and unmarshals manifest.json from the given OCI layout dir. +func readManifestJSON(t *testing.T, dir string) []manifestEntry { + t.Helper() + data, err := os.ReadFile(filepath.Join(dir, consts.ImageManifestFile)) + if err != nil { + t.Fatalf("readManifestJSON: %v", err) + } + var entries []manifestEntry + if err := json.Unmarshal(data, &entries); err != nil { + t.Fatalf("readManifestJSON unmarshal: %v", err) + } + return entries +} + +// newSaveOpts builds a SaveOpts pointing at storeDir with an absolute archive path. +func newSaveOpts(storeDir, archivePath string) *flags.SaveOpts { + return &flags.SaveOpts{ + StoreRootOpts: defaultRootOpts(storeDir), + FileName: archivePath, + } +} + +// -------------------------------------------------------------------------- +// writeExportsManifest unit tests +// -------------------------------------------------------------------------- + +func TestWriteExportsManifest(t *testing.T) { + ctx := newTestContext(t) + + t.Run("no platform filter includes all platforms", func(t *testing.T) { + host, rOpts := newLocalhostRegistry(t) + seedIndex(t, host, "test/multiarch", "v1", rOpts...) + + s := newTestStore(t) + if err := s.AddImage(ctx, host+"/test/multiarch:v1", "", false); err != nil { + t.Fatalf("AddImage: %v", err) + } + + if err := writeExportsManifest(ctx, s.Root, ""); err != nil { + t.Fatalf("writeExportsManifest: %v", err) + } + + entries := readManifestJSON(t, s.Root) + if len(entries) < 2 { + t.Errorf("expected >=2 entries (all platforms), got %d", len(entries)) + } + }) + + t.Run("linux/amd64 filter yields single entry", func(t *testing.T) { + host, rOpts := newLocalhostRegistry(t) + seedIndex(t, host, "test/multiarch", "v2", rOpts...) 
+ + s := newTestStore(t) + if err := s.AddImage(ctx, host+"/test/multiarch:v2", "", false); err != nil { + t.Fatalf("AddImage: %v", err) + } + + if err := writeExportsManifest(ctx, s.Root, "linux/amd64"); err != nil { + t.Fatalf("writeExportsManifest: %v", err) + } + + entries := readManifestJSON(t, s.Root) + if len(entries) != 1 { + t.Errorf("expected 1 entry for linux/amd64, got %d", len(entries)) + } + }) + + t.Run("chart artifact excluded via config media type check", func(t *testing.T) { + s := newTestStore(t) + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + o := newAddChartOpts(chartTestdataDir, "") + if err := AddChartCmd(ctx, o, s, "rancher-cluster-templates-0.5.2.tgz", rso, ro); err != nil { + t.Fatalf("AddChartCmd: %v", err) + } + + if err := writeExportsManifest(ctx, s.Root, ""); err != nil { + t.Fatalf("writeExportsManifest: %v", err) + } + + entries := readManifestJSON(t, s.Root) + if len(entries) != 0 { + t.Errorf("expected 0 entries (chart excluded from manifest.json), got %d", len(entries)) + } + }) +} + +func TestWriteExportsManifest_SkipsNonImages(t *testing.T) { + ctx := newTestContext(t) + + url := seedFileInHTTPServer(t, "skip.sh", "#!/bin/sh\necho skip") + s := newTestStore(t) + if err := storeFile(ctx, s, v1.File{Path: url}); err != nil { + t.Fatalf("storeFile: %v", err) + } + + if err := writeExportsManifest(ctx, s.Root, ""); err != nil { + t.Fatalf("writeExportsManifest: %v", err) + } + + entries := readManifestJSON(t, s.Root) + if len(entries) != 0 { + t.Errorf("expected 0 entries for file-only store, got %d", len(entries)) + } +} + +// -------------------------------------------------------------------------- +// SaveCmd integration tests +// Do NOT use t.Parallel() — SaveCmd calls os.Chdir. 
+// -------------------------------------------------------------------------- + +func TestSaveCmd(t *testing.T) { + ctx := newTestContext(t) + host, _ := newLocalhostRegistry(t) + seedImage(t, host, "test/save", "v1") + + s := newTestStore(t) + if err := s.AddImage(ctx, host+"/test/save:v1", "", false); err != nil { + t.Fatalf("AddImage: %v", err) + } + + // FileName must be absolute so it remains valid after SaveCmd's os.Chdir. + archivePath := filepath.Join(t.TempDir(), "haul.tar.zst") + o := newSaveOpts(s.Root, archivePath) + + if err := SaveCmd(ctx, o, defaultRootOpts(s.Root), defaultCliOpts()); err != nil { + t.Fatalf("SaveCmd: %v", err) + } + + fi, err := os.Stat(archivePath) + if err != nil { + t.Fatalf("archive stat: %v", err) + } + if fi.Size() == 0 { + t.Fatal("archive is empty") + } + + // Validate it is a well-formed zst archive by unarchiving it. + destDir := t.TempDir() + if err := archives.Unarchive(ctx, archivePath, destDir); err != nil { + t.Fatalf("Unarchive: %v", err) + } +} + +func TestSaveCmd_ContainerdCompatibility(t *testing.T) { + ctx := newTestContext(t) + host, _ := newLocalhostRegistry(t) + seedImage(t, host, "test/containerd-compat", "v1") + + s := newTestStore(t) + if err := s.AddImage(ctx, host+"/test/containerd-compat:v1", "", false); err != nil { + t.Fatalf("AddImage: %v", err) + } + + archivePath := filepath.Join(t.TempDir(), "haul-compat.tar.zst") + o := newSaveOpts(s.Root, archivePath) + o.ContainerdCompatibility = true + + if err := SaveCmd(ctx, o, defaultRootOpts(s.Root), defaultCliOpts()); err != nil { + t.Fatalf("SaveCmd ContainerdCompatibility: %v", err) + } + + destDir := t.TempDir() + if err := archives.Unarchive(ctx, archivePath, destDir); err != nil { + t.Fatalf("Unarchive: %v", err) + } + + // oci-layout must be absent from the extracted archive. 
+ ociLayoutPath := filepath.Join(destDir, "oci-layout") + if _, err := os.Stat(ociLayoutPath); !os.IsNotExist(err) { + t.Errorf("expected oci-layout to be absent in containerd-compatible archive, got: %v", err) + } +} + +func TestSaveCmd_EmptyStore(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + // SaveCmd uses layout.FromPath which stats index.json — it must exist on + // disk. A fresh store holds the index only in memory; SaveIndex flushes it. + if err := s.SaveIndex(); err != nil { + t.Fatalf("SaveIndex: %v", err) + } + + archivePath := filepath.Join(t.TempDir(), "haul-empty.tar.zst") + o := newSaveOpts(s.Root, archivePath) + + if err := SaveCmd(ctx, o, defaultRootOpts(s.Root), defaultCliOpts()); err != nil { + t.Fatalf("SaveCmd empty store: %v", err) + } + + if _, err := os.Stat(archivePath); err != nil { + t.Fatalf("archive not created for empty store: %v", err) + } +} + +// -------------------------------------------------------------------------- +// parseChunkSize unit tests +// -------------------------------------------------------------------------- + +func TestParseChunkSize(t *testing.T) { + tests := []struct { + name string + input string + want int64 + wantErr bool + }{ + {name: "kilobytes", input: "1K", want: 1 << 10}, + {name: "kilobytes long", input: "1KB", want: 1 << 10}, + {name: "megabytes", input: "500M", want: 500 << 20}, + {name: "megabytes long", input: "500MB", want: 500 << 20}, + {name: "gigabytes", input: "2G", want: 2 << 30}, + {name: "gigabytes long", input: "2GB", want: 2 << 30}, + {name: "terabytes", input: "1T", want: 1 << 40}, + {name: "terabytes long", input: "1TB", want: 1 << 40}, + {name: "plain bytes", input: "1024", want: 1024}, + {name: "lowercase", input: "1g", want: 1 << 30}, + {name: "whitespace trimmed", input: " 1G ", want: 1 << 30}, + {name: "zero is invalid", input: "0", wantErr: true}, + {name: "zero with suffix", input: "0M", wantErr: true}, + {name: "negative bytes", input: "-1", wantErr: 
true}, + {name: "negative with suffix", input: "-1G", wantErr: true}, + {name: "empty string", input: "", wantErr: true}, + {name: "invalid suffix", input: "1X", wantErr: true}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := parseChunkSize(tt.input) + if (err != nil) != tt.wantErr { + t.Fatalf("parseChunkSize(%q) error = %v, wantErr %v", tt.input, err, tt.wantErr) + } + if !tt.wantErr && got != tt.want { + t.Errorf("parseChunkSize(%q) = %d, want %d", tt.input, got, tt.want) + } + }) + } +} + +// -------------------------------------------------------------------------- +// SaveCmd chunk-size integration tests +// Do NOT use t.Parallel() — SaveCmd calls os.Chdir. +// -------------------------------------------------------------------------- + +func TestSaveCmd_ChunkSize(t *testing.T) { + ctx := newTestContext(t) + host, _ := newLocalhostRegistry(t) + seedImage(t, host, "test/chunksave", "v1") + + s := newTestStore(t) + if err := s.AddImage(ctx, host+"/test/chunksave:v1", "", false); err != nil { + t.Fatalf("AddImage: %v", err) + } + + archiveDir := t.TempDir() + archivePath := filepath.Join(archiveDir, "haul-chunked.tar.zst") + o := newSaveOpts(s.Root, archivePath) + o.ChunkSize = "1K" + + if err := SaveCmd(ctx, o, defaultRootOpts(s.Root), defaultCliOpts()); err != nil { + t.Fatalf("SaveCmd with chunk-size: %v", err) + } + + // original archive must be replaced by chunk files + if _, err := os.Stat(archivePath); !os.IsNotExist(err) { + t.Error("original archive should be removed after chunking") + } + + // at least one chunk must exist + matches, err := filepath.Glob(filepath.Join(archiveDir, "haul-chunked_*.tar.zst")) + if err != nil { + t.Fatalf("glob chunks: %v", err) + } + if len(matches) == 0 { + t.Fatal("expected at least one chunk file, found none") + } +} + +func TestSaveCmd_ChunkSize_Invalid(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + if err := s.SaveIndex(); err != nil { + t.Fatalf("SaveIndex: 
%v", err) + } + + o := newSaveOpts(s.Root, filepath.Join(t.TempDir(), "haul.tar.zst")) + o.ChunkSize = "0" + + if err := SaveCmd(ctx, o, defaultRootOpts(s.Root), defaultCliOpts()); err == nil { + t.Fatal("SaveCmd: expected error for chunk-size=0, got nil") + } +} diff --git a/cmd/hauler/cli/store/serve.go b/cmd/hauler/cli/store/serve.go index 806632b..b4cb168 100644 --- a/cmd/hauler/cli/store/serve.go +++ b/cmd/hauler/cli/store/serve.go @@ -51,6 +51,7 @@ func loadConfig(filename string) (*configuration.Configuration, error) { if err != nil { return nil, err } + defer f.Close() return configuration.Parse(f) } @@ -96,7 +97,7 @@ func ServeRegistryCmd(ctx context.Context, o *flags.ServeRegistryOpts, s *store. return err } - opts := &flags.CopyOpts{} + opts := &flags.CopyOpts{StoreRootOpts: rso, PlainHTTP: true} if err := CopyCmd(ctx, opts, s, "registry://"+tr.Registry(), ro); err != nil { return err } @@ -143,8 +144,8 @@ func ServeFilesCmd(ctx context.Context, o *flags.ServeFilesOpts, s *store.Layout return err } - opts := &flags.CopyOpts{} - if err := CopyCmd(ctx, opts, s, "dir://"+o.RootDir, ro); err != nil { + opts := &flags.CopyOpts{StoreRootOpts: &flags.StoreRootOpts{}} + if err := CopyCmd(ctx, opts, s, "directory://"+o.RootDir, ro); err != nil { return err } diff --git a/cmd/hauler/cli/store/serve_test.go b/cmd/hauler/cli/store/serve_test.go new file mode 100644 index 0000000..4af1ecd --- /dev/null +++ b/cmd/hauler/cli/store/serve_test.go @@ -0,0 +1,164 @@ +package store + +import ( + "os" + "path/filepath" + "strings" + "testing" + + "hauler.dev/go/hauler/internal/flags" + "hauler.dev/go/hauler/pkg/consts" +) + +// writeIndexJSON writes a minimal valid OCI index.json to dir so that +// validateStoreExists can find it. NewLayout only writes index.json on +// SaveIndex, which is triggered by adding content — so tests that need a +// "valid store on disk" must create the file themselves. 
+func writeIndexJSON(t *testing.T, dir string) { + t.Helper() + const minimal = `{"schemaVersion":2,"mediaType":"application/vnd.oci.image.index.v1+json","manifests":[]}` + if err := os.WriteFile(filepath.Join(dir, "index.json"), []byte(minimal), 0o644); err != nil { + t.Fatalf("writeIndexJSON: %v", err) + } +} + +func TestValidateStoreExists(t *testing.T) { + t.Run("valid store", func(t *testing.T) { + s := newTestStore(t) + writeIndexJSON(t, s.Root) + if err := validateStoreExists(s); err != nil { + t.Errorf("validateStoreExists on valid store: %v", err) + } + }) + + t.Run("missing index.json", func(t *testing.T) { + s := newTestStore(t) + err := validateStoreExists(s) + if err == nil { + t.Fatal("expected error for missing index.json, got nil") + } + if !strings.Contains(err.Error(), "no store found") { + t.Errorf("expected 'no store found' in error, got: %v", err) + } + }) + + t.Run("nonexistent directory", func(t *testing.T) { + s := newTestStore(t) + // Point the layout root at a path that does not exist. + s.Root = filepath.Join(t.TempDir(), "does-not-exist", "nested") + err := validateStoreExists(s) + if err == nil { + t.Fatal("expected error for nonexistent dir, got nil") + } + }) +} + +func TestDefaultRegistryConfig(t *testing.T) { + rootDir := t.TempDir() + o := &flags.ServeRegistryOpts{ + Port: consts.DefaultRegistryPort, + RootDir: rootDir, + } + rso := defaultRootOpts(rootDir) + ro := defaultCliOpts() + + cfg := DefaultRegistryConfig(o, rso, ro) + if cfg == nil { + t.Fatal("DefaultRegistryConfig returned nil") + } + + // Port + wantAddr := ":5000" + if cfg.HTTP.Addr != wantAddr { + t.Errorf("HTTP.Addr = %q, want %q", cfg.HTTP.Addr, wantAddr) + } + + // No TLS by default. + if cfg.HTTP.TLS.Certificate != "" || cfg.HTTP.TLS.Key != "" { + t.Errorf("expected no TLS cert/key by default, got cert=%q key=%q", + cfg.HTTP.TLS.Certificate, cfg.HTTP.TLS.Key) + } + + // Log level matches ro.LogLevel. 
+ if string(cfg.Log.Level) != ro.LogLevel { + t.Errorf("Log.Level = %q, want %q", cfg.Log.Level, ro.LogLevel) + } + + // Storage rootdirectory. + fsParams := cfg.Storage["filesystem"] + if fsParams == nil { + t.Fatal("storage.filesystem not set") + } + if fsParams["rootdirectory"] != rootDir { + t.Errorf("storage.filesystem.rootdirectory = %v, want %q", fsParams["rootdirectory"], rootDir) + } + + // URL allow rules. + if len(cfg.Validation.Manifests.URLs.Allow) == 0 { + t.Error("Validation.Manifests.URLs.Allow is empty, want at least one rule") + } +} + +func TestDefaultRegistryConfig_WithTLS(t *testing.T) { + rootDir := t.TempDir() + o := &flags.ServeRegistryOpts{ + Port: consts.DefaultRegistryPort, + RootDir: rootDir, + TLSCert: "/path/to/cert.pem", + TLSKey: "/path/to/key.pem", + } + rso := defaultRootOpts(rootDir) + ro := defaultCliOpts() + + cfg := DefaultRegistryConfig(o, rso, ro) + if cfg.HTTP.TLS.Certificate != o.TLSCert { + t.Errorf("TLS.Certificate = %q, want %q", cfg.HTTP.TLS.Certificate, o.TLSCert) + } + if cfg.HTTP.TLS.Key != o.TLSKey { + t.Errorf("TLS.Key = %q, want %q", cfg.HTTP.TLS.Key, o.TLSKey) + } +} + +func TestLoadConfig_ValidFile(t *testing.T) { + // Write a minimal valid distribution registry config. 
+ cfg := ` +version: 0.1 +log: + level: info +storage: + filesystem: + rootdirectory: /tmp/registry + cache: + blobdescriptor: inmemory +http: + addr: :5000 + headers: + X-Content-Type-Options: [nosniff] +` + f, err := os.CreateTemp(t.TempDir(), "registry-config-*.yaml") + if err != nil { + t.Fatalf("create temp file: %v", err) + } + if _, err := f.WriteString(cfg); err != nil { + t.Fatalf("write config: %v", err) + } + f.Close() + + got, err := loadConfig(f.Name()) + if err != nil { + t.Fatalf("loadConfig: %v", err) + } + if got == nil { + t.Fatal("loadConfig returned nil config") + } + if got.HTTP.Addr != ":5000" { + t.Errorf("HTTP.Addr = %q, want %q", got.HTTP.Addr, ":5000") + } +} + +func TestLoadConfig_InvalidFile(t *testing.T) { + _, err := loadConfig("/nonexistent/path/to/config.yaml") + if err == nil { + t.Fatal("expected error for nonexistent config file, got nil") + } +} diff --git a/cmd/hauler/cli/store/sync.go b/cmd/hauler/cli/store/sync.go index 7e81967..72e57ea 100644 --- a/cmd/hauler/cli/store/sync.go +++ b/cmd/hauler/cli/store/sync.go @@ -15,11 +15,7 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" "hauler.dev/go/hauler/internal/flags" - convert "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/convert" v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1" - v1alpha1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1" - tchart "hauler.dev/go/hauler/pkg/collection/chart" - "hauler.dev/go/hauler/pkg/collection/imagetxt" "hauler.dev/go/hauler/pkg/consts" "hauler.dev/go/hauler/pkg/content" "hauler.dev/go/hauler/pkg/cosign" @@ -63,7 +59,7 @@ func SyncCmd(ctx context.Context, o *flags.SyncOpts, s *store.Layout, rso *flags img := v1.Image{ Name: manifestLoc, } - err := storeImage(ctx, s, img, o.Platform, rso, ro, "") + err := storeImage(ctx, s, img, o.Platform, o.ExcludeExtras, rso, ro, "") if err != nil { return err } @@ -77,6 +73,7 @@ func SyncCmd(ctx context.Context, o *flags.SyncOpts, s *store.Layout, rso *flags if err != nil { return err } + defer 
fi.Close() err = processContent(ctx, fi, o, s, rso, ro) if err != nil { return err @@ -84,54 +81,108 @@ func SyncCmd(ctx context.Context, o *flags.SyncOpts, s *store.Layout, rso *flags l.Infof("processing completed successfully") } - // If passed a local manifest, process it - for _, fileName := range o.FileName { - l.Infof("processing manifest [%s] to store [%s]", fileName, o.StoreDir) + // If passed a hauler manifest, process it + if len(o.FileName) != 0 { + for _, fileName := range o.FileName { + l.Infof("processing manifest [%s] to store [%s]", fileName, o.StoreDir) - haulPath := fileName - if strings.HasPrefix(haulPath, "http://") || strings.HasPrefix(haulPath, "https://") { - l.Debugf("detected remote manifest... starting download... [%s]", haulPath) + haulPath := fileName + if strings.HasPrefix(haulPath, "http://") || strings.HasPrefix(haulPath, "https://") { + l.Debugf("detected remote manifest... starting download... [%s]", haulPath) - h := getter.NewHttp() - parsedURL, err := url.Parse(haulPath) + h := getter.NewHttp() + parsedURL, err := url.Parse(haulPath) + if err != nil { + return err + } + rc, err := h.Open(ctx, parsedURL) + if err != nil { + return err + } + defer rc.Close() + + fileName := h.Name(parsedURL) + if fileName == "" { + fileName = filepath.Base(parsedURL.Path) + } + haulPath = filepath.Join(tempDir, fileName) + + out, err := os.Create(haulPath) + if err != nil { + return err + } + defer out.Close() + + if _, err = io.Copy(out, rc); err != nil { + return err + } + } + + fi, err := os.Open(haulPath) if err != nil { return err } - rc, err := h.Open(ctx, parsedURL) + defer fi.Close() + + err = processContent(ctx, fi, o, s, rso, ro) if err != nil { return err } - defer rc.Close() - fileName := h.Name(parsedURL) - if fileName == "" { - fileName = filepath.Base(parsedURL.Path) + l.Infof("processing completed successfully") + } + } + + // If passed an image.txt file, process it + if len(o.ImageTxt) != 0 { + for _, imageTxt := range o.ImageTxt { 
+ l.Infof("processing image.txt [%s] to store [%s]", imageTxt, o.StoreDir) + + haulPath := imageTxt + if strings.HasPrefix(haulPath, "http://") || strings.HasPrefix(haulPath, "https://") { + l.Debugf("detected remote image.txt... starting download... [%s]", haulPath) + + h := getter.NewHttp() + parsedURL, err := url.Parse(haulPath) + if err != nil { + return err + } + rc, err := h.Open(ctx, parsedURL) + if err != nil { + return err + } + defer rc.Close() + + fileName := h.Name(parsedURL) + if fileName == "" { + fileName = filepath.Base(parsedURL.Path) + } + haulPath = filepath.Join(tempDir, fileName) + + out, err := os.Create(haulPath) + if err != nil { + return err + } + defer out.Close() + + if _, err = io.Copy(out, rc); err != nil { + return err + } } - haulPath = filepath.Join(tempDir, fileName) - out, err := os.Create(haulPath) + fi, err := os.Open(haulPath) if err != nil { return err } - defer out.Close() + defer fi.Close() - if _, err = io.Copy(out, rc); err != nil { + err = processImageTxt(ctx, fi, o, s, rso, ro) + if err != nil { return err } - } - fi, err := os.Open(haulPath) - if err != nil { - return err + l.Infof("processing completed successfully") } - defer fi.Close() - - err = processContent(ctx, fi, o, s, rso, ro) - if err != nil { - return err - } - - l.Infof("processing completed successfully") } return nil @@ -168,23 +219,6 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor case consts.FilesContentKind: switch gvk.Version { - case "v1alpha1": - l.Warnf("!!! DEPRECATION WARNING !!! 
apiVersion [%s] will be removed in a future release...", gvk.Version) - - var alphaCfg v1alpha1.Files - if err := yaml.Unmarshal(doc, &alphaCfg); err != nil { - return err - } - var v1Cfg v1.Files - if err := convert.ConvertFiles(&alphaCfg, &v1Cfg); err != nil { - return err - } - for _, f := range v1Cfg.Spec.Files { - if err := storeFile(ctx, s, f); err != nil { - return err - } - } - case "v1": var cfg v1.Files if err := yaml.Unmarshal(doc, &cfg); err != nil { @@ -197,158 +231,11 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor } default: - return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind) + return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1]", gvk.Version, gvk.Kind) } case consts.ImagesContentKind: switch gvk.Version { - case "v1alpha1": - l.Warnf("!!! DEPRECATION WARNING !!! apiVersion [%s] will be removed in a future release...", gvk.Version) - - var alphaCfg v1alpha1.Images - if err := yaml.Unmarshal(doc, &alphaCfg); err != nil { - return err - } - var v1Cfg v1.Images - if err := convert.ConvertImages(&alphaCfg, &v1Cfg); err != nil { - return err - } - - a := v1Cfg.GetAnnotations() - for _, i := range v1Cfg.Spec.Images { - - if a[consts.ImageAnnotationRegistry] != "" || o.Registry != "" { - newRef, _ := reference.Parse(i.Name) - newReg := o.Registry - if o.Registry == "" && a[consts.ImageAnnotationRegistry] != "" { - newReg = a[consts.ImageAnnotationRegistry] - } - if newRef.Context().RegistryStr() == "" { - newRef, err = reference.Relocate(i.Name, newReg) - if err != nil { - return err - } - } - i.Name = newRef.Name() - } - - hasAnnotationIdentityOptions := a[consts.ImageAnnotationCertIdentityRegexp] != "" || a[consts.ImageAnnotationCertIdentity] != "" - hasCliIdentityOptions := o.CertIdentityRegexp != "" || o.CertIdentity != "" - hasImageIdentityOptions := i.CertIdentityRegexp != "" || i.CertIdentity != "" - - 
needsKeylessVerificaton := hasAnnotationIdentityOptions || hasCliIdentityOptions || hasImageIdentityOptions - needsPubKeyVerification := a[consts.ImageAnnotationKey] != "" || o.Key != "" || i.Key != "" - if needsPubKeyVerification { - key := o.Key - if o.Key == "" && a[consts.ImageAnnotationKey] != "" { - key, err = homedir.Expand(a[consts.ImageAnnotationKey]) - if err != nil { - return err - } - } - if i.Key != "" { - key, err = homedir.Expand(i.Key) - if err != nil { - return err - } - } - l.Debugf("key for image [%s]", key) - - tlog := o.Tlog - if !o.Tlog && a[consts.ImageAnnotationTlog] == "true" { - tlog = true - } - if i.Tlog { - tlog = i.Tlog - } - l.Debugf("transparency log for verification [%b]", tlog) - - if err := cosign.VerifySignature(ctx, s, key, tlog, i.Name, rso, ro); err != nil { - l.Errorf("signature verification failed for image [%s]... skipping...\n%v", i.Name, err) - continue - } - l.Infof("signature verified for image [%s]", i.Name) - } else if needsKeylessVerificaton { //Keyless signature verification - certIdentityRegexp := o.CertIdentityRegexp - if o.CertIdentityRegexp == "" && a[consts.ImageAnnotationCertIdentityRegexp] != "" { - certIdentityRegexp = a[consts.ImageAnnotationCertIdentityRegexp] - } - if i.CertIdentityRegexp != "" { - certIdentityRegexp = i.CertIdentityRegexp - } - l.Debugf("certIdentityRegexp for image [%s]", certIdentityRegexp) - - certIdentity := o.CertIdentity - if o.CertIdentity == "" && a[consts.ImageAnnotationCertIdentity] != "" { - certIdentity = a[consts.ImageAnnotationCertIdentity] - } - if i.CertIdentity != "" { - certIdentity = i.CertIdentity - } - l.Debugf("certIdentity for image [%s]", certIdentity) - - certOidcIssuer := o.CertOidcIssuer - if o.CertOidcIssuer == "" && a[consts.ImageAnnotationCertOidcIssuer] != "" { - certOidcIssuer = a[consts.ImageAnnotationCertOidcIssuer] - } - if i.CertOidcIssuer != "" { - certOidcIssuer = i.CertOidcIssuer - } - l.Debugf("certOidcIssuer for image [%s]", certOidcIssuer) - - 
certOidcIssuerRegexp := o.CertOidcIssuerRegexp - if o.CertOidcIssuerRegexp == "" && a[consts.ImageAnnotationCertOidcIssuerRegexp] != "" { - certOidcIssuerRegexp = a[consts.ImageAnnotationCertOidcIssuerRegexp] - } - if i.CertOidcIssuerRegexp != "" { - certOidcIssuerRegexp = i.CertOidcIssuerRegexp - } - l.Debugf("certOidcIssuerRegexp for image [%s]", certOidcIssuerRegexp) - - certGithubWorkflowRepository := o.CertGithubWorkflowRepository - if o.CertGithubWorkflowRepository == "" && a[consts.ImageAnnotationCertGithubWorkflowRepository] != "" { - certGithubWorkflowRepository = a[consts.ImageAnnotationCertGithubWorkflowRepository] - } - if i.CertGithubWorkflowRepository != "" { - certGithubWorkflowRepository = i.CertGithubWorkflowRepository - } - l.Debugf("certGithubWorkflowRepository for image [%s]", certGithubWorkflowRepository) - - tlog := o.Tlog - if !o.Tlog && a[consts.ImageAnnotationTlog] == "true" { - tlog = true - } - if i.Tlog { - tlog = i.Tlog - } - l.Debugf("transparency log for verification [%b]", tlog) - - if err := cosign.VerifyKeylessSignature(ctx, s, certIdentity, certIdentityRegexp, certOidcIssuer, certOidcIssuerRegexp, certGithubWorkflowRepository, tlog, i.Name, rso, ro); err != nil { - l.Errorf("keyless signature verification failed for image [%s]... 
skipping...\n%v", i.Name, err) - continue - } - l.Infof("keyless signature verified for image [%s]", i.Name) - } - - platform := o.Platform - if o.Platform == "" && a[consts.ImageAnnotationPlatform] != "" { - platform = a[consts.ImageAnnotationPlatform] - } - if i.Platform != "" { - platform = i.Platform - } - - rewrite := "" - if i.Rewrite != "" { - rewrite = i.Rewrite - } - - if err := storeImage(ctx, s, i, platform, rso, ro, rewrite); err != nil { - return err - } - } - s.CopyAll(ctx, s.OCI, nil) - case "v1": var cfg v1.Images if err := yaml.Unmarshal(doc, &cfg); err != nil { @@ -404,7 +291,7 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor } l.Debugf("transparency log for verification [%b]", tlog) - if err := cosign.VerifySignature(ctx, s, key, tlog, i.Name, rso, ro); err != nil { + if err := cosign.VerifySignature(ctx, key, tlog, i.Name, rso, ro); err != nil { l.Errorf("signature verification failed for image [%s]... skipping...\n%v", i.Name, err) continue } @@ -455,17 +342,10 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor } l.Debugf("certGithubWorkflowRepository for image [%s]", certGithubWorkflowRepository) - tlog := o.Tlog - if !o.Tlog && a[consts.ImageAnnotationTlog] == "true" { - tlog = true - } - if i.Tlog { - tlog = i.Tlog - } - l.Debugf("transparency log for verification [%b]", tlog) - - if err := cosign.VerifyKeylessSignature(ctx, s, certIdentity, certIdentityRegexp, certOidcIssuer, certOidcIssuerRegexp, certGithubWorkflowRepository, tlog, i.Name, rso, ro); err != nil { - l.Errorf("keyless signature verification failed for image [%s]... skipping...\n%v", i.Name, err) + // Keyless (Fulcio) certs expire after ~10 min; tlog is always + // required to prove the cert was valid at signing time. 
+ if err := cosign.VerifyKeylessSignature(ctx, certIdentity, certIdentityRegexp, certOidcIssuer, certOidcIssuerRegexp, certGithubWorkflowRepository, i.Name, rso, ro); err != nil { + l.Errorf("signature verification failed for image [%s]... skipping...\n%v", i.Name, err) continue } l.Infof("keyless signature verified for image [%s]", i.Name) @@ -483,58 +363,49 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor rewrite = i.Rewrite } - if err := storeImage(ctx, s, i, platform, rso, ro, rewrite); err != nil { + excludeExtras := o.ExcludeExtras + if !o.ExcludeExtras && a[consts.ImageAnnotationExcludeExtras] == "true" { + excludeExtras = true + } + if i.ExcludeExtras { + excludeExtras = i.ExcludeExtras + } + + if err := storeImage(ctx, s, i, platform, excludeExtras, rso, ro, rewrite); err != nil { return err } } s.CopyAll(ctx, s.OCI, nil) default: - return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind) + return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1]", gvk.Version, gvk.Kind) } case consts.ChartsContentKind: switch gvk.Version { - case "v1alpha1": - l.Warnf("!!! DEPRECATION WARNING !!! 
apiVersion [%s] will be removed in a future release...", gvk.Version) - - var alphaCfg v1alpha1.Charts - if err := yaml.Unmarshal(doc, &alphaCfg); err != nil { - return err - } - var v1Cfg v1.Charts - if err := convert.ConvertCharts(&alphaCfg, &v1Cfg); err != nil { - return err - } - for _, ch := range v1Cfg.Spec.Charts { - if err := storeChart(ctx, s, ch, - &flags.AddChartOpts{ - ChartOpts: &action.ChartPathOptions{ - RepoURL: ch.RepoURL, - Version: ch.Version, - }, - }, - rso, ro, - "", - ); err != nil { - return err - } - } - case "v1": var cfg v1.Charts if err := yaml.Unmarshal(doc, &cfg); err != nil { return err } registry := o.Registry + annotation := cfg.GetAnnotations() if registry == "" { - annotation := cfg.GetAnnotations() if annotation != nil { registry = annotation[consts.ImageAnnotationRegistry] } } for i, ch := range cfg.Spec.Charts { + // Resolve excludeExtras: per-chart field > chart manifest annotation > CLI flag. + excludeExtras := o.ExcludeExtras + if !o.ExcludeExtras && annotation != nil && annotation[consts.ImageAnnotationExcludeExtras] == "true" { + excludeExtras = true + } + if ch.ExcludeExtras { + excludeExtras = ch.ExcludeExtras + } + if err := storeChart(ctx, s, ch, &flags.AddChartOpts{ ChartOpts: &action.ChartPathOptions{ @@ -543,6 +414,7 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor }, AddImages: ch.AddImages, AddDependencies: ch.AddDependencies, + ExcludeExtras: excludeExtras, Registry: registry, Platform: o.Platform, }, @@ -554,108 +426,30 @@ func processContent(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *stor } default: - return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind) - } - - case consts.ChartsCollectionKind: - switch gvk.Version { - case "v1alpha1": - l.Warnf("!!! DEPRECATION WARNING !!! 
apiVersion [%s] will be removed in a future release...", gvk.Version) - - var alphaCfg v1alpha1.ThickCharts - if err := yaml.Unmarshal(doc, &alphaCfg); err != nil { - return err - } - var v1Cfg v1.ThickCharts - if err := convert.ConvertThickCharts(&alphaCfg, &v1Cfg); err != nil { - return err - } - for _, chObj := range v1Cfg.Spec.Charts { - tc, err := tchart.NewThickChart(chObj, &action.ChartPathOptions{ - RepoURL: chObj.RepoURL, - Version: chObj.Version, - }) - if err != nil { - return err - } - if _, err := s.AddOCICollection(ctx, tc); err != nil { - return err - } - } - - case "v1": - var cfg v1.ThickCharts - if err := yaml.Unmarshal(doc, &cfg); err != nil { - return err - } - for _, chObj := range cfg.Spec.Charts { - tc, err := tchart.NewThickChart(chObj, &action.ChartPathOptions{ - RepoURL: chObj.RepoURL, - Version: chObj.Version, - }) - if err != nil { - return err - } - if _, err := s.AddOCICollection(ctx, tc); err != nil { - return err - } - } - - default: - return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind) - } - - case consts.ImageTxtsContentKind: - switch gvk.Version { - case "v1alpha1": - l.Warnf("!!! DEPRECATION WARNING !!! 
apiVersion [%s] will be removed in a future release...", gvk.Version) - - var alphaCfg v1alpha1.ImageTxts - if err := yaml.Unmarshal(doc, &alphaCfg); err != nil { - return err - } - var v1Cfg v1.ImageTxts - if err := convert.ConvertImageTxts(&alphaCfg, &v1Cfg); err != nil { - return err - } - for _, cfgIt := range v1Cfg.Spec.ImageTxts { - it, err := imagetxt.New(cfgIt.Ref, - imagetxt.WithIncludeSources(cfgIt.Sources.Include...), - imagetxt.WithExcludeSources(cfgIt.Sources.Exclude...), - ) - if err != nil { - return fmt.Errorf("convert ImageTxt %s: %v", v1Cfg.Name, err) - } - if _, err := s.AddOCICollection(ctx, it); err != nil { - return fmt.Errorf("add ImageTxt %s to store: %v", v1Cfg.Name, err) - } - } - - case "v1": - var cfg v1.ImageTxts - if err := yaml.Unmarshal(doc, &cfg); err != nil { - return err - } - for _, cfgIt := range cfg.Spec.ImageTxts { - it, err := imagetxt.New(cfgIt.Ref, - imagetxt.WithIncludeSources(cfgIt.Sources.Include...), - imagetxt.WithExcludeSources(cfgIt.Sources.Exclude...), - ) - if err != nil { - return fmt.Errorf("convert ImageTxt %s: %v", cfg.Name, err) - } - if _, err := s.AddOCICollection(ctx, it); err != nil { - return fmt.Errorf("add ImageTxt %s to store: %v", cfg.Name, err) - } - } - - default: - return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1 and v1alpha1]", gvk.Version, gvk.Kind) + return fmt.Errorf("unsupported version [%s] for kind [%s]... valid versions are [v1]", gvk.Version, gvk.Kind) } default: - return fmt.Errorf("unsupported kind [%s]... valid kinds are [Files, Images, Charts, ThickCharts, ImageTxts]", gvk.Kind) + return fmt.Errorf("unsupported kind [%s]... 
valid kinds are [Files, Images, Charts]", gvk.Kind) } } return nil } + +func processImageTxt(ctx context.Context, fi *os.File, o *flags.SyncOpts, s *store.Layout, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error { + l := log.FromContext(ctx) + l.Infof("syncing images from [%s] to store", filepath.Base(fi.Name())) + scanner := bufio.NewScanner(fi) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + img := v1.Image{Name: line} + l.Infof("adding image [%s] to the store [%s]", line, o.StoreDir) + if err := storeImage(ctx, s, img, o.Platform, o.ExcludeExtras, rso, ro, ""); err != nil { + return err + } + } + return scanner.Err() +} diff --git a/cmd/hauler/cli/store/sync_test.go b/cmd/hauler/cli/store/sync_test.go new file mode 100644 index 0000000..8a4ecf1 --- /dev/null +++ b/cmd/hauler/cli/store/sync_test.go @@ -0,0 +1,441 @@ +package store + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "os" + "testing" + + "hauler.dev/go/hauler/internal/flags" +) + +// writeManifestFile writes yamlContent to a temp file, seeks back to the +// start, and registers t.Cleanup to close + remove it. Returns the open +// *os.File, ready for processContent to read. +func writeManifestFile(t *testing.T, yamlContent string) *os.File { + t.Helper() + fi, err := os.CreateTemp(t.TempDir(), "hauler-manifest-*.yaml") + if err != nil { + t.Fatalf("writeManifestFile CreateTemp: %v", err) + } + t.Cleanup(func() { fi.Close() }) + if _, err := fi.WriteString(yamlContent); err != nil { + t.Fatalf("writeManifestFile WriteString: %v", err) + } + if _, err := fi.Seek(0, io.SeekStart); err != nil { + t.Fatalf("writeManifestFile Seek: %v", err) + } + return fi +} + +// newSyncOpts builds a SyncOpts pointing at storeDir. 
+func newSyncOpts(storeDir string) *flags.SyncOpts { + return &flags.SyncOpts{ + StoreRootOpts: defaultRootOpts(storeDir), + } +} + +// -------------------------------------------------------------------------- +// processContent tests +// -------------------------------------------------------------------------- + +func TestProcessContent_Files_v1(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + fileURL := seedFileInHTTPServer(t, "synced.sh", "#!/bin/sh\necho hello") + + manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1 +kind: Files +metadata: + name: test-files +spec: + files: + - path: %s +`, fileURL) + + fi := writeManifestFile(t, manifest) + o := newSyncOpts(s.Root) + ro := defaultCliOpts() + + if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil { + t.Fatalf("processContent Files v1: %v", err) + } + assertArtifactInStore(t, s, "synced.sh") +} + +func TestProcessContent_Charts_v1(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + // Use the same relative path as add_test.go: url.ParseRequestURI accepts + // absolute Unix paths, making isUrl() return true for them. A relative + // path correctly keeps isUrl() false so Helm sees it as a local directory. 
+ manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1 +kind: Charts +metadata: + name: test-charts +spec: + charts: + - name: rancher-cluster-templates-0.5.2.tgz + repoURL: %s +`, chartTestdataDir) + + fi := writeManifestFile(t, manifest) + o := newSyncOpts(s.Root) + ro := defaultCliOpts() + + if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil { + t.Fatalf("processContent Charts v1: %v", err) + } + assertArtifactInStore(t, s, "rancher-cluster-templates") +} + +func TestProcessContent_Images_v1(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + host, _ := newLocalhostRegistry(t) + seedImage(t, host, "myorg/myimage", "v1") // transport not needed; AddImage reads via localhost scheme + + manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1 +kind: Images +metadata: + name: test-images +spec: + images: + - name: %s/myorg/myimage:v1 +`, host) + + fi := writeManifestFile(t, manifest) + o := newSyncOpts(s.Root) + ro := defaultCliOpts() + + if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil { + t.Fatalf("processContent Images v1: %v", err) + } + assertArtifactInStore(t, s, "myorg/myimage") +} + +func TestProcessContent_UnsupportedKind(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + // A valid apiVersion with an unsupported kind passes content.Load but hits + // the default branch of the kind switch, returning an error. 
+ manifest := `apiVersion: content.hauler.cattle.io/v1 +kind: Unknown +metadata: + name: test +` + + fi := writeManifestFile(t, manifest) + o := newSyncOpts(s.Root) + ro := defaultCliOpts() + + if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err == nil { + t.Fatal("expected error for unsupported kind, got nil") + } +} + +func TestProcessContent_UnsupportedVersion(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + // An unrecognized apiVersion causes content.Load to return an error, which + // processContent treats as a warn-and-skip — the function returns nil and + // no artifact is added to the store. + manifest := `apiVersion: content.hauler.cattle.io/v2 +kind: Files +metadata: + name: test +spec: + files: + - path: /dev/null +` + + fi := writeManifestFile(t, manifest) + o := newSyncOpts(s.Root) + ro := defaultCliOpts() + + if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil { + t.Fatalf("expected nil for unrecognized apiVersion (warn-and-skip), got: %v", err) + } + if n := countArtifactsInStore(t, s); n != 0 { + t.Errorf("expected 0 artifacts after skipped document, got %d", n) + } +} + +func TestProcessContent_MultiDoc(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + fileURL := seedFileInHTTPServer(t, "multi.sh", "#!/bin/sh\necho multi") + host, _ := newLocalhostRegistry(t) + seedImage(t, host, "myorg/multiimage", "v1") + + manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1 +kind: Files +metadata: + name: test-files +spec: + files: + - path: %s +--- +apiVersion: content.hauler.cattle.io/v1 +kind: Charts +metadata: + name: test-charts +spec: + charts: + - name: rancher-cluster-templates-0.5.2.tgz + repoURL: %s +--- +apiVersion: content.hauler.cattle.io/v1 +kind: Images +metadata: + name: test-images +spec: + images: + - name: %s/myorg/multiimage:v1 +`, fileURL, chartTestdataDir, host) + + fi := writeManifestFile(t, manifest) + o := newSyncOpts(s.Root) + ro := 
defaultCliOpts() + + if err := processContent(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil { + t.Fatalf("processContent MultiDoc: %v", err) + } + assertArtifactInStore(t, s, "multi.sh") + assertArtifactInStore(t, s, "rancher-cluster-templates") + assertArtifactInStore(t, s, "myorg/multiimage") +} + +// -------------------------------------------------------------------------- +// SyncCmd integration tests +// -------------------------------------------------------------------------- + +func TestSyncCmd_LocalFile(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + fileURL := seedFileInHTTPServer(t, "synced-local.sh", "#!/bin/sh\necho local") + + manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1 +kind: Files +metadata: + name: test-sync-local +spec: + files: + - path: %s +`, fileURL) + + // SyncCmd reads by file path, so write and close the manifest file first. + manifestFile, err := os.CreateTemp(t.TempDir(), "hauler-sync-local-*.yaml") + if err != nil { + t.Fatalf("CreateTemp: %v", err) + } + manifestPath := manifestFile.Name() + if _, err := manifestFile.WriteString(manifest); err != nil { + manifestFile.Close() + t.Fatalf("WriteString: %v", err) + } + manifestFile.Close() + + o := newSyncOpts(s.Root) + o.FileName = []string{manifestPath} + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + if err := SyncCmd(ctx, o, s, rso, ro); err != nil { + t.Fatalf("SyncCmd LocalFile: %v", err) + } + assertArtifactInStore(t, s, "synced-local.sh") +} + +// -------------------------------------------------------------------------- +// processImageTxt tests +// -------------------------------------------------------------------------- + +// writeImageTxtFile writes lines to a temp file and returns it seeked to the +// start, ready for processImageTxt to consume. 
+func writeImageTxtFile(t *testing.T, lines string) *os.File { + t.Helper() + fi, err := os.CreateTemp(t.TempDir(), "images-*.txt") + if err != nil { + t.Fatalf("writeImageTxtFile CreateTemp: %v", err) + } + t.Cleanup(func() { fi.Close() }) + if _, err := fi.WriteString(lines); err != nil { + t.Fatalf("writeImageTxtFile WriteString: %v", err) + } + if _, err := fi.Seek(0, io.SeekStart); err != nil { + t.Fatalf("writeImageTxtFile Seek: %v", err) + } + return fi +} + +func TestProcessImageTxt_SingleImage(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + host, _ := newLocalhostRegistry(t) + seedImage(t, host, "myorg/txtimage", "v1") + + fi := writeImageTxtFile(t, fmt.Sprintf("%s/myorg/txtimage:v1\n", host)) + o := newSyncOpts(s.Root) + ro := defaultCliOpts() + + if err := processImageTxt(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil { + t.Fatalf("processImageTxt single image: %v", err) + } + assertArtifactInStore(t, s, "myorg/txtimage") +} + +func TestProcessImageTxt_MultipleImages(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + host, _ := newLocalhostRegistry(t) + seedImage(t, host, "myorg/alpha", "v1") + seedImage(t, host, "myorg/beta", "v2") + + content := fmt.Sprintf("%s/myorg/alpha:v1\n%s/myorg/beta:v2\n", host, host) + fi := writeImageTxtFile(t, content) + o := newSyncOpts(s.Root) + ro := defaultCliOpts() + + if err := processImageTxt(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil { + t.Fatalf("processImageTxt multiple images: %v", err) + } + assertArtifactInStore(t, s, "myorg/alpha") + assertArtifactInStore(t, s, "myorg/beta") +} + +func TestProcessImageTxt_SkipsBlankLinesAndComments(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + host, _ := newLocalhostRegistry(t) + seedImage(t, host, "myorg/commenttest", "v1") + + content := fmt.Sprintf("# this is a comment\n\n%s/myorg/commenttest:v1\n\n# another comment\n", host) + fi := writeImageTxtFile(t, content) + o := newSyncOpts(s.Root) + ro := 
defaultCliOpts() + + if err := processImageTxt(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil { + t.Fatalf("processImageTxt skip blanks/comments: %v", err) + } + assertArtifactInStore(t, s, "myorg/commenttest") + if n := countArtifactsInStore(t, s); n != 1 { + t.Errorf("expected 1 artifact, got %d", n) + } +} + +func TestProcessImageTxt_EmptyFile(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + fi := writeImageTxtFile(t, "") + o := newSyncOpts(s.Root) + ro := defaultCliOpts() + + if err := processImageTxt(ctx, fi, o, s, o.StoreRootOpts, ro); err != nil { + t.Fatalf("processImageTxt empty file: %v", err) + } + if n := countArtifactsInStore(t, s); n != 0 { + t.Errorf("expected 0 artifacts for empty file, got %d", n) + } +} + +// -------------------------------------------------------------------------- +// SyncCmd --image-txt integration tests +// -------------------------------------------------------------------------- + +func TestSyncCmd_ImageTxt_LocalFile(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + host, _ := newLocalhostRegistry(t) + seedImage(t, host, "myorg/syncedtxt", "v1") + + txtFile, err := os.CreateTemp(t.TempDir(), "images-*.txt") + if err != nil { + t.Fatalf("CreateTemp: %v", err) + } + txtPath := txtFile.Name() + fmt.Fprintf(txtFile, "%s/myorg/syncedtxt:v1\n", host) + txtFile.Close() + + o := newSyncOpts(s.Root) + o.ImageTxt = []string{txtPath} + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + if err := SyncCmd(ctx, o, s, rso, ro); err != nil { + t.Fatalf("SyncCmd ImageTxt LocalFile: %v", err) + } + assertArtifactInStore(t, s, "myorg/syncedtxt") +} + +func TestSyncCmd_ImageTxt_RemoteFile(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + host, _ := newLocalhostRegistry(t) + seedImage(t, host, "myorg/remotetxt", "v1") + + imageListContent := fmt.Sprintf("%s/myorg/remotetxt:v1\n", host) + imageSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { + w.Header().Set("Content-Type", "text/plain") + io.WriteString(w, imageListContent) //nolint:errcheck + })) + t.Cleanup(imageSrv.Close) + + o := newSyncOpts(s.Root) + o.ImageTxt = []string{imageSrv.URL + "/images.txt"} + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + if err := SyncCmd(ctx, o, s, rso, ro); err != nil { + t.Fatalf("SyncCmd ImageTxt RemoteFile: %v", err) + } + assertArtifactInStore(t, s, "myorg/remotetxt") +} + +func TestSyncCmd_RemoteManifest(t *testing.T) { + ctx := newTestContext(t) + s := newTestStore(t) + + fileURL := seedFileInHTTPServer(t, "synced-remote.sh", "#!/bin/sh\necho remote") + + manifest := fmt.Sprintf(`apiVersion: content.hauler.cattle.io/v1 +kind: Files +metadata: + name: test-sync-remote +spec: + files: + - path: %s +`, fileURL) + + // Serve the manifest itself over HTTP so SyncCmd's remote-download path is exercised. + manifestSrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/yaml") + io.WriteString(w, manifest) //nolint:errcheck + })) + t.Cleanup(manifestSrv.Close) + + o := newSyncOpts(s.Root) + o.FileName = []string{manifestSrv.URL + "/manifest.yaml"} + rso := defaultRootOpts(s.Root) + ro := defaultCliOpts() + + if err := SyncCmd(ctx, o, s, rso, ro); err != nil { + t.Fatalf("SyncCmd RemoteManifest: %v", err) + } + assertArtifactInStore(t, s, "synced-remote.sh") +} diff --git a/cmd/hauler/cli/store/testhelpers_test.go b/cmd/hauler/cli/store/testhelpers_test.go new file mode 100644 index 0000000..27cbd60 --- /dev/null +++ b/cmd/hauler/cli/store/testhelpers_test.go @@ -0,0 +1,338 @@ +package store + +// testhelpers_test.go provides shared test helpers for cmd/hauler/cli/store tests. +// +// This file is in-package (package store) so tests can call unexported +// helpers like storeImage, storeFile, rewriteReference, etc. 
+ +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/registry" + gcrv1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/random" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/go-containerregistry/pkg/v1/static" + gvtypes "github.com/google/go-containerregistry/pkg/v1/types" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/rs/zerolog" + "helm.sh/helm/v3/pkg/action" + + "hauler.dev/go/hauler/internal/flags" + "hauler.dev/go/hauler/pkg/consts" + "hauler.dev/go/hauler/pkg/store" +) + +// newTestStore creates a fresh store in a temp directory. Fatal on error. +func newTestStore(t *testing.T) *store.Layout { + t.Helper() + s, err := store.NewLayout(t.TempDir()) + if err != nil { + t.Fatalf("newTestStore: %v", err) + } + return s +} + +// newTestRegistry starts an in-memory OCI registry backed by httptest. +// Returns the host (host:port) and remote.Options that route requests through +// the server's plain-HTTP transport. The server is shut down via t.Cleanup. +// +// Pass the returned remoteOpts to seedImage/seedIndex and to store.AddImage +// calls so that both sides use the same plain-HTTP transport. +func newTestRegistry(t *testing.T) (host string, remoteOpts []remote.Option) { + t.Helper() + srv := httptest.NewServer(registry.New()) + t.Cleanup(srv.Close) + host = strings.TrimPrefix(srv.URL, "http://") + remoteOpts = []remote.Option{remote.WithTransport(srv.Client().Transport)} + return host, remoteOpts +} + +// seedImage pushes a random single-platform image to the test registry. +// repo is a bare path like "myorg/myimage"; tag is the image tag string. 
+// Pass the remoteOpts from newTestRegistry so writes use the correct transport. +func seedImage(t *testing.T, host, repo, tag string, opts ...remote.Option) gcrv1.Image { + t.Helper() + img, err := random.Image(512, 2) + if err != nil { + t.Fatalf("seedImage random.Image: %v", err) + } + ref, err := name.NewTag(host+"/"+repo+":"+tag, name.Insecure) + if err != nil { + t.Fatalf("seedImage name.NewTag: %v", err) + } + if err := remote.Write(ref, img, opts...); err != nil { + t.Fatalf("seedImage remote.Write: %v", err) + } + return img +} + +// seedIndex pushes a 2-platform image index (linux/amd64 + linux/arm64) to +// the test registry. Pass the remoteOpts from newTestRegistry. +func seedIndex(t *testing.T, host, repo, tag string, opts ...remote.Option) gcrv1.ImageIndex { + t.Helper() + amd64Img, err := random.Image(512, 2) + if err != nil { + t.Fatalf("seedIndex random.Image amd64: %v", err) + } + arm64Img, err := random.Image(512, 2) + if err != nil { + t.Fatalf("seedIndex random.Image arm64: %v", err) + } + idx := mutate.AppendManifests( + empty.Index, + mutate.IndexAddendum{ + Add: amd64Img, + Descriptor: gcrv1.Descriptor{ + MediaType: gvtypes.OCIManifestSchema1, + Platform: &gcrv1.Platform{OS: "linux", Architecture: "amd64"}, + }, + }, + mutate.IndexAddendum{ + Add: arm64Img, + Descriptor: gcrv1.Descriptor{ + MediaType: gvtypes.OCIManifestSchema1, + Platform: &gcrv1.Platform{OS: "linux", Architecture: "arm64"}, + }, + }, + ) + ref, err := name.NewTag(host+"/"+repo+":"+tag, name.Insecure) + if err != nil { + t.Fatalf("seedIndex name.NewTag: %v", err) + } + if err := remote.WriteIndex(ref, idx, opts...); err != nil { + t.Fatalf("seedIndex remote.WriteIndex: %v", err) + } + return idx +} + +// seedFileInHTTPServer starts an httptest server serving a single file at +// /filename with the given content. Returns the full URL. Server closed via t.Cleanup. 
+func seedFileInHTTPServer(t *testing.T, filename, content string) string { + t.Helper() + mux := http.NewServeMux() + mux.HandleFunc("/"+filename, func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/octet-stream") + io.WriteString(w, content) //nolint:errcheck + }) + srv := httptest.NewServer(mux) + t.Cleanup(srv.Close) + return srv.URL + "/" + filename +} + +// defaultRootOpts returns a StoreRootOpts pointed at storeDir with Retries=1. +// Using Retries=1 avoids the 5-second RetriesInterval sleep in failure tests. +func defaultRootOpts(storeDir string) *flags.StoreRootOpts { + return &flags.StoreRootOpts{ + StoreDir: storeDir, + Retries: 1, + } +} + +// defaultCliOpts returns CliRootOpts with error-level logging and IgnoreErrors=false. +func defaultCliOpts() *flags.CliRootOpts { + return &flags.CliRootOpts{ + IgnoreErrors: false, + LogLevel: "error", + } +} + +// newTestContext returns a context with a no-op zerolog logger attached so that +// log.FromContext does not emit to stdout/stderr during tests. +func newTestContext(t *testing.T) context.Context { + t.Helper() + zl := zerolog.New(io.Discard) + return zl.WithContext(context.Background()) +} + +// newAddChartOpts builds an AddChartOpts for loading a local .tgz chart from +// repoURL (typically a testdata directory path) at the given version string. +func newAddChartOpts(repoURL, version string) *flags.AddChartOpts { + return &flags.AddChartOpts{ + ChartOpts: &action.ChartPathOptions{ + RepoURL: repoURL, + Version: version, + }, + } +} + +// assertArtifactInStore walks the store and fails the test if no descriptor +// has an AnnotationRefName containing refSubstring. 
+func assertArtifactInStore(t *testing.T, s *store.Layout, refSubstring string) { + t.Helper() + found := false + if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error { + if strings.Contains(desc.Annotations[ocispec.AnnotationRefName], refSubstring) { + found = true + } + return nil + }); err != nil { + t.Fatalf("assertArtifactInStore walk: %v", err) + } + if !found { + t.Errorf("no artifact with ref containing %q found in store", refSubstring) + } +} + +// assertArtifactKindInStore walks the store and fails if no descriptor has an +// AnnotationRefName containing refSubstring AND KindAnnotationName equal to kind. +func assertArtifactKindInStore(t *testing.T, s *store.Layout, refSubstring, kind string) { + t.Helper() + found := false + if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error { + if strings.Contains(desc.Annotations[ocispec.AnnotationRefName], refSubstring) && + desc.Annotations[consts.KindAnnotationName] == kind { + found = true + } + return nil + }); err != nil { + t.Fatalf("assertArtifactKindInStore walk: %v", err) + } + if !found { + t.Errorf("no artifact with ref containing %q and kind %q found in store", refSubstring, kind) + } +} + +// countArtifactsInStore returns the number of descriptors in the store index. +func countArtifactsInStore(t *testing.T, s *store.Layout) int { + t.Helper() + count := 0 + if err := s.OCI.Walk(func(_ string, _ ocispec.Descriptor) error { + count++ + return nil + }); err != nil { + t.Fatalf("countArtifactsInStore walk: %v", err) + } + return count +} + +// seedCosignV2Artifacts pushes synthetic cosign v2 signature, attestation, and SBOM +// manifests at the sha256-.sig / .att / .sbom tags derived from baseImg's digest. +// Pass the remoteOpts from newLocalhostRegistry or newTestRegistry. 
+func seedCosignV2Artifacts(t *testing.T, host, repo string, baseImg gcrv1.Image, opts ...remote.Option) { + t.Helper() + hash, err := baseImg.Digest() + if err != nil { + t.Fatalf("seedCosignV2Artifacts: get digest: %v", err) + } + tagPrefix := strings.ReplaceAll(hash.String(), ":", "-") + for _, suffix := range []string{".sig", ".att", ".sbom"} { + img, err := random.Image(64, 1) + if err != nil { + t.Fatalf("seedCosignV2Artifacts: random.Image (%s): %v", suffix, err) + } + ref, err := name.NewTag(host+"/"+repo+":"+tagPrefix+suffix, name.Insecure) + if err != nil { + t.Fatalf("seedCosignV2Artifacts: NewTag (%s): %v", suffix, err) + } + if err := remote.Write(ref, img, opts...); err != nil { + t.Fatalf("seedCosignV2Artifacts: Write (%s): %v", suffix, err) + } + } +} + +// seedOCI11Referrer pushes a synthetic OCI 1.1 / cosign v3 Sigstore bundle manifest +// whose subject field points at baseImg. The in-process registry auto-registers it in +// the referrers index so remote.Referrers returns it. +// Pass the remoteOpts from newLocalhostRegistry or newTestRegistry. 
+func seedOCI11Referrer(t *testing.T, host, repo string, baseImg gcrv1.Image, opts ...remote.Option) { + t.Helper() + hash, err := baseImg.Digest() + if err != nil { + t.Fatalf("seedOCI11Referrer: get digest: %v", err) + } + rawManifest, err := baseImg.RawManifest() + if err != nil { + t.Fatalf("seedOCI11Referrer: raw manifest: %v", err) + } + mt, err := baseImg.MediaType() + if err != nil { + t.Fatalf("seedOCI11Referrer: media type: %v", err) + } + baseDesc := gcrv1.Descriptor{ + MediaType: mt, + Digest: hash, + Size: int64(len(rawManifest)), + } + + bundleJSON := []byte(`{"mediaType":"application/vnd.dev.sigstore.bundle.v0.3+json"}`) + bundleLayer := static.NewLayer(bundleJSON, gvtypes.MediaType(consts.SigstoreBundleMediaType)) + referrerImg, err := mutate.AppendLayers(empty.Image, bundleLayer) + if err != nil { + t.Fatalf("seedOCI11Referrer: AppendLayers: %v", err) + } + referrerImg = mutate.MediaType(referrerImg, gvtypes.OCIManifestSchema1) + referrerImg = mutate.ConfigMediaType(referrerImg, gvtypes.MediaType(consts.OCIEmptyConfigMediaType)) + referrerImg = mutate.Subject(referrerImg, baseDesc).(gcrv1.Image) + + referrerTag, err := name.NewTag(host+"/"+repo+":bundle-referrer", name.Insecure) + if err != nil { + t.Fatalf("seedOCI11Referrer: NewTag: %v", err) + } + if err := remote.Write(referrerTag, referrerImg, opts...); err != nil { + t.Fatalf("seedOCI11Referrer: Write: %v", err) + } +} + +// seedStoreDescriptor injects a descriptor with the given annotations directly +// into the store index without requiring a real registry or blob. This is used +// to pre-populate the store for rewriteReference unit tests. 
+func seedStoreDescriptor(t *testing.T, s *store.Layout, annotations map[string]string) { + t.Helper() + desc := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageManifest, + Digest: digest.Digest("sha256:" + strings.Repeat("a", 64)), + Size: 1, + Annotations: annotations, + } + if err := s.OCI.AddIndex(desc); err != nil { + t.Fatalf("seedStoreDescriptor: %v", err) + } +} + +// assertAnnotationsInStore walks the store and fails if no descriptor has both +// AnnotationRefName == refName AND ContainerdImageNameKey == containerdName. +func assertAnnotationsInStore(t *testing.T, s *store.Layout, refName, containerdName string) { + t.Helper() + found := false + if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error { + if desc.Annotations[ocispec.AnnotationRefName] == refName && + desc.Annotations[consts.ContainerdImageNameKey] == containerdName { + found = true + } + return nil + }); err != nil { + t.Fatalf("assertAnnotationsInStore walk: %v", err) + } + if !found { + t.Errorf("no artifact with AnnotationRefName=%q and ContainerdImageNameKey=%q found in store", refName, containerdName) + } +} + +// assertReferrerInStore walks the store and fails if no descriptor has a kind +// annotation with the KindAnnotationReferrers prefix and a ref containing refSubstring. 
+func assertReferrerInStore(t *testing.T, s *store.Layout, refSubstring string) { + t.Helper() + found := false + if err := s.OCI.Walk(func(_ string, desc ocispec.Descriptor) error { + if strings.Contains(desc.Annotations[ocispec.AnnotationRefName], refSubstring) && + strings.HasPrefix(desc.Annotations[consts.KindAnnotationName], consts.KindAnnotationReferrers) { + found = true + } + return nil + }); err != nil { + t.Fatalf("assertReferrerInStore walk: %v", err) + } + if !found { + t.Errorf("no OCI referrer with ref containing %q found in store", refSubstring) + } +} diff --git a/go.mod b/go.mod index ccb5e4a..fb8eae5 100644 --- a/go.mod +++ b/go.mod @@ -2,15 +2,12 @@ module hauler.dev/go/hauler go 1.25.5 -replace github.com/sigstore/cosign/v3 => github.com/hauler-dev/cosign/v3 v3.0.5-0.20260212234448-00b85d677dfc - replace github.com/distribution/distribution/v3 => github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 -replace github.com/docker/cli => github.com/docker/cli v28.5.1+incompatible // needed to keep oras v1.2.7 working, which depends on docker/cli v28.5.1+incompatible - require ( github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be github.com/containerd/containerd v1.7.29 + github.com/containerd/errdefs v1.0.0 github.com/distribution/distribution/v3 v3.0.0 github.com/google/go-containerregistry v0.20.7 github.com/gorilla/handlers v1.5.2 @@ -22,24 +19,22 @@ require ( github.com/opencontainers/image-spec v1.1.1 github.com/pkg/errors v0.9.1 github.com/rs/zerolog v1.34.0 - github.com/sigstore/cosign/v3 v3.0.2 + github.com/sigstore/cosign/v3 v3.0.5 github.com/sirupsen/logrus v1.9.4 github.com/spf13/afero v1.15.0 github.com/spf13/cobra v1.10.2 golang.org/x/sync v0.19.0 gopkg.in/yaml.v3 v3.0.1 helm.sh/helm/v3 v3.19.0 - k8s.io/apimachinery v0.35.0 - k8s.io/client-go v0.35.0 - oras.land/oras-go v1.2.7 + k8s.io/apimachinery v0.35.1 ) require ( - cloud.google.com/go/auth v0.18.0 // indirect + cloud.google.com/go/auth 
v0.18.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect cloud.google.com/go/compute/metadata v0.9.0 // indirect cuelabs.dev/go/oci/ociregistry v0.0.0-20250722084951-074d06050084 // indirect - cuelang.org/go v0.15.3 // indirect + cuelang.org/go v0.15.4 // indirect dario.cat/mergo v1.0.1 // indirect github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 // indirect @@ -77,21 +72,21 @@ require ( github.com/aliyun/credentials-go v1.3.2 // indirect github.com/andybalholm/brotli v1.2.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go-v2 v1.41.0 // indirect - github.com/aws/aws-sdk-go-v2/config v1.32.5 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.19.5 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 // indirect + github.com/aws/aws-sdk-go-v2 v1.41.1 // indirect + github.com/aws/aws-sdk-go-v2/config v1.32.7 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.19.7 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect github.com/aws/aws-sdk-go-v2/service/ecr v1.51.2 // indirect github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.38.2 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 // indirect - github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.30.7 // indirect - 
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 // indirect github.com/aws/smithy-go v1.24.0 // indirect github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.11.0 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -104,7 +99,7 @@ require ( github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd // indirect github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b // indirect github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 // indirect - github.com/buildkite/agent/v3 v3.115.2 // indirect + github.com/buildkite/agent/v3 v3.115.4 // indirect github.com/buildkite/go-pipeline v0.16.0 // indirect github.com/buildkite/interpolate v0.1.5 // indirect github.com/buildkite/roko v1.4.0 // indirect @@ -112,13 +107,11 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 // indirect - github.com/chzyer/readline v1.5.1 // indirect github.com/clbanning/mxj/v2 v2.7.0 // indirect github.com/clipperhouse/displaywidth v0.6.0 // indirect github.com/clipperhouse/stringish v0.1.1 // indirect github.com/clipperhouse/uax29/v2 v2.3.0 // indirect github.com/cockroachdb/apd/v3 v3.2.1 // indirect - github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v1.0.0-rc.2 // indirect github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect @@ -130,12 +123,9 @@ require ( github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // 
indirect github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/distribution/reference v0.6.0 // indirect - github.com/docker/cli v29.0.3+incompatible // indirect + github.com/docker/cli v29.2.0+incompatible // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker v28.5.2+incompatible // indirect github.com/docker/docker-credential-helpers v0.9.4 // indirect - github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 // indirect @@ -153,7 +143,7 @@ require ( github.com/go-errors/errors v1.4.2 // indirect github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-ini/ini v1.67.0 // indirect - github.com/go-jose/go-jose/v4 v4.1.3 // indirect + github.com/go-jose/go-jose/v4 v4.1.4 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-openapi/analysis v0.24.1 // indirect @@ -193,12 +183,12 @@ require ( github.com/google/go-querystring v1.2.0 // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.9 // indirect - github.com/googleapis/gax-go/v2 v2.16.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect + github.com/googleapis/gax-go/v2 v2.17.0 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/gosuri/uitable v0.0.4 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.5 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect 
github.com/hashicorp/go-multierror v1.1.1 // indirect @@ -227,11 +217,10 @@ require ( github.com/letsencrypt/boulder v0.20251110.0 // indirect github.com/lib/pq v1.10.9 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect - github.com/manifoldco/promptui v0.9.0 // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.19 // indirect - github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/miekg/pkcs11 v1.1.2 // indirect github.com/mikelolasagasti/xz v1.0.1 // indirect github.com/minio/minlz v1.0.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -253,7 +242,7 @@ require ( github.com/olekukonko/cat v0.0.0-20250911104152-50322a0618f6 // indirect github.com/olekukonko/errors v1.1.0 // indirect github.com/olekukonko/ll v0.1.3 // indirect - github.com/open-policy-agent/opa v1.12.1 // indirect + github.com/open-policy-agent/opa v1.12.3 // indirect github.com/pborman/uuid v1.2.1 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect @@ -263,7 +252,7 @@ require ( github.com/prometheus/client_golang v1.23.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/common v0.67.5 // indirect - github.com/prometheus/procfs v0.17.0 // indirect + github.com/prometheus/procfs v0.19.2 // indirect github.com/protocolbuffers/txtpbfmt v0.0.0-20251016062345-16587c79cd91 // indirect github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect @@ -279,7 +268,7 @@ require ( github.com/sigstore/fulcio v1.8.5 // indirect github.com/sigstore/protobuf-specs v0.5.0 // indirect github.com/sigstore/rekor v1.5.0 // indirect - github.com/sigstore/rekor-tiles/v2 v2.0.1 // indirect + github.com/sigstore/rekor-tiles/v2 v2.2.0 // indirect github.com/sigstore/sigstore v1.10.4 // indirect 
github.com/sigstore/sigstore-go v1.1.4 // indirect github.com/sigstore/timestamp-authority/v2 v2.0.4 // indirect @@ -303,7 +292,6 @@ require ( github.com/valyala/fastjson v1.6.4 // indirect github.com/vbatts/tar-split v0.12.2 // indirect github.com/vektah/gqlparser/v2 v2.5.31 // indirect - github.com/withfig/autocomplete-tools/integrations/cobra v1.2.1 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect @@ -312,40 +300,41 @@ require ( github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 // indirect github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 // indirect github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f // indirect - gitlab.com/gitlab-org/api/client-go v1.11.0 // indirect + gitlab.com/gitlab-org/api/client-go v1.25.0 // indirect go.mongodb.org/mongo-driver v1.17.6 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect - go.opentelemetry.io/otel v1.39.0 // indirect - go.opentelemetry.io/otel/metric v1.39.0 // indirect - go.opentelemetry.io/otel/sdk v1.39.0 // indirect - go.opentelemetry.io/otel/trace v1.39.0 // indirect + go.opentelemetry.io/otel v1.40.0 // indirect + go.opentelemetry.io/otel/metric v1.40.0 // indirect + go.opentelemetry.io/otel/sdk v1.40.0 // indirect + go.opentelemetry.io/otel/trace v1.40.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.1 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect go4.org v0.0.0-20230225012048-214862532bf5 // indirect - golang.org/x/crypto v0.46.0 // indirect - golang.org/x/mod v0.31.0 // indirect - golang.org/x/net v0.48.0 // indirect - golang.org/x/oauth2 v0.34.0 // indirect - golang.org/x/sys v0.39.0 // indirect - golang.org/x/term v0.38.0 // 
indirect - golang.org/x/text v0.32.0 // indirect + golang.org/x/crypto v0.47.0 // indirect + golang.org/x/mod v0.32.0 // indirect + golang.org/x/net v0.49.0 // indirect + golang.org/x/oauth2 v0.35.0 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/term v0.39.0 // indirect + golang.org/x/text v0.33.0 // indirect golang.org/x/time v0.14.0 // indirect - google.golang.org/api v0.260.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect - google.golang.org/grpc v1.78.0 // indirect + google.golang.org/api v0.267.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 // indirect + google.golang.org/grpc v1.79.3 // indirect google.golang.org/protobuf v1.36.11 // indirect gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/api v0.35.0 // indirect + k8s.io/api v0.35.1 // indirect k8s.io/apiextensions-apiserver v0.34.0 // indirect k8s.io/apiserver v0.34.0 // indirect k8s.io/cli-runtime v0.34.0 // indirect + k8s.io/client-go v0.35.1 // indirect k8s.io/component-base v0.34.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect diff --git a/go.sum b/go.sum index 521b434..f6884dd 100644 --- a/go.sum +++ b/go.sum @@ -9,10 +9,10 @@ cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTj cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.121.6 
h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c= -cloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI= -cloud.google.com/go/auth v0.18.0 h1:wnqy5hrv7p3k7cShwAU/Br3nzod7fxoqG+k0VZ+/Pk0= -cloud.google.com/go/auth v0.18.0/go.mod h1:wwkPM1AgE1f2u6dG443MiWoD8C3BtOywNsUMcUTVDRo= +cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= +cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= +cloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs= +cloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA= cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -22,23 +22,24 @@ cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCB cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= -cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= -cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= -cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E= -cloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY= +cloud.google.com/go/kms v1.25.0 h1:gVqvGGUmz0nYCmtoxWmdc1wli2L1apgP8U4fghPGSbQ= +cloud.google.com/go/kms v1.25.0/go.mod h1:XIdHkzfj0bUO3E+LvwPg+oc7s58/Ns8Nd8Sdtljihbk= +cloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8= +cloud.google.com/go/longrunning v0.8.0/go.mod h1:UmErU2Onzi+fKDg2gR7dusz11Pe26aknR4kHmJJqIfk= 
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cuelabs.dev/go/oci/ociregistry v0.0.0-20250722084951-074d06050084 h1:4k1yAtPvZJZQTu8DRY8muBo0LHv6TqtrE0AO5n6IPYs= cuelabs.dev/go/oci/ociregistry v0.0.0-20250722084951-074d06050084/go.mod h1:4WWeZNxUO1vRoZWAHIG0KZOd6dA25ypyWuwD3ti0Tdc= -cuelang.org/go v0.15.3 h1:JKR/lZVwuIGlLTGIaJ0jONz9+CK3UDx06sQ6DDxNkaE= -cuelang.org/go v0.15.3/go.mod h1:NYw6n4akZcTjA7QQwJ1/gqWrrhsN4aZwhcAL0jv9rZE= +cuelang.org/go v0.15.4 h1:lrkTDhqy8dveHgX1ZLQ6WmgbhD8+rXa0fD25hxEKYhw= +cuelang.org/go v0.15.4/go.mod h1:NYw6n4akZcTjA7QQwJ1/gqWrrhsN4aZwhcAL0jv9rZE= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +filippo.io/edwards25519 v1.1.1 h1:YpjwWWlNmGIDyXOn8zLzqiD+9TyIlPhGFG96P39uBpw= +filippo.io/edwards25519 v1.1.1/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= @@ -47,8 +48,8 @@ github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0 
github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/provider v0.14.0/go.mod h1:tlqp9mUGbsP+0z3Q+c0Q5MgSdq/OMwQhm5bffR3Q3ss= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0 h1:fou+2+WFTib47nS+nz/ozhEBnvU96bKHy6LjRsY4E28= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.21.0/go.mod h1:t76Ruy8AHvUAC8GfMWJMa0ElSbuIcO03NLpynfbgsPA= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0= github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA= @@ -168,18 +169,18 @@ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3d github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= -github.com/aws/aws-sdk-go-v2 v1.41.0 h1:tNvqh1s+v0vFYdA1xq0aOJH+Y5cRyZ5upu6roPgPKd4= -github.com/aws/aws-sdk-go-v2 v1.41.0/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= -github.com/aws/aws-sdk-go-v2/config v1.32.5 h1:pz3duhAfUgnxbtVhIK39PGF/AHYyrzGEyRD9Og0QrE8= -github.com/aws/aws-sdk-go-v2/config v1.32.5/go.mod h1:xmDjzSUs/d0BB7ClzYPAZMmgQdrodNjPPhd6bGASwoE= -github.com/aws/aws-sdk-go-v2/credentials v1.19.5 h1:xMo63RlqP3ZZydpJDMBsH9uJ10hgHYfQFIk1cHDXrR4= -github.com/aws/aws-sdk-go-v2/credentials 
v1.19.5/go.mod h1:hhbH6oRcou+LpXfA/0vPElh/e0M3aFeOblE1sssAAEk= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16 h1:80+uETIWS1BqjnN9uJ0dBUaETh+P1XwFy5vwHwK5r9k= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.16/go.mod h1:wOOsYuxYuB/7FlnVtzeBYRcjSRtQpAW0hCP7tIULMwo= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16 h1:rgGwPzb82iBYSvHMHXc8h9mRoOUBZIGFgKb9qniaZZc= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.16/go.mod h1:L/UxsGeKpGoIj6DxfhOWHWQ/kGKcd4I1VncE4++IyKA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16 h1:1jtGzuV7c82xnqOVfx2F0xmJcOw5374L7N6juGW6x6U= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.16/go.mod h1:M2E5OQf+XLe+SZGmmpaI2yy+J326aFf6/+54PoxSANc= +github.com/aws/aws-sdk-go-v2 v1.41.1 h1:ABlyEARCDLN034NhxlRUSZr4l71mh+T5KAeGh6cerhU= +github.com/aws/aws-sdk-go-v2 v1.41.1/go.mod h1:MayyLB8y+buD9hZqkCW3kX1AKq07Y5pXxtgB+rRFhz0= +github.com/aws/aws-sdk-go-v2/config v1.32.7 h1:vxUyWGUwmkQ2g19n7JY/9YL8MfAIl7bTesIUykECXmY= +github.com/aws/aws-sdk-go-v2/config v1.32.7/go.mod h1:2/Qm5vKUU/r7Y+zUk/Ptt2MDAEKAfUtKc1+3U1Mo3oY= +github.com/aws/aws-sdk-go-v2/credentials v1.19.7 h1:tHK47VqqtJxOymRrNtUXN5SP/zUTvZKeLx4tH6PGQc8= +github.com/aws/aws-sdk-go-v2/credentials v1.19.7/go.mod h1:qOZk8sPDrxhf+4Wf4oT2urYJrYt3RejHSzgAquYeppw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17 h1:I0GyV8wiYrP8XpA70g1HBcQO1JlQxCMTW9npl5UbDHY= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.17/go.mod h1:tyw7BOl5bBe/oqvoIeECFJjMdzXoa/dfVz3QQ5lgHGA= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17 h1:xOLELNKGp2vsiteLsvLPwxC+mYmO6OZ8PYgiuPJzF8U= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.17/go.mod h1:5M5CI3D12dNOtH3/mk6minaRwI2/37ifCURZISxA/IQ= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17 h1:WWLqlh79iO48yLkj1v3ISRNiv+3KdQoZ6JWyfcsyQik= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.17/go.mod h1:EhG22vHRrvF8oXSTYStZhJc1aUgKtnJe+aOiFEV90cM= 
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= github.com/aws/aws-sdk-go-v2/service/ecr v1.51.2 h1:aq2N/9UkbEyljIQ7OFcudEgUsJzO8MYucmfsM/k/dmc= @@ -188,18 +189,18 @@ github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.38.2 h1:9fe6w8bydUwNAhFVmjo+SR github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.38.2/go.mod h1:x7gU4CAyAz4BsM9hlRkhHiYw2GIr1QCmN45uwQw9l/E= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4 h1:0ryTNEdJbzUCEWkVXEXoqlXV72J5keC1GvILMOuD00E= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.4/go.mod h1:HQ4qwNZh32C3CBeO6iJLQlgtMzqeG17ziAA/3KDJFow= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16 h1:oHjJHeUy0ImIV0bsrX0X91GkV5nJAyv1l1CC9lnO0TI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.16/go.mod h1:iRSNGgOYmiYwSCXxXaKb9HfOEj40+oTKn8pTxMlYkRM= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.4 h1:2gom8MohxN0SnhHZBYAC4S8jHG+ENEnXjyJ5xKe3vLc= -github.com/aws/aws-sdk-go-v2/service/kms v1.49.4/go.mod h1:HO31s0qt0lso/ADvZQyzKs8js/ku0fMHsfyXW8OPVYc= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.4 h1:HpI7aMmJ+mm1wkSHIA2t5EaFFv5EFYXePW30p1EIrbQ= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.4/go.mod h1:C5RdGMYGlfM0gYq/tifqgn4EbyX99V15P2V3R+VHbQU= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.7 h1:eYnlt6QxnFINKzwxP5/Ucs1vkG7VT3Iezmvfgc2waUw= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.7/go.mod h1:+fWt2UHSb4kS7Pu8y+BMBvJF0EWx+4H0hzNwtDNRTrg= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12 h1:AHDr0DaHIAo8c9t1emrzAlVDFp+iMMKnPdYy6XO4MCE= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.12/go.mod h1:GQ73XawFFiWxyWXMHWfhiomvP3tXtdNar/fi8z18sx0= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.5 h1:SciGFVNZ4mHdm7gpD1dgZYnCuVdX1s+lFTg4+4DOy70= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.5/go.mod 
h1:iW40X4QBmUxdP+fZNOpfmkdMZqsovezbAeO+Ubiv2pk= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17 h1:RuNSMoozM8oXlgLG/n6WLaFGoea7/CddrCfIiSA+xdY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.17/go.mod h1:F2xxQ9TZz5gDWsclCtPQscGpP0VUOc8RqgFM3vDENmU= +github.com/aws/aws-sdk-go-v2/service/kms v1.49.5 h1:DKibav4XF66XSeaXcrn9GlWGHos6D/vJ4r7jsK7z5CE= +github.com/aws/aws-sdk-go-v2/service/kms v1.49.5/go.mod h1:1SdcmEGUEQE1mrU2sIgeHtcMSxHuybhPvuEPANzIDfI= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.5 h1:VrhDvQib/i0lxvr3zqlUwLwJP4fpmpyD9wYG1vfSu+Y= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.5/go.mod h1:k029+U8SY30/3/ras4G/Fnv/b88N4mAfliNn08Dem4M= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.9 h1:v6EiMvhEYBoHABfbGB4alOYmCIrcgyPPiBE1wZAEbqk= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.9/go.mod h1:yifAsgBxgJWn3ggx70A3urX2AN49Y5sJTD1UQFlfqBw= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13 h1:gd84Omyu9JLriJVCbGApcLzVR3XtmC4ZDPcAI6Ftvds= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.13/go.mod h1:sTGThjphYE4Ohw8vJiRStAcu3rbjtXRsdNB0TvZ5wwo= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.6 h1:5fFjR/ToSOzB2OQ/XqWpZBmNvmP/pJ1jOWYlFDJTjRQ= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.6/go.mod h1:qgFDZQSD/Kys7nJnVqYlWKnh0SSdMjAi0uSwON4wgYQ= github.com/aws/smithy-go v1.24.0 h1:LpilSUItNPFr1eY85RYgTIg5eIEPtvFbskaFcmmIUnk= github.com/aws/smithy-go v1.24.0/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.11.0 h1:GOPttfOAf5qAgx7r6b+zCWZrvCsfKffkL4H6mSYx1kA= @@ -228,8 +229,8 @@ github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembj github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= github.com/bugsnag/panicwrap 
v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= -github.com/buildkite/agent/v3 v3.115.2 h1:26A/dEabfzjorS3Wh/low+yOBM/u8QaT59BYWu0M92w= -github.com/buildkite/agent/v3 v3.115.2/go.mod h1:a3t090/PPxAIIPCjlXF5fhfRvG0E9huFsnMX7B76iIQ= +github.com/buildkite/agent/v3 v3.115.4 h1:oxuLAjwHADBlTZuTrTb0JPt0FBcbGo55ZqDHPJ0jn+E= +github.com/buildkite/agent/v3 v3.115.4/go.mod h1:LKY99ujcnFwX8ihEXuMLuPIy3SPL2unKWGJ/DRLICr0= github.com/buildkite/go-pipeline v0.16.0 h1:wEgWUMRAgSg1ZnWOoA3AovtYYdTvN0dLY1zwUWmPP+4= github.com/buildkite/go-pipeline v0.16.0/go.mod h1:VE37qY3X5pmAKKUMoDZvPsHOQuyakB9cmXj9Qn6QasA= github.com/buildkite/interpolate v0.1.5 h1:v2Ji3voik69UZlbfoqzx+qfcsOKLA61nHdU79VV+tPU= @@ -251,14 +252,8 @@ github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHe github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4= github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= -github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= -github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= -github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/clbanning/mxj/v2 v2.5.5/go.mod 
h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/clbanning/mxj/v2 v2.7.0 h1:WA/La7UGCanFe5NpHF0Q3DNtnCsVoxbPKuyBNHWRyME= github.com/clbanning/mxj/v2 v2.7.0/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= @@ -325,20 +320,14 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= -github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= -github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/cli v28.5.1+incompatible h1:ESutzBALAD6qyCLqbQSEf1a/U8Ybms5agw59yGVc+yY= -github.com/docker/cli v28.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v29.2.0+incompatible h1:9oBd9+YM7rxjZLfyMGxjraKBKE4/nVyvVfN4qNl9XRM= +github.com/docker/cli v29.2.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= -github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.9.4 h1:76ItO69/AP/V4yT9V4uuuItG0B1N8hvt0T0c0NN/DzI= github.com/docker/docker-credential-helpers v0.9.4/go.mod 
h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c= -github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= -github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= @@ -389,8 +378,8 @@ github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs= -github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= +github.com/go-jose/go-jose/v4 v4.1.4 h1:moDMcTHmvE6Groj34emNPLs/qtYXRVcd6S7NHbHz3kA= +github.com/go-jose/go-jose/v4 v4.1.4/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= @@ -550,12 +539,12 @@ github.com/google/trillian v1.7.2/go.mod h1:mfQJW4qRH6/ilABtPYNBerVJAJ/upxHLX81z github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.9 h1:TOpi/QG8iDcZlkQlGlFUti/ZtyLkliXvHDcyUIMuFrU= 
-github.com/googleapis/enterprise-certificate-proxy v0.3.9/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/enterprise-certificate-proxy v0.3.11 h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao= +github.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y= -github.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14= +github.com/googleapis/gax-go/v2 v2.17.0 h1:RksgfBpxqff0EZkDWYuz9q/uWsTVz+kf43LsZ1J6SMc= +github.com/googleapis/gax-go/v2 v2.17.0/go.mod h1:mzaqghpQp4JDh3HvADwrat+6M3MOIDp5YKHhb9PAgDY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE= @@ -570,8 +559,8 @@ github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJr github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4 h1:kEISI/Gx67NzH3nJxAmY/dGac80kKZgZt134u7Y/k1s= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.4/go.mod h1:6Nz966r3vQYCqIzWsuEl9d7cf7mRhtDmm++sOxlnfxI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.5 h1:jP1RStw811EvUDzsUQ9oESqw2e4RqCjSAD9qIL8eMns= +github.com/grpc-ecosystem/grpc-gateway/v2 
v2.27.5/go.mod h1:WXNBZ64q3+ZUemCMXD9kYnr56H7CgZxDBHCVwstfl3s= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -601,8 +590,6 @@ github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= github.com/hashicorp/vault/api v1.22.0 h1:+HYFquE35/B74fHoIeXlZIP2YADVboaPjaSicHEZiH0= github.com/hashicorp/vault/api v1.22.0/go.mod h1:IUZA2cDvr4Ok3+NtK2Oq/r+lJeXkeCrHRmqdyWfpmGM= -github.com/hauler-dev/cosign/v3 v3.0.5-0.20260212234448-00b85d677dfc h1:heoWkq5ahyHmVyBVtLLPH9JTNVoUr1gBW91yFopk5yQ= -github.com/hauler-dev/cosign/v3 v3.0.5-0.20260212234448-00b85d677dfc/go.mod h1:DJY5LPzHiI6bWpG/Q/NQUTfeASjkN8TDAUx1Nnt3I0I= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -688,8 +675,6 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= -github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-colorable v0.1.14 
h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= @@ -708,8 +693,8 @@ github.com/mholt/archives v0.1.5/go.mod h1:3TPMmBLPsgszL+1As5zECTuKwKvIfj6YcwWPp github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= github.com/miekg/pkcs11 v1.0.3-0.20190429190417-a667d056470f/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= -github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/miekg/pkcs11 v1.1.2 h1:/VxmeAX5qU6Q3EwafypogwWbYryHFmF2RpkJmw3m4MQ= +github.com/miekg/pkcs11 v1.1.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mikelolasagasti/xz v1.0.1 h1:Q2F2jX0RYJUG3+WsM+FJknv+6eVjsjXNDV0KJXZzkD0= github.com/mikelolasagasti/xz v1.0.1/go.mod h1:muAirjiOUxPRXwm9HdDtB3uoRPrGnL85XHtokL9Hcgc= github.com/minio/minlz v1.0.1 h1:OUZUzXcib8diiX+JYxyRLIdomyZYzHct6EShOKtQY2A= @@ -790,8 +775,8 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= -github.com/open-policy-agent/opa v1.12.1 h1:MWfmXuXB119O7rSOJ5GdKAaW15yBirjnLkFRBGy0EX0= -github.com/open-policy-agent/opa v1.12.1/go.mod h1:RnDgm04GA1RjEXJvrsG9uNT/+FyBNmozcPvA2qz60M4= +github.com/open-policy-agent/opa v1.12.3 h1:qe3m/w52baKC/HJtippw+hYBUKCzuBCPjB+D5P9knfc= +github.com/open-policy-agent/opa v1.12.3/go.mod h1:RnDgm04GA1RjEXJvrsG9uNT/+FyBNmozcPvA2qz60M4= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod 
h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -834,8 +819,8 @@ github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxza github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= -github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= +github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws= +github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw= github.com/protocolbuffers/txtpbfmt v0.0.0-20251016062345-16587c79cd91 h1:s1LvMaU6mVwoFtbxv/rCZKE7/fwDmDY684FfUe4c1Io= github.com/protocolbuffers/txtpbfmt v0.0.0-20251016062345-16587c79cd91/go.mod h1:JSbkp0BviKovYYt9XunS95M3mLPibE9bGg+Y95DsEEY= github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= @@ -871,26 +856,28 @@ github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= +github.com/sigstore/cosign/v3 v3.0.5 h1:c1zPqjU+H4wmirgysC+AkWMg7a7fykyOYF/m+F1150I= +github.com/sigstore/cosign/v3 v3.0.5/go.mod h1:ble1vMvJagCFyTIDkibCq6MIHiWDw00JNYl0f9rB4T4= github.com/sigstore/fulcio v1.8.5 h1:HYTD1/L5wlBp8JxsWxUf8hmfaNBBF/x3r3p5l6tZwbA= github.com/sigstore/fulcio v1.8.5/go.mod h1:tSLYK3JsKvJpDW1BsIsVHZgHj+f8TjXARzqIUWSsSPQ= 
github.com/sigstore/protobuf-specs v0.5.0 h1:F8YTI65xOHw70NrvPwJ5PhAzsvTnuJMGLkA4FIkofAY= github.com/sigstore/protobuf-specs v0.5.0/go.mod h1:+gXR+38nIa2oEupqDdzg4qSBT0Os+sP7oYv6alWewWc= github.com/sigstore/rekor v1.5.0 h1:rL7SghHd5HLCtsCrxw0yQg+NczGvM75EjSPPWuGjaiQ= github.com/sigstore/rekor v1.5.0/go.mod h1:D7JoVCUkxwQOpPDNYeu+CE8zeBC18Y5uDo6tF8s2rcQ= -github.com/sigstore/rekor-tiles/v2 v2.0.1 h1:1Wfz15oSRNGF5Dzb0lWn5W8+lfO50ork4PGIfEKjZeo= -github.com/sigstore/rekor-tiles/v2 v2.0.1/go.mod h1:Pjsbhzj5hc3MKY8FfVTYHBUHQEnP0ozC4huatu4x7OU= +github.com/sigstore/rekor-tiles/v2 v2.2.0 h1:QwJNwxT+k5A3id+Hrg+8vYcNsTaB0Sj51xjfW2rKyAs= +github.com/sigstore/rekor-tiles/v2 v2.2.0/go.mod h1:/WNRYctHKdxcjgXydYwO5OclW72Zqh6fNHSyGE8zQOE= github.com/sigstore/sigstore v1.10.4 h1:ytOmxMgLdcUed3w1SbbZOgcxqwMG61lh1TmZLN+WeZE= github.com/sigstore/sigstore v1.10.4/go.mod h1:tDiyrdOref3q6qJxm2G+JHghqfmvifB7hw+EReAfnbI= github.com/sigstore/sigstore-go v1.1.4 h1:wTTsgCHOfqiEzVyBYA6mDczGtBkN7cM8mPpjJj5QvMg= github.com/sigstore/sigstore-go v1.1.4/go.mod h1:2U/mQOT9cjjxrtIUeKDVhL+sHBKsnWddn8URlswdBsg= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.3 h1:D/FRl5J9UYAJPGZRAJbP0dH78pfwWnKsyCSBwFBU8CI= -github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.3/go.mod h1:2GIWuNvTRMvrzd0Nl8RNqxrt9H7X0OBStwOSzGYRjYw= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.3 h1:k5VMLf/ms7hh6MLgVoorM0K+hSMwZLXoywlxh4CXqP8= -github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.3/go.mod h1:S1Bp3dmP7jYlXcGLAxG81wRbE01NIZING8ZIy0dJlAI= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.3 h1:AVWs0E6rVZMoDTE0Iyezrpo1J6RlI5B4QZhAC4FLE30= -github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.3/go.mod h1:nxQYF0D6u7mVtiP1azj1YVDIrtz7S0RYCVTqUG8IcCk= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.3 h1:lJSdaC/aOlFHlvqmmV696n1HdXLMLEKGwpNZMV0sKts= -github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.3/go.mod 
h1:b2rV9qPbt/jv/Yy75AIOZThP8j+pe1ZdLEjOwmjPdoA= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.4 h1:VZ+L6SKVWbLPHznIF0tBuO7qKMFdJiJMVwFKu9DlY5o= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.10.4/go.mod h1:Rstj47WpJym25il8j4jTL0BfikzP/9AhVD+DsBcYzZc= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.4 h1:G7yOv8bxk3zIEEZyVCixPxtePIAm+t3ZWSaKRPzVw+o= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.10.4/go.mod h1:hxJelB/bRItMYOzi6qD9xEKjse2QZcikh4TbysfdDHc= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.4 h1:Qxt6dE4IwhJ6gIXmg2q4S/SeqEDSZ29nmfsv7Zb6LL4= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.10.4/go.mod h1:hJVeNOwarqfyALjOwsf0OR8YA/A96NABucEaQumPr30= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.4 h1:KVavYMPfSf5NryOl6VrZ9nRG3fXOOJOPp7Czk/YCPkM= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.10.4/go.mod h1:J7CA1AaBkyK8dYq6EdQANhj+8oEcsA7PrIp088qgPiY= github.com/sigstore/timestamp-authority/v2 v2.0.4 h1:65IBa4LUeFWDQu9hiTt5lBpi/F5jonJWZtH6VLn4InU= github.com/sigstore/timestamp-authority/v2 v2.0.4/go.mod h1:EXJLiMDBqRPlzC02hPiFSiYTCqSuUpU68a4vr0DFePM= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -975,8 +962,6 @@ github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CP github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vektah/gqlparser/v2 v2.5.31 h1:YhWGA1mfTjID7qJhd1+Vxhpk5HTgydrGU9IgkWBTJ7k= github.com/vektah/gqlparser/v2 v2.5.31/go.mod h1:c1I28gSOVNzlfc4WuDlqU7voQnsqI6OG2amkBAFmgts= -github.com/withfig/autocomplete-tools/integrations/cobra v1.2.1 h1:+dBg5k7nuTE38VVdoroRsT0Z88fmvdYrI2EjzJst35I= -github.com/withfig/autocomplete-tools/integrations/cobra v1.2.1/go.mod h1:nmuySobZb4kFgFy6BptpXp/BBw+xFSyvVPP6auoJB4k= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod 
h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= @@ -1011,8 +996,8 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1 github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= github.com/zalando/go-keyring v0.2.6 h1:r7Yc3+H+Ux0+M72zacZoItR3UDxeWfKTcabvkI8ua9s= github.com/zalando/go-keyring v0.2.6/go.mod h1:2TCrxYrbUNYfNS/Kgy/LSrkSQzZ5UPVH85RwfczwvcI= -gitlab.com/gitlab-org/api/client-go v1.11.0 h1:L+qzw4kiCf3jKdKHQAwiqYKITvzBrW/tl8ampxNLlv0= -gitlab.com/gitlab-org/api/client-go v1.11.0/go.mod h1:adtVJ4zSTEJ2fP5Pb1zF4Ox1OKFg0MH43yxpb0T0248= +gitlab.com/gitlab-org/api/client-go v1.25.0 h1:9YVk2o1CjZWKh2/KGOsNbOReBSxFIdBv6LrdOnBfEQY= +gitlab.com/gitlab-org/api/client-go v1.25.0/go.mod h1:r060AandE8Md/L5oKdUVjljL8YQprOAxKzUnpqWqP3A= go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1027,26 +1012,26 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.6 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg= -go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= -go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms= +go.opentelemetry.io/otel v1.40.0/go.mod 
h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 h1:in9O8ESIOlwJAEGTkkf34DesGRAc/Pn8qJ7k3r/42LM= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0/go.mod h1:Rp0EXBm5tfnv0WL+ARyO/PHBEaEAT8UUHQ6AGJcSq6c= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 h1:Ckwye2FpXkYgiHX7fyVrN1uA/UYd9ounqqTuSNAv0k4= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0/go.mod h1:teIFJh5pW2y+AN7riv6IBPX2DuesS3HgP39mwOspKwU= -go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= -go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= -go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= -go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= -go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= -go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= -go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= -go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= +go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g= +go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc= +go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8= +go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE= +go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw= +go.opentelemetry.io/otel/sdk/metric 
v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg= +go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw= +go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA= go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= -go.step.sm/crypto v0.75.0 h1:UAHYD6q6ggYyzLlIKHv1MCUVjZIesXRZpGTlRC/HSHw= -go.step.sm/crypto v0.75.0/go.mod h1:wwQ57+ajmDype9mrI/2hRyrvJd7yja5xVgWYqpUN3PE= +go.step.sm/crypto v0.76.0 h1:K23BSaeoiY7Y5dvvijTeYC9EduDBetNwQYMBwMhi1aA= +go.step.sm/crypto v0.76.0/go.mod h1:PXYJdKkK8s+GHLwLguFaLxHNAFsFL3tL1vSBrYfey5k= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -1074,8 +1059,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= -golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1084,8 +1069,6 @@ 
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20250813145105-42675adae3e6 h1:SbTAbRFnd5kjQXbczszQ0hdk3ctwYf3qBNH9jIsGclE= -golang.org/x/exp v0.0.0-20250813145105-42675adae3e6/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1106,8 +1089,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= -golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c= +golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1140,15 +1123,15 @@ golang.org/x/net v0.7.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= -golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= -golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/oauth2 v0.35.0 h1:Mv2mzuHuZuY2+bkyWXIHMfhNdJAdwW3FuWeCPYN5GVQ= +golang.org/x/oauth2 v0.35.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1166,7 +1149,6 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1191,7 +1173,6 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1203,16 +1184,16 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= -golang.org/x/sys v0.39.0/go.mod 
h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= -golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1224,8 +1205,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= -golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod 
h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= @@ -1278,8 +1259,8 @@ google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.260.0 h1:XbNi5E6bOVEj/uLXQRlt6TKuEzMD7zvW/6tNwltE4P4= -google.golang.org/api v0.260.0/go.mod h1:Shj1j0Phr/9sloYrKomICzdYgsSDImpTxME8rGLaZ/o= +google.golang.org/api v0.267.0 h1:w+vfWPMPYeRs8qH1aYYsFX68jMls5acWl/jocfLomwE= +google.golang.org/api v0.267.0/go.mod h1:Jzc0+ZfLnyvXma3UtaTl023TdhZu6OMBP9tJ+0EmFD0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1298,12 +1279,12 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934= -google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0= 
-google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b h1:uA40e2M6fYRBf0+8uN5mLlqUtV192iiksiICIBkYJ1E= -google.golang.org/genproto/googleapis/api v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:Xa7le7qx2vmqB/SzWUBa7KdMjpdpAHlh5QCSnjessQk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= +google.golang.org/genproto v0.0.0-20260128011058-8636f8732409 h1:VQZ/yAbAtjkHgH80teYd2em3xtIkkHd7ZhqfH2N9CsM= +google.golang.org/genproto v0.0.0-20260128011058-8636f8732409/go.mod h1:rxKD3IEILWEu3P44seeNOAwZN4SaoKaQ/2eTg4mM6EM= +google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409 h1:merA0rdPeUV3YIIfHHcH4qBkiQAc1nfCKSI7lB4cV2M= +google.golang.org/genproto/googleapis/api v0.0.0-20260128011058-8636f8732409/go.mod h1:fl8J1IvUjCilwZzQowmw2b7HQB2eAuYBabMXzWurF+I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20 h1:Jr5R2J6F6qWyzINc+4AM8t5pfUz6beZpHp678GNrMbE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20260203192932-546029d2fa20/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1313,8 +1294,8 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc= -google.golang.org/grpc 
v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U= +google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= +google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1361,18 +1342,18 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= -k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= +k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q= +k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM= k8s.io/apiextensions-apiserver v0.34.0 h1:B3hiB32jV7BcyKcMU5fDaDxk882YrJ1KU+ZSkA9Qxoc= k8s.io/apiextensions-apiserver v0.34.0/go.mod h1:hLI4GxE1BDBy9adJKxUxCEHBGZtGfIg98Q+JmTD7+g0= -k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= -k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU= +k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= k8s.io/apiserver v0.34.0 h1:Z51fw1iGMqN7uJ1kEaynf2Aec1Y774PqU+FVWCFV3Jg= k8s.io/apiserver v0.34.0/go.mod h1:52ti5YhxAvewmmpVRqlASvaqxt0gKJxvCeW7ZrwgazQ= k8s.io/cli-runtime v0.34.0 
h1:N2/rUlJg6TMEBgtQ3SDRJwa8XyKUizwjlOknT1mB2Cw= k8s.io/cli-runtime v0.34.0/go.mod h1:t/skRecS73Piv+J+FmWIQA2N2/rDjdYSQzEE67LUUs8= -k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= -k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= +k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM= +k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA= k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8= k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= @@ -1383,8 +1364,6 @@ k8s.io/kubectl v0.34.0 h1:NcXz4TPTaUwhiX4LU+6r6udrlm0NsVnSkP3R9t0dmxs= k8s.io/kubectl v0.34.0/go.mod h1:bmd0W5i+HuG7/p5sqicr0Li0rR2iIhXL0oUyLF3OjR4= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -oras.land/oras-go v1.2.7 h1:KF9rBAtKYMGB5gjgHV5XquUfYDER3ecQBEXjdI7KZWI= -oras.land/oras-go v1.2.7/go.mod h1:WVpIPbm82xjWT/GJU3TqZ0y9Ctj3DGco4wLYvGdOVvA= oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc= oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= diff --git a/internal/flags/add.go b/internal/flags/add.go index 92089a9..b927866 100644 --- a/internal/flags/add.go +++ b/internal/flags/add.go @@ -17,6 +17,7 @@ type AddImageOpts struct { Tlog bool Platform string Rewrite string + ExcludeExtras bool } func (o *AddImageOpts) AddFlags(cmd *cobra.Command) { @@ -27,9 +28,10 @@ func (o *AddImageOpts) AddFlags(cmd *cobra.Command) { f.StringVar(&o.CertOidcIssuer, "certificate-oidc-issuer", "", "(Optional) Cosign option to validate oidc issuer") f.StringVar(&o.CertOidcIssuerRegexp, 
"certificate-oidc-issuer-regexp", "", "(Optional) Cosign option to validate oidc issuer with regex") f.StringVar(&o.CertGithubWorkflowRepository, "certificate-github-workflow-repository", "", "(Optional) Cosign certificate-github-workflow-repository option") - f.BoolVar(&o.Tlog, "use-tlog-verify", false, "(Optional) Allow transparency log verification (defaults to false)") + f.BoolVar(&o.Tlog, "use-tlog-verify", false, "(Optional) Enable transparency log verification for key-based signature verification (keyless/OIDC verification always uses the tlog)") f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform of the image... i.e. linux/amd64 (defaults to all)") f.StringVar(&o.Rewrite, "rewrite", "", "(EXPERIMENTAL & Optional) Rewrite artifact path to specified string") + f.BoolVar(&o.ExcludeExtras, "exclude-extras", false, "(Optional) Exclude cosign signatures, attestations, SBOMs, and OCI referrers when pulling the image") } type AddFileOpts struct { @@ -49,6 +51,7 @@ type AddChartOpts struct { Rewrite string AddDependencies bool AddImages bool + ExcludeExtras bool HelmValues string Platform string Registry string @@ -74,6 +77,7 @@ func (o *AddChartOpts) AddFlags(cmd *cobra.Command) { cmd.Flags().BoolVar(&o.AddDependencies, "add-dependencies", false, "(EXPERIMENTAL & Optional) Fetch dependent helm charts") f.BoolVar(&o.AddImages, "add-images", false, "(EXPERIMENTAL & Optional) Fetch images referenced in helm charts") + f.BoolVar(&o.ExcludeExtras, "exclude-extras", false, "(Optional) Exclude cosign signatures, attestations, SBOMs, and OCI referrers when pulling images discovered via --add-images") f.StringVar(&o.HelmValues, "values", "", "(EXPERIMENTAL & Optional) Specify helm chart values when fetching images") f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform of the image, e.g. 
linux/amd64") f.StringVarP(&o.Registry, "registry", "g", "", "(Optional) Specify the registry of the image for images that do not alredy define one") diff --git a/internal/flags/info.go b/internal/flags/info.go index ead8996..bc3ac42 100644 --- a/internal/flags/info.go +++ b/internal/flags/info.go @@ -16,7 +16,7 @@ func (o *InfoOpts) AddFlags(cmd *cobra.Command) { f := cmd.Flags() f.StringVarP(&o.OutputFormat, "output", "o", "table", "(Optional) Specify the output format (table | json)") - f.StringVar(&o.TypeFilter, "type", "all", "(Optional) Filter on content type (image | chart | file | sigs | atts | sbom)") + f.StringVar(&o.TypeFilter, "type", "all", "(Optional) Filter on content type (image | chart | file | sigs | atts | sbom | referrer)") f.BoolVar(&o.ListRepos, "list-repos", false, "(Optional) List all repository names") f.BoolVar(&o.ShowDigests, "digests", false, "(Optional) Show digests of each artifact in the output table") } diff --git a/internal/flags/save.go b/internal/flags/save.go index 83bd2a6..92ea88a 100644 --- a/internal/flags/save.go +++ b/internal/flags/save.go @@ -10,6 +10,7 @@ type SaveOpts struct { FileName string Platform string ContainerdCompatibility bool + ChunkSize string } func (o *SaveOpts) AddFlags(cmd *cobra.Command) { @@ -18,5 +19,6 @@ func (o *SaveOpts) AddFlags(cmd *cobra.Command) { f.StringVarP(&o.FileName, "filename", "f", consts.DefaultHaulerArchiveName, "(Optional) Specify the name of outputted haul") f.StringVarP(&o.Platform, "platform", "p", "", "(Optional) Specify the platform for runtime imports... i.e. linux/amd64 (unspecified implies all)") f.BoolVar(&o.ContainerdCompatibility, "containerd", false, "(Optional) Enable import compatibility with containerd... removes oci-layout from the haul") + f.StringVar(&o.ChunkSize, "chunk-size", "", "(Optional) Split the output archive into chunks of the specified size (e.g. 
1G, 500M, 2048M)") } diff --git a/internal/flags/sync.go b/internal/flags/sync.go index 3246c65..455808e 100644 --- a/internal/flags/sync.go +++ b/internal/flags/sync.go @@ -2,12 +2,12 @@ package flags import ( "github.com/spf13/cobra" - "hauler.dev/go/hauler/pkg/consts" ) type SyncOpts struct { *StoreRootOpts FileName []string + ImageTxt []string Key string CertOidcIssuer string CertOidcIssuerRegexp string @@ -20,12 +20,14 @@ type SyncOpts struct { ProductRegistry string Tlog bool Rewrite string + ExcludeExtras bool } func (o *SyncOpts) AddFlags(cmd *cobra.Command) { f := cmd.Flags() - f.StringSliceVarP(&o.FileName, "filename", "f", []string{consts.DefaultHaulerManifestName}, "Specify the name of manifest(s) to sync") + f.StringSliceVarP(&o.FileName, "filename", "f", []string{}, "Specify the name of manifest(s) to sync") + f.StringSliceVarP(&o.ImageTxt, "image-txt", "i", []string{}, "Specify local or remote image.txt file(s) to sync images") f.StringVarP(&o.Key, "key", "k", "", "(Optional) Location of public key to use for signature verification") f.StringVar(&o.CertIdentity, "certificate-identity", "", "(Optional) Cosign certificate-identity (either --certificate-identity or --certificate-identity-regexp required for keyless verification)") f.StringVar(&o.CertIdentityRegexp, "certificate-identity-regexp", "", "(Optional) Cosign certificate-identity-regexp (either --certificate-identity or --certificate-identity-regexp required for keyless verification)") @@ -38,4 +40,5 @@ func (o *SyncOpts) AddFlags(cmd *cobra.Command) { f.StringVarP(&o.ProductRegistry, "product-registry", "c", "", "(Optional) Specify the product registry. 
Defaults to RGS Carbide Registry (rgcrprod.azurecr.us)") f.BoolVar(&o.Tlog, "use-tlog-verify", false, "(Optional) Allow transparency log verification (defaults to false)") f.StringVar(&o.Rewrite, "rewrite", "", "(EXPERIMENTAL & Optional) Rewrite artifact path to specified string") + f.BoolVar(&o.ExcludeExtras, "exclude-extras", false, "(Optional) Exclude cosign signatures, attestations, SBOMs, and OCI referrers when pulling images") } diff --git a/internal/mapper/filestore.go b/internal/mapper/filestore.go index 984266e..2122d44 100644 --- a/internal/mapper/filestore.go +++ b/internal/mapper/filestore.go @@ -2,6 +2,7 @@ package mapper import ( "context" + "fmt" "io" "os" "path/filepath" @@ -11,18 +12,21 @@ import ( "github.com/containerd/containerd/remotes" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "oras.land/oras-go/pkg/content" + "hauler.dev/go/hauler/pkg/content" ) // NewMapperFileStore creates a new file store that uses mapper functions for each detected descriptor. // -// This extends content.File, and differs in that it allows much more functionality into how each descriptor is written. -func NewMapperFileStore(root string, mapper map[string]Fn) *store { - fs := content.NewFile(root) - return &store{ - File: fs, - mapper: mapper, +// This extends content.OCI, and differs in that it allows much more functionality into how each descriptor is written. 
+func NewMapperFileStore(root string, mapper map[string]Fn) (*store, error) { + fs, err := content.NewOCI(root) + if err != nil { + return nil, err } + return &store{ + OCI: fs, + mapper: mapper, + }, nil } func (s *store) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) { @@ -35,7 +39,7 @@ func (s *store) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) hash = parts[1] } return &pusher{ - store: s.File, + store: s.OCI, tag: tag, ref: hash, mapper: s.mapper, @@ -43,43 +47,76 @@ func (s *store) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) } type store struct { - *content.File + *content.OCI mapper map[string]Fn } func (s *pusher) Push(ctx context.Context, desc ocispec.Descriptor) (ccontent.Writer, error) { - // TODO: This is suuuuuper ugly... redo this when oras v2 is out + // For manifests and indexes (which have AnnotationRefName), discard them + // They're metadata and don't need to be extracted if _, ok := content.ResolveName(desc); ok { - p, err := s.store.Pusher(ctx, s.ref) - if err != nil { - return nil, err - } - return p.Push(ctx, desc) + // Discard manifests/indexes, they're just metadata + return content.NewIoContentWriter(&nopCloser{io.Discard}, content.WithOutputHash(desc.Digest.String())), nil } - // If no custom mapper found, fall back to content.File mapper - if _, ok := s.mapper[desc.MediaType]; !ok { - return content.NewIoContentWriter(io.Discard, content.WithOutputHash(desc.Digest)), nil + // Check if this descriptor has a mapper for its media type + mapperFn, hasMapper := s.mapper[desc.MediaType] + if !hasMapper { + // Fall back to catch-all sentinel, then discard + mapperFn, hasMapper = s.mapper[DefaultCatchAll] + } + if !hasMapper { + // No mapper for this media type, discard it (config blobs, etc.) 
+ return content.NewIoContentWriter(&nopCloser{io.Discard}, content.WithOutputHash(desc.Digest.String())), nil } - filename, err := s.mapper[desc.MediaType](desc) + // Get the filename from the mapper function. + // An empty filename means the mapper explicitly declined this descriptor (e.g. a + // config blob that has no title annotation); treat it the same as no mapper. + filename, err := mapperFn(desc) if err != nil { return nil, err } - - fullFileName := filepath.Join(s.store.ResolvePath(""), filename) - // TODO: Don't rewrite everytime, we can check the digest - f, err := os.OpenFile(fullFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return nil, errors.Wrap(err, "pushing file") + if filename == "" { + return content.NewIoContentWriter(&nopCloser{io.Discard}, content.WithOutputHash(desc.Digest.String())), nil } - w := content.NewIoContentWriter(f, content.WithInputHash(desc.Digest), content.WithOutputHash(desc.Digest)) + // Get the destination directory and create the full path. + // Use absolute paths so the traversal check works even when destDir is relative (e.g. "."). + destDir, err := filepath.Abs(s.store.ResolvePath("")) + if err != nil { + return nil, errors.Wrap(err, "resolving destination dir") + } + fullFileName := filepath.Join(destDir, filename) + + // Guard against path traversal (e.g. filename containing "../") + if !strings.HasPrefix(fullFileName, destDir+string(filepath.Separator)) { + return nil, fmt.Errorf("path_traversal_disallowed: %q resolves outside destination dir", filename) + } + + // Create parent directories (e.g. 
when filename is "subdir/file.txt") + if err := os.MkdirAll(filepath.Dir(fullFileName), 0755); err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("creating directory for %s", fullFileName)) + } + + // Create the file + f, err := os.OpenFile(fullFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("creating file %s", fullFileName)) + } + + w := content.NewIoContentWriter(f, content.WithOutputHash(desc.Digest.String())) return w, nil } +type nopCloser struct { + io.Writer +} + +func (*nopCloser) Close() error { return nil } + type pusher struct { - store *content.File + store *content.OCI tag string ref string mapper map[string]Fn diff --git a/internal/mapper/mapper_test.go b/internal/mapper/mapper_test.go new file mode 100644 index 0000000..3ef61cd --- /dev/null +++ b/internal/mapper/mapper_test.go @@ -0,0 +1,349 @@ +package mapper + +import ( + "strings" + "testing" + + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "hauler.dev/go/hauler/pkg/consts" +) + +func TestFromManifest_DockerImage(t *testing.T) { + manifest := ocispec.Manifest{ + Config: ocispec.Descriptor{ + MediaType: consts.DockerConfigJSON, + }, + } + + target, err := FromManifest(manifest, t.TempDir()) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if target == nil { + t.Fatal("expected non-nil Target") + } +} + +func TestFromManifest_HelmChart(t *testing.T) { + manifest := ocispec.Manifest{ + Config: ocispec.Descriptor{ + MediaType: consts.ChartConfigMediaType, + }, + } + + target, err := FromManifest(manifest, t.TempDir()) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if target == nil { + t.Fatal("expected non-nil Target") + } +} + +func TestFromManifest_File(t *testing.T) { + manifest := ocispec.Manifest{ + Config: ocispec.Descriptor{ + MediaType: consts.FileLocalConfigMediaType, + }, + } + + target, err := 
FromManifest(manifest, t.TempDir()) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if target == nil { + t.Fatal("expected non-nil Target") + } +} + +func TestFromManifest_OciImageConfigWithTitleAnnotation(t *testing.T) { + // OCI artifacts distributed as "fake images" (e.g. rke2-binary) use the standard + // OCI image config type but set AnnotationTitle on their layers. FromManifest must + // dispatch to Files() (not Images()) so the title is used as the output filename. + manifest := ocispec.Manifest{ + Config: ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + }, + Layers: []ocispec.Descriptor{ + { + MediaType: consts.OCILayer, + Annotations: map[string]string{ + ocispec.AnnotationTitle: "rke2.linux-amd64", + }, + }, + }, + } + + target, err := FromManifest(manifest, t.TempDir()) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + s, ok := target.(*store) + if !ok { + t.Fatal("expected target to be *store") + } + if _, exists := s.mapper[consts.OCILayer]; !exists { + t.Fatal("expected Files() mapper (OCILayer key) for OCI image config with title annotation") + } +} + +func TestFromManifest_FileLayerFallback(t *testing.T) { + manifest := ocispec.Manifest{ + Config: ocispec.Descriptor{ + MediaType: "application/vnd.unknown.config.v1+json", + }, + Layers: []ocispec.Descriptor{ + { + MediaType: consts.FileLayerMediaType, + Annotations: map[string]string{ + ocispec.AnnotationTitle: "somefile.txt", + }, + }, + }, + } + + target, err := FromManifest(manifest, t.TempDir()) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if target == nil { + t.Fatal("expected non-nil Target") + } + + // Verify the returned store uses the Files() mapper by checking that the + // mapper contains the FileLayerMediaType key. 
+ s, ok := target.(*store) + if !ok { + t.Fatal("expected target to be *store") + } + if s.mapper == nil { + t.Fatal("expected non-nil mapper for file layer fallback") + } + if _, exists := s.mapper[consts.FileLayerMediaType]; !exists { + t.Fatal("expected mapper to contain consts.FileLayerMediaType key") + } +} + +func TestFromManifest_UnknownNoTitle(t *testing.T) { + manifest := ocispec.Manifest{ + Config: ocispec.Descriptor{ + MediaType: "application/vnd.unknown.config.v1+json", + }, + Layers: []ocispec.Descriptor{ + { + MediaType: "application/vnd.unknown.layer", + }, + }, + } + + target, err := FromManifest(manifest, t.TempDir()) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if target == nil { + t.Fatal("expected non-nil Target") + } + + // Unknown artifacts must use the Default catch-all mapper so blobs are not silently discarded + s, ok := target.(*store) + if !ok { + t.Fatal("expected target to be *store") + } + if _, exists := s.mapper[DefaultCatchAll]; !exists { + t.Fatal("expected default catch-all mapper for unknown artifact type") + } +} + +func TestFiles_CatchAll_WithTitle(t *testing.T) { + // OCI artifacts with custom layer media types (e.g. rke2-binary) must be + // extracted by the Files() catch-all when they carry AnnotationTitle. + mappers := Files() + + fn, ok := mappers[DefaultCatchAll] + if !ok { + t.Fatal("Files() must contain a DefaultCatchAll entry") + } + + d := digest.Digest("sha256:" + strings.Repeat("b", 64)) + desc := ocispec.Descriptor{ + MediaType: "application/vnd.rancher.rke2.binary", + Digest: d, + Annotations: map[string]string{ + ocispec.AnnotationTitle: "rke2.linux-amd64", + }, + } + + result, err := fn(desc) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if result != "rke2.linux-amd64" { + t.Errorf("expected %q, got %q", "rke2.linux-amd64", result) + } +} + +func TestFiles_CatchAll_NoTitle(t *testing.T) { + // Blobs without AnnotationTitle (e.g. 
config blobs) must be discarded by the + // Files() catch-all (empty filename = discard signal for Push). + mappers := Files() + + fn, ok := mappers[DefaultCatchAll] + if !ok { + t.Fatal("Files() must contain a DefaultCatchAll entry") + } + + d := digest.Digest("sha256:" + strings.Repeat("c", 64)) + desc := ocispec.Descriptor{ + MediaType: "application/vnd.oci.image.config.v1+json", + Digest: d, + } + + result, err := fn(desc) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if result != "" { + t.Errorf("expected empty string (discard) for config blob, got %q", result) + } +} + +func TestImages_MapperFn(t *testing.T) { + mappers := Images() + + fn, ok := mappers[consts.DockerLayer] + if !ok { + t.Fatalf("expected mapper for %s", consts.DockerLayer) + } + + d := digest.Digest("sha256:abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890") + desc := ocispec.Descriptor{ + Digest: d, + } + + result, err := fn(desc) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + + expected := "sha256:abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890.tar.gz" + if result != expected { + t.Fatalf("expected %q, got %q", expected, result) + } +} + +func TestImages_ConfigMapperFn(t *testing.T) { + mappers := Images() + + fn, ok := mappers[consts.DockerConfigJSON] + if !ok { + t.Fatalf("expected mapper for %s", consts.DockerConfigJSON) + } + + desc := ocispec.Descriptor{} + result, err := fn(desc) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + + if result != consts.ImageConfigFile { + t.Fatalf("expected %q, got %q", consts.ImageConfigFile, result) + } +} + +func TestChart_MapperFn_WithTitle(t *testing.T) { + mappers := Chart() + + fn, ok := mappers[consts.ChartLayerMediaType] + if !ok { + t.Fatalf("expected mapper for %s", consts.ChartLayerMediaType) + } + + desc := ocispec.Descriptor{ + Annotations: map[string]string{ + ocispec.AnnotationTitle: "mychart-1.0.0.tgz", + }, + } + + result, err := fn(desc) 
+ if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + + if result != "mychart-1.0.0.tgz" { + t.Fatalf("expected %q, got %q", "mychart-1.0.0.tgz", result) + } +} + +func TestChart_MapperFn_NoTitle(t *testing.T) { + mappers := Chart() + + fn, ok := mappers[consts.ChartLayerMediaType] + if !ok { + t.Fatalf("expected mapper for %s", consts.ChartLayerMediaType) + } + + desc := ocispec.Descriptor{} + + result, err := fn(desc) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + + if result != "chart.tar.gz" { + t.Fatalf("expected %q, got %q", "chart.tar.gz", result) + } +} + +func TestFiles_MapperFn_WithTitle(t *testing.T) { + mappers := Files() + + fn, ok := mappers[consts.FileLayerMediaType] + if !ok { + t.Fatalf("expected mapper for %s", consts.FileLayerMediaType) + } + + desc := ocispec.Descriptor{ + Annotations: map[string]string{ + ocispec.AnnotationTitle: "install.sh", + }, + } + + result, err := fn(desc) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + + if result != "install.sh" { + t.Fatalf("expected %q, got %q", "install.sh", result) + } +} + +func TestFiles_MapperFn_NoTitle(t *testing.T) { + mappers := Files() + + fn, ok := mappers[consts.FileLayerMediaType] + if !ok { + t.Fatalf("expected mapper for %s", consts.FileLayerMediaType) + } + + d := digest.Digest("sha256:" + strings.Repeat("a", 64)) + desc := ocispec.Descriptor{ + Digest: d, + } + + result, err := fn(desc) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + + if !strings.HasSuffix(result, ".file") { + t.Fatalf("expected result to end with .file, got %q", result) + } + + expected := "sha256:" + strings.Repeat("a", 64) + ".file" + if result != expected { + t.Fatalf("expected %q, got %q", expected, result) + } +} diff --git a/internal/mapper/mappers.go b/internal/mapper/mappers.go index 154daef..2d6cf6a 100644 --- a/internal/mapper/mappers.go +++ b/internal/mapper/mappers.go @@ -4,32 +4,44 @@ import ( "fmt" ocispec 
"github.com/opencontainers/image-spec/specs-go/v1" - "oras.land/oras-go/pkg/target" "hauler.dev/go/hauler/pkg/consts" + "hauler.dev/go/hauler/pkg/content" ) type Fn func(desc ocispec.Descriptor) (string, error) // FromManifest will return the appropriate content store given a reference and source type adequate for storing the results on disk -func FromManifest(manifest ocispec.Manifest, root string) (target.Target, error) { - // TODO: Don't rely solely on config mediatype +func FromManifest(manifest ocispec.Manifest, root string) (content.Target, error) { + // First, switch on config mediatype to identify known types. switch manifest.Config.MediaType { - case consts.DockerConfigJSON, consts.OCIManifestSchema1: - s := NewMapperFileStore(root, Images()) - defer s.Close() - return s, nil - case consts.ChartLayerMediaType, consts.ChartConfigMediaType: - s := NewMapperFileStore(root, Chart()) - defer s.Close() - return s, nil + return NewMapperFileStore(root, Chart()) - default: - s := NewMapperFileStore(root, nil) - defer s.Close() - return s, nil + case consts.FileLocalConfigMediaType, consts.FileDirectoryConfigMediaType, consts.FileHttpConfigMediaType: + return NewMapperFileStore(root, Files()) + + case consts.DockerConfigJSON, ocispec.MediaTypeImageConfig: + // Standard OCI/Docker image config. OCI artifacts that distribute files + // (e.g. rke2-binary) reuse this config type but set AnnotationTitle on their + // layers. When title annotations are present prefer Files() so the title is + // used as the output filename; otherwise treat as a container image. + for _, layer := range manifest.Layers { + if _, ok := layer.Annotations[ocispec.AnnotationTitle]; ok { + return NewMapperFileStore(root, Files()) + } + } + return NewMapperFileStore(root, Images()) } + + // Unknown config type: title annotation indicates a file artifact; otherwise use + // a catch-all mapper that writes blobs by digest. 
+ for _, layer := range manifest.Layers { + if _, ok := layer.Annotations[ocispec.AnnotationTitle]; ok { + return NewMapperFileStore(root, Files()) + } + } + return NewMapperFileStore(root, Default()) } func Images() map[string]Fn { @@ -81,3 +93,52 @@ func Chart() map[string]Fn { m[consts.ProvLayerMediaType] = provMapperFn return m } + +// DefaultCatchAll is the sentinel key used in a mapper map to match any media type +// not explicitly registered. Push checks for this key as a fallback. +const DefaultCatchAll = "" + +// Default returns a catch-all mapper that extracts any layer blob using its title +// annotation as the filename, falling back to a digest-based name. Used when the +// manifest config media type is not a known hauler type. +func Default() map[string]Fn { + m := make(map[string]Fn) + m[DefaultCatchAll] = Fn(func(desc ocispec.Descriptor) (string, error) { + if title, ok := desc.Annotations[ocispec.AnnotationTitle]; ok { + return title, nil + } + return fmt.Sprintf("%s.bin", desc.Digest.String()), nil + }) + return m +} + +func Files() map[string]Fn { + m := make(map[string]Fn) + + fileMapperFn := Fn(func(desc ocispec.Descriptor) (string, error) { + // Use the title annotation to determine the filename + if title, ok := desc.Annotations[ocispec.AnnotationTitle]; ok { + return title, nil + } + // Fallback to digest-based filename if no title + return fmt.Sprintf("%s.file", desc.Digest.String()), nil + }) + + // Match the media type that's actually used in the manifest + // (set by getter.LayerFrom in pkg/getter/getter.go) + m[consts.FileLayerMediaType] = fileMapperFn + m[consts.OCILayer] = fileMapperFn // Also handle standard OCI layers that have title annotation + m["application/vnd.oci.image.layer.v1.tar"] = fileMapperFn // And the tar variant + + // Catch-all for OCI artifacts that use custom layer media types (e.g. rke2-binary). 
+ // Write the blob if it carries an AnnotationTitle; silently discard everything else + // (config blobs, metadata) by returning an empty filename. + m[DefaultCatchAll] = Fn(func(desc ocispec.Descriptor) (string, error) { + if title, ok := desc.Annotations[ocispec.AnnotationTitle]; ok { + return title, nil + } + return "", nil // No title → discard (config blob or unrecognised metadata) + }) + + return m +} diff --git a/internal/server/server_test.go b/internal/server/server_test.go new file mode 100644 index 0000000..6229881 --- /dev/null +++ b/internal/server/server_test.go @@ -0,0 +1,90 @@ +package server + +import ( + "context" + "net/http" + "strings" + "testing" + + // Register the filesystem storage driver for the distribution registry. + _ "github.com/distribution/distribution/v3/registry/storage/driver/filesystem" + + "hauler.dev/go/hauler/internal/flags" +) + +func TestNewTempRegistry_StartStop(t *testing.T) { + ctx := context.Background() + srv := NewTempRegistry(ctx, t.TempDir()) + + // Start the httptest server directly to avoid the Start() method's + // retry logic which only accepts HTTP 200, while /v2 returns 401 + // from the distribution registry. + srv.Server.Start() + t.Cleanup(func() { srv.Stop() }) + + resp, err := http.Get(srv.Server.URL + "/v2") + if err != nil { + t.Fatalf("expected GET /v2 to succeed, got error: %v", err) + } + resp.Body.Close() + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("expected status 200 or 401, got %d", resp.StatusCode) + } + + // Stop and verify unreachable. 
+ srv.Stop() + + _, err = http.Get(srv.Server.URL + "/v2") + if err == nil { + t.Fatal("expected error after stopping server, got nil") + } +} + +func TestNewTempRegistry_Registry(t *testing.T) { + ctx := context.Background() + srv := NewTempRegistry(ctx, t.TempDir()) + + srv.Server.Start() + t.Cleanup(func() { srv.Stop() }) + + host := srv.Registry() + if host == "" { + t.Fatal("expected non-empty registry host") + } + if strings.Contains(host, "http://") { + t.Fatalf("registry host should not contain protocol prefix, got %q", host) + } +} + +func TestNewFile_Configuration(t *testing.T) { + ctx := context.Background() + opts := flags.ServeFilesOpts{ + RootDir: t.TempDir(), + Port: 0, + Timeout: 0, + } + + srv, err := NewFile(ctx, opts) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if srv == nil { + t.Fatal("expected non-nil server") + } +} + +func TestNewFile_DefaultPort(t *testing.T) { + ctx := context.Background() + opts := flags.ServeFilesOpts{ + RootDir: t.TempDir(), + } + + srv, err := NewFile(ctx, opts) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if srv == nil { + t.Fatal("expected non-nil server") + } +} diff --git a/pkg/apis/hauler.cattle.io/convert/convert.go b/pkg/apis/hauler.cattle.io/convert/convert.go deleted file mode 100644 index ca3e1cc..0000000 --- a/pkg/apis/hauler.cattle.io/convert/convert.go +++ /dev/null @@ -1,121 +0,0 @@ -package v1alpha1 - -import ( - "fmt" - - v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1" - v1alpha1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1" -) - -// converts v1alpha1.Files -> v1.Files -func ConvertFiles(in *v1alpha1.Files, out *v1.Files) error { - out.TypeMeta = in.TypeMeta - out.ObjectMeta = in.ObjectMeta - out.Spec.Files = make([]v1.File, len(in.Spec.Files)) - for i := range in.Spec.Files { - out.Spec.Files[i].Name = in.Spec.Files[i].Name - out.Spec.Files[i].Path = in.Spec.Files[i].Path - } - return nil -} - -// converts v1alpha1.Images -> 
v1.Images -func ConvertImages(in *v1alpha1.Images, out *v1.Images) error { - out.TypeMeta = in.TypeMeta - out.ObjectMeta = in.ObjectMeta - out.Spec.Images = make([]v1.Image, len(in.Spec.Images)) - for i := range in.Spec.Images { - out.Spec.Images[i].Name = in.Spec.Images[i].Name - out.Spec.Images[i].Platform = in.Spec.Images[i].Platform - out.Spec.Images[i].Key = in.Spec.Images[i].Key - } - return nil -} - -// converts v1alpha1.Charts -> v1.Charts -func ConvertCharts(in *v1alpha1.Charts, out *v1.Charts) error { - out.TypeMeta = in.TypeMeta - out.ObjectMeta = in.ObjectMeta - out.Spec.Charts = make([]v1.Chart, len(in.Spec.Charts)) - for i := range in.Spec.Charts { - out.Spec.Charts[i].Name = in.Spec.Charts[i].Name - out.Spec.Charts[i].RepoURL = in.Spec.Charts[i].RepoURL - out.Spec.Charts[i].Version = in.Spec.Charts[i].Version - } - return nil -} - -// converts v1alpha1.ThickCharts -> v1.ThickCharts -func ConvertThickCharts(in *v1alpha1.ThickCharts, out *v1.ThickCharts) error { - out.TypeMeta = in.TypeMeta - out.ObjectMeta = in.ObjectMeta - out.Spec.Charts = make([]v1.ThickChart, len(in.Spec.Charts)) - for i := range in.Spec.Charts { - out.Spec.Charts[i].Chart.Name = in.Spec.Charts[i].Chart.Name - out.Spec.Charts[i].Chart.RepoURL = in.Spec.Charts[i].Chart.RepoURL - out.Spec.Charts[i].Chart.Version = in.Spec.Charts[i].Chart.Version - } - return nil -} - -// converts v1alpha1.ImageTxts -> v1.ImageTxts -func ConvertImageTxts(in *v1alpha1.ImageTxts, out *v1.ImageTxts) error { - out.TypeMeta = in.TypeMeta - out.ObjectMeta = in.ObjectMeta - out.Spec.ImageTxts = make([]v1.ImageTxt, len(in.Spec.ImageTxts)) - for i := range in.Spec.ImageTxts { - out.Spec.ImageTxts[i].Ref = in.Spec.ImageTxts[i].Ref - out.Spec.ImageTxts[i].Sources.Include = append( - out.Spec.ImageTxts[i].Sources.Include, - in.Spec.ImageTxts[i].Sources.Include..., - ) - out.Spec.ImageTxts[i].Sources.Exclude = append( - out.Spec.ImageTxts[i].Sources.Exclude, - in.Spec.ImageTxts[i].Sources.Exclude..., - ) - } - 
return nil -} - -// convert v1alpha1 object to v1 object -func ConvertObject(in interface{}) (interface{}, error) { - switch src := in.(type) { - - case *v1alpha1.Files: - dst := &v1.Files{} - if err := ConvertFiles(src, dst); err != nil { - return nil, err - } - return dst, nil - - case *v1alpha1.Images: - dst := &v1.Images{} - if err := ConvertImages(src, dst); err != nil { - return nil, err - } - return dst, nil - - case *v1alpha1.Charts: - dst := &v1.Charts{} - if err := ConvertCharts(src, dst); err != nil { - return nil, err - } - return dst, nil - - case *v1alpha1.ThickCharts: - dst := &v1.ThickCharts{} - if err := ConvertThickCharts(src, dst); err != nil { - return nil, err - } - return dst, nil - - case *v1alpha1.ImageTxts: - dst := &v1.ImageTxts{} - if err := ConvertImageTxts(src, dst); err != nil { - return nil, err - } - return dst, nil - } - - return nil, fmt.Errorf("unsupported object type [%T]", in) -} diff --git a/pkg/apis/hauler.cattle.io/v1/chart.go b/pkg/apis/hauler.cattle.io/v1/chart.go index 5c8b6ac..c87897a 100644 --- a/pkg/apis/hauler.cattle.io/v1/chart.go +++ b/pkg/apis/hauler.cattle.io/v1/chart.go @@ -23,24 +23,5 @@ type Chart struct { AddImages bool `json:"add-images,omitempty"` AddDependencies bool `json:"add-dependencies,omitempty"` -} - -type ThickCharts struct { - *metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ThickChartSpec `json:"spec,omitempty"` -} - -type ThickChartSpec struct { - Charts []ThickChart `json:"charts,omitempty"` -} - -type ThickChart struct { - Chart `json:",inline,omitempty"` - ExtraImages []ChartImage `json:"extraImages,omitempty"` -} - -type ChartImage struct { - Reference string `json:"ref"` + ExcludeExtras bool `json:"exclude-extras,omitempty"` } diff --git a/pkg/apis/hauler.cattle.io/v1/image.go b/pkg/apis/hauler.cattle.io/v1/image.go index b213156..b5cc5b6 100644 --- a/pkg/apis/hauler.cattle.io/v1/image.go +++ b/pkg/apis/hauler.cattle.io/v1/image.go @@ -36,6 +36,7 @@ 
type Image struct { // Platform of the image to be pulled. If not specified, all platforms will be pulled. //Platform string `json:"key,omitempty"` - Platform string `json:"platform"` - Rewrite string `json:"rewrite"` + Platform string `json:"platform"` + Rewrite string `json:"rewrite"` + ExcludeExtras bool `json:"exclude-extras"` } diff --git a/pkg/apis/hauler.cattle.io/v1/imagetxt.go b/pkg/apis/hauler.cattle.io/v1/imagetxt.go deleted file mode 100644 index 31e73f4..0000000 --- a/pkg/apis/hauler.cattle.io/v1/imagetxt.go +++ /dev/null @@ -1,26 +0,0 @@ -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type ImageTxts struct { - *metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ImageTxtsSpec `json:"spec,omitempty"` -} - -type ImageTxtsSpec struct { - ImageTxts []ImageTxt `json:"imageTxts,omitempty"` -} - -type ImageTxt struct { - Ref string `json:"ref,omitempty"` - Sources ImageTxtSources `json:"sources,omitempty"` -} - -type ImageTxtSources struct { - Include []string `json:"include,omitempty"` - Exclude []string `json:"exclude,omitempty"` -} diff --git a/pkg/apis/hauler.cattle.io/v1alpha1/chart.go b/pkg/apis/hauler.cattle.io/v1alpha1/chart.go deleted file mode 100644 index fdf1748..0000000 --- a/pkg/apis/hauler.cattle.io/v1alpha1/chart.go +++ /dev/null @@ -1,42 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type Charts struct { - *metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ChartSpec `json:"spec,omitempty"` -} - -type ChartSpec struct { - Charts []Chart `json:"charts,omitempty"` -} - -type Chart struct { - Name string `json:"name,omitempty"` - RepoURL string `json:"repoURL,omitempty"` - Version string `json:"version,omitempty"` -} - -type ThickCharts struct { - *metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ThickChartSpec `json:"spec,omitempty"` -} - -type 
ThickChartSpec struct { - Charts []ThickChart `json:"charts,omitempty"` -} - -type ThickChart struct { - Chart `json:",inline,omitempty"` - ExtraImages []ChartImage `json:"extraImages,omitempty"` -} - -type ChartImage struct { - Reference string `json:"ref"` -} diff --git a/pkg/apis/hauler.cattle.io/v1alpha1/driver.go b/pkg/apis/hauler.cattle.io/v1alpha1/driver.go deleted file mode 100644 index 62a5b51..0000000 --- a/pkg/apis/hauler.cattle.io/v1alpha1/driver.go +++ /dev/null @@ -1,17 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type Driver struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec DriverSpec `json:"spec"` -} - -type DriverSpec struct { - Type string `json:"type"` - Version string `json:"version"` -} diff --git a/pkg/apis/hauler.cattle.io/v1alpha1/file.go b/pkg/apis/hauler.cattle.io/v1alpha1/file.go deleted file mode 100644 index 6bd9e74..0000000 --- a/pkg/apis/hauler.cattle.io/v1alpha1/file.go +++ /dev/null @@ -1,25 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type Files struct { - *metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec FileSpec `json:"spec,omitempty"` -} - -type FileSpec struct { - Files []File `json:"files,omitempty"` -} - -type File struct { - // Path is the path to the file contents, can be a local or remote path - Path string `json:"path"` - - // Name is an optional field specifying the name of the file when specified, - // it will override any dynamic name discovery from Path - Name string `json:"name,omitempty"` -} diff --git a/pkg/apis/hauler.cattle.io/v1alpha1/groupversion_info.go b/pkg/apis/hauler.cattle.io/v1alpha1/groupversion_info.go deleted file mode 100644 index 0519314..0000000 --- a/pkg/apis/hauler.cattle.io/v1alpha1/groupversion_info.go +++ /dev/null @@ -1,12 +0,0 @@ -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - - 
"hauler.dev/go/hauler/pkg/consts" -) - -var ( - ContentGroupVersion = schema.GroupVersion{Group: consts.ContentGroup, Version: "v1alpha1"} - CollectionGroupVersion = schema.GroupVersion{Group: consts.CollectionGroup, Version: "v1alpha1"} -) diff --git a/pkg/apis/hauler.cattle.io/v1alpha1/image.go b/pkg/apis/hauler.cattle.io/v1alpha1/image.go deleted file mode 100644 index a171b5d..0000000 --- a/pkg/apis/hauler.cattle.io/v1alpha1/image.go +++ /dev/null @@ -1,41 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type Images struct { - *metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ImageSpec `json:"spec,omitempty"` -} - -type ImageSpec struct { - Images []Image `json:"images,omitempty"` -} - -type Image struct { - // Name is the full location for the image, can be referenced by tags or digests - Name string `json:"name"` - - // Path is the path to the cosign public key used for verifying image signatures - //Key string `json:"key,omitempty"` - Key string `json:"key"` - - // Path is the path to the cosign public key used for verifying image signatures - //Tlog string `json:"use-tlog-verify,omitempty"` - Tlog bool `json:"use-tlog-verify"` - - // cosign keyless validation options - CertIdentity string `json:"certificate-identity"` - CertIdentityRegexp string `json:"certificate-identity-regexp"` - CertOidcIssuer string `json:"certificate-oidc-issuer"` - CertOidcIssuerRegexp string `json:"certificate-oidc-issuer-regexp"` - CertGithubWorkflowRepository string `json:"certificate-github-workflow-repository"` - - // Platform of the image to be pulled. If not specified, all platforms will be pulled. 
- //Platform string `json:"key,omitempty"` - Platform string `json:"platform"` - Rewrite string `json:"rewrite"` -} diff --git a/pkg/apis/hauler.cattle.io/v1alpha1/imagetxt.go b/pkg/apis/hauler.cattle.io/v1alpha1/imagetxt.go deleted file mode 100644 index 85e5e37..0000000 --- a/pkg/apis/hauler.cattle.io/v1alpha1/imagetxt.go +++ /dev/null @@ -1,26 +0,0 @@ -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type ImageTxts struct { - *metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec ImageTxtsSpec `json:"spec,omitempty"` -} - -type ImageTxtsSpec struct { - ImageTxts []ImageTxt `json:"imageTxts,omitempty"` -} - -type ImageTxt struct { - Ref string `json:"ref,omitempty"` - Sources ImageTxtSources `json:"sources,omitempty"` -} - -type ImageTxtSources struct { - Include []string `json:"include,omitempty"` - Exclude []string `json:"exclude,omitempty"` -} diff --git a/pkg/archives/archiver.go b/pkg/archives/archiver.go index bedbd8f..cb9320b 100644 --- a/pkg/archives/archiver.go +++ b/pkg/archives/archiver.go @@ -3,8 +3,10 @@ package archives import ( "context" "fmt" + "io" "os" "path/filepath" + "strings" "github.com/mholt/archives" "hauler.dev/go/hauler/pkg/log" @@ -102,3 +104,85 @@ func Archive(ctx context.Context, dir, outfile string, compression archives.Comp l.Debugf("archive created successfully [%s]", outfile) return nil } + +// SplitArchive splits an existing archive into chunks of at most maxBytes each. +// Chunks are named _0, _1, ... where base is the archive +// path with all extensions stripped, and ext is the compound extension (e.g. .tar.zst). +// The original archive is removed after successful splitting. 
+func SplitArchive(ctx context.Context, archivePath string, maxBytes int64) ([]string, error) { + l := log.FromContext(ctx) + + // derive base path and compound extension by stripping all extensions + base := archivePath + ext := "" + for filepath.Ext(base) != "" { + ext = filepath.Ext(base) + ext + base = strings.TrimSuffix(base, filepath.Ext(base)) + } + + f, err := os.Open(archivePath) + if err != nil { + return nil, fmt.Errorf("failed to open archive for splitting: %w", err) + } + + var chunks []string + buf := make([]byte, 32*1024) + chunkIdx := 0 + var written int64 + var outf *os.File + + for { + if outf == nil { + chunkPath := fmt.Sprintf("%s_%d%s", base, chunkIdx, ext) + outf, err = os.Create(chunkPath) + if err != nil { + f.Close() + return nil, fmt.Errorf("failed to create chunk %d: %w", chunkIdx, err) + } + chunks = append(chunks, chunkPath) + l.Debugf("creating chunk [%s]", chunkPath) + written = 0 + chunkIdx++ + } + + remaining := maxBytes - written + readSize := int64(len(buf)) + if readSize > remaining { + readSize = remaining + } + + n, readErr := f.Read(buf[:readSize]) + if n > 0 { + if _, writeErr := outf.Write(buf[:n]); writeErr != nil { + outf.Close() + f.Close() + return nil, fmt.Errorf("failed to write to chunk: %w", writeErr) + } + written += int64(n) + } + + if readErr == io.EOF { + outf.Close() + outf = nil + break + } + if readErr != nil { + outf.Close() + f.Close() + return nil, fmt.Errorf("failed to read archive: %w", readErr) + } + + if written >= maxBytes { + outf.Close() + outf = nil + } + } + + f.Close() + if err := os.Remove(archivePath); err != nil { + return nil, fmt.Errorf("failed to remove original archive after splitting: %w", err) + } + + l.Infof("split archive [%s] into %d chunk(s)", filepath.Base(archivePath), len(chunks)) + return chunks, nil +} diff --git a/pkg/archives/archives_test.go b/pkg/archives/archives_test.go new file mode 100644 index 0000000..00c7ce2 --- /dev/null +++ b/pkg/archives/archives_test.go @@ -0,0 
+1,441 @@ +package archives + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "testing" + + "github.com/mholt/archives" + "github.com/rs/zerolog" +) + +func testContext(t *testing.T) context.Context { + t.Helper() + l := zerolog.New(io.Discard) + return l.WithContext(context.Background()) +} + +func TestArchive_RoundTrip(t *testing.T) { + ctx := testContext(t) + + srcDir := t.TempDir() + files := map[string]string{ + "file1.txt": "hello world", + "subdir/file2.txt": "nested content", + "subdir/file3.json": `{"key":"value"}`, + } + for relPath, content := range files { + full := filepath.Join(srcDir, relPath) + if err := os.MkdirAll(filepath.Dir(full), 0o755); err != nil { + t.Fatalf("create parent dir for %s: %v", relPath, err) + } + if err := os.WriteFile(full, []byte(content), 0o644); err != nil { + t.Fatalf("write %s: %v", relPath, err) + } + } + + outFile := filepath.Join(t.TempDir(), "test.tar.zst") + if err := Archive(ctx, srcDir, outFile, archives.Zstd{}, archives.Tar{}); err != nil { + t.Fatalf("Archive() error: %v", err) + } + + info, err := os.Stat(outFile) + if err != nil { + t.Fatalf("archive file missing: %v", err) + } + if info.Size() == 0 { + t.Fatal("archive file is empty") + } + + dstDir := t.TempDir() + if err := Unarchive(ctx, outFile, dstDir); err != nil { + t.Fatalf("Unarchive() error: %v", err) + } + + // Archive maps files under the source directory's base name. 
+ baseName := filepath.Base(srcDir) + for relPath, expectedContent := range files { + full := filepath.Join(dstDir, baseName, relPath) + data, err := os.ReadFile(full) + if err != nil { + t.Errorf("read extracted file %s: %v", relPath, err) + continue + } + if string(data) != expectedContent { + t.Errorf("content mismatch for %s: got %q, want %q", relPath, string(data), expectedContent) + } + } +} + +func TestArchive_NonExistentDir(t *testing.T) { + ctx := testContext(t) + nonExistent := filepath.Join(t.TempDir(), "does-not-exist") + outFile := filepath.Join(t.TempDir(), "out.tar.zst") + if err := Archive(ctx, nonExistent, outFile, archives.Zstd{}, archives.Tar{}); err == nil { + t.Fatal("Archive() should return an error for a non-existent source directory") + } +} + +func TestUnarchive_ExistingHaul(t *testing.T) { + ctx := testContext(t) + + // testdata/ is two levels up from pkg/archives/ + haulPath := filepath.Join("..", "..", "testdata", "haul.tar.zst") + if _, err := os.Stat(haulPath); err != nil { + t.Skipf("testdata/haul.tar.zst not found at %s: %v", haulPath, err) + } + + dstDir := t.TempDir() + if err := Unarchive(ctx, haulPath, dstDir); err != nil { + t.Fatalf("Unarchive() error: %v", err) + } + + var indexPath string + if err := filepath.Walk(dstDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.Name() == "index.json" { + indexPath = path + } + return nil + }); err != nil { + t.Fatalf("walk extracted dir: %v", err) + } + if indexPath == "" { + t.Fatal("index.json not found in extracted haul archive") + } + + data, err := os.ReadFile(indexPath) + if err != nil { + t.Fatalf("read index.json: %v", err) + } + if !json.Valid(data) { + t.Fatal("index.json is not valid JSON") + } +} + +func TestSecurePath(t *testing.T) { + basePath := "/tmp/extract" + + tests := []struct { + name string + relativePath string + wantResult string + }{ + { + name: "normal relative path", + relativePath: "subdir/file.txt", + 
wantResult: "/tmp/extract/subdir/file.txt", + }, + { + name: "simple filename", + relativePath: "readme.txt", + wantResult: "/tmp/extract/readme.txt", + }, + // Path traversal attempts are sanitized (not rejected): "/../../../etc/passwd" + // cleans to "/etc/passwd", strips leading "/" → "etc/passwd", joined → base/etc/passwd. + { + name: "path traversal is sanitized to safe path", + relativePath: "../../../etc/passwd", + wantResult: "/tmp/extract/etc/passwd", + }, + { + name: "deeply nested traversal is sanitized", + relativePath: "a/b/../../../../etc/shadow", + wantResult: "/tmp/extract/etc/shadow", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := securePath(basePath, tt.relativePath) + if err != nil { + t.Fatalf("securePath(%q, %q) unexpected error: %v", basePath, tt.relativePath, err) + } + if result != tt.wantResult { + t.Errorf("securePath(%q, %q) = %q, want %q", basePath, tt.relativePath, result, tt.wantResult) + } + }) + } +} + +// -------------------------------------------------------------------------- +// chunkInfo +// -------------------------------------------------------------------------- + +func TestChunkInfo(t *testing.T) { + tests := []struct { + name string + path string + wantBase string + wantExt string + wantIndex int + wantOk bool + }{ + { + name: "compound extension", + path: "/tmp/haul_3.tar.zst", + wantBase: "/tmp/haul", + wantExt: ".tar.zst", + wantIndex: 3, + wantOk: true, + }, + { + name: "single extension", + path: "/tmp/archive_0.zst", + wantBase: "/tmp/archive", + wantExt: ".zst", + wantIndex: 0, + wantOk: true, + }, + { + name: "large index", + path: "/tmp/haul_42.tar.zst", + wantBase: "/tmp/haul", + wantExt: ".tar.zst", + wantIndex: 42, + wantOk: true, + }, + { + name: "no numeric suffix", + path: "/tmp/haul.tar.zst", + wantOk: false, + }, + { + name: "alphabetic suffix", + path: "/tmp/haul_abc.tar.zst", + wantOk: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + base, ext, index, ok := chunkInfo(tt.path) + if ok != tt.wantOk { + t.Fatalf("chunkInfo() ok = %v, want %v", ok, tt.wantOk) + } + if !ok { + return + } + if base != tt.wantBase { + t.Errorf("chunkInfo() base = %q, want %q", base, tt.wantBase) + } + if ext != tt.wantExt { + t.Errorf("chunkInfo() ext = %q, want %q", ext, tt.wantExt) + } + if index != tt.wantIndex { + t.Errorf("chunkInfo() index = %d, want %d", index, tt.wantIndex) + } + }) + } +} + +// -------------------------------------------------------------------------- +// SplitArchive +// -------------------------------------------------------------------------- + +func TestSplitArchive(t *testing.T) { + ctx := testContext(t) + + tests := []struct { + name string + dataSize int + maxBytes int64 + }{ + {name: "splits into multiple chunks", dataSize: 100, maxBytes: 30}, + {name: "single chunk when data fits", dataSize: 50, maxBytes: 100}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dir := t.TempDir() + archivePath := filepath.Join(dir, "haul.tar.zst") + data := make([]byte, tt.dataSize) + for i := range data { + data[i] = byte(i % 256) + } + if err := os.WriteFile(archivePath, data, 0o644); err != nil { + t.Fatal(err) + } + + chunks, err := SplitArchive(ctx, archivePath, tt.maxBytes) + if err != nil { + t.Fatalf("SplitArchive() error = %v", err) + } + if len(chunks) == 0 { + t.Fatal("SplitArchive() returned no chunks") + } + + // original archive must be removed + if _, err := os.Stat(archivePath); !os.IsNotExist(err) { + t.Error("original archive should be removed after splitting") + } + + // chunks must follow _N naming + for i, chunk := range chunks { + expected := filepath.Join(dir, fmt.Sprintf("haul_%d.tar.zst", i)) + if chunk != expected { + t.Errorf("chunk[%d] = %s, want %s", i, chunk, expected) + } + } + + // concatenating chunks must reproduce the original data + var combined []byte + for _, chunk := range chunks { + b, err := os.ReadFile(chunk) + if err != 
nil { + t.Fatal(err) + } + combined = append(combined, b...) + } + if !bytes.Equal(combined, data) { + t.Error("combined chunks do not match original data") + } + }) + } +} + +func TestSplitArchive_MissingFile(t *testing.T) { + ctx := testContext(t) + dir := t.TempDir() + _, err := SplitArchive(ctx, filepath.Join(dir, "nonexistent.tar.zst"), 1<<30) + if err == nil { + t.Fatal("SplitArchive() expected error for missing file, got nil") + } +} + +// -------------------------------------------------------------------------- +// JoinChunks +// -------------------------------------------------------------------------- + +func TestJoinChunks(t *testing.T) { + ctx := testContext(t) + + t.Run("joins multiple chunks in order", func(t *testing.T) { + dir := t.TempDir() + tempDir := t.TempDir() + for i, content := range []string{"chunk0-data", "chunk1-data", "chunk2-data"} { + if err := os.WriteFile(filepath.Join(dir, fmt.Sprintf("haul_%d.tar.zst", i)), []byte(content), 0o644); err != nil { + t.Fatal(err) + } + } + + got, err := JoinChunks(ctx, filepath.Join(dir, "haul_0.tar.zst"), tempDir) + if err != nil { + t.Fatalf("JoinChunks() error = %v", err) + } + data, err := os.ReadFile(got) + if err != nil { + t.Fatal(err) + } + if want := []byte("chunk0-datachunk1-datachunk2-data"); !bytes.Equal(data, want) { + t.Errorf("JoinChunks() content = %q, want %q", data, want) + } + }) + + t.Run("any chunk triggers full assembly", func(t *testing.T) { + dir := t.TempDir() + tempDir := t.TempDir() + for i, content := range []string{"aaa", "bbb"} { + if err := os.WriteFile(filepath.Join(dir, fmt.Sprintf("data_%d.tar.zst", i)), []byte(content), 0o644); err != nil { + t.Fatal(err) + } + } + + // pass chunk_1, not chunk_0 — should still assemble from chunk_0 + got, err := JoinChunks(ctx, filepath.Join(dir, "data_1.tar.zst"), tempDir) + if err != nil { + t.Fatalf("JoinChunks() error = %v", err) + } + data, err := os.ReadFile(got) + if err != nil { + t.Fatal(err) + } + if want := 
[]byte("aaabbb"); !bytes.Equal(data, want) { + t.Errorf("JoinChunks() content = %q, want %q", data, want) + } + }) + + t.Run("non-chunk file returned unchanged", func(t *testing.T) { + dir := t.TempDir() + nonChunk := filepath.Join(dir, "haul.tar.zst") + if err := os.WriteFile(nonChunk, []byte("not-a-chunk"), 0o644); err != nil { + t.Fatal(err) + } + got, err := JoinChunks(ctx, nonChunk, t.TempDir()) + if err != nil { + t.Fatalf("JoinChunks() error = %v", err) + } + if got != nonChunk { + t.Errorf("JoinChunks() = %s, want %s (unchanged)", got, nonChunk) + } + }) + + t.Run("non-numeric suffix files excluded", func(t *testing.T) { + dir := t.TempDir() + tempDir := t.TempDir() + if err := os.WriteFile(filepath.Join(dir, "haul_0.tar.zst"), []byte("valid"), 0o644); err != nil { + t.Fatal(err) + } + // glob matches this but chunkInfo rejects it + if err := os.WriteFile(filepath.Join(dir, "haul_foo.tar.zst"), []byte("invalid"), 0o644); err != nil { + t.Fatal(err) + } + + got, err := JoinChunks(ctx, filepath.Join(dir, "haul_0.tar.zst"), tempDir) + if err != nil { + t.Fatalf("JoinChunks() error = %v", err) + } + data, err := os.ReadFile(got) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(data, []byte("valid")) { + t.Errorf("JoinChunks() included non-numeric suffix file; content = %q", data) + } + }) +} + +// -------------------------------------------------------------------------- +// SplitArchive + JoinChunks round-trip +// -------------------------------------------------------------------------- + +func TestSplitJoinChunks_RoundTrip(t *testing.T) { + ctx := testContext(t) + + original := make([]byte, 1000) + for i := range original { + original[i] = byte(i % 256) + } + + dir := t.TempDir() + archivePath := filepath.Join(dir, "haul.tar.zst") + if err := os.WriteFile(archivePath, original, 0o644); err != nil { + t.Fatal(err) + } + + chunks, err := SplitArchive(ctx, archivePath, 100) + if err != nil { + t.Fatalf("SplitArchive() error = %v", err) + } + if 
len(chunks) == 0 { + t.Fatal("SplitArchive() returned no chunks") + } + + joined, err := JoinChunks(ctx, chunks[0], t.TempDir()) + if err != nil { + t.Fatalf("JoinChunks() error = %v", err) + } + + got, err := os.ReadFile(joined) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(got, original) { + t.Error("round-trip: joined data does not match original") + } +} diff --git a/pkg/archives/unarchiver.go b/pkg/archives/unarchiver.go index 722f195..7678f26 100644 --- a/pkg/archives/unarchiver.go +++ b/pkg/archives/unarchiver.go @@ -6,6 +6,9 @@ import ( "io" "os" "path/filepath" + "regexp" + "sort" + "strconv" "strings" "github.com/mholt/archives" @@ -156,3 +159,86 @@ func Unarchive(ctx context.Context, tarball, dst string) error { l.Infof("unarchiving completed successfully") return nil } + +var chunkSuffixRe = regexp.MustCompile(`^(.+)_(\d+)$`) + +// chunkInfo checks whether archivePath matches the chunk naming pattern (_N). +// Returns the base path (without index), compound extension, numeric index, and whether it matched. +func chunkInfo(archivePath string) (base, ext string, index int, ok bool) { + dir := filepath.Dir(archivePath) + name := filepath.Base(archivePath) + + // strip compound extension (e.g. .tar.zst) + nameBase := name + nameExt := "" + for filepath.Ext(nameBase) != "" { + nameExt = filepath.Ext(nameBase) + nameExt + nameBase = strings.TrimSuffix(nameBase, filepath.Ext(nameBase)) + } + + m := chunkSuffixRe.FindStringSubmatch(nameBase) + if m == nil { + return "", "", 0, false + } + + idx, _ := strconv.Atoi(m[2]) + return filepath.Join(dir, m[1]), nameExt, idx, true +} + +// JoinChunks detects whether archivePath is a chunk file and, if so, finds all +// sibling chunks, concatenates them in numeric order into a single file in tempDir, +// and returns the path to the joined file. If archivePath is not a chunk, it is +// returned unchanged. 
+func JoinChunks(ctx context.Context, archivePath, tempDir string) (string, error) { + l := log.FromContext(ctx) + + base, ext, _, ok := chunkInfo(archivePath) + if !ok { + return archivePath, nil + } + + all, err := filepath.Glob(base + "_*" + ext) + if err != nil { + return archivePath, nil + } + var matches []string + for _, m := range all { + if _, _, _, ok := chunkInfo(m); ok { + matches = append(matches, m) + } + } + if len(matches) == 0 { + return archivePath, nil + } + + sort.Slice(matches, func(i, j int) bool { + _, _, idxI, _ := chunkInfo(matches[i]) + _, _, idxJ, _ := chunkInfo(matches[j]) + return idxI < idxJ + }) + + l.Debugf("joining %d chunk(s) for [%s]", len(matches), base) + + joinedPath := filepath.Join(tempDir, filepath.Base(base)+ext) + outf, err := os.Create(joinedPath) + if err != nil { + return "", fmt.Errorf("failed to create joined archive: %w", err) + } + defer outf.Close() + + for _, chunk := range matches { + l.Debugf("joining chunk [%s]", chunk) + cf, err := os.Open(chunk) + if err != nil { + return "", fmt.Errorf("failed to open chunk [%s]: %w", chunk, err) + } + if _, err := io.Copy(outf, cf); err != nil { + cf.Close() + return "", fmt.Errorf("failed to copy chunk [%s]: %w", chunk, err) + } + cf.Close() + } + + l.Infof("joined %d chunk(s) into [%s]", len(matches), filepath.Base(joinedPath)) + return joinedPath, nil +} diff --git a/pkg/artifacts/file/file.go b/pkg/artifacts/file/file.go index 214c909..0adafa0 100644 --- a/pkg/artifacts/file/file.go +++ b/pkg/artifacts/file/file.go @@ -6,6 +6,7 @@ import ( gv1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/partial" gtypes "github.com/google/go-containerregistry/pkg/v1/types" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "hauler.dev/go/hauler/pkg/artifacts" "hauler.dev/go/hauler/pkg/consts" @@ -90,6 +91,13 @@ func (f *File) compute() error { return err } + // Manually preserve the Title annotation from the layer + // The 
layer was created with this annotation in getter.LayerFrom + if layer.Annotations == nil { + layer.Annotations = make(map[string]string) + } + layer.Annotations[ocispec.AnnotationTitle] = f.client.Name(f.Path) + cfg := f.client.Config(f.Path) if cfg == nil { cfg = f.client.Config(f.Path) diff --git a/pkg/artifacts/image/image.go b/pkg/artifacts/image/image.go index 6c27849..993ca8e 100644 --- a/pkg/artifacts/image/image.go +++ b/pkg/artifacts/image/image.go @@ -71,10 +71,10 @@ func IsMultiArchImage(name string, opts ...remote.Option) (bool, error) { _, err = desc.ImageIndex() if err != nil { - // If the descriptor could not be converted to an image index, it's not a multi-arch image + // if the descriptor could not be converted to an image index... it's not a multi-arch image return false, nil } - // If the descriptor could be converted to an image index, it's a multi-arch image + // if the descriptor could be converted to an image index... it's a multi-arch image return true, nil } diff --git a/pkg/collection/chart/chart.go b/pkg/collection/chart/chart.go deleted file mode 100644 index 9543b49..0000000 --- a/pkg/collection/chart/chart.go +++ /dev/null @@ -1,107 +0,0 @@ -package chart - -import ( - "helm.sh/helm/v3/pkg/action" - - "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1" - "hauler.dev/go/hauler/pkg/artifacts" - "hauler.dev/go/hauler/pkg/artifacts/image" - "hauler.dev/go/hauler/pkg/content/chart" - "hauler.dev/go/hauler/pkg/reference" -) - -var _ artifacts.OCICollection = (*tchart)(nil) - -// tchart is a thick chart that includes all the dependent images as well as the chart itself -type tchart struct { - chart *chart.Chart - config v1.ThickChart - - computed bool - contents map[string]artifacts.OCI -} - -func NewThickChart(cfg v1.ThickChart, opts *action.ChartPathOptions) (artifacts.OCICollection, error) { - o, err := chart.NewChart(cfg.Chart.Name, opts) - if err != nil { - return nil, err - } - - return &tchart{ - chart: o, - config: cfg, - contents: 
make(map[string]artifacts.OCI), - }, nil -} - -func (c *tchart) Contents() (map[string]artifacts.OCI, error) { - if err := c.compute(); err != nil { - return nil, err - } - return c.contents, nil -} - -func (c *tchart) compute() error { - if c.computed { - return nil - } - - if err := c.dependentImages(); err != nil { - return err - } - if err := c.chartContents(); err != nil { - return err - } - if err := c.extraImages(); err != nil { - return err - } - - c.computed = true - return nil -} - -func (c *tchart) chartContents() error { - ch, err := c.chart.Load() - if err != nil { - return err - } - - ref, err := reference.NewTagged(ch.Name(), ch.Metadata.Version) - if err != nil { - return err - } - c.contents[ref.Name()] = c.chart - return nil -} - -func (c *tchart) dependentImages() error { - ch, err := c.chart.Load() - if err != nil { - return err - } - - imgs, err := ImagesInChart(ch) - if err != nil { - return err - } - - for _, img := range imgs.Spec.Images { - i, err := image.NewImage(img.Name) - if err != nil { - return err - } - c.contents[img.Name] = i - } - return nil -} - -func (c *tchart) extraImages() error { - for _, img := range c.config.ExtraImages { - i, err := image.NewImage(img.Reference) - if err != nil { - return err - } - c.contents[img.Reference] = i - } - return nil -} diff --git a/pkg/collection/chart/dependents.go b/pkg/collection/chart/dependents.go deleted file mode 100644 index 14261f3..0000000 --- a/pkg/collection/chart/dependents.go +++ /dev/null @@ -1,129 +0,0 @@ -package chart - -import ( - "bufio" - "bytes" - "io" - "strings" - - "helm.sh/helm/v3/pkg/action" - helmchart "helm.sh/helm/v3/pkg/chart" - "helm.sh/helm/v3/pkg/chartutil" - "helm.sh/helm/v3/pkg/kube/fake" - "helm.sh/helm/v3/pkg/storage" - "helm.sh/helm/v3/pkg/storage/driver" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/util/jsonpath" - - "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1" -) - -var defaultKnownImagePaths = []string{ - // Deployments & 
DaemonSets - "{.spec.template.spec.initContainers[*].image}", - "{.spec.template.spec.containers[*].image}", - - // Pods - "{.spec.initContainers[*].image}", - "{.spec.containers[*].image}", -} - -// ImagesInChart will render a chart and identify all dependent images from it -func ImagesInChart(c *helmchart.Chart) (v1.Images, error) { - docs, err := template(c) - if err != nil { - return v1.Images{}, err - } - - var images []v1.Image - reader := yaml.NewYAMLReader(bufio.NewReader(strings.NewReader(docs))) - for { - raw, err := reader.Read() - if err == io.EOF { - break - } - if err != nil { - return v1.Images{}, err - } - - found := find(raw, defaultKnownImagePaths...) - for _, f := range found { - images = append(images, v1.Image{Name: f}) - } - } - - ims := v1.Images{ - Spec: v1.ImageSpec{ - Images: images, - }, - } - return ims, nil -} - -func template(c *helmchart.Chart) (string, error) { - s := storage.Init(driver.NewMemory()) - - templateCfg := &action.Configuration{ - RESTClientGetter: nil, - Releases: s, - KubeClient: &fake.PrintingKubeClient{Out: io.Discard}, - Capabilities: chartutil.DefaultCapabilities, - Log: func(format string, v ...interface{}) {}, - } - - // TODO: Do we need values if we're claiming this is best effort image detection? 
- // Justification being: if users are relying on us to get images from their values, they could just add images to the []ImagesInChart spec of the Store api - vals := make(map[string]interface{}) - - client := action.NewInstall(templateCfg) - client.ReleaseName = "dry" - client.DryRun = true - client.Replace = true - client.ClientOnly = true - client.IncludeCRDs = true - - release, err := client.Run(c, vals) - if err != nil { - return "", err - } - - return release.Manifest, nil -} - -func find(data []byte, paths ...string) []string { - var ( - pathMatches []string - obj interface{} - ) - - if err := yaml.Unmarshal(data, &obj); err != nil { - return nil - } - j := jsonpath.New("") - j.AllowMissingKeys(true) - - for _, p := range paths { - r, err := parseJSONPath(obj, j, p) - if err != nil { - continue - } - - pathMatches = append(pathMatches, r...) - } - return pathMatches -} - -func parseJSONPath(data interface{}, parser *jsonpath.JSONPath, template string) ([]string, error) { - buf := new(bytes.Buffer) - if err := parser.Parse(template); err != nil { - return nil, err - } - - if err := parser.Execute(buf, data); err != nil { - return nil, err - } - - f := func(s rune) bool { return s == ' ' } - r := strings.FieldsFunc(buf.String(), f) - return r, nil -} diff --git a/pkg/collection/imagetxt/imagetxt.go b/pkg/collection/imagetxt/imagetxt.go deleted file mode 100644 index 60f25dc..0000000 --- a/pkg/collection/imagetxt/imagetxt.go +++ /dev/null @@ -1,232 +0,0 @@ -package imagetxt - -import ( - "bufio" - "context" - "fmt" - "io" - "os" - "strings" - "sync" - - "github.com/google/go-containerregistry/pkg/name" - - artifact "hauler.dev/go/hauler/pkg/artifacts" - "hauler.dev/go/hauler/pkg/artifacts/image" - "hauler.dev/go/hauler/pkg/getter" - "hauler.dev/go/hauler/pkg/log" -) - -type ImageTxt struct { - Ref string - IncludeSources map[string]bool - ExcludeSources map[string]bool - - lock *sync.Mutex - client *getter.Client - computed bool - contents 
map[string]artifact.OCI -} - -var _ artifact.OCICollection = (*ImageTxt)(nil) - -type Option interface { - Apply(*ImageTxt) error -} - -type withIncludeSources []string - -func (o withIncludeSources) Apply(it *ImageTxt) error { - if it.IncludeSources == nil { - it.IncludeSources = make(map[string]bool) - } - for _, s := range o { - it.IncludeSources[s] = true - } - return nil -} - -func WithIncludeSources(include ...string) Option { - return withIncludeSources(include) -} - -type withExcludeSources []string - -func (o withExcludeSources) Apply(it *ImageTxt) error { - if it.ExcludeSources == nil { - it.ExcludeSources = make(map[string]bool) - } - for _, s := range o { - it.ExcludeSources[s] = true - } - return nil -} - -func WithExcludeSources(exclude ...string) Option { - return withExcludeSources(exclude) -} - -func New(ref string, opts ...Option) (*ImageTxt, error) { - it := &ImageTxt{ - Ref: ref, - - client: getter.NewClient(getter.ClientOptions{}), - lock: &sync.Mutex{}, - } - - for i, o := range opts { - if err := o.Apply(it); err != nil { - return nil, fmt.Errorf("invalid option %d: %v", i, err) - } - } - - return it, nil -} - -func (it *ImageTxt) Contents() (map[string]artifact.OCI, error) { - it.lock.Lock() - defer it.lock.Unlock() - if !it.computed { - if err := it.compute(); err != nil { - return nil, fmt.Errorf("compute OCI layout: %v", err) - } - it.computed = true - } - return it.contents, nil -} - -func (it *ImageTxt) compute() error { - // TODO - pass in logger from context - l := log.NewLogger(os.Stdout) - - it.contents = make(map[string]artifact.OCI) - - ctx := context.TODO() - - rc, err := it.client.ContentFrom(ctx, it.Ref) - if err != nil { - return fmt.Errorf("fetch image.txt ref %s: %w", it.Ref, err) - } - defer rc.Close() - - entries, err := splitImagesTxt(rc) - if err != nil { - return fmt.Errorf("parse image.txt ref %s: %v", it.Ref, err) - } - - foundSources := make(map[string]bool) - for _, e := range entries { - for s := range e.Sources { 
- foundSources[s] = true - } - } - - var pullAll bool - targetSources := make(map[string]bool) - - if len(foundSources) == 0 || (len(it.IncludeSources) == 0 && len(it.ExcludeSources) == 0) { - // pull all found images - pullAll = true - - if len(foundSources) == 0 { - l.Infof("image txt file appears to have no sources; pulling all found images") - if len(it.IncludeSources) != 0 || len(it.ExcludeSources) != 0 { - l.Warnf("ImageTxt provided include or exclude sources; ignoring") - } - } else if len(it.IncludeSources) == 0 && len(it.ExcludeSources) == 0 { - l.Infof("image-sources txt file not filtered; pulling all found images") - } - } else { - // determine sources to pull - if len(it.IncludeSources) != 0 && len(it.ExcludeSources) != 0 { - l.Warnf("ImageTxt provided include and exclude sources; using only include sources") - } - - if len(it.IncludeSources) != 0 { - targetSources = it.IncludeSources - } else { - for s := range foundSources { - targetSources[s] = true - } - for s := range it.ExcludeSources { - delete(targetSources, s) - } - } - var targetSourcesArr []string - for s := range targetSources { - targetSourcesArr = append(targetSourcesArr, s) - } - l.Infof("pulling images covering sources %s", strings.Join(targetSourcesArr, ", ")) - } - - for _, e := range entries { - var matchesSourceFilter bool - if pullAll { - l.Infof("pulling image %s", e.Reference) - } else { - for s := range e.Sources { - if targetSources[s] { - matchesSourceFilter = true - l.Infof("pulling image %s (matched source %s)", e.Reference, s) - break - } - } - } - - if pullAll || matchesSourceFilter { - curImage, err := image.NewImage(e.Reference.String()) - if err != nil { - return fmt.Errorf("pull image %s: %v", e.Reference, err) - } - it.contents[e.Reference.String()] = curImage - } - } - - return nil -} - -type imageTxtEntry struct { - Reference name.Reference - Sources map[string]bool -} - -func splitImagesTxt(r io.Reader) ([]imageTxtEntry, error) { - var entries []imageTxtEntry - 
scanner := bufio.NewScanner(r) - for scanner.Scan() { - curEntry := imageTxtEntry{ - Sources: make(map[string]bool), - } - - lineContent := scanner.Text() - if lineContent == "" || strings.HasPrefix(lineContent, "#") { - // skip past empty and commented lines - continue - } - splitContent := strings.Split(lineContent, " ") - if len(splitContent) > 2 { - return nil, fmt.Errorf( - "invalid image.txt format: must contain only an image reference and sources separated by space; invalid line: %q", - lineContent) - } - - curRef, err := name.ParseReference(splitContent[0]) - if err != nil { - return nil, fmt.Errorf("invalid reference %s: %v", splitContent[0], err) - } - curEntry.Reference = curRef - - if len(splitContent) == 2 { - for _, source := range strings.Split(splitContent[1], ",") { - curEntry.Sources[source] = true - } - } - - entries = append(entries, curEntry) - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("scan contents: %v", err) - } - - return entries, nil -} diff --git a/pkg/collection/imagetxt/imagetxt_test.go b/pkg/collection/imagetxt/imagetxt_test.go deleted file mode 100644 index 04264c4..0000000 --- a/pkg/collection/imagetxt/imagetxt_test.go +++ /dev/null @@ -1,209 +0,0 @@ -package imagetxt - -import ( - "errors" - "fmt" - "net/http" - "net/http/httptest" - "os" - "testing" - - "hauler.dev/go/hauler/pkg/artifacts" - "hauler.dev/go/hauler/pkg/artifacts/image" -) - -var ( - ErrRefNotFound = errors.New("ref not found") - ErrRefNotImage = errors.New("ref is not image") - ErrExtraRefsFound = errors.New("extra refs found in contents") -) - -var ( - testServer *httptest.Server -) - -func TestMain(m *testing.M) { - setup() - code := m.Run() - teardown() - os.Exit(code) -} - -func setup() { - dir := http.Dir("./testdata/http/") - h := http.FileServer(dir) - testServer = httptest.NewServer(h) -} - -func teardown() { - if testServer != nil { - testServer.Close() - } -} - -type failKind string - -const ( - failKindNew = failKind("New") - 
failKindContents = failKind("Contents") -) - -func checkError(checkedFailKind failKind) func(*testing.T, error, bool, failKind) { - return func(cet *testing.T, err error, testShouldFail bool, testFailKind failKind) { - if err != nil { - // if error should not have happened at all OR error should have happened - // at a different point, test failed - if !testShouldFail || testFailKind != checkedFailKind { - cet.Fatalf("unexpected error at %s: %v", checkedFailKind, err) - } - // test should fail at this point, test passed - return - } - // if no error occurred but error should have happened at this point, test - // failed - if testShouldFail && testFailKind == checkedFailKind { - cet.Fatalf("unexpected nil error at %s", checkedFailKind) - } - } -} - -func TestImageTxtCollection(t *testing.T) { - type testEntry struct { - Name string - Ref string - IncludeSources []string - ExcludeSources []string - ExpectedImages []string - ShouldFail bool - FailKind failKind - } - tt := []testEntry{ - { - Name: "http ref basic", - Ref: fmt.Sprintf("%s/images-http.txt", testServer.URL), - ExpectedImages: []string{ - "busybox", - "nginx:1.19", - "rancher/hyperkube:v1.21.7-rancher1", - "docker.io/rancher/klipper-lb:v0.3.4", - "quay.io/jetstack/cert-manager-controller:v1.6.1", - }, - }, - { - Name: "http ref sources format pull all", - Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL), - ExpectedImages: []string{ - "busybox", - "nginx:1.19", - "rancher/hyperkube:v1.21.7-rancher1", - "docker.io/rancher/klipper-lb:v0.3.4", - "quay.io/jetstack/cert-manager-controller:v1.6.1", - }, - }, - { - Name: "http ref sources format include sources A", - Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL), - IncludeSources: []string{ - "core", "rke", - }, - ExpectedImages: []string{ - "busybox", - "nginx:1.19", - "rancher/hyperkube:v1.21.7-rancher1", - }, - }, - { - Name: "http ref sources format include sources B", - Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL), - 
IncludeSources: []string{ - "nginx", "rancher", "cert-manager", - }, - ExpectedImages: []string{ - "nginx:1.19", - "rancher/hyperkube:v1.21.7-rancher1", - "docker.io/rancher/klipper-lb:v0.3.4", - "quay.io/jetstack/cert-manager-controller:v1.6.1", - }, - }, - { - Name: "http ref sources format exclude sources A", - Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL), - ExcludeSources: []string{ - "cert-manager", - }, - ExpectedImages: []string{ - "busybox", - "nginx:1.19", - "rancher/hyperkube:v1.21.7-rancher1", - "docker.io/rancher/klipper-lb:v0.3.4", - }, - }, - { - Name: "http ref sources format exclude sources B", - Ref: fmt.Sprintf("%s/images-src-http.txt", testServer.URL), - ExcludeSources: []string{ - "core", - }, - ExpectedImages: []string{ - "nginx:1.19", - "rancher/hyperkube:v1.21.7-rancher1", - "docker.io/rancher/klipper-lb:v0.3.4", - "quay.io/jetstack/cert-manager-controller:v1.6.1", - }, - }, - { - Name: "local file ref", - Ref: "./testdata/images-file.txt", - ExpectedImages: []string{ - "busybox", - "nginx:1.19", - "rancher/hyperkube:v1.21.7-rancher1", - "docker.io/rancher/klipper-lb:v0.3.4", - "quay.io/jetstack/cert-manager-controller:v1.6.1", - }, - }, - } - - checkErrorNew := checkError(failKindNew) - checkErrorContents := checkError(failKindContents) - - for _, curTest := range tt { - t.Run(curTest.Name, func(innerT *testing.T) { - curImageTxt, err := New(curTest.Ref, - WithIncludeSources(curTest.IncludeSources...), - WithExcludeSources(curTest.ExcludeSources...), - ) - checkErrorNew(innerT, err, curTest.ShouldFail, curTest.FailKind) - - ociContents, err := curImageTxt.Contents() - checkErrorContents(innerT, err, curTest.ShouldFail, curTest.FailKind) - - if err := checkImages(ociContents, curTest.ExpectedImages); err != nil { - innerT.Fatal(err) - } - }) - } -} - -func checkImages(content map[string]artifacts.OCI, refs []string) error { - contentCopy := make(map[string]artifacts.OCI, len(content)) - for k, v := range content { - 
contentCopy[k] = v - } - for _, ref := range refs { - target, ok := content[ref] - if !ok { - return fmt.Errorf("ref %s: %w", ref, ErrRefNotFound) - } - if _, ok := target.(*image.Image); !ok { - return fmt.Errorf("got underlying type %T: %w", target, ErrRefNotImage) - } - delete(contentCopy, ref) - } - - if len(contentCopy) != 0 { - return ErrExtraRefsFound - } - - return nil -} diff --git a/pkg/collection/imagetxt/testdata/http/images-http.txt b/pkg/collection/imagetxt/testdata/http/images-http.txt deleted file mode 100644 index 0ae5fa8..0000000 --- a/pkg/collection/imagetxt/testdata/http/images-http.txt +++ /dev/null @@ -1,5 +0,0 @@ -busybox -nginx:1.19 -rancher/hyperkube:v1.21.7-rancher1 -docker.io/rancher/klipper-lb:v0.3.4 -quay.io/jetstack/cert-manager-controller:v1.6.1 diff --git a/pkg/collection/imagetxt/testdata/http/images-src-http.txt b/pkg/collection/imagetxt/testdata/http/images-src-http.txt deleted file mode 100644 index 013abe6..0000000 --- a/pkg/collection/imagetxt/testdata/http/images-src-http.txt +++ /dev/null @@ -1,5 +0,0 @@ -busybox core -nginx:1.19 core,nginx -rancher/hyperkube:v1.21.7-rancher1 rancher,rke -docker.io/rancher/klipper-lb:v0.3.4 rancher,k3s -quay.io/jetstack/cert-manager-controller:v1.6.1 cert-manager diff --git a/pkg/collection/imagetxt/testdata/images-file.txt b/pkg/collection/imagetxt/testdata/images-file.txt deleted file mode 100644 index 0ae5fa8..0000000 --- a/pkg/collection/imagetxt/testdata/images-file.txt +++ /dev/null @@ -1,5 +0,0 @@ -busybox -nginx:1.19 -rancher/hyperkube:v1.21.7-rancher1 -docker.io/rancher/klipper-lb:v0.3.4 -quay.io/jetstack/cert-manager-controller:v1.6.1 diff --git a/pkg/consts/consts.go b/pkg/consts/consts.go index 34d4249..6fd8d6d 100644 --- a/pkg/consts/consts.go +++ b/pkg/consts/consts.go @@ -42,16 +42,30 @@ const ( HaulerVendorPrefix = "vnd.hauler" // annotation keys - ContainerdImageNameKey = "io.containerd.image.name" - KindAnnotationName = "kind" - KindAnnotationImage = 
"dev.cosignproject.cosign/image" - KindAnnotationIndex = "dev.cosignproject.cosign/imageIndex" - ImageAnnotationKey = "hauler.dev/key" - ImageAnnotationPlatform = "hauler.dev/platform" - ImageAnnotationRegistry = "hauler.dev/registry" - ImageAnnotationTlog = "hauler.dev/use-tlog-verify" - ImageAnnotationRewrite = "hauler.dev/rewrite" - ImageRefKey = "org.opencontainers.image.ref.name" + ContainerdImageNameKey = "io.containerd.image.name" + KindAnnotationName = "kind" + KindAnnotationImage = "dev.hauler/image" + KindAnnotationIndex = "dev.hauler/imageIndex" + KindAnnotationSigs = "dev.hauler/sigs" + KindAnnotationAtts = "dev.hauler/atts" + KindAnnotationSboms = "dev.hauler/sboms" + // KindAnnotationReferrers is the kind prefix for OCI 1.1 referrer manifests (cosign v3 + // new-bundle-format). Each referrer gets a unique kind with the referrer manifest digest + // appended (e.g. "dev.hauler/referrers/sha256hex") so multiple referrers for the same + // base image coexist in the OCI index. + KindAnnotationReferrers = "dev.hauler/referrers" + + // Sigstore / OCI 1.1 artifact media types used by cosign v3 new-bundle-format. 
+ SigstoreBundleMediaType = "application/vnd.dev.sigstore.bundle.v0.3+json" + OCIEmptyConfigMediaType = "application/vnd.oci.empty.v1+json" + + ImageAnnotationKey = "hauler.dev/key" + ImageAnnotationPlatform = "hauler.dev/platform" + ImageAnnotationRegistry = "hauler.dev/registry" + ImageAnnotationTlog = "hauler.dev/use-tlog-verify" + ImageAnnotationRewrite = "hauler.dev/rewrite" + ImageAnnotationExcludeExtras = "hauler.dev/exclude-extras" + ImageRefKey = "org.opencontainers.image.ref.name" // cosign keyless validation options ImageAnnotationCertIdentity = "hauler.dev/certificate-identity" @@ -61,12 +75,10 @@ const ( ImageAnnotationCertGithubWorkflowRepository = "hauler.dev/certificate-github-workflow-repository" // content kinds - ImagesContentKind = "Images" - ChartsContentKind = "Charts" - FilesContentKind = "Files" - DriverContentKind = "Driver" - ImageTxtsContentKind = "ImageTxts" - ChartsCollectionKind = "ThickCharts" + ImagesContentKind = "Images" + ChartsContentKind = "Charts" + FilesContentKind = "Files" + // DriverContentKind = "Driver" // content groups ContentGroup = "content.hauler.cattle.io" diff --git a/pkg/consts/migrate.go b/pkg/consts/migrate.go new file mode 100644 index 0000000..9ce8241 --- /dev/null +++ b/pkg/consts/migrate.go @@ -0,0 +1,19 @@ +package consts + +import "strings" + +// NormalizeLegacyKind translates old dev.cosignproject.cosign kind annotation +// values to their dev.hauler equivalents. Returns the input unchanged if it is +// already a current value or empty. 
+// +// This handles all cases including the dynamic referrer suffix: +// +// dev.cosignproject.cosign/referrers/ → dev.hauler/referrers/ +func NormalizeLegacyKind(kind string) string { + const oldPrefix = "dev.cosignproject.cosign" + const newPrefix = "dev.hauler" + if strings.HasPrefix(kind, oldPrefix) { + return newPrefix + kind[len(oldPrefix):] + } + return kind +} diff --git a/pkg/consts/migrate_test.go b/pkg/consts/migrate_test.go new file mode 100644 index 0000000..e4e1eba --- /dev/null +++ b/pkg/consts/migrate_test.go @@ -0,0 +1,30 @@ +package consts + +import "testing" + +func TestNormalizeLegacyKind(t *testing.T) { + tests := []struct { + input string + want string + }{ + // Old dev.cosignproject.cosign values → new dev.hauler equivalents + {"dev.cosignproject.cosign/image", "dev.hauler/image"}, + {"dev.cosignproject.cosign/imageIndex", "dev.hauler/imageIndex"}, + {"dev.cosignproject.cosign/sigs", "dev.hauler/sigs"}, + {"dev.cosignproject.cosign/atts", "dev.hauler/atts"}, + {"dev.cosignproject.cosign/sboms", "dev.hauler/sboms"}, + {"dev.cosignproject.cosign/referrers/abc123def456", "dev.hauler/referrers/abc123def456"}, + // Already-new values pass through unchanged + {"dev.hauler/image", "dev.hauler/image"}, + {"dev.hauler/imageIndex", "dev.hauler/imageIndex"}, + {"dev.hauler/referrers/abc123", "dev.hauler/referrers/abc123"}, + // Empty string passes through unchanged + {"", ""}, + } + for _, tt := range tests { + got := NormalizeLegacyKind(tt.input) + if got != tt.want { + t.Errorf("NormalizeLegacyKind(%q) = %q, want %q", tt.input, got, tt.want) + } + } +} diff --git a/pkg/content/content.go b/pkg/content/content.go index 321c13b..4355a24 100644 --- a/pkg/content/content.go +++ b/pkg/content/content.go @@ -8,7 +8,6 @@ import ( "k8s.io/apimachinery/pkg/util/yaml" v1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1" - v1alpha1 "hauler.dev/go/hauler/pkg/apis/hauler.cattle.io/v1alpha1" ) func Load(data []byte) (schema.ObjectKind, error) { @@ -26,12 +25,10 
@@ func Load(data []byte) (schema.ObjectKind, error) { } gv := tm.GroupVersionKind().GroupVersion() - // allow v1 and v1alpha1 content/collection + // allow v1 content and collections if gv != v1.ContentGroupVersion && - gv != v1.CollectionGroupVersion && - gv != v1alpha1.ContentGroupVersion && - gv != v1alpha1.CollectionGroupVersion { - return nil, fmt.Errorf("unrecognized content/collection [%s] with [kind=%s]", tm.APIVersion, tm.Kind) + gv != v1.CollectionGroupVersion { + return nil, fmt.Errorf("unrecognized content or collection [%s] with [kind=%s]", tm.APIVersion, tm.Kind) } return &tm, nil diff --git a/pkg/content/oci.go b/pkg/content/oci.go index 9f5773c..39bc3c6 100644 --- a/pkg/content/oci.go +++ b/pkg/content/oci.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "maps" "os" "path/filepath" "sort" @@ -17,14 +18,12 @@ import ( "github.com/containerd/containerd/remotes" "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "oras.land/oras-go/pkg/content" - "oras.land/oras-go/pkg/target" "hauler.dev/go/hauler/pkg/consts" "hauler.dev/go/hauler/pkg/reference" ) -var _ target.Target = (*OCI)(nil) +var _ Target = (*OCI)(nil) type OCI struct { root string @@ -76,6 +75,7 @@ func (o *OCI) LoadIndex() error { Versioned: specs.Versioned{ SchemaVersion: 2, }, + MediaType: ocispec.MediaTypeImageIndex, } return nil } @@ -88,15 +88,31 @@ func (o *OCI) LoadIndex() error { for _, desc := range o.index.Manifests { key, err := reference.Parse(desc.Annotations[ocispec.AnnotationRefName]) if err != nil { - return err + // skip malformed entries rather than making the entire store unreadable + continue } + // Set default kind if missing; normalize legacy dev.cosignproject.cosign values. 
+ kind := desc.Annotations[consts.KindAnnotationName] + kind = consts.NormalizeLegacyKind(kind) + if kind == "" { + kind = consts.KindAnnotationImage + } + + // Write the normalized kind back into a copy of the annotations map so + // that Walk() callers receive descriptors with dev.hauler/... values. + // We copy the map to avoid mutating the slice element's shared map. + normalized := make(map[string]string, len(desc.Annotations)+1) + maps.Copy(normalized, desc.Annotations) + normalized[consts.KindAnnotationName] = kind + desc.Annotations = normalized + if strings.TrimSpace(key.String()) != "--" { switch key.(type) { case name.Digest: - o.nameMap.Store(fmt.Sprintf("%s-%s", key.Context().String(), desc.Annotations[consts.KindAnnotationName]), desc) + o.nameMap.Store(fmt.Sprintf("%s-%s", key.Context().String(), kind), desc) case name.Tag: - o.nameMap.Store(fmt.Sprintf("%s-%s", key.String(), desc.Annotations[consts.KindAnnotationName]), desc) + o.nameMap.Store(fmt.Sprintf("%s-%s", key.String(), kind), desc) } } } @@ -124,7 +140,7 @@ func (o *OCI) SaveIndex() error { kindI := descs[i].Annotations["kind"] kindJ := descs[j].Annotations["kind"] - // Objects with the prefix of "dev.cosignproject.cosign/image" should be at the top. + // Objects with the prefix of KindAnnotationImage should be at the top. if strings.HasPrefix(kindI, consts.KindAnnotationImage) && !strings.HasPrefix(kindJ, consts.KindAnnotationImage) { return true } else if !strings.HasPrefix(kindI, consts.KindAnnotationImage) && strings.HasPrefix(kindJ, consts.KindAnnotationImage) { @@ -152,16 +168,16 @@ func (o *OCI) SaveIndex() error { // While the name may differ from ref, it should itself be a valid ref. // // If the resolution fails, an error will be returned. 
-func (o *OCI) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) { +func (o *OCI) Resolve(ctx context.Context, ref string) (ocispec.Descriptor, error) { if err := o.LoadIndex(); err != nil { - return "", ocispec.Descriptor{}, err + return ocispec.Descriptor{}, err } d, ok := o.nameMap.Load(ref) if !ok { - return "", ocispec.Descriptor{}, err + return ocispec.Descriptor{}, fmt.Errorf("reference %s not found", ref) } - desc = d.(ocispec.Descriptor) - return ref, desc, nil + desc := d.(ocispec.Descriptor) + return desc, nil } // Fetcher returns a new fetcher for the provided reference. @@ -271,6 +287,12 @@ func (o *OCI) path(elem ...string) string { return filepath.Join(append(complete, elem...)...) } +// IndexExists reports whether the store's OCI layout index.json exists on disk. +func (o *OCI) IndexExists() bool { + _, err := os.Stat(o.path(ocispec.ImageIndexFile)) + return err == nil +} + type ociPusher struct { oci *OCI ref string @@ -287,7 +309,20 @@ func (p *ociPusher) Push(ctx context.Context, d ocispec.Descriptor) (ccontent.Wr if err := p.oci.LoadIndex(); err != nil { return nil, err } - p.oci.nameMap.Store(p.ref, d) + // Use compound key format: "reference-kind"; normalize legacy values. + kind := d.Annotations[consts.KindAnnotationName] + kind = consts.NormalizeLegacyKind(kind) + if kind == "" { + kind = consts.KindAnnotationImage + } + // Copy annotations map to avoid mutating the caller's descriptor, + // then write the normalized kind so Walk() callers see dev.hauler/... values. 
+ normalizedAnnotations := make(map[string]string, len(d.Annotations)+1) + maps.Copy(normalizedAnnotations, d.Annotations) + normalizedAnnotations[consts.KindAnnotationName] = kind + d.Annotations = normalizedAnnotations + key := fmt.Sprintf("%s-%s", p.ref, kind) + p.oci.nameMap.Store(key, d) if err := p.oci.SaveIndex(); err != nil { return nil, err } @@ -301,7 +336,7 @@ func (p *ociPusher) Push(ctx context.Context, d ocispec.Descriptor) (ccontent.Wr if _, err := os.Stat(blobPath); err == nil { // file already exists, discard (but validate digest) - return content.NewIoContentWriter(io.Discard, content.WithOutputHash(d.Digest)), nil + return NewIoContentWriter(nopCloser{io.Discard}, WithOutputHash(d.Digest.String())), nil } f, err := os.Create(blobPath) @@ -309,10 +344,25 @@ func (p *ociPusher) Push(ctx context.Context, d ocispec.Descriptor) (ccontent.Wr return nil, err } - w := content.NewIoContentWriter(f, content.WithInputHash(d.Digest), content.WithOutputHash(d.Digest)) + w := NewIoContentWriter(f, WithOutputHash(d.Digest.String())) return w, nil } func (o *OCI) RemoveFromIndex(ref string) { o.nameMap.Delete(ref) } + +// ResolvePath returns the absolute path for a given relative path within the OCI root +func (o *OCI) ResolvePath(elem string) string { + if elem == "" { + return o.root + } + return filepath.Join(o.root, elem) +} + +// nopCloser wraps an io.Writer to implement io.WriteCloser +type nopCloser struct { + io.Writer +} + +func (nopCloser) Close() error { return nil } diff --git a/pkg/content/oci_test.go b/pkg/content/oci_test.go new file mode 100644 index 0000000..4bf650a --- /dev/null +++ b/pkg/content/oci_test.go @@ -0,0 +1,281 @@ +package content + +// oci_test.go covers the annotation-normalization correctness of LoadIndex() +// and ociPusher.Push(). Specifically, it verifies that descriptors returned +// by Walk() carry the normalized dev.hauler/... kind annotation value, not the +// legacy dev.cosignproject.cosign/... 
value that may be present on disk. + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "hauler.dev/go/hauler/pkg/consts" +) + +// buildMinimalOCILayout writes the smallest valid OCI layout (oci-layout marker +// + index.json with the supplied descriptors) into dir. No blobs are written; +// this is sufficient for testing LoadIndex/Walk without a full store. +func buildMinimalOCILayout(t *testing.T, dir string, manifests []ocispec.Descriptor) { + t.Helper() + + // oci-layout marker + layoutMarker := map[string]string{"imageLayoutVersion": "1.0.0"} + markerData, err := json.Marshal(layoutMarker) + if err != nil { + t.Fatalf("marshal oci-layout: %v", err) + } + if err := os.WriteFile(filepath.Join(dir, ocispec.ImageLayoutFile), markerData, 0644); err != nil { + t.Fatalf("write oci-layout: %v", err) + } + + // index.json + idx := ocispec.Index{ + Versioned: specs.Versioned{SchemaVersion: 2}, + MediaType: ocispec.MediaTypeImageIndex, + Manifests: manifests, + } + data, err := json.MarshalIndent(idx, "", " ") + if err != nil { + t.Fatalf("marshal index.json: %v", err) + } + if err := os.WriteFile(filepath.Join(dir, ocispec.ImageIndexFile), data, 0644); err != nil { + t.Fatalf("write index.json: %v", err) + } +} + +// fakeDigest returns a syntactically valid digest string that can be used in +// test descriptors without any real blob. 
+func fakeDigest(hex string) digest.Digest { + // pad hex to 64 chars + for len(hex) < 64 { + hex += "0" + } + return digest.Digest("sha256:" + hex) +} + +// -------------------------------------------------------------------------- +// TestLoadIndex_NormalizesLegacyKindInDescriptorAnnotations +// -------------------------------------------------------------------------- + +// TestLoadIndex_NormalizesLegacyKindInDescriptorAnnotations verifies that +// after LoadIndex() (called implicitly by Walk()), every descriptor returned +// by Walk carries a normalized dev.hauler/... kind annotation, not the legacy +// dev.cosignproject.cosign/... value stored on disk. +func TestLoadIndex_NormalizesLegacyKindInDescriptorAnnotations(t *testing.T) { + dir := t.TempDir() + + legacyKinds := []string{ + "dev.cosignproject.cosign/image", + "dev.cosignproject.cosign/imageIndex", + "dev.cosignproject.cosign/sigs", + "dev.cosignproject.cosign/atts", + "dev.cosignproject.cosign/sboms", + } + + var manifests []ocispec.Descriptor + for i, legacyKind := range legacyKinds { + d := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageManifest, + Digest: fakeDigest(strings.Repeat(string(rune('a'+i)), 1)), + Size: 100, + Annotations: map[string]string{ + ocispec.AnnotationRefName: "example.com/repo:tag" + strings.Repeat(string(rune('a'+i)), 1), + consts.KindAnnotationName: legacyKind, + }, + } + manifests = append(manifests, d) + } + + buildMinimalOCILayout(t, dir, manifests) + + o, err := NewOCI(dir) + if err != nil { + t.Fatalf("NewOCI: %v", err) + } + + var walked []ocispec.Descriptor + if err := o.Walk(func(_ string, desc ocispec.Descriptor) error { + walked = append(walked, desc) + return nil + }); err != nil { + t.Fatalf("Walk: %v", err) + } + + if len(walked) == 0 { + t.Fatal("Walk returned no descriptors") + } + + const legacyPrefix = "dev.cosignproject.cosign" + const newPrefix = "dev.hauler" + for _, desc := range walked { + kind := desc.Annotations[consts.KindAnnotationName] + if 
strings.HasPrefix(kind, legacyPrefix) { + t.Errorf("descriptor %s: Walk returned legacy kind %q; want normalized dev.hauler/... value", + desc.Digest, kind) + } + if !strings.HasPrefix(kind, newPrefix) { + t.Errorf("descriptor %s: Walk returned unexpected kind %q; want dev.hauler/... prefix", + desc.Digest, kind) + } + } +} + +// -------------------------------------------------------------------------- +// TestLoadIndex_DoesNotMutateOnDiskAnnotations +// -------------------------------------------------------------------------- + +// TestLoadIndex_DoesNotMutateOnDiskAnnotations verifies that the normalization +// performed by LoadIndex() is in-memory only: the index.json on disk must +// still carry the original (legacy) annotation values after a Walk() call. +func TestLoadIndex_DoesNotMutateOnDiskAnnotations(t *testing.T) { + dir := t.TempDir() + + legacyKind := "dev.cosignproject.cosign/image" + manifests := []ocispec.Descriptor{ + { + MediaType: ocispec.MediaTypeImageManifest, + Digest: fakeDigest("b"), + Size: 100, + Annotations: map[string]string{ + ocispec.AnnotationRefName: "example.com/repo:tagb", + consts.KindAnnotationName: legacyKind, + }, + }, + } + buildMinimalOCILayout(t, dir, manifests) + + o, err := NewOCI(dir) + if err != nil { + t.Fatalf("NewOCI: %v", err) + } + // Trigger LoadIndex via Walk. + if err := o.Walk(func(_ string, _ ocispec.Descriptor) error { return nil }); err != nil { + t.Fatalf("Walk: %v", err) + } + + // Re-read index.json from disk and verify the annotation is unchanged. 
+ data, err := os.ReadFile(filepath.Join(dir, ocispec.ImageIndexFile)) + if err != nil { + t.Fatalf("read index.json: %v", err) + } + var idx ocispec.Index + if err := json.Unmarshal(data, &idx); err != nil { + t.Fatalf("unmarshal index.json: %v", err) + } + for _, desc := range idx.Manifests { + got := desc.Annotations[consts.KindAnnotationName] + if got != legacyKind { + t.Errorf("on-disk kind was mutated: got %q, want %q", got, legacyKind) + } + } +} + +// -------------------------------------------------------------------------- +// TestPush_NormalizesLegacyKindInStoredDescriptor +// -------------------------------------------------------------------------- + +// TestPush_NormalizesLegacyKindInStoredDescriptor verifies that after a Push() +// that matches the root digest, the descriptor stored in nameMap (and therefore +// returned by subsequent Walk() calls) carries the normalized dev.hauler/... +// kind annotation rather than the legacy value. +func TestPush_NormalizesLegacyKindInStoredDescriptor(t *testing.T) { + dir := t.TempDir() + buildMinimalOCILayout(t, dir, nil) // start with empty index + + o, err := NewOCI(dir) + if err != nil { + t.Fatalf("NewOCI: %v", err) + } + + // Build a minimal manifest blob so Push() can write it to disk. + manifest := ocispec.Manifest{ + Versioned: specs.Versioned{SchemaVersion: 2}, + MediaType: ocispec.MediaTypeImageManifest, + Config: ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageConfig, + Digest: fakeDigest("config0"), + Size: 2, + }, + } + manifestData, err := json.Marshal(manifest) + if err != nil { + t.Fatalf("marshal manifest: %v", err) + } + manifestDigest := digest.FromBytes(manifestData) + + // Ensure the blobs directory exists so Push can write. 
+ blobsDir := filepath.Join(dir, ocispec.ImageBlobsDir, "sha256") + if err := os.MkdirAll(blobsDir, 0755); err != nil { + t.Fatalf("mkdir blobs: %v", err) + } + + legacyKind := "dev.cosignproject.cosign/sigs" + baseRef := "example.com/repo:tagsig" + + pusher, err := o.Pusher(context.Background(), baseRef+"@"+manifestDigest.String()) + if err != nil { + t.Fatalf("Pusher: %v", err) + } + + desc := ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageManifest, + Digest: manifestDigest, + Size: int64(len(manifestData)), + Annotations: map[string]string{ + ocispec.AnnotationRefName: baseRef, + consts.KindAnnotationName: legacyKind, + }, + } + + w, err := pusher.Push(context.Background(), desc) + if err != nil { + t.Fatalf("Push: %v", err) + } + if _, err := w.Write(manifestData); err != nil { + t.Fatalf("Write manifest: %v", err) + } + if err := w.Close(); err != nil { + t.Fatalf("Close writer: %v", err) + } + + // Now Walk and verify the descriptor in nameMap has the normalized kind. + // We need a fresh OCI instance so Walk calls LoadIndex (which reads SaveIndex output). + o2, err := NewOCI(dir) + if err != nil { + t.Fatalf("NewOCI second: %v", err) + } + + const legacyPrefix = "dev.cosignproject.cosign" + const newPrefix = "dev.hauler" + var found bool + if err := o2.Walk(func(_ string, d ocispec.Descriptor) error { + found = true + kind := d.Annotations[consts.KindAnnotationName] + if strings.HasPrefix(kind, legacyPrefix) { + t.Errorf("Push stored descriptor with legacy kind %q; want normalized dev.hauler/... value", kind) + } + if !strings.HasPrefix(kind, newPrefix) { + t.Errorf("Push stored descriptor with unexpected kind %q; want dev.hauler/... prefix", kind) + } + return nil + }); err != nil { + t.Fatalf("Walk: %v", err) + } + if !found { + t.Fatal("Walk returned no descriptors after Push") + } + + // Also verify the caller's original descriptor map was NOT mutated. 
+ if desc.Annotations[consts.KindAnnotationName] != legacyKind { + t.Errorf("Push mutated caller's descriptor annotations: got %q, want %q", + desc.Annotations[consts.KindAnnotationName], legacyKind) + } +} diff --git a/pkg/content/registry.go b/pkg/content/registry.go new file mode 100644 index 0000000..edf8138 --- /dev/null +++ b/pkg/content/registry.go @@ -0,0 +1,106 @@ +package content + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/containerd/containerd/remotes" + cdocker "github.com/containerd/containerd/remotes/docker" + goauthn "github.com/google/go-containerregistry/pkg/authn" + goname "github.com/google/go-containerregistry/pkg/name" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +var _ Target = (*RegistryTarget)(nil) + +// RegistryTarget implements Target for pushing to a remote OCI registry. +// Authentication is sourced from the local Docker credential store via go-containerregistry's +// default keychain unless explicit credentials are provided in RegistryOptions. +type RegistryTarget struct { + resolver remotes.Resolver +} + +// NewRegistryTarget returns a RegistryTarget that pushes to host (e.g. "localhost:5000"). +func NewRegistryTarget(host string, opts RegistryOptions) *RegistryTarget { + authorizer := cdocker.NewDockerAuthorizer( + cdocker.WithAuthCreds(func(h string) (string, string, error) { + if opts.Username != "" { + return opts.Username, opts.Password, nil + } + // Bridge to go-containerregistry's keychain for credential lookup. 
+ reg, err := goname.NewRegistry(h, goname.Insecure) + if err != nil { + return "", "", nil + } + a, err := goauthn.DefaultKeychain.Resolve(reg) + if err != nil || a == goauthn.Anonymous { + return "", "", nil + } + cfg, err := a.Authorization() + if err != nil { + return "", "", nil + } + return cfg.Username, cfg.Password, nil + }), + ) + + hosts := func(h string) ([]cdocker.RegistryHost, error) { + host, err := cdocker.DefaultHost(h) + if err != nil { + return nil, err + } + scheme := "https" + if opts.PlainHTTP || opts.Insecure { + scheme = "http" + } + return []cdocker.RegistryHost{{ + Client: http.DefaultClient, + Authorizer: authorizer, + Scheme: scheme, + Host: host, + Path: "/v2", + Capabilities: cdocker.HostCapabilityPull | cdocker.HostCapabilityResolve | cdocker.HostCapabilityPush, + }}, nil + } + + return &RegistryTarget{ + resolver: cdocker.NewResolver(cdocker.ResolverOptions{ + Hosts: hosts, + }), + } +} + +func (t *RegistryTarget) Resolve(ctx context.Context, ref string) (ocispec.Descriptor, error) { + _, desc, err := t.resolver.Resolve(ctx, ref) + return desc, err +} + +func (t *RegistryTarget) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { + return t.resolver.Fetcher(ctx, ref) +} + +func (t *RegistryTarget) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) { + return t.resolver.Pusher(ctx, ref) +} + +// RewriteRefToRegistry rewrites sourceRef to use targetRegistry as its host, preserving the +// repository path and tag or digest. 
For example: +// +// "index.docker.io/library/nginx:latest" + "localhost:5000" → "localhost:5000/library/nginx:latest" +func RewriteRefToRegistry(sourceRef string, targetRegistry string) (string, error) { + ref, err := goname.ParseReference(sourceRef) + if err != nil { + return "", fmt.Errorf("parsing reference %q: %w", sourceRef, err) + } + repo := strings.TrimPrefix(ref.Context().RepositoryStr(), "/") + switch r := ref.(type) { + case goname.Tag: + return fmt.Sprintf("%s/%s:%s", targetRegistry, repo, r.TagStr()), nil + case goname.Digest: + return fmt.Sprintf("%s/%s@%s", targetRegistry, repo, r.DigestStr()), nil + default: + return fmt.Sprintf("%s/%s:latest", targetRegistry, repo), nil + } +} diff --git a/pkg/content/types.go b/pkg/content/types.go new file mode 100644 index 0000000..9accd72 --- /dev/null +++ b/pkg/content/types.go @@ -0,0 +1,106 @@ +package content + +import ( + "context" + "fmt" + "io" + + ccontent "github.com/containerd/containerd/content" + "github.com/containerd/containerd/remotes" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Target represents a content storage target with resolver, fetcher, and pusher capabilities +type Target interface { + Resolve(ctx context.Context, ref string) (ocispec.Descriptor, error) + Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) + Pusher(ctx context.Context, ref string) (remotes.Pusher, error) +} + +// RegistryOptions holds registry configuration +type RegistryOptions struct { + PlainHTTP bool + Insecure bool + Username string + Password string +} + +// ResolveName extracts the reference name from a descriptor's annotations +func ResolveName(desc ocispec.Descriptor) (string, bool) { + name, ok := desc.Annotations[ocispec.AnnotationRefName] + return name, ok +} + +// IoContentWriter wraps an io.WriteCloser to implement containerd's content.Writer +type IoContentWriter struct { + writer io.WriteCloser + digester digest.Digester + status 
ccontent.Status + outputHash string +} + +// Write writes data to the underlying writer and updates the digest +func (w *IoContentWriter) Write(p []byte) (n int, err error) { + n, err = w.writer.Write(p) + if n > 0 { + w.digester.Hash().Write(p[:n]) + } + return n, err +} + +// Close closes the writer and verifies the digest if configured +func (w *IoContentWriter) Close() error { + if w.outputHash != "" { + computed := w.digester.Digest().String() + if computed != w.outputHash { + return fmt.Errorf("digest mismatch: expected %s, got %s", w.outputHash, computed) + } + } + return w.writer.Close() +} + +// Digest returns the current digest of written data +func (w *IoContentWriter) Digest() digest.Digest { + return w.digester.Digest() +} + +// Commit is a no-op for this implementation +func (w *IoContentWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...ccontent.Opt) error { + return nil +} + +// Status returns the current status +func (w *IoContentWriter) Status() (ccontent.Status, error) { + return w.status, nil +} + +// Truncate is not supported +func (w *IoContentWriter) Truncate(size int64) error { + return fmt.Errorf("truncate not supported") +} + +type writerOption func(*IoContentWriter) + +// WithOutputHash configures expected output hash for verification +func WithOutputHash(hash string) writerOption { + return func(w *IoContentWriter) { + w.outputHash = hash + } +} + +// NewIoContentWriter creates a new IoContentWriter +func NewIoContentWriter(writer io.WriteCloser, opts ...writerOption) *IoContentWriter { + w := &IoContentWriter{ + writer: writer, + digester: digest.Canonical.Digester(), + status: ccontent.Status{}, + } + for _, opt := range opts { + opt(w) + } + return w +} + +// AnnotationUnpack is the annotation key for unpacking +const AnnotationUnpack = "io.containerd.image.unpack" diff --git a/pkg/cosign/cosign.go b/pkg/cosign/cosign.go index 8f0ac7b..56fd3a5 100644 --- a/pkg/cosign/cosign.go +++ b/pkg/cosign/cosign.go @@ 
-2,24 +2,16 @@ package cosign import ( "context" - "fmt" - "os" - "strings" - "time" - "github.com/sigstore/cosign/v3/cmd/cosign/cli" "github.com/sigstore/cosign/v3/cmd/cosign/cli/options" "github.com/sigstore/cosign/v3/cmd/cosign/cli/verify" "hauler.dev/go/hauler/internal/flags" - "hauler.dev/go/hauler/pkg/artifacts/image" - "hauler.dev/go/hauler/pkg/consts" "hauler.dev/go/hauler/pkg/log" - "hauler.dev/go/hauler/pkg/store" - "oras.land/oras-go/pkg/content" + "hauler.dev/go/hauler/pkg/retry" ) -// VerifySignature verifies the digital signature of a file using Sigstore/Cosign. -func VerifySignature(ctx context.Context, s *store.Layout, keyPath string, useTlog bool, ref string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error { +// VerifySignature verifies the digital signature of an image using Sigstore/Cosign. +func VerifySignature(ctx context.Context, keyPath string, useTlog bool, ref string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error { l := log.FromContext(ctx) operation := func() error { v := &verify.VerifyCommand{ @@ -28,29 +20,21 @@ func VerifySignature(ctx context.Context, s *store.Layout, keyPath string, useTl NewBundleFormat: true, } - // if the user wants to use the transparency log, set the flag to false if useTlog { v.IgnoreTlog = false } - err := log.CaptureOutput(l, true, func() error { + return log.CaptureOutput(l, true, func() error { return v.Exec(ctx, []string{ref}) }) - if err != nil { - return err - } - - return nil } - - return RetryOperation(ctx, rso, ro, operation) + return retry.Operation(ctx, rso, ro, operation) } -// VerifyKeylessSignature verifies the digital signature of a file using Sigstore/Cosign. 
-func VerifyKeylessSignature(ctx context.Context, s *store.Layout, identity string, identityRegexp string, oidcIssuer string, oidcIssuerRegexp string, ghWorkflowRepository string, useTlog bool, ref string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error { +// VerifyKeylessSignature verifies an image signature using keyless/OIDC identity. +func VerifyKeylessSignature(ctx context.Context, identity string, identityRegexp string, oidcIssuer string, oidcIssuerRegexp string, ghWorkflowRepository string, ref string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error { l := log.FromContext(ctx) operation := func() error { - certVerifyOptions := options.CertVerifyOptions{ CertOidcIssuer: oidcIssuer, CertOidcIssuerRegexp: oidcIssuerRegexp, @@ -61,153 +45,14 @@ func VerifyKeylessSignature(ctx context.Context, s *store.Layout, identity strin v := &verify.VerifyCommand{ CertVerifyOptions: certVerifyOptions, - IgnoreTlog: false, // Ignore transparency log is set to false by default for keyless signature verification + IgnoreTlog: false, // Use transparency log by default for keyless verification. CertGithubWorkflowRepository: ghWorkflowRepository, NewBundleFormat: true, } - // if the user wants to use the transparency log, set the flag to false - if useTlog { - v.IgnoreTlog = false - } - - err := log.CaptureOutput(l, true, func() error { + return log.CaptureOutput(l, true, func() error { return v.Exec(ctx, []string{ref}) }) - if err != nil { - return err - } - - return nil } - - return RetryOperation(ctx, rso, ro, operation) -} - -// SaveImage saves image and any signatures/attestations to the store. 
-func SaveImage(ctx context.Context, s *store.Layout, ref string, platform string, rso *flags.StoreRootOpts, ro *flags.CliRootOpts) error { - l := log.FromContext(ctx) - - if !ro.IgnoreErrors { - envVar := os.Getenv(consts.HaulerIgnoreErrors) - if envVar == "true" { - ro.IgnoreErrors = true - } - } - - operation := func() error { - o := &options.SaveOptions{ - Directory: s.Root, - } - - // check to see if the image is multi-arch - isMultiArch, err := image.IsMultiArchImage(ref) - if err != nil { - return err - } - l.Debugf("multi-arch image [%v]", isMultiArch) - - // Conditionally add platform. - if platform != "" && isMultiArch { - l.Debugf("platform for image [%s]", platform) - o.Platform = platform - } - - err = cli.SaveCmd(ctx, *o, ref) - if err != nil { - return err - } - - return nil - - } - - return RetryOperation(ctx, rso, ro, operation) -} - -// LoadImage loads store to a remote registry. -func LoadImages(ctx context.Context, s *store.Layout, registry string, only string, ropts content.RegistryOptions, ro *flags.CliRootOpts) error { - l := log.FromContext(ctx) - - o := &options.LoadOptions{ - Directory: s.Root, - Registry: options.RegistryOptions{ - Name: registry, - }, - } - - // Conditionally add extra flags. 
- if len(only) > 0 { - o.LoadOnly = only - } - - if ropts.Insecure { - o.Registry.AllowInsecure = true - } - - if ropts.PlainHTTP { - o.Registry.AllowHTTPRegistry = true - } - - if ropts.Username != "" { - o.Registry.AuthConfig.Username = ropts.Username - o.Registry.AuthConfig.Password = ropts.Password - } - - // execute the cosign load and capture the output in our logger - err := log.CaptureOutput(l, false, func() error { - return cli.LoadCmd(ctx, *o, "") - }) - if err != nil { - return err - } - - return nil -} - -func RetryOperation(ctx context.Context, rso *flags.StoreRootOpts, ro *flags.CliRootOpts, operation func() error) error { - l := log.FromContext(ctx) - - if !ro.IgnoreErrors { - envVar := os.Getenv(consts.HaulerIgnoreErrors) - if envVar == "true" { - ro.IgnoreErrors = true - } - } - - // Validate retries and fall back to a default - retries := rso.Retries - if retries <= 0 { - retries = consts.DefaultRetries - } - - for attempt := 1; attempt <= rso.Retries; attempt++ { - err := operation() - if err == nil { - // If the operation succeeds, return nil (no error) - return nil - } - - if ro.IgnoreErrors { - if strings.HasPrefix(err.Error(), "function execution failed: no matching signatures: rekor client not provided for online verification") { - l.Warnf("warning (attempt %d/%d)... failed tlog verification", attempt, rso.Retries) - } else { - l.Warnf("warning (attempt %d/%d)... %v", attempt, rso.Retries, err) - } - } else { - if strings.HasPrefix(err.Error(), "function execution failed: no matching signatures: rekor client not provided for online verification") { - l.Errorf("error (attempt %d/%d)... failed tlog verification", attempt, rso.Retries) - } else { - l.Errorf("error (attempt %d/%d)... 
%v", attempt, rso.Retries, err) - } - } - - // If this is not the last attempt, wait before retrying - if attempt < rso.Retries { - time.Sleep(time.Second * consts.RetriesInterval) - } - } - - // If all attempts fail, return an error - return fmt.Errorf("operation unsuccessful after %d attempts", rso.Retries) + return retry.Operation(ctx, rso, ro, operation) } diff --git a/pkg/getter/directory.go b/pkg/getter/directory.go index 2399393..88ec04c 100644 --- a/pkg/getter/directory.go +++ b/pkg/getter/directory.go @@ -33,30 +33,37 @@ func (d directory) Open(ctx context.Context, u *url.URL) (io.ReadCloser, error) digester := digest.Canonical.Digester() zw := gzip.NewWriter(io.MultiWriter(tmpfile, digester.Hash())) - defer zw.Close() tarDigester := digest.Canonical.Digester() if err := tarDir(d.path(u), d.Name(u), io.MultiWriter(zw, tarDigester.Hash()), false); err != nil { + zw.Close() + tmpfile.Close() + os.Remove(tmpfile.Name()) return nil, err } if err := zw.Close(); err != nil { + tmpfile.Close() + os.Remove(tmpfile.Name()) return nil, err } if err := tmpfile.Sync(); err != nil { + tmpfile.Close() + os.Remove(tmpfile.Name()) return nil, err } - fi, err := os.Open(tmpfile.Name()) + // Close the write handle; re-open as read-only + tmpName := tmpfile.Name() + tmpfile.Close() + + fi, err := os.Open(tmpName) if err != nil { + os.Remove(tmpName) return nil, err } - // rc := &closer{ - // t: io.TeeReader(tmpfile, fi), - // closes: []func() error{fi.Close, tmpfile.Close, zw.Close}, - // } - return fi, nil + return &tempFileReadCloser{File: fi, path: tmpName}, nil } func (d directory) Detect(u *url.URL) bool { @@ -144,22 +151,15 @@ func tarDir(root string, prefix string, w io.Writer, stripTimes bool) error { return nil } -type closer struct { - t io.Reader - closes []func() error +// tempFileReadCloser wraps an *os.File and removes the underlying +// temp file when closed. 
+type tempFileReadCloser struct { + *os.File + path string } -func (c *closer) Read(p []byte) (n int, err error) { - return c.t.Read(p) -} - -func (c *closer) Close() error { - var err error - for _, c := range c.closes { - lastErr := c() - if err == nil { - err = lastErr - } - } +func (t *tempFileReadCloser) Close() error { + err := t.File.Close() + os.Remove(t.path) return err } diff --git a/pkg/getter/getter.go b/pkg/getter/getter.go index 63274df..c022c21 100644 --- a/pkg/getter/getter.go +++ b/pkg/getter/getter.go @@ -9,10 +9,10 @@ import ( v1 "github.com/google/go-containerregistry/pkg/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "oras.land/oras-go/pkg/content" content2 "hauler.dev/go/hauler/pkg/artifacts" "hauler.dev/go/hauler/pkg/consts" + "hauler.dev/go/hauler/pkg/content" "hauler.dev/go/hauler/pkg/layer" ) diff --git a/pkg/getter/https.go b/pkg/getter/https.go index ee2ae73..4bd4d23 100644 --- a/pkg/getter/https.go +++ b/pkg/getter/https.go @@ -2,6 +2,7 @@ package getter import ( "context" + "fmt" "io" "mime" "net/http" @@ -24,8 +25,9 @@ func (h Http) Name(u *url.URL) string { if err != nil { return "" } + defer resp.Body.Close() - name, _ := url.PathUnescape(u.String()) + unescaped, err := url.PathUnescape(u.String()) if err != nil { return "" } @@ -40,8 +42,7 @@ func (h Http) Name(u *url.URL) string { _ = t } - // TODO: Not this - return filepath.Base(name) + return filepath.Base(unescaped) } func (h Http) Open(ctx context.Context, u *url.URL) (io.ReadCloser, error) { @@ -49,6 +50,10 @@ func (h Http) Open(ctx context.Context, u *url.URL) (io.ReadCloser, error) { if err != nil { return nil, err } + if resp.StatusCode != http.StatusOK { + resp.Body.Close() + return nil, fmt.Errorf("unexpected status fetching %s: %s", u.String(), resp.Status) + } return resp.Body, nil } diff --git a/pkg/layer/filesystem.go b/pkg/layer/filesystem.go index 8330fdb..0abce5a 100644 --- a/pkg/layer/filesystem.go +++ 
b/pkg/layer/filesystem.go @@ -66,10 +66,11 @@ func (l *cachedLayer) create(h v1.Hash) (io.WriteCloser, error) { func (l *cachedLayer) Compressed() (io.ReadCloser, error) { f, err := l.create(l.digest) if err != nil { - return nil, nil + return nil, err } rc, err := l.Layer.Compressed() if err != nil { + f.Close() return nil, err } return &readcloser{ @@ -85,6 +86,7 @@ func (l *cachedLayer) Uncompressed() (io.ReadCloser, error) { } rc, err := l.Layer.Uncompressed() if err != nil { + f.Close() return nil, err } return &readcloser{ diff --git a/pkg/retry/retry.go b/pkg/retry/retry.go new file mode 100644 index 0000000..4e93e46 --- /dev/null +++ b/pkg/retry/retry.go @@ -0,0 +1,57 @@ +package retry + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "hauler.dev/go/hauler/internal/flags" + "hauler.dev/go/hauler/pkg/consts" + "hauler.dev/go/hauler/pkg/log" +) + +// Operation retries the given operation according to the retry settings in rso/ro. +func Operation(ctx context.Context, rso *flags.StoreRootOpts, ro *flags.CliRootOpts, operation func() error) error { + l := log.FromContext(ctx) + + if !ro.IgnoreErrors { + if os.Getenv(consts.HaulerIgnoreErrors) == "true" { + ro.IgnoreErrors = true + } + } + + retries := rso.Retries + if retries <= 0 { + retries = consts.DefaultRetries + } + + for attempt := 1; attempt <= retries; attempt++ { + err := operation() + if err == nil { + return nil + } + + isTlogErr := strings.HasPrefix(err.Error(), "function execution failed: no matching signatures: rekor client not provided for online verification") + if ro.IgnoreErrors { + if isTlogErr { + l.Warnf("warning (attempt %d/%d)... failed tlog verification", attempt, retries) + } else { + l.Warnf("warning (attempt %d/%d)... %v", attempt, retries, err) + } + } else { + if isTlogErr { + l.Errorf("error (attempt %d/%d)... failed tlog verification", attempt, retries) + } else { + l.Errorf("error (attempt %d/%d)... 
%v", attempt, retries, err) + } + } + + if attempt < retries { + time.Sleep(time.Second * consts.RetriesInterval) + } + } + + return fmt.Errorf("operation unsuccessful after %d attempts", retries) +} diff --git a/pkg/retry/retry_test.go b/pkg/retry/retry_test.go new file mode 100644 index 0000000..5588b93 --- /dev/null +++ b/pkg/retry/retry_test.go @@ -0,0 +1,160 @@ +package retry + +import ( + "context" + "fmt" + "io" + "testing" + + "github.com/rs/zerolog" + + "hauler.dev/go/hauler/internal/flags" + "hauler.dev/go/hauler/pkg/consts" +) + +func testContext() context.Context { + l := zerolog.New(io.Discard) + return l.WithContext(context.Background()) +} + +func TestOperation_SucceedsFirstAttempt(t *testing.T) { + ctx := testContext() + rso := &flags.StoreRootOpts{Retries: 1} + ro := &flags.CliRootOpts{} + + callCount := 0 + err := Operation(ctx, rso, ro, func() error { + callCount++ + return nil + }) + + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if callCount != 1 { + t.Fatalf("expected 1 call, got %d", callCount) + } +} + +func TestOperation_ExhaustsRetries(t *testing.T) { + ctx := testContext() + // Retries=1 → 1 attempt, 0 sleeps (sleep is skipped on last attempt). + rso := &flags.StoreRootOpts{Retries: 1} + ro := &flags.CliRootOpts{} + + callCount := 0 + err := Operation(ctx, rso, ro, func() error { + callCount++ + return fmt.Errorf("always fails") + }) + + if err == nil { + t.Fatal("expected error after exhausting retries, got nil") + } + if callCount != 1 { + t.Fatalf("expected 1 call, got %d", callCount) + } + want := fmt.Sprintf("operation unsuccessful after %d attempts", 1) + if err.Error() != want { + t.Fatalf("error = %q, want %q", err.Error(), want) + } +} + +func TestOperation_RetriesAndSucceeds(t *testing.T) { + if testing.Short() { + t.Skip("skipping: requires one RetriesInterval sleep (5s)") + } + ctx := testContext() + // Retries=2: fails on attempt 1, succeeds on attempt 2 (one 5s sleep). 
+ rso := &flags.StoreRootOpts{Retries: 2} + ro := &flags.CliRootOpts{} + + callCount := 0 + err := Operation(ctx, rso, ro, func() error { + callCount++ + if callCount < 2 { + return fmt.Errorf("transient error") + } + return nil + }) + + if err != nil { + t.Fatalf("expected success on retry, got: %v", err) + } + if callCount != 2 { + t.Fatalf("expected 2 calls, got %d", callCount) + } +} + +func TestOperation_DefaultRetries(t *testing.T) { + ctx := testContext() + // Retries=0 → falls back to consts.DefaultRetries (3). + // Verify happy path (success first attempt) is unaffected. + rso := &flags.StoreRootOpts{Retries: 0} + ro := &flags.CliRootOpts{} + + callCount := 0 + err := Operation(ctx, rso, ro, func() error { + callCount++ + return nil + }) + if err != nil { + t.Fatalf("expected no error, got: %v", err) + } + if callCount != 1 { + t.Fatalf("expected 1 call, got %d", callCount) + } + + // Exhausting all default retries requires (DefaultRetries-1) sleeps of 5s each. + // Only run this sub-test in non-short mode. + t.Run("FailAllWithDefault", func(t *testing.T) { + if testing.Short() { + t.Skip("skipping: requires (DefaultRetries-1) * 5s sleeps") + } + rso2 := &flags.StoreRootOpts{Retries: 0} + ro2 := &flags.CliRootOpts{} + callCount2 := 0 + err2 := Operation(ctx, rso2, ro2, func() error { + callCount2++ + return fmt.Errorf("fail") + }) + if err2 == nil { + t.Fatal("expected error, got nil") + } + if callCount2 != consts.DefaultRetries { + t.Fatalf("expected %d calls (DefaultRetries), got %d", consts.DefaultRetries, callCount2) + } + want := fmt.Sprintf("operation unsuccessful after %d attempts", consts.DefaultRetries) + if err2.Error() != want { + t.Fatalf("error = %q, want %q", err2.Error(), want) + } + }) +} + +func TestOperation_EnvVar_IgnoreErrors(t *testing.T) { + ctx := testContext() + // Retries=1 → 1 attempt, no sleep. 
+ rso := &flags.StoreRootOpts{Retries: 1} + ro := &flags.CliRootOpts{IgnoreErrors: false} + + t.Setenv(consts.HaulerIgnoreErrors, "true") + + callCount := 0 + err := Operation(ctx, rso, ro, func() error { + callCount++ + return fmt.Errorf("some error") + }) + + // IgnoreErrors controls logging severity (WARN instead of ERR) — it does NOT + // suppress error returns. Operation always returns an error after exhausting + // all retries regardless of this flag (see pkg/retry/retry.go). + if err == nil { + t.Fatal("expected error after exhausting retries, got nil") + } + if !ro.IgnoreErrors { + t.Fatal("expected ro.IgnoreErrors=true after env var override") + } + if callCount != 1 { + t.Fatalf("expected 1 call, got %d", callCount) + } +} diff --git a/pkg/store/store.go b/pkg/store/store.go index cef607a..103b7d8 100644 --- a/pkg/store/store.go +++ b/pkg/store/store.go @@ -1,20 +1,26 @@ package store import ( + "bytes" "context" "encoding/json" "fmt" "io" "os" "path/filepath" + "strings" + "github.com/containerd/containerd/remotes" + "github.com/containerd/errdefs" + "github.com/google/go-containerregistry/pkg/authn" + gname "github.com/google/go-containerregistry/pkg/name" v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/remote" "github.com/google/go-containerregistry/pkg/v1/static" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/rs/zerolog" "golang.org/x/sync/errgroup" - "oras.land/oras-go/pkg/oras" - "oras.land/oras-go/pkg/target" "hauler.dev/go/hauler/pkg/artifacts" "hauler.dev/go/hauler/pkg/consts" @@ -58,13 +64,13 @@ func NewLayout(rootdir string, opts ...Options) (*Layout, error) { return l, nil } -// AddOCI adds an artifacts.OCI to the store +// AddArtifact adds an artifacts.OCI to the store // // The method to achieve this is to save artifact.OCI to a temporary directory in an OCI layout compatible form. 
Once // saved, the entirety of the layout is copied to the store (which is just a registry). This allows us to not only use // strict types to define generic content, but provides a processing pipeline suitable for extensibility. In the // future we'll allow users to define their own content that must adhere either by artifact.OCI or simply an OCI layout. -func (l *Layout) AddOCI(ctx context.Context, oci artifacts.OCI, ref string) (ocispec.Descriptor, error) { +func (l *Layout) AddArtifact(ctx context.Context, oci artifacts.OCI, ref string) (ocispec.Descriptor, error) { if l.cache != nil { cached := layer.OCICache(oci, l.cache) oci = cached @@ -90,8 +96,6 @@ func (l *Layout) AddOCI(ctx context.Context, oci artifacts.OCI, ref string) (oci return ocispec.Descriptor{}, err } - static.NewLayer(cdata, "") - if err := l.writeBlobData(cdata); err != nil { return ocispec.Descriptor{}, err } @@ -129,8 +133,8 @@ func (l *Layout) AddOCI(ctx context.Context, oci artifacts.OCI, ref string) (oci return idx, l.OCI.AddIndex(idx) } -// AddOCICollection . -func (l *Layout) AddOCICollection(ctx context.Context, collection artifacts.OCICollection) ([]ocispec.Descriptor, error) { +// AddArtifactCollection . 
+func (l *Layout) AddArtifactCollection(ctx context.Context, collection artifacts.OCICollection) ([]ocispec.Descriptor, error) { cnts, err := collection.Contents() if err != nil { return nil, err @@ -138,7 +142,7 @@ func (l *Layout) AddOCICollection(ctx context.Context, collection artifacts.OCIC var descs []ocispec.Descriptor for ref, oci := range cnts { - desc, err := l.AddOCI(ctx, oci, ref) + desc, err := l.AddArtifact(ctx, oci, ref) if err != nil { return nil, err } @@ -147,6 +151,329 @@ func (l *Layout) AddOCICollection(ctx context.Context, collection artifacts.OCIC return descs, nil } +// AddImage fetches a container image (or full index for multi-arch images) from a remote registry +// and saves it to the store along with any associated signatures, attestations, and SBOMs +// discovered via cosign's tag convention (.sig, .att, .sbom). +// When platform is non-empty and the ref is a multi-arch index, only that platform is fetched. +// When excludeExtras is true, cosign signatures, attestations, SBOMs, and OCI referrers are skipped. +func (l *Layout) AddImage(ctx context.Context, ref string, platform string, excludeExtras bool, opts ...remote.Option) error { + allOpts := append([]remote.Option{ + remote.WithAuthFromKeychain(authn.DefaultKeychain), + remote.WithContext(ctx), + }, opts...) + + parsedRef, err := gname.ParseReference(ref) + if err != nil { + return fmt.Errorf("parsing reference %q: %w", ref, err) + } + + desc, err := remote.Get(parsedRef, allOpts...) + if err != nil { + return fmt.Errorf("fetching descriptor for %q: %w", ref, err) + } + + var imageDigest v1.Hash + + if idx, idxErr := desc.ImageIndex(); idxErr == nil && platform == "" { + // Multi-arch image with no platform filter: save the full index. 
+ imageDigest, err = idx.Digest() + if err != nil { + return fmt.Errorf("getting index digest for %q: %w", ref, err) + } + if err := l.writeIndex(parsedRef, idx, consts.KindAnnotationIndex); err != nil { + return err + } + } else { + // Single-platform image, or the caller requested a specific platform. + imgOpts := append([]remote.Option{}, allOpts...) + if platform != "" { + p, err := parsePlatform(platform) + if err != nil { + return err + } + imgOpts = append(imgOpts, remote.WithPlatform(p)) + } + img, err := remote.Image(parsedRef, imgOpts...) + if err != nil { + return fmt.Errorf("fetching image %q: %w", ref, err) + } + imageDigest, err = img.Digest() + if err != nil { + return fmt.Errorf("getting image digest for %q: %w", ref, err) + } + if err := l.writeImage(parsedRef, img, consts.KindAnnotationImage, ""); err != nil { + return err + } + } + + if !excludeExtras { + savedDigests, err := l.saveRelatedArtifacts(ctx, parsedRef, imageDigest, allOpts...) + if err != nil { + return err + } + return l.saveReferrers(ctx, parsedRef, imageDigest, savedDigests, allOpts...) + } + return nil +} + +// writeImageBlobs writes all blobs for a single image (layers, config, manifest) to the store's +// blob directory. It does not add an entry to the OCI index. 
+func (l *Layout) writeImageBlobs(img v1.Image) error { + layers, err := img.Layers() + if err != nil { + return fmt.Errorf("getting layers: %w", err) + } + var g errgroup.Group + for _, lyr := range layers { + lyr := lyr + g.Go(func() error { return l.writeLayer(lyr) }) + } + if err := g.Wait(); err != nil { + return err + } + + cfgData, err := img.RawConfigFile() + if err != nil { + return fmt.Errorf("getting config: %w", err) + } + if err := l.writeBlobData(cfgData); err != nil { + return fmt.Errorf("writing config blob: %w", err) + } + + manifestData, err := img.RawManifest() + if err != nil { + return fmt.Errorf("getting manifest: %w", err) + } + return l.writeBlobData(manifestData) +} + +// writeImage writes all blobs for img and adds a descriptor entry to the OCI index with the +// given annotationRef and kind. containerdName overrides the io.containerd.image.name annotation; +// if empty it defaults to annotationRef.Name(). +func (l *Layout) writeImage(annotationRef gname.Reference, img v1.Image, kind string, containerdName string) error { + if err := l.writeImageBlobs(img); err != nil { + return err + } + + mt, err := img.MediaType() + if err != nil { + return fmt.Errorf("getting media type: %w", err) + } + hash, err := img.Digest() + if err != nil { + return fmt.Errorf("getting digest: %w", err) + } + d, err := digest.Parse(hash.String()) + if err != nil { + return fmt.Errorf("parsing digest: %w", err) + } + raw, err := img.RawManifest() + if err != nil { + return fmt.Errorf("getting raw manifest size: %w", err) + } + + if containerdName == "" { + containerdName = annotationRef.Name() + } + desc := ocispec.Descriptor{ + MediaType: string(mt), + Digest: d, + Size: int64(len(raw)), + Annotations: map[string]string{ + consts.KindAnnotationName: kind, + ocispec.AnnotationRefName: strings.TrimPrefix(annotationRef.Name(), annotationRef.Context().RegistryStr()+"/"), + consts.ContainerdImageNameKey: containerdName, + }, + } + return l.OCI.AddIndex(desc) +} + +// 
writeIndexBlobs recursively writes all child image blobs for an image index to the store's blob +// directory. It does not write the top-level index manifest or add index entries. +func (l *Layout) writeIndexBlobs(idx v1.ImageIndex) error { + manifest, err := idx.IndexManifest() + if err != nil { + return fmt.Errorf("getting index manifest: %w", err) + } + + for _, childDesc := range manifest.Manifests { + // Try as a nested index first, then fall back to a regular image. + if childIdx, err := idx.ImageIndex(childDesc.Digest); err == nil { + if err := l.writeIndexBlobs(childIdx); err != nil { + return err + } + raw, err := childIdx.RawManifest() + if err != nil { + return fmt.Errorf("getting nested index manifest: %w", err) + } + if err := l.writeBlobData(raw); err != nil { + return err + } + } else { + childImg, err := idx.Image(childDesc.Digest) + if err != nil { + return fmt.Errorf("getting child image %v: %w", childDesc.Digest, err) + } + if err := l.writeImageBlobs(childImg); err != nil { + return err + } + } + } + return nil +} + +// writeIndex writes all blobs for an image index (including all child platform images) and adds +// a descriptor entry to the OCI index with the given annotationRef and kind. 
+func (l *Layout) writeIndex(annotationRef gname.Reference, idx v1.ImageIndex, kind string) error { + if err := l.writeIndexBlobs(idx); err != nil { + return err + } + + raw, err := idx.RawManifest() + if err != nil { + return fmt.Errorf("getting index manifest: %w", err) + } + if err := l.writeBlobData(raw); err != nil { + return fmt.Errorf("writing index manifest blob: %w", err) + } + + mt, err := idx.MediaType() + if err != nil { + return fmt.Errorf("getting index media type: %w", err) + } + hash, err := idx.Digest() + if err != nil { + return fmt.Errorf("getting index digest: %w", err) + } + d, err := digest.Parse(hash.String()) + if err != nil { + return fmt.Errorf("parsing index digest: %w", err) + } + + desc := ocispec.Descriptor{ + MediaType: string(mt), + Digest: d, + Size: int64(len(raw)), + Annotations: map[string]string{ + consts.KindAnnotationName: kind, + ocispec.AnnotationRefName: strings.TrimPrefix(annotationRef.Name(), annotationRef.Context().RegistryStr()+"/"), + consts.ContainerdImageNameKey: annotationRef.Name(), + }, + } + return l.OCI.AddIndex(desc) +} + +// saveReferrers discovers and saves OCI 1.1 referrers for the image identified by ref/hash. +// This captures cosign v3 new-bundle-format signatures/attestations stored as OCI referrers +// (via the subject field) rather than the legacy sha256-.sig/.att/.sbom tag convention. +// go-containerregistry handles both the native referrers API and the tag-based fallback. +// Missing referrers and fetch errors are logged at debug level and silently skipped. 
+func (l *Layout) saveReferrers(ctx context.Context, ref gname.Reference, hash v1.Hash, alreadySaved map[string]bool, opts ...remote.Option) error { + log := zerolog.Ctx(ctx) + + imageDigestRef, err := gname.NewDigest(ref.Context().String() + "@" + hash.String()) + if err != nil { + log.Debug().Err(err).Msgf("saveReferrers: could not construct digest ref for %s", ref.Name()) + return nil + } + + idx, err := remote.Referrers(imageDigestRef, opts...) + if err != nil { + // Most registries that don't support the referrers API return 404; not an error. + log.Debug().Err(err).Msgf("no OCI referrers found for %s@%s", ref.Name(), hash) + return nil + } + + idxManifest, err := idx.IndexManifest() + if err != nil { + log.Debug().Err(err).Msgf("saveReferrers: could not read referrers index for %s", ref.Name()) + return nil + } + + for _, referrerDesc := range idxManifest.Manifests { + digestRef, err := gname.NewDigest(ref.Context().String() + "@" + referrerDesc.Digest.String()) + if err != nil { + log.Debug().Err(err).Msgf("saveReferrers: could not construct digest ref for referrer %s", referrerDesc.Digest) + continue + } + + img, err := remote.Image(digestRef, opts...) + if err != nil { + log.Debug().Err(err).Msgf("saveReferrers: could not fetch referrer manifest %s", referrerDesc.Digest) + continue + } + + // Skip referrers already saved via the cosign tag convention to avoid duplicates. + // Registries like Harbor expose the same manifest via both the .sig/.att/.sbom tags + // and the OCI Referrers API when the manifest carries a subject field. + if alreadySaved[referrerDesc.Digest.String()] { + log.Debug().Msgf("saveReferrers: skipping referrer %s (already saved via tag convention)", referrerDesc.Digest) + continue + } + + // Embed the referrer manifest digest in the kind annotation so that multiple + // referrers for the same base image each get a unique entry in the OCI index. 
+ kind := consts.KindAnnotationReferrers + "/" + referrerDesc.Digest.Hex + if err := l.writeImage(ref, img, kind, ""); err != nil { + return fmt.Errorf("saving OCI referrer %s for %s: %w", referrerDesc.Digest, ref.Name(), err) + } + log.Debug().Msgf("saved OCI referrer %s (%s) for %s", referrerDesc.Digest, string(referrerDesc.ArtifactType), ref.Name()) + } + return nil +} + +// saveRelatedArtifacts discovers and saves cosign-compatible signature, attestation, and SBOM +// artifacts for the image identified by ref/hash. Missing artifacts are silently skipped. +// Returns the set of manifest digest strings (e.g. "sha256:abc...") that were saved, so that +// saveReferrers can skip duplicates when a registry exposes the same manifest via both paths. +func (l *Layout) saveRelatedArtifacts(ctx context.Context, ref gname.Reference, hash v1.Hash, opts ...remote.Option) (map[string]bool, error) { + saved := make(map[string]bool) + + // Cosign tag convention: "sha256:hexvalue" → "sha256-hexvalue.sig" / ".att" / ".sbom" + tagPrefix := strings.ReplaceAll(hash.String(), ":", "-") + + related := []struct { + tag string + kind string + }{ + {tagPrefix + ".sig", consts.KindAnnotationSigs}, + {tagPrefix + ".att", consts.KindAnnotationAtts}, + {tagPrefix + ".sbom", consts.KindAnnotationSboms}, + } + + for _, r := range related { + artifactRef, err := gname.ParseReference(ref.Context().String() + ":" + r.tag) + if err != nil { + continue + } + img, err := remote.Image(artifactRef, opts...) + if err != nil { + // Artifact doesn't exist at this registry; skip silently. + continue + } + if err := l.writeImage(ref, img, r.kind, ""); err != nil { + return saved, fmt.Errorf("saving %s for %s: %w", r.kind, ref.Name(), err) + } + if d, err := img.Digest(); err == nil { + saved[d.String()] = true + } + } + return saved, nil +} + +// parsePlatform parses a platform string in "os/arch[/variant]" format into a v1.Platform. 
+func parsePlatform(s string) (v1.Platform, error) { + parts := strings.SplitN(s, "/", 3) + if len(parts) < 2 { + return v1.Platform{}, fmt.Errorf("invalid platform %q: expected os/arch[/variant]", s) + } + p := v1.Platform{OS: parts[0], Architecture: parts[1]} + if len(parts) == 3 { + p.Variant = parts[2] + } + return p, nil +} + // Flush is a fancy name for delete-all-the-things, in this case it's as trivial as deleting oci-layout content // // This can be a highly destructive operation if the store's directory happens to be inline with other non-store contents @@ -170,27 +497,217 @@ func (l *Layout) Flush(ctx context.Context) error { return nil } -// Copy will copy a given reference to a given target.Target +// Copy will copy a given reference to a given content.Target // -// This is essentially a wrapper around oras.Copy, but locked to this content store -func (l *Layout) Copy(ctx context.Context, ref string, to target.Target, toRef string) (ocispec.Descriptor, error) { - return oras.Copy(ctx, l.OCI, ref, to, toRef, - oras.WithAdditionalCachedMediaTypes(consts.DockerManifestSchema2, consts.DockerManifestListSchema2)) +// This is essentially a replacement for oras.Copy, custom implementation for content stores +func (l *Layout) Copy(ctx context.Context, ref string, to content.Target, toRef string) (ocispec.Descriptor, error) { + // Resolve the source descriptor + desc, err := l.OCI.Resolve(ctx, ref) + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to resolve reference: %w", err) + } + + // Get fetcher and pusher + fetcher, err := l.OCI.Fetcher(ctx, ref) + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to get fetcher: %w", err) + } + + pusher, err := to.Pusher(ctx, toRef) + if err != nil { + return ocispec.Descriptor{}, fmt.Errorf("failed to get pusher: %w", err) + } + + // Recursively copy the descriptor graph (matches oras.Copy behavior) + if err := l.copyDescriptorGraph(ctx, desc, fetcher, pusher); err != nil { + return 
ocispec.Descriptor{}, err + } + + return desc, nil } -// CopyAll performs bulk copy operations on the stores oci layout to a provided target.Target -func (l *Layout) CopyAll(ctx context.Context, to target.Target, toMapper func(string) (string, error)) ([]ocispec.Descriptor, error) { +// copyDescriptorGraph recursively copies a descriptor and all its referenced content +// This matches the behavior of oras.Copy by walking the entire descriptor graph +func (l *Layout) copyDescriptorGraph(ctx context.Context, desc ocispec.Descriptor, fetcher remotes.Fetcher, pusher remotes.Pusher) (err error) { + switch desc.MediaType { + case ocispec.MediaTypeImageManifest, consts.DockerManifestSchema2: + // Fetch and parse the manifest + rc, err := fetcher.Fetch(ctx, desc) + if err != nil { + return fmt.Errorf("failed to fetch manifest: %w", err) + } + defer func() { + if closeErr := rc.Close(); closeErr != nil && err == nil { + err = fmt.Errorf("failed to close manifest reader: %w", closeErr) + } + }() + + data, err := io.ReadAll(rc) + if err != nil { + return fmt.Errorf("failed to read manifest: %w", err) + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(data, &manifest); err != nil { + return fmt.Errorf("failed to unmarshal manifest: %w", err) + } + + // Copy config blob + if err := l.copyDescriptor(ctx, manifest.Config, fetcher, pusher); err != nil { + return fmt.Errorf("failed to copy config: %w", err) + } + + // Copy all layer blobs + for _, layer := range manifest.Layers { + if err := l.copyDescriptor(ctx, layer, fetcher, pusher); err != nil { + return fmt.Errorf("failed to copy layer: %w", err) + } + } + + // Push the manifest itself using the already-fetched data to avoid double-fetching + if err := l.pushData(ctx, desc, data, pusher); err != nil { + return fmt.Errorf("failed to push manifest: %w", err) + } + + case ocispec.MediaTypeImageIndex, consts.DockerManifestListSchema2: + // Fetch and parse the index + rc, err := fetcher.Fetch(ctx, desc) + if err != 
nil { + return fmt.Errorf("failed to fetch index: %w", err) + } + defer func() { + if closeErr := rc.Close(); closeErr != nil && err == nil { + err = fmt.Errorf("failed to close index reader: %w", closeErr) + } + }() + + data, err := io.ReadAll(rc) + if err != nil { + return fmt.Errorf("failed to read index: %w", err) + } + + var index ocispec.Index + if err := json.Unmarshal(data, &index); err != nil { + return fmt.Errorf("failed to unmarshal index: %w", err) + } + + // Recursively copy each child (could be manifest or nested index) + for _, child := range index.Manifests { + if err := l.copyDescriptorGraph(ctx, child, fetcher, pusher); err != nil { + return fmt.Errorf("failed to copy child: %w", err) + } + } + + // Push the index itself using the already-fetched data to avoid double-fetching + if err := l.pushData(ctx, desc, data, pusher); err != nil { + return fmt.Errorf("failed to push index: %w", err) + } + + default: + // For other types (config blobs, layers, etc.), just copy the blob + if err := l.copyDescriptor(ctx, desc, fetcher, pusher); err != nil { + return fmt.Errorf("failed to copy descriptor: %w", err) + } + } + + return nil +} + +// copyDescriptor copies a single descriptor from source to target +func (l *Layout) copyDescriptor(ctx context.Context, desc ocispec.Descriptor, fetcher remotes.Fetcher, pusher remotes.Pusher) (err error) { + // Fetch the content + rc, err := fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + defer func() { + if closeErr := rc.Close(); closeErr != nil && err == nil { + err = fmt.Errorf("failed to close reader: %w", closeErr) + } + }() + + // Get a writer from the pusher + writer, err := pusher.Push(ctx, desc) + if err != nil { + if errdefs.IsAlreadyExists(err) { + zerolog.Ctx(ctx).Debug().Msgf("existing blob: %s", desc.Digest) + return nil // content already present on remote + } + return err + } + defer func() { + if closeErr := writer.Close(); closeErr != nil && err == nil { + err = closeErr + } + }() + + // 
Copy the content + n, err := io.Copy(writer, rc) + if err != nil { + return err + } + + // Commit the written content with the expected digest + if err := writer.Commit(ctx, n, desc.Digest); err != nil { + return err + } + zerolog.Ctx(ctx).Debug().Msgf("pushed blob: %s", desc.Digest) + return nil +} + +// pushData pushes already-fetched data to the pusher without re-fetching. +// This is used when we've already read the data for parsing and want to avoid double-fetching. +func (l *Layout) pushData(ctx context.Context, desc ocispec.Descriptor, data []byte, pusher remotes.Pusher) (err error) { + // Get a writer from the pusher + writer, err := pusher.Push(ctx, desc) + if err != nil { + if errdefs.IsAlreadyExists(err) { + return nil // content already present on remote + } + return fmt.Errorf("failed to get writer: %w", err) + } + defer func() { + if closeErr := writer.Close(); closeErr != nil && err == nil { + err = fmt.Errorf("failed to close writer: %w", closeErr) + } + }() + + // Write the data using io.Copy to handle short writes properly + n, err := io.Copy(writer, bytes.NewReader(data)) + if err != nil { + return fmt.Errorf("failed to write data: %w", err) + } + + // Commit the written content with the expected digest + return writer.Commit(ctx, n, desc.Digest) +} + +// CopyAll performs bulk copy operations on the stores oci layout to a provided target +func (l *Layout) CopyAll(ctx context.Context, to content.Target, toMapper func(string) (string, error)) ([]ocispec.Descriptor, error) { var descs []ocispec.Descriptor err := l.OCI.Walk(func(reference string, desc ocispec.Descriptor) error { - toRef := "" + // Use the clean reference from annotations (without -kind suffix) as the base + // The reference parameter from Walk is the nameMap key with format "ref-kind", + // but we need the clean ref for the destination to avoid double-appending kind + baseRef := desc.Annotations[ocispec.AnnotationRefName] + if baseRef == "" { + return fmt.Errorf("descriptor %s 
missing required annotation %q", reference, ocispec.AnnotationRefName) + } + toRef := baseRef if toMapper != nil { - tr, err := toMapper(reference) + tr, err := toMapper(baseRef) if err != nil { return err } toRef = tr } + // Append the digest to help the target pusher identify the root descriptor + // Format: "reference@digest" allows the pusher to update its index.json + if desc.Digest.Validate() == nil { + toRef = fmt.Sprintf("%s@%s", toRef, desc.Digest) + } + desc, err := l.Copy(ctx, reference, to, toRef) if err != nil { return err @@ -236,11 +753,6 @@ func (l *Layout) writeLayer(layer v1.Layer) error { return err } - r, err := layer.Compressed() - if err != nil { - return err - } - dir := filepath.Join(l.Root, ocispec.ImageBlobsDir, d.Algorithm) if err := os.MkdirAll(dir, os.ModePerm); err != nil && !os.IsExist(err) { return err @@ -252,14 +764,29 @@ func (l *Layout) writeLayer(layer v1.Layer) error { return nil } + r, err := layer.Compressed() + if err != nil { + return err + } + defer r.Close() + w, err := os.Create(blobPath) if err != nil { return err } - defer w.Close() - _, err = io.Copy(w, r) - return err + _, copyErr := io.Copy(w, r) + if closeErr := w.Close(); closeErr != nil && copyErr == nil { + copyErr = closeErr + } + + // Remove a partially-written or corrupt blob on any failure so retries + // can attempt a fresh download rather than skipping the file. 
+ if copyErr != nil { + os.Remove(blobPath) + } + + return copyErr } // Remove artifact reference from the store @@ -280,7 +807,7 @@ func (l *Layout) CleanUp(ctx context.Context) (int, int64, error) { } var processManifest func(desc ocispec.Descriptor) error - processManifest = func(desc ocispec.Descriptor) error { + processManifest = func(desc ocispec.Descriptor) (err error) { if desc.Digest.Validate() != nil { return nil } @@ -293,7 +820,11 @@ func (l *Layout) CleanUp(ctx context.Context) (int, int64, error) { if err != nil { return nil // skip if can't be read } - defer rc.Close() + defer func() { + if closeErr := rc.Close(); closeErr != nil && err == nil { + err = closeErr + } + }() var manifest struct { Config struct { @@ -350,7 +881,7 @@ func (l *Layout) CleanUp(ctx context.Context) (int, int64, error) { } // read all entries - blobsPath := filepath.Join(l.Root, "blobs", "sha256") + blobsPath := filepath.Join(l.Root, ocispec.ImageBlobsDir, digest.Canonical.String()) entries, err := os.ReadDir(blobsPath) if err != nil { return 0, 0, fmt.Errorf("failed to read blobs directory: %w", err) diff --git a/pkg/store/store_test.go b/pkg/store/store_test.go index c7bcf76..eb0ec32 100644 --- a/pkg/store/store_test.go +++ b/pkg/store/store_test.go @@ -1,14 +1,32 @@ package store_test import ( + "bytes" "context" + "encoding/json" + "fmt" + "io" + "net/http/httptest" "os" + "path/filepath" + "strings" "testing" + ccontent "github.com/containerd/containerd/content" + gname "github.com/google/go-containerregistry/pkg/name" + "github.com/google/go-containerregistry/pkg/registry" v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/mutate" "github.com/google/go-containerregistry/pkg/v1/random" + "github.com/google/go-containerregistry/pkg/v1/remote" + "github.com/google/go-containerregistry/pkg/v1/static" + "github.com/google/go-containerregistry/pkg/v1/types" + 
"github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" "hauler.dev/go/hauler/pkg/artifacts" + "hauler.dev/go/hauler/pkg/consts" "hauler.dev/go/hauler/pkg/store" ) @@ -17,7 +35,7 @@ var ( root string ) -func TestLayout_AddOCI(t *testing.T) { +func TestLayout_AddArtifact(t *testing.T) { teardown := setup(t) defer teardown() @@ -46,16 +64,16 @@ func TestLayout_AddOCI(t *testing.T) { } moci := genArtifact(t, tt.args.ref) - got, err := s.AddOCI(ctx, moci, tt.args.ref) + got, err := s.AddArtifact(ctx, moci, tt.args.ref) if (err != nil) != tt.wantErr { - t.Errorf("AddOCI() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("AddArtifact() error = %v, wantErr %v", err, tt.wantErr) return } _ = got - _, err = s.AddOCI(ctx, moci, tt.args.ref) + _, err = s.AddArtifact(ctx, moci, tt.args.ref) if err != nil { - t.Errorf("AddOCI() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("AddArtifact() error = %v, wantErr %v", err, tt.wantErr) return } }) @@ -103,3 +121,669 @@ func genArtifact(t *testing.T, ref string) artifacts.OCI { img, } } + +// Mock fetcher/pusher for testing +type mockFetcher struct { + blobs map[digest.Digest][]byte +} + +func newMockFetcher() *mockFetcher { + return &mockFetcher{ + blobs: make(map[digest.Digest][]byte), + } +} + +func (m *mockFetcher) addBlob(data []byte) ocispec.Descriptor { + dgst := digest.FromBytes(data) + m.blobs[dgst] = data + return ocispec.Descriptor{ + MediaType: "application/octet-stream", + Digest: dgst, + Size: int64(len(data)), + } +} + +func (m *mockFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + data, ok := m.blobs[desc.Digest] + if !ok { + return nil, fmt.Errorf("blob not found: %s", desc.Digest) + } + return io.NopCloser(bytes.NewReader(data)), nil +} + +type mockPusher struct { + blobs map[digest.Digest][]byte +} + +func newMockPusher() *mockPusher { + return &mockPusher{ + blobs: make(map[digest.Digest][]byte), + } +} + +func (m 
*mockPusher) Push(ctx context.Context, desc ocispec.Descriptor) (ccontent.Writer, error) { + return &mockWriter{ + pusher: m, + desc: desc, + buf: &bytes.Buffer{}, + }, nil +} + +type mockWriter struct { + pusher *mockPusher + desc ocispec.Descriptor + buf *bytes.Buffer + closed bool +} + +func (m *mockWriter) Write(p []byte) (int, error) { + if m.closed { + return 0, fmt.Errorf("writer closed") + } + return m.buf.Write(p) +} + +func (m *mockWriter) Close() error { + m.closed = true + return nil +} + +func (m *mockWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...ccontent.Opt) error { + data := m.buf.Bytes() + if int64(len(data)) != size { + return fmt.Errorf("size mismatch: expected %d, got %d", size, len(data)) + } + dgst := digest.FromBytes(data) + if expected != "" && dgst != expected { + return fmt.Errorf("digest mismatch: expected %s, got %s", expected, dgst) + } + m.pusher.blobs[dgst] = data + return nil +} + +func (m *mockWriter) Digest() digest.Digest { + return digest.FromBytes(m.buf.Bytes()) +} + +func (m *mockWriter) Status() (ccontent.Status, error) { + return ccontent.Status{}, nil +} + +func (m *mockWriter) Truncate(size int64) error { + return fmt.Errorf("truncate not supported") +} + +// blobPath returns the expected filesystem path for a blob in an OCI layout store. +func blobPath(root string, d digest.Digest) string { + return filepath.Join(root, "blobs", d.Algorithm().String(), d.Encoded()) +} + +// findRefKey walks the store's index and returns the nameMap key for the first +// descriptor whose AnnotationRefName matches ref. 
+func findRefKey(t *testing.T, s *store.Layout, ref string) string { + t.Helper() + var key string + _ = s.OCI.Walk(func(reference string, desc ocispec.Descriptor) error { + if desc.Annotations[ocispec.AnnotationRefName] == ref && key == "" { + key = reference + } + return nil + }) + if key == "" { + t.Fatalf("reference %q not found in store", ref) + } + return key +} + +// findRefKeyByKind walks the store's index and returns the nameMap key for the +// descriptor whose AnnotationRefName matches ref and whose kind annotation matches kind. +func findRefKeyByKind(t *testing.T, s *store.Layout, ref, kind string) string { + t.Helper() + var key string + _ = s.OCI.Walk(func(reference string, desc ocispec.Descriptor) error { + if desc.Annotations[ocispec.AnnotationRefName] == ref && + desc.Annotations[consts.KindAnnotationName] == kind { + key = reference + } + return nil + }) + if key == "" { + t.Fatalf("reference %q with kind %q not found in store", ref, kind) + } + return key +} + +// readManifestBlob reads and parses an OCI manifest from the store's blob directory. +func readManifestBlob(t *testing.T, root string, d digest.Digest) ocispec.Manifest { + t.Helper() + data, err := os.ReadFile(blobPath(root, d)) + if err != nil { + t.Fatalf("read manifest blob %s: %v", d, err) + } + var m ocispec.Manifest + if err := json.Unmarshal(data, &m); err != nil { + t.Fatalf("unmarshal manifest: %v", err) + } + return m +} + +// TestCopyDescriptor verifies that copyDescriptor (exercised via Copy) transfers +// each individual blob — config and every layer — into the destination store's blob +// directory, and that a second Copy of the same content succeeds gracefully when +// blobs are already present (AlreadyExists path). 
+func TestCopyDescriptor(t *testing.T) { + teardown := setup(t) + defer teardown() + + srcRoot := t.TempDir() + src, err := store.NewLayout(srcRoot) + if err != nil { + t.Fatal(err) + } + + ref := "test/blob:v1" + // genArtifact creates random.Image(1024, 3): 1 config blob + 3 layer blobs. + manifestDesc, err := src.AddArtifact(ctx, genArtifact(t, ref), ref) + if err != nil { + t.Fatal(err) + } + if err := src.OCI.SaveIndex(); err != nil { + t.Fatal(err) + } + + refKey := findRefKey(t, src, ref) + manifest := readManifestBlob(t, srcRoot, manifestDesc.Digest) + + dstRoot := t.TempDir() + dst, err := store.NewLayout(dstRoot) + if err != nil { + t.Fatal(err) + } + + // First copy: should transfer all individual blobs via copyDescriptor. + gotDesc, err := src.Copy(ctx, refKey, dst.OCI, "test/blob:dst") + if err != nil { + t.Fatalf("Copy failed: %v", err) + } + if gotDesc.Digest != manifestDesc.Digest { + t.Errorf("returned descriptor digest mismatch: got %s, want %s", gotDesc.Digest, manifestDesc.Digest) + } + + // Verify the config blob is present in the destination. + if _, err := os.Stat(blobPath(dstRoot, manifest.Config.Digest)); err != nil { + t.Errorf("config blob missing in dest: %v", err) + } + + // Verify every layer blob is present in the destination. + for i, layer := range manifest.Layers { + if _, err := os.Stat(blobPath(dstRoot, layer.Digest)); err != nil { + t.Errorf("layer[%d] blob missing in dest: %v", i, err) + } + } + + // Verify the manifest blob itself was pushed. + if _, err := os.Stat(blobPath(dstRoot, manifestDesc.Digest)); err != nil { + t.Errorf("manifest blob missing in dest: %v", err) + } + + // Second copy: blobs already exist — AlreadyExists must be handled without error. 
+ gotDesc2, err := src.Copy(ctx, refKey, dst.OCI, "test/blob:dst2") + if err != nil { + t.Fatalf("second Copy failed (AlreadyExists should be a no-op): %v", err) + } + if gotDesc2.Digest != manifestDesc.Digest { + t.Errorf("second Copy digest mismatch: got %s, want %s", gotDesc2.Digest, manifestDesc.Digest) + } +} + +// TestCopyDescriptorGraph_Manifest verifies that copyDescriptorGraph reconstructs a +// complete manifest in the destination (config digest and each layer digest match the +// source), and returns an error when a required blob is absent from the source. +func TestCopyDescriptorGraph_Manifest(t *testing.T) { + teardown := setup(t) + defer teardown() + + srcRoot := t.TempDir() + src, err := store.NewLayout(srcRoot) + if err != nil { + t.Fatal(err) + } + + ref := "test/manifest:v1" + manifestDesc, err := src.AddArtifact(ctx, genArtifact(t, ref), ref) + if err != nil { + t.Fatal(err) + } + if err := src.OCI.SaveIndex(); err != nil { + t.Fatal(err) + } + + refKey := findRefKey(t, src, ref) + srcManifest := readManifestBlob(t, srcRoot, manifestDesc.Digest) + + // --- Happy path: all blobs present, manifest structure preserved --- + dstRoot := t.TempDir() + dst, err := store.NewLayout(dstRoot) + if err != nil { + t.Fatal(err) + } + + gotDesc, err := src.Copy(ctx, refKey, dst.OCI, "test/manifest:dst") + if err != nil { + t.Fatalf("Copy failed: %v", err) + } + + // Parse the manifest from the destination and compare structure with source. 
+ dstManifest := readManifestBlob(t, dstRoot, gotDesc.Digest) + if dstManifest.Config.Digest != srcManifest.Config.Digest { + t.Errorf("config digest mismatch: got %s, want %s", + dstManifest.Config.Digest, srcManifest.Config.Digest) + } + if len(dstManifest.Layers) != len(srcManifest.Layers) { + t.Fatalf("layer count mismatch: dst=%d src=%d", + len(dstManifest.Layers), len(srcManifest.Layers)) + } + for i, l := range srcManifest.Layers { + if dstManifest.Layers[i].Digest != l.Digest { + t.Errorf("layer[%d] digest mismatch: got %s, want %s", + i, dstManifest.Layers[i].Digest, l.Digest) + } + } + + // --- Error path: delete a layer blob from source, expect Copy to fail --- + if len(srcManifest.Layers) == 0 { + t.Skip("artifact has no layers; skipping missing-blob error path") + } + if err := os.Remove(blobPath(srcRoot, srcManifest.Layers[0].Digest)); err != nil { + t.Fatalf("could not remove layer blob to simulate corruption: %v", err) + } + + dst2Root := t.TempDir() + dst2, err := store.NewLayout(dst2Root) + if err != nil { + t.Fatal(err) + } + + _, err = src.Copy(ctx, refKey, dst2.OCI, "test/manifest:missing-blob") + if err == nil { + t.Error("expected Copy to fail when a source layer blob is missing, but it succeeded") + } +} + +// TestCopyDescriptorGraph_Index verifies that copyDescriptorGraph handles an OCI +// image index (multi-platform) by recursively copying all child manifests and their +// blobs into the destination store, and that the index blob itself is present. +func TestCopyDescriptorGraph_Index(t *testing.T) { + teardown := setup(t) + defer teardown() + + // Start an in-process OCI registry. + srv := httptest.NewServer(registry.New()) + t.Cleanup(srv.Close) + host := strings.TrimPrefix(srv.URL, "http://") + remoteOpts := []remote.Option{remote.WithTransport(srv.Client().Transport)} + + // Build a 2-platform image index. 
+ img1, err := random.Image(512, 2) + if err != nil { + t.Fatalf("random image (amd64): %v", err) + } + img2, err := random.Image(512, 2) + if err != nil { + t.Fatalf("random image (arm64): %v", err) + } + idx := mutate.AppendManifests( + empty.Index, + mutate.IndexAddendum{ + Add: img1, + Descriptor: v1.Descriptor{ + MediaType: types.OCIManifestSchema1, + Platform: &v1.Platform{OS: "linux", Architecture: "amd64"}, + }, + }, + mutate.IndexAddendum{ + Add: img2, + Descriptor: v1.Descriptor{ + MediaType: types.OCIManifestSchema1, + Platform: &v1.Platform{OS: "linux", Architecture: "arm64"}, + }, + }, + ) + + idxTag, err := gname.NewTag(host+"/test/multiarch:v1", gname.Insecure) + if err != nil { + t.Fatalf("new tag: %v", err) + } + if err := remote.WriteIndex(idxTag, idx, remoteOpts...); err != nil { + t.Fatalf("push index: %v", err) + } + + // Pull the index into a hauler store via AddImage. + srcRoot := t.TempDir() + src, err := store.NewLayout(srcRoot) + if err != nil { + t.Fatal(err) + } + if err := src.AddImage(ctx, idxTag.Name(), "", false, remoteOpts...); err != nil { + t.Fatalf("AddImage: %v", err) + } + if err := src.OCI.SaveIndex(); err != nil { + t.Fatal(err) + } + + // Locate the index descriptor (kind=imageIndex) in the source store. + refKey := findRefKeyByKind(t, src, "test/multiarch:v1", consts.KindAnnotationIndex) + + // Copy the entire index graph to a fresh destination store. + dstRoot := t.TempDir() + dst, err := store.NewLayout(dstRoot) + if err != nil { + t.Fatal(err) + } + gotDesc, err := src.Copy(ctx, refKey, dst.OCI, "test/multiarch:copied") + if err != nil { + t.Fatalf("Copy of image index failed: %v", err) + } + + // The index blob itself must be present in the destination. 
+ if _, err := os.Stat(blobPath(dstRoot, gotDesc.Digest)); err != nil { + t.Errorf("index manifest blob missing in dest: %v", err) + } + + // Parse the index from the source and verify every child manifest blob landed + // in the destination (exercising recursive copyDescriptorGraph for each child). + var ociIdx ocispec.Index + if err := json.Unmarshal(mustReadFile(t, blobPath(srcRoot, gotDesc.Digest)), &ociIdx); err != nil { + t.Fatalf("unmarshal index: %v", err) + } + if len(ociIdx.Manifests) < 2 { + t.Fatalf("expected ≥2 child manifests in index, got %d", len(ociIdx.Manifests)) + } + for i, child := range ociIdx.Manifests { + if _, err := os.Stat(blobPath(dstRoot, child.Digest)); err != nil { + t.Errorf("child manifest[%d] (platform=%v) blob missing in dest: %v", + i, child.Platform, err) + } + } +} + +// mustReadFile reads a file and fails the test on error. +func mustReadFile(t *testing.T, path string) []byte { + t.Helper() + data, err := os.ReadFile(path) + if err != nil { + t.Fatalf("read %s: %v", path, err) + } + return data +} + +// TestCopy_Integration tests the full Copy workflow including copyDescriptorGraph +func TestCopy_Integration(t *testing.T) { + teardown := setup(t) + defer teardown() + + // Create source store + sourceRoot, err := os.MkdirTemp("", "hauler-source") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(sourceRoot) + + sourceStore, err := store.NewLayout(sourceRoot) + if err != nil { + t.Fatal(err) + } + + // Add an artifact to source + ref := "test/image:v1" + artifact := genArtifact(t, ref) + _, err = sourceStore.AddArtifact(ctx, artifact, ref) + if err != nil { + t.Fatal(err) + } + + // Save the index to persist the reference + if err := sourceStore.OCI.SaveIndex(); err != nil { + t.Fatalf("Failed to save index: %v", err) + } + + // Find the actual reference key in the nameMap (includes kind suffix) + var sourceRefKey string + err = sourceStore.OCI.Walk(func(reference string, desc ocispec.Descriptor) error { + if 
desc.Annotations[ocispec.AnnotationRefName] == ref { + sourceRefKey = reference + } + return nil + }) + if err != nil { + t.Fatalf("Failed to walk source store: %v", err) + } + if sourceRefKey == "" { + t.Fatal("Failed to find reference in source store") + } + + // Create destination store + destRoot, err := os.MkdirTemp("", "hauler-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(destRoot) + + destStore, err := store.NewLayout(destRoot) + if err != nil { + t.Fatal(err) + } + + // Copy from source to destination + destRef := "test/image:copied" + desc, err := sourceStore.Copy(ctx, sourceRefKey, destStore.OCI, destRef) + if err != nil { + t.Fatalf("Copy failed: %v", err) + } + + // Copy doesn't automatically add to destination index for generic targets + // For OCI stores, we need to add the descriptor manually with the reference + desc.Annotations = map[string]string{ + ocispec.AnnotationRefName: destRef, + consts.KindAnnotationName: consts.KindAnnotationImage, + } + if err := destStore.OCI.AddIndex(desc); err != nil { + t.Fatalf("Failed to add descriptor to destination index: %v", err) + } + + // Verify the descriptor was copied + if desc.Digest == "" { + t.Error("Expected non-empty digest") + } + + // Find the copied reference in destination + var foundInDest bool + var destDesc ocispec.Descriptor + err = destStore.OCI.Walk(func(reference string, d ocispec.Descriptor) error { + if d.Digest == desc.Digest { + foundInDest = true + destDesc = d + } + return nil + }) + if err != nil { + t.Fatalf("Failed to walk destination store: %v", err) + } + + if !foundInDest { + t.Error("Copied descriptor not found in destination store") + } + + if destDesc.Digest != desc.Digest { + t.Errorf("Digest mismatch: got %s, want %s", destDesc.Digest, desc.Digest) + } +} + +// TestCopy_ErrorHandling tests error cases +func TestCopy_ErrorHandling(t *testing.T) { + teardown := setup(t) + defer teardown() + + sourceRoot, err := os.MkdirTemp("", "hauler-source") + if err != 
nil { + t.Fatal(err) + } + defer os.RemoveAll(sourceRoot) + + sourceStore, err := store.NewLayout(sourceRoot) + if err != nil { + t.Fatal(err) + } + + destRoot, err := os.MkdirTemp("", "hauler-dest") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(destRoot) + + destStore, err := store.NewLayout(destRoot) + if err != nil { + t.Fatal(err) + } + + // Test copying non-existent reference + _, err = sourceStore.Copy(ctx, "nonexistent:tag", destStore.OCI, "dest:tag") + if err == nil { + t.Error("Expected error when copying non-existent reference") + } +} + +// TestCopy_DockerFormats tests copying Docker manifest formats +func TestCopy_DockerFormats(t *testing.T) { + // This test verifies that Docker format media types are recognized + // The actual copying is tested in the integration test + if consts.DockerManifestSchema2 == "" { + t.Error("DockerManifestSchema2 constant should not be empty") + } + t.Skip("Docker format copying is tested via integration tests") +} + +// TestCopy_MultiPlatform tests copying multi-platform images with manifest lists +func TestCopy_MultiPlatform(t *testing.T) { + teardown := setup(t) + defer teardown() + + sourceRoot, err := os.MkdirTemp("", "hauler-source") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(sourceRoot) + + // This test would require creating a multi-platform image + // which is more complex - marking as future enhancement + t.Skip("Multi-platform image test requires additional setup") +} + +// TestAddImage_OCI11Referrers verifies that AddImage captures OCI 1.1 referrers +// (cosign v3 new-bundle-format) stored via the subject field rather than the legacy +// sha256-.sig/.att/.sbom tag convention. +// +// The test: +// 1. Starts an in-process OCI 1.1–capable registry (go-containerregistry/pkg/registry) +// 2. Pushes a random base image to it +// 3. 
Builds a synthetic cosign v3-style Sigstore bundle referrer manifest (with a +// "subject" field pointing at the base image) and pushes it so the registry +// registers it in the referrers index automatically +// 4. Calls store.AddImage and then walks the OCI layout to confirm that a +// KindAnnotationReferrers-prefixed entry was saved +func TestAddImage_OCI11Referrers(t *testing.T) { + // 1. Start an in-process OCI 1.1 registry. + srv := httptest.NewServer(registry.New()) + t.Cleanup(srv.Close) + host := strings.TrimPrefix(srv.URL, "http://") + + remoteOpts := []remote.Option{ + remote.WithTransport(srv.Client().Transport), + } + + // 2. Push a random base image. + baseTag, err := gname.NewTag(host+"/test/image:v1", gname.Insecure) + if err != nil { + t.Fatalf("new tag: %v", err) + } + baseImg, err := random.Image(512, 2) + if err != nil { + t.Fatalf("random image: %v", err) + } + if err := remote.Write(baseTag, baseImg, remoteOpts...); err != nil { + t.Fatalf("push base image: %v", err) + } + + // Build the v1.Descriptor for the base image so we can set it as the referrer subject. + baseHash, err := baseImg.Digest() + if err != nil { + t.Fatalf("base image digest: %v", err) + } + baseRawManifest, err := baseImg.RawManifest() + if err != nil { + t.Fatalf("base image raw manifest: %v", err) + } + baseMT, err := baseImg.MediaType() + if err != nil { + t.Fatalf("base image media type: %v", err) + } + baseDesc := v1.Descriptor{ + MediaType: baseMT, + Digest: baseHash, + Size: int64(len(baseRawManifest)), + } + + // 3. Build a synthetic cosign v3 Sigstore bundle referrer. + // + // Real cosign new-bundle-format: artifactType=application/vnd.dev.sigstore.bundle.v0.3+json, + // config.mediaType=application/vnd.oci.empty.v1+json, single layer containing the bundle JSON, + // and a "subject" field pointing at the base image digest. 
+ bundleJSON := []byte(`{"mediaType":"application/vnd.dev.sigstore.bundle.v0.3+json",` + + `"verificationMaterial":{},"messageSignature":{"messageDigest":` + + `{"algorithm":"SHA2_256","digest":"AAAA"},"signature":"AAAA"}}`) + bundleLayer := static.NewLayer(bundleJSON, types.MediaType(consts.SigstoreBundleMediaType)) + + referrerImg, err := mutate.AppendLayers(empty.Image, bundleLayer) + if err != nil { + t.Fatalf("append bundle layer: %v", err) + } + referrerImg = mutate.MediaType(referrerImg, types.OCIManifestSchema1) + referrerImg = mutate.ConfigMediaType(referrerImg, types.MediaType(consts.OCIEmptyConfigMediaType)) + referrerImg = mutate.Subject(referrerImg, baseDesc).(v1.Image) + + // Push the referrer under an arbitrary tag; the in-process registry auto-wires the + // subject field and makes the manifest discoverable via GET /v2/.../referrers/. + referrerTag, err := gname.NewTag(host+"/test/image:bundle-referrer", gname.Insecure) + if err != nil { + t.Fatalf("referrer tag: %v", err) + } + if err := remote.Write(referrerTag, referrerImg, remoteOpts...); err != nil { + t.Fatalf("push referrer: %v", err) + } + + // 4. Let hauler add the base image (which should also fetch its OCI referrers). + storeRoot := t.TempDir() + s, err := store.NewLayout(storeRoot) + if err != nil { + t.Fatalf("new layout: %v", err) + } + if err := s.AddImage(context.Background(), baseTag.Name(), "", false, remoteOpts...); err != nil { + t.Fatalf("AddImage: %v", err) + } + + // 5. Walk the store and verify that at least one referrer entry was captured. 
+ var referrerCount int + if err := s.Walk(func(_ string, desc ocispec.Descriptor) error { + if strings.HasPrefix(desc.Annotations[consts.KindAnnotationName], consts.KindAnnotationReferrers) { + referrerCount++ + } + return nil + }); err != nil { + t.Fatalf("Walk: %v", err) + } + + if referrerCount == 0 { + t.Fatal("expected at least one OCI referrer entry in the store, got none") + } + t.Logf("captured %d OCI referrer(s) for %s", referrerCount, baseTag.Name()) +} diff --git a/testdata/chart-with-file-dependency-chart-1.0.0.tgz b/testdata/chart-with-file-dependency-chart-1.0.0.tgz index 30d96ee..76f8107 100644 Binary files a/testdata/chart-with-file-dependency-chart-1.0.0.tgz and b/testdata/chart-with-file-dependency-chart-1.0.0.tgz differ diff --git a/testdata/hauler-manifest-pipeline.yaml b/testdata/hauler-manifest-pipeline.yaml index dc1051d..35ffcb7 100755 --- a/testdata/hauler-manifest-pipeline.yaml +++ b/testdata/hauler-manifest-pipeline.yaml @@ -1,4 +1,5 @@ -# v1 manifests +# hauler manifests +# api version of v1 apiVersion: content.hauler.cattle.io/v1 kind: Images metadata: @@ -9,6 +10,9 @@ spec: - name: ghcr.io/hauler-dev/library/busybox:stable platform: linux/amd64 - name: gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5 + - name: ghcr.io/kubewarden/audit-scanner:v1.30.0-rc1 + certificate-identity-regexp: https://github.com/kubewarden/audit-scanner/.github/workflows/release.yml@refs/tags/v1.30.0-rc1 + certificate-oidc-issuer: https://token.actions.githubusercontent.com --- apiVersion: content.hauler.cattle.io/v1 kind: Charts @@ -50,53 +54,3 @@ spec: - path: testdata/hauler-manifest.yaml - path: testdata/hauler-manifest.yaml name: hauler-manifest-local.yaml ---- -# v1alpha1 manifests -apiVersion: content.hauler.cattle.io/v1alpha1 -kind: Images -metadata: - name: hauler-content-images-example -spec: - images: - - name: ghcr.io/hauler-dev/library/busybox - - name: ghcr.io/hauler-dev/library/busybox:stable - 
platform: linux/amd64 - - name: gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5 ---- -apiVersion: content.hauler.cattle.io/v1alpha1 -kind: Charts -metadata: - name: hauler-content-charts-example -spec: - charts: - - name: rancher - repoURL: https://releases.rancher.com/server-charts/stable - - name: rancher - repoURL: https://releases.rancher.com/server-charts/stable - version: 2.8.4 - - name: rancher - repoURL: https://releases.rancher.com/server-charts/stable - version: 2.8.3 - - name: hauler-helm - repoURL: oci://ghcr.io/hauler-dev - - name: hauler-helm - repoURL: oci://ghcr.io/hauler-dev - version: 1.0.6 - - name: hauler-helm - repoURL: oci://ghcr.io/hauler-dev - version: 1.0.4 - - name: rancher-cluster-templates-0.5.2.tgz - repoURL: testdata ---- -apiVersion: content.hauler.cattle.io/v1alpha1 -kind: Files -metadata: - name: hauler-content-files-example -spec: - files: - - path: https://get.rke2.io/install.sh - - path: https://get.rke2.io/install.sh - name: rke2-install.sh - - path: testdata/hauler-manifest.yaml - - path: testdata/hauler-manifest.yaml - name: hauler-manifest-local.yaml diff --git a/testdata/hauler-manifest.yaml b/testdata/hauler-manifest.yaml index 4c7d013..24432ac 100755 --- a/testdata/hauler-manifest.yaml +++ b/testdata/hauler-manifest.yaml @@ -1,4 +1,5 @@ -# v1 manifests +# hauler manifest +# api version of v1 apiVersion: content.hauler.cattle.io/v1 kind: Images metadata: @@ -9,6 +10,9 @@ spec: - name: ghcr.io/hauler-dev/library/busybox:stable platform: linux/amd64 - name: gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5 + - name: ghcr.io/kubewarden/audit-scanner:v1.30.0-rc1 + certificate-identity-regexp: https://github.com/kubewarden/audit-scanner/.github/workflows/release.yml@refs/tags/v1.30.0-rc1 + certificate-oidc-issuer: https://token.actions.githubusercontent.com --- apiVersion: content.hauler.cattle.io/v1 kind: Charts @@ -31,37 +35,3 @@ spec: 
- path: https://get.rke2.io name: install.sh - path: testdata/hauler-manifest.yaml ---- -# v1alpha1 manifests -apiVersion: content.hauler.cattle.io/v1alpha1 -kind: Images -metadata: - name: hauler-content-images-example -spec: - images: - - name: ghcr.io/hauler-dev/library/busybox - - name: ghcr.io/hauler-dev/library/busybox:stable - platform: linux/amd64 - - name: gcr.io/distroless/base@sha256:7fa7445dfbebae4f4b7ab0e6ef99276e96075ae42584af6286ba080750d6dfe5 ---- -apiVersion: content.hauler.cattle.io/v1alpha1 -kind: Charts -metadata: - name: hauler-content-charts-example -spec: - charts: - - name: rancher - repoURL: https://releases.rancher.com/server-charts/stable - version: 2.8.5 - - name: hauler-helm - repoURL: oci://ghcr.io/hauler-dev ---- -apiVersion: content.hauler.cattle.io/v1alpha1 -kind: Files -metadata: - name: hauler-content-files-example -spec: - files: - - path: https://get.rke2.io - name: install.sh - - path: testdata/hauler-manifest.yaml diff --git a/testdata/images.txt b/testdata/images.txt new file mode 100644 index 0000000..239d922 --- /dev/null +++ b/testdata/images.txt @@ -0,0 +1,4 @@ +# hauler image list +# one image reference per line; blank lines and comments are ignored +ghcr.io/hauler-dev/library/busybox +ghcr.io/hauler-dev/library/busybox:stable